/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
30
31#include <arch.h>
32#include <runtime_svc.h>
Jeenu Viswambharancaa84932014-02-06 10:36:15 +000033#include <platform.h>
34#include <context.h>
35#include "cm_macros.S"
Achin Gupta4f6ad662013-10-25 09:08:21 +010036
37 .globl runtime_exceptions
Jeenu Viswambharancaa84932014-02-06 10:36:15 +000038 .globl el3_exit
39 .globl get_exception_stack
Achin Gupta4f6ad662013-10-25 09:08:21 +010040
Achin Guptab739f222014-01-18 16:50:09 +000041 .section .vectors, "ax"; .align 11
42
Achin Gupta4f6ad662013-10-25 09:08:21 +010043 .align 7
44runtime_exceptions:
45 /* -----------------------------------------------------
46 * Current EL with _sp_el0 : 0x0 - 0x180
47 * -----------------------------------------------------
48 */
49sync_exception_sp_el0:
Jeenu Viswambharancaa84932014-02-06 10:36:15 +000050 /* -----------------------------------------------------
51 * We don't expect any synchronous exceptions from EL3
52 * -----------------------------------------------------
53 */
54 wfi
55 b sync_exception_sp_el0
Achin Gupta4f6ad662013-10-25 09:08:21 +010056
57 .align 7
Jeenu Viswambharancaa84932014-02-06 10:36:15 +000058 /* -----------------------------------------------------
59 * EL3 code is non-reentrant. Any asynchronous exception
60 * is a serious error. Loop infinitely.
61 * -----------------------------------------------------
62 */
Achin Gupta4f6ad662013-10-25 09:08:21 +010063irq_sp_el0:
Jeenu Viswambharancaa84932014-02-06 10:36:15 +000064 handle_async_exception IRQ_SP_EL0
65 b irq_sp_el0
Achin Gupta4f6ad662013-10-25 09:08:21 +010066
67 .align 7
68fiq_sp_el0:
Jeenu Viswambharancaa84932014-02-06 10:36:15 +000069 handle_async_exception FIQ_SP_EL0
70 b fiq_sp_el0
Achin Gupta4f6ad662013-10-25 09:08:21 +010071
72 .align 7
73serror_sp_el0:
Jeenu Viswambharancaa84932014-02-06 10:36:15 +000074 handle_async_exception SERROR_SP_EL0
75 b serror_sp_el0
Achin Gupta4f6ad662013-10-25 09:08:21 +010076
77 /* -----------------------------------------------------
78 * Current EL with SPx: 0x200 - 0x380
79 * -----------------------------------------------------
80 */
81 .align 7
82sync_exception_sp_elx:
Jeenu Viswambharancaa84932014-02-06 10:36:15 +000083 /* -----------------------------------------------------
84 * This exception will trigger if anything went wrong
85 * during a previous exception entry or exit or while
86 * handling an earlier unexpected synchronous exception.
87 * In any case we cannot rely on SP_EL3. Switching to a
88 * known safe area of memory will corrupt at least a
89 * single register. It is best to enter wfi in loop as
90 * that will preserve the system state for analysis
91 * through a debugger later.
92 * -----------------------------------------------------
93 */
94 wfi
95 b sync_exception_sp_elx
Achin Gupta4f6ad662013-10-25 09:08:21 +010096
Jeenu Viswambharancaa84932014-02-06 10:36:15 +000097 /* -----------------------------------------------------
98 * As mentioned in the previous comment, all bets are
99 * off if SP_EL3 cannot be relied upon. Report their
100 * occurrence.
101 * -----------------------------------------------------
102 */
Achin Gupta4f6ad662013-10-25 09:08:21 +0100103 .align 7
104irq_sp_elx:
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000105 b irq_sp_elx
Achin Gupta4f6ad662013-10-25 09:08:21 +0100106 .align 7
107fiq_sp_elx:
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000108 b fiq_sp_elx
Achin Gupta4f6ad662013-10-25 09:08:21 +0100109 .align 7
110serror_sp_elx:
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000111 b serror_sp_elx
Achin Gupta4f6ad662013-10-25 09:08:21 +0100112
113 /* -----------------------------------------------------
114 * Lower EL using AArch64 : 0x400 - 0x580
115 * -----------------------------------------------------
116 */
117 .align 7
118sync_exception_aarch64:
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000119 /* -----------------------------------------------------
120 * This exception vector will be the entry point for
121 * SMCs and traps that are unhandled at lower ELs most
122 * commonly. SP_EL3 should point to a valid cpu context
123 * where the general purpose and system register state
124 * can be saved.
125 * -----------------------------------------------------
126 */
127 handle_sync_exception
Achin Gupta4f6ad662013-10-25 09:08:21 +0100128
129 .align 7
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000130 /* -----------------------------------------------------
131 * Asynchronous exceptions from lower ELs are not
132 * currently supported. Report their occurrence.
133 * -----------------------------------------------------
134 */
Achin Gupta4f6ad662013-10-25 09:08:21 +0100135irq_aarch64:
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000136 handle_async_exception IRQ_AARCH64
137 b irq_aarch64
Achin Gupta4f6ad662013-10-25 09:08:21 +0100138
139 .align 7
140fiq_aarch64:
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000141 handle_async_exception FIQ_AARCH64
142 b fiq_aarch64
Achin Gupta4f6ad662013-10-25 09:08:21 +0100143
144 .align 7
145serror_aarch64:
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000146 handle_async_exception SERROR_AARCH64
147 b serror_aarch64
Achin Gupta4f6ad662013-10-25 09:08:21 +0100148
149 /* -----------------------------------------------------
150 * Lower EL using AArch32 : 0x600 - 0x780
151 * -----------------------------------------------------
152 */
153 .align 7
154sync_exception_aarch32:
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000155 /* -----------------------------------------------------
156 * This exception vector will be the entry point for
157 * SMCs and traps that are unhandled at lower ELs most
158 * commonly. SP_EL3 should point to a valid cpu context
159 * where the general purpose and system register state
160 * can be saved.
161 * -----------------------------------------------------
162 */
163 handle_sync_exception
Achin Gupta4f6ad662013-10-25 09:08:21 +0100164
165 .align 7
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000166 /* -----------------------------------------------------
167 * Asynchronous exceptions from lower ELs are not
168 * currently supported. Report their occurrence.
169 * -----------------------------------------------------
170 */
Achin Gupta4f6ad662013-10-25 09:08:21 +0100171irq_aarch32:
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000172 handle_async_exception IRQ_AARCH32
173 b irq_aarch32
Achin Gupta4f6ad662013-10-25 09:08:21 +0100174
175 .align 7
176fiq_aarch32:
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000177 handle_async_exception FIQ_AARCH32
178 b fiq_aarch32
Achin Gupta4f6ad662013-10-25 09:08:21 +0100179
180 .align 7
181serror_aarch32:
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000182 handle_async_exception SERROR_AARCH32
183 b serror_aarch32
184 .align 7
185
186 .section .text, "ax"
187 /* -----------------------------------------------------
188 * The following code handles secure monitor calls.
189 * Depending upon the execution state from where the SMC
190 * has been invoked, it frees some general purpose
191 * registers to perform the remaining tasks. They
192 * involve finding the runtime service handler that is
193 * the target of the SMC & switching to runtime stacks
194 * (SP_EL0) before calling the handler.
195 *
196 * Note that x30 has been explicitly saved and can be
197 * used here
198 * -----------------------------------------------------
199 */
200smc_handler32:
201 /* Check whether aarch32 issued an SMC64 */
202 tbnz x0, #FUNCID_CC_SHIFT, smc_prohibited
203
204 /* -----------------------------------------------------
205 * Since we're are coming from aarch32, x8-x18 need to
206 * be saved as per SMC32 calling convention. If a lower
207 * EL in aarch64 is making an SMC32 call then it must
208 * have saved x8-x17 already therein.
209 * -----------------------------------------------------
210 */
211 stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
212 stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
213 stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
214 stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
215 stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
216
217 /* x4-x7, x18, sp_el0 are saved below */
218
219smc_handler64:
220 /* -----------------------------------------------------
221 * Populate the parameters for the SMC handler. We
222 * already have x0-x4 in place. x5 will point to a
223 * cookie (not used now). x6 will point to the context
224 * structure (SP_EL3) and x7 will contain flags we need
225 * to pass to the handler Hence save x5-x7. Note that x4
226 * only needs to be preserved for AArch32 callers but we
227 * do it for AArch64 callers as well for convenience
228 * -----------------------------------------------------
229 */
230 stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
231 stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
232
233 mov x5, xzr
234 mov x6, sp
235
236 /* Get the unique owning entity number */
237 ubfx x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
238 ubfx x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
239 orr x16, x16, x15, lsl #FUNCID_OEN_WIDTH
240
241 adr x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
242
243 /* Load descriptor index from array of indices */
244 adr x14, rt_svc_descs_indices
245 ldrb w15, [x14, x16]
246
247 /* Save x18 and SP_EL0 */
248 mrs x17, sp_el0
249 stp x18, x17, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
250
251 /* -----------------------------------------------------
252 * Restore the saved C runtime stack value which will
253 * become the new SP_EL0 i.e. EL3 runtime stack. It was
254 * saved in the 'cpu_context' structure prior to the last
255 * ERET from EL3.
256 * -----------------------------------------------------
257 */
258 ldr x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
259
260 /*
261 * Any index greater than 127 is invalid. Check bit 7 for
262 * a valid index
263 */
264 tbnz w15, 7, smc_unknown
265
266 /* Switch to SP_EL0 */
267 msr spsel, #0
268
269 /* -----------------------------------------------------
270 * Get the descriptor using the index
271 * x11 = (base + off), x15 = index
272 *
273 * handler = (base + off) + (index << log2(size))
274 * -----------------------------------------------------
275 */
276 lsl w10, w15, #RT_SVC_SIZE_LOG2
277 ldr x15, [x11, w10, uxtw]
278
279 /* -----------------------------------------------------
280 * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there
281 * is a world switch during SMC handling.
282 * TODO: Revisit if all system registers can be saved
283 * later.
284 * -----------------------------------------------------
285 */
286 mrs x16, spsr_el3
287 mrs x17, elr_el3
288 mrs x18, scr_el3
289 stp x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
290 stp x18, xzr, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
291
292 /* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
293 bfi x7, x18, #0, #1
294
295 mov sp, x12
296
297 /* -----------------------------------------------------
298 * Call the Secure Monitor Call handler and then drop
299 * directly into el3_exit() which will program any
300 * remaining architectural state prior to issuing the
301 * ERET to the desired lower EL.
302 * -----------------------------------------------------
303 */
304#if DEBUG
305 cbz x15, rt_svc_fw_critical_error
306#endif
307 blr x15
308
309 /* -----------------------------------------------------
310 * This routine assumes that the SP_EL3 is pointing to
311 * a valid context structure from where the gp regs and
312 * other special registers can be retrieved.
313 * -----------------------------------------------------
314 */
315el3_exit: ; .type el3_exit, %function
316 /* -----------------------------------------------------
317 * Save the current SP_EL0 i.e. the EL3 runtime stack
318 * which will be used for handling the next SMC. Then
319 * switch to SP_EL3
320 * -----------------------------------------------------
321 */
322 mov x17, sp
323 msr spsel, #1
324 str x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
325
326 /* -----------------------------------------------------
327 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
328 * -----------------------------------------------------
329 */
330 ldp x18, xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
331 ldp x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
332 msr scr_el3, x18
333 msr spsr_el3, x16
334 msr elr_el3, x17
335
336 /* Restore saved general purpose registers and return */
337 bl restore_scratch_registers
338 ldp x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
Achin Gupta4f6ad662013-10-25 09:08:21 +0100339 eret
340
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000341smc_unknown:
342 /*
343 * Here we restore x4-x18 regardless of where we came from. AArch32
344 * callers will find the registers contents unchanged, but AArch64
345 * callers will find the registers modified (with stale earlier NS
346 * content). Either way, we aren't leaking any secure information
347 * through them
348 */
349 bl restore_scratch_registers_callee
350
351smc_prohibited:
352 ldp x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
353 mov w0, #SMC_UNK
354 eret
355
356rt_svc_fw_critical_error:
357 b rt_svc_fw_critical_error
358
359 /* -----------------------------------------------------
360 * The following functions are used to saved and restore
361 * all the caller saved registers as per the aapcs_64.
362 * These are not macros to ensure their invocation fits
363 * within the 32 instructions per exception vector.
364 * -----------------------------------------------------
365 */
366save_scratch_registers: ; .type save_scratch_registers, %function
367 stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
368 stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
369 stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
370 stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
371 stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
372 stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
373 stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
374 stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
375 stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
376 mrs x17, sp_el0
377 stp x18, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
378 ret
379
380restore_scratch_registers: ; .type restore_scratch_registers, %function
381 ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
382 ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
383
384restore_scratch_registers_callee:
385 ldp x18, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
386
387 ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
388 ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
389 ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
390 ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
391 ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
392 ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
393
394 msr sp_el0, x17
395 ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
396 ret
397
398 /* -----------------------------------------------------
399 * 256 bytes of exception stack for each cpu
400 * -----------------------------------------------------
401 */
402#if DEBUG
403#define PCPU_EXCEPTION_STACK_SIZE 0x300
404#else
405#define PCPU_EXCEPTION_STACK_SIZE 0x100
406#endif
407 /* -----------------------------------------------------
408 * void get_exception_stack (uint64_t mpidr) : This
409 * function is used to allocate a small stack for
410 * reporting unhandled exceptions
411 * -----------------------------------------------------
412 */
413get_exception_stack: ; .type get_exception_stack, %function
414 mov x10, x30 // lr
415 bl platform_get_core_pos
416 add x0, x0, #1
417 mov x1, #PCPU_EXCEPTION_STACK_SIZE
418 mul x0, x0, x1
419 ldr x1, =pcpu_exception_stack
420 add x0, x1, x0
421 ret x10
422
423 /* -----------------------------------------------------
424 * Per-cpu exception stacks in normal memory.
425 * -----------------------------------------------------
426 */
427 .section data, "aw", %nobits; .align 6
428
429pcpu_exception_stack:
430 /* Zero fill */
431 .space (PLATFORM_CORE_COUNT * PCPU_EXCEPTION_STACK_SIZE), 0
Achin Gupta4f6ad662013-10-25 09:08:21 +0100432