blob: 494ccd79723b1352f8de2e44c5dd1a9f613d9268 [file] [log] [blame]
/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <context.h>
#include <cpu_data.h>
#include <ea_handle.h>
#include <interrupt_mgmt.h>
#include <platform_def.h>
#include <runtime_svc.h>
#include <smccc.h>

	/* Export the EL3 vector table base and each individual vector entry */
	.globl	runtime_exceptions

	/* Exceptions taken from EL3 while on SP_EL0 (runtime stack) */
	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	/* Exceptions taken from EL3 while on SP_EL3 (context pointer) */
	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	/* Exceptions taken from a lower EL executing in AArch64 */
	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	/* Exceptions taken from a lower EL executing in AArch32 */
	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32
	/*
	 * Handle External Abort by delegating to the platform's EA handler.
	 * Once the platform handler returns, the macro exits EL3 and returns to
	 * where the abort was taken from.
	 *
	 * This macro assumes that x30 is available for use.
	 *
	 * 'abort_type' is a constant passed to the platform handler, indicating
	 * the cause of the External Abort.
	 */
	.macro handle_ea abort_type
	/* Save GP registers (delegate_ea requires all GP regs to be saved) */
	bl	save_gp_registers

	/* Setup exception class and syndrome arguments for platform handler */
	mov	x0, \abort_type
	mrs	x1, esr_el3

	/*
	 * Point LR at el3_exit so that delegate_ea's final 'ret' drops
	 * straight into the standard EL3 exit path.
	 */
	adr	x30, el3_exit
	b	delegate_ea
	.endm

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions are supported; sync External Aborts from lower
	 * ELs are delegated to the platform EA handler. Anything else is
	 * reported as unhandled.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* Stash x30 in the context so it can be used as scratch below */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 * x29 is borrowed for the tpidr_el3 load and restored from context.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

	/* Extract the Exception Class from the syndrome register */
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	smc_handler64

	/* Check for I/D aborts from lower EL */
	cmp	x30, #EC_IABORT_LOWER_EL
	b.eq	1f

	cmp	x30, #EC_DABORT_LOWER_EL
	b.ne	2f

1:
	/* Test for EA bit in the instruction syndrome */
	mrs	x30, esr_el3
	tbz	x30, #ESR_ISS_EABORT_EA_BIT, 2f
	handle_ea #ERROR_EA_SYNC

2:
	/* Other kinds of synchronous exceptions are not handled */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	report_unhandled_exception
	.endm


	/* ---------------------------------------------------------------------
	 * This macro handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
	 * interrupts.
	 *
	 * Register usage across the handler call:
	 *   x20 = saved SP_EL3 (context pointer)
	 *   x21 = registered interrupt type handler
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_interrupt_exception label
	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	bl	save_gp_registers

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit_\label

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be 'cause of the following conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit_\label
	mov	x21, x0

	/* Interrupt id is not fetched here; handlers query the GIC themselves */
	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit_\label:
	/* Return from exception, possibly in a different security state */
	b	el3_exit

	.endm


vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
	check_vector_size sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
	check_vector_size irq_sp_el0


vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
	check_vector_size fiq_sp_el0


vector_entry serror_sp_el0
	b	report_unhandled_exception
	check_vector_size serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
	check_vector_size sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size fiq_sp_elx

vector_entry serror_sp_elx
	b	report_unhandled_exception
	check_vector_size serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector will be the entry point for SMCs and traps
	 * that are unhandled at lower ELs most commonly. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	handle_sync_exception
	check_vector_size sync_exception_aarch64

vector_entry irq_aarch64
	handle_interrupt_exception irq_aarch64
	check_vector_size irq_aarch64

vector_entry fiq_aarch64
	handle_interrupt_exception fiq_aarch64
	check_vector_size fiq_aarch64

vector_entry serror_aarch64
	/* Unmask SError (aborts) while handling this one */
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	handle_ea #ERROR_EA_ASYNC
	check_vector_size serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector will be the entry point for SMCs and traps
	 * that are unhandled at lower ELs most commonly. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	handle_sync_exception
	check_vector_size sync_exception_aarch32

vector_entry irq_aarch32
	handle_interrupt_exception irq_aarch32
	check_vector_size irq_aarch32

vector_entry fiq_aarch32
	handle_interrupt_exception fiq_aarch32
	check_vector_size fiq_aarch32

vector_entry serror_aarch32
	/* Unmask SError (aborts) while handling this one */
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	handle_ea #ERROR_EA_ASYNC
	check_vector_size serror_aarch32


	/* ---------------------------------------------------------------------
	 * This macro takes an argument in x16 that is the index in the
	 * 'rt_svc_descs_indices' array, checks that the value in the array is
	 * valid, and loads in x15 the pointer to the handler of that service.
	 *
	 * Clobbers: x10, x11, x14, x15. Branches to smc_unknown on an
	 * invalid index.
	 * ---------------------------------------------------------------------
	 */
	.macro	load_rt_svc_desc_pointer
	/* Load descriptor index from array of indices */
	adr	x14, rt_svc_descs_indices
	ldrb	w15, [x14, x16]

#if SMCCC_MAJOR_VERSION == 1
	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown
#elif SMCCC_MAJOR_VERSION == 2
	/* Verify that the top 3 bits of the loaded index are 0 (w15 <= 31) */
	cmp	w15, #31
	b.hi	smc_unknown
#endif /* SMCCC_MAJOR_VERSION */

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]
	.endm

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from where the SMC has been
	 * invoked, it frees some general purpose registers to perform the
	 * remaining tasks. They involve finding the runtime service handler
	 * that is the target of the SMC & switching to runtime stacks (SP_EL0)
	 * before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here
	 * ---------------------------------------------------------------------
	 */
func smc_handler
smc_handler32:
	/* Check whether aarch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

smc_handler64:
	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 *
	 * Save x4-x29 and sp_el0.
	 */
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]

	/* x5 = cookie (NULL), x6 = 'handle' i.e. pointer to the cpu context */
	mov	x5, xzr
	mov	x6, sp

#if SMCCC_MAJOR_VERSION == 1

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH

	/* x16 = index into rt_svc_descs_indices; x15 receives the handler */
	load_rt_svc_desc_pointer

#elif SMCCC_MAJOR_VERSION == 2

	/* Bit 31 must be set */
	tbz	x0, #FUNCID_TYPE_SHIFT, smc_unknown

	/*
	 * Check MSB of namespace to decide between compatibility/vendor and
	 * SPCI/SPRT
	 */
	tbz	x0, #(FUNCID_NAMESPACE_SHIFT + 1), compat_or_vendor

	/* Namespaces SPRT and SPCI currently unimplemented */
	b	smc_unknown

compat_or_vendor:

	/* Namespace is b'00 (compatibility) or b'01 (vendor) */

	/*
	 * Add the LSB of the namespace (bit [28]) to the OEN [27:24] to create
	 * a 5-bit index into the rt_svc_descs_indices array.
	 *
	 * The low 16 entries of the rt_svc_descs_indices array correspond to
	 * OENs of the compatibility namespace and the top 16 entries of the
	 * array are assigned to the vendor namespace descriptor.
	 */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #(FUNCID_OEN_WIDTH + 1)

	load_rt_svc_desc_pointer

#endif /* SMCCC_MAJOR_VERSION */

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #0

	/*
	 * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there is a world
	 * switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	mrs	x18, scr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	str	x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1

	mov	sp, x12

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit() which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
	/* In debug builds, catch a NULL handler before the indirect branch */
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate return value with SMC_UNK, restore
	 * GP registers, and return to caller.
	 */
	mov	x0, #SMC_UNK
	str	x0, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	restore_gp_registers_eret

smc_prohibited:
	/* AArch32 caller issued an SMC64: return SMC_UNK without dispatch */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	eret

rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #1
	no_ret	report_unhandled_exception
endfunc smc_handler

/*
 * Delegate External Abort handling to platform's EA handler. This function
 * assumes that all GP registers have been saved by the caller.
 *
 * x0: EA reason
 * x1: EA syndrome
 */
func delegate_ea
	/* Save EL3 state */
	mrs	x2, spsr_el3
	mrs	x3, elr_el3
	stp	x2, x3, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/*
	 * Save ESR as handling might involve lower ELs, and returning back to
	 * EL3 from there would trample the original ESR.
	 */
	mrs	x4, scr_el3
	mrs	x5, esr_el3
	stp	x4, x5, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/*
	 * Setup rest of arguments, and call platform External Abort handler.
	 *
	 * x0: EA reason (already in place)
	 * x1: Exception syndrome (already in place).
	 * x2: Cookie (unused for now).
	 * x3: Context pointer.
	 * x4: Flags (security state from SCR for now).
	 */
	mov	x2, xzr
	mov	x3, sp
	ubfx	x4, x4, #0, #1

	/* Switch to runtime stack */
	ldr	x5, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	msr	spsel, #0
	mov	sp, x5

	/*
	 * Preserve LR in x29 across the call; safe because the caller has
	 * already saved all GP registers to the context.
	 */
	mov	x29, x30
	bl	plat_ea_handler
	mov	x30, x29

	/* Make SP point to context */
	msr	spsel, #1

	/* Restore EL3 state */
	ldp	x1, x2, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	spsr_el3, x1
	msr	elr_el3, x2

	/* Restore ESR_EL3 and SCR_EL3 */
	ldp	x3, x4, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	msr	scr_el3, x3
	msr	esr_el3, x4

	/* Returns to el3_exit when invoked via the handle_ea macro */
	ret
endfunc delegate_ea