blob: 346cd3b313a5e56fccdfbc7e6b46cd80ae50b9af [file] [log] [blame]
Achin Gupta4f6ad662013-10-25 09:08:21 +01001/*
Dimitris Papastamos04159512018-01-22 11:53:04 +00002 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
Achin Gupta4f6ad662013-10-25 09:08:21 +01003 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Achin Gupta4f6ad662013-10-25 09:08:21 +01005 */
6
7#include <arch.h>
Dan Handley714a0d22014-04-09 13:13:04 +01008#include <asm_macros.S>
Dan Handley2bd4ef22014-04-09 13:14:54 +01009#include <context.h>
dp-arm3cac7862016-09-19 11:18:44 +010010#include <cpu_data.h>
Jeenu Viswambharan96c7df02017-11-30 12:54:15 +000011#include <ea_handle.h>
Achin Gupta9cf2bb72014-05-09 11:07:09 +010012#include <interrupt_mgmt.h>
Dan Handleyed6ff952014-05-14 17:44:19 +010013#include <platform_def.h>
Dan Handley2bd4ef22014-04-09 13:14:54 +010014#include <runtime_svc.h>
Antonio Nino Diaz35c8cfc2018-04-23 15:43:29 +010015#include <smccc.h>
Achin Gupta4f6ad662013-10-25 09:08:21 +010016
17 .globl runtime_exceptions
Achin Gupta4f6ad662013-10-25 09:08:21 +010018
Dimitris Papastamos446f7f12017-11-30 14:53:53 +000019 .globl sync_exception_sp_el0
20 .globl irq_sp_el0
21 .globl fiq_sp_el0
22 .globl serror_sp_el0
23
24 .globl sync_exception_sp_elx
25 .globl irq_sp_elx
26 .globl fiq_sp_elx
27 .globl serror_sp_elx
28
29 .globl sync_exception_aarch64
30 .globl irq_aarch64
31 .globl fiq_aarch64
32 .globl serror_aarch64
33
34 .globl sync_exception_aarch32
35 .globl irq_aarch32
36 .globl fiq_aarch32
37 .globl serror_aarch32
38
Jeenu Viswambharan96c7df02017-11-30 12:54:15 +000039 /*
Jeenu Viswambharan9a7ce2f2018-04-04 16:07:11 +010040 * Macro that prepares entry to EL3 upon taking an exception.
41 *
42 * With RAS_EXTENSION, this macro synchronizes pending errors with an ESB
43 * instruction. When an error is thus synchronized, the handling is
44 * delegated to platform EA handler.
45 *
46 * Without RAS_EXTENSION, this macro just saves x30, and unmasks
47 * Asynchronous External Aborts.
48 */
49 .macro check_and_unmask_ea
50#if RAS_EXTENSION
51 /* Synchronize pending External Aborts */
52 esb
53
54 /* Unmask the SError interrupt */
55 msr daifclr, #DAIF_ABT_BIT
56
57 /*
58 * Explicitly save x30 so as to free up a register and to enable
59 * branching
60 */
61 str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
62
63 /* Check for SErrors synchronized by the ESB instruction */
64 mrs x30, DISR_EL1
65 tbz x30, #DISR_A_BIT, 1f
66
67 /* Save GP registers and restore them afterwards */
68 bl save_gp_registers
69 mov x0, #ERROR_EA_ESB
70 mrs x1, DISR_EL1
71 bl delegate_ea
72 bl restore_gp_registers
73
741:
75#else
76 /* Unmask the SError interrupt */
77 msr daifclr, #DAIF_ABT_BIT
78
79 str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
80#endif
81 .endm
82
83 /*
Jeenu Viswambharan96c7df02017-11-30 12:54:15 +000084 * Handle External Abort by delegating to the platform's EA handler.
85 * Once the platform handler returns, the macro exits EL3 and returns to
86 * where the abort was taken from.
87 *
88 * This macro assumes that x30 is available for use.
89 *
90 * 'abort_type' is a constant passed to the platform handler, indicating
91 * the cause of the External Abort.
92 */
93 .macro handle_ea abort_type
94 /* Save GP registers */
95 bl save_gp_registers
96
97 /* Setup exception class and syndrome arguments for platform handler */
98 mov x0, \abort_type
99 mrs x1, esr_el3
100 adr x30, el3_exit
101 b delegate_ea
102 .endm
103
Douglas Raillard0980eed2016-11-09 17:48:27 +0000104 /* ---------------------------------------------------------------------
105 * This macro handles Synchronous exceptions.
106 * Only SMC exceptions are supported.
107 * ---------------------------------------------------------------------
Achin Gupta9cf2bb72014-05-09 11:07:09 +0100108 */
109 .macro handle_sync_exception
dp-arm3cac7862016-09-19 11:18:44 +0100110#if ENABLE_RUNTIME_INSTRUMENTATION
dp-arm3cac7862016-09-19 11:18:44 +0100111 /*
Douglas Raillard0980eed2016-11-09 17:48:27 +0000112 * Read the timestamp value and store it in per-cpu data. The value
113 * will be extracted from per-cpu data by the C level SMC handler and
114 * saved to the PMF timestamp region.
dp-arm3cac7862016-09-19 11:18:44 +0100115 */
116 mrs x30, cntpct_el0
117 str x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
118 mrs x29, tpidr_el3
119 str x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
120 ldr x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
121#endif
122
Achin Gupta9cf2bb72014-05-09 11:07:09 +0100123 mrs x30, esr_el3
124 ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
125
Douglas Raillard0980eed2016-11-09 17:48:27 +0000126 /* Handle SMC exceptions separately from other synchronous exceptions */
Achin Gupta9cf2bb72014-05-09 11:07:09 +0100127 cmp x30, #EC_AARCH32_SMC
128 b.eq smc_handler32
129
130 cmp x30, #EC_AARCH64_SMC
131 b.eq smc_handler64
132
Jeenu Viswambharan96c7df02017-11-30 12:54:15 +0000133 /* Check for I/D aborts from lower EL */
134 cmp x30, #EC_IABORT_LOWER_EL
135 b.eq 1f
136
137 cmp x30, #EC_DABORT_LOWER_EL
138 b.ne 2f
139
1401:
141 /* Test for EA bit in the instruction syndrome */
142 mrs x30, esr_el3
143 tbz x30, #ESR_ISS_EABORT_EA_BIT, 2f
144 handle_ea #ERROR_EA_SYNC
145
1462:
Douglas Raillard0980eed2016-11-09 17:48:27 +0000147 /* Other kinds of synchronous exceptions are not handled */
Julius Werner67ebde72017-07-27 14:59:34 -0700148 ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
149 b report_unhandled_exception
Achin Gupta9cf2bb72014-05-09 11:07:09 +0100150 .endm
151
152
Douglas Raillard0980eed2016-11-09 17:48:27 +0000153 /* ---------------------------------------------------------------------
154 * This macro handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
155 * interrupts.
156 * ---------------------------------------------------------------------
Achin Gupta9cf2bb72014-05-09 11:07:09 +0100157 */
158 .macro handle_interrupt_exception label
Achin Gupta9cf2bb72014-05-09 11:07:09 +0100159 bl save_gp_registers
Douglas Raillard0980eed2016-11-09 17:48:27 +0000160 /* Save the EL3 system registers needed to return from this exception */
Achin Gupta979992e2015-05-13 17:57:18 +0100161 mrs x0, spsr_el3
162 mrs x1, elr_el3
163 stp x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
164
Achin Gupta9cf2bb72014-05-09 11:07:09 +0100165 /* Switch to the runtime stack i.e. SP_EL0 */
166 ldr x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
167 mov x20, sp
168 msr spsel, #0
169 mov sp, x2
170
171 /*
Douglas Raillard0980eed2016-11-09 17:48:27 +0000172 * Find out whether this is a valid interrupt type.
173 * If the interrupt controller reports a spurious interrupt then return
174 * to where we came from.
Achin Gupta9cf2bb72014-05-09 11:07:09 +0100175 */
Dan Handley701fea72014-05-27 16:17:21 +0100176 bl plat_ic_get_pending_interrupt_type
Achin Gupta9cf2bb72014-05-09 11:07:09 +0100177 cmp x0, #INTR_TYPE_INVAL
178 b.eq interrupt_exit_\label
179
180 /*
Douglas Raillard0980eed2016-11-09 17:48:27 +0000181 * Get the registered handler for this interrupt type.
182 * A NULL return value could be 'cause of the following conditions:
Achin Gupta979992e2015-05-13 17:57:18 +0100183 *
Douglas Raillard0980eed2016-11-09 17:48:27 +0000184 * a. An interrupt of a type was routed correctly but a handler for its
185 * type was not registered.
Achin Gupta979992e2015-05-13 17:57:18 +0100186 *
Douglas Raillard0980eed2016-11-09 17:48:27 +0000187 * b. An interrupt of a type was not routed correctly so a handler for
188 * its type was not registered.
Achin Gupta979992e2015-05-13 17:57:18 +0100189 *
Douglas Raillard0980eed2016-11-09 17:48:27 +0000190 * c. An interrupt of a type was routed correctly to EL3, but was
191 * deasserted before its pending state could be read. Another
192 * interrupt of a different type pended at the same time and its
193 * type was reported as pending instead. However, a handler for this
194 * type was not registered.
Achin Gupta979992e2015-05-13 17:57:18 +0100195 *
Douglas Raillard0980eed2016-11-09 17:48:27 +0000196 * a. and b. can only happen due to a programming error. The
197 * occurrence of c. could be beyond the control of Trusted Firmware.
198 * It makes sense to return from this exception instead of reporting an
199 * error.
Achin Gupta9cf2bb72014-05-09 11:07:09 +0100200 */
201 bl get_interrupt_type_handler
Achin Gupta979992e2015-05-13 17:57:18 +0100202 cbz x0, interrupt_exit_\label
Achin Gupta9cf2bb72014-05-09 11:07:09 +0100203 mov x21, x0
204
205 mov x0, #INTR_ID_UNAVAILABLE
Achin Gupta9cf2bb72014-05-09 11:07:09 +0100206
Achin Gupta9cf2bb72014-05-09 11:07:09 +0100207 /* Set the current security state in the 'flags' parameter */
208 mrs x2, scr_el3
209 ubfx x1, x2, #0, #1
210
211 /* Restore the reference to the 'handle' i.e. SP_EL3 */
212 mov x2, x20
213
Douglas Raillard0980eed2016-11-09 17:48:27 +0000214 /* x3 will point to a cookie (not used now) */
Soby Mathew799f0ab2014-05-27 16:54:31 +0100215 mov x3, xzr
216
Achin Gupta9cf2bb72014-05-09 11:07:09 +0100217 /* Call the interrupt type handler */
218 blr x21
219
220interrupt_exit_\label:
221 /* Return from exception, possibly in a different security state */
222 b el3_exit
223
Achin Gupta9cf2bb72014-05-09 11:07:09 +0100224 .endm
225
226
vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
	/* No synchronous exceptions are expected while running on SP_EL0 */
	b	report_unhandled_exception
	check_vector_size sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
	check_vector_size irq_sp_el0


vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
	check_vector_size fiq_sp_el0


vector_entry serror_sp_el0
	b	report_unhandled_exception
	check_vector_size serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * Reaching here means something went wrong during a previous
	 * exception entry/exit, or while handling an earlier unexpected
	 * synchronous exception. SP_EL3 is quite likely corrupted.
	 */
	b	report_unhandled_exception
	check_vector_size sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size fiq_sp_elx

vector_entry serror_sp_elx
	b	report_unhandled_exception
	check_vector_size serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * Entry point for SMCs and for traps left unhandled at lower ELs.
	 * SP_EL3 must point to a valid cpu context in which GP and system
	 * register state can be saved.
	 */
	check_and_unmask_ea
	handle_sync_exception
	check_vector_size sync_exception_aarch64

vector_entry irq_aarch64
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch64
	check_vector_size irq_aarch64

vector_entry fiq_aarch64
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch64
	check_vector_size fiq_aarch64

vector_entry serror_aarch64
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Stash x30 in the saved context; this frees a register and makes
	 * branching possible.
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	handle_ea #ERROR_EA_ASYNC
	check_vector_size serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * Entry point for SMCs and for traps left unhandled at lower ELs.
	 * SP_EL3 must point to a valid cpu context in which GP and system
	 * register state can be saved.
	 */
	check_and_unmask_ea
	handle_sync_exception
	check_vector_size sync_exception_aarch32

vector_entry irq_aarch32
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch32
	check_vector_size irq_aarch32

vector_entry fiq_aarch32
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch32
	check_vector_size fiq_aarch32

vector_entry serror_aarch32
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Stash x30 in the saved context; this frees a register and makes
	 * branching possible.
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	handle_ea #ERROR_EA_ASYNC
	check_vector_size serror_aarch32
353
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000354
Douglas Raillard0980eed2016-11-09 17:48:27 +0000355 /* ---------------------------------------------------------------------
Antonio Nino Diaz35c8cfc2018-04-23 15:43:29 +0100356 * This macro takes an argument in x16 that is the index in the
357 * 'rt_svc_descs_indices' array, checks that the value in the array is
358 * valid, and loads in x15 the pointer to the handler of that service.
359 * ---------------------------------------------------------------------
360 */
361 .macro load_rt_svc_desc_pointer
362 /* Load descriptor index from array of indices */
363 adr x14, rt_svc_descs_indices
364 ldrb w15, [x14, x16]
365
366#if SMCCC_MAJOR_VERSION == 1
367 /* Any index greater than 127 is invalid. Check bit 7. */
368 tbnz w15, 7, smc_unknown
369#elif SMCCC_MAJOR_VERSION == 2
370 /* Verify that the top 3 bits of the loaded index are 0 (w15 <= 31) */
371 cmp w15, #31
372 b.hi smc_unknown
373#endif /* SMCCC_MAJOR_VERSION */
374
375 /*
376 * Get the descriptor using the index
377 * x11 = (base + off), w15 = index
378 *
379 * handler = (base + off) + (index << log2(size))
380 */
381 adr x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
382 lsl w10, w15, #RT_SVC_SIZE_LOG2
383 ldr x15, [x11, w10, uxtw]
384 .endm
385
386 /* ---------------------------------------------------------------------
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000387 * The following code handles secure monitor calls.
Douglas Raillard0980eed2016-11-09 17:48:27 +0000388 * Depending upon the execution state from where the SMC has been
389 * invoked, it frees some general purpose registers to perform the
390 * remaining tasks. They involve finding the runtime service handler
391 * that is the target of the SMC & switching to runtime stacks (SP_EL0)
392 * before calling the handler.
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000393 *
Douglas Raillard0980eed2016-11-09 17:48:27 +0000394 * Note that x30 has been explicitly saved and can be used here
395 * ---------------------------------------------------------------------
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000396 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000397func smc_handler
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000398smc_handler32:
399 /* Check whether aarch32 issued an SMC64 */
400 tbnz x0, #FUNCID_CC_SHIFT, smc_prohibited
401
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000402smc_handler64:
Douglas Raillard0980eed2016-11-09 17:48:27 +0000403 /*
404 * Populate the parameters for the SMC handler.
405 * We already have x0-x4 in place. x5 will point to a cookie (not used
406 * now). x6 will point to the context structure (SP_EL3) and x7 will
Dimitris Papastamos04159512018-01-22 11:53:04 +0000407 * contain flags we need to pass to the handler.
Douglas Raillard0980eed2016-11-09 17:48:27 +0000408 *
Antonio Nino Diaz35c8cfc2018-04-23 15:43:29 +0100409 * Save x4-x29 and sp_el0.
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000410 */
Jeenu Viswambharan23d05a82017-11-29 16:59:34 +0000411 stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
412 stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
413 stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
414 stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
415 stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
416 stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
417 stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
418 stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
419 stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
420 stp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
421 stp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
422 stp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
423 stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
424 mrs x18, sp_el0
425 str x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
Soby Mathew6c5192a2014-04-30 15:36:37 +0100426
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000427 mov x5, xzr
428 mov x6, sp
429
Antonio Nino Diaz35c8cfc2018-04-23 15:43:29 +0100430#if SMCCC_MAJOR_VERSION == 1
431
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000432 /* Get the unique owning entity number */
433 ubfx x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
434 ubfx x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
435 orr x16, x16, x15, lsl #FUNCID_OEN_WIDTH
436
Antonio Nino Diaz35c8cfc2018-04-23 15:43:29 +0100437 load_rt_svc_desc_pointer
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000438
Antonio Nino Diaz35c8cfc2018-04-23 15:43:29 +0100439#elif SMCCC_MAJOR_VERSION == 2
440
441 /* Bit 31 must be set */
442 tbz x0, #FUNCID_TYPE_SHIFT, smc_unknown
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000443
Douglas Raillard0980eed2016-11-09 17:48:27 +0000444 /*
Antonio Nino Diaz35c8cfc2018-04-23 15:43:29 +0100445 * Check MSB of namespace to decide between compatibility/vendor and
446 * SPCI/SPRT
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000447 */
Antonio Nino Diaz35c8cfc2018-04-23 15:43:29 +0100448 tbz x0, #(FUNCID_NAMESPACE_SHIFT + 1), compat_or_vendor
449
450 /* Namespaces SPRT and SPCI currently unimplemented */
451 b smc_unknown
452
453compat_or_vendor:
454
455 /* Namespace is b'00 (compatibility) or b'01 (vendor) */
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000456
457 /*
Antonio Nino Diaz35c8cfc2018-04-23 15:43:29 +0100458 * Add the LSB of the namespace (bit [28]) to the OEN [27:24] to create
459 * a 5-bit index into the rt_svc_descs_indices array.
460 *
461 * The low 16 entries of the rt_svc_descs_indices array correspond to
462 * OENs of the compatibility namespace and the top 16 entries of the
463 * array are assigned to the vendor namespace descriptor.
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000464 */
Antonio Nino Diaz35c8cfc2018-04-23 15:43:29 +0100465 ubfx x16, x0, #FUNCID_OEN_SHIFT, #(FUNCID_OEN_WIDTH + 1)
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000466
Antonio Nino Diaz35c8cfc2018-04-23 15:43:29 +0100467 load_rt_svc_desc_pointer
468
469#endif /* SMCCC_MAJOR_VERSION */
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000470
Douglas Raillard0980eed2016-11-09 17:48:27 +0000471 /*
Antonio Nino Diaz35c8cfc2018-04-23 15:43:29 +0100472 * Restore the saved C runtime stack value which will become the new
473 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
474 * structure prior to the last ERET from EL3.
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000475 */
Antonio Nino Diaz35c8cfc2018-04-23 15:43:29 +0100476 ldr x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
477
478 /* Switch to SP_EL0 */
479 msr spsel, #0
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000480
Douglas Raillard0980eed2016-11-09 17:48:27 +0000481 /*
482 * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there is a world
483 * switch during SMC handling.
484 * TODO: Revisit if all system registers can be saved later.
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000485 */
486 mrs x16, spsr_el3
487 mrs x17, elr_el3
488 mrs x18, scr_el3
489 stp x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
Achin Guptae1aa5162014-06-26 09:58:52 +0100490 str x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000491
492 /* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
493 bfi x7, x18, #0, #1
494
495 mov sp, x12
496
Douglas Raillard0980eed2016-11-09 17:48:27 +0000497 /*
498 * Call the Secure Monitor Call handler and then drop directly into
499 * el3_exit() which will program any remaining architectural state
500 * prior to issuing the ERET to the desired lower EL.
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000501 */
502#if DEBUG
503 cbz x15, rt_svc_fw_critical_error
504#endif
505 blr x15
506
Yatharth Kochar6c0566c2015-10-02 17:56:48 +0100507 b el3_exit
Achin Gupta4f6ad662013-10-25 09:08:21 +0100508
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000509smc_unknown:
510 /*
Jeenu Viswambharan23d05a82017-11-29 16:59:34 +0000511 * Unknown SMC call. Populate return value with SMC_UNK, restore
512 * GP registers, and return to caller.
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000513 */
Antonio Nino Diaze4794b72018-02-14 14:22:29 +0000514 mov x0, #SMC_UNK
Jeenu Viswambharan23d05a82017-11-29 16:59:34 +0000515 str x0, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
516 b restore_gp_registers_eret
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000517
518smc_prohibited:
Soby Mathew6c5192a2014-04-30 15:36:37 +0100519 ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
Antonio Nino Diaze4794b72018-02-14 14:22:29 +0000520 mov x0, #SMC_UNK
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000521 eret
522
523rt_svc_fw_critical_error:
Douglas Raillard0980eed2016-11-09 17:48:27 +0000524 /* Switch to SP_ELx */
525 msr spsel, #1
Jeenu Viswambharan68aef102016-11-30 15:21:11 +0000526 no_ret report_unhandled_exception
Kévin Petita877c252015-03-24 14:03:57 +0000527endfunc smc_handler
Jeenu Viswambharan96c7df02017-11-30 12:54:15 +0000528
/*
 * Hand an External Abort over to the platform's EA handler. All GP
 * registers are assumed to have been saved by the caller already.
 *
 * x0: EA reason
 * x1: EA syndrome
 */
func delegate_ea
	/* Preserve the EL3 return state */
	mrs	x2, spsr_el3
	mrs	x3, elr_el3
	stp	x2, x3, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/*
	 * Preserve ESR too: handling may involve lower ELs, and re-entering
	 * EL3 from there would overwrite the original syndrome.
	 */
	mrs	x4, scr_el3
	mrs	x5, esr_el3
	stp	x4, x5, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/*
	 * Complete the argument list and invoke the platform External
	 * Abort handler:
	 *
	 * x0: EA reason (already in place)
	 * x1: Exception syndrome (already in place).
	 * x2: Cookie (unused for now).
	 * x3: Context pointer.
	 * x4: Flags (security state from SCR for now).
	 */
	mov	x2, xzr
	mov	x3, sp
	ubfx	x4, x4, #0, #1

	/* Move onto the runtime stack */
	ldr	x5, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	msr	spsel, #0
	mov	sp, x5

	mov	x29, x30
	bl	plat_ea_handler
	mov	x30, x29

	/* Point SP back at the context */
	msr	spsel, #1

	/* Put the EL3 return state back */
	ldp	x1, x2, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	spsr_el3, x1
	msr	elr_el3, x2

	/* Put ESR_EL3 and SCR_EL3 back */
	ldp	x3, x4, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	msr	scr_el3, x3
	msr	esr_el3, x4

	ret
endfunc delegate_ea