/*
 * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <context.h>

	.global	el1_sysregs_context_save
	.global	el1_sysregs_context_restore
#if CTX_INCLUDE_FPREGS
	.global	fpregs_context_save
	.global	fpregs_context_restore
#endif
	.global	save_gp_pmcr_pauth_regs
	.global	restore_gp_pmcr_pauth_regs
	.global	el3_exit

/* ------------------------------------------------------------------
 * The following function strictly follows the AArch64 PCS, using
 * x9-x17 (temporary caller-saved registers) to save the EL1 system
 * register context. It assumes that 'x0' points to an
 * 'el1_sys_regs' structure where the register context will be saved.
 * ------------------------------------------------------------------
 */
func el1_sysregs_context_save

	mrs	x9, spsr_el1
	mrs	x10, elr_el1
	stp	x9, x10, [x0, #CTX_SPSR_EL1]

	mrs	x15, sctlr_el1
	mrs	x16, actlr_el1
	stp	x15, x16, [x0, #CTX_SCTLR_EL1]

	mrs	x17, cpacr_el1
	mrs	x9, csselr_el1
	stp	x17, x9, [x0, #CTX_CPACR_EL1]

	mrs	x10, sp_el1
	mrs	x11, esr_el1
	stp	x10, x11, [x0, #CTX_SP_EL1]

	mrs	x12, ttbr0_el1
	mrs	x13, ttbr1_el1
	stp	x12, x13, [x0, #CTX_TTBR0_EL1]

	mrs	x14, mair_el1
	mrs	x15, amair_el1
	stp	x14, x15, [x0, #CTX_MAIR_EL1]

	mrs	x16, tcr_el1
	mrs	x17, tpidr_el1
	stp	x16, x17, [x0, #CTX_TCR_EL1]

	mrs	x9, tpidr_el0
	mrs	x10, tpidrro_el0
	stp	x9, x10, [x0, #CTX_TPIDR_EL0]

	mrs	x13, par_el1
	mrs	x14, far_el1
	stp	x13, x14, [x0, #CTX_PAR_EL1]

	mrs	x15, afsr0_el1
	mrs	x16, afsr1_el1
	stp	x15, x16, [x0, #CTX_AFSR0_EL1]

	mrs	x17, contextidr_el1
	mrs	x9, vbar_el1
	stp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]

	/* Save AArch32 system registers if the build is configured to do so */
#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, spsr_abt
	mrs	x12, spsr_und
	stp	x11, x12, [x0, #CTX_SPSR_ABT]

	mrs	x13, spsr_irq
	mrs	x14, spsr_fiq
	stp	x13, x14, [x0, #CTX_SPSR_IRQ]

	mrs	x15, dacr32_el2
	mrs	x16, ifsr32_el2
	stp	x15, x16, [x0, #CTX_DACR32_EL2]
#endif

	/* Save NS timer registers if the build is configured to do so */
#if NS_TIMER_SWITCH
	mrs	x10, cntp_ctl_el0
	mrs	x11, cntp_cval_el0
	stp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]

	mrs	x12, cntv_ctl_el0
	mrs	x13, cntv_cval_el0
	stp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]

	mrs	x14, cntkctl_el1
	str	x14, [x0, #CTX_CNTKCTL_EL1]
#endif

	/* Save MTE system registers if the build is configured to do so */
#if CTX_INCLUDE_MTE_REGS
	mrs	x15, TFSRE0_EL1
	mrs	x16, TFSR_EL1
	stp	x15, x16, [x0, #CTX_TFSRE0_EL1]

	mrs	x9, RGSR_EL1
	mrs	x10, GCR_EL1
	stp	x9, x10, [x0, #CTX_RGSR_EL1]
#endif

	ret
endfunc el1_sysregs_context_save

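/*
 * A minimal C-level sketch of how the save routine above is typically
 * driven from TF-A's context management code. The names
 * cm_get_context() and get_sysregs_ctx() are assumed from
 * context_mgmt.h/context.h rather than defined in this file:
 *
 *	cpu_context_t *ctx = cm_get_context(NON_SECURE);
 *	el1_sysregs_context_save(get_sysregs_ctx(ctx));
 */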
/* ------------------------------------------------------------------
 * The following function strictly follows the AArch64 PCS, using
 * x9-x17 (temporary caller-saved registers) to restore the EL1
 * system register context. It assumes that 'x0' points to an
 * 'el1_sys_regs' structure from where the register context will be
 * restored.
 * ------------------------------------------------------------------
 */
func el1_sysregs_context_restore

	ldp	x9, x10, [x0, #CTX_SPSR_EL1]
	msr	spsr_el1, x9
	msr	elr_el1, x10

	ldp	x15, x16, [x0, #CTX_SCTLR_EL1]
	msr	sctlr_el1, x15
	msr	actlr_el1, x16

	ldp	x17, x9, [x0, #CTX_CPACR_EL1]
	msr	cpacr_el1, x17
	msr	csselr_el1, x9

	ldp	x10, x11, [x0, #CTX_SP_EL1]
	msr	sp_el1, x10
	msr	esr_el1, x11

	ldp	x12, x13, [x0, #CTX_TTBR0_EL1]
	msr	ttbr0_el1, x12
	msr	ttbr1_el1, x13

	ldp	x14, x15, [x0, #CTX_MAIR_EL1]
	msr	mair_el1, x14
	msr	amair_el1, x15

	ldp	x16, x17, [x0, #CTX_TCR_EL1]
	msr	tcr_el1, x16
	msr	tpidr_el1, x17

	ldp	x9, x10, [x0, #CTX_TPIDR_EL0]
	msr	tpidr_el0, x9
	msr	tpidrro_el0, x10

	ldp	x13, x14, [x0, #CTX_PAR_EL1]
	msr	par_el1, x13
	msr	far_el1, x14

	ldp	x15, x16, [x0, #CTX_AFSR0_EL1]
	msr	afsr0_el1, x15
	msr	afsr1_el1, x16

	ldp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
	msr	contextidr_el1, x17
	msr	vbar_el1, x9

	/* Restore AArch32 system registers if the build is configured to do so */
#if CTX_INCLUDE_AARCH32_REGS
	ldp	x11, x12, [x0, #CTX_SPSR_ABT]
	msr	spsr_abt, x11
	msr	spsr_und, x12

	ldp	x13, x14, [x0, #CTX_SPSR_IRQ]
	msr	spsr_irq, x13
	msr	spsr_fiq, x14

	ldp	x15, x16, [x0, #CTX_DACR32_EL2]
	msr	dacr32_el2, x15
	msr	ifsr32_el2, x16
#endif
	/* Restore NS timer registers if the build is configured to do so */
#if NS_TIMER_SWITCH
	ldp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]
	msr	cntp_ctl_el0, x10
	msr	cntp_cval_el0, x11

	ldp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]
	msr	cntv_ctl_el0, x12
	msr	cntv_cval_el0, x13

	ldr	x14, [x0, #CTX_CNTKCTL_EL1]
	msr	cntkctl_el1, x14
#endif
	/* Restore MTE system registers if the build is configured to do so */
#if CTX_INCLUDE_MTE_REGS
	ldp	x11, x12, [x0, #CTX_TFSRE0_EL1]
	msr	TFSRE0_EL1, x11
	msr	TFSR_EL1, x12

	ldp	x13, x14, [x0, #CTX_RGSR_EL1]
	msr	RGSR_EL1, x13
	msr	GCR_EL1, x14
#endif

	/* No explicit ISB required here as ERET covers it */
	ret
endfunc el1_sysregs_context_restore

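/*
 * Mirror sketch for the restore path, under the same assumed names as
 * the save example above:
 *
 *	el1_sysregs_context_restore(get_sysregs_ctx(ctx));
 *
 * followed eventually by el3_exit (defined at the end of this file),
 * which restores the general purpose registers and performs the ERET.
 */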
/* ------------------------------------------------------------------
 * The following function strictly follows the AArch64 PCS (AAPCS64),
 * using x9-x17 (temporary caller-saved registers) to save the
 * floating point register context. It assumes that 'x0' points to a
 * 'fp_regs' structure where the register context will be saved.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
 * However, Trusted Firmware currently neither uses VFP registers nor
 * sets this trap, so the bit is assumed to be clear.
 *
 * TODO: Revisit when VFP is used in the secure world.
 * ------------------------------------------------------------------
 */
#if CTX_INCLUDE_FPREGS
func fpregs_context_save
	stp	q0, q1, [x0, #CTX_FP_Q0]
	stp	q2, q3, [x0, #CTX_FP_Q2]
	stp	q4, q5, [x0, #CTX_FP_Q4]
	stp	q6, q7, [x0, #CTX_FP_Q6]
	stp	q8, q9, [x0, #CTX_FP_Q8]
	stp	q10, q11, [x0, #CTX_FP_Q10]
	stp	q12, q13, [x0, #CTX_FP_Q12]
	stp	q14, q15, [x0, #CTX_FP_Q14]
	stp	q16, q17, [x0, #CTX_FP_Q16]
	stp	q18, q19, [x0, #CTX_FP_Q18]
	stp	q20, q21, [x0, #CTX_FP_Q20]
	stp	q22, q23, [x0, #CTX_FP_Q22]
	stp	q24, q25, [x0, #CTX_FP_Q24]
	stp	q26, q27, [x0, #CTX_FP_Q26]
	stp	q28, q29, [x0, #CTX_FP_Q28]
	stp	q30, q31, [x0, #CTX_FP_Q30]

	mrs	x9, fpsr
	str	x9, [x0, #CTX_FP_FPSR]

	mrs	x10, fpcr
	str	x10, [x0, #CTX_FP_FPCR]

#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, fpexc32_el2
	str	x11, [x0, #CTX_FP_FPEXC32_EL2]
#endif
	ret
endfunc fpregs_context_save

/* ------------------------------------------------------------------
 * The following function strictly follows the AArch64 PCS (AAPCS64),
 * using x9-x17 (temporary caller-saved registers) to restore the
 * floating point register context. It assumes that 'x0' points to a
 * 'fp_regs' structure from where the register context will be
 * restored.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
 * However, Trusted Firmware currently neither uses VFP registers nor
 * sets this trap, so the bit is assumed to be clear.
 *
 * TODO: Revisit when VFP is used in the secure world.
 * ------------------------------------------------------------------
 */
func fpregs_context_restore
	ldp	q0, q1, [x0, #CTX_FP_Q0]
	ldp	q2, q3, [x0, #CTX_FP_Q2]
	ldp	q4, q5, [x0, #CTX_FP_Q4]
	ldp	q6, q7, [x0, #CTX_FP_Q6]
	ldp	q8, q9, [x0, #CTX_FP_Q8]
	ldp	q10, q11, [x0, #CTX_FP_Q10]
	ldp	q12, q13, [x0, #CTX_FP_Q12]
	ldp	q14, q15, [x0, #CTX_FP_Q14]
	ldp	q16, q17, [x0, #CTX_FP_Q16]
	ldp	q18, q19, [x0, #CTX_FP_Q18]
	ldp	q20, q21, [x0, #CTX_FP_Q20]
	ldp	q22, q23, [x0, #CTX_FP_Q22]
	ldp	q24, q25, [x0, #CTX_FP_Q24]
	ldp	q26, q27, [x0, #CTX_FP_Q26]
	ldp	q28, q29, [x0, #CTX_FP_Q28]
	ldp	q30, q31, [x0, #CTX_FP_Q30]

	ldr	x9, [x0, #CTX_FP_FPSR]
	msr	fpsr, x9

	ldr	x10, [x0, #CTX_FP_FPCR]
	msr	fpcr, x10

#if CTX_INCLUDE_AARCH32_REGS
	ldr	x11, [x0, #CTX_FP_FPEXC32_EL2]
	msr	fpexc32_el2, x11
#endif
	/*
	 * No explicit ISB required here as the ERET to
	 * switch to secure EL1 or the non-secure world
	 * covers it
	 */

	ret
endfunc fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */

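/*
 * The FP/SIMD pair above is compiled in only when the platform build
 * sets the CTX_INCLUDE_FPREGS=1 option, e.g. (a hedged example command
 * line; the option itself is a standard TF-A build flag):
 *
 *	make PLAT=<platform> CTX_INCLUDE_FPREGS=1 all
 */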
/* ------------------------------------------------------------------
 * The following function is used to save and restore all the general
 * purpose and ARMv8.3-PAuth (if enabled) registers.
 * It also checks whether the Secure Cycle Counter is disabled in
 * MDCR_EL3 when ARMv8.5-PMU is implemented; if the counter is not
 * disabled and the caller is in Non-secure state, it saves PMCR_EL0
 * and then disables the Cycle Counter.
 *
 * Ideally we would only save and restore the callee-saved registers
 * when a world switch occurs, but that type of implementation is
 * more complex. So currently we always save and restore these
 * registers on entry to and exit from EL3.
 * These are functions rather than macros, so that their invocation
 * (a single branch) fits within the 32 instructions available per
 * exception vector.
 * clobbers: x18
 * ------------------------------------------------------------------
 */
func save_gp_pmcr_pauth_regs
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]

	/* ----------------------------------------------------------
	 * Check if the earlier initialization of MDCR_EL3.SCCD to 1
	 * failed, meaning that ARMv8.5-PMU is not implemented and
	 * PMCR_EL0 should be saved in the Non-secure context. If
	 * MDCR_EL3.SCCD is set, the Cycle Counter is already disabled
	 * in Secure state and the sequence below is skipped.
	 * ----------------------------------------------------------
	 */
	mrs	x9, mdcr_el3
	tst	x9, #MDCR_SCCD_BIT
	bne	1f

	/* Secure Cycle Counter is not disabled */
	mrs	x9, pmcr_el0

	/* Check caller's security state */
	mrs	x10, scr_el3
	tst	x10, #SCR_NS_BIT
	beq	2f

	/* Save PMCR_EL0 if called from Non-secure state */
	str	x9, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]

	/* Disable cycle counter when event counting is prohibited */
2:	orr	x9, x9, #PMCR_EL0_DP_BIT
	msr	pmcr_el0, x9
	isb
1:
#if CTX_INCLUDE_PAUTH_REGS
	/* ----------------------------------------------------------
	 * Save the ARMv8.3-PAuth keys as they are not banked
	 * by exception level
	 * ----------------------------------------------------------
	 */
	add	x19, sp, #CTX_PAUTH_REGS_OFFSET

	mrs	x20, APIAKeyLo_EL1	/* x21:x20 = APIAKey */
	mrs	x21, APIAKeyHi_EL1
	mrs	x22, APIBKeyLo_EL1	/* x23:x22 = APIBKey */
	mrs	x23, APIBKeyHi_EL1
	mrs	x24, APDAKeyLo_EL1	/* x25:x24 = APDAKey */
	mrs	x25, APDAKeyHi_EL1
	mrs	x26, APDBKeyLo_EL1	/* x27:x26 = APDBKey */
	mrs	x27, APDBKeyHi_EL1
	mrs	x28, APGAKeyLo_EL1	/* x29:x28 = APGAKey */
	mrs	x29, APGAKeyHi_EL1

	stp	x20, x21, [x19, #CTX_PACIAKEY_LO]
	stp	x22, x23, [x19, #CTX_PACIBKEY_LO]
	stp	x24, x25, [x19, #CTX_PACDAKEY_LO]
	stp	x26, x27, [x19, #CTX_PACDBKEY_LO]
	stp	x28, x29, [x19, #CTX_PACGAKEY_LO]
#endif /* CTX_INCLUDE_PAUTH_REGS */

	ret
endfunc save_gp_pmcr_pauth_regs

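/*
 * Hedged usage note: save_gp_pmcr_pauth_regs is intended to be
 * invoked from each exception vector with a single branch, e.g.:
 *
 *	bl	save_gp_pmcr_pauth_regs
 *
 * keeping the vector entry within its 32-instruction budget, as the
 * header comment above explains.
 */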
/* ------------------------------------------------------------------
 * This function restores ARMv8.3-PAuth (if enabled) and all general
 * purpose registers except x30 from the CPU context.
 * x30 register must be explicitly restored by the caller.
 * ------------------------------------------------------------------
 */
func restore_gp_pmcr_pauth_regs
#if CTX_INCLUDE_PAUTH_REGS
	/* Restore the ARMv8.3 PAuth keys */
	add	x10, sp, #CTX_PAUTH_REGS_OFFSET

	ldp	x0, x1, [x10, #CTX_PACIAKEY_LO]	/* x1:x0 = APIAKey */
	ldp	x2, x3, [x10, #CTX_PACIBKEY_LO]	/* x3:x2 = APIBKey */
	ldp	x4, x5, [x10, #CTX_PACDAKEY_LO]	/* x5:x4 = APDAKey */
	ldp	x6, x7, [x10, #CTX_PACDBKEY_LO]	/* x7:x6 = APDBKey */
	ldp	x8, x9, [x10, #CTX_PACGAKEY_LO]	/* x9:x8 = APGAKey */

	msr	APIAKeyLo_EL1, x0
	msr	APIAKeyHi_EL1, x1
	msr	APIBKeyLo_EL1, x2
	msr	APIBKeyHi_EL1, x3
	msr	APDAKeyLo_EL1, x4
	msr	APDAKeyHi_EL1, x5
	msr	APDBKeyLo_EL1, x6
	msr	APDBKeyHi_EL1, x7
	msr	APGAKeyLo_EL1, x8
	msr	APGAKeyHi_EL1, x9
#endif /* CTX_INCLUDE_PAUTH_REGS */

	/* ----------------------------------------------------------
	 * Restore PMCR_EL0 when returning to Non-secure state if
	 * the Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented.
	 * ----------------------------------------------------------
	 */
	mrs	x0, scr_el3
	tst	x0, #SCR_NS_BIT
	beq	2f

	/* ----------------------------------------------------------
	 * Back to Non-secure state.
	 * Check if the earlier initialization of MDCR_EL3.SCCD to 1
	 * failed, meaning that ARMv8.5-PMU is not implemented and
	 * PMCR_EL0 should be restored from the Non-secure context.
	 * ----------------------------------------------------------
	 */
	mrs	x0, mdcr_el3
	tst	x0, #MDCR_SCCD_BIT
	bne	2f
	ldr	x0, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
	msr	pmcr_el0, x0
2:
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	ldr	x28, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
	msr	sp_el0, x28
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ret
endfunc restore_gp_pmcr_pauth_regs

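/*
 * Note that x30 is deliberately left untouched above: it is needed to
 * return to the caller, which is expected to reload the saved LR from
 * CTX_GPREG_LR itself, exactly as el3_exit does below.
 */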
/* ------------------------------------------------------------------
 * This routine assumes that SP_EL3 points to a valid context
 * structure from which the gp regs and other special registers can
 * be retrieved.
 * ------------------------------------------------------------------
 */
func el3_exit
	/* ----------------------------------------------------------
	 * Save the current SP_EL0 i.e. the EL3 runtime stack which
	 * will be used for handling the next SMC.
	 * Then switch to SP_EL3.
	 * ----------------------------------------------------------
	 */
	mov	x17, sp
	msr	spsel, #MODE_SP_ELX
	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* ----------------------------------------------------------
	 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
	 * ----------------------------------------------------------
	 */
	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	scr_el3, x18
	msr	spsr_el3, x16
	msr	elr_el3, x17

#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
	/* ----------------------------------------------------------
	 * Restore mitigation state as it was on entry to EL3
	 * ----------------------------------------------------------
	 */
	ldr	x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
	cbz	x17, 1f
	blr	x17
1:
#endif
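	/* ----------------------------------------------------------
	 * Note on the block above: CTX_CVE_2018_3639_DISABLE is
	 * assumed to hold either zero or a pointer to a CPU-specific
	 * 'disable mitigation' routine installed on entry to EL3;
	 * that routine is defined outside this file.
	 * ----------------------------------------------------------
	 */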
	/* ----------------------------------------------------------
	 * Restore general purpose registers (including x30), PMCR_EL0
	 * and the ARMv8.3-PAuth registers.
	 * Exit EL3 via ERET to a lower exception level.
	 * ----------------------------------------------------------
	 */
	bl	restore_gp_pmcr_pauth_regs
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

#if IMAGE_BL31 && RAS_EXTENSION
	/* ----------------------------------------------------------
	 * Issue an Error Synchronization Barrier to synchronize
	 * SErrors before exiting EL3. We're running with EAs
	 * unmasked, so any synchronized errors would be taken
	 * immediately; therefore there is no need to inspect the
	 * DISR_EL1 register.
	 * ----------------------------------------------------------
	 */
	esb
#endif
	eret

endfunc el3_exit