/*
 * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <context.h>

	.global	el1_sysregs_context_save
	.global	el1_sysregs_context_restore
#if CTX_INCLUDE_FPREGS
	.global	fpregs_context_save
	.global	fpregs_context_restore
#endif
#if CTX_INCLUDE_PAUTH_REGS
	.global	pauth_context_restore
	.global	pauth_context_save
#endif
#if ENABLE_PAUTH
	.global	pauth_load_bl_apiakey
#endif
	.global	save_gp_registers
	.global	restore_gp_registers
	.global	restore_gp_registers_eret
	.global	save_pmcr_disable_pmu
	.global	el3_exit

/* -----------------------------------------------------
 * If ARMv8.5-PMU is implemented, cycle counting is
 * disabled by setting MDCR_EL3.SCCD to 1.
 * -----------------------------------------------------
 */
func save_pmcr_disable_pmu
	/* -----------------------------------------------------
	 * Check if the earlier attempt to set MDCR_EL3.SCCD
	 * to 1 failed, meaning that ARMv8.5-PMU is not
	 * implemented and PMCR_EL0 should be saved in the
	 * Non-secure context.
	 * -----------------------------------------------------
	 */
	mrs	x9, mdcr_el3
	tst	x9, #MDCR_SCCD_BIT
	bne	1f

	/* Secure Cycle Counter is not disabled */
	mrs	x9, pmcr_el0

	/* Check caller's security state */
	mrs	x10, scr_el3
	tst	x10, #SCR_NS_BIT
	beq	2f

	/* Save PMCR_EL0 if called from Non-secure state */
	str	x9, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]

	/* Disable cycle counter when event counting is prohibited */
2:	orr	x9, x9, #PMCR_EL0_DP_BIT
	msr	pmcr_el0, x9

	isb
1:	ret
endfunc save_pmcr_disable_pmu

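/*
 * A minimal sketch of the earlier MDCR_EL3 initialization the check
 * above relies on (performed elsewhere during EL3 setup; shown here
 * only as an assumption, to make the probe concrete). MDCR_EL3.SCCD
 * is RES0 when ARMv8.5-PMU is not implemented, so a write of 1 reads
 * back as 0:
 *
 *	mrs	x0, mdcr_el3
 *	orr	x0, x0, #MDCR_SCCD_BIT	// try to disable the Secure
 *	msr	mdcr_el3, x0		// Cycle Counter
 *	isb
 */
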
/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS, using x9-x17 (temporary caller-saved registers)
 * to save the EL1 system register context. It assumes
 * that 'x0' is pointing to an 'el1_sys_regs' structure
 * where the register context will be saved.
 * -----------------------------------------------------
 */
func el1_sysregs_context_save

	mrs	x9, spsr_el1
	mrs	x10, elr_el1
	stp	x9, x10, [x0, #CTX_SPSR_EL1]

	mrs	x15, sctlr_el1
	mrs	x16, actlr_el1
	stp	x15, x16, [x0, #CTX_SCTLR_EL1]

	mrs	x17, cpacr_el1
	mrs	x9, csselr_el1
	stp	x17, x9, [x0, #CTX_CPACR_EL1]

	mrs	x10, sp_el1
	mrs	x11, esr_el1
	stp	x10, x11, [x0, #CTX_SP_EL1]

	mrs	x12, ttbr0_el1
	mrs	x13, ttbr1_el1
	stp	x12, x13, [x0, #CTX_TTBR0_EL1]

	mrs	x14, mair_el1
	mrs	x15, amair_el1
	stp	x14, x15, [x0, #CTX_MAIR_EL1]

	mrs	x16, tcr_el1
	mrs	x17, tpidr_el1
	stp	x16, x17, [x0, #CTX_TCR_EL1]

	mrs	x9, tpidr_el0
	mrs	x10, tpidrro_el0
	stp	x9, x10, [x0, #CTX_TPIDR_EL0]

	mrs	x13, par_el1
	mrs	x14, far_el1
	stp	x13, x14, [x0, #CTX_PAR_EL1]

	mrs	x15, afsr0_el1
	mrs	x16, afsr1_el1
	stp	x15, x16, [x0, #CTX_AFSR0_EL1]

	mrs	x17, contextidr_el1
	mrs	x9, vbar_el1
	stp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]

	/* Save AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, spsr_abt
	mrs	x12, spsr_und
	stp	x11, x12, [x0, #CTX_SPSR_ABT]

	mrs	x13, spsr_irq
	mrs	x14, spsr_fiq
	stp	x13, x14, [x0, #CTX_SPSR_IRQ]

	mrs	x15, dacr32_el2
	mrs	x16, ifsr32_el2
	stp	x15, x16, [x0, #CTX_DACR32_EL2]
#endif

	/* Save NS timer registers if the build has instructed so */
#if NS_TIMER_SWITCH
	mrs	x10, cntp_ctl_el0
	mrs	x11, cntp_cval_el0
	stp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]

	mrs	x12, cntv_ctl_el0
	mrs	x13, cntv_cval_el0
	stp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]

	mrs	x14, cntkctl_el1
	str	x14, [x0, #CTX_CNTKCTL_EL1]
#endif

	/* Save MTE system registers if the build has instructed so */
#if CTX_INCLUDE_MTE_REGS
	mrs	x15, TFSRE0_EL1
	mrs	x16, TFSR_EL1
	stp	x15, x16, [x0, #CTX_TFSRE0_EL1]

	mrs	x9, RGSR_EL1
	mrs	x10, GCR_EL1
	stp	x9, x10, [x0, #CTX_RGSR_EL1]
#endif

	ret
endfunc el1_sysregs_context_save

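/*
 * Illustrative call site (a sketch; the C helpers named here live in
 * the context-management code, not in this file): the caller passes a
 * pointer to the 'el1_sys_regs' member of the CPU's 'cpu_context_t',
 * roughly as in
 *
 *	void cm_el1_sysregs_context_save(uint32_t security_state)
 *	{
 *		cpu_context_t *ctx = cm_get_context(security_state);
 *
 *		assert(ctx != NULL);
 *		el1_sysregs_context_save(get_sysregs_ctx(ctx));
 *	}
 */
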
/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS, using x9-x17 (temporary caller-saved registers)
 * to restore the EL1 system register context. It assumes
 * that 'x0' is pointing to an 'el1_sys_regs' structure
 * from which the register context will be restored.
 * -----------------------------------------------------
 */
func el1_sysregs_context_restore

	ldp	x9, x10, [x0, #CTX_SPSR_EL1]
	msr	spsr_el1, x9
	msr	elr_el1, x10

	ldp	x15, x16, [x0, #CTX_SCTLR_EL1]
	msr	sctlr_el1, x15
	msr	actlr_el1, x16

	ldp	x17, x9, [x0, #CTX_CPACR_EL1]
	msr	cpacr_el1, x17
	msr	csselr_el1, x9

	ldp	x10, x11, [x0, #CTX_SP_EL1]
	msr	sp_el1, x10
	msr	esr_el1, x11

	ldp	x12, x13, [x0, #CTX_TTBR0_EL1]
	msr	ttbr0_el1, x12
	msr	ttbr1_el1, x13

	ldp	x14, x15, [x0, #CTX_MAIR_EL1]
	msr	mair_el1, x14
	msr	amair_el1, x15

	ldp	x16, x17, [x0, #CTX_TCR_EL1]
	msr	tcr_el1, x16
	msr	tpidr_el1, x17

	ldp	x9, x10, [x0, #CTX_TPIDR_EL0]
	msr	tpidr_el0, x9
	msr	tpidrro_el0, x10

	ldp	x13, x14, [x0, #CTX_PAR_EL1]
	msr	par_el1, x13
	msr	far_el1, x14

	ldp	x15, x16, [x0, #CTX_AFSR0_EL1]
	msr	afsr0_el1, x15
	msr	afsr1_el1, x16

	ldp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
	msr	contextidr_el1, x17
	msr	vbar_el1, x9

	/* Restore AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
	ldp	x11, x12, [x0, #CTX_SPSR_ABT]
	msr	spsr_abt, x11
	msr	spsr_und, x12

	ldp	x13, x14, [x0, #CTX_SPSR_IRQ]
	msr	spsr_irq, x13
	msr	spsr_fiq, x14

	ldp	x15, x16, [x0, #CTX_DACR32_EL2]
	msr	dacr32_el2, x15
	msr	ifsr32_el2, x16
#endif
	/* Restore NS timer registers if the build has instructed so */
#if NS_TIMER_SWITCH
	ldp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]
	msr	cntp_ctl_el0, x10
	msr	cntp_cval_el0, x11

	ldp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]
	msr	cntv_ctl_el0, x12
	msr	cntv_cval_el0, x13

	ldr	x14, [x0, #CTX_CNTKCTL_EL1]
	msr	cntkctl_el1, x14
#endif
	/* Restore MTE system registers if the build has instructed so */
#if CTX_INCLUDE_MTE_REGS
	ldp	x11, x12, [x0, #CTX_TFSRE0_EL1]
	msr	TFSRE0_EL1, x11
	msr	TFSR_EL1, x12

	ldp	x13, x14, [x0, #CTX_RGSR_EL1]
	msr	RGSR_EL1, x13
	msr	GCR_EL1, x14
#endif

	/* No explicit ISB required here as ERET covers it */
	ret
endfunc el1_sysregs_context_restore

/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS (aapcs64), using x9-x17 (temporary caller-saved
 * registers) to save the floating point register
 * context. It assumes that 'x0' is pointing to a
 * 'fp_regs' structure where the register context will
 * be saved.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is
 * set. However, Trusted Firmware currently neither uses
 * VFP registers nor sets this trap, so the bit is
 * assumed to be clear.
 *
 * TODO: Revisit when VFP is used in the secure world.
 * -----------------------------------------------------
 */
#if CTX_INCLUDE_FPREGS
func fpregs_context_save
	stp	q0, q1, [x0, #CTX_FP_Q0]
	stp	q2, q3, [x0, #CTX_FP_Q2]
	stp	q4, q5, [x0, #CTX_FP_Q4]
	stp	q6, q7, [x0, #CTX_FP_Q6]
	stp	q8, q9, [x0, #CTX_FP_Q8]
	stp	q10, q11, [x0, #CTX_FP_Q10]
	stp	q12, q13, [x0, #CTX_FP_Q12]
	stp	q14, q15, [x0, #CTX_FP_Q14]
	stp	q16, q17, [x0, #CTX_FP_Q16]
	stp	q18, q19, [x0, #CTX_FP_Q18]
	stp	q20, q21, [x0, #CTX_FP_Q20]
	stp	q22, q23, [x0, #CTX_FP_Q22]
	stp	q24, q25, [x0, #CTX_FP_Q24]
	stp	q26, q27, [x0, #CTX_FP_Q26]
	stp	q28, q29, [x0, #CTX_FP_Q28]
	stp	q30, q31, [x0, #CTX_FP_Q30]

	mrs	x9, fpsr
	str	x9, [x0, #CTX_FP_FPSR]

	mrs	x10, fpcr
	str	x10, [x0, #CTX_FP_FPCR]

#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, fpexc32_el2
	str	x11, [x0, #CTX_FP_FPEXC32_EL2]
#endif
	ret
endfunc fpregs_context_save

/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS (aapcs64), using x9-x17 (temporary caller-saved
 * registers) to restore the floating point register
 * context. It assumes that 'x0' is pointing to a
 * 'fp_regs' structure from which the register context
 * will be restored.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is
 * set. However, Trusted Firmware currently neither uses
 * VFP registers nor sets this trap, so the bit is
 * assumed to be clear.
 *
 * TODO: Revisit when VFP is used in the secure world.
 * -----------------------------------------------------
 */
func fpregs_context_restore
	ldp	q0, q1, [x0, #CTX_FP_Q0]
	ldp	q2, q3, [x0, #CTX_FP_Q2]
	ldp	q4, q5, [x0, #CTX_FP_Q4]
	ldp	q6, q7, [x0, #CTX_FP_Q6]
	ldp	q8, q9, [x0, #CTX_FP_Q8]
	ldp	q10, q11, [x0, #CTX_FP_Q10]
	ldp	q12, q13, [x0, #CTX_FP_Q12]
	ldp	q14, q15, [x0, #CTX_FP_Q14]
	ldp	q16, q17, [x0, #CTX_FP_Q16]
	ldp	q18, q19, [x0, #CTX_FP_Q18]
	ldp	q20, q21, [x0, #CTX_FP_Q20]
	ldp	q22, q23, [x0, #CTX_FP_Q22]
	ldp	q24, q25, [x0, #CTX_FP_Q24]
	ldp	q26, q27, [x0, #CTX_FP_Q26]
	ldp	q28, q29, [x0, #CTX_FP_Q28]
	ldp	q30, q31, [x0, #CTX_FP_Q30]

	ldr	x9, [x0, #CTX_FP_FPSR]
	msr	fpsr, x9

	ldr	x10, [x0, #CTX_FP_FPCR]
	msr	fpcr, x10

#if CTX_INCLUDE_AARCH32_REGS
	ldr	x11, [x0, #CTX_FP_FPEXC32_EL2]
	msr	fpexc32_el2, x11
#endif
	/*
	 * No explicit ISB required here as the ERET to
	 * switch to secure EL1 or the non-secure world
	 * covers it
	 */

	ret
endfunc fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */
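
/*
 * The TODO above concerns CPTR_EL3.TFP. If the secure world ever uses
 * VFP with traps enabled, a minimal sketch of the approach (an
 * assumption, not current behaviour) would be to disable the trap
 * around the Q-register accesses:
 *
 *	mrs	x9, cptr_el3
 *	bic	x10, x9, #TFP_BIT	// stop trapping FP/SIMD accesses
 *	msr	cptr_el3, x10
 *	isb
 *	...				// save/restore Q registers as above
 *	msr	cptr_el3, x9		// restore the original trap state
 */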

#if CTX_INCLUDE_PAUTH_REGS
/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS, using x9-x17 (temporary caller-saved registers)
 * to save the ARMv8.3-PAuth register context. It assumes
 * that 'sp' is pointing to a 'cpu_context_t' structure
 * to which the register context will be saved.
 * -----------------------------------------------------
 */
func pauth_context_save
	add	x11, sp, #CTX_PAUTH_REGS_OFFSET

	mrs	x9, APIAKeyLo_EL1
	mrs	x10, APIAKeyHi_EL1
	stp	x9, x10, [x11, #CTX_PACIAKEY_LO]

	mrs	x9, APIBKeyLo_EL1
	mrs	x10, APIBKeyHi_EL1
	stp	x9, x10, [x11, #CTX_PACIBKEY_LO]

	mrs	x9, APDAKeyLo_EL1
	mrs	x10, APDAKeyHi_EL1
	stp	x9, x10, [x11, #CTX_PACDAKEY_LO]

	mrs	x9, APDBKeyLo_EL1
	mrs	x10, APDBKeyHi_EL1
	stp	x9, x10, [x11, #CTX_PACDBKEY_LO]

	mrs	x9, APGAKeyLo_EL1
	mrs	x10, APGAKeyHi_EL1
	stp	x9, x10, [x11, #CTX_PACGAKEY_LO]

	ret
endfunc pauth_context_save

/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS, using x9-x17 (temporary caller-saved registers)
 * to restore the ARMv8.3-PAuth register context. It
 * assumes that 'sp' is pointing to a 'cpu_context_t'
 * structure from which the register context will be
 * restored.
 * -----------------------------------------------------
 */
func pauth_context_restore
	add	x11, sp, #CTX_PAUTH_REGS_OFFSET

	ldp	x9, x10, [x11, #CTX_PACIAKEY_LO]
	msr	APIAKeyLo_EL1, x9
	msr	APIAKeyHi_EL1, x10

	ldp	x9, x10, [x11, #CTX_PACIBKEY_LO]
	msr	APIBKeyLo_EL1, x9
	msr	APIBKeyHi_EL1, x10

	ldp	x9, x10, [x11, #CTX_PACDAKEY_LO]
	msr	APDAKeyLo_EL1, x9
	msr	APDAKeyHi_EL1, x10

	ldp	x9, x10, [x11, #CTX_PACDBKEY_LO]
	msr	APDBKeyLo_EL1, x9
	msr	APDBKeyHi_EL1, x10

	ldp	x9, x10, [x11, #CTX_PACGAKEY_LO]
	msr	APGAKeyLo_EL1, x9
	msr	APGAKeyHi_EL1, x10

	ret
endfunc pauth_context_restore
#endif /* CTX_INCLUDE_PAUTH_REGS */
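
/*
 * Illustrative pairing (a sketch; the entry-side hook is an assumption,
 * as only the exit side is visible in this file): pauth_context_save is
 * expected to run on entry to EL3 with SP_EL3 pointing to the current
 * 'cpu_context_t', and pauth_context_restore runs from el3_exit below,
 * so that each world re-enters with its own PAuth keys:
 *
 *	bl	pauth_context_save	// on entry to EL3
 *	...				// EL3 runtime work / world switch
 *	b	el3_exit		// restores keys, then ERET
 */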

/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS, using x9-x17 (temporary caller-saved registers)
 * to load the APIA key used by the firmware.
 * -----------------------------------------------------
 */
#if ENABLE_PAUTH
func pauth_load_bl_apiakey
	/* Load instruction key A used by the Trusted Firmware. */
	adrp	x11, plat_apiakey
	add	x11, x11, :lo12:plat_apiakey
	ldp	x9, x10, [x11, #0]

	msr	APIAKeyLo_EL1, x9
	msr	APIAKeyHi_EL1, x10

	ret
endfunc pauth_load_bl_apiakey
#endif /* ENABLE_PAUTH */
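
/*
 * 'plat_apiakey' is a platform-provided symbol holding the 128-bit
 * instruction key A as two 64-bit words, low half first (see the ldp
 * above). An illustrative C-side definition (an assumption, not
 * mandated by this file) would be:
 *
 *	uint64_t plat_apiakey[2];	// [0] = APIAKeyLo, [1] = APIAKeyHi
 */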

/* -----------------------------------------------------
 * The following functions are used to save and restore
 * all the general purpose registers. Ideally we would
 * only save and restore the callee-saved registers when
 * a world switch occurs, but that type of implementation
 * is more complex. So currently we always save and
 * restore these registers on entry to and exit from EL3.
 * These are not macros, to ensure that their invocation
 * fits within the 32 instructions per exception vector.
 * clobbers: x18
 * -----------------------------------------------------
 */
func save_gp_registers
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
	ret
endfunc save_gp_registers

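/*
 * Illustrative invocation from an exception vector (a sketch of the
 * expected caller, which lives in the runtime exception vector code,
 * not in this file):
 *
 *	vector_entry sync_exception_aarch64
 *		bl	save_gp_registers	// SP_EL3 -> cpu_context_t
 *		...				// dispatch the exception
 *	end_vector_entry sync_exception_aarch64
 */
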
/* -----------------------------------------------------
 * This function restores all general purpose registers
 * except x30 from the CPU context. The x30 register
 * must be explicitly restored by the caller.
 * -----------------------------------------------------
 */
func restore_gp_registers
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	ldr	x28, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
	msr	sp_el0, x28
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ret
endfunc restore_gp_registers

/* -----------------------------------------------------
 * Restore the general purpose registers (including x30)
 * and exit EL3 via ERET to a lower exception level.
 * -----------------------------------------------------
 */
func restore_gp_registers_eret
	bl	restore_gp_registers
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

#if IMAGE_BL31 && RAS_EXTENSION
	/*
	 * Issue an Error Synchronization Barrier to synchronize SErrors
	 * before exiting EL3. We're running with EAs unmasked, so any
	 * synchronized errors would be taken immediately; therefore there
	 * is no need to inspect the DISR_EL1 register.
	 */
	esb
#endif
	eret
endfunc restore_gp_registers_eret

/* -----------------------------------------------------
 * This routine assumes that SP_EL3 is pointing to a
 * valid context structure from which the GP regs and
 * other special registers can be retrieved.
 * -----------------------------------------------------
 */
func el3_exit
	/* -----------------------------------------------------
	 * Save the current SP_EL0, i.e. the EL3 runtime stack,
	 * which will be used for handling the next SMC. Then
	 * switch to SP_EL3.
	 * -----------------------------------------------------
	 */
	mov	x17, sp
	msr	spsel, #1
	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* -----------------------------------------------------
	 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
	 * -----------------------------------------------------
	 */
	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	scr_el3, x18
	msr	spsr_el3, x16
	msr	elr_el3, x17

	/* -----------------------------------------------------
	 * Restore PMCR_EL0 when returning to the Non-secure
	 * state, if the Secure Cycle Counter is not disabled
	 * in MDCR_EL3 when ARMv8.5-PMU is implemented.
	 * -----------------------------------------------------
	 */
	tst	x18, #SCR_NS_BIT
	beq	2f

	/* -----------------------------------------------------
	 * Back to Non-secure state.
	 * Check if the earlier attempt to set MDCR_EL3.SCCD
	 * to 1 failed, meaning that ARMv8.5-PMU is not
	 * implemented and PMCR_EL0 should be restored from the
	 * Non-secure context.
	 * -----------------------------------------------------
	 */
	mrs	x17, mdcr_el3
	tst	x17, #MDCR_SCCD_BIT
	bne	2f
	ldr	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
	msr	pmcr_el0, x17
2:

#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
	/* Restore mitigation state as it was on entry to EL3 */
	ldr	x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
	cmp	x17, xzr
	beq	1f
	blr	x17
1:
#endif

#if CTX_INCLUDE_PAUTH_REGS
	/* Restore ARMv8.3-PAuth registers */
	bl	pauth_context_restore
#endif

	/* Restore saved general purpose registers and return */
	b	restore_gp_registers_eret
endfunc el3_exit
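
/*
 * Illustrative exit path (a sketch; the handler shown is an assumption
 * about the caller, which lives in the runtime exception vector code):
 * a runtime service writes its return value(s) into the saved context
 * and then branches to el3_exit, e.g.
 *
 *	str	x0, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]	// SMC return value
 *	b	el3_exit
 */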