/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <context.h>

	.global	el1_sysregs_context_save
	.global	el1_sysregs_context_save_post_ops
	.global	el1_sysregs_context_restore
#if CTX_INCLUDE_FPREGS
	.global	fpregs_context_save
	.global	fpregs_context_restore
#endif
	.global	save_gp_registers
	.global	restore_gp_registers_eret
	.global	restore_gp_registers_callee_eret
	.global	el3_exit

/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS to use x9-x17 (temporary caller-saved registers)
 * to save EL1 system register context. It assumes that
 * 'x0' is pointing to an 'el1_sys_regs' structure where
 * the register context will be saved.
 * -----------------------------------------------------
 */
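/* A C-level sketch of the save/restore contract (hypothetical
 * prototypes for illustration only; the real callers set up 'x0'
 * in assembly and the 'el1_sys_regs' layout comes from context.h):
 *
 *   void el1_sysregs_context_save(el1_sys_regs_t *regs);
 *   void el1_sysregs_context_restore(el1_sys_regs_t *regs);
 */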
func el1_sysregs_context_save

	mrs	x9, spsr_el1
	mrs	x10, elr_el1
	stp	x9, x10, [x0, #CTX_SPSR_EL1]

	mrs	x15, sctlr_el1
	mrs	x16, actlr_el1
	stp	x15, x16, [x0, #CTX_SCTLR_EL1]

	mrs	x17, cpacr_el1
	mrs	x9, csselr_el1
	stp	x17, x9, [x0, #CTX_CPACR_EL1]

	mrs	x10, sp_el1
	mrs	x11, esr_el1
	stp	x10, x11, [x0, #CTX_SP_EL1]

	mrs	x12, ttbr0_el1
	mrs	x13, ttbr1_el1
	stp	x12, x13, [x0, #CTX_TTBR0_EL1]

	mrs	x14, mair_el1
	mrs	x15, amair_el1
	stp	x14, x15, [x0, #CTX_MAIR_EL1]

	mrs	x16, tcr_el1
	mrs	x17, tpidr_el1
	stp	x16, x17, [x0, #CTX_TCR_EL1]

	mrs	x9, tpidr_el0
	mrs	x10, tpidrro_el0
	stp	x9, x10, [x0, #CTX_TPIDR_EL0]

	mrs	x13, par_el1
	mrs	x14, far_el1
	stp	x13, x14, [x0, #CTX_PAR_EL1]

	mrs	x15, afsr0_el1
	mrs	x16, afsr1_el1
	stp	x15, x16, [x0, #CTX_AFSR0_EL1]

	mrs	x17, contextidr_el1
	mrs	x9, vbar_el1
	stp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]

	mrs	x10, pmcr_el0
	str	x10, [x0, #CTX_PMCR_EL0]

	/* Save AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, spsr_abt
	mrs	x12, spsr_und
	stp	x11, x12, [x0, #CTX_SPSR_ABT]

	mrs	x13, spsr_irq
	mrs	x14, spsr_fiq
	stp	x13, x14, [x0, #CTX_SPSR_IRQ]

	mrs	x15, dacr32_el2
	mrs	x16, ifsr32_el2
	stp	x15, x16, [x0, #CTX_DACR32_EL2]

	mrs	x17, fpexc32_el2
	str	x17, [x0, #CTX_FP_FPEXC32_EL2]
#endif

	/* Save NS timer registers if the build has instructed so */
#if NS_TIMER_SWITCH
	mrs	x10, cntp_ctl_el0
	mrs	x11, cntp_cval_el0
	stp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]

	mrs	x12, cntv_ctl_el0
	mrs	x13, cntv_cval_el0
	stp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]

	mrs	x14, cntkctl_el1
	str	x14, [x0, #CTX_CNTKCTL_EL1]
#endif

	ret
endfunc el1_sysregs_context_save

/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS to use x9-x17 (temporary caller-saved registers)
 * to do post operations after saving the EL1 system
 * register context.
 * -----------------------------------------------------
 */
func el1_sysregs_context_save_post_ops
#if ENABLE_SPE_FOR_LOWER_ELS
	/* Detect if SPE is implemented */
	mrs	x9, id_aa64dfr0_el1
	ubfx	x9, x9, #ID_AA64DFR0_PMS_SHIFT, #ID_AA64DFR0_PMS_LENGTH
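	/* ID_AA64DFR0_EL1.PMS == 0b0001 indicates SPE is implemented */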
	cmp	x9, #0x1
	b.ne	1f

	/*
	 * Before switching from normal world to secure world
	 * the profiling buffers need to be drained out to memory. This is
	 * required to avoid an invalid memory access when TTBR is switched
	 * for entry to S-EL1.
	 */
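	/*
	 * 'psb csync' (Profiling Synchronization Barrier) is an
	 * ARMv8.2 instruction, so the assembler's target architecture
	 * is temporarily raised to accept it and restored afterwards.
	 * The following DSB ensures the drain has completed before the
	 * translation regime is changed.
	 */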
	.arch	armv8.2-a+profile
	psb	csync
	dsb	nsh
	.arch	armv8-a
1:
#endif
	ret
endfunc el1_sysregs_context_save_post_ops

/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS to use x9-x17 (temporary caller-saved registers)
 * to restore EL1 system register context. It assumes
 * that 'x0' is pointing to an 'el1_sys_regs' structure
 * from where the register context will be restored.
 * -----------------------------------------------------
 */
func el1_sysregs_context_restore

	ldp	x9, x10, [x0, #CTX_SPSR_EL1]
	msr	spsr_el1, x9
	msr	elr_el1, x10

	ldp	x15, x16, [x0, #CTX_SCTLR_EL1]
	msr	sctlr_el1, x15
	msr	actlr_el1, x16

	ldp	x17, x9, [x0, #CTX_CPACR_EL1]
	msr	cpacr_el1, x17
	msr	csselr_el1, x9

	ldp	x10, x11, [x0, #CTX_SP_EL1]
	msr	sp_el1, x10
	msr	esr_el1, x11

	ldp	x12, x13, [x0, #CTX_TTBR0_EL1]
	msr	ttbr0_el1, x12
	msr	ttbr1_el1, x13

	ldp	x14, x15, [x0, #CTX_MAIR_EL1]
	msr	mair_el1, x14
	msr	amair_el1, x15

	ldp	x16, x17, [x0, #CTX_TCR_EL1]
	msr	tcr_el1, x16
	msr	tpidr_el1, x17

	ldp	x9, x10, [x0, #CTX_TPIDR_EL0]
	msr	tpidr_el0, x9
	msr	tpidrro_el0, x10

	ldp	x13, x14, [x0, #CTX_PAR_EL1]
	msr	par_el1, x13
	msr	far_el1, x14

	ldp	x15, x16, [x0, #CTX_AFSR0_EL1]
	msr	afsr0_el1, x15
	msr	afsr1_el1, x16

	ldp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
	msr	contextidr_el1, x17
	msr	vbar_el1, x9

	ldr	x10, [x0, #CTX_PMCR_EL0]
	msr	pmcr_el0, x10

	/* Restore AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
	ldp	x11, x12, [x0, #CTX_SPSR_ABT]
	msr	spsr_abt, x11
	msr	spsr_und, x12

	ldp	x13, x14, [x0, #CTX_SPSR_IRQ]
	msr	spsr_irq, x13
	msr	spsr_fiq, x14

	ldp	x15, x16, [x0, #CTX_DACR32_EL2]
	msr	dacr32_el2, x15
	msr	ifsr32_el2, x16

	ldr	x17, [x0, #CTX_FP_FPEXC32_EL2]
	msr	fpexc32_el2, x17
#endif
	/* Restore NS timer registers if the build has instructed so */
#if NS_TIMER_SWITCH
	ldp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]
	msr	cntp_ctl_el0, x10
	msr	cntp_cval_el0, x11

	ldp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]
	msr	cntv_ctl_el0, x12
	msr	cntv_cval_el0, x13

	ldr	x14, [x0, #CTX_CNTKCTL_EL1]
	msr	cntkctl_el1, x14
#endif

	/* No explicit ISB required here as ERET covers it */
	ret
endfunc el1_sysregs_context_restore

/* -----------------------------------------------------
 * The following function follows the aapcs_64 strictly
 * to use x9-x17 (temporary caller-saved registers
 * according to AArch64 PCS) to save floating point
 * register context. It assumes that 'x0' is pointing to
 * an 'fp_regs' structure where the register context will
 * be saved.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is
 * set. However, currently we don't use VFP registers
 * nor set traps in Trusted Firmware, and assume it is
 * cleared.
 *
 * TODO: Revisit when VFP is used in secure world
 * -----------------------------------------------------
 */
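/* A sketch of how a caller might use these helpers under the build
 * flag (hypothetical snippet; the real call sites live in TF-A's
 * context management code, and the accessor names are assumptions):
 *
 *   #if CTX_INCLUDE_FPREGS
 *           fpregs_context_save(get_fpregs_ctx(cm_get_context(NON_SECURE)));
 *   #endif
 */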
#if CTX_INCLUDE_FPREGS
func fpregs_context_save
	stp	q0, q1, [x0, #CTX_FP_Q0]
	stp	q2, q3, [x0, #CTX_FP_Q2]
	stp	q4, q5, [x0, #CTX_FP_Q4]
	stp	q6, q7, [x0, #CTX_FP_Q6]
	stp	q8, q9, [x0, #CTX_FP_Q8]
	stp	q10, q11, [x0, #CTX_FP_Q10]
	stp	q12, q13, [x0, #CTX_FP_Q12]
	stp	q14, q15, [x0, #CTX_FP_Q14]
	stp	q16, q17, [x0, #CTX_FP_Q16]
	stp	q18, q19, [x0, #CTX_FP_Q18]
	stp	q20, q21, [x0, #CTX_FP_Q20]
	stp	q22, q23, [x0, #CTX_FP_Q22]
	stp	q24, q25, [x0, #CTX_FP_Q24]
	stp	q26, q27, [x0, #CTX_FP_Q26]
	stp	q28, q29, [x0, #CTX_FP_Q28]
	stp	q30, q31, [x0, #CTX_FP_Q30]

	mrs	x9, fpsr
	str	x9, [x0, #CTX_FP_FPSR]

	mrs	x10, fpcr
	str	x10, [x0, #CTX_FP_FPCR]

	ret
endfunc fpregs_context_save

/* -----------------------------------------------------
 * The following function follows the aapcs_64 strictly
 * to use x9-x17 (temporary caller-saved registers
 * according to AArch64 PCS) to restore floating point
 * register context. It assumes that 'x0' is pointing to
 * an 'fp_regs' structure from where the register context
 * will be restored.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is
 * set. However, currently we don't use VFP registers
 * nor set traps in Trusted Firmware, and assume it is
 * cleared.
 *
 * TODO: Revisit when VFP is used in secure world
 * -----------------------------------------------------
 */
func fpregs_context_restore
	ldp	q0, q1, [x0, #CTX_FP_Q0]
	ldp	q2, q3, [x0, #CTX_FP_Q2]
	ldp	q4, q5, [x0, #CTX_FP_Q4]
	ldp	q6, q7, [x0, #CTX_FP_Q6]
	ldp	q8, q9, [x0, #CTX_FP_Q8]
	ldp	q10, q11, [x0, #CTX_FP_Q10]
	ldp	q12, q13, [x0, #CTX_FP_Q12]
	ldp	q14, q15, [x0, #CTX_FP_Q14]
	ldp	q16, q17, [x0, #CTX_FP_Q16]
	ldp	q18, q19, [x0, #CTX_FP_Q18]
	ldp	q20, q21, [x0, #CTX_FP_Q20]
	ldp	q22, q23, [x0, #CTX_FP_Q22]
	ldp	q24, q25, [x0, #CTX_FP_Q24]
	ldp	q26, q27, [x0, #CTX_FP_Q26]
	ldp	q28, q29, [x0, #CTX_FP_Q28]
	ldp	q30, q31, [x0, #CTX_FP_Q30]

	ldr	x9, [x0, #CTX_FP_FPSR]
	msr	fpsr, x9

	ldr	x10, [x0, #CTX_FP_FPCR]
	msr	fpcr, x10

	/*
	 * No explicit ISB required here as ERET to
	 * switch to secure EL1 or non-secure world
	 * covers it
	 */

	ret
endfunc fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */

/* -----------------------------------------------------
 * The following functions are used to save and restore
 * all the general purpose registers. Ideally we would
 * only save and restore the callee-saved registers when
 * a world switch occurs but that type of implementation
 * is more complex. So currently we will always save and
 * restore these registers on entry and exit of EL3.
 * These are not macros to ensure their invocation fits
 * within the 32 instructions per exception vector.
 * clobbers: x18
 * -----------------------------------------------------
 */
func save_gp_registers
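	/*
	 * On entry to EL3, SP_EL3 points at the base of the current
	 * cpu_context structure, which is why the general purpose
	 * register area is addressed relative to SP rather than
	 * through an argument register.
	 */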
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
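	/*
	 * x18 was stored above, so it is free to serve as the
	 * temporary through which SP_EL0 is captured; this is the
	 * clobber noted in the header comment.
	 */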
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
	ret
endfunc save_gp_registers

func restore_gp_registers_eret
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	b	restore_gp_registers_callee_eret
endfunc restore_gp_registers_eret

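/*
 * The callee variant below deliberately skips x0-x3: on return
 * paths where an SMC handler has already placed return values in
 * those registers they must survive, whereas the wrapper above
 * first reloads them from the saved context.
 */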
func restore_gp_registers_callee_eret
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
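	/*
	 * CTX_GPREG_LR and CTX_GPREG_SP_EL0 are adjacent in the
	 * context, so a single ldp recovers the link register and,
	 * via x17, the saved SP_EL0. x16 and x17 themselves are
	 * reloaded last, once their scratch use is over.
	 */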
	ldp	x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	msr	sp_el0, x17
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	eret
endfunc restore_gp_registers_callee_eret

	/* -----------------------------------------------------
	 * This routine assumes that SP_EL3 is pointing to a
	 * valid context structure from where the gp regs and
	 * other special registers can be retrieved.
	 * -----------------------------------------------------
	 */
func el3_exit
	/* -----------------------------------------------------
	 * Save the current SP_EL0 i.e. the EL3 runtime stack
	 * which will be used for handling the next SMC. Then
	 * switch to SP_EL3
	 * -----------------------------------------------------
	 */
	mov	x17, sp
	msr	spsel, #1
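	/* From here SP denotes SP_EL3, i.e. the context pointer */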
	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* -----------------------------------------------------
	 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
	 * -----------------------------------------------------
	 */
	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	scr_el3, x18
	msr	spsr_el3, x16
	msr	elr_el3, x17

	/* Restore saved general purpose registers and return */
	b	restore_gp_registers_eret
endfunc el3_exit