blob: 7982e50ba70edf8d13466d4659390452329c515f [file] [log] [blame]
Achin Gupta9ac63c52014-01-16 12:08:03 +00001/*
Soby Mathew0d786072016-03-24 16:56:29 +00002 * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
Achin Gupta9ac63c52014-01-16 12:08:03 +00003 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
Dan Handley2bd4ef22014-04-09 13:14:54 +010031#include <arch.h>
Andrew Thoelke38bde412014-03-18 13:46:55 +000032#include <asm_macros.S>
Dan Handley2bd4ef22014-04-09 13:14:54 +010033#include <context.h>
Achin Gupta9ac63c52014-01-16 12:08:03 +000034
	/*
	 * Entry points exported to the rest of BL31. The FP register
	 * save/restore helpers exist only when the build includes FP
	 * context (CTX_INCLUDE_FPREGS).
	 */
	.global	el1_sysregs_context_save
	.global	el1_sysregs_context_restore
#if CTX_INCLUDE_FPREGS
	.global	fpregs_context_save
	.global	fpregs_context_restore
#endif
	.global	save_gp_registers
	.global	restore_gp_registers_eret
	.global	restore_gp_registers_callee_eret
	.global	el3_exit
45
/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS to use x9-x17 (temporary caller-saved registers)
 * to save EL1 system register context. It assumes that
 * 'x0' is pointing to a 'el1_sys_regs' structure where
 * the register context will be saved.
 *
 * In:      x0 = base of the 'el1_sys_regs' buffer
 * Clobber: x9-x17 only (no stack usage)
 * -----------------------------------------------------
 */
func el1_sysregs_context_save

	/* EL1 exception return state */
	mrs	x9, spsr_el1
	mrs	x10, elr_el1
	stp	x9, x10, [x0, #CTX_SPSR_EL1]

	/* System control and auxiliary control */
	mrs	x15, sctlr_el1
	mrs	x16, actlr_el1
	stp	x15, x16, [x0, #CTX_SCTLR_EL1]

	mrs	x17, cpacr_el1
	mrs	x9, csselr_el1
	stp	x17, x9, [x0, #CTX_CPACR_EL1]

	mrs	x10, sp_el1
	mrs	x11, esr_el1
	stp	x10, x11, [x0, #CTX_SP_EL1]

	/* EL1 translation regime configuration */
	mrs	x12, ttbr0_el1
	mrs	x13, ttbr1_el1
	stp	x12, x13, [x0, #CTX_TTBR0_EL1]

	mrs	x14, mair_el1
	mrs	x15, amair_el1
	stp	x14, x15, [x0, #CTX_MAIR_EL1]

	mrs	x16, tcr_el1
	mrs	x17, tpidr_el1
	stp	x16, x17, [x0, #CTX_TCR_EL1]

	/* EL0 thread pointers (writable and read-only) */
	mrs	x9, tpidr_el0
	mrs	x10, tpidrro_el0
	stp	x9, x10, [x0, #CTX_TPIDR_EL0]

	/* Address translation result and fault address */
	mrs	x13, par_el1
	mrs	x14, far_el1
	stp	x13, x14, [x0, #CTX_PAR_EL1]

	mrs	x15, afsr0_el1
	mrs	x16, afsr1_el1
	stp	x15, x16, [x0, #CTX_AFSR0_EL1]

	mrs	x17, contextidr_el1
	mrs	x9, vbar_el1
	stp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]

	/* Save AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, spsr_abt
	mrs	x12, spsr_und
	stp	x11, x12, [x0, #CTX_SPSR_ABT]

	mrs	x13, spsr_irq
	mrs	x14, spsr_fiq
	stp	x13, x14, [x0, #CTX_SPSR_IRQ]

	mrs	x15, dacr32_el2
	mrs	x16, ifsr32_el2
	stp	x15, x16, [x0, #CTX_DACR32_EL2]

	/* FPEXC32 is a single register; str, not stp */
	mrs	x17, fpexc32_el2
	str	x17, [x0, #CTX_FP_FPEXC32_EL2]
#endif

	/* Save NS timer registers if the build has instructed so */
#if NS_TIMER_SWITCH
	mrs	x10, cntp_ctl_el0
	mrs	x11, cntp_cval_el0
	stp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]

	mrs	x12, cntv_ctl_el0
	mrs	x13, cntv_cval_el0
	stp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]

	mrs	x14, cntkctl_el1
	str	x14, [x0, #CTX_CNTKCTL_EL1]
#endif

	ret
endfunc el1_sysregs_context_save
Achin Gupta9ac63c52014-01-16 12:08:03 +0000134
/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS to use x9-x17 (temporary caller-saved registers)
 * to restore EL1 system register context. It assumes
 * that 'x0' is pointing to a 'el1_sys_regs' structure
 * from where the register context will be restored.
 * Must mirror el1_sysregs_context_save exactly.
 *
 * In:      x0 = base of the 'el1_sys_regs' buffer
 * Clobber: x9-x17 only (no stack usage)
 * -----------------------------------------------------
 */
func el1_sysregs_context_restore

	/* EL1 exception return state */
	ldp	x9, x10, [x0, #CTX_SPSR_EL1]
	msr	spsr_el1, x9
	msr	elr_el1, x10

	ldp	x15, x16, [x0, #CTX_SCTLR_EL1]
	msr	sctlr_el1, x15
	msr	actlr_el1, x16

	ldp	x17, x9, [x0, #CTX_CPACR_EL1]
	msr	cpacr_el1, x17
	msr	csselr_el1, x9

	ldp	x10, x11, [x0, #CTX_SP_EL1]
	msr	sp_el1, x10
	msr	esr_el1, x11

	/* EL1 translation regime configuration */
	ldp	x12, x13, [x0, #CTX_TTBR0_EL1]
	msr	ttbr0_el1, x12
	msr	ttbr1_el1, x13

	ldp	x14, x15, [x0, #CTX_MAIR_EL1]
	msr	mair_el1, x14
	msr	amair_el1, x15

	ldp	x16, x17, [x0, #CTX_TCR_EL1]
	msr	tcr_el1, x16
	msr	tpidr_el1, x17

	/* EL0 thread pointers (writable and read-only) */
	ldp	x9, x10, [x0, #CTX_TPIDR_EL0]
	msr	tpidr_el0, x9
	msr	tpidrro_el0, x10

	ldp	x13, x14, [x0, #CTX_PAR_EL1]
	msr	par_el1, x13
	msr	far_el1, x14

	ldp	x15, x16, [x0, #CTX_AFSR0_EL1]
	msr	afsr0_el1, x15
	msr	afsr1_el1, x16

	ldp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
	msr	contextidr_el1, x17
	msr	vbar_el1, x9

	/* Restore AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
	ldp	x11, x12, [x0, #CTX_SPSR_ABT]
	msr	spsr_abt, x11
	msr	spsr_und, x12

	ldp	x13, x14, [x0, #CTX_SPSR_IRQ]
	msr	spsr_irq, x13
	msr	spsr_fiq, x14

	ldp	x15, x16, [x0, #CTX_DACR32_EL2]
	msr	dacr32_el2, x15
	msr	ifsr32_el2, x16

	ldr	x17, [x0, #CTX_FP_FPEXC32_EL2]
	msr	fpexc32_el2, x17
#endif
	/* Restore NS timer registers if the build has instructed so */
#if NS_TIMER_SWITCH
	ldp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]
	msr	cntp_ctl_el0, x10
	msr	cntp_cval_el0, x11

	ldp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]
	msr	cntv_ctl_el0, x12
	msr	cntv_cval_el0, x13

	ldr	x14, [x0, #CTX_CNTKCTL_EL1]
	msr	cntkctl_el1, x14
#endif

	/* No explicit ISB required here as ERET covers it */
	ret
endfunc el1_sysregs_context_restore
Achin Gupta9ac63c52014-01-16 12:08:03 +0000223
/* -----------------------------------------------------
 * The following function follows the aapcs_64 strictly
 * to use x9-x17 (temporary caller-saved registers
 * according to AArch64 PCS) to save floating point
 * register context. It assumes that 'x0' is pointing to
 * a 'fp_regs' structure where the register context will
 * be saved.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is
 * set. However currently we don't use VFP registers
 * nor set traps in Trusted Firmware, and assume it's
 * cleared.
 *
 * In:      x0 = base of the 'fp_regs' buffer
 * Clobber: x9, x10 (q0-q31 are saved, not modified)
 *
 * TODO: Revisit when VFP is used in secure world
 * -----------------------------------------------------
 */
#if CTX_INCLUDE_FPREGS
func fpregs_context_save
	/* All 32 SIMD/FP data registers, in pairs */
	stp	q0, q1, [x0, #CTX_FP_Q0]
	stp	q2, q3, [x0, #CTX_FP_Q2]
	stp	q4, q5, [x0, #CTX_FP_Q4]
	stp	q6, q7, [x0, #CTX_FP_Q6]
	stp	q8, q9, [x0, #CTX_FP_Q8]
	stp	q10, q11, [x0, #CTX_FP_Q10]
	stp	q12, q13, [x0, #CTX_FP_Q12]
	stp	q14, q15, [x0, #CTX_FP_Q14]
	stp	q16, q17, [x0, #CTX_FP_Q16]
	stp	q18, q19, [x0, #CTX_FP_Q18]
	stp	q20, q21, [x0, #CTX_FP_Q20]
	stp	q22, q23, [x0, #CTX_FP_Q22]
	stp	q24, q25, [x0, #CTX_FP_Q24]
	stp	q26, q27, [x0, #CTX_FP_Q26]
	stp	q28, q29, [x0, #CTX_FP_Q28]
	stp	q30, q31, [x0, #CTX_FP_Q30]

	/* FP status and control registers */
	mrs	x9, fpsr
	str	x9, [x0, #CTX_FP_FPSR]

	mrs	x10, fpcr
	str	x10, [x0, #CTX_FP_FPCR]

	ret
endfunc fpregs_context_save
Achin Gupta9ac63c52014-01-16 12:08:03 +0000267
/* -----------------------------------------------------
 * The following function follows the aapcs_64 strictly
 * to use x9-x17 (temporary caller-saved registers
 * according to AArch64 PCS) to restore floating point
 * register context. It assumes that 'x0' is pointing to
 * a 'fp_regs' structure from where the register context
 * will be restored. Must mirror fpregs_context_save.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is
 * set. However currently we don't use VFP registers
 * nor set traps in Trusted Firmware, and assume it's
 * cleared.
 *
 * In:      x0 = base of the 'fp_regs' buffer
 * Clobber: x9, x10 and q0-q31
 *
 * TODO: Revisit when VFP is used in secure world
 * -----------------------------------------------------
 */
func fpregs_context_restore
	/* All 32 SIMD/FP data registers, in pairs */
	ldp	q0, q1, [x0, #CTX_FP_Q0]
	ldp	q2, q3, [x0, #CTX_FP_Q2]
	ldp	q4, q5, [x0, #CTX_FP_Q4]
	ldp	q6, q7, [x0, #CTX_FP_Q6]
	ldp	q8, q9, [x0, #CTX_FP_Q8]
	ldp	q10, q11, [x0, #CTX_FP_Q10]
	ldp	q12, q13, [x0, #CTX_FP_Q12]
	ldp	q14, q15, [x0, #CTX_FP_Q14]
	ldp	q16, q17, [x0, #CTX_FP_Q16]
	ldp	q18, q19, [x0, #CTX_FP_Q18]
	ldp	q20, q21, [x0, #CTX_FP_Q20]
	ldp	q22, q23, [x0, #CTX_FP_Q22]
	ldp	q24, q25, [x0, #CTX_FP_Q24]
	ldp	q26, q27, [x0, #CTX_FP_Q26]
	ldp	q28, q29, [x0, #CTX_FP_Q28]
	ldp	q30, q31, [x0, #CTX_FP_Q30]

	/* FP status and control registers */
	ldr	x9, [x0, #CTX_FP_FPSR]
	msr	fpsr, x9

	ldr	x10, [x0, #CTX_FP_FPCR]
	msr	fpcr, x10

	/*
	 * No explicit ISB required here as ERET to
	 * switch to secure EL1 or non-secure world
	 * covers it
	 */

	ret
endfunc fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */
Yatharth Kochar6c0566c2015-10-02 17:56:48 +0100317
/* -----------------------------------------------------
 * The following functions are used to save and restore
 * all the general purpose registers. Ideally we would
 * only save and restore the callee saved registers when
 * a world switch occurs but that type of implementation
 * is more complex. So currently we will always save and
 * restore these registers on entry and exit of EL3.
 * These are not macros to ensure their invocation fits
 * within the 32 instructions per exception vector.
 *
 * Assumes SP (SP_EL3) points to a valid cpu_context
 * structure; x0-x29 and SP_EL0 are saved at
 * CTX_GPREGS_OFFSET within it.
 * clobbers: x18
 * -----------------------------------------------------
 */
func save_gp_registers
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	/* x18 already saved above, so it is free as a temporary */
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
	ret
endfunc save_gp_registers
350
/* -----------------------------------------------------
 * Restore ALL general purpose registers (x0-x30 plus
 * SP_EL0) from the cpu_context pointed to by SP_EL3 and
 * exit EL3 via ERET. Restores x0-x3 here, then tail-
 * branches to restore_gp_registers_callee_eret for the
 * rest (which ends in eret; this never returns).
 * -----------------------------------------------------
 */
func restore_gp_registers_eret
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	b	restore_gp_registers_callee_eret
endfunc restore_gp_registers_eret
356
/* -----------------------------------------------------
 * Restore general purpose registers x4-x30 and SP_EL0
 * from the cpu_context pointed to by SP_EL3, then ERET.
 * Callers that need x0-x3 preserved (e.g. SMC return
 * values already loaded) enter here directly; otherwise
 * use restore_gp_registers_eret. Does not return.
 * -----------------------------------------------------
 */
func restore_gp_registers_callee_eret
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	/*
	 * x17 is used as a temporary for SP_EL0, so the x16/x17
	 * pair is deliberately restored last, just before ERET.
	 */
	ldp	x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	msr	sp_el0, x17
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	eret
endfunc restore_gp_registers_callee_eret
375
	/* -----------------------------------------------------
	 * Exit EL3 and enter the lower EL selected by the saved
	 * SPSR_EL3/ELR_EL3/SCR_EL3. This routine assumes that
	 * the SP_EL3 is pointing to a valid context structure
	 * from where the gp regs and other special registers
	 * can be retrieved. Does not return.
	 * -----------------------------------------------------
	 */
func el3_exit
	/* -----------------------------------------------------
	 * Save the current SP_EL0 i.e. the EL3 runtime stack
	 * which will be used for handling the next SMC. Then
	 * switch to SP_EL3.
	 * -----------------------------------------------------
	 */
	mov	x17, sp
	msr	spsel, #1
	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* -----------------------------------------------------
	 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
	 * -----------------------------------------------------
	 */
	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	scr_el3, x18
	msr	spsr_el3, x16
	msr	elr_el3, x17

	/* Restore saved general purpose registers and return (via ERET) */
	b	restore_gp_registers_eret
endfunc el3_exit