/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl32/tsp/tsp.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

#include "../tsp_private.h"


	.globl	tsp_entrypoint
	.globl	tsp_vector_table

	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #TSP_ARG6]
	ldp	x4, x5, [x0, #TSP_ARG4]
	ldp	x2, x3, [x0, #TSP_ARG2]
	ldp	x0, x1, [x0, #TSP_ARG0]
	smc	#0
	.endm
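
	/*
	 * Note on restore_args_call_smc: x0 holds the pointer to the SMC
	 * args structure, so the x0/x1 pair is loaded last; the earlier ldp
	 * instructions must not clobber it. The TSP_ARGn offsets are assumed
	 * to be defined in the TSP headers and to match the layout of the
	 * args structure shared with the TSPD.
	 */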

	.macro save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm

	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm
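
	/*
	 * Note on the two macros above: save_eret_context stacks
	 * ELR_EL1/SPSR_EL1 (so that a pending S-EL1 exception return is not
	 * lost while the S-EL1 interrupt is handled) together with x30 and
	 * x18 as a 16-byte aligned pair; restore_eret_context pops them in
	 * the reverse order before control goes back to the TSPD via SMC.
	 */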

func tsp_entrypoint _align=3

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been set up.
	 * ---------------------------------------------
	 */
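	/* DAIF_ABT_BIT selects the PSTATE.A (SError/asynchronous abort) mask bit. */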
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage.
	 * ---------------------------------------------
	 */
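	/*
	 * inv_dcache_range is assumed to take the base address in x0 and the
	 * size in bytes in x1, hence the subtraction below.
	 */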
	adr	x0, __RW_START__
	adr	x1, __RW_END__
	sub	x1, x1, x0
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section.
	 * ---------------------------------------------
	 */
	ldr	x0, =__BSS_START__
	ldr	x1, =__BSS_SIZE__
	bl	zeromem

#if USE_COHERENT_MEM
	ldr	x0, =__COHERENT_RAM_START__
	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform early platform setup & platform
	 * specific early arch. setup e.g. mmu setup
	 * ---------------------------------------------
	 */
	bl	tsp_early_platform_setup
	bl	tsp_plat_arch_setup

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising
	 * ---------------------------------------------
	 */
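	/*
	 * tsp_main is expected to return the address of tsp_vector_table in
	 * x0; it is handed to the TSPD in x1, while x0 carries the
	 * TSP_ENTRY_DONE function ID.
	 */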
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint

	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints
	 * -------------------------------------------
	 */
func tsp_vector_table
	b	tsp_yield_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_sel1_intr_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry
	b	tsp_abort_yield_smc_entry
endfunc tsp_vector_table
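
	/*
	 * Each entry above is a single branch instruction, so the TSPD can
	 * reach a given service by branching to a fixed offset from the
	 * table base it received via TSP_ENTRY_DONE. The entry order is
	 * assumed to match the tsp_vectors structure shared with the TSPD.
	 */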

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main
	restore_args_call_smc
endfunc tsp_cpu_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be switched off (through
	 * a SYSTEM_OFF psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_off_entry
	bl	tsp_system_off_main
	restore_args_call_smc
endfunc tsp_system_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be reset (through a
	 * SYSTEM_RESET psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main
	restore_args_call_smc
endfunc tsp_system_reset_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself i.e. set up
	 * the mmu, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. caches and MMU
	 * will be turned off, the execution state will
	 * be AArch64 and exceptions will be masked.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable MMU and D-caches together.
	 * --------------------------------------------
	 */
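	/*
	 * x0 carries the flags argument of bl32_plat_enable_mmu; passing 0
	 * is assumed to request the default behaviour, i.e. the D-cache is
	 * not disabled while the MMU is enabled.
	 */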
	mov	x0, #0
	bl	bl32_plat_enable_mmu

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * bookkeeping
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main
	restore_args_call_smc
endfunc tsp_cpu_suspend_entry

	/*-------------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for synchronously handling an S-EL1
	 * interrupt which was triggered while executing
	 * in the normal world. 'x0' contains a magic
	 * number which indicates this. The TSPD expects
	 * control to be handed back at the end of
	 * interrupt processing. This is done through an
	 * SMC. The handover agreement is:
	 *
	 * 1. The PSTATE.DAIF bits are set upon entry.
	 *    'x1' has the ELR_EL3 from the non-secure
	 *    state.
	 * 2. TSP has to preserve the callee saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. TSP has to preserve the system and vfp
	 *    registers (if applicable).
	 * 4. TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. TSP returns to TSPD using an SMC with
	 *    'x0' = TSP_HANDLED_S_EL1_INTR
	 * ------------------------------------------------
	 */
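	/*
	 * The magic number (TSP_HANDLE_SEL1_INTR_AND_RETURN) and the return
	 * IDs (TSP_HANDLED_S_EL1_INTR, TSP_PREEMPTED) used below are part of
	 * the TSP/TSPD interface and are assumed to be defined in tsp.h.
	 */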
func tsp_sel1_intr_entry
#if DEBUG
	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
	cmp	x0, x2
	b.ne	tsp_sel1_int_entry_panic
#endif
	/*-------------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1, e.g. context
	 * from a previous non-secure interrupt.
	 * Update statistics and handle the S-EL1
	 * interrupt before returning to the TSPD.
	 * IRQs/FIQs are not enabled since that would
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. The handler can return 0 if the
	 * interrupt was handled, or TSP_PREEMPTED if
	 * the expected interrupt was preempted by an
	 * interrupt that should be handled in EL3,
	 * e.g. a Group 0 interrupt in GICv3. In both
	 * cases, switch to EL3 using an SMC with id
	 * TSP_HANDLED_S_EL1_INTR. Any other return
	 * value from the handler will result in a
	 * panic.
	 * ------------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_sel1_intr_stats
	bl	tsp_common_int_handler
	/* Check if the S-EL1 interrupt has been handled */
	cbnz	x0, tsp_sel1_intr_check_preemption
	b	tsp_sel1_intr_return
tsp_sel1_intr_check_preemption:
	/* Check if the S-EL1 interrupt has been preempted */
	mov_imm	x1, TSP_PREEMPTED
	cmp	x0, x1
	b.ne	tsp_sel1_int_entry_panic
tsp_sel1_intr_return:
	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
	restore_eret_context x2 x3
	smc	#0

	/* Should never reach here */
tsp_sel1_int_entry_panic:
	no_ret	plat_panic_handler
endfunc tsp_sel1_intr_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a Yielding SMC request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler.
	 * ---------------------------------------------
	 */
func tsp_yield_smc_entry
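	/*
	 * Unmasking IRQ/FIQ here is what makes a Yielding SMC preemptible by
	 * non-secure interrupts while tsp_smc_handler runs; the matching
	 * daifset below re-masks them before control returns to the TSPD.
	 */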
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	bl	tsp_smc_handler
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_yield_smc_entry

	/*---------------------------------------------------------------------
	 * This entrypoint is used by the TSPD to abort a preempted Yielding
	 * SMC. It could be on behalf of the non-secure world or because a CPU
	 * suspend/CPU off request needs to abort the preempted SMC.
	 * --------------------------------------------------------------------
	 */
func tsp_abort_yield_smc_entry

	/*
	 * Exception masking is already done by the TSPD when entering this
	 * hook so there is no need to do it here.
	 */

	/* Reset the stack used by the preempted SMC */
	bl	plat_set_my_stack

	/*
	 * Allow some cleanup such as releasing locks.
	 */
	bl	tsp_abort_smc_handler

	restore_args_call_smc

	/* Should never reach here */
	bl	plat_panic_handler
endfunc tsp_abort_yield_smc_entry