blob: 5d9da857882ac527d19faa9387f1c13fcff61403 [file] [log] [blame]
Achin Gupta7c88f3f2014-02-18 18:09:12 +00001/*
Jeenu Viswambharan0859d2c2018-04-27 16:28:12 +01002 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
Achin Gupta7c88f3f2014-02-18 18:09:12 +00003 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Achin Gupta7c88f3f2014-02-18 18:09:12 +00005 */
6
Achin Gupta7c88f3f2014-02-18 18:09:12 +00007#include <arch.h>
Andrew Thoelke38bde412014-03-18 13:46:55 +00008#include <asm_macros.S>
Dan Handley2bd4ef22014-04-09 13:14:54 +01009#include <tsp.h>
Antonio Nino Diaz4ef91f12017-02-20 14:22:22 +000010#include <xlat_tables_defs.h>
Dan Handleye2c27f52014-08-01 17:58:27 +010011#include "../tsp_private.h"
Achin Gupta7c88f3f2014-02-18 18:09:12 +000012
13
14 .globl tsp_entrypoint
Andrew Thoelke891c4ca2014-05-20 21:43:27 +010015 .globl tsp_vector_table
Achin Gupta7c88f3f2014-02-18 18:09:12 +000016
Soby Mathew9f71f702014-05-09 20:49:17 +010017
18
	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0, then return
	 * to the TSPD in EL3 via SMC. The pairs are
	 * loaded in descending offset order so that x0
	 * (the base pointer) is overwritten last.
	 * NOTE(review): the SMC is not expected to
	 * return; re-entry happens through
	 * tsp_vector_table.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #TSP_ARG6]		/* args 6-7 */
	ldp	x4, x5, [x0, #TSP_ARG4]		/* args 4-5 */
	ldp	x2, x3, [x0, #TSP_ARG2]		/* args 2-3 */
	ldp	x0, x1, [x0, #TSP_ARG0]		/* args 0-1; clobbers base ptr, so done last */
	smc	#0
	.endm
31
	/* ---------------------------------------------
	 * Save the minimal S-EL1 exception-return state
	 * (ELR_EL1, SPSR_EL1) plus x30 (LR) and x18 on
	 * the current stack, so a nested exception
	 * return can be performed later. \reg1/\reg2
	 * are scratch registers chosen by the caller.
	 * Two 16-byte pre-indexed pushes keep SP
	 * 16-byte aligned as the architecture requires.
	 * Must be paired with restore_eret_context.
	 * ---------------------------------------------
	 */
	.macro save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!	/* push ELR_EL1, SPSR_EL1 */
	stp	x30, x18, [sp, #-0x10]!		/* push LR and x18 (platform reg) */
	.endm
38
	/* ---------------------------------------------
	 * Undo save_eret_context: pop x30/x18 and the
	 * saved ELR_EL1/SPSR_EL1 (reverse push order),
	 * then write them back to the system registers
	 * ready for a subsequent exception return.
	 * \reg1/\reg2 are scratch and are left holding
	 * the restored ELR_EL1/SPSR_EL1 values.
	 * ---------------------------------------------
	 */
	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10		/* pop LR and x18 */
	ldp	\reg1, \reg2, [sp], #0x10	/* pop ELR_EL1, SPSR_EL1 */
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm
45
	/* ---------------------------------------------
	 * Cold-boot entry point of the TSP (BL32),
	 * entered from EL3 with the MMU and D-cache
	 * off and all exceptions masked. Sets up a
	 * minimal S-EL1 environment, runs tsp_main()
	 * and then signals completion to the TSPD via
	 * SMC. The SMC is not expected to return.
	 * _align=3: entry is 8-byte aligned.
	 * ---------------------------------------------
	 */
func tsp_entrypoint _align=3

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb					/* ensure vector base is in effect */

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been setup.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * (SCTLR_EL1.I/SA/A).
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage.
	 * ---------------------------------------------
	 */
	adr	x0, __RW_START__		/* x0 = base of RW region */
	adr	x1, __RW_END__
	sub	x1, x1, x0			/* x1 = size in bytes */
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 * - the .bss section;
	 * - the coherent memory section.
	 * ---------------------------------------------
	 */
	ldr	x0, =__BSS_START__
	ldr	x1, =__BSS_SIZE__
	bl	zeromem

#if USE_COHERENT_MEM
	ldr	x0, =__COHERENT_RAM_START__
	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform early platform setup & platform
	 * specific early arch. setup e.g. mmu setup
	 * ---------------------------------------------
	 */
	bl	tsp_early_platform_setup
	bl	tsp_plat_arch_setup

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising.
	 * x1 carries tsp_main's result back to EL3.
	 * ---------------------------------------------
	 */
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

	/* The SMC above should never return; spin if it does */
tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000148
Andrew Thoelke891c4ca2014-05-20 21:43:27 +0100149
	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints.
	 * Each slot is exactly one branch instruction;
	 * the slot order is an ABI shared with the
	 * TSPD (it must match the TSPD's vector
	 * structure), so entries must NOT be
	 * reordered, added or removed independently.
	 * -------------------------------------------
	 */
func tsp_vector_table
	b	tsp_yield_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_sel1_intr_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry
	b	tsp_abort_yield_smc_entry
endfunc tsp_vector_table
Andrew Thoelke891c4ca2014-05-20 21:43:27 +0100167
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000168 /*---------------------------------------------
169 * This entrypoint is used by the TSPD when this
170 * cpu is to be turned off through a CPU_OFF
171 * psci call to ask the TSP to perform any
172 * bookeeping necessary. In the current
173 * implementation, the TSPD expects the TSP to
174 * re-initialise its state so nothing is done
175 * here except for acknowledging the request.
176 * ---------------------------------------------
177 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000178func tsp_cpu_off_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000179 bl tsp_cpu_off_main
180 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000181endfunc tsp_cpu_off_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000182
183 /*---------------------------------------------
Juan Castillo4dc4a472014-08-12 11:17:06 +0100184 * This entrypoint is used by the TSPD when the
185 * system is about to be switched off (through
186 * a SYSTEM_OFF psci call) to ask the TSP to
187 * perform any necessary bookkeeping.
188 * ---------------------------------------------
189 */
190func tsp_system_off_entry
191 bl tsp_system_off_main
192 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000193endfunc tsp_system_off_entry
Juan Castillo4dc4a472014-08-12 11:17:06 +0100194
195 /*---------------------------------------------
196 * This entrypoint is used by the TSPD when the
197 * system is about to be reset (through a
198 * SYSTEM_RESET psci call) to ask the TSP to
199 * perform any necessary bookkeeping.
200 * ---------------------------------------------
201 */
202func tsp_system_reset_entry
203 bl tsp_system_reset_main
204 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000205endfunc tsp_system_reset_entry
Juan Castillo4dc4a472014-08-12 11:17:06 +0100206
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself i.e. setup
	 * the mmu, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. Caches and MMU
	 * will be turned off, the execution state
	 * will be aarch64 and exceptions masked.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb					/* ensure vector base is in effect */

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * (SCTLR_EL1.I/SA/A).
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable MMU and D-caches together.
	 * x0 = 0: no special translation flags.
	 * --------------------------------------------
	 */
	mov	x0, #0
	bl	bl32_plat_enable_mmu

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * book keeping, then hand back to the TSPD
	 * via SMC.
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000268
269 /*---------------------------------------------
270 * This entrypoint is used by the TSPD when this
271 * cpu is to be suspended through a CPU_SUSPEND
272 * psci call to ask the TSP to perform any
273 * bookeeping necessary. In the current
274 * implementation, the TSPD saves and restores
275 * the EL1 state.
276 * ---------------------------------------------
277 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000278func tsp_cpu_suspend_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000279 bl tsp_cpu_suspend_main
280 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000281endfunc tsp_cpu_suspend_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000282
Soby Mathewbec98512015-09-03 18:29:38 +0100283 /*-------------------------------------------------
Achin Gupta76717892014-05-09 11:42:56 +0100284 * This entrypoint is used by the TSPD to pass
Soby Mathew78664242015-11-13 02:08:43 +0000285 * control for `synchronously` handling a S-EL1
286 * Interrupt which was triggered while executing
287 * in normal world. 'x0' contains a magic number
288 * which indicates this. TSPD expects control to
289 * be handed back at the end of interrupt
290 * processing. This is done through an SMC.
291 * The handover agreement is:
Achin Gupta76717892014-05-09 11:42:56 +0100292 *
293 * 1. PSTATE.DAIF are set upon entry. 'x1' has
294 * the ELR_EL3 from the non-secure state.
295 * 2. TSP has to preserve the callee saved
296 * general purpose registers, SP_EL1/EL0 and
297 * LR.
298 * 3. TSP has to preserve the system and vfp
299 * registers (if applicable).
300 * 4. TSP can use 'x0-x18' to enable its C
301 * runtime.
302 * 5. TSP returns to TSPD using an SMC with
Soby Mathewbec98512015-09-03 18:29:38 +0100303 * 'x0' = TSP_HANDLED_S_EL1_INTR
304 * ------------------------------------------------
Achin Gupta76717892014-05-09 11:42:56 +0100305 */
Soby Mathewbec98512015-09-03 18:29:38 +0100306func tsp_sel1_intr_entry
Achin Gupta76717892014-05-09 11:42:56 +0100307#if DEBUG
Soby Mathew78664242015-11-13 02:08:43 +0000308 mov_imm x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
Achin Gupta76717892014-05-09 11:42:56 +0100309 cmp x0, x2
Soby Mathewbec98512015-09-03 18:29:38 +0100310 b.ne tsp_sel1_int_entry_panic
Achin Gupta76717892014-05-09 11:42:56 +0100311#endif
Soby Mathewbec98512015-09-03 18:29:38 +0100312 /*-------------------------------------------------
Achin Gupta76717892014-05-09 11:42:56 +0100313 * Save any previous context needed to perform
314 * an exception return from S-EL1 e.g. context
Soby Mathewbec98512015-09-03 18:29:38 +0100315 * from a previous Non secure Interrupt.
316 * Update statistics and handle the S-EL1
317 * interrupt before returning to the TSPD.
Achin Gupta76717892014-05-09 11:42:56 +0100318 * IRQ/FIQs are not enabled since that will
319 * complicate the implementation. Execution
320 * will be transferred back to the normal world
Soby Mathew78664242015-11-13 02:08:43 +0000321 * in any case. The handler can return 0
322 * if the interrupt was handled or TSP_PREEMPTED
323 * if the expected interrupt was preempted
324 * by an interrupt that should be handled in EL3
325 * e.g. Group 0 interrupt in GICv3. In both
326 * the cases switch to EL3 using SMC with id
327 * TSP_HANDLED_S_EL1_INTR. Any other return value
328 * from the handler will result in panic.
Soby Mathewbec98512015-09-03 18:29:38 +0100329 * ------------------------------------------------
Achin Gupta76717892014-05-09 11:42:56 +0100330 */
331 save_eret_context x2 x3
Soby Mathewbec98512015-09-03 18:29:38 +0100332 bl tsp_update_sync_sel1_intr_stats
333 bl tsp_common_int_handler
Soby Mathew78664242015-11-13 02:08:43 +0000334 /* Check if the S-EL1 interrupt has been handled */
335 cbnz x0, tsp_sel1_intr_check_preemption
336 b tsp_sel1_intr_return
337tsp_sel1_intr_check_preemption:
338 /* Check if the S-EL1 interrupt has been preempted */
339 mov_imm x1, TSP_PREEMPTED
340 cmp x0, x1
341 b.ne tsp_sel1_int_entry_panic
342tsp_sel1_intr_return:
343 mov_imm x0, TSP_HANDLED_S_EL1_INTR
Achin Gupta76717892014-05-09 11:42:56 +0100344 restore_eret_context x2 x3
Achin Gupta76717892014-05-09 11:42:56 +0100345 smc #0
346
Soby Mathew78664242015-11-13 02:08:43 +0000347 /* Should never reach here */
Soby Mathewbec98512015-09-03 18:29:38 +0100348tsp_sel1_int_entry_panic:
Jeenu Viswambharan68aef102016-11-30 15:21:11 +0000349 no_ret plat_panic_handler
Soby Mathewbec98512015-09-03 18:29:38 +0100350endfunc tsp_sel1_intr_entry
Achin Gupta76717892014-05-09 11:42:56 +0100351
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	restore_args_call_smc

	/* Should never reach here: the SMC above hands control to EL3 */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000369
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request.
	 * Interrupts remain masked for the duration
	 * (no preemption, unlike the yielding path).
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler
	restore_args_call_smc

	/* Should never reach here: the SMC above hands control to EL3 */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000382
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a Yielding SMC request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler by unmasking IRQ/FIQ
	 * around the call, and re-mask them before
	 * returning to the TSPD.
	 * ---------------------------------------------
	 */
func tsp_yield_smc_entry
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT	/* allow preemption */
	bl	tsp_smc_handler
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT	/* re-mask before SMC */
	restore_args_call_smc

	/* Should never reach here: the SMC above hands control to EL3 */
	no_ret	plat_panic_handler
endfunc tsp_yield_smc_entry
Douglas Raillardf2129652016-11-24 15:43:19 +0000399
400 /*---------------------------------------------------------------------
David Cunado28f69ab2017-04-05 11:34:03 +0100401 * This entrypoint is used by the TSPD to abort a pre-empted Yielding
Douglas Raillardf2129652016-11-24 15:43:19 +0000402 * SMC. It could be on behalf of non-secure world or because a CPU
403 * suspend/CPU off request needs to abort the preempted SMC.
404 * --------------------------------------------------------------------
405 */
David Cunado28f69ab2017-04-05 11:34:03 +0100406func tsp_abort_yield_smc_entry
Douglas Raillardf2129652016-11-24 15:43:19 +0000407
408 /*
409 * Exceptions masking is already done by the TSPD when entering this
410 * hook so there is no need to do it here.
411 */
412
413 /* Reset the stack used by the pre-empted SMC */
414 bl plat_set_my_stack
415
416 /*
417 * Allow some cleanup such as releasing locks.
418 */
419 bl tsp_abort_smc_handler
420
421 restore_args_call_smc
422
423 /* Should never reach here */
424 bl plat_panic_handler
David Cunado28f69ab2017-04-05 11:34:03 +0100425endfunc tsp_abort_yield_smc_entry