/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <tsp.h>
#include <xlat_tables_defs.h>
#include "../tsp_private.h"


        .globl  tsp_entrypoint
        .globl  tsp_vector_table


        /* ---------------------------------------------
         * Populate the params in x0-x7 from the pointer
         * to the smc args structure in x0.
         * ---------------------------------------------
         */
        .macro restore_args_call_smc
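        /*
         * Note: x0 holds the pointer to the args structure and is the base
         * register for every load below, so it must be the last register
         * pair to be overwritten.
         */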
        ldp     x6, x7, [x0, #TSP_ARG6]
        ldp     x4, x5, [x0, #TSP_ARG4]
        ldp     x2, x3, [x0, #TSP_ARG2]
        ldp     x0, x1, [x0, #TSP_ARG0]
        smc     #0
        .endm

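        /* ---------------------------------------------
         * The following macros stack ELR_EL1 and
         * SPSR_EL1 together with x30 and x18 before C
         * code is called to handle a synchronous S-EL1
         * interrupt, and restore them before returning
         * to the TSPD via SMC. This preserves any live
         * exception return state, e.g. from a previous
         * exception taken to S-EL1.
         * ---------------------------------------------
         */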
        .macro save_eret_context reg1 reg2
        mrs     \reg1, elr_el1
        mrs     \reg2, spsr_el1
        stp     \reg1, \reg2, [sp, #-0x10]!
        stp     x30, x18, [sp, #-0x10]!
        .endm

        .macro restore_eret_context reg1 reg2
        ldp     x30, x18, [sp], #0x10
        ldp     \reg1, \reg2, [sp], #0x10
        msr     elr_el1, \reg1
        msr     spsr_el1, \reg2
        .endm

        .section .text, "ax"
        .align 3

func tsp_entrypoint

        /* ---------------------------------------------
         * Set the exception vector to something sane.
         * ---------------------------------------------
         */
        adr     x0, tsp_exceptions
        msr     vbar_el1, x0
        isb

        /* ---------------------------------------------
         * Enable the SError interrupt now that the
         * exception vectors have been set up.
         * ---------------------------------------------
         */
        msr     daifclr, #DAIF_ABT_BIT

        /* ---------------------------------------------
         * Enable the instruction cache, stack pointer
         * and data access alignment checks
         * ---------------------------------------------
         */
        mov     x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
        mrs     x0, sctlr_el1
        orr     x0, x0, x1
        msr     sctlr_el1, x0
        isb

        /* ---------------------------------------------
         * Invalidate the RW memory used by the BL32
         * image. This includes the data and NOBITS
         * sections. This is done to safeguard against
         * possible corruption of this memory by dirty
         * cache lines in a system cache as a result of
         * use by an earlier boot loader stage.
         * ---------------------------------------------
         */
        adr     x0, __RW_START__
        adr     x1, __RW_END__
        sub     x1, x1, x0
        bl      inv_dcache_range

        /* ---------------------------------------------
         * Zero out NOBITS sections. There are 2 of them:
         *   - the .bss section;
         *   - the coherent memory section.
         * ---------------------------------------------
         */
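        /* zeromem() takes the base address in x0 and the size in bytes in x1 */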
        ldr     x0, =__BSS_START__
        ldr     x1, =__BSS_SIZE__
        bl      zeromem

#if USE_COHERENT_MEM
        ldr     x0, =__COHERENT_RAM_START__
        ldr     x1, =__COHERENT_RAM_UNALIGNED_SIZE__
        bl      zeromem
#endif

        /* --------------------------------------------
         * Allocate a stack whose memory will be marked
         * as Normal-IS-WBWA when the MMU is enabled.
         * There is no risk of reading stale stack
         * memory after enabling the MMU as only the
         * primary cpu is running at the moment.
         * --------------------------------------------
         */
        bl      plat_set_my_stack

        /* ---------------------------------------------
         * Initialize the stack protector canary before
         * any C code is called.
         * ---------------------------------------------
         */
#if STACK_PROTECTOR_ENABLED
        bl      update_stack_protector_canary
#endif

        /* ---------------------------------------------
         * Perform early platform setup & platform
         * specific early arch. setup e.g. mmu setup
         * ---------------------------------------------
         */
        bl      tsp_early_platform_setup
        bl      tsp_plat_arch_setup

        /* ---------------------------------------------
         * Jump to main function.
         * ---------------------------------------------
         */
        bl      tsp_main

        /* ---------------------------------------------
         * Tell TSPD that we are done initialising
         * ---------------------------------------------
         */
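        /*
         * x0 holds tsp_main()'s return value, which the TSPD consumes (the
         * address of the TSP's vector table); move it to x1 and report
         * TSP_ENTRY_DONE in x0.
         */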
        mov     x1, x0
        mov     x0, #TSP_ENTRY_DONE
        smc     #0

tsp_entrypoint_panic:
        b       tsp_entrypoint_panic
endfunc tsp_entrypoint


        /* -------------------------------------------
         * Table of entrypoint vectors provided to the
         * TSPD for the various entrypoints
         * -------------------------------------------
         */
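        /*
         * Each entry below is a single branch instruction: the TSPD branches
         * to fixed offsets within this table for each service, so the order
         * of the entries must match the vector structure shared with the
         * TSPD through tsp.h.
         */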
func tsp_vector_table
        b       tsp_yield_smc_entry
        b       tsp_fast_smc_entry
        b       tsp_cpu_on_entry
        b       tsp_cpu_off_entry
        b       tsp_cpu_resume_entry
        b       tsp_cpu_suspend_entry
        b       tsp_sel1_intr_entry
        b       tsp_system_off_entry
        b       tsp_system_reset_entry
        b       tsp_abort_yield_smc_entry
endfunc tsp_vector_table

        /*---------------------------------------------
         * This entrypoint is used by the TSPD when this
         * cpu is to be turned off through a CPU_OFF
         * psci call to ask the TSP to perform any
         * bookkeeping necessary. In the current
         * implementation, the TSPD expects the TSP to
         * re-initialise its state so nothing is done
         * here except for acknowledging the request.
         * ---------------------------------------------
         */
func tsp_cpu_off_entry
        bl      tsp_cpu_off_main
        restore_args_call_smc
endfunc tsp_cpu_off_entry

        /*---------------------------------------------
         * This entrypoint is used by the TSPD when the
         * system is about to be switched off (through
         * a SYSTEM_OFF psci call) to ask the TSP to
         * perform any necessary bookkeeping.
         * ---------------------------------------------
         */
func tsp_system_off_entry
        bl      tsp_system_off_main
        restore_args_call_smc
endfunc tsp_system_off_entry

        /*---------------------------------------------
         * This entrypoint is used by the TSPD when the
         * system is about to be reset (through a
         * SYSTEM_RESET psci call) to ask the TSP to
         * perform any necessary bookkeeping.
         * ---------------------------------------------
         */
func tsp_system_reset_entry
        bl      tsp_system_reset_main
        restore_args_call_smc
endfunc tsp_system_reset_entry

        /*---------------------------------------------
         * This entrypoint is used by the TSPD when this
         * cpu is turned on using a CPU_ON psci call to
         * ask the TSP to initialise itself i.e. set up
         * the mmu, stacks etc. Minimal architectural
         * state will be initialised by the TSPD when
         * this function is entered i.e. caches and MMU
         * will be turned off, the execution state
         * will be AArch64 and exceptions masked.
         * ---------------------------------------------
         */
func tsp_cpu_on_entry
        /* ---------------------------------------------
         * Set the exception vector to something sane.
         * ---------------------------------------------
         */
        adr     x0, tsp_exceptions
        msr     vbar_el1, x0
        isb

        /* Enable the SError interrupt */
        msr     daifclr, #DAIF_ABT_BIT

        /* ---------------------------------------------
         * Enable the instruction cache, stack pointer
         * and data access alignment checks
         * ---------------------------------------------
         */
        mov     x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
        mrs     x0, sctlr_el1
        orr     x0, x0, x1
        msr     sctlr_el1, x0
        isb

        /* --------------------------------------------
         * Give ourselves a stack whose memory will be
         * marked as Normal-IS-WBWA when the MMU is
         * enabled.
         * --------------------------------------------
         */
        bl      plat_set_my_stack

        /* --------------------------------------------
         * Enable the MMU with the DCache disabled. It
         * is safe to use stacks allocated in normal
         * memory as a result. All memory accesses are
         * marked nGnRnE when the MMU is disabled. So
         * all the stack writes will make it to memory.
         * All memory accesses are marked Non-cacheable
         * when the MMU is enabled but D$ is disabled.
         * So used stack memory is guaranteed to be
         * visible immediately after the MMU is enabled.
         * Enabling the DCache at the same time as the
         * MMU can lead to speculatively fetched and
         * possibly stale stack memory being read from
         * other caches. This can lead to coherency
         * issues.
         * --------------------------------------------
         */
        mov     x0, #DISABLE_DCACHE
        bl      bl32_plat_enable_mmu

        /* ---------------------------------------------
         * Enable the Data cache now that the MMU has
         * been enabled. The stack has been unwound. It
         * will be written first before being read. This
         * will invalidate any stale cache lines resident
         * in other caches. We assume that interconnect
         * coherency has been enabled for this cluster by
         * EL3 firmware.
         * ---------------------------------------------
         */
        mrs     x0, sctlr_el1
        orr     x0, x0, #SCTLR_C_BIT
        msr     sctlr_el1, x0
        isb

        /* ---------------------------------------------
         * Enter C runtime to perform any remaining
         * bookkeeping
         * ---------------------------------------------
         */
        bl      tsp_cpu_on_main
        restore_args_call_smc

        /* Should never reach here */
tsp_cpu_on_entry_panic:
        b       tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry

        /*---------------------------------------------
         * This entrypoint is used by the TSPD when this
         * cpu is to be suspended through a CPU_SUSPEND
         * psci call to ask the TSP to perform any
         * bookkeeping necessary. In the current
         * implementation, the TSPD saves and restores
         * the EL1 state.
         * ---------------------------------------------
         */
func tsp_cpu_suspend_entry
        bl      tsp_cpu_suspend_main
        restore_args_call_smc
endfunc tsp_cpu_suspend_entry

        /*-------------------------------------------------
         * This entrypoint is used by the TSPD to pass
         * control for `synchronously` handling an S-EL1
         * interrupt which was triggered while executing
         * in the normal world. 'x0' contains a magic number
         * which indicates this. The TSPD expects control to
         * be handed back at the end of interrupt
         * processing. This is done through an SMC.
         * The handover agreement is:
         *
         * 1. PSTATE.DAIF are set upon entry. 'x1' has
         *    the ELR_EL3 from the non-secure state.
         * 2. TSP has to preserve the callee saved
         *    general purpose registers, SP_EL1/EL0 and
         *    LR.
         * 3. TSP has to preserve the system and vfp
         *    registers (if applicable).
         * 4. TSP can use 'x0-x18' to enable its C
         *    runtime.
         * 5. TSP returns to TSPD using an SMC with
         *    'x0' = TSP_HANDLED_S_EL1_INTR
         * ------------------------------------------------
         */
func tsp_sel1_intr_entry
#if DEBUG
        mov_imm x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
        cmp     x0, x2
        b.ne    tsp_sel1_int_entry_panic
#endif
        /*-------------------------------------------------
         * Save any previous context needed to perform
         * an exception return from S-EL1, e.g. context
         * from a previous Non-secure interrupt.
         * Update statistics and handle the S-EL1
         * interrupt before returning to the TSPD.
         * IRQs/FIQs are not enabled since that would
         * complicate the implementation. Execution
         * will be transferred back to the normal world
         * in any case. The handler can return 0
         * if the interrupt was handled, or TSP_PREEMPTED
         * if the expected interrupt was preempted
         * by an interrupt that should be handled in EL3,
         * e.g. a Group 0 interrupt in GICv3. In both
         * cases switch to EL3 using an SMC with id
         * TSP_HANDLED_S_EL1_INTR. Any other return value
         * from the handler will result in a panic.
         * ------------------------------------------------
         */
        save_eret_context x2 x3
        bl      tsp_update_sync_sel1_intr_stats
        bl      tsp_common_int_handler
        /* Check if the S-EL1 interrupt has been handled */
        cbnz    x0, tsp_sel1_intr_check_preemption
        b       tsp_sel1_intr_return
tsp_sel1_intr_check_preemption:
        /* Check if the S-EL1 interrupt has been preempted */
        mov_imm x1, TSP_PREEMPTED
        cmp     x0, x1
        b.ne    tsp_sel1_int_entry_panic
tsp_sel1_intr_return:
        mov_imm x0, TSP_HANDLED_S_EL1_INTR
        restore_eret_context x2 x3
        smc     #0

        /* Should never reach here */
tsp_sel1_int_entry_panic:
        no_ret  plat_panic_handler
endfunc tsp_sel1_intr_entry

        /*---------------------------------------------
         * This entrypoint is used by the TSPD when this
         * cpu resumes execution after an earlier
         * CPU_SUSPEND psci call to ask the TSP to
         * restore its saved context. In the current
         * implementation, the TSPD saves and restores
         * EL1 state so nothing is done here apart from
         * acknowledging the request.
         * ---------------------------------------------
         */
func tsp_cpu_resume_entry
        bl      tsp_cpu_resume_main
        restore_args_call_smc

        /* Should never reach here */
        no_ret  plat_panic_handler
endfunc tsp_cpu_resume_entry

        /*---------------------------------------------
         * This entrypoint is used by the TSPD to ask
         * the TSP to service a fast smc request.
         * ---------------------------------------------
         */
func tsp_fast_smc_entry
        bl      tsp_smc_handler
        restore_args_call_smc

        /* Should never reach here */
        no_ret  plat_panic_handler
endfunc tsp_fast_smc_entry

        /*---------------------------------------------
         * This entrypoint is used by the TSPD to ask
         * the TSP to service a Yielding SMC request.
         * We will enable preemption during execution
         * of tsp_smc_handler.
         * ---------------------------------------------
         */
func tsp_yield_smc_entry
        msr     daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
        bl      tsp_smc_handler
        msr     daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
        restore_args_call_smc

        /* Should never reach here */
        no_ret  plat_panic_handler
endfunc tsp_yield_smc_entry

        /*---------------------------------------------------------------------
         * This entrypoint is used by the TSPD to abort a preempted Yielding
         * SMC. It could be on behalf of the non-secure world or because a CPU
         * suspend/CPU off request needs to abort the preempted SMC.
         * --------------------------------------------------------------------
         */
func tsp_abort_yield_smc_entry

        /*
         * Exception masking is already done by the TSPD when entering this
         * hook, so there is no need to do it here.
         */

        /* Reset the stack used by the preempted SMC */
        bl      plat_set_my_stack

        /*
         * Allow some cleanup such as releasing locks.
         */
        bl      tsp_abort_smc_handler

        restore_args_call_smc

        /* Should never reach here */
        bl      plat_panic_handler
endfunc tsp_abort_yield_smc_entry