blob: cd08ce7d8de5061b89cd15079d883fca79b8189a [file] [log] [blame]
Achin Gupta7c88f3f2014-02-18 18:09:12 +00001/*
Antonio Nino Diaze61ece02019-02-26 11:41:03 +00002 * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
Achin Gupta7c88f3f2014-02-18 18:09:12 +00003 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Achin Gupta7c88f3f2014-02-18 18:09:12 +00005 */
6
Achin Gupta7c88f3f2014-02-18 18:09:12 +00007#include <arch.h>
Andrew Thoelke38bde412014-03-18 13:46:55 +00008#include <asm_macros.S>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +00009#include <bl32/tsp/tsp.h>
10#include <lib/xlat_tables/xlat_tables_defs.h>
11
Dan Handleye2c27f52014-08-01 17:58:27 +010012#include "../tsp_private.h"
Achin Gupta7c88f3f2014-02-18 18:09:12 +000013
14
15 .globl tsp_entrypoint
Andrew Thoelke891c4ca2014-05-20 21:43:27 +010016 .globl tsp_vector_table
Achin Gupta7c88f3f2014-02-18 18:09:12 +000017
Soby Mathew9f71f702014-05-09 20:49:17 +010018
19
	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0, then return
	 * to the TSPD in EL3 via SMC.
	 * Note: x0 (the base pointer) must be loaded
	 * last, since the earlier ldp's use it as the
	 * base register. This macro does not return to
	 * the caller.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #TSP_ARG6]
	ldp	x4, x5, [x0, #TSP_ARG4]
	ldp	x2, x3, [x0, #TSP_ARG2]
	ldp	x0, x1, [x0, #TSP_ARG0]	/* overwrites the base pointer - keep last */
	smc	#0
	.endm
32
	/* ---------------------------------------------
	 * Save the S-EL1 exception-return state
	 * (ELR_EL1, SPSR_EL1) plus x30 (LR) and x18 on
	 * the stack, using \reg1/\reg2 as scratch.
	 * Pushes two 16-byte pairs; must be unwound
	 * with restore_eret_context using the same
	 * scratch registers.
	 * ---------------------------------------------
	 */
	.macro save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm
39
	/* ---------------------------------------------
	 * Inverse of save_eret_context: pop x30/x18,
	 * then pop the saved ELR_EL1/SPSR_EL1 values
	 * (via \reg1/\reg2) and write them back to the
	 * system registers. Pop order mirrors the push
	 * order in save_eret_context exactly.
	 * ---------------------------------------------
	 */
	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm
46
/* ---------------------------------------------------------------------
 * tsp_entrypoint: cold-boot entry point of the TSP (BL32), entered at
 * S-EL1 from the TSPD. Sets up exception vectors, SCTLR_EL1, caches,
 * BSS/coherent memory, the stack and the C runtime, then calls
 * tsp_main and hands control back to the TSPD with an SMC.
 * The instruction ordering below is deliberate (e.g. vectors before
 * unmasking SError, cache invalidation before zeroing) - do not
 * reorder without care.
 * ---------------------------------------------------------------------
 */
func tsp_entrypoint _align=3

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been setup.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks and disable
	 * speculative loads (clear SCTLR_EL1.DSSBS).
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	bic	x0, x0, #SCTLR_DSSBS_BIT
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage.
	 * x0 = base, x1 = size for inv_dcache_range.
	 * ---------------------------------------------
	 */
	adr	x0, __RW_START__
	adr	x1, __RW_END__
	sub	x1, x1, x0
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 * - the .bss section;
	 * - the coherent memory section.
	 * ---------------------------------------------
	 */
	ldr	x0, =__BSS_START__
	ldr	x1, =__BSS_SIZE__
	bl	zeromem

#if USE_COHERENT_MEM
	ldr	x0, =__COHERENT_RAM_START__
	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform TSP setup (C runtime is usable now
	 * that the stack and BSS are ready).
	 * ---------------------------------------------
	 */
	bl	tsp_setup

	/* ---------------------------------------------
	 * Enable pointer authentication (instruction
	 * key A) once setup has run.
	 * ---------------------------------------------
	 */
#if ENABLE_PAUTH
	mrs	x0, sctlr_el1
	orr	x0, x0, #SCTLR_EnIA_BIT
	msr	sctlr_el1, x0
	isb
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising.
	 * tsp_main's return value (x0) is passed to the
	 * TSPD in x1 alongside the TSP_ENTRY_DONE id.
	 * ---------------------------------------------
	 */
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

	/* The SMC above should never return */
tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000160
Andrew Thoelke891c4ca2014-05-20 21:43:27 +0100161
	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints.
	 * NOTE(review): each slot is a single branch
	 * instruction and the TSPD indexes into this
	 * table, so the order presumably must match
	 * the TSPD's vector structure layout - confirm
	 * against the tsp_vectors definition before
	 * reordering or inserting entries.
	 * -------------------------------------------
	 */
func tsp_vector_table
	b	tsp_yield_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_sel1_intr_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry
	b	tsp_abort_yield_smc_entry
endfunc tsp_vector_table
Andrew Thoelke891c4ca2014-05-20 21:43:27 +0100179
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000180 /*---------------------------------------------
181 * This entrypoint is used by the TSPD when this
182 * cpu is to be turned off through a CPU_OFF
183 * psci call to ask the TSP to perform any
184 * bookeeping necessary. In the current
185 * implementation, the TSPD expects the TSP to
186 * re-initialise its state so nothing is done
187 * here except for acknowledging the request.
188 * ---------------------------------------------
189 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000190func tsp_cpu_off_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000191 bl tsp_cpu_off_main
192 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000193endfunc tsp_cpu_off_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000194
195 /*---------------------------------------------
Juan Castillo4dc4a472014-08-12 11:17:06 +0100196 * This entrypoint is used by the TSPD when the
197 * system is about to be switched off (through
198 * a SYSTEM_OFF psci call) to ask the TSP to
199 * perform any necessary bookkeeping.
200 * ---------------------------------------------
201 */
202func tsp_system_off_entry
203 bl tsp_system_off_main
204 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000205endfunc tsp_system_off_entry
Juan Castillo4dc4a472014-08-12 11:17:06 +0100206
207 /*---------------------------------------------
208 * This entrypoint is used by the TSPD when the
209 * system is about to be reset (through a
210 * SYSTEM_RESET psci call) to ask the TSP to
211 * perform any necessary bookkeeping.
212 * ---------------------------------------------
213 */
214func tsp_system_reset_entry
215 bl tsp_system_reset_main
216 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000217endfunc tsp_system_reset_entry
Juan Castillo4dc4a472014-08-12 11:17:06 +0100218
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself i.e. setup
	 * the mmu, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. Caches and MMU
	 * will be turned off, the execution state
	 * will be aarch64 and exceptions masked.
	 * This is a per-cpu re-run of the relevant
	 * parts of tsp_entrypoint (no memory zeroing
	 * is needed here - that was done at cold boot).
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt (vectors are now valid) */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable MMU and D-caches together.
	 * x0 = 0: flags argument to the platform hook.
	 * --------------------------------------------
	 */
	mov	x0, #0
	bl	bl32_plat_enable_mmu

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * book keeping, then return to the TSPD via
	 * the SMC in restore_args_call_smc.
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000280
281 /*---------------------------------------------
282 * This entrypoint is used by the TSPD when this
283 * cpu is to be suspended through a CPU_SUSPEND
284 * psci call to ask the TSP to perform any
285 * bookeeping necessary. In the current
286 * implementation, the TSPD saves and restores
287 * the EL1 state.
288 * ---------------------------------------------
289 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000290func tsp_cpu_suspend_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000291 bl tsp_cpu_suspend_main
292 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000293endfunc tsp_cpu_suspend_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000294
	/*-------------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for `synchronously` handling a S-EL1
	 * Interrupt which was triggered while executing
	 * in normal world. 'x0' contains a magic number
	 * which indicates this. TSPD expects control to
	 * be handed back at the end of interrupt
	 * processing. This is done through an SMC.
	 * The handover agreement is:
	 *
	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
	 *    the ELR_EL3 from the non-secure state.
	 * 2. TSP has to preserve the callee saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. TSP has to preserve the system and vfp
	 *    registers (if applicable).
	 * 4. TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. TSP returns to TSPD using an SMC with
	 *    'x0' = TSP_HANDLED_S_EL1_INTR
	 * ------------------------------------------------
	 */
func tsp_sel1_intr_entry
#if DEBUG
	/* Debug-only sanity check of the magic number in x0 */
	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
	cmp	x0, x2
	b.ne	tsp_sel1_int_entry_panic
#endif
	/*-------------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1 e.g. context
	 * from a previous Non secure Interrupt.
	 * Update statistics and handle the S-EL1
	 * interrupt before returning to the TSPD.
	 * IRQ/FIQs are not enabled since that will
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. The handler can return 0
	 * if the interrupt was handled or TSP_PREEMPTED
	 * if the expected interrupt was preempted
	 * by an interrupt that should be handled in EL3
	 * e.g. Group 0 interrupt in GICv3. In both
	 * the cases switch to EL3 using SMC with id
	 * TSP_HANDLED_S_EL1_INTR. Any other return value
	 * from the handler will result in panic.
	 * ------------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_sel1_intr_stats
	bl	tsp_common_int_handler
	/* Check if the S-EL1 interrupt has been handled (x0 == 0) */
	cbnz	x0, tsp_sel1_intr_check_preemption
	b	tsp_sel1_intr_return
tsp_sel1_intr_check_preemption:
	/* Check if the S-EL1 interrupt has been preempted;
	 * any other non-zero return value is fatal. */
	mov_imm	x1, TSP_PREEMPTED
	cmp	x0, x1
	b.ne	tsp_sel1_int_entry_panic
tsp_sel1_intr_return:
	/* Restore the saved ERET context and hand back to EL3 */
	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
	restore_eret_context x2 x3
	smc	#0

	/* Should never reach here */
tsp_sel1_int_entry_panic:
	no_ret	plat_panic_handler
endfunc tsp_sel1_intr_entry
Achin Gupta76717892014-05-09 11:42:56 +0100363
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request. Control returns
	 * to the TSPD via the SMC issued by
	 * restore_args_call_smc.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000381
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request.
	 * Interrupts remain masked for the duration of
	 * the handler (contrast tsp_yield_smc_entry).
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000394
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a Yielding SMC request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler by unmasking IRQ/FIQ
	 * around the call, and re-mask them before
	 * returning to the TSPD.
	 * ---------------------------------------------
	 */
func tsp_yield_smc_entry
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	bl	tsp_smc_handler
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_yield_smc_entry
Douglas Raillardf2129652016-11-24 15:43:19 +0000411
412 /*---------------------------------------------------------------------
David Cunado28f69ab2017-04-05 11:34:03 +0100413 * This entrypoint is used by the TSPD to abort a pre-empted Yielding
Douglas Raillardf2129652016-11-24 15:43:19 +0000414 * SMC. It could be on behalf of non-secure world or because a CPU
415 * suspend/CPU off request needs to abort the preempted SMC.
416 * --------------------------------------------------------------------
417 */
David Cunado28f69ab2017-04-05 11:34:03 +0100418func tsp_abort_yield_smc_entry
Douglas Raillardf2129652016-11-24 15:43:19 +0000419
420 /*
421 * Exceptions masking is already done by the TSPD when entering this
422 * hook so there is no need to do it here.
423 */
424
425 /* Reset the stack used by the pre-empted SMC */
426 bl plat_set_my_stack
427
428 /*
429 * Allow some cleanup such as releasing locks.
430 */
431 bl tsp_abort_smc_handler
432
433 restore_args_call_smc
434
435 /* Should never reach here */
436 bl plat_panic_handler
David Cunado28f69ab2017-04-05 11:34:03 +0100437endfunc tsp_abort_yield_smc_entry