blob: 489183c52a4ea2927102108b148c2be6ebf0b3dd [file] [log] [blame]
Achin Gupta7c88f3f2014-02-18 18:09:12 +00001/*
Douglas Raillard21362a92016-12-02 13:51:54 +00002 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
Achin Gupta7c88f3f2014-02-18 18:09:12 +00003 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Achin Gupta7c88f3f2014-02-18 18:09:12 +00005 */
6
Achin Gupta7c88f3f2014-02-18 18:09:12 +00007#include <arch.h>
Andrew Thoelke38bde412014-03-18 13:46:55 +00008#include <asm_macros.S>
Dan Handley2bd4ef22014-04-09 13:14:54 +01009#include <tsp.h>
Antonio Nino Diaz4ef91f12017-02-20 14:22:22 +000010#include <xlat_tables_defs.h>
Dan Handleye2c27f52014-08-01 17:58:27 +010011#include "../tsp_private.h"
Achin Gupta7c88f3f2014-02-18 18:09:12 +000012
13
14 .globl tsp_entrypoint
Andrew Thoelke891c4ca2014-05-20 21:43:27 +010015 .globl tsp_vector_table
Achin Gupta7c88f3f2014-02-18 18:09:12 +000016
Soby Mathew9f71f702014-05-09 20:49:17 +010017
18
Achin Gupta7c88f3f2014-02-18 18:09:12 +000019 /* ---------------------------------------------
20 * Populate the params in x0-x7 from the pointer
21 * to the smc args structure in x0.
22 * ---------------------------------------------
23 */
24 .macro restore_args_call_smc
25 ldp x6, x7, [x0, #TSP_ARG6]
26 ldp x4, x5, [x0, #TSP_ARG4]
27 ldp x2, x3, [x0, #TSP_ARG2]
28 ldp x0, x1, [x0, #TSP_ARG0]
29 smc #0
30 .endm
31
Achin Gupta76717892014-05-09 11:42:56 +010032 .macro save_eret_context reg1 reg2
33 mrs \reg1, elr_el1
34 mrs \reg2, spsr_el1
35 stp \reg1, \reg2, [sp, #-0x10]!
36 stp x30, x18, [sp, #-0x10]!
37 .endm
38
39 .macro restore_eret_context reg1 reg2
40 ldp x30, x18, [sp], #0x10
41 ldp \reg1, \reg2, [sp], #0x10
42 msr elr_el1, \reg1
43 msr spsr_el1, \reg2
44 .endm
45
func tsp_entrypoint _align=3

	/* ---------------------------------------------
	 * Install the TSP exception vectors before
	 * anything else can fault.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Unmask SError now that the exception vectors
	 * are in place.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Turn on the instruction cache and the stack
	 * pointer and data access alignment checks.
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate BL32's RW memory (the data and
	 * NOBITS sections) so that dirty lines left in
	 * a system cache by an earlier boot loader
	 * stage cannot corrupt it later.
	 * ---------------------------------------------
	 */
	adr	x0, __RW_START__
	adr	x1, __RW_END__
	sub	x1, x1, x0
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Clear the NOBITS sections: .bss and, when
	 * configured, the coherent memory region.
	 * ---------------------------------------------
	 */
	ldr	x0, =__BSS_START__
	ldr	x1, =__BSS_SIZE__
	bl	zeromem

#if USE_COHERENT_MEM
	ldr	x0, =__COHERENT_RAM_START__
	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Set up this cpu's stack. Its memory will be
	 * Normal-IS-WBWA once the MMU is enabled.
	 * Reading stale stack memory after the MMU
	 * comes on is not a concern here because only
	 * the primary cpu is running at this point.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Seed the stack protector canary before any C
	 * code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Early platform setup, then platform specific
	 * architectural setup (e.g. MMU configuration).
	 * ---------------------------------------------
	 */
	bl	tsp_early_platform_setup
	bl	tsp_plat_arch_setup

	/* ---------------------------------------------
	 * Hand over to the C main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Report initialisation completion to the TSPD,
	 * passing tsp_main's result along in x1.
	 * ---------------------------------------------
	 */
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000148
Andrew Thoelke891c4ca2014-05-20 21:43:27 +0100149
150 /* -------------------------------------------
151 * Table of entrypoint vectors provided to the
152 * TSPD for the various entrypoints
153 * -------------------------------------------
154 */
155func tsp_vector_table
David Cunado28f69ab2017-04-05 11:34:03 +0100156 b tsp_yield_smc_entry
Andrew Thoelke891c4ca2014-05-20 21:43:27 +0100157 b tsp_fast_smc_entry
158 b tsp_cpu_on_entry
159 b tsp_cpu_off_entry
160 b tsp_cpu_resume_entry
161 b tsp_cpu_suspend_entry
Soby Mathewbec98512015-09-03 18:29:38 +0100162 b tsp_sel1_intr_entry
Juan Castillo4dc4a472014-08-12 11:17:06 +0100163 b tsp_system_off_entry
164 b tsp_system_reset_entry
David Cunado28f69ab2017-04-05 11:34:03 +0100165 b tsp_abort_yield_smc_entry
Kévin Petita877c252015-03-24 14:03:57 +0000166endfunc tsp_vector_table
Andrew Thoelke891c4ca2014-05-20 21:43:27 +0100167
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000168 /*---------------------------------------------
169 * This entrypoint is used by the TSPD when this
170 * cpu is to be turned off through a CPU_OFF
171 * psci call to ask the TSP to perform any
172 * bookeeping necessary. In the current
173 * implementation, the TSPD expects the TSP to
174 * re-initialise its state so nothing is done
175 * here except for acknowledging the request.
176 * ---------------------------------------------
177 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000178func tsp_cpu_off_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000179 bl tsp_cpu_off_main
180 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000181endfunc tsp_cpu_off_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000182
183 /*---------------------------------------------
Juan Castillo4dc4a472014-08-12 11:17:06 +0100184 * This entrypoint is used by the TSPD when the
185 * system is about to be switched off (through
186 * a SYSTEM_OFF psci call) to ask the TSP to
187 * perform any necessary bookkeeping.
188 * ---------------------------------------------
189 */
190func tsp_system_off_entry
191 bl tsp_system_off_main
192 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000193endfunc tsp_system_off_entry
Juan Castillo4dc4a472014-08-12 11:17:06 +0100194
195 /*---------------------------------------------
196 * This entrypoint is used by the TSPD when the
197 * system is about to be reset (through a
198 * SYSTEM_RESET psci call) to ask the TSP to
199 * perform any necessary bookkeeping.
200 * ---------------------------------------------
201 */
202func tsp_system_reset_entry
203 bl tsp_system_reset_main
204 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000205endfunc tsp_system_reset_entry
Juan Castillo4dc4a472014-08-12 11:17:06 +0100206
207 /*---------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000208 * This entrypoint is used by the TSPD when this
209 * cpu is turned on using a CPU_ON psci call to
210 * ask the TSP to initialise itself i.e. setup
211 * the mmu, stacks etc. Minimal architectural
212 * state will be initialised by the TSPD when
213 * this function is entered i.e. Caches and MMU
214 * will be turned off, the execution state
215 * will be aarch64 and exceptions masked.
216 * ---------------------------------------------
217 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000218func tsp_cpu_on_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000219 /* ---------------------------------------------
220 * Set the exception vector to something sane.
221 * ---------------------------------------------
222 */
Achin Guptaa4f50c22014-05-09 12:17:56 +0100223 adr x0, tsp_exceptions
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000224 msr vbar_el1, x0
Achin Guptaed1744e2014-08-04 23:13:10 +0100225 isb
226
227 /* Enable the SError interrupt */
228 msr daifclr, #DAIF_ABT_BIT
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000229
230 /* ---------------------------------------------
Achin Gupta9f098352014-07-18 18:38:28 +0100231 * Enable the instruction cache, stack pointer
232 * and data access alignment checks
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000233 * ---------------------------------------------
234 */
Achin Gupta9f098352014-07-18 18:38:28 +0100235 mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000236 mrs x0, sctlr_el1
Achin Gupta9f098352014-07-18 18:38:28 +0100237 orr x0, x0, x1
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000238 msr sctlr_el1, x0
239 isb
240
241 /* --------------------------------------------
Achin Guptae1aa5162014-06-26 09:58:52 +0100242 * Give ourselves a stack whose memory will be
243 * marked as Normal-IS-WBWA when the MMU is
244 * enabled.
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000245 * --------------------------------------------
246 */
Soby Mathewda43b662015-07-08 21:45:46 +0100247 bl plat_set_my_stack
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000248
Achin Guptae1aa5162014-06-26 09:58:52 +0100249 /* --------------------------------------------
250 * Enable the MMU with the DCache disabled. It
251 * is safe to use stacks allocated in normal
252 * memory as a result. All memory accesses are
253 * marked nGnRnE when the MMU is disabled. So
254 * all the stack writes will make it to memory.
255 * All memory accesses are marked Non-cacheable
256 * when the MMU is enabled but D$ is disabled.
257 * So used stack memory is guaranteed to be
258 * visible immediately after the MMU is enabled
259 * Enabling the DCache at the same time as the
260 * MMU can lead to speculatively fetched and
261 * possibly stale stack memory being read from
262 * other caches. This can lead to coherency
263 * issues.
264 * --------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000265 */
Achin Guptae1aa5162014-06-26 09:58:52 +0100266 mov x0, #DISABLE_DCACHE
Dan Handleyb226a4d2014-05-16 14:08:45 +0100267 bl bl32_plat_enable_mmu
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000268
269 /* ---------------------------------------------
Achin Guptae1aa5162014-06-26 09:58:52 +0100270 * Enable the Data cache now that the MMU has
271 * been enabled. The stack has been unwound. It
272 * will be written first before being read. This
273 * will invalidate any stale cache lines resi-
274 * -dent in other caches. We assume that
275 * interconnect coherency has been enabled for
276 * this cluster by EL3 firmware.
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000277 * ---------------------------------------------
278 */
Achin Guptae1aa5162014-06-26 09:58:52 +0100279 mrs x0, sctlr_el1
280 orr x0, x0, #SCTLR_C_BIT
281 msr sctlr_el1, x0
282 isb
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000283
284 /* ---------------------------------------------
285 * Enter C runtime to perform any remaining
286 * book keeping
287 * ---------------------------------------------
288 */
289 bl tsp_cpu_on_main
290 restore_args_call_smc
291
292 /* Should never reach here */
293tsp_cpu_on_entry_panic:
294 b tsp_cpu_on_entry_panic
Kévin Petita877c252015-03-24 14:03:57 +0000295endfunc tsp_cpu_on_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000296
297 /*---------------------------------------------
298 * This entrypoint is used by the TSPD when this
299 * cpu is to be suspended through a CPU_SUSPEND
300 * psci call to ask the TSP to perform any
301 * bookeeping necessary. In the current
302 * implementation, the TSPD saves and restores
303 * the EL1 state.
304 * ---------------------------------------------
305 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000306func tsp_cpu_suspend_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000307 bl tsp_cpu_suspend_main
308 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000309endfunc tsp_cpu_suspend_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000310
Soby Mathewbec98512015-09-03 18:29:38 +0100311 /*-------------------------------------------------
Achin Gupta76717892014-05-09 11:42:56 +0100312 * This entrypoint is used by the TSPD to pass
Soby Mathew78664242015-11-13 02:08:43 +0000313 * control for `synchronously` handling a S-EL1
314 * Interrupt which was triggered while executing
315 * in normal world. 'x0' contains a magic number
316 * which indicates this. TSPD expects control to
317 * be handed back at the end of interrupt
318 * processing. This is done through an SMC.
319 * The handover agreement is:
Achin Gupta76717892014-05-09 11:42:56 +0100320 *
321 * 1. PSTATE.DAIF are set upon entry. 'x1' has
322 * the ELR_EL3 from the non-secure state.
323 * 2. TSP has to preserve the callee saved
324 * general purpose registers, SP_EL1/EL0 and
325 * LR.
326 * 3. TSP has to preserve the system and vfp
327 * registers (if applicable).
328 * 4. TSP can use 'x0-x18' to enable its C
329 * runtime.
330 * 5. TSP returns to TSPD using an SMC with
Soby Mathewbec98512015-09-03 18:29:38 +0100331 * 'x0' = TSP_HANDLED_S_EL1_INTR
332 * ------------------------------------------------
Achin Gupta76717892014-05-09 11:42:56 +0100333 */
Soby Mathewbec98512015-09-03 18:29:38 +0100334func tsp_sel1_intr_entry
Achin Gupta76717892014-05-09 11:42:56 +0100335#if DEBUG
Soby Mathew78664242015-11-13 02:08:43 +0000336 mov_imm x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
Achin Gupta76717892014-05-09 11:42:56 +0100337 cmp x0, x2
Soby Mathewbec98512015-09-03 18:29:38 +0100338 b.ne tsp_sel1_int_entry_panic
Achin Gupta76717892014-05-09 11:42:56 +0100339#endif
Soby Mathewbec98512015-09-03 18:29:38 +0100340 /*-------------------------------------------------
Achin Gupta76717892014-05-09 11:42:56 +0100341 * Save any previous context needed to perform
342 * an exception return from S-EL1 e.g. context
Soby Mathewbec98512015-09-03 18:29:38 +0100343 * from a previous Non secure Interrupt.
344 * Update statistics and handle the S-EL1
345 * interrupt before returning to the TSPD.
Achin Gupta76717892014-05-09 11:42:56 +0100346 * IRQ/FIQs are not enabled since that will
347 * complicate the implementation. Execution
348 * will be transferred back to the normal world
Soby Mathew78664242015-11-13 02:08:43 +0000349 * in any case. The handler can return 0
350 * if the interrupt was handled or TSP_PREEMPTED
351 * if the expected interrupt was preempted
352 * by an interrupt that should be handled in EL3
353 * e.g. Group 0 interrupt in GICv3. In both
354 * the cases switch to EL3 using SMC with id
355 * TSP_HANDLED_S_EL1_INTR. Any other return value
356 * from the handler will result in panic.
Soby Mathewbec98512015-09-03 18:29:38 +0100357 * ------------------------------------------------
Achin Gupta76717892014-05-09 11:42:56 +0100358 */
359 save_eret_context x2 x3
Soby Mathewbec98512015-09-03 18:29:38 +0100360 bl tsp_update_sync_sel1_intr_stats
361 bl tsp_common_int_handler
Soby Mathew78664242015-11-13 02:08:43 +0000362 /* Check if the S-EL1 interrupt has been handled */
363 cbnz x0, tsp_sel1_intr_check_preemption
364 b tsp_sel1_intr_return
365tsp_sel1_intr_check_preemption:
366 /* Check if the S-EL1 interrupt has been preempted */
367 mov_imm x1, TSP_PREEMPTED
368 cmp x0, x1
369 b.ne tsp_sel1_int_entry_panic
370tsp_sel1_intr_return:
371 mov_imm x0, TSP_HANDLED_S_EL1_INTR
Achin Gupta76717892014-05-09 11:42:56 +0100372 restore_eret_context x2 x3
Achin Gupta76717892014-05-09 11:42:56 +0100373 smc #0
374
Soby Mathew78664242015-11-13 02:08:43 +0000375 /* Should never reach here */
Soby Mathewbec98512015-09-03 18:29:38 +0100376tsp_sel1_int_entry_panic:
Jeenu Viswambharan68aef102016-11-30 15:21:11 +0000377 no_ret plat_panic_handler
Soby Mathewbec98512015-09-03 18:29:38 +0100378endfunc tsp_sel1_intr_entry
Achin Gupta76717892014-05-09 11:42:56 +0100379
380 /*---------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000381 * This entrypoint is used by the TSPD when this
382 * cpu resumes execution after an earlier
383 * CPU_SUSPEND psci call to ask the TSP to
384 * restore its saved context. In the current
385 * implementation, the TSPD saves and restores
386 * EL1 state so nothing is done here apart from
387 * acknowledging the request.
388 * ---------------------------------------------
389 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000390func tsp_cpu_resume_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000391 bl tsp_cpu_resume_main
392 restore_args_call_smc
Antonio Nino Diaz1f21bcf2016-02-01 13:57:25 +0000393
394 /* Should never reach here */
Jeenu Viswambharan68aef102016-11-30 15:21:11 +0000395 no_ret plat_panic_handler
Kévin Petita877c252015-03-24 14:03:57 +0000396endfunc tsp_cpu_resume_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000397
398 /*---------------------------------------------
399 * This entrypoint is used by the TSPD to ask
400 * the TSP to service a fast smc request.
401 * ---------------------------------------------
402 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000403func tsp_fast_smc_entry
Soby Mathew9f71f702014-05-09 20:49:17 +0100404 bl tsp_smc_handler
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000405 restore_args_call_smc
Antonio Nino Diaz1f21bcf2016-02-01 13:57:25 +0000406
407 /* Should never reach here */
Jeenu Viswambharan68aef102016-11-30 15:21:11 +0000408 no_ret plat_panic_handler
Kévin Petita877c252015-03-24 14:03:57 +0000409endfunc tsp_fast_smc_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000410
Soby Mathew9f71f702014-05-09 20:49:17 +0100411 /*---------------------------------------------
412 * This entrypoint is used by the TSPD to ask
David Cunado28f69ab2017-04-05 11:34:03 +0100413 * the TSP to service a Yielding SMC request.
Soby Mathew9f71f702014-05-09 20:49:17 +0100414 * We will enable preemption during execution
415 * of tsp_smc_handler.
416 * ---------------------------------------------
417 */
David Cunado28f69ab2017-04-05 11:34:03 +0100418func tsp_yield_smc_entry
Soby Mathew9f71f702014-05-09 20:49:17 +0100419 msr daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
420 bl tsp_smc_handler
421 msr daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
422 restore_args_call_smc
Antonio Nino Diaz1f21bcf2016-02-01 13:57:25 +0000423
424 /* Should never reach here */
Jeenu Viswambharan68aef102016-11-30 15:21:11 +0000425 no_ret plat_panic_handler
David Cunado28f69ab2017-04-05 11:34:03 +0100426endfunc tsp_yield_smc_entry
Douglas Raillardf2129652016-11-24 15:43:19 +0000427
428 /*---------------------------------------------------------------------
David Cunado28f69ab2017-04-05 11:34:03 +0100429 * This entrypoint is used by the TSPD to abort a pre-empted Yielding
Douglas Raillardf2129652016-11-24 15:43:19 +0000430 * SMC. It could be on behalf of non-secure world or because a CPU
431 * suspend/CPU off request needs to abort the preempted SMC.
432 * --------------------------------------------------------------------
433 */
David Cunado28f69ab2017-04-05 11:34:03 +0100434func tsp_abort_yield_smc_entry
Douglas Raillardf2129652016-11-24 15:43:19 +0000435
436 /*
437 * Exceptions masking is already done by the TSPD when entering this
438 * hook so there is no need to do it here.
439 */
440
441 /* Reset the stack used by the pre-empted SMC */
442 bl plat_set_my_stack
443
444 /*
445 * Allow some cleanup such as releasing locks.
446 */
447 bl tsp_abort_smc_handler
448
449 restore_args_call_smc
450
451 /* Should never reach here */
452 bl plat_panic_handler
David Cunado28f69ab2017-04-05 11:34:03 +0100453endfunc tsp_abort_yield_smc_entry