/*
 * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl32/tsp/tsp.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

#include "../tsp_private.h"


	.globl	tsp_entrypoint
	.globl	tsp_vector_table


	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #TSP_ARG6]
	ldp	x4, x5, [x0, #TSP_ARG4]
	ldp	x2, x3, [x0, #TSP_ARG2]
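	/* x0 is loaded last since it holds the pointer to the args structure */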
	ldp	x0, x1, [x0, #TSP_ARG0]
	smc	#0
	.endm

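	/*
	 * Note: x30 (LR) must survive the C interrupt handler per the TSPD
	 * handover agreement below; x18 is stacked alongside it, which also
	 * keeps the stack 16-byte aligned across these macros.
	 */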
	.macro save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm

	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm

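	/*
	 * _align=3 is assumed to be the alignment exponent used by the func
	 * macro, i.e. the entrypoint is placed on an 8-byte boundary.
	 */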
func tsp_entrypoint _align=3

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been set up.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks and disable
	 * speculative loads.
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	bic	x0, x0, #SCTLR_DSSBS_BIT
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage.
	 * ---------------------------------------------
	 */
	adr	x0, __RW_START__
	adr	x1, __RW_END__
	sub	x1, x1, x0
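	/* x0 = start of the RW region, x1 = its size in bytes */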
89 bl inv_dcache_range
90
	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section.
	 * ---------------------------------------------
	 */
	ldr	x0, =__BSS_START__
	ldr	x1, =__BSS_SIZE__
	bl	zeromem

#if USE_COHERENT_MEM
	ldr	x0, =__COHERENT_RAM_START__
	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform TSP setup
	 * ---------------------------------------------
	 */
	bl	tsp_setup

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1 and enable pointer
	 * authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising
	 * ---------------------------------------------
	 */
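	/* tsp_main's return value (in x0) is handed to the TSPD in x1 */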
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint


	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints
	 * -------------------------------------------
	 */
vector_base tsp_vector_table
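	/*
	 * Each entry is a single branch instruction; the order below is
	 * assumed to match the layout the TSPD uses to index this table.
	 */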
	b	tsp_yield_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_sel1_intr_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry
	b	tsp_abort_yield_smc_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main
	restore_args_call_smc
endfunc tsp_cpu_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be switched off (through
	 * a SYSTEM_OFF psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_off_entry
	bl	tsp_system_off_main
	restore_args_call_smc
endfunc tsp_system_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be reset (through a
	 * SYSTEM_RESET psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main
	restore_args_call_smc
endfunc tsp_system_reset_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself, i.e. set up
	 * the MMU, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered, i.e. caches and the
	 * MMU will be turned off, the execution state
	 * will be AArch64 and exceptions will be masked.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable MMU and D-caches together.
	 * --------------------------------------------
	 */
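	/* x0 = 0: no flags passed to bl32_plat_enable_mmu */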
	mov	x0, #0
	bl	bl32_plat_enable_mmu

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1 and enable pointer
	 * authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * bookkeeping
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main
	restore_args_call_smc
endfunc tsp_cpu_suspend_entry

	/*-------------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for `synchronously` handling an S-EL1
	 * interrupt which was triggered while executing
	 * in the normal world. 'x0' contains a magic
	 * number which indicates this. The TSPD expects
	 * control to be handed back at the end of
	 * interrupt processing. This is done through an
	 * SMC. The handover agreement is:
	 *
	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
	 *    the ELR_EL3 from the non-secure state.
	 * 2. TSP has to preserve the callee saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. TSP has to preserve the system and vfp
	 *    registers (if applicable).
	 * 4. TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. TSP returns to TSPD using an SMC with
	 *    'x0' = TSP_HANDLED_S_EL1_INTR
	 * ------------------------------------------------
	 */
func tsp_sel1_intr_entry
#if DEBUG
	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
	cmp	x0, x2
	b.ne	tsp_sel1_int_entry_panic
#endif
	/*-------------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1, e.g. context
	 * from a previous non-secure interrupt.
	 * Update statistics and handle the S-EL1
	 * interrupt before returning to the TSPD.
	 * IRQ/FIQs are not enabled since that will
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. The handler can return 0
	 * if the interrupt was handled, or TSP_PREEMPTED
	 * if the expected interrupt was preempted
	 * by an interrupt that should be handled in EL3,
	 * e.g. a Group 0 interrupt in GICv3. In both
	 * cases, switch to EL3 using an SMC with id
	 * TSP_HANDLED_S_EL1_INTR. Any other return value
	 * from the handler will result in a panic.
	 * ------------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_sel1_intr_stats
	bl	tsp_common_int_handler
	/* Check if the S-EL1 interrupt has been handled */
	cbnz	x0, tsp_sel1_intr_check_preemption
	b	tsp_sel1_intr_return
tsp_sel1_intr_check_preemption:
	/* Check if the S-EL1 interrupt has been preempted */
	mov_imm	x1, TSP_PREEMPTED
	cmp	x0, x1
	b.ne	tsp_sel1_int_entry_panic
tsp_sel1_intr_return:
	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
	restore_eret_context x2 x3
	smc	#0

	/* Should never reach here */
tsp_sel1_int_entry_panic:
	no_ret	plat_panic_handler
endfunc tsp_sel1_intr_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a Yielding SMC request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler.
	 * ---------------------------------------------
	 */
func tsp_yield_smc_entry
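	/* Unmask IRQ/FIQ so that the Yielding SMC can be preempted */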
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	bl	tsp_smc_handler
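	/* Mask IRQ/FIQ again before handing control back to the TSPD */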
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_yield_smc_entry

	/*---------------------------------------------------------------------
	 * This entrypoint is used by the TSPD to abort a preempted Yielding
	 * SMC. It could be on behalf of the non-secure world or because a CPU
	 * suspend/CPU off request needs to abort the preempted SMC.
	 * --------------------------------------------------------------------
	 */
func tsp_abort_yield_smc_entry

	/*
	 * Exception masking is already done by the TSPD when entering this
	 * hook, so there is no need to do it here.
	 */

	/* Reset the stack used by the preempted SMC */
	bl	plat_set_my_stack

	/*
	 * Allow some cleanup such as releasing locks.
	 */
	bl	tsp_abort_smc_handler

	restore_args_call_smc

	/* Should never reach here */
	bl	plat_panic_handler
endfunc tsp_abort_yield_smc_entry