blob: 7d77f478bd9b983f5f7e2b644ff501ccdcf6380a [file] [log] [blame]
/*
 * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
6
Masahiro Yamadade634f82020-01-17 13:45:14 +09007#include <platform_def.h>
8
Achin Gupta7c88f3f2014-02-18 18:09:12 +00009#include <arch.h>
Andrew Thoelke38bde412014-03-18 13:46:55 +000010#include <asm_macros.S>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000011#include <bl32/tsp/tsp.h>
12#include <lib/xlat_tables/xlat_tables_defs.h>
13
Dan Handleye2c27f52014-08-01 17:58:27 +010014#include "../tsp_private.h"
Achin Gupta7c88f3f2014-02-18 18:09:12 +000015
16
17 .globl tsp_entrypoint
Andrew Thoelke891c4ca2014-05-20 21:43:27 +010018 .globl tsp_vector_table
Achin Gupta7c88f3f2014-02-18 18:09:12 +000019
Soby Mathew9f71f702014-05-09 20:49:17 +010020
21
Achin Gupta7c88f3f2014-02-18 18:09:12 +000022 /* ---------------------------------------------
23 * Populate the params in x0-x7 from the pointer
24 * to the smc args structure in x0.
25 * ---------------------------------------------
26 */
27 .macro restore_args_call_smc
28 ldp x6, x7, [x0, #TSP_ARG6]
29 ldp x4, x5, [x0, #TSP_ARG4]
30 ldp x2, x3, [x0, #TSP_ARG2]
31 ldp x0, x1, [x0, #TSP_ARG0]
32 smc #0
33 .endm
34
Achin Gupta76717892014-05-09 11:42:56 +010035 .macro save_eret_context reg1 reg2
36 mrs \reg1, elr_el1
37 mrs \reg2, spsr_el1
38 stp \reg1, \reg2, [sp, #-0x10]!
39 stp x30, x18, [sp, #-0x10]!
40 .endm
41
42 .macro restore_eret_context reg1 reg2
43 ldp x30, x18, [sp], #0x10
44 ldp \reg1, \reg2, [sp], #0x10
45 msr elr_el1, \reg1
46 msr spsr_el1, \reg2
47 .endm
48
Julius Wernerb4c75e92017-08-01 15:16:36 -070049func tsp_entrypoint _align=3
Achin Gupta7c88f3f2014-02-18 18:09:12 +000050
Masahiro Yamadade634f82020-01-17 13:45:14 +090051#if ENABLE_PIE
52 /*
53 * ------------------------------------------------------------
54 * If PIE is enabled fixup the Global descriptor Table only
55 * once during primary core cold boot path.
56 *
57 * Compile time base address, required for fixup, is calculated
58 * using "pie_fixup" label present within first page.
59 * ------------------------------------------------------------
60 */
61 pie_fixup:
62 ldr x0, =pie_fixup
Jimmy Brissoned202072020-08-04 16:18:52 -050063 and x0, x0, #~(PAGE_SIZE_MASK)
Masahiro Yamadade634f82020-01-17 13:45:14 +090064 mov_imm x1, (BL32_LIMIT - BL32_BASE)
65 add x1, x1, x0
66 bl fixup_gdt_reloc
67#endif /* ENABLE_PIE */
68
Achin Gupta7c88f3f2014-02-18 18:09:12 +000069 /* ---------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +000070 * Set the exception vector to something sane.
71 * ---------------------------------------------
72 */
Achin Guptaa4f50c22014-05-09 12:17:56 +010073 adr x0, tsp_exceptions
Achin Gupta7c88f3f2014-02-18 18:09:12 +000074 msr vbar_el1, x0
Achin Guptaed1744e2014-08-04 23:13:10 +010075 isb
Achin Gupta7c88f3f2014-02-18 18:09:12 +000076
77 /* ---------------------------------------------
Achin Guptaed1744e2014-08-04 23:13:10 +010078 * Enable the SError interrupt now that the
79 * exception vectors have been setup.
80 * ---------------------------------------------
81 */
82 msr daifclr, #DAIF_ABT_BIT
83
84 /* ---------------------------------------------
Achin Gupta9f098352014-07-18 18:38:28 +010085 * Enable the instruction cache, stack pointer
John Tsichritzisd5a59602019-03-04 16:42:54 +000086 * and data access alignment checks and disable
87 * speculative loads.
Achin Gupta7c88f3f2014-02-18 18:09:12 +000088 * ---------------------------------------------
89 */
Achin Gupta9f098352014-07-18 18:38:28 +010090 mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
Achin Gupta7c88f3f2014-02-18 18:09:12 +000091 mrs x0, sctlr_el1
Achin Gupta9f098352014-07-18 18:38:28 +010092 orr x0, x0, x1
John Tsichritzisd5a59602019-03-04 16:42:54 +000093 bic x0, x0, #SCTLR_DSSBS_BIT
Achin Gupta7c88f3f2014-02-18 18:09:12 +000094 msr sctlr_el1, x0
95 isb
96
97 /* ---------------------------------------------
Achin Guptae9c4a642015-09-11 16:03:13 +010098 * Invalidate the RW memory used by the BL32
99 * image. This includes the data and NOBITS
100 * sections. This is done to safeguard against
101 * possible corruption of this memory by dirty
102 * cache lines in a system cache as a result of
Zelalem Awekeb0d69e82021-10-15 17:25:52 -0500103 * use by an earlier boot loader stage. If PIE
104 * is enabled however, RO sections including the
105 * GOT may be modified during pie fixup.
106 * Therefore, to be on the safe side, invalidate
107 * the entire image region if PIE is enabled.
Achin Guptae9c4a642015-09-11 16:03:13 +0100108 * ---------------------------------------------
109 */
Zelalem Awekeb0d69e82021-10-15 17:25:52 -0500110#if ENABLE_PIE
111#if SEPARATE_CODE_AND_RODATA
112 adrp x0, __TEXT_START__
113 add x0, x0, :lo12:__TEXT_START__
114#else
115 adrp x0, __RO_START__
116 add x0, x0, :lo12:__RO_START__
117#endif /* SEPARATE_CODE_AND_RODATA */
118#else
119 adrp x0, __RW_START__
120 add x0, x0, :lo12:__RW_START__
121#endif /* ENABLE_PIE */
122 adrp x1, __RW_END__
123 add x1, x1, :lo12:__RW_END__
Achin Guptae9c4a642015-09-11 16:03:13 +0100124 sub x1, x1, x0
125 bl inv_dcache_range
126
127 /* ---------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000128 * Zero out NOBITS sections. There are 2 of them:
129 * - the .bss section;
130 * - the coherent memory section.
131 * ---------------------------------------------
132 */
Yann Gautiere57bce82020-08-18 14:42:41 +0200133 adrp x0, __BSS_START__
134 add x0, x0, :lo12:__BSS_START__
135 adrp x1, __BSS_END__
136 add x1, x1, :lo12:__BSS_END__
137 sub x1, x1, x0
Douglas Raillard21362a92016-12-02 13:51:54 +0000138 bl zeromem
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000139
Soby Mathew2ae20432015-01-08 18:02:44 +0000140#if USE_COHERENT_MEM
Yann Gautiere57bce82020-08-18 14:42:41 +0200141 adrp x0, __COHERENT_RAM_START__
142 add x0, x0, :lo12:__COHERENT_RAM_START__
143 adrp x1, __COHERENT_RAM_END_UNALIGNED__
144 add x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
145 sub x1, x1, x0
Douglas Raillard21362a92016-12-02 13:51:54 +0000146 bl zeromem
Soby Mathew2ae20432015-01-08 18:02:44 +0000147#endif
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000148
149 /* --------------------------------------------
Achin Guptaf4a97092014-06-25 19:26:22 +0100150 * Allocate a stack whose memory will be marked
151 * as Normal-IS-WBWA when the MMU is enabled.
152 * There is no risk of reading stale stack
153 * memory after enabling the MMU as only the
154 * primary cpu is running at the moment.
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000155 * --------------------------------------------
156 */
Soby Mathewda43b662015-07-08 21:45:46 +0100157 bl plat_set_my_stack
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000158
159 /* ---------------------------------------------
Douglas Raillard306593d2017-02-24 18:14:15 +0000160 * Initialize the stack protector canary before
161 * any C code is called.
162 * ---------------------------------------------
163 */
164#if STACK_PROTECTOR_ENABLED
165 bl update_stack_protector_canary
166#endif
167
168 /* ---------------------------------------------
Antonio Nino Diaze61ece02019-02-26 11:41:03 +0000169 * Perform TSP setup
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000170 * ---------------------------------------------
171 */
Antonio Nino Diaze61ece02019-02-26 11:41:03 +0000172 bl tsp_setup
173
Antonio Nino Diaze61ece02019-02-26 11:41:03 +0000174#if ENABLE_PAUTH
Alexei Fedorov90f2e882019-05-24 12:17:09 +0100175 /* ---------------------------------------------
Alexei Fedorovf41355c2019-09-13 14:11:59 +0100176 * Program APIAKey_EL1
177 * and enable pointer authentication
Alexei Fedorov90f2e882019-05-24 12:17:09 +0100178 * ---------------------------------------------
179 */
Alexei Fedorovf41355c2019-09-13 14:11:59 +0100180 bl pauth_init_enable_el1
Antonio Nino Diaze61ece02019-02-26 11:41:03 +0000181#endif /* ENABLE_PAUTH */
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000182
183 /* ---------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000184 * Jump to main function.
185 * ---------------------------------------------
186 */
187 bl tsp_main
188
189 /* ---------------------------------------------
190 * Tell TSPD that we are done initialising
191 * ---------------------------------------------
192 */
193 mov x1, x0
194 mov x0, #TSP_ENTRY_DONE
195 smc #0
196
197tsp_entrypoint_panic:
198 b tsp_entrypoint_panic
Kévin Petita877c252015-03-24 14:03:57 +0000199endfunc tsp_entrypoint
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000200
Andrew Thoelke891c4ca2014-05-20 21:43:27 +0100201
202 /* -------------------------------------------
203 * Table of entrypoint vectors provided to the
204 * TSPD for the various entrypoints
205 * -------------------------------------------
206 */
Alexei Fedorov90f2e882019-05-24 12:17:09 +0100207vector_base tsp_vector_table
David Cunado28f69ab2017-04-05 11:34:03 +0100208 b tsp_yield_smc_entry
Andrew Thoelke891c4ca2014-05-20 21:43:27 +0100209 b tsp_fast_smc_entry
210 b tsp_cpu_on_entry
211 b tsp_cpu_off_entry
212 b tsp_cpu_resume_entry
213 b tsp_cpu_suspend_entry
Soby Mathewbec98512015-09-03 18:29:38 +0100214 b tsp_sel1_intr_entry
Juan Castillo4dc4a472014-08-12 11:17:06 +0100215 b tsp_system_off_entry
216 b tsp_system_reset_entry
David Cunado28f69ab2017-04-05 11:34:03 +0100217 b tsp_abort_yield_smc_entry
Andrew Thoelke891c4ca2014-05-20 21:43:27 +0100218
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000219 /*---------------------------------------------
220 * This entrypoint is used by the TSPD when this
221 * cpu is to be turned off through a CPU_OFF
222 * psci call to ask the TSP to perform any
223 * bookeeping necessary. In the current
224 * implementation, the TSPD expects the TSP to
225 * re-initialise its state so nothing is done
226 * here except for acknowledging the request.
227 * ---------------------------------------------
228 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000229func tsp_cpu_off_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000230 bl tsp_cpu_off_main
231 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000232endfunc tsp_cpu_off_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000233
234 /*---------------------------------------------
Juan Castillo4dc4a472014-08-12 11:17:06 +0100235 * This entrypoint is used by the TSPD when the
236 * system is about to be switched off (through
237 * a SYSTEM_OFF psci call) to ask the TSP to
238 * perform any necessary bookkeeping.
239 * ---------------------------------------------
240 */
241func tsp_system_off_entry
242 bl tsp_system_off_main
243 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000244endfunc tsp_system_off_entry
Juan Castillo4dc4a472014-08-12 11:17:06 +0100245
246 /*---------------------------------------------
247 * This entrypoint is used by the TSPD when the
248 * system is about to be reset (through a
249 * SYSTEM_RESET psci call) to ask the TSP to
250 * perform any necessary bookkeeping.
251 * ---------------------------------------------
252 */
253func tsp_system_reset_entry
254 bl tsp_system_reset_main
255 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000256endfunc tsp_system_reset_entry
Juan Castillo4dc4a472014-08-12 11:17:06 +0100257
258 /*---------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000259 * This entrypoint is used by the TSPD when this
260 * cpu is turned on using a CPU_ON psci call to
261 * ask the TSP to initialise itself i.e. setup
262 * the mmu, stacks etc. Minimal architectural
263 * state will be initialised by the TSPD when
264 * this function is entered i.e. Caches and MMU
265 * will be turned off, the execution state
266 * will be aarch64 and exceptions masked.
267 * ---------------------------------------------
268 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000269func tsp_cpu_on_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000270 /* ---------------------------------------------
271 * Set the exception vector to something sane.
272 * ---------------------------------------------
273 */
Achin Guptaa4f50c22014-05-09 12:17:56 +0100274 adr x0, tsp_exceptions
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000275 msr vbar_el1, x0
Achin Guptaed1744e2014-08-04 23:13:10 +0100276 isb
277
278 /* Enable the SError interrupt */
279 msr daifclr, #DAIF_ABT_BIT
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000280
281 /* ---------------------------------------------
Achin Gupta9f098352014-07-18 18:38:28 +0100282 * Enable the instruction cache, stack pointer
283 * and data access alignment checks
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000284 * ---------------------------------------------
285 */
Achin Gupta9f098352014-07-18 18:38:28 +0100286 mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000287 mrs x0, sctlr_el1
Achin Gupta9f098352014-07-18 18:38:28 +0100288 orr x0, x0, x1
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000289 msr sctlr_el1, x0
290 isb
291
292 /* --------------------------------------------
Achin Guptae1aa5162014-06-26 09:58:52 +0100293 * Give ourselves a stack whose memory will be
294 * marked as Normal-IS-WBWA when the MMU is
295 * enabled.
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000296 * --------------------------------------------
297 */
Soby Mathewda43b662015-07-08 21:45:46 +0100298 bl plat_set_my_stack
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000299
Achin Guptae1aa5162014-06-26 09:58:52 +0100300 /* --------------------------------------------
Jeenu Viswambharan0859d2c2018-04-27 16:28:12 +0100301 * Enable MMU and D-caches together.
Achin Guptae1aa5162014-06-26 09:58:52 +0100302 * --------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000303 */
Jeenu Viswambharan0859d2c2018-04-27 16:28:12 +0100304 mov x0, #0
Dan Handleyb226a4d2014-05-16 14:08:45 +0100305 bl bl32_plat_enable_mmu
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000306
Alexei Fedorovf41355c2019-09-13 14:11:59 +0100307#if ENABLE_PAUTH
308 /* ---------------------------------------------
309 * Program APIAKey_EL1
310 * and enable pointer authentication
311 * ---------------------------------------------
312 */
313 bl pauth_init_enable_el1
314#endif /* ENABLE_PAUTH */
315
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000316 /* ---------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000317 * Enter C runtime to perform any remaining
318 * book keeping
319 * ---------------------------------------------
320 */
321 bl tsp_cpu_on_main
322 restore_args_call_smc
323
324 /* Should never reach here */
325tsp_cpu_on_entry_panic:
326 b tsp_cpu_on_entry_panic
Kévin Petita877c252015-03-24 14:03:57 +0000327endfunc tsp_cpu_on_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000328
329 /*---------------------------------------------
330 * This entrypoint is used by the TSPD when this
331 * cpu is to be suspended through a CPU_SUSPEND
332 * psci call to ask the TSP to perform any
333 * bookeeping necessary. In the current
334 * implementation, the TSPD saves and restores
335 * the EL1 state.
336 * ---------------------------------------------
337 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000338func tsp_cpu_suspend_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000339 bl tsp_cpu_suspend_main
340 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000341endfunc tsp_cpu_suspend_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000342
Soby Mathewbec98512015-09-03 18:29:38 +0100343 /*-------------------------------------------------
Achin Gupta76717892014-05-09 11:42:56 +0100344 * This entrypoint is used by the TSPD to pass
Soby Mathew78664242015-11-13 02:08:43 +0000345 * control for `synchronously` handling a S-EL1
346 * Interrupt which was triggered while executing
347 * in normal world. 'x0' contains a magic number
348 * which indicates this. TSPD expects control to
349 * be handed back at the end of interrupt
350 * processing. This is done through an SMC.
351 * The handover agreement is:
Achin Gupta76717892014-05-09 11:42:56 +0100352 *
353 * 1. PSTATE.DAIF are set upon entry. 'x1' has
354 * the ELR_EL3 from the non-secure state.
355 * 2. TSP has to preserve the callee saved
356 * general purpose registers, SP_EL1/EL0 and
357 * LR.
358 * 3. TSP has to preserve the system and vfp
359 * registers (if applicable).
360 * 4. TSP can use 'x0-x18' to enable its C
361 * runtime.
362 * 5. TSP returns to TSPD using an SMC with
Soby Mathewbec98512015-09-03 18:29:38 +0100363 * 'x0' = TSP_HANDLED_S_EL1_INTR
364 * ------------------------------------------------
Achin Gupta76717892014-05-09 11:42:56 +0100365 */
Soby Mathewbec98512015-09-03 18:29:38 +0100366func tsp_sel1_intr_entry
Achin Gupta76717892014-05-09 11:42:56 +0100367#if DEBUG
Soby Mathew78664242015-11-13 02:08:43 +0000368 mov_imm x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
Achin Gupta76717892014-05-09 11:42:56 +0100369 cmp x0, x2
Soby Mathewbec98512015-09-03 18:29:38 +0100370 b.ne tsp_sel1_int_entry_panic
Achin Gupta76717892014-05-09 11:42:56 +0100371#endif
Soby Mathewbec98512015-09-03 18:29:38 +0100372 /*-------------------------------------------------
Achin Gupta76717892014-05-09 11:42:56 +0100373 * Save any previous context needed to perform
374 * an exception return from S-EL1 e.g. context
Soby Mathewbec98512015-09-03 18:29:38 +0100375 * from a previous Non secure Interrupt.
376 * Update statistics and handle the S-EL1
377 * interrupt before returning to the TSPD.
Achin Gupta76717892014-05-09 11:42:56 +0100378 * IRQ/FIQs are not enabled since that will
379 * complicate the implementation. Execution
380 * will be transferred back to the normal world
Soby Mathew78664242015-11-13 02:08:43 +0000381 * in any case. The handler can return 0
382 * if the interrupt was handled or TSP_PREEMPTED
383 * if the expected interrupt was preempted
384 * by an interrupt that should be handled in EL3
385 * e.g. Group 0 interrupt in GICv3. In both
386 * the cases switch to EL3 using SMC with id
387 * TSP_HANDLED_S_EL1_INTR. Any other return value
388 * from the handler will result in panic.
Soby Mathewbec98512015-09-03 18:29:38 +0100389 * ------------------------------------------------
Achin Gupta76717892014-05-09 11:42:56 +0100390 */
391 save_eret_context x2 x3
Soby Mathewbec98512015-09-03 18:29:38 +0100392 bl tsp_update_sync_sel1_intr_stats
393 bl tsp_common_int_handler
Soby Mathew78664242015-11-13 02:08:43 +0000394 /* Check if the S-EL1 interrupt has been handled */
395 cbnz x0, tsp_sel1_intr_check_preemption
396 b tsp_sel1_intr_return
397tsp_sel1_intr_check_preemption:
398 /* Check if the S-EL1 interrupt has been preempted */
399 mov_imm x1, TSP_PREEMPTED
400 cmp x0, x1
401 b.ne tsp_sel1_int_entry_panic
402tsp_sel1_intr_return:
403 mov_imm x0, TSP_HANDLED_S_EL1_INTR
Achin Gupta76717892014-05-09 11:42:56 +0100404 restore_eret_context x2 x3
Achin Gupta76717892014-05-09 11:42:56 +0100405 smc #0
406
Soby Mathew78664242015-11-13 02:08:43 +0000407 /* Should never reach here */
Soby Mathewbec98512015-09-03 18:29:38 +0100408tsp_sel1_int_entry_panic:
Jeenu Viswambharan68aef102016-11-30 15:21:11 +0000409 no_ret plat_panic_handler
Soby Mathewbec98512015-09-03 18:29:38 +0100410endfunc tsp_sel1_intr_entry
Achin Gupta76717892014-05-09 11:42:56 +0100411
412 /*---------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000413 * This entrypoint is used by the TSPD when this
414 * cpu resumes execution after an earlier
415 * CPU_SUSPEND psci call to ask the TSP to
416 * restore its saved context. In the current
417 * implementation, the TSPD saves and restores
418 * EL1 state so nothing is done here apart from
419 * acknowledging the request.
420 * ---------------------------------------------
421 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000422func tsp_cpu_resume_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000423 bl tsp_cpu_resume_main
424 restore_args_call_smc
Antonio Nino Diaz1f21bcf2016-02-01 13:57:25 +0000425
426 /* Should never reach here */
Jeenu Viswambharan68aef102016-11-30 15:21:11 +0000427 no_ret plat_panic_handler
Kévin Petita877c252015-03-24 14:03:57 +0000428endfunc tsp_cpu_resume_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000429
430 /*---------------------------------------------
431 * This entrypoint is used by the TSPD to ask
432 * the TSP to service a fast smc request.
433 * ---------------------------------------------
434 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000435func tsp_fast_smc_entry
Soby Mathew9f71f702014-05-09 20:49:17 +0100436 bl tsp_smc_handler
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000437 restore_args_call_smc
Antonio Nino Diaz1f21bcf2016-02-01 13:57:25 +0000438
439 /* Should never reach here */
Jeenu Viswambharan68aef102016-11-30 15:21:11 +0000440 no_ret plat_panic_handler
Kévin Petita877c252015-03-24 14:03:57 +0000441endfunc tsp_fast_smc_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000442
Soby Mathew9f71f702014-05-09 20:49:17 +0100443 /*---------------------------------------------
444 * This entrypoint is used by the TSPD to ask
David Cunado28f69ab2017-04-05 11:34:03 +0100445 * the TSP to service a Yielding SMC request.
Soby Mathew9f71f702014-05-09 20:49:17 +0100446 * We will enable preemption during execution
447 * of tsp_smc_handler.
448 * ---------------------------------------------
449 */
David Cunado28f69ab2017-04-05 11:34:03 +0100450func tsp_yield_smc_entry
Soby Mathew9f71f702014-05-09 20:49:17 +0100451 msr daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
452 bl tsp_smc_handler
453 msr daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
454 restore_args_call_smc
Antonio Nino Diaz1f21bcf2016-02-01 13:57:25 +0000455
456 /* Should never reach here */
Jeenu Viswambharan68aef102016-11-30 15:21:11 +0000457 no_ret plat_panic_handler
David Cunado28f69ab2017-04-05 11:34:03 +0100458endfunc tsp_yield_smc_entry
Douglas Raillardf2129652016-11-24 15:43:19 +0000459
460 /*---------------------------------------------------------------------
David Cunado28f69ab2017-04-05 11:34:03 +0100461 * This entrypoint is used by the TSPD to abort a pre-empted Yielding
Douglas Raillardf2129652016-11-24 15:43:19 +0000462 * SMC. It could be on behalf of non-secure world or because a CPU
463 * suspend/CPU off request needs to abort the preempted SMC.
464 * --------------------------------------------------------------------
465 */
David Cunado28f69ab2017-04-05 11:34:03 +0100466func tsp_abort_yield_smc_entry
Douglas Raillardf2129652016-11-24 15:43:19 +0000467
468 /*
469 * Exceptions masking is already done by the TSPD when entering this
470 * hook so there is no need to do it here.
471 */
472
473 /* Reset the stack used by the pre-empted SMC */
474 bl plat_set_my_stack
475
476 /*
477 * Allow some cleanup such as releasing locks.
478 */
479 bl tsp_abort_smc_handler
480
481 restore_args_call_smc
482
483 /* Should never reach here */
484 bl plat_panic_handler
David Cunado28f69ab2017-04-05 11:34:03 +0100485endfunc tsp_abort_yield_smc_entry