blob: e5ea88c4ec7f6823de28f2e135210e247961fa72 [file] [log] [blame]
/*
 * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
6
Masahiro Yamadade634f82020-01-17 13:45:14 +09007#include <platform_def.h>
8
Achin Gupta7c88f3f2014-02-18 18:09:12 +00009#include <arch.h>
Andrew Thoelke38bde412014-03-18 13:46:55 +000010#include <asm_macros.S>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000011#include <bl32/tsp/tsp.h>
12#include <lib/xlat_tables/xlat_tables_defs.h>
Achin Gupta6b4ec242021-10-04 20:13:36 +010013#include <smccc_helpers.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000014
Dan Handleye2c27f52014-08-01 17:58:27 +010015#include "../tsp_private.h"
Achin Gupta7c88f3f2014-02-18 18:09:12 +000016
17
18 .globl tsp_entrypoint
Andrew Thoelke891c4ca2014-05-20 21:43:27 +010019 .globl tsp_vector_table
Achin Gupta6b4ec242021-10-04 20:13:36 +010020#if SPMC_AT_EL3
21 .globl tsp_cpu_on_entry
22#endif
Achin Gupta7c88f3f2014-02-18 18:09:12 +000023
Soby Mathew9f71f702014-05-09 20:49:17 +010024
25
Achin Gupta7c88f3f2014-02-18 18:09:12 +000026 /* ---------------------------------------------
27 * Populate the params in x0-x7 from the pointer
28 * to the smc args structure in x0.
29 * ---------------------------------------------
30 */
31 .macro restore_args_call_smc
Achin Gupta6b4ec242021-10-04 20:13:36 +010032 ldp x6, x7, [x0, #SMC_ARG6]
33 ldp x4, x5, [x0, #SMC_ARG4]
34 ldp x2, x3, [x0, #SMC_ARG2]
35 ldp x0, x1, [x0, #SMC_ARG0]
Achin Gupta7c88f3f2014-02-18 18:09:12 +000036 smc #0
37 .endm
38
Achin Gupta76717892014-05-09 11:42:56 +010039 .macro save_eret_context reg1 reg2
40 mrs \reg1, elr_el1
41 mrs \reg2, spsr_el1
42 stp \reg1, \reg2, [sp, #-0x10]!
43 stp x30, x18, [sp, #-0x10]!
44 .endm
45
	/*
	 * Inverse of save_eret_context: pop x30/x18, then pop the saved
	 * ELR_EL1/SPSR_EL1 values into \reg1/\reg2 and write them back
	 * to the system registers, re-arming the pending exception
	 * return. Pops must mirror the push order exactly.
	 */
	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm
52
Julius Wernerb4c75e92017-08-01 15:16:36 -070053func tsp_entrypoint _align=3
Achin Gupta7c88f3f2014-02-18 18:09:12 +000054
Masahiro Yamadade634f82020-01-17 13:45:14 +090055#if ENABLE_PIE
56 /*
57 * ------------------------------------------------------------
58 * If PIE is enabled fixup the Global descriptor Table only
59 * once during primary core cold boot path.
60 *
61 * Compile time base address, required for fixup, is calculated
62 * using "pie_fixup" label present within first page.
63 * ------------------------------------------------------------
64 */
65 pie_fixup:
66 ldr x0, =pie_fixup
Jimmy Brissoned202072020-08-04 16:18:52 -050067 and x0, x0, #~(PAGE_SIZE_MASK)
Masahiro Yamadade634f82020-01-17 13:45:14 +090068 mov_imm x1, (BL32_LIMIT - BL32_BASE)
69 add x1, x1, x0
70 bl fixup_gdt_reloc
71#endif /* ENABLE_PIE */
72
Achin Gupta7c88f3f2014-02-18 18:09:12 +000073 /* ---------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +000074 * Set the exception vector to something sane.
75 * ---------------------------------------------
76 */
Achin Guptaa4f50c22014-05-09 12:17:56 +010077 adr x0, tsp_exceptions
Achin Gupta7c88f3f2014-02-18 18:09:12 +000078 msr vbar_el1, x0
Achin Guptaed1744e2014-08-04 23:13:10 +010079 isb
Achin Gupta7c88f3f2014-02-18 18:09:12 +000080
81 /* ---------------------------------------------
Achin Guptaed1744e2014-08-04 23:13:10 +010082 * Enable the SError interrupt now that the
83 * exception vectors have been setup.
84 * ---------------------------------------------
85 */
86 msr daifclr, #DAIF_ABT_BIT
87
88 /* ---------------------------------------------
Achin Gupta9f098352014-07-18 18:38:28 +010089 * Enable the instruction cache, stack pointer
John Tsichritzisd5a59602019-03-04 16:42:54 +000090 * and data access alignment checks and disable
91 * speculative loads.
Achin Gupta7c88f3f2014-02-18 18:09:12 +000092 * ---------------------------------------------
93 */
Achin Gupta9f098352014-07-18 18:38:28 +010094 mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
Achin Gupta7c88f3f2014-02-18 18:09:12 +000095 mrs x0, sctlr_el1
Achin Gupta9f098352014-07-18 18:38:28 +010096 orr x0, x0, x1
John Tsichritzisd5a59602019-03-04 16:42:54 +000097 bic x0, x0, #SCTLR_DSSBS_BIT
Achin Gupta7c88f3f2014-02-18 18:09:12 +000098 msr sctlr_el1, x0
99 isb
100
101 /* ---------------------------------------------
Achin Guptae9c4a642015-09-11 16:03:13 +0100102 * Invalidate the RW memory used by the BL32
103 * image. This includes the data and NOBITS
104 * sections. This is done to safeguard against
105 * possible corruption of this memory by dirty
106 * cache lines in a system cache as a result of
Zelalem Awekeb0d69e82021-10-15 17:25:52 -0500107 * use by an earlier boot loader stage. If PIE
108 * is enabled however, RO sections including the
109 * GOT may be modified during pie fixup.
110 * Therefore, to be on the safe side, invalidate
111 * the entire image region if PIE is enabled.
Achin Guptae9c4a642015-09-11 16:03:13 +0100112 * ---------------------------------------------
113 */
Zelalem Awekeb0d69e82021-10-15 17:25:52 -0500114#if ENABLE_PIE
115#if SEPARATE_CODE_AND_RODATA
116 adrp x0, __TEXT_START__
117 add x0, x0, :lo12:__TEXT_START__
118#else
119 adrp x0, __RO_START__
120 add x0, x0, :lo12:__RO_START__
121#endif /* SEPARATE_CODE_AND_RODATA */
122#else
123 adrp x0, __RW_START__
124 add x0, x0, :lo12:__RW_START__
125#endif /* ENABLE_PIE */
126 adrp x1, __RW_END__
127 add x1, x1, :lo12:__RW_END__
Achin Guptae9c4a642015-09-11 16:03:13 +0100128 sub x1, x1, x0
129 bl inv_dcache_range
130
131 /* ---------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000132 * Zero out NOBITS sections. There are 2 of them:
133 * - the .bss section;
134 * - the coherent memory section.
135 * ---------------------------------------------
136 */
Yann Gautiere57bce82020-08-18 14:42:41 +0200137 adrp x0, __BSS_START__
138 add x0, x0, :lo12:__BSS_START__
139 adrp x1, __BSS_END__
140 add x1, x1, :lo12:__BSS_END__
141 sub x1, x1, x0
Douglas Raillard21362a92016-12-02 13:51:54 +0000142 bl zeromem
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000143
Soby Mathew2ae20432015-01-08 18:02:44 +0000144#if USE_COHERENT_MEM
Yann Gautiere57bce82020-08-18 14:42:41 +0200145 adrp x0, __COHERENT_RAM_START__
146 add x0, x0, :lo12:__COHERENT_RAM_START__
147 adrp x1, __COHERENT_RAM_END_UNALIGNED__
148 add x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
149 sub x1, x1, x0
Douglas Raillard21362a92016-12-02 13:51:54 +0000150 bl zeromem
Soby Mathew2ae20432015-01-08 18:02:44 +0000151#endif
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000152
153 /* --------------------------------------------
Achin Guptaf4a97092014-06-25 19:26:22 +0100154 * Allocate a stack whose memory will be marked
155 * as Normal-IS-WBWA when the MMU is enabled.
156 * There is no risk of reading stale stack
157 * memory after enabling the MMU as only the
158 * primary cpu is running at the moment.
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000159 * --------------------------------------------
160 */
Soby Mathewda43b662015-07-08 21:45:46 +0100161 bl plat_set_my_stack
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000162
163 /* ---------------------------------------------
Douglas Raillard306593d2017-02-24 18:14:15 +0000164 * Initialize the stack protector canary before
165 * any C code is called.
166 * ---------------------------------------------
167 */
168#if STACK_PROTECTOR_ENABLED
169 bl update_stack_protector_canary
170#endif
171
172 /* ---------------------------------------------
Antonio Nino Diaze61ece02019-02-26 11:41:03 +0000173 * Perform TSP setup
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000174 * ---------------------------------------------
175 */
Antonio Nino Diaze61ece02019-02-26 11:41:03 +0000176 bl tsp_setup
177
Antonio Nino Diaze61ece02019-02-26 11:41:03 +0000178#if ENABLE_PAUTH
Alexei Fedorov90f2e882019-05-24 12:17:09 +0100179 /* ---------------------------------------------
Alexei Fedorovf41355c2019-09-13 14:11:59 +0100180 * Program APIAKey_EL1
181 * and enable pointer authentication
Alexei Fedorov90f2e882019-05-24 12:17:09 +0100182 * ---------------------------------------------
183 */
Alexei Fedorovf41355c2019-09-13 14:11:59 +0100184 bl pauth_init_enable_el1
Antonio Nino Diaze61ece02019-02-26 11:41:03 +0000185#endif /* ENABLE_PAUTH */
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000186
187 /* ---------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000188 * Jump to main function.
189 * ---------------------------------------------
190 */
191 bl tsp_main
192
193 /* ---------------------------------------------
194 * Tell TSPD that we are done initialising
195 * ---------------------------------------------
196 */
197 mov x1, x0
198 mov x0, #TSP_ENTRY_DONE
199 smc #0
200
201tsp_entrypoint_panic:
202 b tsp_entrypoint_panic
Kévin Petita877c252015-03-24 14:03:57 +0000203endfunc tsp_entrypoint
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000204
Andrew Thoelke891c4ca2014-05-20 21:43:27 +0100205
206 /* -------------------------------------------
207 * Table of entrypoint vectors provided to the
208 * TSPD for the various entrypoints
209 * -------------------------------------------
210 */
Alexei Fedorov90f2e882019-05-24 12:17:09 +0100211vector_base tsp_vector_table
David Cunado28f69ab2017-04-05 11:34:03 +0100212 b tsp_yield_smc_entry
Andrew Thoelke891c4ca2014-05-20 21:43:27 +0100213 b tsp_fast_smc_entry
214 b tsp_cpu_on_entry
215 b tsp_cpu_off_entry
216 b tsp_cpu_resume_entry
217 b tsp_cpu_suspend_entry
Soby Mathewbec98512015-09-03 18:29:38 +0100218 b tsp_sel1_intr_entry
Juan Castillo4dc4a472014-08-12 11:17:06 +0100219 b tsp_system_off_entry
220 b tsp_system_reset_entry
David Cunado28f69ab2017-04-05 11:34:03 +0100221 b tsp_abort_yield_smc_entry
Andrew Thoelke891c4ca2014-05-20 21:43:27 +0100222
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000223 /*---------------------------------------------
224 * This entrypoint is used by the TSPD when this
225 * cpu is to be turned off through a CPU_OFF
226 * psci call to ask the TSP to perform any
227 * bookeeping necessary. In the current
228 * implementation, the TSPD expects the TSP to
229 * re-initialise its state so nothing is done
230 * here except for acknowledging the request.
231 * ---------------------------------------------
232 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000233func tsp_cpu_off_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000234 bl tsp_cpu_off_main
235 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000236endfunc tsp_cpu_off_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000237
238 /*---------------------------------------------
Juan Castillo4dc4a472014-08-12 11:17:06 +0100239 * This entrypoint is used by the TSPD when the
240 * system is about to be switched off (through
241 * a SYSTEM_OFF psci call) to ask the TSP to
242 * perform any necessary bookkeeping.
243 * ---------------------------------------------
244 */
245func tsp_system_off_entry
246 bl tsp_system_off_main
247 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000248endfunc tsp_system_off_entry
Juan Castillo4dc4a472014-08-12 11:17:06 +0100249
250 /*---------------------------------------------
251 * This entrypoint is used by the TSPD when the
252 * system is about to be reset (through a
253 * SYSTEM_RESET psci call) to ask the TSP to
254 * perform any necessary bookkeeping.
255 * ---------------------------------------------
256 */
257func tsp_system_reset_entry
258 bl tsp_system_reset_main
259 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000260endfunc tsp_system_reset_entry
Juan Castillo4dc4a472014-08-12 11:17:06 +0100261
262 /*---------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000263 * This entrypoint is used by the TSPD when this
264 * cpu is turned on using a CPU_ON psci call to
265 * ask the TSP to initialise itself i.e. setup
266 * the mmu, stacks etc. Minimal architectural
267 * state will be initialised by the TSPD when
268 * this function is entered i.e. Caches and MMU
269 * will be turned off, the execution state
270 * will be aarch64 and exceptions masked.
271 * ---------------------------------------------
272 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000273func tsp_cpu_on_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000274 /* ---------------------------------------------
275 * Set the exception vector to something sane.
276 * ---------------------------------------------
277 */
Achin Guptaa4f50c22014-05-09 12:17:56 +0100278 adr x0, tsp_exceptions
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000279 msr vbar_el1, x0
Achin Guptaed1744e2014-08-04 23:13:10 +0100280 isb
281
282 /* Enable the SError interrupt */
283 msr daifclr, #DAIF_ABT_BIT
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000284
285 /* ---------------------------------------------
Achin Gupta9f098352014-07-18 18:38:28 +0100286 * Enable the instruction cache, stack pointer
287 * and data access alignment checks
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000288 * ---------------------------------------------
289 */
Achin Gupta9f098352014-07-18 18:38:28 +0100290 mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000291 mrs x0, sctlr_el1
Achin Gupta9f098352014-07-18 18:38:28 +0100292 orr x0, x0, x1
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000293 msr sctlr_el1, x0
294 isb
295
296 /* --------------------------------------------
Achin Guptae1aa5162014-06-26 09:58:52 +0100297 * Give ourselves a stack whose memory will be
298 * marked as Normal-IS-WBWA when the MMU is
299 * enabled.
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000300 * --------------------------------------------
301 */
Soby Mathewda43b662015-07-08 21:45:46 +0100302 bl plat_set_my_stack
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000303
Achin Guptae1aa5162014-06-26 09:58:52 +0100304 /* --------------------------------------------
Jeenu Viswambharan0859d2c2018-04-27 16:28:12 +0100305 * Enable MMU and D-caches together.
Achin Guptae1aa5162014-06-26 09:58:52 +0100306 * --------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000307 */
Jeenu Viswambharan0859d2c2018-04-27 16:28:12 +0100308 mov x0, #0
Dan Handleyb226a4d2014-05-16 14:08:45 +0100309 bl bl32_plat_enable_mmu
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000310
Alexei Fedorovf41355c2019-09-13 14:11:59 +0100311#if ENABLE_PAUTH
312 /* ---------------------------------------------
313 * Program APIAKey_EL1
314 * and enable pointer authentication
315 * ---------------------------------------------
316 */
317 bl pauth_init_enable_el1
318#endif /* ENABLE_PAUTH */
319
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000320 /* ---------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000321 * Enter C runtime to perform any remaining
322 * book keeping
323 * ---------------------------------------------
324 */
325 bl tsp_cpu_on_main
326 restore_args_call_smc
327
328 /* Should never reach here */
329tsp_cpu_on_entry_panic:
330 b tsp_cpu_on_entry_panic
Kévin Petita877c252015-03-24 14:03:57 +0000331endfunc tsp_cpu_on_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000332
333 /*---------------------------------------------
334 * This entrypoint is used by the TSPD when this
335 * cpu is to be suspended through a CPU_SUSPEND
336 * psci call to ask the TSP to perform any
337 * bookeeping necessary. In the current
338 * implementation, the TSPD saves and restores
339 * the EL1 state.
340 * ---------------------------------------------
341 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000342func tsp_cpu_suspend_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000343 bl tsp_cpu_suspend_main
344 restore_args_call_smc
Kévin Petita877c252015-03-24 14:03:57 +0000345endfunc tsp_cpu_suspend_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000346
Soby Mathewbec98512015-09-03 18:29:38 +0100347 /*-------------------------------------------------
Achin Gupta76717892014-05-09 11:42:56 +0100348 * This entrypoint is used by the TSPD to pass
Soby Mathew78664242015-11-13 02:08:43 +0000349 * control for `synchronously` handling a S-EL1
350 * Interrupt which was triggered while executing
351 * in normal world. 'x0' contains a magic number
352 * which indicates this. TSPD expects control to
353 * be handed back at the end of interrupt
354 * processing. This is done through an SMC.
355 * The handover agreement is:
Achin Gupta76717892014-05-09 11:42:56 +0100356 *
357 * 1. PSTATE.DAIF are set upon entry. 'x1' has
358 * the ELR_EL3 from the non-secure state.
359 * 2. TSP has to preserve the callee saved
360 * general purpose registers, SP_EL1/EL0 and
361 * LR.
362 * 3. TSP has to preserve the system and vfp
363 * registers (if applicable).
364 * 4. TSP can use 'x0-x18' to enable its C
365 * runtime.
366 * 5. TSP returns to TSPD using an SMC with
Soby Mathewbec98512015-09-03 18:29:38 +0100367 * 'x0' = TSP_HANDLED_S_EL1_INTR
368 * ------------------------------------------------
Achin Gupta76717892014-05-09 11:42:56 +0100369 */
Soby Mathewbec98512015-09-03 18:29:38 +0100370func tsp_sel1_intr_entry
Achin Gupta76717892014-05-09 11:42:56 +0100371#if DEBUG
Soby Mathew78664242015-11-13 02:08:43 +0000372 mov_imm x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
Achin Gupta76717892014-05-09 11:42:56 +0100373 cmp x0, x2
Soby Mathewbec98512015-09-03 18:29:38 +0100374 b.ne tsp_sel1_int_entry_panic
Achin Gupta76717892014-05-09 11:42:56 +0100375#endif
Soby Mathewbec98512015-09-03 18:29:38 +0100376 /*-------------------------------------------------
Achin Gupta76717892014-05-09 11:42:56 +0100377 * Save any previous context needed to perform
378 * an exception return from S-EL1 e.g. context
Soby Mathewbec98512015-09-03 18:29:38 +0100379 * from a previous Non secure Interrupt.
380 * Update statistics and handle the S-EL1
381 * interrupt before returning to the TSPD.
Achin Gupta76717892014-05-09 11:42:56 +0100382 * IRQ/FIQs are not enabled since that will
383 * complicate the implementation. Execution
384 * will be transferred back to the normal world
Soby Mathew78664242015-11-13 02:08:43 +0000385 * in any case. The handler can return 0
386 * if the interrupt was handled or TSP_PREEMPTED
387 * if the expected interrupt was preempted
388 * by an interrupt that should be handled in EL3
389 * e.g. Group 0 interrupt in GICv3. In both
390 * the cases switch to EL3 using SMC with id
391 * TSP_HANDLED_S_EL1_INTR. Any other return value
392 * from the handler will result in panic.
Soby Mathewbec98512015-09-03 18:29:38 +0100393 * ------------------------------------------------
Achin Gupta76717892014-05-09 11:42:56 +0100394 */
395 save_eret_context x2 x3
Soby Mathewbec98512015-09-03 18:29:38 +0100396 bl tsp_update_sync_sel1_intr_stats
397 bl tsp_common_int_handler
Soby Mathew78664242015-11-13 02:08:43 +0000398 /* Check if the S-EL1 interrupt has been handled */
399 cbnz x0, tsp_sel1_intr_check_preemption
400 b tsp_sel1_intr_return
401tsp_sel1_intr_check_preemption:
402 /* Check if the S-EL1 interrupt has been preempted */
403 mov_imm x1, TSP_PREEMPTED
404 cmp x0, x1
405 b.ne tsp_sel1_int_entry_panic
406tsp_sel1_intr_return:
407 mov_imm x0, TSP_HANDLED_S_EL1_INTR
Achin Gupta76717892014-05-09 11:42:56 +0100408 restore_eret_context x2 x3
Achin Gupta76717892014-05-09 11:42:56 +0100409 smc #0
410
Soby Mathew78664242015-11-13 02:08:43 +0000411 /* Should never reach here */
Soby Mathewbec98512015-09-03 18:29:38 +0100412tsp_sel1_int_entry_panic:
Jeenu Viswambharan68aef102016-11-30 15:21:11 +0000413 no_ret plat_panic_handler
Soby Mathewbec98512015-09-03 18:29:38 +0100414endfunc tsp_sel1_intr_entry
Achin Gupta76717892014-05-09 11:42:56 +0100415
416 /*---------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000417 * This entrypoint is used by the TSPD when this
418 * cpu resumes execution after an earlier
419 * CPU_SUSPEND psci call to ask the TSP to
420 * restore its saved context. In the current
421 * implementation, the TSPD saves and restores
422 * EL1 state so nothing is done here apart from
423 * acknowledging the request.
424 * ---------------------------------------------
425 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000426func tsp_cpu_resume_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000427 bl tsp_cpu_resume_main
428 restore_args_call_smc
Antonio Nino Diaz1f21bcf2016-02-01 13:57:25 +0000429
430 /* Should never reach here */
Jeenu Viswambharan68aef102016-11-30 15:21:11 +0000431 no_ret plat_panic_handler
Kévin Petita877c252015-03-24 14:03:57 +0000432endfunc tsp_cpu_resume_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000433
434 /*---------------------------------------------
435 * This entrypoint is used by the TSPD to ask
436 * the TSP to service a fast smc request.
437 * ---------------------------------------------
438 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000439func tsp_fast_smc_entry
Soby Mathew9f71f702014-05-09 20:49:17 +0100440 bl tsp_smc_handler
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000441 restore_args_call_smc
Antonio Nino Diaz1f21bcf2016-02-01 13:57:25 +0000442
443 /* Should never reach here */
Jeenu Viswambharan68aef102016-11-30 15:21:11 +0000444 no_ret plat_panic_handler
Kévin Petita877c252015-03-24 14:03:57 +0000445endfunc tsp_fast_smc_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000446
Soby Mathew9f71f702014-05-09 20:49:17 +0100447 /*---------------------------------------------
448 * This entrypoint is used by the TSPD to ask
David Cunado28f69ab2017-04-05 11:34:03 +0100449 * the TSP to service a Yielding SMC request.
Soby Mathew9f71f702014-05-09 20:49:17 +0100450 * We will enable preemption during execution
451 * of tsp_smc_handler.
452 * ---------------------------------------------
453 */
David Cunado28f69ab2017-04-05 11:34:03 +0100454func tsp_yield_smc_entry
Soby Mathew9f71f702014-05-09 20:49:17 +0100455 msr daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
456 bl tsp_smc_handler
457 msr daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
458 restore_args_call_smc
Antonio Nino Diaz1f21bcf2016-02-01 13:57:25 +0000459
460 /* Should never reach here */
Jeenu Viswambharan68aef102016-11-30 15:21:11 +0000461 no_ret plat_panic_handler
David Cunado28f69ab2017-04-05 11:34:03 +0100462endfunc tsp_yield_smc_entry
Douglas Raillardf2129652016-11-24 15:43:19 +0000463
464 /*---------------------------------------------------------------------
David Cunado28f69ab2017-04-05 11:34:03 +0100465 * This entrypoint is used by the TSPD to abort a pre-empted Yielding
Douglas Raillardf2129652016-11-24 15:43:19 +0000466 * SMC. It could be on behalf of non-secure world or because a CPU
467 * suspend/CPU off request needs to abort the preempted SMC.
468 * --------------------------------------------------------------------
469 */
David Cunado28f69ab2017-04-05 11:34:03 +0100470func tsp_abort_yield_smc_entry
Douglas Raillardf2129652016-11-24 15:43:19 +0000471
472 /*
473 * Exceptions masking is already done by the TSPD when entering this
474 * hook so there is no need to do it here.
475 */
476
477 /* Reset the stack used by the pre-empted SMC */
478 bl plat_set_my_stack
479
480 /*
481 * Allow some cleanup such as releasing locks.
482 */
483 bl tsp_abort_smc_handler
484
485 restore_args_call_smc
486
487 /* Should never reach here */
488 bl plat_panic_handler
David Cunado28f69ab2017-04-05 11:34:03 +0100489endfunc tsp_abort_yield_smc_entry