/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */


/*******************************************************************************
 * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
 * plug-in component to the Secure Monitor, registered as a runtime service. The
 * SPD is expected to be a functional extension of the Secure Payload (SP) that
 * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
 * the Trusted OS/Applications range to the dispatcher. The SPD will either
 * handle the request locally or delegate it to the Secure Payload. It is also
 * responsible for initialising and maintaining communication with the SP.
 ******************************************************************************/
#include <arch_helpers.h>
#include <assert.h>
#include <bl31.h>
#include <bl_common.h>
#include <context_mgmt.h>
#include <debug.h>
#include <ehf.h>
#include <errno.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include <string.h>
#include <tsp.h>
#include <uuid.h>
#include "tspd_private.h"

/*******************************************************************************
 * Address of the entrypoint vector table in the Secure Payload. It is
 * initialised once on the primary core after a cold boot.
 ******************************************************************************/
tsp_vectors_t *tsp_vectors;

/*******************************************************************************
 * Array to keep track of per-cpu Secure Payload state
 ******************************************************************************/
tsp_context_t tspd_sp_context[TSPD_CORE_COUNT];


/* TSP UID */
DEFINE_SVC_UUID(tsp_uuid,
		0x5b3056a0, 0x3291, 0x427b, 0x98, 0x11,
		0x71, 0x68, 0xca, 0x50, 0xf3, 0xfa);

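/*
 * Forward declaration: tspd_init() is registered with BL31 in tspd_setup()
 * (unless TSP_INIT_ASYNC is set) for deferred invocation once BL31
 * initialisation completes.
 */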
int32_t tspd_init(void);

/*
 * This helper function handles Secure EL1 preemption. The preemption could be
 * due to Non-secure interrupts or EL3 interrupts. In both cases we context
 * switch to the normal world; in the case of EL3 interrupts, the interrupt
 * will again be routed to EL3 and handled at the exception vectors.
 */
uint64_t tspd_handle_sp_preemption(void *handle)
{
	cpu_context_t *ns_cpu_context;

	assert(handle == cm_get_context(SECURE));
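	/* Save the secure EL1 system register context before leaving the TSP */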
	cm_el1_sysregs_context_save(SECURE);

	/* Get a reference to the non-secure context */
	ns_cpu_context = cm_get_context(NON_SECURE);
	assert(ns_cpu_context);

	/*
	 * To allow the Secure EL1 interrupt handler to re-enter the TSP while
	 * the TSP is preempted, the secure system register context which will
	 * get overwritten must be additionally saved. This is currently done
	 * by the TSPD S-EL1 interrupt handler.
	 */

	/*
	 * Restore non-secure state.
	 */
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	/*
	 * The TSP was preempted during execution of a Yielding SMC Call.
	 * Return to the normal world with SMC_PREEMPTED as the error code
	 * in x0.
	 */
	SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
}

/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the TSPD. It
 * validates the interrupt and upon success arranges entry into the TSP at
 * 'tsp_sel1_intr_entry()' for handling the interrupt.
 ******************************************************************************/
static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
					    uint32_t flags,
					    void *handle,
					    void *cookie)
{
	uint32_t linear_id;
	tsp_context_t *tsp_ctx;

	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering the TSP */
	cm_el1_sysregs_context_save(NON_SECURE);

	/* Get a reference to this cpu's TSP context */
	linear_id = plat_my_core_pos();
	tsp_ctx = &tspd_sp_context[linear_id];
	assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

	/*
	 * Determine if the TSP was previously preempted. Its last known
	 * context has to be preserved in this case.
	 * The TSP should return control to the TSPD after handling this
	 * S-EL1 interrupt. Preserve essential EL3 context to allow entry into
	 * the TSP at the S-EL1 interrupt entry point using the 'cpu_context'
	 * structure. There is no need to save the secure system register
	 * context since the TSP is supposed to preserve it during S-EL1
	 * interrupt handling.
	 */
	if (get_yield_smc_active_flag(tsp_ctx->state)) {
		tsp_ctx->saved_spsr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
						      CTX_SPSR_EL3);
		tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
						     CTX_ELR_EL3);
#if TSP_NS_INTR_ASYNC_PREEMPT
		/* Need to save the previously interrupted secure context */
		memcpy(&tsp_ctx->sp_ctx, &tsp_ctx->cpu_ctx, TSPD_SP_CTX_SIZE);
#endif
	}

	cm_el1_sysregs_context_restore(SECURE);
	cm_set_elr_spsr_el3(SECURE, (uint64_t) &tsp_vectors->sel1_intr_entry,
		    SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

	cm_set_next_eret_context(SECURE);

	/*
	 * Tell the TSP that it has to handle an S-EL1 interrupt synchronously.
	 * The address of the instruction in the normal world at which the
	 * interrupt was generated is also passed for debugging purposes. It
	 * is safe to retrieve this address from ELR_EL3 as the secure context
	 * will not take effect until el3_exit().
	 */
	SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_SEL1_INTR_AND_RETURN, read_elr_el3());
}

#if TSP_NS_INTR_ASYNC_PREEMPT
/*******************************************************************************
 * This function is the handler registered for non-secure interrupts by the
 * TSPD. It validates the interrupt and upon success arranges entry into the
 * normal world for handling the interrupt.
 ******************************************************************************/
static uint64_t tspd_ns_interrupt_handler(uint32_t id,
					  uint32_t flags,
					  void *handle,
					  void *cookie)
{
	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == SECURE);

	/*
	 * Disable the routing of NS interrupts from secure world to EL3 while
	 * interrupted on this core.
	 */
	disable_intr_rm_local(INTR_TYPE_NS, SECURE);

	return tspd_handle_sp_preemption(handle);
}
#endif

/*******************************************************************************
 * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
 * (aarch32/aarch64) if not already known and initialises the context for entry
 * into the SP for its initialisation.
 ******************************************************************************/
int32_t tspd_setup(void)
{
	entry_point_info_t *tsp_ep_info;
	uint32_t linear_id;

	linear_id = plat_my_core_pos();

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure. TODO: Add support to
	 * conditionally include the SPD service
	 */
	tsp_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (!tsp_ep_info) {
		WARN("No TSP provided by BL2 boot loader, booting device"
			" without TSP initialization. SMCs destined for TSP"
			" will return SMC_UNK\n");
		return 1;
	}

	/*
	 * If there's no valid entry point for the SP, we return a non-zero
	 * value signalling failure to initialize the service. We bail out
	 * without registering any handlers.
	 */
	if (!tsp_ep_info->pc)
		return 1;

	/*
	 * We could inspect the SP image and determine its execution
	 * state, i.e. whether AArch32 or AArch64. Assume it's AArch64
	 * for the time being.
	 */
	tspd_init_tsp_ep_state(tsp_ep_info,
			       TSP_AARCH64,
			       tsp_ep_info->pc,
			       &tspd_sp_context[linear_id]);

#if TSP_INIT_ASYNC
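	/*
	 * With asynchronous TSP initialisation, BL31 hands control to the
	 * secure world first; tspd_smc_handler() switches to the normal world
	 * once the TSP reports TSP_ENTRY_DONE.
	 */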
	bl31_set_next_image_type(SECURE);
#else
	/*
	 * All TSPD initialization done. Now register our init function with
	 * BL31 for deferred invocation
	 */
	bl31_register_bl32_init(&tspd_init);
#endif
	return 0;
}

/*******************************************************************************
 * This function passes control to the Secure Payload image (BL32) for the first
 * time on the primary cpu after a cold boot. It assumes that a valid secure
 * context has already been created by tspd_setup() which can be directly used.
 * It also assumes that a valid non-secure context has been initialised by PSCI
 * so it does not need to save and restore any non-secure state. This function
 * performs a synchronous entry into the Secure payload. The SP passes control
 * back to this routine through an SMC.
 ******************************************************************************/
int32_t tspd_init(void)
{
	uint32_t linear_id = plat_my_core_pos();
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
	entry_point_info_t *tsp_entry_point;
	uint64_t rc;

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure.
	 */
	tsp_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
	assert(tsp_entry_point);

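	/* Initialise this core's CPU context with the TSP's entry point info */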
	cm_init_my_context(tsp_entry_point);

	/*
	 * Arrange for an entry into the test secure payload. Control will
	 * return to this function via the TSP_ENTRY_DONE case in
	 * tspd_smc_handler().
	 */
	rc = tspd_synchronous_sp_entry(tsp_ctx);
	assert(rc != 0);

	return rc;
}


/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure payload
 * to delegate work and return results back to the non-secure state. Lastly it
 * will also return any information that the secure payload needs to do the
 * work assigned to it.
 ******************************************************************************/
uint64_t tspd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	cpu_context_t *ns_cpu_context;
	uint32_t linear_id = plat_my_core_pos(), ns;
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
	uint64_t rc;
#if TSP_INIT_ASYNC
	entry_point_info_t *next_image_info;
#endif

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	switch (smc_fid) {

	/*
	 * This function ID is used by the TSP to indicate that it was
	 * preempted by a normal world IRQ.
	 */
	case TSP_PREEMPTED:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		return tspd_handle_sp_preemption(handle);

	/*
	 * This function ID is used only by the TSP to indicate that it has
	 * finished handling an S-EL1 interrupt or was preempted by a higher
	 * priority pending EL3 interrupt. Execution should resume in the
	 * normal world.
	 */
	case TSP_HANDLED_S_EL1_INTR:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		assert(handle == cm_get_context(SECURE));

		/*
		 * Restore the relevant EL3 state which was saved to service
		 * this SMC.
		 */
		if (get_yield_smc_active_flag(tsp_ctx->state)) {
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_SPSR_EL3,
				    tsp_ctx->saved_spsr_el3);
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_ELR_EL3,
				    tsp_ctx->saved_elr_el3);
#if TSP_NS_INTR_ASYNC_PREEMPT
			/*
			 * Need to restore the previously interrupted
			 * secure context.
			 */
			memcpy(&tsp_ctx->cpu_ctx, &tsp_ctx->sp_ctx,
				TSPD_SP_CTX_SIZE);
#endif
		}

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * Restore non-secure state. There is no need to save the
		 * secure system register context since the TSP was supposed
		 * to preserve it during S-EL1 interrupt handling.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

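		/*
		 * Return to the normal world; SMC_RET0 sets no return values,
		 * so the non-secure GP registers are restored unmodified.
		 */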
		SMC_RET0((uint64_t) ns_cpu_context);

	/*
	 * This function ID is used only by the SP to indicate it has
	 * finished initialising itself after a cold boot
	 */
	case TSP_ENTRY_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * Stash the SP entry points information. This is done
		 * only once on the primary cpu
		 */
		assert(tsp_vectors == NULL);
		tsp_vectors = (tsp_vectors_t *) x1;

		if (tsp_vectors) {
			set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);

			/*
			 * TSP has been successfully initialized. Register
			 * power management hooks with PSCI
			 */
			psci_register_spd_pm_hook(&tspd_pm);

			/*
			 * Register an interrupt handler for S-EL1 interrupts
			 * when generated during code executing in the
			 * non-secure state.
			 */
			flags = 0;
			set_interrupt_rm_flag(flags, NON_SECURE);
			rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
						tspd_sel1_interrupt_handler,
						flags);
			if (rc)
				panic();

#if TSP_NS_INTR_ASYNC_PREEMPT
			/*
			 * Register an interrupt handler for NS interrupts
			 * that are generated while code is executing in the
			 * secure state and routed to EL3.
			 */
			flags = 0;
			set_interrupt_rm_flag(flags, SECURE);

			rc = register_interrupt_type_handler(INTR_TYPE_NS,
						tspd_ns_interrupt_handler,
						flags);
			if (rc)
				panic();

			/*
			 * Disable the NS interrupt locally.
			 */
			disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
		}


#if TSP_INIT_ASYNC
		/* Save the Secure EL1 system register context */
		assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
		cm_el1_sysregs_context_save(SECURE);

		/* Program EL3 registers to enable entry into the next EL */
		next_image_info = bl31_plat_get_next_image_ep_info(NON_SECURE);
		assert(next_image_info);
		assert(NON_SECURE ==
				GET_SECURITY_STATE(next_image_info->h.attr));

		cm_init_my_context(next_image_info);
		cm_prepare_el3_exit(NON_SECURE);
		SMC_RET0(cm_get_context(NON_SECURE));
#else
		/*
		 * SP reports completion. The SPD must have initiated
		 * the original request through a synchronous entry
		 * into the SP. Jump back to the original C runtime
		 * context.
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);
#endif
	/*
	 * This function ID is used only by the SP to indicate it has finished
	 * aborting a preempted Yielding SMC Call.
	 */
	case TSP_ABORT_DONE:
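		/* Fall through to the common completion handling below */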

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. turning itself on in response to an earlier psci
	 *    cpu_on request
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 */
	case TSP_ON_DONE:
	case TSP_RESUME_DONE:

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci cpu_suspend
	 *    request.
	 * 2. turning itself off in response to an earlier psci
	 *    cpu_off request.
	 */
	case TSP_OFF_DONE:
	case TSP_SUSPEND_DONE:
	case TSP_SYSTEM_OFF_DONE:
	case TSP_SYSTEM_RESET_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * SP reports completion. The SPD must have initiated the
		 * original request through a synchronous entry into the SP.
		 * Jump back to the original C runtime context, and pass x1 as
		 * return value to the caller
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);

	/*
	 * Request from non-secure client to perform an
	 * arithmetic operation or response from secure
	 * payload to an earlier request.
	 */
	case TSP_FAST_FID(TSP_ADD):
	case TSP_FAST_FID(TSP_SUB):
	case TSP_FAST_FID(TSP_MUL):
	case TSP_FAST_FID(TSP_DIV):

	case TSP_YIELD_FID(TSP_ADD):
	case TSP_YIELD_FID(TSP_SUB):
	case TSP_YIELD_FID(TSP_MUL):
	case TSP_YIELD_FID(TSP_DIV):
		if (ns) {
			/*
			 * This is a fresh request from the non-secure client.
			 * The parameters are in x1 and x2. Figure out which
			 * registers need to be preserved, save the non-secure
			 * state and send the request to the secure payload.
			 */
			assert(handle == cm_get_context(NON_SECURE));

			/* Check if we are already preempted */
			if (get_yield_smc_active_flag(tsp_ctx->state))
				SMC_RET1(handle, SMC_UNK);

			cm_el1_sysregs_context_save(NON_SECURE);

			/* Save x1 and x2 for use by TSP_GET_ARGS call below */
			store_tsp_args(tsp_ctx, x1, x2);

			/*
			 * We are done stashing the non-secure context. Ask the
			 * secure payload to do the work now.
			 */

			/*
			 * Verify if there is a valid context to use, copy the
			 * operation type and parameters to the secure context
			 * and jump to the fast smc entry point in the secure
			 * payload. Entry into S-EL1 will take place upon exit
			 * from this function.
			 */
			assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

			/*
			 * Set appropriate entry for SMC. We expect the TSP to
			 * manage the PSTATE.I and PSTATE.F flags as
			 * appropriate.
			 */
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->fast_smc_entry);
			} else {
				set_yield_smc_active_flag(tsp_ctx->state);
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->yield_smc_entry);
#if TSP_NS_INTR_ASYNC_PREEMPT
				/*
				 * Enable the routing of NS interrupts to EL3
				 * during processing of a Yielding SMC Call on
				 * this core.
				 */
				enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif

#if EL3_EXCEPTION_HANDLING
				/*
				 * With EL3 exception handling, while an SMC is
				 * being processed, Non-secure interrupts can't
				 * preempt Secure execution. However, for
				 * yielding SMCs, we want preemption to happen;
				 * so explicitly allow NS preemption in this
				 * case.
				 */
				ehf_allow_ns_preemption();
#endif
			}

			cm_el1_sysregs_context_restore(SECURE);
			cm_set_next_eret_context(SECURE);
			SMC_RET3(&tsp_ctx->cpu_ctx, smc_fid, x1, x2);
		} else {
			/*
			 * This is the result from the secure payload of an
			 * earlier request. The results are in x1-x3. Copy
			 * them into the non-secure context, save the secure
			 * state and return to the non-secure state.
			 */
			assert(handle == cm_get_context(SECURE));
			cm_el1_sysregs_context_save(SECURE);

			/* Get a reference to the non-secure context */
			ns_cpu_context = cm_get_context(NON_SECURE);
			assert(ns_cpu_context);

			/* Restore non-secure state */
			cm_el1_sysregs_context_restore(NON_SECURE);
			cm_set_next_eret_context(NON_SECURE);
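			/*
			 * If this was a Yielding SMC Call, mark it complete
			 * on this core before returning its results.
			 */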
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_YIELD) {
				clr_yield_smc_active_flag(tsp_ctx->state);
#if TSP_NS_INTR_ASYNC_PREEMPT
				/*
				 * Disable the routing of NS interrupts to EL3
				 * after processing of a Yielding SMC Call on
				 * this core is finished.
				 */
				disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
			}

			SMC_RET3(ns_cpu_context, x1, x2, x3);
		}

		break;
	/*
	 * Request from the non-secure world to abort a preempted Yielding SMC
	 * Call.
	 */
	case TSP_FID_ABORT:
		/* ABORT should only be invoked by the normal world */
		if (!ns) {
			assert(0);
			break;
		}

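		/*
		 * Save the non-secure context; tspd_abort_preempted_smc()
		 * makes a synchronous entry into the TSP to abort the
		 * preempted SMC.
		 */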
		assert(handle == cm_get_context(NON_SECURE));
		cm_el1_sysregs_context_save(NON_SECURE);

		/* Abort the preempted SMC request */
		if (!tspd_abort_preempted_smc(tsp_ctx)) {
			/*
			 * If there was no preempted SMC to abort, return
			 * SMC_UNK.
			 *
			 * Restoring the NON_SECURE context is not necessary as
			 * the synchronous entry did not take place if the
			 * return code of tspd_abort_preempted_smc is zero.
			 */
			cm_set_next_eret_context(NON_SECURE);
			break;
		}

		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);
		SMC_RET1(handle, SMC_OK);

	/*
	 * Request from the non-secure world to resume the preempted
	 * Yielding SMC Call.
	 */
	case TSP_FID_RESUME:
		/* RESUME should be invoked only by the normal world */
		if (!ns) {
			assert(0);
			break;
		}

		/*
		 * This is a resume request from the non-secure client.
		 * Save the non-secure state and send the request to
		 * the secure payload.
		 */
		assert(handle == cm_get_context(NON_SECURE));

		/* Check if we are already preempted before resume */
		if (!get_yield_smc_active_flag(tsp_ctx->state))
			SMC_RET1(handle, SMC_UNK);

		cm_el1_sysregs_context_save(NON_SECURE);

		/*
		 * We are done stashing the non-secure context. Ask the
		 * secure payload to do the work now.
		 */
#if TSP_NS_INTR_ASYNC_PREEMPT
		/*
		 * Enable the routing of NS interrupts to EL3 during resumption
		 * of a Yielding SMC Call on this core.
		 */
		enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif

#if EL3_EXCEPTION_HANDLING
		/*
		 * Allow the resumed yielding SMC processing to be preempted by
		 * Non-secure interrupts.
		 */
		ehf_allow_ns_preemption();
#endif

		/*
		 * We just need to return to the preempted point in the TSP
		 * and execution will resume as normal.
		 */
		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);
		SMC_RET0(&tsp_ctx->cpu_ctx);

	/*
	 * This is a request from the secure payload for more arguments
	 * for an ongoing arithmetic operation requested by the
	 * non-secure world. Simply return the arguments from the non-
	 * secure client in the original call.
	 */
	case TSP_GET_ARGS:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		get_tsp_args(tsp_ctx, x1, x2);
		SMC_RET2(handle, x1, x2);

	case TOS_CALL_COUNT:
		/*
		 * Return the number of service function IDs implemented to
		 * provide service to the non-secure world
		 */
		SMC_RET1(handle, TSP_NUM_FID);

	case TOS_UID:
		/* Return TSP UID to the caller */
		SMC_UUID_RET(handle, tsp_uuid);

	case TOS_CALL_VERSION:
		/* Return the version of the current implementation */
		SMC_RET2(handle, TSP_VERSION_MAJOR, TSP_VERSION_MINOR);

	default:
		break;
	}

	SMC_RET1(handle, SMC_UNK);
}

/* Define an SPD runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
	tspd_fast,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	tspd_setup,
	tspd_smc_handler
);

/* Define an SPD runtime service descriptor for Yielding SMC Calls */
DECLARE_RT_SVC(
	tspd_std,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_YIELD,
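	/* No init function needed: tspd_setup() is already registered above */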
	NULL,
	tspd_smc_handler
);