/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */


/*******************************************************************************
 * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
 * plug-in component to the Secure Monitor, registered as a runtime service. The
 * SPD is expected to be a functional extension of the Secure Payload (SP) that
 * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
 * the Trusted OS/Applications range to the dispatcher. The SPD will either
 * handle the request locally or delegate it to the Secure Payload. It is also
 * responsible for initialising and maintaining communication with the SP.
 ******************************************************************************/
#include <arch_helpers.h>
#include <assert.h>
#include <bl31.h>
#include <bl_common.h>
#include <context_mgmt.h>
#include <debug.h>
#include <ehf.h>
#include <errno.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include <string.h>
#include <tsp.h>
#include <uuid.h>
#include "tspd_private.h"

/*******************************************************************************
 * Address of the entrypoint vector table in the Secure Payload. It is
 * initialised once on the primary core after a cold boot.
 ******************************************************************************/
tsp_vectors_t *tsp_vectors;

/*******************************************************************************
 * Array to keep track of per-cpu Secure Payload state
 ******************************************************************************/
tsp_context_t tspd_sp_context[TSPD_CORE_COUNT];


/* TSP UID */
DEFINE_SVC_UUID2(tsp_uuid,
	0xa056305b, 0x9132, 0x7b42, 0x98, 0x11,
	0x71, 0x68, 0xca, 0x50, 0xf3, 0xfa);

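/*
 * Forward declaration: tspd_setup() registers tspd_init() with BL31 so that it
 * is invoked once BL31 has finished its own initialisation.
 */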
int32_t tspd_init(void);

/*
 * This helper function handles Secure EL1 preemption. The preemption could be
 * due to Non-secure interrupts or EL3 interrupts. In both cases we context
 * switch to the normal world; in the case of EL3 interrupts, the interrupt
 * will again be routed to EL3 and handled at the exception vectors.
 */
uint64_t tspd_handle_sp_preemption(void *handle)
{
	cpu_context_t *ns_cpu_context;

	assert(handle == cm_get_context(SECURE));
	cm_el1_sysregs_context_save(SECURE);
	/* Get a reference to the non-secure context */
	ns_cpu_context = cm_get_context(NON_SECURE);
	assert(ns_cpu_context);

	/*
	 * To allow the Secure EL1 interrupt handler to re-enter the TSP while
	 * the TSP is preempted, the secure system register context which will
	 * get overwritten must be additionally saved. This is currently done
	 * by the TSPD S-EL1 interrupt handler.
	 */

	/*
	 * Restore non-secure state.
	 */
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	/*
	 * The TSP was preempted during execution of a Yielding SMC Call.
	 * Return to the normal world with SMC_PREEMPTED as the error code
	 * in x0.
	 */
	SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
}

/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the TSPD. It
 * validates the interrupt and upon success arranges entry into the TSP at
 * 'tsp_sel1_intr_entry()' for handling the interrupt.
 ******************************************************************************/
static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
					    uint32_t flags,
					    void *handle,
					    void *cookie)
{
	uint32_t linear_id;
	tsp_context_t *tsp_ctx;

	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering the TSP */
	cm_el1_sysregs_context_save(NON_SECURE);

	/* Get a reference to this cpu's TSP context */
	linear_id = plat_my_core_pos();
	tsp_ctx = &tspd_sp_context[linear_id];
	assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

	/*
	 * Determine if the TSP was previously preempted. Its last known
	 * context has to be preserved in this case.
	 * The TSP should return control to the TSPD after handling this
	 * S-EL1 interrupt. Preserve essential EL3 context to allow entry into
	 * the TSP at the S-EL1 interrupt entry point using the 'cpu_context'
	 * structure. There is no need to save the secure system register
	 * context since the TSP is supposed to preserve it during S-EL1
	 * interrupt handling.
	 */
	if (get_yield_smc_active_flag(tsp_ctx->state)) {
		tsp_ctx->saved_spsr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
						      CTX_SPSR_EL3);
		tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
						     CTX_ELR_EL3);
#if TSP_NS_INTR_ASYNC_PREEMPT
		/* Need to save the previously interrupted secure context */
		memcpy(&tsp_ctx->sp_ctx, &tsp_ctx->cpu_ctx, TSPD_SP_CTX_SIZE);
#endif
	}

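	/*
	 * Restore the secure system register context and arrange for an ERET
	 * into the TSP's S-EL1 interrupt entry point with all exceptions
	 * masked on entry.
	 */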
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_elr_spsr_el3(SECURE, (uint64_t) &tsp_vectors->sel1_intr_entry,
		    SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

	cm_set_next_eret_context(SECURE);

	/*
	 * Tell the TSP that it has to handle an S-EL1 interrupt synchronously.
	 * The address of the normal world instruction at which the interrupt
	 * was generated is also passed for debugging purposes. It is safe to
	 * retrieve this address from ELR_EL3 as the secure context will not
	 * take effect until el3_exit().
	 */
	SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_SEL1_INTR_AND_RETURN, read_elr_el3());
}

#if TSP_NS_INTR_ASYNC_PREEMPT
/*******************************************************************************
 * This function is the handler registered for Non-secure interrupts by the
 * TSPD. It validates the interrupt and upon success arranges entry into the
 * normal world for handling the interrupt.
 ******************************************************************************/
static uint64_t tspd_ns_interrupt_handler(uint32_t id,
					  uint32_t flags,
					  void *handle,
					  void *cookie)
{
	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == SECURE);

	/*
	 * Disable the routing of NS interrupts from the secure world to EL3
	 * while interrupted on this core.
	 */
	disable_intr_rm_local(INTR_TYPE_NS, SECURE);

	return tspd_handle_sp_preemption(handle);
}
#endif

/*******************************************************************************
 * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
 * (aarch32/aarch64) if not already known and initialises the context for entry
 * into the SP for its initialisation.
 ******************************************************************************/
static int32_t tspd_setup(void)
{
	entry_point_info_t *tsp_ep_info;
	uint32_t linear_id;

	linear_id = plat_my_core_pos();

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure. TODO: Add support to
	 * conditionally include the SPD service
	 */
	tsp_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (!tsp_ep_info) {
		WARN("No TSP provided by BL2 boot loader, Booting device"
			" without TSP initialization. SMCs destined for TSP"
			" will return SMC_UNK\n");
		return 1;
	}

	/*
	 * If there's no valid entry point for the SP, we return a non-zero
	 * value signalling failure to initialise the service. We bail out
	 * without registering any handlers.
	 */
	if (!tsp_ep_info->pc)
		return 1;

	/*
	 * We could inspect the SP image and determine its execution
	 * state, i.e. whether it is AArch32 or AArch64. Assuming it's
	 * AArch64 for the time being.
	 */
	tspd_init_tsp_ep_state(tsp_ep_info,
			       TSP_AARCH64,
			       tsp_ep_info->pc,
			       &tspd_sp_context[linear_id]);

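	/*
	 * With asynchronous initialisation (TSP_INIT_ASYNC), BL31 hands
	 * control to the TSP as the next image rather than entering it
	 * synchronously via tspd_init().
	 */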
#if TSP_INIT_ASYNC
	bl31_set_next_image_type(SECURE);
#else
	/*
	 * All TSPD initialization done. Now register our init function with
	 * BL31 for deferred invocation
	 */
	bl31_register_bl32_init(&tspd_init);
#endif
	return 0;
}

/*******************************************************************************
 * This function passes control to the Secure Payload image (BL32) for the first
 * time on the primary cpu after a cold boot. It assumes that a valid secure
 * context has already been created by tspd_setup() which can be directly used.
 * It also assumes that a valid non-secure context has been initialised by PSCI
 * so it does not need to save and restore any non-secure state. This function
 * performs a synchronous entry into the Secure payload. The SP passes control
 * back to this routine through an SMC.
 ******************************************************************************/
int32_t tspd_init(void)
{
	uint32_t linear_id = plat_my_core_pos();
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
	entry_point_info_t *tsp_entry_point;
	uint64_t rc;

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure.
	 */
	tsp_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
	assert(tsp_entry_point);

	cm_init_my_context(tsp_entry_point);

	/*
	 * Arrange for an entry into the test secure payload. Control will
	 * be returned back via the TSP_ENTRY_DONE case.
	 */
	rc = tspd_synchronous_sp_entry(tsp_ctx);
	assert(rc != 0);

	return rc;
}


/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure payload
 * to delegate work and return results back to the non-secure state. Lastly it
 * will also return any information that the secure payload needs to do the
 * work assigned to it.
 ******************************************************************************/
static uintptr_t tspd_smc_handler(uint32_t smc_fid,
				 u_register_t x1,
				 u_register_t x2,
				 u_register_t x3,
				 u_register_t x4,
				 void *cookie,
				 void *handle,
				 u_register_t flags)
{
	cpu_context_t *ns_cpu_context;
	uint32_t linear_id = plat_my_core_pos(), ns;
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
	uint64_t rc;
#if TSP_INIT_ASYNC
	entry_point_info_t *next_image_info;
#endif

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	switch (smc_fid) {

	/*
	 * This function ID is used by the TSP to indicate that it was
	 * preempted by a normal world IRQ.
	 */
	case TSP_PREEMPTED:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		return tspd_handle_sp_preemption(handle);

	/*
	 * This function ID is used only by the TSP to indicate that it has
	 * finished handling an S-EL1 interrupt or was preempted by a higher
	 * priority pending EL3 interrupt. Execution should resume
	 * in the normal world.
	 */
	case TSP_HANDLED_S_EL1_INTR:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		assert(handle == cm_get_context(SECURE));

		/*
		 * Restore the relevant EL3 state which was saved to service
		 * this SMC.
		 */
		if (get_yield_smc_active_flag(tsp_ctx->state)) {
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_SPSR_EL3,
				    tsp_ctx->saved_spsr_el3);
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_ELR_EL3,
				    tsp_ctx->saved_elr_el3);
#if TSP_NS_INTR_ASYNC_PREEMPT
			/*
			 * Need to restore the previously interrupted
			 * secure context.
			 */
			memcpy(&tsp_ctx->cpu_ctx, &tsp_ctx->sp_ctx,
				TSPD_SP_CTX_SIZE);
#endif
		}

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * Restore non-secure state. There is no need to save the
		 * secure system register context since the TSP was supposed
		 * to preserve it during S-EL1 interrupt handling.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

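		/*
		 * Resume the normal world at the interrupted instruction;
		 * SMC_RET0 leaves its return registers untouched.
		 */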
		SMC_RET0((uint64_t) ns_cpu_context);

	/*
	 * This function ID is used only by the SP to indicate it has
	 * finished initialising itself after a cold boot
	 */
	case TSP_ENTRY_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * Stash the SP entry points information. This is done
		 * only once on the primary cpu
		 */
		assert(tsp_vectors == NULL);
		tsp_vectors = (tsp_vectors_t *) x1;

		if (tsp_vectors) {
			set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);

			/*
			 * TSP has been successfully initialized. Register
			 * power management hooks with PSCI
			 */
			psci_register_spd_pm_hook(&tspd_pm);

			/*
			 * Register an interrupt handler for S-EL1 interrupts
			 * when generated during code executing in the
			 * non-secure state.
			 */
			flags = 0;
			set_interrupt_rm_flag(flags, NON_SECURE);
			rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
						tspd_sel1_interrupt_handler,
						flags);
			if (rc)
				panic();

#if TSP_NS_INTR_ASYNC_PREEMPT
			/*
			 * Register an interrupt handler for NS interrupts
			 * that are generated while code is executing in the
			 * secure state and are routed to EL3.
			 */
			flags = 0;
			set_interrupt_rm_flag(flags, SECURE);

			rc = register_interrupt_type_handler(INTR_TYPE_NS,
						tspd_ns_interrupt_handler,
						flags);
			if (rc)
				panic();

			/*
			 * Disable the NS interrupt locally.
			 */
			disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
		}


#if TSP_INIT_ASYNC
		/* Save the Secure EL1 system register context */
		assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
		cm_el1_sysregs_context_save(SECURE);

		/* Program EL3 registers to enable entry into the next EL */
		next_image_info = bl31_plat_get_next_image_ep_info(NON_SECURE);
		assert(next_image_info);
		assert(NON_SECURE ==
				GET_SECURITY_STATE(next_image_info->h.attr));

		cm_init_my_context(next_image_info);
		cm_prepare_el3_exit(NON_SECURE);
		SMC_RET0(cm_get_context(NON_SECURE));
#else
		/*
		 * SP reports completion. The SPD must have initiated
		 * the original request through a synchronous entry
		 * into the SP. Jump back to the original C runtime
		 * context.
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);
		break;
#endif
	/*
	 * This function ID is used only by the SP to indicate it has finished
	 * aborting a preempted Yielding SMC Call.
	 */
	case TSP_ABORT_DONE:

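	/*
	 * TSP_ABORT_DONE deliberately falls through to the common completion
	 * handling below.
	 */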
	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. turning itself on in response to an earlier psci
	 *    cpu_on request
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 */
	case TSP_ON_DONE:
	case TSP_RESUME_DONE:

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci cpu_suspend
	 *    request.
	 * 2. turning itself off in response to an earlier psci
	 *    cpu_off request.
	 */
	case TSP_OFF_DONE:
	case TSP_SUSPEND_DONE:
	case TSP_SYSTEM_OFF_DONE:
	case TSP_SYSTEM_RESET_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * SP reports completion. The SPD must have initiated the
		 * original request through a synchronous entry into the SP.
		 * Jump back to the original C runtime context, and pass x1 as
		 * return value to the caller
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);
		break;

	/*
	 * Request from non-secure client to perform an
	 * arithmetic operation or response from secure
	 * payload to an earlier request.
	 */
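	/*
	 * Illustrative flow, not an additional ABI guarantee: a non-secure
	 * client issues e.g. SMC(TSP_YIELD_FID(TSP_ADD), x1, x2). On
	 * completion the TSP's results are copied into the client's return
	 * registers; if the call was preempted, the client receives
	 * SMC_PREEMPTED instead and may invoke TSP_FID_RESUME or
	 * TSP_FID_ABORT.
	 */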
	case TSP_FAST_FID(TSP_ADD):
	case TSP_FAST_FID(TSP_SUB):
	case TSP_FAST_FID(TSP_MUL):
	case TSP_FAST_FID(TSP_DIV):

	case TSP_YIELD_FID(TSP_ADD):
	case TSP_YIELD_FID(TSP_SUB):
	case TSP_YIELD_FID(TSP_MUL):
	case TSP_YIELD_FID(TSP_DIV):
		if (ns) {
			/*
			 * This is a fresh request from the non-secure client.
			 * The parameters are in x1 and x2. Figure out which
			 * registers need to be preserved, save the non-secure
			 * state and send the request to the secure payload.
			 */
			assert(handle == cm_get_context(NON_SECURE));

			/* Check if we are already preempted */
			if (get_yield_smc_active_flag(tsp_ctx->state))
				SMC_RET1(handle, SMC_UNK);

			cm_el1_sysregs_context_save(NON_SECURE);

			/* Save x1 and x2 for use by TSP_GET_ARGS call below */
			store_tsp_args(tsp_ctx, x1, x2);

			/*
			 * We are done stashing the non-secure context. Ask the
			 * secure payload to do the work now.
			 */

			/*
			 * Verify if there is a valid context to use, copy the
			 * operation type and parameters to the secure context
			 * and jump to the fast smc entry point in the secure
			 * payload. Entry into S-EL1 will take place upon exit
			 * from this function.
			 */
			assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

			/*
			 * Set the appropriate entry point for the SMC.
			 * We expect the TSP to manage the PSTATE.I and
			 * PSTATE.F flags as appropriate.
			 */
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->fast_smc_entry);
			} else {
				set_yield_smc_active_flag(tsp_ctx->state);
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->yield_smc_entry);
#if TSP_NS_INTR_ASYNC_PREEMPT
				/*
				 * Enable the routing of NS interrupts to EL3
				 * during processing of a Yielding SMC Call on
				 * this core.
				 */
				enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif

#if EL3_EXCEPTION_HANDLING
				/*
				 * With EL3 exception handling, while an SMC is
				 * being processed, Non-secure interrupts can't
				 * preempt Secure execution. However, for
				 * yielding SMCs, we want preemption to happen;
				 * so explicitly allow NS preemption in this
				 * case, and supply the preemption return code
				 * for the TSP.
				 */
				ehf_allow_ns_preemption(TSP_PREEMPTED);
#endif
			}

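			/*
			 * Enter the TSP: the function ID is passed in x0 and
			 * the operands in x1/x2.
			 */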
			cm_el1_sysregs_context_restore(SECURE);
			cm_set_next_eret_context(SECURE);
			SMC_RET3(&tsp_ctx->cpu_ctx, smc_fid, x1, x2);
		} else {
			/*
			 * This is the result from the secure payload of an
			 * earlier request. The results are in x1-x3. Copy
			 * them into the non-secure context, save the secure
			 * state and return to the non-secure state.
			 */
			assert(handle == cm_get_context(SECURE));
			cm_el1_sysregs_context_save(SECURE);

			/* Get a reference to the non-secure context */
			ns_cpu_context = cm_get_context(NON_SECURE);
			assert(ns_cpu_context);

			/* Restore non-secure state */
			cm_el1_sysregs_context_restore(NON_SECURE);
			cm_set_next_eret_context(NON_SECURE);
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_YIELD) {
				clr_yield_smc_active_flag(tsp_ctx->state);
#if TSP_NS_INTR_ASYNC_PREEMPT
				/*
				 * Disable the routing of NS interrupts to EL3
				 * after processing of a Yielding SMC Call on
				 * this core is finished.
				 */
				disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
			}

			SMC_RET3(ns_cpu_context, x1, x2, x3);
		}
		assert(0); /* Unreachable */

	/*
	 * Request from the non-secure world to abort a preempted Yielding SMC
	 * Call.
	 */
	case TSP_FID_ABORT:
		/* ABORT should only be invoked by the normal world */
		if (!ns) {
			assert(0);
			break;
		}

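		/*
		 * tspd_abort_preempted_smc() performs a synchronous entry into
		 * the TSP to run its abort handler, so stash the normal world
		 * context first.
		 */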
		assert(handle == cm_get_context(NON_SECURE));
		cm_el1_sysregs_context_save(NON_SECURE);

		/* Abort the preempted SMC request */
		if (!tspd_abort_preempted_smc(tsp_ctx)) {
			/*
			 * If there was no preempted SMC to abort, return
			 * SMC_UNK.
			 *
			 * Restoring the NON_SECURE context is not necessary as
			 * the synchronous entry did not take place if the
			 * return code of tspd_abort_preempted_smc is zero.
			 */
			cm_set_next_eret_context(NON_SECURE);
			break;
		}

		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);
		SMC_RET1(handle, SMC_OK);

	/*
	 * Request from the non-secure world to resume the preempted
	 * Yielding SMC Call.
	 */
	case TSP_FID_RESUME:
		/* RESUME should be invoked only by the normal world */
		if (!ns) {
			assert(0);
			break;
		}

		/*
		 * This is a resume request from the non-secure client.
		 * Save the non-secure state and send the request to
		 * the secure payload.
		 */
		assert(handle == cm_get_context(NON_SECURE));

		/* Check if we are already preempted before resume */
		if (!get_yield_smc_active_flag(tsp_ctx->state))
			SMC_RET1(handle, SMC_UNK);

		cm_el1_sysregs_context_save(NON_SECURE);

		/*
		 * We are done stashing the non-secure context. Ask the
		 * secure payload to do the work now.
		 */
#if TSP_NS_INTR_ASYNC_PREEMPT
		/*
		 * Enable the routing of NS interrupts to EL3 during resumption
		 * of a Yielding SMC Call on this core.
		 */
		enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif

#if EL3_EXCEPTION_HANDLING
		/*
		 * Allow the resumed yielding SMC processing to be preempted by
		 * Non-secure interrupts. Also, supply the preemption return
		 * code for the TSP.
		 */
		ehf_allow_ns_preemption(TSP_PREEMPTED);
#endif

		/*
		 * We just need to return to the preempted point in the TSP
		 * and the execution will resume as normal.
		 */
		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);
		SMC_RET0(&tsp_ctx->cpu_ctx);

	/*
	 * This is a request from the secure payload for more arguments
	 * for an ongoing arithmetic operation requested by the
	 * non-secure world. Simply return the arguments from the
	 * non-secure client in the original call.
	 */
	case TSP_GET_ARGS:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		get_tsp_args(tsp_ctx, x1, x2);
		SMC_RET2(handle, x1, x2);

	case TOS_CALL_COUNT:
		/*
		 * Return the number of service function IDs implemented to
		 * provide service to the non-secure world.
		 */
		SMC_RET1(handle, TSP_NUM_FID);

	case TOS_UID:
		/* Return TSP UID to the caller */
		SMC_UUID_RET(handle, tsp_uuid);

	case TOS_CALL_VERSION:
		/* Return the version of the current implementation */
		SMC_RET2(handle, TSP_VERSION_MAJOR, TSP_VERSION_MINOR);

	default:
		break;
	}

	SMC_RET1(handle, SMC_UNK);
}

/* Define an SPD runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
	tspd_fast,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	tspd_setup,
	tspd_smc_handler
);

/* Define an SPD runtime service descriptor for Yielding SMC Calls */
DECLARE_RT_SVC(
	tspd_std,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_YIELD,
	NULL,
	tspd_smc_handler
);