/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */


/*******************************************************************************
 * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
 * plug-in component to the Secure Monitor, registered as a runtime service. The
 * SPD is expected to be a functional extension of the Secure Payload (SP) that
 * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
 * the Trusted OS/Applications range to the dispatcher. The SPD will either
 * handle the request locally or delegate it to the Secure Payload. It is also
 * responsible for initialising and maintaining communication with the SP.
 ******************************************************************************/
#include <arch_helpers.h>
#include <assert.h>
#include <bl31.h>
#include <bl_common.h>
#include <context_mgmt.h>
#include <debug.h>
#include <ehf.h>
#include <errno.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include <string.h>
#include <tsp.h>
#include <uuid.h>
#include "tspd_private.h"

/*******************************************************************************
 * Address of the entrypoint vector table in the Secure Payload. It is
 * initialised once on the primary core after a cold boot.
 ******************************************************************************/
tsp_vectors_t *tsp_vectors;

/*******************************************************************************
 * Array to keep track of per-cpu Secure Payload state
 ******************************************************************************/
tsp_context_t tspd_sp_context[TSPD_CORE_COUNT];


/* TSP UID */
DEFINE_SVC_UUID2(tsp_uuid,
	0xa056305b, 0x9132, 0x7b42, 0x98, 0x11,
	0x71, 0x68, 0xca, 0x50, 0xf3, 0xfa);

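/*
 * Forward declaration of the TSP initialisation routine; it is registered
 * with BL31 as the BL32 init function in tspd_setup() below.
 */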
int32_t tspd_init(void);

/*
 * This helper function handles Secure EL1 preemption. The preemption could be
 * due to Non-secure interrupts or EL3 interrupts. In both cases we context
 * switch to the normal world; in the case of EL3 interrupts, the interrupt
 * will again be routed to EL3 and handled at the exception vectors.
 */
uint64_t tspd_handle_sp_preemption(void *handle)
{
	cpu_context_t *ns_cpu_context;

	assert(handle == cm_get_context(SECURE));
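	/* Save the secure context before switching to the normal world */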
	cm_el1_sysregs_context_save(SECURE);
	/* Get a reference to the non-secure context */
	ns_cpu_context = cm_get_context(NON_SECURE);
	assert(ns_cpu_context);

	/*
	 * To allow the Secure EL1 interrupt handler to re-enter the TSP while
	 * the TSP is preempted, the secure system register context which will
	 * get overwritten must be additionally saved. This is currently done
	 * by the TSPD S-EL1 interrupt handler.
	 */

	/*
	 * Restore non-secure state.
	 */
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	/*
	 * The TSP was preempted during execution of a Yielding SMC Call.
	 * Return to the normal world with SMC_PREEMPTED as the error
	 * code in x0.
	 */
	SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
}

/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the TSPD. It
 * validates the interrupt and upon success arranges entry into the TSP at
 * 'tsp_sel1_intr_entry()' for handling the interrupt.
 ******************************************************************************/
static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
					    uint32_t flags,
					    void *handle,
					    void *cookie)
{
	uint32_t linear_id;
	tsp_context_t *tsp_ctx;

	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering the TSP */
	cm_el1_sysregs_context_save(NON_SECURE);

	/* Get a reference to this cpu's TSP context */
	linear_id = plat_my_core_pos();
	tsp_ctx = &tspd_sp_context[linear_id];
	assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

	/*
	 * Determine if the TSP was previously preempted. Its last known
	 * context has to be preserved in this case.
	 * The TSP should return control to the TSPD after handling this
	 * S-EL1 interrupt. Preserve essential EL3 context to allow entry into
	 * the TSP at the S-EL1 interrupt entry point using the 'cpu_context'
	 * structure. There is no need to save the secure system register
	 * context since the TSP is supposed to preserve it during S-EL1
	 * interrupt handling.
	 */
	if (get_yield_smc_active_flag(tsp_ctx->state)) {
		tsp_ctx->saved_spsr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
						      CTX_SPSR_EL3);
		tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
						     CTX_ELR_EL3);
#if TSP_NS_INTR_ASYNC_PREEMPT
		/* Need to save the previously interrupted secure context */
		memcpy(&tsp_ctx->sp_ctx, &tsp_ctx->cpu_ctx, TSPD_SP_CTX_SIZE);
#endif
	}

	cm_el1_sysregs_context_restore(SECURE);
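	/*
	 * Set up entry into the TSP at its S-EL1 interrupt vector, with all
	 * exceptions masked, on the next ERET from EL3.
	 */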
	cm_set_elr_spsr_el3(SECURE, (uint64_t) &tsp_vectors->sel1_intr_entry,
		    SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

	cm_set_next_eret_context(SECURE);

	/*
	 * Tell the TSP that it has to handle an S-EL1 interrupt synchronously.
	 * The address of the instruction in the normal world where the
	 * interrupt was generated is also passed for debugging purposes. It
	 * is safe to retrieve this address from ELR_EL3 as the secure context
	 * will not take effect until el3_exit().
	 */
	SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_SEL1_INTR_AND_RETURN, read_elr_el3());
}

#if TSP_NS_INTR_ASYNC_PREEMPT
/*******************************************************************************
 * This function is the handler registered for non-secure interrupts by the
 * TSPD. It validates the interrupt and upon success arranges entry into the
 * normal world for handling the interrupt.
 ******************************************************************************/
static uint64_t tspd_ns_interrupt_handler(uint32_t id,
					  uint32_t flags,
					  void *handle,
					  void *cookie)
{
	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == SECURE);

	/*
	 * Disable the routing of NS interrupts from secure world to EL3 while
	 * interrupted on this core.
	 */
	disable_intr_rm_local(INTR_TYPE_NS, SECURE);

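	/* Hand over to the common Secure EL1 preemption handler */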
	return tspd_handle_sp_preemption(handle);
}
#endif

/*******************************************************************************
 * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
 * (aarch32/aarch64) if not already known and initialises the context for entry
 * into the SP for its initialisation.
 ******************************************************************************/
static int32_t tspd_setup(void)
{
	entry_point_info_t *tsp_ep_info;
	uint32_t linear_id;

	linear_id = plat_my_core_pos();

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure. TODO: Add support to
	 * conditionally include the SPD service
	 */
	tsp_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (!tsp_ep_info) {
		WARN("No TSP provided by BL2 boot loader, Booting device"
			" without TSP initialization. SMCs destined for TSP"
			" will return SMC_UNK\n");
		return 1;
	}

	/*
	 * If there's no valid entry point for the SP, we return a non-zero
	 * value signalling failure to initialize the service. We bail out
	 * without registering any handlers.
	 */
	if (!tsp_ep_info->pc)
		return 1;

	/*
	 * We could inspect the SP image and determine its execution
	 * state, i.e. whether it is AArch32 or AArch64. Assume it is
	 * AArch64 for the time being.
	 */
	tspd_init_tsp_ep_state(tsp_ep_info,
				TSP_AARCH64,
				tsp_ep_info->pc,
				&tspd_sp_context[linear_id]);

#if TSP_INIT_ASYNC
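	/*
	 * Defer TSP initialisation: ask BL31 to hand control to the TSP
	 * (the secure image) as the next image instead of invoking
	 * tspd_init() synchronously.
	 */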
	bl31_set_next_image_type(SECURE);
#else
	/*
	 * All TSPD initialization done. Now register our init function with
	 * BL31 for deferred invocation
	 */
	bl31_register_bl32_init(&tspd_init);
#endif
	return 0;
}

/*******************************************************************************
 * This function passes control to the Secure Payload image (BL32) for the first
 * time on the primary cpu after a cold boot. It assumes that a valid secure
 * context has already been created by tspd_setup() which can be directly used.
 * It also assumes that a valid non-secure context has been initialised by PSCI
 * so it does not need to save and restore any non-secure state. This function
 * performs a synchronous entry into the Secure payload. The SP passes control
 * back to this routine through an SMC.
 ******************************************************************************/
int32_t tspd_init(void)
{
	uint32_t linear_id = plat_my_core_pos();
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
	entry_point_info_t *tsp_entry_point;
	uint64_t rc;

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure.
	 */
	tsp_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
	assert(tsp_entry_point);

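	/* Initialise this cpu's secure context using the TSP entry point info */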
	cm_init_my_context(tsp_entry_point);

	/*
	 * Arrange for an entry into the test secure payload. Control will
	 * be returned to this routine via the TSP_ENTRY_DONE case.
	 */
	rc = tspd_synchronous_sp_entry(tsp_ctx);
	assert(rc != 0);

	return rc;
}


/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure payload
 * to delegate work and return results back to the non-secure state. Lastly it
 * will also return any information that the secure payload needs to do the
 * work assigned to it.
 ******************************************************************************/
static uintptr_t tspd_smc_handler(uint32_t smc_fid,
				  u_register_t x1,
				  u_register_t x2,
				  u_register_t x3,
				  u_register_t x4,
				  void *cookie,
				  void *handle,
				  u_register_t flags)
{
	cpu_context_t *ns_cpu_context;
	uint32_t linear_id = plat_my_core_pos(), ns;
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
	uint64_t rc;
#if TSP_INIT_ASYNC
	entry_point_info_t *next_image_info;
#endif

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	switch (smc_fid) {

	/*
	 * This function ID is used by the TSP to indicate that it was
	 * preempted by a normal world IRQ.
	 */
	case TSP_PREEMPTED:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		return tspd_handle_sp_preemption(handle);

	/*
	 * This function ID is used only by the TSP to indicate that it has
	 * finished handling an S-EL1 interrupt or was preempted by a higher
	 * priority pending EL3 interrupt. Execution should resume
	 * in the normal world.
	 */
	case TSP_HANDLED_S_EL1_INTR:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		assert(handle == cm_get_context(SECURE));

		/*
		 * Restore the relevant EL3 state which was saved to service
		 * this SMC.
		 */
		if (get_yield_smc_active_flag(tsp_ctx->state)) {
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_SPSR_EL3,
				    tsp_ctx->saved_spsr_el3);
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_ELR_EL3,
				    tsp_ctx->saved_elr_el3);
#if TSP_NS_INTR_ASYNC_PREEMPT
			/*
			 * Need to restore the previously interrupted
			 * secure context.
			 */
			memcpy(&tsp_ctx->cpu_ctx, &tsp_ctx->sp_ctx,
				TSPD_SP_CTX_SIZE);
#endif
		}

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * Restore non-secure state. There is no need to save the
		 * secure system register context since the TSP was supposed
		 * to preserve it during S-EL1 interrupt handling.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET0((uint64_t) ns_cpu_context);

	/*
	 * This function ID is used only by the SP to indicate it has
	 * finished initialising itself after a cold boot
	 */
	case TSP_ENTRY_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * Stash the SP entry points information. This is done
		 * only once on the primary cpu
		 */
		assert(tsp_vectors == NULL);
		tsp_vectors = (tsp_vectors_t *) x1;

		if (tsp_vectors) {
			set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);

			/*
			 * TSP has been successfully initialized. Register power
			 * management hooks with PSCI
			 */
			psci_register_spd_pm_hook(&tspd_pm);

			/*
			 * Register an interrupt handler for S-EL1 interrupts
			 * generated while executing code in the non-secure
			 * state.
			 */
			flags = 0;
			set_interrupt_rm_flag(flags, NON_SECURE);
			rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
						tspd_sel1_interrupt_handler,
						flags);
			if (rc)
				panic();

#if TSP_NS_INTR_ASYNC_PREEMPT
			/*
			 * Register an interrupt handler for NS interrupts
			 * generated while executing code in the secure state,
			 * which are routed to EL3.
			 */
			flags = 0;
			set_interrupt_rm_flag(flags, SECURE);

			rc = register_interrupt_type_handler(INTR_TYPE_NS,
						tspd_ns_interrupt_handler,
						flags);
			if (rc)
				panic();

			/*
			 * Disable the NS interrupt locally.
			 */
			disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
		}


#if TSP_INIT_ASYNC
		/* Save the Secure EL1 system register context */
		assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
		cm_el1_sysregs_context_save(SECURE);

		/* Program EL3 registers to enable entry into the next EL */
		next_image_info = bl31_plat_get_next_image_ep_info(NON_SECURE);
		assert(next_image_info);
		assert(NON_SECURE ==
				GET_SECURITY_STATE(next_image_info->h.attr));

		cm_init_my_context(next_image_info);
		cm_prepare_el3_exit(NON_SECURE);
		SMC_RET0(cm_get_context(NON_SECURE));
#else
		/*
		 * SP reports completion. The SPD must have initiated
		 * the original request through a synchronous entry
		 * into the SP. Jump back to the original C runtime
		 * context.
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);
		break;
#endif
	/*
	 * This function ID is used only by the SP to indicate it has finished
	 * aborting a preempted Yielding SMC Call.
	 */
	case TSP_ABORT_DONE:

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. turning itself on in response to an earlier psci
	 *    cpu_on request
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 */
	case TSP_ON_DONE:
	case TSP_RESUME_DONE:

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci cpu_suspend
	 *    request.
	 * 2. turning itself off in response to an earlier psci
	 *    cpu_off request.
	 */
	case TSP_OFF_DONE:
	case TSP_SUSPEND_DONE:
	case TSP_SYSTEM_OFF_DONE:
	case TSP_SYSTEM_RESET_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * SP reports completion. The SPD must have initiated the
		 * original request through a synchronous entry into the SP.
		 * Jump back to the original C runtime context, and pass x1 as
		 * return value to the caller
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);
		break;

	/*
	 * Request from non-secure client to perform an
	 * arithmetic operation or response from secure
	 * payload to an earlier request.
	 */
	case TSP_FAST_FID(TSP_ADD):
	case TSP_FAST_FID(TSP_SUB):
	case TSP_FAST_FID(TSP_MUL):
	case TSP_FAST_FID(TSP_DIV):

	case TSP_YIELD_FID(TSP_ADD):
	case TSP_YIELD_FID(TSP_SUB):
	case TSP_YIELD_FID(TSP_MUL):
	case TSP_YIELD_FID(TSP_DIV):
		if (ns) {
			/*
			 * This is a fresh request from the non-secure client.
			 * The parameters are in x1 and x2. Figure out which
			 * registers need to be preserved, save the non-secure
			 * state and send the request to the secure payload.
			 */
			assert(handle == cm_get_context(NON_SECURE));

			/* Check if we are already preempted */
			if (get_yield_smc_active_flag(tsp_ctx->state))
				SMC_RET1(handle, SMC_UNK);

			cm_el1_sysregs_context_save(NON_SECURE);

			/* Save x1 and x2 for use by TSP_GET_ARGS call below */
			store_tsp_args(tsp_ctx, x1, x2);

			/*
			 * We are done stashing the non-secure context. Ask the
			 * secure payload to do the work now.
			 */

			/*
			 * Verify if there is a valid context to use, copy the
			 * operation type and parameters to the secure context
			 * and jump to the fast smc entry point in the secure
			 * payload. Entry into S-EL1 will take place upon exit
			 * from this function.
			 */
			assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

			/* Set appropriate entry for SMC.
			 * We expect the TSP to manage the PSTATE.I and PSTATE.F
			 * flags as appropriate.
			 */
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->fast_smc_entry);
			} else {
				set_yield_smc_active_flag(tsp_ctx->state);
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->yield_smc_entry);
#if TSP_NS_INTR_ASYNC_PREEMPT
				/*
				 * Enable the routing of NS interrupts to EL3
				 * during processing of a Yielding SMC Call on
				 * this core.
				 */
				enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif

#if EL3_EXCEPTION_HANDLING
				/*
				 * With EL3 exception handling, while an SMC is
				 * being processed, Non-secure interrupts can't
				 * preempt Secure execution. However, for
				 * yielding SMCs, we want preemption to happen;
				 * so explicitly allow NS preemption in this
				 * case, and supply the preemption return code
				 * for TSP.
				 */
				ehf_allow_ns_preemption(TSP_PREEMPTED);
#endif
			}

			cm_el1_sysregs_context_restore(SECURE);
			cm_set_next_eret_context(SECURE);
			SMC_RET3(&tsp_ctx->cpu_ctx, smc_fid, x1, x2);
		} else {
			/*
			 * This is the result from the secure payload of an
			 * earlier request. The results are in x1-x3. Copy them
			 * into the non-secure context, save the secure state
			 * and return to the non-secure state.
			 */
			assert(handle == cm_get_context(SECURE));
			cm_el1_sysregs_context_save(SECURE);

			/* Get a reference to the non-secure context */
			ns_cpu_context = cm_get_context(NON_SECURE);
			assert(ns_cpu_context);

			/* Restore non-secure state */
			cm_el1_sysregs_context_restore(NON_SECURE);
			cm_set_next_eret_context(NON_SECURE);
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_YIELD) {
				clr_yield_smc_active_flag(tsp_ctx->state);
#if TSP_NS_INTR_ASYNC_PREEMPT
				/*
				 * Disable the routing of NS interrupts to EL3
				 * after processing of a Yielding SMC Call on
				 * this core is finished.
				 */
				disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
			}

			SMC_RET3(ns_cpu_context, x1, x2, x3);
		}

	/*
	 * Request from the non-secure world to abort a preempted Yielding SMC
	 * Call.
	 */
	case TSP_FID_ABORT:
		/* ABORT should only be invoked by normal world */
		if (!ns) {
			assert(0);
			break;
		}

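		/*
		 * Save the non-secure context before entering the TSP to
		 * abort the preempted SMC.
		 */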
		assert(handle == cm_get_context(NON_SECURE));
		cm_el1_sysregs_context_save(NON_SECURE);

		/* Abort the preempted SMC request */
		if (!tspd_abort_preempted_smc(tsp_ctx)) {
			/*
			 * If there was no preempted SMC to abort, return
			 * SMC_UNK.
			 *
			 * Restoring the NON_SECURE context is not necessary as
			 * the synchronous entry did not take place if the
			 * return code of tspd_abort_preempted_smc is zero.
			 */
			cm_set_next_eret_context(NON_SECURE);
			break;
		}

		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);
		SMC_RET1(handle, SMC_OK);

	/*
	 * Request from the non-secure world to resume the preempted
	 * Yielding SMC Call.
	 */
	case TSP_FID_RESUME:
		/* RESUME should be invoked only by normal world */
		if (!ns) {
			assert(0);
			break;
		}

		/*
		 * This is a resume request from the non-secure client.
		 * Save the non-secure state and send the request to
		 * the secure payload.
		 */
		assert(handle == cm_get_context(NON_SECURE));

		/* Check if we are already preempted before resume */
		if (!get_yield_smc_active_flag(tsp_ctx->state))
			SMC_RET1(handle, SMC_UNK);

		cm_el1_sysregs_context_save(NON_SECURE);

		/*
		 * We are done stashing the non-secure context. Ask the
		 * secure payload to do the work now.
		 */
#if TSP_NS_INTR_ASYNC_PREEMPT
		/*
		 * Enable the routing of NS interrupts to EL3 during resumption
		 * of a Yielding SMC Call on this core.
		 */
		enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif

#if EL3_EXCEPTION_HANDLING
		/*
		 * Allow the resumed yielding SMC processing to be preempted by
		 * Non-secure interrupts. Also, supply the preemption return
		 * code for TSP.
		 */
		ehf_allow_ns_preemption(TSP_PREEMPTED);
#endif

		/*
		 * We just need to return to the preempted point in
		 * the TSP and the execution will resume as normal.
		 */
		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);
		SMC_RET0(&tsp_ctx->cpu_ctx);

	/*
	 * This is a request from the secure payload for more arguments
	 * for an ongoing arithmetic operation requested by the
	 * non-secure world. Simply return the arguments from the non-
	 * secure client in the original call.
	 */
	case TSP_GET_ARGS:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		get_tsp_args(tsp_ctx, x1, x2);
		SMC_RET2(handle, x1, x2);

	case TOS_CALL_COUNT:
		/*
		 * Return the number of service function IDs implemented to
		 * provide service to the non-secure world.
		 */
		SMC_RET1(handle, TSP_NUM_FID);

	case TOS_UID:
		/* Return TSP UID to the caller */
		SMC_UUID_RET(handle, tsp_uuid);

	case TOS_CALL_VERSION:
		/* Return the version of current implementation */
		SMC_RET2(handle, TSP_VERSION_MAJOR, TSP_VERSION_MINOR);

	default:
		break;
	}

	SMC_RET1(handle, SMC_UNK);
}

/* Define an SPD runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
	tspd_fast,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	tspd_setup,
	tspd_smc_handler
);

/* Define an SPD runtime service descriptor for Yielding SMC Calls */
DECLARE_RT_SVC(
	tspd_std,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_YIELD,
	NULL,
	tspd_smc_handler
);