/*
 * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*******************************************************************************
 * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
 * plug-in component to the Secure Monitor, registered as a runtime service. The
 * SPD is expected to be a functional extension of the Secure Payload (SP) that
 * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
 * the Trusted OS/Applications range to the dispatcher. The SPD will either
 * handle the request locally or delegate it to the Secure Payload. It is also
 * responsible for initialising and maintaining communication with the SP.
 ******************************************************************************/
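/*
 * The Secure Payload in this case is TLK (Trusted Little Kernel), NVIDIA's
 * Trusted OS for Tegra platforms. It runs at S-EL1 and services the Trusted
 * Applications referenced by the SMCs handled below.
 */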
#include <assert.h>
#include <bl31/interrupt_mgmt.h>
#include <errno.h>
#include <stddef.h>

#include <arch_helpers.h>
#include <bl31/bl31.h>
#include <bl32/payloads/tlk.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <plat/common/platform.h>
#include <tools_share/uuid.h>

#include "tlkd_private.h"

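/*
 * Power management hooks for the SP; registered with PSCI once TLK reports
 * TLK_ENTRY_DONE (see the SMC handler below).
 */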
extern const spd_pm_ops_t tlkd_pm_ops;

/*******************************************************************************
 * Per-cpu Secure Payload state
 ******************************************************************************/
tlk_context_t tlk_ctx;

/*******************************************************************************
 * CPU number on which TLK booted up
 ******************************************************************************/
static uint32_t boot_cpu;

/* TLK UID: RFC-4122 compliant UUID (version-5, sha-1) */
DEFINE_SVC_UUID2(tlk_uuid,
	0xc9e911bd, 0xba2b, 0xee52, 0xb1, 0x72,
	0x46, 0x1f, 0xba, 0x97, 0x7f, 0x63);

static int32_t tlkd_init(void);

/*******************************************************************************
 * Secure Payload Dispatcher's timer interrupt handler
 ******************************************************************************/
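/*
 * Flow: an S-EL1 interrupt that fires while the normal world is running is
 * routed to EL3 and lands here. The handler saves the non-secure context,
 * restores the SP's context and enters it with TLK_IRQ_FIRED and the
 * interrupt number. The SP later signals completion through the
 * TLK_IRQ_DONE SMC, which switches execution back to the normal world.
 */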
static uint64_t tlkd_interrupt_handler(uint32_t id,
					uint32_t flags,
					void *handle,
					void *cookie)
{
	cpu_context_t *s_cpu_context;
	int irq = plat_ic_get_pending_interrupt_id();

	/* acknowledge the interrupt and mark it complete */
	(void)plat_ic_acknowledge_interrupt();
	plat_ic_end_of_interrupt(irq);

	/*
	 * Disable the routing of NS interrupts from secure world to
	 * EL3 while interrupted on this core.
	 */
	disable_intr_rm_local(INTR_TYPE_S_EL1, SECURE);

	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);
	assert(handle == cm_get_context(NON_SECURE));

	/* Save non-secure state */
	cm_el1_sysregs_context_save(NON_SECURE);

	/* Get a reference to the secure context */
	s_cpu_context = cm_get_context(SECURE);
	assert(s_cpu_context);

	/*
	 * Restore the secure world context. The non-secure state was
	 * saved above and will be restored once the SP signals
	 * completion via the TLK_IRQ_DONE SMC.
	 */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/* Provide the IRQ number to the SP */
	SMC_RET4(s_cpu_context, (uint32_t)TLK_IRQ_FIRED, 0, (uint32_t)irq, 0);
}

/*******************************************************************************
 * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
 * (aarch32/aarch64) if not already known and initialises the context for entry
 * into the SP for its initialisation.
 ******************************************************************************/
static int32_t tlkd_setup(void)
{
	entry_point_info_t *tlk_ep_info;
	uint32_t flags;
	int32_t ret;

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure.
	 */
	tlk_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (!tlk_ep_info) {
		WARN("No SP provided. Booting device without SP"
			" initialization. SMCs destined for the SP"
			" will return SMC_UNK\n");
		return 1;
	}

	/*
	 * If there's no valid entry point for SP, we return a non-zero value
	 * signalling failure initializing the service. We bail out without
	 * registering any handlers
	 */
	if (!tlk_ep_info->pc)
		return 1;

	/*
	 * Inspect the SP image's SPSR and determine its execution state,
	 * i.e. whether AArch32 or AArch64.
	 */
	tlkd_init_tlk_ep_state(tlk_ep_info,
		(tlk_ep_info->spsr >> MODE_RW_SHIFT) & MODE_RW_MASK,
		tlk_ep_info->pc,
		&tlk_ctx);

	/* get a list of all S-EL1 IRQs from the platform */

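	/*
	 * Route S-EL1 interrupts that fire while the normal world is
	 * running to EL3, so that tlkd_interrupt_handler() gets invoked
	 * and can hand them to the SP.
	 */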
	/* register interrupt handler */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
			tlkd_interrupt_handler,
			flags);
	if (ret != 0) {
		ERROR("failed to register tlkd interrupt handler (%d)\n", ret);
	}

	/*
	 * All TLK SPD initialization done. Now register our init function
	 * with BL31 for deferred invocation
	 */
	bl31_register_bl32_init(&tlkd_init);

	return 0;
}

/*******************************************************************************
 * This function passes control to the Secure Payload image (BL32) for the first
 * time on the primary cpu after a cold boot. It assumes that a valid secure
 * context has already been created by tlkd_setup() which can be directly
 * used. This function performs a synchronous entry into the Secure payload.
 * The SP passes control back to this routine through a SMC.
 ******************************************************************************/
static int32_t tlkd_init(void)
{
	entry_point_info_t *tlk_entry_point;

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure.
	 */
	tlk_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
	assert(tlk_entry_point);

	cm_init_my_context(tlk_entry_point);

	/*
	 * TLK runs only on a single CPU. Store the value of the boot
	 * CPU for sanity checking later.
	 */
	boot_cpu = plat_my_core_pos();

	/*
	 * Arrange for an entry into the secure payload.
	 */
	return tlkd_synchronous_sp_entry(&tlk_ctx);
}

/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure payload
 * to delegate work and return results back to the non-secure state. Lastly it
 * will also return any information that the secure payload needs to do the
 * work assigned to it.
 ******************************************************************************/
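/*
 * Illustrative invocation from a non-secure EL1 client (sketch only; the
 * actual SMC wrapper is OS specific and the argument layout depends on the
 * individual function ID):
 *
 *	x0 = TLK_OPEN_TA_SESSION	// function ID
 *	x1, x2 = request parameters	// e.g. a buffer descriptor
 *	smc	#0
 *	// on return, x0 holds the SP's result, or SMC_UNK on error
 */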
static uintptr_t tlkd_smc_handler(uint32_t smc_fid,
			 u_register_t x1,
			 u_register_t x2,
			 u_register_t x3,
			 u_register_t x4,
			 void *cookie,
			 void *handle,
			 u_register_t flags)
{
	cpu_context_t *ns_cpu_context;
	gp_regs_t *gp_regs;
	uint32_t ns;
	uint64_t par;

	/* Passing a NULL context is a critical programming error */
	assert(handle);

	/* These SMCs are only supported by a single CPU */
	if (boot_cpu != plat_my_core_pos())
		SMC_RET1(handle, SMC_UNK);

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	switch (smc_fid) {

	/*
	 * This function ID is used by SP to indicate that it was
	 * preempted by a non-secure world IRQ.
	 */
	case TLK_PREEMPTED:

		if (ns)
			SMC_RET1(handle, SMC_UNK);

		assert(handle == cm_get_context(SECURE));
		cm_el1_sysregs_context_save(SECURE);

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * The secure context was saved above. Restore the
		 * non-secure state and return control to the normal
		 * world so it can handle the pending IRQ.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET1(ns_cpu_context, x1);

	/*
	 * This is a request from the non-secure context to:
	 *
	 * a. register shared memory with the SP for storing its
	 *    activity logs.
	 * b. register shared memory with the SP for passing args
	 *    required for maintaining sessions with the Trusted
	 *    Applications.
	 * c. register shared persistent buffers for secure storage
	 * d. register NS DRAM ranges passed by Cboot
	 * e. register Root of Trust parameters from Cboot for Verified Boot
	 * f. open/close sessions
	 * g. issue commands to the Trusted Apps
	 * h. resume the preempted yielding SMC call.
	 */
	case TLK_REGISTER_LOGBUF:
	case TLK_REGISTER_REQBUF:
	case TLK_SS_REGISTER_HANDLER:
	case TLK_REGISTER_NS_DRAM_RANGES:
	case TLK_SET_ROOT_OF_TRUST:
	case TLK_OPEN_TA_SESSION:
	case TLK_CLOSE_TA_SESSION:
	case TLK_TA_LAUNCH_OP:
	case TLK_TA_SEND_EVENT:
	case TLK_RESUME_FID:

		if (!ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * This is a fresh request from the non-secure client.
		 * The parameters are in x1 and x2. Figure out which
		 * registers need to be preserved, save the non-secure
		 * state and send the request to the secure payload.
		 */
		assert(handle == cm_get_context(NON_SECURE));

		/*
		 * Check if we are already processing a yielding SMC
		 * call. Of all the supported fids, only the "resume"
		 * fid expects the flag to be set.
		 */
		if (smc_fid == TLK_RESUME_FID) {
			if (!get_yield_smc_active_flag(tlk_ctx.state))
				SMC_RET1(handle, SMC_UNK);
		} else {
			if (get_yield_smc_active_flag(tlk_ctx.state))
				SMC_RET1(handle, SMC_UNK);
		}

		cm_el1_sysregs_context_save(NON_SECURE);

		/*
		 * Verify if there is a valid context to use.
		 */
		assert(&tlk_ctx.cpu_ctx == cm_get_context(SECURE));

		/*
		 * Mark the SP state as active.
		 */
		set_yield_smc_active_flag(tlk_ctx.state);

		/*
		 * We are done stashing the non-secure context. Ask the
		 * secure payload to do the work now.
		 */
		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);

		/*
		 * TLK is a 32-bit Trusted OS and so expects the SMC
		 * arguments via r0-r7. TLK expects the monitor frame
		 * registers to be 64-bits long. Hence, we pass x0 in
		 * r0-r1, x1 in r2-r3, x2 in r4-r5 and x3 in r6-r7.
		 *
		 * As smc_fid is a uint32 value, r1 contains 0.
		 */
		gp_regs = get_gpregs_ctx(&tlk_ctx.cpu_ctx);
		write_ctx_reg(gp_regs, CTX_GPREG_X4, (uint32_t)x2);
		write_ctx_reg(gp_regs, CTX_GPREG_X5, (uint32_t)(x2 >> 32));
		write_ctx_reg(gp_regs, CTX_GPREG_X6, (uint32_t)x3);
		write_ctx_reg(gp_regs, CTX_GPREG_X7, (uint32_t)(x3 >> 32));
		SMC_RET4(&tlk_ctx.cpu_ctx, smc_fid, 0, (uint32_t)x1,
			(uint32_t)(x1 >> 32));

	/*
	 * Translate NS/EL1-S virtual addresses.
	 *
	 * x1 = virtual address
	 * x3 = type (NS/S)
	 *
	 * Returns PA:lo in r0, PA:hi in r1.
	 */
	case TLK_VA_TRANSLATE:

		/* Should be invoked only by secure world */
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/* NS virtual addresses are 64-bit long */
		if (x3 & TLK_TRANSLATE_NS_VADDR)
			x1 = (uint32_t)x1 | (x2 << 32);

		if (!x1)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * TODO: Sanity check x1. This would require platform
		 * support.
		 */

		/* virtual address and type: ns/s */
		par = tlkd_va_translate(x1, x3);

		/* return physical address in r0-r1 */
		SMC_RET4(handle, (uint32_t)par, (uint32_t)(par >> 32), 0, 0);

	/*
	 * This is a request from the SP to mark completion of
	 * a yielding function ID.
	 */
	case TLK_REQUEST_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * Mark the SP state as inactive.
		 */
		clr_yield_smc_active_flag(tlk_ctx.state);

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * This is a request completion SMC and we must switch to
		 * the non-secure world to pass the result.
		 */
		cm_el1_sysregs_context_save(SECURE);

		/*
		 * We are done stashing the secure context. Switch to the
		 * non-secure context and return the result.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);
		SMC_RET1(ns_cpu_context, x1);

	/*
	 * This function ID is used only by the SP to indicate it has
	 * finished initialising itself after a cold boot
	 */
	case TLK_ENTRY_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * SP has been successfully initialized. Register power
		 * management hooks with PSCI
		 */
		psci_register_spd_pm_hook(&tlkd_pm_ops);

		/*
		 * TLK reports completion. The SPD must have initiated
		 * the original request through a synchronous entry
		 * into the SP. Jump back to the original C runtime
		 * context.
		 */
		tlkd_synchronous_sp_exit(&tlk_ctx, x1);
		break;

	/*
	 * These function IDs are used only by TLK to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci cpu_suspend
	 *    request.
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 * 3. powering down after an earlier psci system_off/system_reset
	 *    request.
	 */
	case TLK_SUSPEND_DONE:
	case TLK_RESUME_DONE:
	case TLK_SYSTEM_OFF_DONE:

		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * TLK reports completion. TLKD must have initiated the
		 * original request through a synchronous entry into the SP.
		 * Jump back to the original C runtime context, and pass x1 as
		 * return value to the caller
		 */
		tlkd_synchronous_sp_exit(&tlk_ctx, x1);
		break;

	/*
	 * This function ID is used by SP to indicate that it has completed
	 * handling the secure interrupt.
	 */
	case TLK_IRQ_DONE:

		if (ns)
			SMC_RET1(handle, SMC_UNK);

		assert(handle == cm_get_context(SECURE));

		/* save secure world context */
		cm_el1_sysregs_context_save(SECURE);

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * The secure context was saved above. Restore the
		 * non-secure state and return to the normal world.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET0(ns_cpu_context);

	/*
	 * Return the number of service function IDs implemented to
	 * provide service to non-secure
	 */
	case TOS_CALL_COUNT:
		SMC_RET1(handle, TLK_NUM_FID);

	/*
	 * Return TLK's UID to the caller
	 */
	case TOS_UID:
		SMC_UUID_RET(handle, tlk_uuid);

	/*
	 * Return the version of current implementation
	 */
	case TOS_CALL_VERSION:
		SMC_RET2(handle, TLK_VERSION_MAJOR, TLK_VERSION_MINOR);

	default:
		WARN("%s: Unhandled SMC: 0x%x\n", __func__, smc_fid);
		break;
	}

	SMC_RET1(handle, SMC_UNK);
}

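/*
 * Four runtime service descriptors follow: fast and yielding SMCs for the
 * Trusted OS (OEN_TOS_*) range, and fast and yielding SMCs for the Trusted
 * Applications (OEN_TAP_*) range. All of them funnel into tlkd_smc_handler();
 * only the first descriptor runs tlkd_setup() at boot.
 */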
/* Define a SPD runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
	tlkd_tos_fast,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	tlkd_setup,
	tlkd_smc_handler
);

/* Define a SPD runtime service descriptor for yielding SMC calls */
DECLARE_RT_SVC(
	tlkd_tos_std,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_YIELD,
	NULL,
	tlkd_smc_handler
);

/* Define a SPD runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
	tlkd_tap_fast,

	OEN_TAP_START,
	OEN_TAP_END,
	SMC_TYPE_FAST,
	NULL,
	tlkd_smc_handler
);

/* Define a SPD runtime service descriptor for yielding SMC calls */
DECLARE_RT_SVC(
	tlkd_tap_std,

	OEN_TAP_START,
	OEN_TAP_END,
	SMC_TYPE_YIELD,
	NULL,
	tlkd_smc_handler
);