/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/ehf.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/cassert.h>
#include <services/sdei.h>

#include "sdei_private.h"

/* x0-x17 GPREGS context */
#define SDEI_SAVED_GPREGS	18U

/* Maximum preemption nesting levels: Critical priority and Normal priority */
#define MAX_EVENT_NESTING	2U

/* Per-CPU SDEI state access macro */
#define sdei_get_this_pe_state()	(&cpu_state[plat_my_core_pos()])

/* Structure to store information about an outstanding dispatch */
typedef struct sdei_dispatch_context {
	sdei_ev_map_t *map;
	uint64_t x[SDEI_SAVED_GPREGS];
	jmp_buf *dispatch_jmp;

	/* Exception state registers */
	uint64_t elr_el3;
	uint64_t spsr_el3;

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	/* CVE-2018-3639 mitigation state */
	uint64_t disable_cve_2018_3639;
#endif
} sdei_dispatch_context_t;

/* Per-CPU SDEI state data */
typedef struct sdei_cpu_state {
	sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
	unsigned short stack_top; /* Empty ascending stack: index of the next free slot */
	bool pe_masked;
	bool pending_enables;
} sdei_cpu_state_t;

/* SDEI states for all cores in the system */
static sdei_cpu_state_t cpu_state[PLATFORM_CORE_COUNT];

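/* Mask SDEI events on the calling PE, backing the SDEI_PE_MASK call */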
int64_t sdei_pe_mask(void)
{
	int64_t ret = 0;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	/*
	 * The return value indicates whether this call had any effect on the
	 * mask status of this PE.
	 */
	if (!state->pe_masked) {
		state->pe_masked = true;
		ret = 1;
	}

	return ret;
}

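/* Unmask SDEI events on the calling PE, backing the SDEI_PE_UNMASK call */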
void sdei_pe_unmask(void)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	/*
	 * If there are pending enables, iterate through the private mappings
	 * and enable those bound maps that are in the enabled state. Also,
	 * iterate through shared mappings and enable interrupts of events that
	 * are targeted to this PE.
	 */
	if (state->pending_enables) {
		for_each_private_map(i, map) {
			se = get_event_entry(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
				plat_ic_enable_interrupt(map->intr);
		}

		for_each_shared_map(i, map) {
			se = get_event_entry(map);

			sdei_map_lock(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED) &&
					(se->reg_flags == SDEI_REGF_RM_PE) &&
					(se->affinity == my_mpidr)) {
				plat_ic_enable_interrupt(map->intr);
			}
			sdei_map_unlock(map);
		}
	}

	state->pending_enables = false;
	state->pe_masked = false;
}

/* Push a dispatch context to the dispatch stack */
static sdei_dispatch_context_t *push_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	sdei_dispatch_context_t *disp_ctx;

	/* Cannot have more than max events */
	assert(state->stack_top < MAX_EVENT_NESTING);

	disp_ctx = &state->dispatch_stack[state->stack_top];
	state->stack_top++;

	return disp_ctx;
}

/* Pop a dispatch context from the dispatch stack */
static sdei_dispatch_context_t *pop_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	state->stack_top--;

	return &state->dispatch_stack[state->stack_top];
}

/* Retrieve the context at the top of the dispatch stack */
static sdei_dispatch_context_t *get_outstanding_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	return &state->dispatch_stack[state->stack_top - 1U];
}

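/*
 * Save the interrupted context (GP registers x0-x17, ELR_EL3 and SPSR_EL3)
 * into a fresh dispatch record pushed onto this PE's dispatch stack
 */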
static sdei_dispatch_context_t *save_event_ctx(sdei_ev_map_t *map,
		void *tgt_ctx)
{
	sdei_dispatch_context_t *disp_ctx;
	const gp_regs_t *tgt_gpregs;
	const el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	disp_ctx = push_dispatch();
	assert(disp_ctx != NULL);
	disp_ctx->map = map;

	/* Save general purpose and exception registers */
	memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
	disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
	disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);

	return disp_ctx;
}

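/*
 * Restore the saved GP registers, ELR_EL3 and SPSR_EL3 from a dispatch
 * record back into the target context
 */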
static void restore_event_ctx(const sdei_dispatch_context_t *disp_ctx,
		void *tgt_ctx)
{
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
			sdei_saved_gpregs_size_mismatch);

	/* Restore general purpose and exception registers */
	memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
	write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
	write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);

	/* Restore CVE-2018-3639 mitigation state */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
			disp_ctx->disable_cve_2018_3639);
#endif
}

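/* Save the Secure EL1 system register context before a world switch */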
static void save_secure_context(void)
{
	cm_el1_sysregs_context_save(SECURE);
}

/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
}

/*
 * Restore Non-secure context and arrange to resume it at the next ERET. Return
 * a pointer to the Non-secure context.
 */
static cpu_context_t *restore_and_resume_ns_context(void)
{
	cpu_context_t *ns_ctx;

	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	ns_ctx = cm_get_context(NON_SECURE);
	assert(ns_ctx != NULL);

	return ns_ctx;
}

/*
 * Prepare for ERET:
 * - Set the ELR to the registered handler address
 * - Set the SPSR register as described in the SDEI documentation and
 *   the AArch64.TakeException() pseudocode function in
 *   ARM DDI 0487F.c page J1-7635
 */
static void sdei_set_elr_spsr(sdei_entry_t *se,
		sdei_dispatch_context_t *disp_ctx)
{
	unsigned int client_el = sdei_client_el();
	u_register_t sdei_spsr = SPSR_64(client_el, MODE_SP_ELX,
			DISABLE_ALL_EXCEPTIONS);

	u_register_t interrupted_pstate = disp_ctx->spsr_el3;

	/* Check the SPAN bit in the client EL SCTLR */
	u_register_t client_el_sctlr;

	if (client_el == MODE_EL2) {
		client_el_sctlr = read_sctlr_el2();
	} else {
		client_el_sctlr = read_sctlr_el1();
	}
	/*
	 * Check whether to force the PAN bit or to use the value in the
	 * interrupted EL, according to the check described in
	 * TakeException. Since the client can only be Non-secure EL2 or
	 * EL1, some of the conditions in ElIsInHost() are known to always
	 * hold true.
	 * When client_el is EL2, SCTLR_EL2 is known to have a SPAN bit,
	 * as the conditions HCR_EL2.E2H = 1 and HCR_EL2.TGE = 1 have
	 * already been checked.
	 */
	u_register_t hcr_el2 = read_hcr();
	bool el_is_in_host = (read_feat_vhe_id_field() != 0U) &&
			     (hcr_el2 & HCR_TGE_BIT) &&
			     (hcr_el2 & HCR_E2H_BIT);

	if (is_feat_pan_supported() &&
	    ((client_el == MODE_EL1) ||
	     ((client_el == MODE_EL2) && el_is_in_host)) &&
	    ((client_el_sctlr & SCTLR_SPAN_BIT) == 0U)) {
		sdei_spsr |= SPSR_PAN_BIT;
	} else {
		sdei_spsr |= (interrupted_pstate & SPSR_PAN_BIT);
	}

	/* If SSBS is implemented, take the value from the client EL SCTLR */
	u_register_t ssbs_enabled = (read_id_aa64pfr1_el1()
					>> ID_AA64PFR1_EL1_SSBS_SHIFT)
					& ID_AA64PFR1_EL1_SSBS_MASK;
	if (ssbs_enabled != SSBS_UNAVAILABLE) {
		u_register_t ssbs_bit = ((client_el_sctlr & SCTLR_DSSBS_BIT)
					>> SCTLR_DSSBS_SHIFT)
					<< SPSR_SSBS_SHIFT_AARCH64;
		sdei_spsr |= ssbs_bit;
	}

	/* If MTE is implemented in the client EL, set the TCO bit */
	if (get_armv8_5_mte_support() >= MTE_IMPLEMENTED_ELX) {
		sdei_spsr |= SPSR_TCO_BIT_AARCH64;
	}

	/* Take the DIT field from the PSTATE of the interrupted EL */
	sdei_spsr |= (interrupted_pstate & SPSR_DIT_BIT);

	cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep, sdei_spsr);
}

/*
 * Populate the Non-secure context so that the next ERET will dispatch to the
 * SDEI client.
 */
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
		cpu_context_t *ctx, jmp_buf *dispatch_jmp)
{
	sdei_dispatch_context_t *disp_ctx;

	/* Push the event and context */
	disp_ctx = save_event_ctx(map, ctx);

	/*
	 * Set up handler arguments:
	 *
	 * - x0: Event number
	 * - x1: Handler argument supplied at the time of event registration
	 * - x2: Interrupted PC
	 * - x3: Interrupted SPSR
	 */
	SMC_SET_GP(ctx, CTX_GPREG_X0, (uint64_t) map->ev_num);
	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
	SMC_SET_GP(ctx, CTX_GPREG_X2, disp_ctx->elr_el3);
	SMC_SET_GP(ctx, CTX_GPREG_X3, disp_ctx->spsr_el3);

	/* Set up the ELR and SPSR registers to prepare for ERET */
	sdei_set_elr_spsr(se, disp_ctx);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(ctx);

	/* Save CVE-2018-3639 mitigation state */
	disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
			CTX_CVE_2018_3639_DISABLE);

	/* Force SDEI handler to execute with mitigation enabled by default */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
#endif

	disp_ctx->dispatch_jmp = dispatch_jmp;
}

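/*
 * Note on the dispatch mechanism (a reading of this file, not a statement
 * about code outside it): begin_sdei_synchronous_dispatch(), implemented
 * elsewhere in the dispatcher, appears to be the setjmp() counterpart of
 * end_sdei_synchronous_dispatch() below. The jmp_buf stashed here at dispatch
 * time is what the client's COMPLETE call eventually longjmp()s through,
 * which is what makes the dispatch look synchronous to the EL3 caller.
 */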
/* Handle a triggered SDEI interrupt while events were masked on this PE */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
		sdei_cpu_state_t *state, unsigned int intr_raw)
{
	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
	bool disable = false;

	/* Nothing to do for event 0 */
	if (map->ev_num == SDEI_EVENT_0)
		return;

	/*
	 * For a private event, or for a shared event specifically routed to
	 * this CPU, we disable the interrupt, leave the interrupt pending, and
	 * do EOI.
	 */
	if (is_event_private(map) || (se->reg_flags == SDEI_REGF_RM_PE))
		disable = true;

	if (se->reg_flags == SDEI_REGF_RM_PE)
		assert(se->affinity == my_mpidr);

	if (disable) {
		plat_ic_disable_interrupt(map->intr);
		plat_ic_set_interrupt_pending(map->intr);
		plat_ic_end_of_interrupt(intr_raw);
		state->pending_enables = true;

		return;
	}

	/*
	 * We just received a shared event with routing set to ANY PE. The
	 * interrupt can't be delegated on this PE as SDEI events are masked.
	 * However, because its routing mode is ANY, it is possible that the
	 * event can be delegated on any other PE that hasn't masked events.
	 * Therefore, we set the interrupt back to pending so as to give other
	 * suitable PEs a chance of handling it.
	 */
	assert(plat_ic_is_spi(map->intr) != 0);
	plat_ic_set_interrupt_pending(map->intr);

	/*
	 * Leaving the same interrupt pending also means that the same interrupt
	 * can target this PE again as soon as this PE leaves EL3. Whether and
	 * how often that happens depends on the implementation of the GIC.
	 *
	 * We therefore call a platform handler to resolve this situation.
	 */
	plat_sdei_handle_masked_trigger(my_mpidr, map->intr);

	/* This PE is masked. We EOI the interrupt, as it can't be delegated */
	plat_ic_end_of_interrupt(intr_raw);
}

/* SDEI main interrupt handler */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
		void *cookie)
{
	sdei_entry_t *se;
	cpu_context_t *ctx;
	sdei_ev_map_t *map;
	const sdei_dispatch_context_t *disp_ctx;
	unsigned int sec_state;
	sdei_cpu_state_t *state;
	uint32_t intr;
	jmp_buf dispatch_jmp;
	const uint64_t mpidr = read_mpidr_el1();

	/*
	 * To handle an event, the following conditions must be true:
	 *
	 * 1. Event must be signalled
	 * 2. Event must be enabled
	 * 3. This PE must be a target PE for the event
	 * 4. PE must be unmasked for SDEI
	 * 5. If this is a normal event, no event must be running
	 * 6. If this is a critical event, no critical event must be running
	 *
	 * (1) and (2) are true when this function is running
	 * (3) is enforced in GIC by selecting the appropriate routing option
	 * (4) is satisfied by client calling PE_UNMASK
	 * (5) and (6) are enforced using interrupt priority, the RPR, in GIC:
	 *   - Normal SDEI events belong to Normal SDE priority class
	 *   - Critical SDEI events belong to Critical CSDE priority class
	 *
	 * The interrupt has already been acknowledged, and therefore is active,
	 * so no other PE can handle this event while we are at it.
	 *
	 * Find if this is an SDEI interrupt. There must be an event mapped to
	 * this interrupt.
	 */
	intr = plat_ic_get_interrupt_id(intr_raw);
	map = find_event_map_by_intr(intr, (plat_ic_is_spi(intr) != 0));
	if (map == NULL) {
		ERROR("No SDEI map for interrupt %u\n", intr);
		panic();
	}

	/*
	 * The received interrupt number must either correspond to event 0, or
	 * must be a bound interrupt.
	 */
	assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));

	se = get_event_entry(map);
	state = sdei_get_this_pe_state();

	if (state->pe_masked) {
		/*
		 * Interrupts received while this PE was masked can't be
		 * dispatched.
		 */
		SDEI_LOG("interrupt %u on %" PRIx64 " while PE masked\n",
				map->intr, mpidr);
		if (is_event_shared(map))
			sdei_map_lock(map);

		handle_masked_trigger(map, se, state, intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	/* Insert a load barrier for the signalled SDEI event */
	if (map->ev_num == SDEI_EVENT_0)
		dmbld();

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Assert that a shared event routed to this PE was configured so */
	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
		assert(se->affinity == (mpidr & MPIDR_AFFINITY_MASK));
	}

	if (!can_sdei_state_trans(se, DO_DISPATCH)) {
		SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
				map->ev_num, se->state);

		/*
		 * If the event is registered, leave the interrupt pending so
		 * that it's delivered when the event is enabled.
		 */
		if (GET_EV_STATE(se, REGISTERED))
			plat_ic_set_interrupt_pending(map->intr);

		/*
		 * The interrupt was disabled or unregistered after the handler
		 * started to execute, which means now the interrupt is already
		 * disabled and we just need to EOI the interrupt.
		 */
		plat_ic_end_of_interrupt(intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	disp_ctx = get_outstanding_dispatch();
	if (is_event_critical(map)) {
		/*
		 * If this event is Critical, and if there's an outstanding
		 * dispatch, assert that the latter is a Normal dispatch.
		 * Critical events can preempt an outstanding Normal event
		 * dispatch.
		 */
		if (disp_ctx != NULL)
			assert(is_event_normal(disp_ctx->map));
	} else {
		/*
		 * If this event is Normal, assert that there are no outstanding
		 * dispatches. Normal events can't preempt any outstanding event
		 * dispatches.
		 */
		assert(disp_ctx == NULL);
	}

	sec_state = get_interrupt_src_ss(flags);

	if (is_event_shared(map))
		sdei_map_unlock(map);

	SDEI_LOG("ACK %" PRIx64 ", ev:0x%x ss:%d spsr:%lx ELR:%lx\n",
			mpidr, map->ev_num, sec_state, read_spsr_el3(),
			read_elr_el3());

	ctx = handle;

	/*
	 * Check if we interrupted the Secure state. If so, perform a context
	 * switch so that we can delegate to NS.
	 */
	if (sec_state == SECURE) {
		save_secure_context();
		ctx = restore_and_resume_ns_context();
	}

	/* Synchronously dispatch event */
	setup_ns_dispatch(map, se, ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * If the cause of dispatch originally interrupted the Secure world,
	 * resume Secure.
	 *
	 * No need to save the Non-secure context ahead of a world switch: the
	 * Non-secure context was fully saved before dispatch, and has been
	 * returned to its pre-dispatch state.
	 */
	if (sec_state == SECURE)
		restore_and_resume_secure_context();

	/*
	 * The event was dispatched after receiving the SDEI interrupt. With
	 * the event handling completed, EOI the corresponding interrupt.
	 */
	if ((map->ev_num != SDEI_EVENT_0) && !is_map_bound(map)) {
		ERROR("Invalid SDEI mapping: ev=0x%x\n", map->ev_num);
		panic();
	}
	plat_ic_end_of_interrupt(intr_raw);

	return 0;
}

/*
 * Explicitly dispatch the given SDEI event.
 *
 * When calling this API, the caller must be prepared for the SDEI dispatcher
 * to restore the Non-secure context and make it active. This call returns
 * only after the client has completed the dispatch. Then, the Non-secure
 * context will be active, and the following ERET will return to Non-secure.
 *
 * Should the caller require re-entry to Secure, it must restore the Secure
 * context and program registers for ERET.
 */
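/*
 * A minimal usage sketch, for illustration only: PLAT_EV_NUM stands for a
 * hypothetical platform-defined explicit event number, and the surrounding
 * EL3 caller (for instance a platform error handler) is assumed:
 *
 *	if (sdei_dispatch_event(PLAT_EV_NUM) != 0) {
 *		ERROR("SDEI dispatch failed\n");
 *		panic();
 *	}
 *	(then restore the Secure context and program registers for ERET)
 */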
int sdei_dispatch_event(int ev_num)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ns_ctx;
	sdei_dispatch_context_t *disp_ctx;
	sdei_cpu_state_t *state;
	jmp_buf dispatch_jmp;

	/* Can't dispatch if events are masked on this PE */
	state = sdei_get_this_pe_state();
	if (state->pe_masked)
		return -1;

	/* Event 0 can't be dispatched */
	if (ev_num == SDEI_EVENT_0)
		return -1;

	/* Locate mapping corresponding to this event */
	map = find_event_map(ev_num);
	if (map == NULL)
		return -1;

	/* Only explicit events can be dispatched */
	if (!is_map_explicit(map))
		return -1;

	/* Examine state of dispatch stack */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx != NULL) {
		/*
		 * There's an outstanding dispatch. If the outstanding dispatch
		 * is critical, no more dispatches are possible.
		 */
		if (is_event_critical(disp_ctx->map))
			return -1;

		/*
		 * If the outstanding dispatch is Normal, only critical events
		 * can be dispatched.
		 */
		if (is_event_normal(map))
			return -1;
	}

	se = get_event_entry(map);
	if (!can_sdei_state_trans(se, DO_DISPATCH))
		return -1;

	/*
	 * Prepare for NS dispatch by restoring the Non-secure context and
	 * marking that as active.
	 */
	ns_ctx = restore_and_resume_ns_context();

	/* Activate the priority corresponding to the event being dispatched */
	ehf_activate_priority(sdei_event_priority(map));

	/* Dispatch event synchronously */
	setup_ns_dispatch(map, se, ns_ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * Deactivate the priority level that was activated at the time of
	 * explicit dispatch.
	 */
	ehf_deactivate_priority(sdei_event_priority(map));

	return 0;
}

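/*
 * Unwind to the jump buffer captured when the dispatch began, ending the
 * synchronous dispatch: control resumes just after the corresponding
 * begin_sdei_synchronous_dispatch() call
 */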
static void end_sdei_synchronous_dispatch(jmp_buf *buffer)
{
	longjmp(*buffer, 1);
}

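/*
 * Complete the outstanding event on this PE, optionally resuming the client
 * at a given address; backs the SDEI_EVENT_COMPLETE and
 * SDEI_EVENT_COMPLETE_AND_RESUME calls
 */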
int sdei_event_complete(bool resume, uint64_t pc)
{
	sdei_dispatch_context_t *disp_ctx;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_action_t act;
	unsigned int client_el = sdei_client_el();

	/* Return error if called without an active event */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	/* Validate resumption point */
	if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
		return SDEI_EDENY;

	map = disp_ctx->map;
	assert(map != NULL);
	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
	if (!can_sdei_state_trans(se, act)) {
		if (is_event_shared(map))
			sdei_map_unlock(map);
		return SDEI_EDENY;
	}

	if (is_event_shared(map))
		sdei_map_unlock(map);

	/* Having done sanity checks, pop dispatch */
	(void) pop_dispatch();

	SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
			map->ev_num, read_spsr_el3(), read_elr_el3());

	/*
	 * Restore Non-secure to how it was originally interrupted. Once done,
	 * it's up-to-date with the saved copy.
	 */
	ctx = cm_get_context(NON_SECURE);
	restore_event_ctx(disp_ctx, ctx);

	if (resume) {
		/*
		 * Complete-and-resume call. Prepare the Non-secure context
		 * (currently active) for complete and resume.
		 */
		cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
				MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

		/*
		 * Make it look as if a synchronous exception were taken at the
		 * supplied Non-secure resumption point. Populate SPSR and
		 * ELR_ELx so that an ERET from there works as expected.
		 *
		 * The assumption is that the client, if necessary, would have
		 * saved any live content in these registers before making this
		 * call.
		 */
		if (client_el == MODE_EL2) {
			write_elr_el2(disp_ctx->elr_el3);
			write_spsr_el2(disp_ctx->spsr_el3);
		} else {
			/* EL1 */
			write_elr_el1(disp_ctx->elr_el3);
			write_spsr_el1(disp_ctx->spsr_el3);
		}
	}

	/* End the outstanding dispatch */
	end_sdei_synchronous_dispatch(disp_ctx->dispatch_jmp);

	return 0;
}

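/*
 * Return the value of a general-purpose register (x0-x17) saved from the
 * interrupted context; backs the SDEI_EVENT_CONTEXT call
 */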
int64_t sdei_event_context(void *handle, unsigned int param)
{
	sdei_dispatch_context_t *disp_ctx;

	if (param >= SDEI_SAVED_GPREGS)
		return SDEI_EINVAL;

	/* Get outstanding dispatch on this CPU */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	assert(disp_ctx->map != NULL);

	if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
		return SDEI_EDENY;

	/*
	 * No locking is required for the running state, as this is the only
	 * CPU which can complete the event.
	 */

	return (int64_t) disp_ctx->x[param];
}