blob: c58adba50ce0dff7237b8e16d0cded0e10083776 [file] [log] [blame]
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +01001/*
Govindraj Raja24d3a4e2023-12-21 13:57:49 -06002 * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +01003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +01007#include <assert.h>
Scott Brandene5dcf982020-08-25 13:49:32 -07008#include <inttypes.h>
9#include <stdint.h>
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +010010#include <string.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000011
12#include <arch_helpers.h>
Daniel Boulby44b43332020-11-25 16:36:46 +000013#include <arch_features.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000014#include <bl31/ehf.h>
15#include <bl31/interrupt_mgmt.h>
Arvind Ram Prakashdf8200d2024-02-20 11:35:27 -060016#include <bl31/sync_handle.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000017#include <common/bl_common.h>
18#include <common/debug.h>
19#include <common/runtime_svc.h>
20#include <lib/cassert.h>
21#include <services/sdei.h>
22
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +010023#include "sdei_private.h"
24
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +010025/* x0-x17 GPREGS context */
Jeenu Viswambharan32ceef52018-08-02 10:14:12 +010026#define SDEI_SAVED_GPREGS 18U
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +010027
28/* Maximum preemption nesting levels: Critical priority and Normal priority */
Jeenu Viswambharan32ceef52018-08-02 10:14:12 +010029#define MAX_EVENT_NESTING 2U
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +010030
31/* Per-CPU SDEI state access macro */
Jeenu Viswambharan32ceef52018-08-02 10:14:12 +010032#define sdei_get_this_pe_state() (&cpu_state[plat_my_core_pos()])
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +010033
34/* Structure to store information about an outstanding dispatch */
35typedef struct sdei_dispatch_context {
36 sdei_ev_map_t *map;
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +010037 uint64_t x[SDEI_SAVED_GPREGS];
Antonio Nino Diaz4586d1c2019-02-08 13:10:45 +000038 jmp_buf *dispatch_jmp;
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +010039
40 /* Exception state registers */
41 uint64_t elr_el3;
42 uint64_t spsr_el3;
Dimitris Papastamosbb1fd5b2018-06-07 11:29:15 +010043
44#if DYNAMIC_WORKAROUND_CVE_2018_3639
45 /* CVE-2018-3639 mitigation state */
46 uint64_t disable_cve_2018_3639;
47#endif
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +010048} sdei_dispatch_context_t;
49
50/* Per-CPU SDEI state data */
51typedef struct sdei_cpu_state {
52 sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
53 unsigned short stack_top; /* Empty ascending */
Jeenu Viswambharan32ceef52018-08-02 10:14:12 +010054 bool pe_masked;
55 bool pending_enables;
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +010056} sdei_cpu_state_t;
57
58/* SDEI states for all cores in the system */
Jeenu Viswambharan32ceef52018-08-02 10:14:12 +010059static sdei_cpu_state_t cpu_state[PLATFORM_CORE_COUNT];
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +010060
Jeenu Viswambharan32ceef52018-08-02 10:14:12 +010061int64_t sdei_pe_mask(void)
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +010062{
Jeenu Viswambharan32ceef52018-08-02 10:14:12 +010063 int64_t ret = 0;
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +010064 sdei_cpu_state_t *state = sdei_get_this_pe_state();
65
66 /*
67 * Return value indicates whether this call had any effect in the mask
68 * status of this PE.
69 */
Jeenu Viswambharan32ceef52018-08-02 10:14:12 +010070 if (!state->pe_masked) {
71 state->pe_masked = true;
72 ret = 1;
73 }
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +010074
75 return ret;
76}
77
/*
 * Unmask SDEI event dispatch on the calling PE, and process any interrupt
 * enables that were deferred while the PE was masked (see
 * handle_masked_trigger(), which sets pending_enables).
 */
void sdei_pe_unmask(void)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	/*
	 * If there are pending enables, iterate through the private mappings
	 * and enable those bound maps that are in enabled state. Also, iterate
	 * through shared mappings and enable interrupts of events that are
	 * targeted to this PE.
	 */
	if (state->pending_enables) {
		for_each_private_map(i, map) {
			se = get_event_entry(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
				plat_ic_enable_interrupt(map->intr);
		}

		for_each_shared_map(i, map) {
			se = get_event_entry(map);

			/* Shared entries are read/modified under the map lock */
			sdei_map_lock(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED) &&
					(se->reg_flags == SDEI_REGF_RM_PE) &&
					(se->affinity == my_mpidr)) {
				plat_ic_enable_interrupt(map->intr);
			}
			sdei_map_unlock(map);
		}
	}

	state->pending_enables = false;
	state->pe_masked = false;
}
115
116/* Push a dispatch context to the dispatch stack */
117static sdei_dispatch_context_t *push_dispatch(void)
118{
119 sdei_cpu_state_t *state = sdei_get_this_pe_state();
120 sdei_dispatch_context_t *disp_ctx;
121
122 /* Cannot have more than max events */
123 assert(state->stack_top < MAX_EVENT_NESTING);
124
125 disp_ctx = &state->dispatch_stack[state->stack_top];
126 state->stack_top++;
127
128 return disp_ctx;
129}
130
131/* Pop a dispatch context to the dispatch stack */
132static sdei_dispatch_context_t *pop_dispatch(void)
133{
134 sdei_cpu_state_t *state = sdei_get_this_pe_state();
135
Jeenu Viswambharan32ceef52018-08-02 10:14:12 +0100136 if (state->stack_top == 0U)
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +0100137 return NULL;
138
139 assert(state->stack_top <= MAX_EVENT_NESTING);
140
141 state->stack_top--;
142
143 return &state->dispatch_stack[state->stack_top];
144}
145
146/* Retrieve the context at the top of dispatch stack */
147static sdei_dispatch_context_t *get_outstanding_dispatch(void)
148{
149 sdei_cpu_state_t *state = sdei_get_this_pe_state();
150
Jeenu Viswambharan32ceef52018-08-02 10:14:12 +0100151 if (state->stack_top == 0U)
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +0100152 return NULL;
153
154 assert(state->stack_top <= MAX_EVENT_NESTING);
155
Jeenu Viswambharan32ceef52018-08-02 10:14:12 +0100156 return &state->dispatch_stack[state->stack_top - 1U];
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +0100157}
158
/*
 * Push a new dispatch stack entry and save the interrupted context into it:
 * x0-x17 (copied from the GP regs context) plus ELR_EL3/SPSR_EL3. The saved
 * state is restored by restore_event_ctx() when the client completes the
 * event. Returns the pushed dispatch context.
 */
static sdei_dispatch_context_t *save_event_ctx(sdei_ev_map_t *map,
		void *tgt_ctx)
{
	sdei_dispatch_context_t *disp_ctx;
	const gp_regs_t *tgt_gpregs;
	const el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	disp_ctx = push_dispatch();
	assert(disp_ctx != NULL);
	disp_ctx->map = map;

	/* Save general purpose and exception registers */
	memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
	disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
	disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);

	return disp_ctx;
}
181
/*
 * Restore a previously saved dispatch context into the target CPU context:
 * x0-x17, ELR_EL3/SPSR_EL3 and, when the dynamic CVE-2018-3639 workaround is
 * built in, the mitigation-disable state captured at dispatch time.
 */
static void restore_event_ctx(const sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
{
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	/* The saved area must exactly cover the x0-x17 copy performed below */
	CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
		foo);

	/* Restore general purpose and exception registers */
	memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
	write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
	write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);

	/* Restore CVE-2018-3639 mitigation state */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
		disp_ctx->disable_cve_2018_3639);
#endif
}
208
/* Save the Secure world's EL1 system register context */
static void save_secure_context(void)
{
	cm_el1_sysregs_context_save(SECURE);
}
213
/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
}
220
/*
 * Restore Non-secure context and arrange to resume it at the next ERET. Return
 * pointer to the Non-secure context.
 */
static cpu_context_t *restore_and_resume_ns_context(void)
{
	cpu_context_t *ns_ctx;

	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	/* A Non-secure context must have been initialised for this PE */
	ns_ctx = cm_get_context(NON_SECURE);
	assert(ns_ctx != NULL);

	return ns_ctx;
}
237
238/*
Daniel Boulby44b43332020-11-25 16:36:46 +0000239 * Prepare for ERET:
240 * - Set the ELR to the registered handler address
Arvind Ram Prakashdf8200d2024-02-20 11:35:27 -0600241 * - Set the SPSR register by calling the common create_spsr() function
Daniel Boulby44b43332020-11-25 16:36:46 +0000242 */
243
244static void sdei_set_elr_spsr(sdei_entry_t *se, sdei_dispatch_context_t *disp_ctx)
245{
246 unsigned int client_el = sdei_client_el();
247 u_register_t sdei_spsr = SPSR_64(client_el, MODE_SP_ELX,
248 DISABLE_ALL_EXCEPTIONS);
249
250 u_register_t interrupted_pstate = disp_ctx->spsr_el3;
251
Arvind Ram Prakashdf8200d2024-02-20 11:35:27 -0600252 sdei_spsr = create_spsr(interrupted_pstate, client_el);
Daniel Boulby44b43332020-11-25 16:36:46 +0000253
254 cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep, sdei_spsr);
255}
256
/*
 * Populate the Non-secure context so that the next ERET will dispatch to the
 * SDEI client.
 */
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
		cpu_context_t *ctx, jmp_buf *dispatch_jmp)
{
	sdei_dispatch_context_t *disp_ctx;

	/* Push the event and context */
	disp_ctx = save_event_ctx(map, ctx);

	/*
	 * Setup handler arguments:
	 *
	 * - x0: Event number
	 * - x1: Handler argument supplied at the time of event registration
	 * - x2: Interrupted PC
	 * - x3: Interrupted SPSR
	 */
	SMC_SET_GP(ctx, CTX_GPREG_X0, (uint64_t) map->ev_num);
	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
	SMC_SET_GP(ctx, CTX_GPREG_X2, disp_ctx->elr_el3);
	SMC_SET_GP(ctx, CTX_GPREG_X3, disp_ctx->spsr_el3);

	/* Setup the elr and spsr register to prepare for ERET */
	sdei_set_elr_spsr(se, disp_ctx);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(ctx);

	/* Save CVE-2018-3639 mitigation state */
	disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
		CTX_CVE_2018_3639_DISABLE);

	/* Force SDEI handler to execute with mitigation enabled by default */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
#endif

	/*
	 * Record the jump buffer; event completion longjmp()s to it to unwind
	 * this synchronous dispatch (see end_sdei_synchronous_dispatch()).
	 */
	disp_ctx->dispatch_jmp = dispatch_jmp;
}
299
/*
 * Handle a triggered SDEI interrupt while events were masked on this PE.
 *
 * Private events and shared events routed specifically to this PE are
 * disabled and left pending (re-enabled later by sdei_pe_unmask()); shared
 * events with ANY routing are set pending again so another, unmasked PE may
 * pick them up. In all cases the interrupt is EOI'd here.
 */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
		sdei_cpu_state_t *state, unsigned int intr_raw)
{
	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
	bool disable = false;

	/* Nothing to do for event 0 */
	if (map->ev_num == SDEI_EVENT_0)
		return;

	/*
	 * For a private event, or for a shared event specifically routed to
	 * this CPU, we disable interrupt, leave the interrupt pending, and do
	 * EOI.
	 */
	if (is_event_private(map) || (se->reg_flags == SDEI_REGF_RM_PE))
		disable = true;

	if (se->reg_flags == SDEI_REGF_RM_PE)
		assert(se->affinity == my_mpidr);

	if (disable) {
		plat_ic_disable_interrupt(map->intr);
		plat_ic_set_interrupt_pending(map->intr);
		plat_ic_end_of_interrupt(intr_raw);
		/* Defer the re-enable until sdei_pe_unmask() runs */
		state->pending_enables = true;

		return;
	}

	/*
	 * We just received a shared event with routing set to ANY PE. The
	 * interrupt can't be delegated on this PE as SDEI events are masked.
	 * However, because its routing mode is ANY, it is possible that the
	 * event can be delegated on any other PE that hasn't masked events.
	 * Therefore, we set the interrupt back pending so as to give other
	 * suitable PEs a chance of handling it.
	 */
	assert(plat_ic_is_spi(map->intr) != 0);
	plat_ic_set_interrupt_pending(map->intr);

	/*
	 * Leaving the same interrupt pending also means that the same interrupt
	 * can target this PE again as soon as this PE leaves EL3. Whether and
	 * how often that happens depends on the implementation of GIC.
	 *
	 * We therefore call a platform handler to resolve this situation.
	 */
	plat_sdei_handle_masked_trigger(my_mpidr, map->intr);

	/* This PE is masked. We EOI the interrupt, as it can't be delegated */
	plat_ic_end_of_interrupt(intr_raw);
}
354
/*
 * SDEI main interrupt handler.
 *
 * Invoked by the interrupt management framework when an interrupt bound to an
 * SDEI event (or the event-0 SGI) fires. Validates the trigger, performs a
 * Secure-to-Non-secure context switch if needed, then synchronously dispatches
 * the event to the registered Non-secure handler. Control returns here (via
 * longjmp from sdei_event_complete()) once the client completes the event,
 * after which the interrupt is EOI'd. Returns 0 in all non-panicking paths.
 */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
		void *cookie)
{
	sdei_entry_t *se;
	cpu_context_t *ctx;
	sdei_ev_map_t *map;
	const sdei_dispatch_context_t *disp_ctx;
	unsigned int sec_state;
	sdei_cpu_state_t *state;
	uint32_t intr;
	jmp_buf dispatch_jmp;
	const uint64_t mpidr = read_mpidr_el1();

	/*
	 * To handle an event, the following conditions must be true:
	 *
	 * 1. Event must be signalled
	 * 2. Event must be enabled
	 * 3. This PE must be a target PE for the event
	 * 4. PE must be unmasked for SDEI
	 * 5. If this is a normal event, no event must be running
	 * 6. If this is a critical event, no critical event must be running
	 *
	 * (1) and (2) are true when this function is running
	 * (3) is enforced in GIC by selecting the appropriate routing option
	 * (4) is satisfied by client calling PE_UNMASK
	 * (5) and (6) is enforced using interrupt priority, the RPR, in GIC:
	 *   - Normal SDEI events belong to Normal SDE priority class
	 *   - Critical SDEI events belong to Critical CSDE priority class
	 *
	 * The interrupt has already been acknowledged, and therefore is active,
	 * so no other PE can handle this event while we are at it.
	 *
	 * Find if this is an SDEI interrupt. There must be an event mapped to
	 * this interrupt
	 */
	intr = plat_ic_get_interrupt_id(intr_raw);
	map = find_event_map_by_intr(intr, (plat_ic_is_spi(intr) != 0));
	if (map == NULL) {
		ERROR("No SDEI map for interrupt %u\n", intr);
		panic();
	}

	/*
	 * Received interrupt number must either correspond to event 0, or must
	 * be bound interrupt.
	 */
	assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));

	se = get_event_entry(map);
	state = sdei_get_this_pe_state();

	if (state->pe_masked) {
		/*
		 * Interrupts received while this PE was masked can't be
		 * dispatched.
		 */
		SDEI_LOG("interrupt %u on %" PRIx64 " while PE masked\n",
				map->intr, mpidr);
		if (is_event_shared(map))
			sdei_map_lock(map);

		handle_masked_trigger(map, se, state, intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	/* Insert load barrier for signalled SDEI event */
	if (map->ev_num == SDEI_EVENT_0)
		dmbld();

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Assert shared event routed to this PE had been configured so */
	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
		assert(se->affinity == (mpidr & MPIDR_AFFINITY_MASK));
	}

	if (!can_sdei_state_trans(se, DO_DISPATCH)) {
		SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
				map->ev_num, se->state);

		/*
		 * If the event is registered, leave the interrupt pending so
		 * that it's delivered when the event is enabled.
		 */
		if (GET_EV_STATE(se, REGISTERED))
			plat_ic_set_interrupt_pending(map->intr);

		/*
		 * The interrupt was disabled or unregistered after the handler
		 * started to execute, which means now the interrupt is already
		 * disabled and we just need to EOI the interrupt.
		 */
		plat_ic_end_of_interrupt(intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	disp_ctx = get_outstanding_dispatch();
	if (is_event_critical(map)) {
		/*
		 * If this event is Critical, and if there's an outstanding
		 * dispatch, assert the latter is a Normal dispatch. Critical
		 * events can preempt an outstanding Normal event dispatch.
		 */
		if (disp_ctx != NULL)
			assert(is_event_normal(disp_ctx->map));
	} else {
		/*
		 * If this event is Normal, assert that there are no outstanding
		 * dispatches. Normal events can't preempt any outstanding event
		 * dispatches.
		 */
		assert(disp_ctx == NULL);
	}

	sec_state = get_interrupt_src_ss(flags);

	if (is_event_shared(map))
		sdei_map_unlock(map);

	SDEI_LOG("ACK %" PRIx64 ", ev:0x%x ss:%d spsr:%lx ELR:%lx\n",
			mpidr, map->ev_num, sec_state, read_spsr_el3(), read_elr_el3());

	ctx = handle;

	/*
	 * Check if we interrupted secure state. Perform a context switch so
	 * that we can delegate to NS.
	 */
	if (sec_state == SECURE) {
		save_secure_context();
		ctx = restore_and_resume_ns_context();
	}

	/* Synchronously dispatch event */
	setup_ns_dispatch(map, se, ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when client completes the event.
	 *
	 * If the cause of dispatch originally interrupted the Secure world,
	 * resume Secure.
	 *
	 * No need to save the Non-secure context ahead of a world switch: the
	 * Non-secure context was fully saved before dispatch, and has been
	 * returned to its pre-dispatch state.
	 */
	if (sec_state == SECURE)
		restore_and_resume_secure_context();

	/*
	 * The event was dispatched after receiving SDEI interrupt. With
	 * the event handling completed, EOI the corresponding
	 * interrupt.
	 */
	if ((map->ev_num != SDEI_EVENT_0) && !is_map_bound(map)) {
		ERROR("Invalid SDEI mapping: ev=0x%x\n", map->ev_num);
		panic();
	}
	plat_ic_end_of_interrupt(intr_raw);

	return 0;
}
529
/*
 * Explicitly dispatch the given SDEI event.
 *
 * When calling this API, the caller must be prepared for the SDEI dispatcher to
 * restore and make Non-secure context as active. This call returns only after
 * the client has completed the dispatch. Then, the Non-secure context will be
 * active, and the following ERET will return to Non-secure.
 *
 * Should the caller require re-entry to Secure, it must restore the Secure
 * context and program registers for ERET.
 *
 * Returns 0 on success, -1 if the event cannot be dispatched (PE masked,
 * event 0, unknown or non-explicit event, nesting rules violated, or invalid
 * event state transition).
 */
int sdei_dispatch_event(int ev_num)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ns_ctx;
	sdei_dispatch_context_t *disp_ctx;
	sdei_cpu_state_t *state;
	jmp_buf dispatch_jmp;

	/* Can't dispatch if events are masked on this PE */
	state = sdei_get_this_pe_state();
	if (state->pe_masked)
		return -1;

	/* Event 0 can't be dispatched */
	if (ev_num == SDEI_EVENT_0)
		return -1;

	/* Locate mapping corresponding to this event */
	map = find_event_map(ev_num);
	if (map == NULL)
		return -1;

	/* Only explicit events can be dispatched */
	if (!is_map_explicit(map))
		return -1;

	/* Examine state of dispatch stack */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx != NULL) {
		/*
		 * There's an outstanding dispatch. If the outstanding dispatch
		 * is critical, no more dispatches are possible.
		 */
		if (is_event_critical(disp_ctx->map))
			return -1;

		/*
		 * If the outstanding dispatch is Normal, only critical events
		 * can be dispatched.
		 */
		if (is_event_normal(map))
			return -1;
	}

	se = get_event_entry(map);
	if (!can_sdei_state_trans(se, DO_DISPATCH))
		return -1;

	/*
	 * Prepare for NS dispatch by restoring the Non-secure context and
	 * marking that as active.
	 */
	ns_ctx = restore_and_resume_ns_context();

	/* Activate the priority corresponding to the event being dispatched */
	ehf_activate_priority(sdei_event_priority(map));

	/* Dispatch event synchronously */
	setup_ns_dispatch(map, se, ns_ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when client completes the event.
	 *
	 * Deactivate the priority level that was activated at the time of
	 * explicit dispatch.
	 */
	ehf_deactivate_priority(sdei_event_priority(map));

	return 0;
}
613
/*
 * Unwind a synchronous dispatch by jumping back to the point that called
 * begin_sdei_synchronous_dispatch() with this buffer. Does not return.
 */
static void end_sdei_synchronous_dispatch(jmp_buf *buffer)
{
	longjmp(*buffer, 1);
}
618
/*
 * Complete the outstanding SDEI event dispatch on this PE.
 *
 * Implements both COMPLETE (resume == false) and COMPLETE_AND_RESUME
 * (resume == true, with 'pc' as the Non-secure resumption point). Restores
 * the interrupted Non-secure context saved at dispatch time, then ends the
 * synchronous dispatch via longjmp — so on success this call does not return
 * to its caller. Returns SDEI_EDENY on validation failure.
 */
int sdei_event_complete(bool resume, uint64_t pc)
{
	sdei_dispatch_context_t *disp_ctx;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_action_t act;
	unsigned int client_el = sdei_client_el();

	/* Return error if called without an active event */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	/* Validate resumption point */
	if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
		return SDEI_EDENY;

	map = disp_ctx->map;
	assert(map != NULL);
	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* COMPLETE vs. COMPLETE_AND_RESUME are distinct state transitions */
	act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
	if (!can_sdei_state_trans(se, act)) {
		if (is_event_shared(map))
			sdei_map_unlock(map);
		return SDEI_EDENY;
	}

	if (is_event_shared(map))
		sdei_map_unlock(map);

	/* Having done sanity checks, pop dispatch */
	(void) pop_dispatch();

	SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
			map->ev_num, read_spsr_el3(), read_elr_el3());

	/*
	 * Restore Non-secure to how it was originally interrupted. Once done,
	 * it's up-to-date with the saved copy.
	 */
	ctx = cm_get_context(NON_SECURE);
	restore_event_ctx(disp_ctx, ctx);

	if (resume) {
		/*
		 * Complete-and-resume call. Prepare the Non-secure context
		 * (currently active) for complete and resume.
		 */
		cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
					MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

		/*
		 * Make it look as if a synchronous exception were taken at the
		 * supplied Non-secure resumption point. Populate SPSR and
		 * ELR_ELx so that an ERET from there works as expected.
		 *
		 * The assumption is that the client, if necessary, would have
		 * saved any live content in these registers before making this
		 * call.
		 */
		if (client_el == MODE_EL2) {
			write_elr_el2(disp_ctx->elr_el3);
			write_spsr_el2(disp_ctx->spsr_el3);
		} else {
			/* EL1 */
			write_elr_el1(disp_ctx->elr_el3);
			write_spsr_el1(disp_ctx->spsr_el3);
		}
	}

	/* End the outstanding dispatch */
	end_sdei_synchronous_dispatch(disp_ctx->dispatch_jmp);

	return 0;
}
699
Jeenu Viswambharan32ceef52018-08-02 10:14:12 +0100700int64_t sdei_event_context(void *handle, unsigned int param)
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +0100701{
702 sdei_dispatch_context_t *disp_ctx;
703
704 if (param >= SDEI_SAVED_GPREGS)
705 return SDEI_EINVAL;
706
707 /* Get outstanding dispatch on this CPU */
708 disp_ctx = get_outstanding_dispatch();
Jeenu Viswambharan32ceef52018-08-02 10:14:12 +0100709 if (disp_ctx == NULL)
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +0100710 return SDEI_EDENY;
711
Jeenu Viswambharan32ceef52018-08-02 10:14:12 +0100712 assert(disp_ctx->map != NULL);
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +0100713
714 if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
715 return SDEI_EDENY;
716
717 /*
718 * No locking is required for the Running status as this is the only CPU
719 * which can complete the event
720 */
721
Jeenu Viswambharan32ceef52018-08-02 10:14:12 +0100722 return (int64_t) disp_ctx->x[param];
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +0100723}