/*
 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <bl31/bl31.h>
#include <bl31/ehf.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/cassert.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
#include <services/sdei.h>

#include "sdei_private.h"

#define MAJOR_VERSION	1ULL
#define MINOR_VERSION	0ULL
#define VENDOR_VERSION	0ULL

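/*
 * The SDEI version is reported as a single 64-bit value: the major version in
 * bits [63:48], the minor version in bits [47:32], and the vendor-defined
 * version in bits [31:0], composed by the macro below.
 */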
#define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
	((((_major)) << 48ULL) | (((_minor)) << 32ULL) | (_vendor))

#define LOWEST_INTR_PRIORITY	0xff

#define is_valid_affinity(_mpidr)	(plat_core_pos_by_mpidr(_mpidr) >= 0)

CASSERT(PLAT_SDEI_CRITICAL_PRI < PLAT_SDEI_NORMAL_PRI,
	sdei_critical_must_have_higher_priority);

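/*
 * Counts of dynamic (bindable) private and shared slots, discovered while
 * initialising the mappings and reported through SDEI_FEATURES.
 */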
static unsigned int num_dyn_priv_slots, num_dyn_shrd_slots;

/* Initialise SDEI map entries */
static void init_map(sdei_ev_map_t *map)
{
	map->reg_count = 0;
}

/* Convert mapping to SDEI class */
static sdei_class_t map_to_class(sdei_ev_map_t *map)
{
	return is_event_critical(map) ? SDEI_CRITICAL : SDEI_NORMAL;
}

/* Clear SDEI event entries except state */
static void clear_event_entries(sdei_entry_t *se)
{
	se->ep = 0;
	se->arg = 0;
	se->affinity = 0;
	se->reg_flags = 0;
}

/* Perform CPU-specific state initialisation */
static void *sdei_cpu_on_init(const void *arg)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Initialize private mappings on this CPU */
	for_each_private_map(i, map) {
		se = get_event_entry(map);
		clear_event_entries(se);
		se->state = 0;
	}

	SDEI_LOG("Private events initialized on %lx\n", read_mpidr_el1());

	/* All PEs start with SDEI events masked */
	(void) sdei_pe_mask();

	return NULL;
}

/* CPU initialisation after wakeup from suspend */
static void *sdei_cpu_wakeup_init(const void *arg)
{
	SDEI_LOG("Events masked on %lx\n", read_mpidr_el1());

	/* All PEs wake up with SDEI events masked */
	sdei_pe_mask();

	return NULL;
}

/* Initialise an SDEI class */
static void sdei_class_init(sdei_class_t class)
{
	unsigned int i;
	bool zero_found __unused = false;
	int ev_num_so_far __unused;
	sdei_ev_map_t *map;

	/* Sanity check and configuration of shared events */
	ev_num_so_far = -1;
	for_each_shared_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		/* Event 0 must not be shared */
		assert(map->ev_num != SDEI_EVENT_0);

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a shared event */
		assert(is_event_shared(map));

		/* No shared mapping should have signalable property */
		assert(!is_event_signalable(map));

		/* Shared mappings can't be explicit */
		assert(!is_map_explicit(map));
#endif

		/* Skip initializing the wrong priority */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (is_map_dynamic(map)) {
			assert(map->intr == SDEI_DYN_IRQ);
			assert(is_event_normal(map));
			num_dyn_shrd_slots++;
		} else {
			/* Shared mappings must be bound to shared interrupt */
			assert(plat_ic_is_spi(map->intr) != 0);
			set_map_bound(map);
		}

		init_map(map);
	}

	/* Sanity check and configuration of private events for this CPU */
	ev_num_so_far = -1;
	for_each_private_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		if (map->ev_num == SDEI_EVENT_0) {
			zero_found = true;

			/* Event 0 must be a Secure SGI */
			assert(is_secure_sgi(map->intr));

			/*
			 * Event 0 can only have the signalable flag (apart
			 * from being private).
			 */
			assert(map->map_flags == (SDEI_MAPF_SIGNALABLE |
						SDEI_MAPF_PRIVATE));
		} else {
			/* No other mapping should have signalable property */
			assert(!is_event_signalable(map));
		}

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a private event */
		assert(is_event_private(map));

		/*
		 * Other than priority, explicit events can only have explicit
		 * and private flags set.
		 */
		if (is_map_explicit(map)) {
			assert((map->map_flags | SDEI_MAPF_CRITICAL) ==
					(SDEI_MAPF_EXPLICIT | SDEI_MAPF_PRIVATE
					| SDEI_MAPF_CRITICAL));
		}
#endif

		/* Skip initializing the wrong priority */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (map->ev_num != SDEI_EVENT_0) {
			if (is_map_dynamic(map)) {
				assert(map->intr == SDEI_DYN_IRQ);
				assert(is_event_normal(map));
				num_dyn_priv_slots++;
			} else if (is_map_explicit(map)) {
				/*
				 * Explicit mappings don't have a backing
				 * SDEI interrupt, but verify that anyway.
				 */
				assert(map->intr == SDEI_DYN_IRQ);
			} else {
				/*
				 * Private mappings must be bound to private
				 * interrupt.
				 */
				assert(plat_ic_is_ppi((unsigned) map->intr) != 0);
				set_map_bound(map);
			}
		}

		init_map(map);
	}

	/* Ensure event 0 is in the mapping */
	assert(zero_found);

	(void) sdei_cpu_on_init(NULL);
}

/* SDEI dispatcher initialisation */
void sdei_init(void)
{
	plat_sdei_setup();
	sdei_class_init(SDEI_CRITICAL);
	sdei_class_init(SDEI_NORMAL);

	/* Register priority level handlers */
	ehf_register_priority_handler(PLAT_SDEI_CRITICAL_PRI,
			sdei_intr_handler);
	ehf_register_priority_handler(PLAT_SDEI_NORMAL_PRI,
			sdei_intr_handler);
}

/* Populate SDEI event entry */
static void set_sdei_entry(sdei_entry_t *se, uint64_t ep, uint64_t arg,
		unsigned int flags, uint64_t affinity)
{
	assert(se != NULL);

	se->ep = ep;
	se->arg = arg;
	se->affinity = (affinity & MPIDR_AFFINITY_MASK);
	se->reg_flags = flags;
}

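/* Compose and return the SDEI version from the major, minor and vendor parts */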
static uint64_t sdei_version(void)
{
	return MAKE_SDEI_VERSION(MAJOR_VERSION, MINOR_VERSION, VENDOR_VERSION);
}

/* Validate flags and MPIDR values for REGISTER and ROUTING_SET calls */
static int validate_flags(uint64_t flags, uint64_t mpidr)
{
	/* Validate flags */
	switch (flags) {
	case SDEI_REGF_RM_PE:
		if (!is_valid_affinity(mpidr))
			return SDEI_EINVAL;
		break;
	case SDEI_REGF_RM_ANY:
		break;
	default:
		/* Unknown flags */
		return SDEI_EINVAL;
	}

	return 0;
}

/* Set routing of an SDEI event */
static int sdei_event_routing_set(int ev_num, uint64_t flags, uint64_t mpidr)
{
	int ret;
	unsigned int routing;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	ret = validate_flags(flags, mpidr);
	if (ret != 0)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* The event must not be private */
	if (is_event_private(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	if (!is_map_bound(map) || is_event_private(map)) {
		ret = SDEI_EINVAL;
		goto finish;
	}

	if (!can_sdei_state_trans(se, DO_ROUTING)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/* Choose appropriate routing */
	routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
		INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);

	/* Update event registration flag */
	se->reg_flags = (unsigned int) flags;
	if (flags == SDEI_REGF_RM_PE) {
		se->affinity = (mpidr & MPIDR_AFFINITY_MASK);
	}

	/*
	 * ROUTING_SET is permissible only when event composite state is
	 * 'registered, disabled, and not running'. This means that the
	 * interrupt is currently disabled, and not active.
	 */
	plat_ic_set_spi_routing(map->intr, routing, (u_register_t) mpidr);

finish:
	sdei_map_unlock(map);

	return ret;
}

/* Register handler and argument for an SDEI event */
static int64_t sdei_event_register(int ev_num,
		uint64_t ep,
		uint64_t arg,
		uint64_t flags,
		uint64_t mpidr)
{
	int ret;
	unsigned int routing;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	sdei_state_t backup_state;

	if ((ep == 0U) || (plat_sdei_validate_entry_point(
					ep, sdei_client_el()) != 0)) {
		return SDEI_EINVAL;
	}

	ret = validate_flags(flags, mpidr);
	if (ret != 0)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* Private events always target the PE */
	if (is_event_private(map))
		flags = SDEI_REGF_RM_PE;

	se = get_event_entry(map);

	/*
	 * Even though the register operation is per-event (and, for private
	 * events, must be performed on each PE individually), it has to be
	 * serialised with respect to bind/release, which are global
	 * operations. So we hold the lock throughout, unconditionally.
	 */
	sdei_map_lock(map);

	backup_state = se->state;
	if (!can_sdei_state_trans(se, DO_REGISTER))
		goto fallback;

	/*
	 * When registering for dynamic events, make sure it's been bound
	 * already. This has to be the case as, without binding, the client
	 * can't know about the event number to register for.
	 */
	if (is_map_dynamic(map) && !is_map_bound(map))
		goto fallback;

	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		/* Meanwhile, did any PE ACK the interrupt? */
		if (plat_ic_get_interrupt_active(map->intr) != 0U)
			goto fallback;

		/* The interrupt must currently be owned by Non-secure */
		if (plat_ic_get_interrupt_type(map->intr) != INTR_TYPE_NS)
			goto fallback;

		/*
		 * Disable forwarding of new interrupt triggers to the CPU
		 * interface.
		 */
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Any events that are triggered after register and before
		 * enable should remain pending. Clear any previous interrupt
		 * triggers which are pending (except for SGIs). This has no
		 * effect on level-triggered interrupts.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		/* Map interrupt to EL3 and program the correct priority */
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_EL3);

		/* Program the appropriate interrupt priority */
		plat_ic_set_interrupt_priority(map->intr, sdei_event_priority(map));

		/*
		 * Set the routing mode for shared event as requested. We
		 * already ensure that shared events get bound to SPIs.
		 */
		if (is_event_shared(map)) {
			routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
				INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);
			plat_ic_set_spi_routing(map->intr, routing,
					(u_register_t) mpidr);
		}
	}

	/* Populate event entries */
	set_sdei_entry(se, ep, arg, (unsigned int) flags, mpidr);

	/* Increment register count */
	map->reg_count++;

	sdei_map_unlock(map);

	return 0;

fallback:
	/* Reinstate previous state */
	se->state = backup_state;

	sdei_map_unlock(map);

	return SDEI_EDENY;
}

/* Enable SDEI event */
static int64_t sdei_event_enable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret;
	bool before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	if (is_event_shared(map))
		sdei_map_lock(map);

	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_ENABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Enable interrupt for bound events only if there's a change in enabled
	 * state.
	 */
	if (is_map_bound(map) && (!before && after))
		plat_ic_enable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}

/* Disable SDEI event */
static int sdei_event_disable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret;
	bool before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	if (is_event_shared(map))
		sdei_map_lock(map);

	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_DISABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Disable interrupt for bound events only if there's a change in
	 * enabled state.
	 */
	if (is_map_bound(map) && (before && !after))
		plat_ic_disable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}

/* Query SDEI event information */
static int64_t sdei_event_get_info(int ev_num, int info)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	uint64_t flags;
	bool registered;
	uint64_t affinity;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Sample state under lock */
	registered = GET_EV_STATE(se, REGISTERED);
	flags = se->reg_flags;
	affinity = se->affinity;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	switch (info) {
	case SDEI_INFO_EV_TYPE:
		return is_event_shared(map);

	case SDEI_INFO_EV_NOT_SIGNALED:
		return !is_event_signalable(map);

	case SDEI_INFO_EV_PRIORITY:
		return is_event_critical(map);

	case SDEI_INFO_EV_ROUTING_MODE:
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		return (flags == SDEI_REGF_RM_PE);

	case SDEI_INFO_EV_ROUTING_AFF:
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		if (flags != SDEI_REGF_RM_PE)
			return SDEI_EINVAL;
		return affinity;

	default:
		return SDEI_EINVAL;
	}
}

/* Unregister an SDEI event */
static int sdei_event_unregister(int ev_num)
{
	int ret = 0;
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	/*
	 * Even though the unregister operation is per-event (and, for private
	 * events, must be performed on each PE individually), it has to be
	 * serialised with respect to bind/release, which are global
	 * operations. So we hold the lock throughout, unconditionally.
	 */
	sdei_map_lock(map);

	if (!can_sdei_state_trans(se, DO_UNREGISTER)) {
		/*
		 * Even if the call is invalid, and the handler is running (for
		 * example, having unregistered from a running handler earlier),
		 * return pending error code; otherwise, return deny.
		 */
		ret = GET_EV_STATE(se, RUNNING) ? SDEI_EPEND : SDEI_EDENY;

		goto finish;
	}

	map->reg_count--;
	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Clear pending interrupt. Skip for SGIs as they may not be
		 * cleared on interrupt controllers.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		assert(plat_ic_get_interrupt_type(map->intr) == INTR_TYPE_EL3);
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_NS);
		plat_ic_set_interrupt_priority(map->intr, LOWEST_INTR_PRIORITY);
	}

	clear_event_entries(se);

	/*
	 * If the handler is running at the time of unregister, return the
	 * pending error code.
	 */
	if (GET_EV_STATE(se, RUNNING))
		ret = SDEI_EPEND;

finish:
	sdei_map_unlock(map);

	return ret;
}

/* Query status of an SDEI event */
static int sdei_event_status(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_state_t state;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* State value directly maps to the expected return format */
	state = se->state;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	return (int) state;
}

/* Bind an SDEI event to an interrupt */
static int sdei_interrupt_bind(unsigned int intr_num)
{
	sdei_ev_map_t *map;
	bool retry = true, shared_mapping;

	/* SGIs are not allowed to be bound */
	if (plat_ic_is_sgi(intr_num) != 0)
		return SDEI_EINVAL;

	shared_mapping = (plat_ic_is_spi(intr_num) != 0);
	do {
		/*
		 * Bail out if there is already an event for this interrupt,
		 * either platform-defined or dynamic.
		 */
		map = find_event_map_by_intr(intr_num, shared_mapping);
		if (map != NULL) {
			if (is_map_dynamic(map)) {
				if (is_map_bound(map)) {
					/*
					 * Dynamic event, already bound. Return
					 * event number.
					 */
					return map->ev_num;
				}
			} else {
				/* Binding non-dynamic event */
				return SDEI_EINVAL;
			}
		}

		/*
		 * The interrupt is not bound yet. Try to find a free slot to
		 * bind it. Free dynamic mappings have their interrupt set as
		 * SDEI_DYN_IRQ.
		 */
		map = find_event_map_by_intr(SDEI_DYN_IRQ, shared_mapping);
		if (map == NULL)
			return SDEI_ENOMEM;

		/* The returned mapping must be dynamic */
		assert(is_map_dynamic(map));

		/*
		 * We cannot assert for bound maps here, as we might be racing
		 * with another bind.
		 */

		/* The requested interrupt must already belong to NS */
		if (plat_ic_get_interrupt_type(intr_num) != INTR_TYPE_NS)
			return SDEI_EDENY;

		/*
		 * Interrupt programming and ownership transfer are deferred
		 * until register.
		 */

		sdei_map_lock(map);
		if (!is_map_bound(map)) {
			map->intr = intr_num;
			set_map_bound(map);
			retry = false;
		}
		sdei_map_unlock(map);
	} while (retry);

	return map->ev_num;
}

/* Release an SDEI event previously bound to an interrupt */
static int sdei_interrupt_release(int ev_num)
{
	int ret = 0;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	if (!is_map_dynamic(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	/* Event must have been unregistered before release */
	if (map->reg_count != 0) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/*
	 * Interrupt release never causes the state to change. We only check
	 * whether it's permissible or not.
	 */
	if (!can_sdei_state_trans(se, DO_RELEASE)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	if (is_map_bound(map)) {
		/*
		 * Deny release if the interrupt is active, which means it's
		 * probably being acknowledged and handled elsewhere.
		 */
		if (plat_ic_get_interrupt_active(map->intr) != 0U) {
			ret = SDEI_EDENY;
			goto finish;
		}

		/*
		 * Interrupt programming and ownership transfer are already done
		 * during unregister.
		 */

		map->intr = SDEI_DYN_IRQ;
		clr_map_bound(map);
	} else {
		SDEI_LOG("Error release bound:%d cnt:%d\n", is_map_bound(map),
				map->reg_count);
		ret = SDEI_EINVAL;
	}

finish:
	sdei_map_unlock(map);

	return ret;
}

/* Perform reset of private SDEI events */
static int sdei_private_reset(void)
{
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0;
	unsigned int i;

	/* Unregister all private events */
	for_each_private_map(i, map) {
		/*
		 * The unregister can fail if the event is not registered, which
		 * is allowed, and a deny will be returned. But if the event is
		 * running or unregister pending, the call fails.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	return final_ret;
}

/* Perform reset of shared SDEI events */
static int sdei_shared_reset(void)
{
	const sdei_mapping_t *mapping;
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0;
	unsigned int i, j;

	/* Unregister all shared events */
	for_each_shared_map(i, map) {
		/*
		 * The unregister can fail if the event is not registered, which
		 * is allowed, and a deny will be returned. But if the event is
		 * running or unregister pending, the call fails.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	if (final_ret != 0)
		return final_ret;

	/*
	 * Loop through both private and shared mappings, and release all
	 * bindings.
	 */
	for_each_mapping_type(i, mapping) {
		iterate_mapping(mapping, j, map) {
			/*
			 * Release bindings for mappings that are dynamic and
			 * bound.
			 */
			if (is_map_dynamic(map) && is_map_bound(map)) {
				/*
				 * Any failure to release would mean there is at
				 * least a PE registered for the event.
				 */
				ret = sdei_interrupt_release(map->ev_num);
				if ((ret != 0) && (final_ret == 0))
					final_ret = ret;
			}
		}
	}

	return final_ret;
}

/* Send a signal to another SDEI client PE */
static int sdei_signal(int ev_num, uint64_t target_pe)
{
	sdei_ev_map_t *map;

	/* Only event 0 can be signalled */
	if (ev_num != SDEI_EVENT_0)
		return SDEI_EINVAL;

	/* Find mapping for event 0 */
	map = find_event_map(SDEI_EVENT_0);
	if (map == NULL)
		return SDEI_EINVAL;

	/* The event must be signalable */
	if (!is_event_signalable(map))
		return SDEI_EINVAL;

	/* Validate target */
	if (plat_core_pos_by_mpidr(target_pe) < 0)
		return SDEI_EINVAL;

	/* Raise SGI. Platform will validate target_pe */
	plat_ic_raise_el3_sgi((int) map->intr, (u_register_t) target_pe);

	return 0;
}

/* Query SDEI dispatcher features */
static uint64_t sdei_features(unsigned int feature)
{
	if (feature == SDEI_FEATURE_BIND_SLOTS) {
		return FEATURE_BIND_SLOTS(num_dyn_priv_slots,
				num_dyn_shrd_slots);
	}

	return (uint64_t) SDEI_EINVAL;
}

/* SDEI top level handler for servicing SMCs */
uint64_t sdei_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{

	uint64_t x5;
	unsigned int ss = (unsigned int) get_interrupt_src_ss(flags);
	int64_t ret;
	bool resume = false;
	cpu_context_t *ctx = handle;
	int ev_num = (int) x1;

	if (ss != NON_SECURE)
		SMC_RET1(ctx, SMC_UNK);

	/* Verify the caller EL */
	if (GET_EL(read_spsr_el3()) != sdei_client_el())
		SMC_RET1(ctx, SMC_UNK);

	switch (smc_fid) {
	case SDEI_VERSION:
		SDEI_LOG("> VER\n");
		ret = (int64_t) sdei_version();
		SDEI_LOG("< VER:%" PRIx64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_REGISTER:
		x5 = SMC_GET_GP(ctx, CTX_GPREG_X5);
		SDEI_LOG("> REG(n:%d e:%" PRIx64 " a:%" PRIx64 " f:%x m:%" PRIx64 ")\n", ev_num,
				x2, x3, (int) x4, x5);
		ret = sdei_event_register(ev_num, x2, x3, x4, x5);
		SDEI_LOG("< REG:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ENABLE:
		SDEI_LOG("> ENABLE(n:%d)\n", (int) x1);
		ret = sdei_event_enable(ev_num);
		SDEI_LOG("< ENABLE:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_DISABLE:
		SDEI_LOG("> DISABLE(n:0x%x)\n", ev_num);
		ret = sdei_event_disable(ev_num);
		SDEI_LOG("< DISABLE:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_CONTEXT:
		SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
		ret = sdei_event_context(ctx, (unsigned int) x1);
		SDEI_LOG("< CTX:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_COMPLETE_AND_RESUME:
		resume = true;
		/* Fallthrough */

	case SDEI_EVENT_COMPLETE:
		SDEI_LOG("> COMPLETE(r:%u sta/ep:%" PRIx64 "):%lx\n",
				(unsigned int) resume, x1, read_mpidr_el1());
		ret = sdei_event_complete(resume, x1);
		SDEI_LOG("< COMPLETE:%" PRIx64 "\n", ret);

		/*
		 * Set error code only if the call failed. If the call
		 * succeeded, we discard the dispatched context, and restore the
		 * interrupted context to a pristine condition, and therefore
		 * shouldn't be modified. We don't return to the caller in this
		 * case anyway.
		 */
		if (ret != 0)
			SMC_RET1(ctx, ret);

		SMC_RET0(ctx);

	case SDEI_EVENT_STATUS:
		SDEI_LOG("> STAT(n:0x%x)\n", ev_num);
		ret = sdei_event_status(ev_num);
		SDEI_LOG("< STAT:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_GET_INFO:
		SDEI_LOG("> INFO(n:0x%x, %d)\n", ev_num, (int) x2);
		ret = sdei_event_get_info(ev_num, (int) x2);
		SDEI_LOG("< INFO:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_UNREGISTER:
		SDEI_LOG("> UNREG(n:0x%x)\n", ev_num);
		ret = sdei_event_unregister(ev_num);
		SDEI_LOG("< UNREG:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_PE_UNMASK:
		SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1());
		sdei_pe_unmask();
		SDEI_LOG("< UNMASK:%d\n", 0);
		SMC_RET1(ctx, 0);

	case SDEI_PE_MASK:
		SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
		ret = sdei_pe_mask();
		SDEI_LOG("< MASK:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_BIND:
		SDEI_LOG("> BIND(%d)\n", (int) x1);
		ret = sdei_interrupt_bind((unsigned int) x1);
		SDEI_LOG("< BIND:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_RELEASE:
		SDEI_LOG("> REL(0x%x)\n", ev_num);
		ret = sdei_interrupt_release(ev_num);
		SDEI_LOG("< REL:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_SHARED_RESET:
		SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_shared_reset();
		SDEI_LOG("< S_RESET:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_PRIVATE_RESET:
		SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_private_reset();
		SDEI_LOG("< P_RESET:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ROUTING_SET:
		SDEI_LOG("> ROUTE_SET(n:%d f:%" PRIx64 " aff:%" PRIx64 ")\n", ev_num, x2, x3);
		ret = sdei_event_routing_set(ev_num, x2, x3);
		SDEI_LOG("< ROUTE_SET:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_FEATURES:
		SDEI_LOG("> FTRS(f:%" PRIx64 ")\n", x1);
		ret = (int64_t) sdei_features((unsigned int) x1);
		SDEI_LOG("< FTRS:%" PRIx64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_SIGNAL:
		SDEI_LOG("> SIGNAL(e:%d t:%" PRIx64 ")\n", ev_num, x2);
		ret = sdei_signal(ev_num, x2);
		SDEI_LOG("< SIGNAL:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	default:
		/* Do nothing in default case */
		break;
	}

	WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid);
	SMC_RET1(ctx, SMC_UNK);
}

/* Subscribe to PSCI CPU on to initialize per-CPU SDEI configuration */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, sdei_cpu_on_init);

/* Subscribe to PSCI CPU suspend finisher for per-CPU configuration */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, sdei_cpu_wakeup_init);