/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl31.h>
#include <bl_common.h>
#include <cassert.h>
#include <context.h>
#include <debug.h>
#include <ehf.h>
#include <interrupt_mgmt.h>
#include <platform.h>
#include <pubsub.h>
#include <runtime_svc.h>
#include <sdei.h>
#include <stddef.h>
#include <string.h>
#include <utils.h>
#include "sdei_private.h"

#define MAJOR_VERSION	1
#define MINOR_VERSION	0
#define VENDOR_VERSION	0

#define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
	((((unsigned long long)(_major)) << 48) | \
	 (((unsigned long long)(_minor)) << 32) | \
	 (_vendor))
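
/*
 * For illustration: with the values above, SDEI_VERSION reports
 * MAKE_SDEI_VERSION(1, 0, 0) == 0x0001000000000000ULL, i.e. the major number
 * in bits [63:48], the minor number in bits [47:32] and the vendor number in
 * bits [31:0].
 */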
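/*
 * Lowest priority a GIC can express (larger numeric values mean lower
 * priority); used when handing a bound interrupt back to Non-secure on
 * unregister.
 */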
#define LOWEST_INTR_PRIORITY	0xff

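/*
 * plat_core_pos_by_mpidr() returns a negative value for an MPIDR that doesn't
 * correspond to a core on this platform, so this check doubles as an affinity
 * validation.
 */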
#define is_valid_affinity(_mpidr)	(plat_core_pos_by_mpidr(_mpidr) >= 0)

CASSERT(PLAT_SDEI_CRITICAL_PRI < PLAT_SDEI_NORMAL_PRI,
		sdei_critical_must_have_higher_priority);

static unsigned int num_dyn_priv_slots, num_dyn_shrd_slots;

/* Initialise SDEI map entries */
static void init_map(sdei_ev_map_t *map)
{
	map->reg_count = 0;
}

/* Convert mapping to SDEI class */
sdei_class_t map_to_class(sdei_ev_map_t *map)
{
	return is_event_critical(map) ? SDEI_CRITICAL : SDEI_NORMAL;
}

/* Clear SDEI event entries except state */
static void clear_event_entries(sdei_entry_t *se)
{
	se->ep = 0;
	se->arg = 0;
	se->affinity = 0;
	se->reg_flags = 0;
}

/* Perform CPU-specific state initialisation */
static void *sdei_cpu_on_init(const void *arg)
{
	int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Initialize private mappings on this CPU */
	for_each_private_map(i, map) {
		se = get_event_entry(map);
		clear_event_entries(se);
		se->state = 0;
	}

	SDEI_LOG("Private events initialized on %lx\n", read_mpidr_el1());

	/* All PEs start with SDEI events masked */
	sdei_pe_mask();

	return 0;
}

/* Initialise an SDEI class */
void sdei_class_init(sdei_class_t class)
{
	unsigned int i, zero_found __unused = 0;
	int ev_num_so_far __unused;
	sdei_ev_map_t *map;

	/* Sanity check and configuration of shared events */
	ev_num_so_far = -1;
	for_each_shared_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		/* Event 0 must not be shared */
		assert(map->ev_num != SDEI_EVENT_0);

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a shared event */
		assert(is_event_shared(map));

		/* No shared mapping should have signalable property */
		assert(!is_event_signalable(map));

		/* Shared mappings can't be explicit */
		assert(!is_map_explicit(map));
#endif

		/* Skip initializing the wrong priority */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (is_map_dynamic(map)) {
			assert(map->intr == SDEI_DYN_IRQ);
			assert(is_event_normal(map));
			num_dyn_shrd_slots++;
		} else {
			/* Shared mappings must be bound to shared interrupt */
			assert(plat_ic_is_spi(map->intr));
			set_map_bound(map);
		}

		init_map(map);
	}

	/* Sanity check and configuration of private events for this CPU */
	ev_num_so_far = -1;
	for_each_private_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		if (map->ev_num == SDEI_EVENT_0) {
			zero_found = 1;

			/* Event 0 must be a Secure SGI */
			assert(is_secure_sgi(map->intr));

			/*
			 * Event 0 can only have the signalable flag set
			 * (apart from being private).
			 */
			assert(map->map_flags == (SDEI_MAPF_SIGNALABLE |
						SDEI_MAPF_PRIVATE));
		} else {
			/* No other mapping should have signalable property */
			assert(!is_event_signalable(map));
		}

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a private event */
		assert(is_event_private(map));

		/*
		 * Other than priority, explicit events can only have explicit
		 * and private flags set.
		 */
		if (is_map_explicit(map)) {
			assert((map->map_flags | SDEI_MAPF_CRITICAL) ==
					(SDEI_MAPF_EXPLICIT | SDEI_MAPF_PRIVATE
					| SDEI_MAPF_CRITICAL));
		}
#endif

		/* Skip initializing the wrong priority */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (map->ev_num != SDEI_EVENT_0) {
			if (is_map_dynamic(map)) {
				assert(map->intr == SDEI_DYN_IRQ);
				assert(is_event_normal(map));
				num_dyn_priv_slots++;
			} else if (is_map_explicit(map)) {
				/*
				 * Explicit mappings don't have a backing
				 * SDEI interrupt, but verify that anyway.
				 */
				assert(map->intr == SDEI_DYN_IRQ);
			} else {
				/*
				 * Private mappings must be bound to private
				 * interrupt.
				 */
				assert(plat_ic_is_ppi(map->intr));
				set_map_bound(map);
			}
		}

		init_map(map);
	}

	/* Ensure event 0 is in the mapping */
	assert(zero_found);

	sdei_cpu_on_init(NULL);
}

/* SDEI dispatcher initialisation */
void sdei_init(void)
{
	sdei_class_init(SDEI_CRITICAL);
	sdei_class_init(SDEI_NORMAL);

	/* Register priority level handlers */
	ehf_register_priority_handler(PLAT_SDEI_CRITICAL_PRI,
			sdei_intr_handler);
	ehf_register_priority_handler(PLAT_SDEI_NORMAL_PRI,
			sdei_intr_handler);
}
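
/*
 * Illustrative sketch (not part of this file): the mappings walked by
 * sdei_class_init() are supplied by the platform, typically along these
 * lines; the event numbers and SGI number below are placeholders.
 *
 *	static sdei_ev_map_t plat_sdei_private[] = {
 *		SDEI_DEFINE_EVENT_0(8),
 *		SDEI_PRIVATE_EVENT(100, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
 *	};
 *
 *	static sdei_ev_map_t plat_sdei_shared[] = {
 *		SDEI_SHARED_EVENT(200, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
 *	};
 *
 *	REGISTER_SDEI_MAP(plat_sdei_private, plat_sdei_shared);
 */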

/* Populate SDEI event entry */
static void set_sdei_entry(sdei_entry_t *se, uint64_t ep, uint64_t arg,
		unsigned int flags, uint64_t affinity)
{
	assert(se != NULL);

	se->ep = ep;
	se->arg = arg;
	se->affinity = (affinity & MPIDR_AFFINITY_MASK);
	se->reg_flags = flags;
}

static unsigned long long sdei_version(void)
{
	return MAKE_SDEI_VERSION(MAJOR_VERSION, MINOR_VERSION, VENDOR_VERSION);
}

/* Validate flags and MPIDR values for REGISTER and ROUTING_SET calls */
static int validate_flags(uint64_t flags, uint64_t mpidr)
{
	/* Validate flags */
	switch (flags) {
	case SDEI_REGF_RM_PE:
		if (!is_valid_affinity(mpidr))
			return SDEI_EINVAL;
		break;
	case SDEI_REGF_RM_ANY:
		break;
	default:
		/* Unknown flags */
		return SDEI_EINVAL;
	}

	return 0;
}

/* Set routing of an SDEI event */
static int sdei_event_routing_set(int ev_num, uint64_t flags, uint64_t mpidr)
{
	int ret, routing;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	ret = validate_flags(flags, mpidr);
	if (ret)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	/* The event must not be private */
	if (is_event_private(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	if (!is_map_bound(map) || is_event_private(map)) {
		ret = SDEI_EINVAL;
		goto finish;
	}

	if (!can_sdei_state_trans(se, DO_ROUTING)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/* Choose appropriate routing */
	routing = (flags == SDEI_REGF_RM_ANY) ? INTR_ROUTING_MODE_ANY :
		INTR_ROUTING_MODE_PE;

	/* Update event registration flag */
	se->reg_flags = flags;

	/*
	 * ROUTING_SET is permissible only when event composite state is
	 * 'registered, disabled, and not running'. This means that the
	 * interrupt is currently disabled, and not active.
	 */
	plat_ic_set_spi_routing(map->intr, routing, (u_register_t) mpidr);

finish:
	sdei_map_unlock(map);

	return ret;
}

/* Register handler and argument for an SDEI event */
static int sdei_event_register(int ev_num, uint64_t ep, uint64_t arg,
		uint64_t flags, uint64_t mpidr)
{
	int ret;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	sdei_state_t backup_state;

	if (!ep || (plat_sdei_validate_entry_point(ep, sdei_client_el()) != 0))
		return SDEI_EINVAL;

	ret = validate_flags(flags, mpidr);
	if (ret)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	/* Private events always target the PE */
	if (is_event_private(map))
		flags = SDEI_REGF_RM_PE;

	se = get_event_entry(map);

	/*
	 * Even though the register operation is per-event (and, for private
	 * events, additionally per-PE), it has to be serialised with respect
	 * to bind/release, which are global operations. So we hold the lock
	 * throughout, unconditionally.
	 */
	sdei_map_lock(map);

	backup_state = se->state;
	if (!can_sdei_state_trans(se, DO_REGISTER))
		goto fallback;

	/*
	 * When registering for dynamic events, make sure it's been bound
	 * already. This has to be the case as, without binding, the client
	 * can't know about the event number to register for.
	 */
	if (is_map_dynamic(map) && !is_map_bound(map))
		goto fallback;

	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		/* Meanwhile, did any PE ACK the interrupt? */
		if (plat_ic_get_interrupt_active(map->intr))
			goto fallback;

		/* The interrupt must currently be owned by Non-secure */
		if (plat_ic_get_interrupt_type(map->intr) != INTR_TYPE_NS)
			goto fallback;

		/*
		 * Disable forwarding of new interrupt triggers to CPU
		 * interface.
		 */
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Any events that are triggered after register and before
		 * enable should remain pending. Clear any previous interrupt
		 * triggers which are pending (except for SGIs). This has no
		 * effect on level-triggered interrupts.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		/* Map interrupt to EL3 and program the correct priority */
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_EL3);

		/* Program the appropriate interrupt priority */
		plat_ic_set_interrupt_priority(map->intr, sdei_event_priority(map));

		/*
		 * Set the routing mode for shared event as requested. We
		 * already ensure that shared events get bound to SPIs.
		 */
		if (is_event_shared(map)) {
			plat_ic_set_spi_routing(map->intr,
					((flags == SDEI_REGF_RM_ANY) ?
					 INTR_ROUTING_MODE_ANY :
					 INTR_ROUTING_MODE_PE),
					(u_register_t) mpidr);
		}
	}

	/* Populate event entries */
	set_sdei_entry(se, ep, arg, flags, mpidr);

	/* Increment register count */
	map->reg_count++;

	sdei_map_unlock(map);

	return 0;

fallback:
	/* Reinstate previous state */
	se->state = backup_state;

	sdei_map_unlock(map);

	return SDEI_EDENY;
}
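
/*
 * Note: for a dynamic event, a client typically issues SDEI_INTERRUPT_BIND to
 * obtain an event number, then SDEI_EVENT_REGISTER and SDEI_EVENT_ENABLE, and
 * finally SDEI_PE_UNMASK, before the event can be dispatched.
 * can_sdei_state_trans() enforces the register/enable ordering, while binding
 * and PE masking are checked separately.
 */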

/* Enable SDEI event */
static int sdei_event_enable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret, before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	if (is_event_shared(map))
		sdei_map_lock(map);

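	/*
	 * can_sdei_state_trans() applies the state transition when it is
	 * allowed, so sampling the ENABLED bit before and after the call tells
	 * us whether the enabled state actually changed.
	 */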
	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_ENABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Enable interrupt for bound events only if there's a change in enabled
	 * state.
	 */
	if (is_map_bound(map) && (!before && after))
		plat_ic_enable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}

/* Disable SDEI event */
static int sdei_event_disable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret, before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	if (is_event_shared(map))
		sdei_map_lock(map);

	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_DISABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Disable interrupt for bound events only if there's a change in
	 * enabled state.
	 */
	if (is_map_bound(map) && (before && !after))
		plat_ic_disable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}

/* Query SDEI event information */
static uint64_t sdei_event_get_info(int ev_num, int info)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	unsigned int flags, registered;
	uint64_t affinity;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Sample state under lock */
	registered = GET_EV_STATE(se, REGISTERED);
	flags = se->reg_flags;
	affinity = se->affinity;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	switch (info) {
	case SDEI_INFO_EV_TYPE:
		return is_event_shared(map);

	case SDEI_INFO_EV_NOT_SIGNALED:
		return !is_event_signalable(map);

	case SDEI_INFO_EV_PRIORITY:
		return is_event_critical(map);

	case SDEI_INFO_EV_ROUTING_MODE:
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		return (flags == SDEI_REGF_RM_PE);

	case SDEI_INFO_EV_ROUTING_AFF:
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		if (flags != SDEI_REGF_RM_PE)
			return SDEI_EINVAL;
		return affinity;

	default:
		return SDEI_EINVAL;
	}
}

/* Unregister an SDEI event */
static int sdei_event_unregister(int ev_num)
{
	int ret = 0;
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	/*
	 * Even though the unregister operation is per-event (and, for private
	 * events, additionally per-PE), it has to be serialised with respect
	 * to bind/release, which are global operations. So we hold the lock
	 * throughout, unconditionally.
	 */
	sdei_map_lock(map);

	if (!can_sdei_state_trans(se, DO_UNREGISTER)) {
		/*
		 * Even if the call is invalid, and the handler is running (for
		 * example, having unregistered from a running handler earlier),
		 * return pending error code; otherwise, return deny.
		 */
		ret = GET_EV_STATE(se, RUNNING) ? SDEI_EPEND : SDEI_EDENY;

		goto finish;
	}

	map->reg_count--;
	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

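	/*
	 * For bound events, return the interrupt to its pre-register state:
	 * disabled, handed back to Non-secure, and at the lowest priority.
	 */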
	if (is_map_bound(map)) {
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Clear pending interrupt. Skip for SGIs as they may not be
		 * cleared on interrupt controllers.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		assert(plat_ic_get_interrupt_type(map->intr) == INTR_TYPE_EL3);
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_NS);
		plat_ic_set_interrupt_priority(map->intr, LOWEST_INTR_PRIORITY);
	}

	clear_event_entries(se);

	/*
	 * If the handler is running at the time of unregister, return the
	 * pending error code.
	 */
	if (GET_EV_STATE(se, RUNNING))
		ret = SDEI_EPEND;

finish:
	sdei_map_unlock(map);

	return ret;
}

/* Query status of an SDEI event */
static int sdei_event_status(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_state_t state;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* State value directly maps to the expected return format */
	state = se->state;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	return state;
}

/* Bind an SDEI event to an interrupt */
static int sdei_interrupt_bind(int intr_num)
{
	sdei_ev_map_t *map;
	int retry = 1, shared_mapping;

	/* SGIs are not allowed to be bound */
	if (plat_ic_is_sgi(intr_num))
		return SDEI_EINVAL;

	shared_mapping = plat_ic_is_spi(intr_num);
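	/*
	 * The free-slot search and the bind below are not atomic: another PE
	 * may claim the slot we find before we take its lock, in which case
	 * the loop retries with a fresh search.
	 */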
	do {
		/*
		 * Bail out if there is already an event for this interrupt,
		 * either platform-defined or dynamic.
		 */
		map = find_event_map_by_intr(intr_num, shared_mapping);
		if (map) {
			if (is_map_dynamic(map)) {
				if (is_map_bound(map)) {
					/*
					 * Dynamic event, already bound. Return
					 * event number.
					 */
					return map->ev_num;
				}
			} else {
				/* Binding non-dynamic event */
				return SDEI_EINVAL;
			}
		}

		/*
		 * The interrupt is not bound yet. Try to find a free slot to
		 * bind it. Free dynamic mappings have their interrupt set as
		 * SDEI_DYN_IRQ.
		 */
		map = find_event_map_by_intr(SDEI_DYN_IRQ, shared_mapping);
		if (!map)
			return SDEI_ENOMEM;

		/* The returned mapping must be dynamic */
		assert(is_map_dynamic(map));

		/*
		 * We cannot assert for bound maps here, as we might be racing
		 * with another bind.
		 */

		/* The requested interrupt must already belong to NS */
		if (plat_ic_get_interrupt_type(intr_num) != INTR_TYPE_NS)
			return SDEI_EDENY;

		/*
		 * Interrupt programming and ownership transfer are deferred
		 * until register.
		 */

		sdei_map_lock(map);
		if (!is_map_bound(map)) {
			map->intr = intr_num;
			set_map_bound(map);
			retry = 0;
		}
		sdei_map_unlock(map);
	} while (retry);

	return map->ev_num;
}

/* Release an SDEI event previously bound to an interrupt */
static int sdei_interrupt_release(int ev_num)
{
	int ret = 0;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	if (!is_map_dynamic(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	/* Event must have been unregistered before release */
	if (map->reg_count != 0) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/*
	 * Interrupt release never causes the state to change. We only check
	 * whether it's permissible or not.
	 */
	if (!can_sdei_state_trans(se, DO_RELEASE)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	if (is_map_bound(map)) {
		/*
		 * Deny release if the interrupt is active, which means it's
		 * probably being acknowledged and handled elsewhere.
		 */
		if (plat_ic_get_interrupt_active(map->intr)) {
			ret = SDEI_EDENY;
			goto finish;
		}

		/*
		 * Interrupt programming and ownership transfer are already done
		 * during unregister.
		 */

		map->intr = SDEI_DYN_IRQ;
		clr_map_bound(map);
	} else {
		SDEI_LOG("Error release bound:%d cnt:%d\n", is_map_bound(map),
				map->reg_count);
		ret = SDEI_EINVAL;
	}

finish:
	sdei_map_unlock(map);

	return ret;
}

/* Perform reset of private SDEI events */
static int sdei_private_reset(void)
{
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0, i;

	/* Unregister all private events */
	for_each_private_map(i, map) {
		/*
		 * Unregistering an event that is not registered fails with a
		 * 'deny' error code, which is tolerated here. But if the event
		 * is running or has an unregister pending, the reset call must
		 * fail.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	return final_ret;
}

/* Perform reset of shared SDEI events */
static int sdei_shared_reset(void)
{
	const sdei_mapping_t *mapping;
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0, i, j;

	/* Unregister all shared events */
	for_each_shared_map(i, map) {
		/*
		 * Unregistering an event that is not registered fails with a
		 * 'deny' error code, which is tolerated here. But if the event
		 * is running or has an unregister pending, the reset call must
		 * fail.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	if (final_ret != 0)
		return final_ret;

	/*
	 * Loop through both private and shared mappings, and release all
	 * bindings.
	 */
	for_each_mapping_type(i, mapping) {
		iterate_mapping(mapping, j, map) {
			/*
			 * Release bindings for mappings that are dynamic and
			 * bound.
			 */
			if (is_map_dynamic(map) && is_map_bound(map)) {
				/*
				 * Any failure to release would mean there is at
				 * least a PE registered for the event.
				 */
				ret = sdei_interrupt_release(map->ev_num);
				if ((ret != 0) && (final_ret == 0))
					final_ret = ret;
			}
		}
	}

	return final_ret;
}

/* Send a signal to another SDEI client PE */
int sdei_signal(int event, uint64_t target_pe)
{
	sdei_ev_map_t *map;

	/* Only event 0 can be signalled */
	if (event != SDEI_EVENT_0)
		return SDEI_EINVAL;

	/* Find mapping for event 0 */
	map = find_event_map(SDEI_EVENT_0);
	if (!map)
		return SDEI_EINVAL;

	/* The event must be signalable */
	if (!is_event_signalable(map))
		return SDEI_EINVAL;

	/* Validate target */
	if (plat_core_pos_by_mpidr(target_pe) < 0)
		return SDEI_EINVAL;

	/* Raise SGI. Platform will validate target_pe */
	plat_ic_raise_el3_sgi(map->intr, (u_register_t) target_pe);

	return 0;
}

/* Query SDEI dispatcher features */
uint64_t sdei_features(unsigned int feature)
{
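	/*
	 * BIND_SLOTS reports how many dynamic private and shared mapping slots
	 * the platform provides, packed into a single value by the
	 * FEATURE_BIND_SLOTS() macro (see sdei_private.h).
	 */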
	if (feature == SDEI_FEATURE_BIND_SLOTS) {
		return FEATURE_BIND_SLOTS(num_dyn_priv_slots,
				num_dyn_shrd_slots);
	}

	return SDEI_EINVAL;
}

/* SDEI top level handler for servicing SMCs */
uint64_t sdei_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{

	uint64_t x5;
	int ss = get_interrupt_src_ss(flags);
	int64_t ret;
	unsigned int resume = 0;

	if (ss != NON_SECURE)
		SMC_RET1(handle, SMC_UNK);

	/* Verify the caller EL */
	if (GET_EL(read_spsr_el3()) != sdei_client_el())
		SMC_RET1(handle, SMC_UNK);

	switch (smc_fid) {
	case SDEI_VERSION:
		SDEI_LOG("> VER\n");
		ret = sdei_version();
		SDEI_LOG("< VER:%llx\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_EVENT_REGISTER:
		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
		SDEI_LOG("> REG(n:%d e:%llx a:%llx f:%x m:%llx)\n", (int) x1,
				x2, x3, (int) x4, x5);
		ret = sdei_event_register(x1, x2, x3, x4, x5);
		SDEI_LOG("< REG:%lld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_EVENT_ENABLE:
		SDEI_LOG("> ENABLE(n:%d)\n", (int) x1);
		ret = sdei_event_enable(x1);
		SDEI_LOG("< ENABLE:%lld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_EVENT_DISABLE:
		SDEI_LOG("> DISABLE(n:%d)\n", (int) x1);
		ret = sdei_event_disable(x1);
		SDEI_LOG("< DISABLE:%lld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_EVENT_CONTEXT:
		SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
		ret = sdei_event_context(handle, x1);
		SDEI_LOG("< CTX:%lld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_EVENT_COMPLETE_AND_RESUME:
		resume = 1;
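		/* Fall through to the COMPLETE handling below */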

	case SDEI_EVENT_COMPLETE:
		SDEI_LOG("> COMPLETE(r:%d sta/ep:%llx):%lx\n", resume, x1,
				read_mpidr_el1());
		ret = sdei_event_complete(resume, x1);
		SDEI_LOG("< COMPLETE:%llx\n", ret);

		/*
		 * Set the error code only if the call failed. If the call
		 * succeeded, we discard the dispatched context and restore the
		 * interrupted context to a pristine condition, so it must not
		 * be modified; we don't return to the caller in that case
		 * anyway.
		 */
		if (ret)
			SMC_RET1(handle, ret);

		SMC_RET0(handle);

	case SDEI_EVENT_STATUS:
		SDEI_LOG("> STAT(n:%d)\n", (int) x1);
		ret = sdei_event_status(x1);
		SDEI_LOG("< STAT:%lld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_EVENT_GET_INFO:
		SDEI_LOG("> INFO(n:%d, %d)\n", (int) x1, (int) x2);
		ret = sdei_event_get_info(x1, x2);
		SDEI_LOG("< INFO:%lld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_EVENT_UNREGISTER:
		SDEI_LOG("> UNREG(n:%d)\n", (int) x1);
		ret = sdei_event_unregister(x1);
		SDEI_LOG("< UNREG:%lld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_PE_UNMASK:
		SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1());
		sdei_pe_unmask();
		SDEI_LOG("< UNMASK:%d\n", 0);
		SMC_RET1(handle, 0);

	case SDEI_PE_MASK:
		SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
		ret = sdei_pe_mask();
		SDEI_LOG("< MASK:%lld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_INTERRUPT_BIND:
		SDEI_LOG("> BIND(%d)\n", (int) x1);
		ret = sdei_interrupt_bind(x1);
		SDEI_LOG("< BIND:%lld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_INTERRUPT_RELEASE:
		SDEI_LOG("> REL(%d)\n", (int) x1);
		ret = sdei_interrupt_release(x1);
		SDEI_LOG("< REL:%lld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_SHARED_RESET:
		SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_shared_reset();
		SDEI_LOG("< S_RESET:%lld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_PRIVATE_RESET:
		SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_private_reset();
		SDEI_LOG("< P_RESET:%lld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_EVENT_ROUTING_SET:
		SDEI_LOG("> ROUTE_SET(n:%d f:%llx aff:%llx)\n", (int) x1, x2, x3);
		ret = sdei_event_routing_set(x1, x2, x3);
		SDEI_LOG("< ROUTE_SET:%lld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_FEATURES:
		SDEI_LOG("> FTRS(f:%llx)\n", x1);
		ret = sdei_features(x1);
		SDEI_LOG("< FTRS:%llx\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_EVENT_SIGNAL:
		SDEI_LOG("> SIGNAL(e:%llx t:%llx)\n", x1, x2);
		ret = sdei_signal(x1, x2);
		SDEI_LOG("< SIGNAL:%lld\n", ret);
		SMC_RET1(handle, ret);

	default:
		/* Do nothing in default case */
		break;
	}

	WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid);
	SMC_RET1(handle, SMC_UNK);
}

/* Subscribe to PSCI CPU on to initialize per-CPU SDEI configuration */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, sdei_cpu_on_init);