blob: 1969307fd7601077a13747e6021c534cf9ad6541 [file] [log] [blame]
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +01001/*
2 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <arch_helpers.h>
8#include <assert.h>
9#include <bl31.h>
10#include <bl_common.h>
11#include <cassert.h>
12#include <context.h>
13#include <context_mgmt.h>
14#include <debug.h>
15#include <ehf.h>
16#include <interrupt_mgmt.h>
17#include <platform.h>
18#include <pubsub.h>
19#include <runtime_svc.h>
20#include <sdei.h>
21#include <stddef.h>
22#include <string.h>
23#include <utils.h>
24#include "sdei_private.h"
25
/* Version reported by SDEI_VERSION: major.minor = 1.0, vendor = 0 */
#define MAJOR_VERSION	1
#define MINOR_VERSION	0
#define VENDOR_VERSION	0

/*
 * Compose the 64-bit SDEI version value: major version in bits [63:48],
 * minor version in bits [47:32], vendor-defined version in bits [31:0].
 */
#define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
	((((unsigned long long)(_major)) << 48) | \
	 (((unsigned long long)(_minor)) << 32) | \
	 (_vendor))

/* Priority programmed for an interrupt when it is handed back to Non-secure */
#define LOWEST_INTR_PRIORITY	0xff

/* An affinity value is valid if the platform can map it to a core position */
#define is_valid_affinity(_mpidr)	(plat_core_pos_by_mpidr(_mpidr) >= 0)

/* Critical events must preempt normal ones: strictly higher (lower-valued) priority */
CASSERT(PLAT_SDEI_CRITICAL_PRI < PLAT_SDEI_NORMAL_PRI,
		sdei_critical_must_have_higher_priority);

/* Counts of dynamic (bindable) mapping slots, tallied during class init */
static unsigned int num_dyn_priv_slots, num_dyn_shrd_slots;
43
44/* Initialise SDEI map entries */
45static void init_map(sdei_ev_map_t *map)
46{
47 map->reg_count = 0;
48}
49
50/* Convert mapping to SDEI class */
51sdei_class_t map_to_class(sdei_ev_map_t *map)
52{
53 return is_event_critical(map) ? SDEI_CRITICAL : SDEI_NORMAL;
54}
55
56/* Clear SDEI event entries except state */
57static void clear_event_entries(sdei_entry_t *se)
58{
59 se->ep = 0;
60 se->arg = 0;
61 se->affinity = 0;
62 se->reg_flags = 0;
63}
64
65/* Perform CPU-specific state initialisation */
66static void *sdei_cpu_on_init(const void *arg)
67{
68 int i;
69 sdei_ev_map_t *map;
70 sdei_entry_t *se;
71
72 /* Initialize private mappings on this CPU */
73 for_each_private_map(i, map) {
74 se = get_event_entry(map);
75 clear_event_entries(se);
76 se->state = 0;
77 }
78
79 SDEI_LOG("Private events initialized on %lx\n", read_mpidr_el1());
80
81 /* All PEs start with SDEI events masked */
82 sdei_pe_mask();
83
84 return 0;
85}
86
/*
 * Initialise SDEI mappings belonging to the given class (critical or normal).
 *
 * Walks both the shared and private mapping tables: sanity-checks every
 * entry when assertions are enabled, counts dynamic (bindable) slots of the
 * requested class, marks statically-bound platform events as bound, and
 * resets each selected mapping's registration count. Finally runs the
 * per-CPU initialisation for the PE executing this function.
 */
void sdei_class_init(sdei_class_t class)
{
	unsigned int i, zero_found __unused = 0;
	int ev_num_so_far __unused;
	sdei_ev_map_t *map;

	/* Sanity check and configuration of shared events */
	ev_num_so_far = -1;
	for_each_shared_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		/* Event 0 must not be shared */
		assert(map->ev_num != SDEI_EVENT_0);

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a shared event */
		assert(is_event_shared(map));

		/* No shared mapping should have signalable property */
		assert(!is_event_signalable(map));
#endif

		/* Skip initializing the wrong priority */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (is_map_dynamic(map)) {
			/* Dynamic slots start unbound and must be normal priority */
			assert(map->intr == SDEI_DYN_IRQ);
			assert(is_event_normal(map));
			num_dyn_shrd_slots++;
		} else {
			/* Shared mappings must be bound to shared interrupt */
			assert(plat_ic_is_spi(map->intr));
			set_map_bound(map);
		}

		init_map(map);
	}

	/* Sanity check and configuration of private events for this CPU */
	ev_num_so_far = -1;
	for_each_private_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		if (map->ev_num == SDEI_EVENT_0) {
			zero_found = 1;

			/* Event 0 must be a Secure SGI */
			assert(is_secure_sgi(map->intr));

			/*
			 * Event 0 can only have the signalable flag (apart
			 * from being private).
			 */
			assert(map->map_flags == (SDEI_MAPF_SIGNALABLE |
						SDEI_MAPF_PRIVATE));
		} else {
			/* No other mapping should have signalable property */
			assert(!is_event_signalable(map));
		}

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a private event */
		assert(is_event_private(map));
#endif

		/* Skip initializing the wrong priority */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (map->ev_num != SDEI_EVENT_0) {
			if (is_map_dynamic(map)) {
				/* Dynamic slots start unbound and must be normal priority */
				assert(map->intr == SDEI_DYN_IRQ);
				assert(is_event_normal(map));
				num_dyn_priv_slots++;
			} else {
				/*
				 * Private mappings must be bound to private
				 * interrupt.
				 */
				assert(plat_ic_is_ppi(map->intr));
				set_map_bound(map);
			}
		}

		init_map(map);
	}

	/* Ensure event 0 is in the mapping */
	assert(zero_found);

	/* Initialise per-CPU state for the PE running this function */
	sdei_cpu_on_init(NULL);
}
195
/*
 * SDEI dispatcher initialisation.
 *
 * Initialises both event classes (which also validates the platform's
 * mapping tables), then registers the SDEI interrupt handler with the
 * Exception Handling Framework at both SDEI priority levels.
 */
void sdei_init(void)
{
	sdei_class_init(SDEI_CRITICAL);
	sdei_class_init(SDEI_NORMAL);

	/* Register priority level handlers */
	ehf_register_priority_handler(PLAT_SDEI_CRITICAL_PRI,
			sdei_intr_handler);
	ehf_register_priority_handler(PLAT_SDEI_NORMAL_PRI,
			sdei_intr_handler);
}
208
209/* Populate SDEI event entry */
210static void set_sdei_entry(sdei_entry_t *se, uint64_t ep, uint64_t arg,
211 unsigned int flags, uint64_t affinity)
212{
213 assert(se != NULL);
214
215 se->ep = ep;
216 se->arg = arg;
217 se->affinity = (affinity & MPIDR_AFFINITY_MASK);
218 se->reg_flags = flags;
219}
220
221static unsigned long long sdei_version(void)
222{
223 return MAKE_SDEI_VERSION(MAJOR_VERSION, MINOR_VERSION, VENDOR_VERSION);
224}
225
226/* Validate flags and MPIDR values for REGISTER and ROUTING_SET calls */
227static int validate_flags(uint64_t flags, uint64_t mpidr)
228{
229 /* Validate flags */
230 switch (flags) {
231 case SDEI_REGF_RM_PE:
232 if (!is_valid_affinity(mpidr))
233 return SDEI_EINVAL;
234 break;
235 case SDEI_REGF_RM_ANY:
236 break;
237 default:
238 /* Unknown flags */
239 return SDEI_EINVAL;
240 }
241
242 return 0;
243}
244
/*
 * Set the routing mode of a shared SDEI event (SDEI_EVENT_ROUTING_SET).
 *
 * ev_num: event whose routing is to change
 * flags:  SDEI_REGF_RM_ANY or SDEI_REGF_RM_PE
 * mpidr:  target PE when flags is RM_PE
 *
 * Returns 0 on success, SDEI_EINVAL for bad parameters or non-shared/
 * unbound events, SDEI_EDENY if the event state forbids the change.
 */
static int sdei_event_routing_set(int ev_num, uint64_t flags, uint64_t mpidr)
{
	int ret, routing;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	ret = validate_flags(flags, mpidr);
	if (ret)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	/* The event must not be private */
	if (is_event_private(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	/* Re-validate under the lock: event must be bound and shared */
	if (!is_map_bound(map) || is_event_private(map)) {
		ret = SDEI_EINVAL;
		goto finish;
	}

	if (!can_sdei_state_trans(se, DO_ROUTING)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/* Choose appropriate routing */
	routing = (flags == SDEI_REGF_RM_ANY) ? INTR_ROUTING_MODE_ANY :
		INTR_ROUTING_MODE_PE;

	/* Update event registration flag */
	se->reg_flags = flags;

	/*
	 * ROUTING_SET is permissible only when event composite state is
	 * 'registered, disabled, and not running'. This means that the
	 * interrupt is currently disabled, and not active.
	 */
	plat_ic_set_spi_routing(map->intr, routing, (u_register_t) mpidr);

finish:
	sdei_map_unlock(map);

	return ret;
}
298
/*
 * Register a handler for an SDEI event (SDEI_EVENT_REGISTER).
 *
 * ev_num: event to register for
 * ep:     client entry point to invoke on dispatch
 * arg:    opaque argument passed back to the handler
 * flags:  routing mode (forced to RM_PE for private events)
 * mpidr:  target PE when routing mode is RM_PE
 *
 * Returns 0 on success, SDEI_EINVAL for bad parameters, SDEI_EDENY if the
 * event state or interrupt state forbids registration (the event state is
 * rolled back on that path).
 */
static int sdei_event_register(int ev_num, uint64_t ep, uint64_t arg,
		uint64_t flags, uint64_t mpidr)
{
	int ret;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	sdei_state_t backup_state;

	/* The entry point must be valid for the client EL */
	if (!ep || (plat_sdei_validate_entry_point(ep, sdei_client_el()) != 0))
		return SDEI_EINVAL;

	ret = validate_flags(flags, mpidr);
	if (ret)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	/* Private events always target the PE */
	if (is_event_private(map))
		flags = SDEI_REGF_RM_PE;

	se = get_event_entry(map);

	/*
	 * Even though register operation is per-event (additionally for private
	 * events, registration is required individually), it has to be
	 * serialised with respect to bind/release, which are global operations.
	 * So we hold the lock throughout, unconditionally.
	 */
	sdei_map_lock(map);

	/* Snapshot the state so a failed attempt can be rolled back */
	backup_state = se->state;
	if (!can_sdei_state_trans(se, DO_REGISTER))
		goto fallback;

	/*
	 * When registering for dynamic events, make sure it's been bound
	 * already. This has to be the case as, without binding, the client
	 * can't know about the event number to register for.
	 */
	if (is_map_dynamic(map) && !is_map_bound(map))
		goto fallback;

	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		/* Meanwhile, did any PE ACK the interrupt? */
		if (plat_ic_get_interrupt_active(map->intr))
			goto fallback;

		/* The interrupt must currently owned by Non-secure */
		if (plat_ic_get_interrupt_type(map->intr) != INTR_TYPE_NS)
			goto fallback;

		/*
		 * Disable forwarding of new interrupt triggers to CPU
		 * interface.
		 */
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Any events that are triggered after register and before
		 * enable should remain pending. Clear any previous interrupt
		 * triggers which are pending (except for SGIs). This has no
		 * affect on level-triggered interrupts.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		/* Map interrupt to EL3 and program the correct priority */
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_EL3);

		/* Program the appropriate interrupt priority */
		plat_ic_set_interrupt_priority(map->intr, sdei_event_priority(map));

		/*
		 * Set the routing mode for shared event as requested. We
		 * already ensure that shared events get bound to SPIs.
		 */
		if (is_event_shared(map)) {
			plat_ic_set_spi_routing(map->intr,
					((flags == SDEI_REGF_RM_ANY) ?
					 INTR_ROUTING_MODE_ANY :
					 INTR_ROUTING_MODE_PE),
					(u_register_t) mpidr);
		}
	}

	/* Populate event entries */
	set_sdei_entry(se, ep, arg, flags, mpidr);

	/* Increment register count */
	map->reg_count++;

	sdei_map_unlock(map);

	return 0;

fallback:
	/* Reinstate previous state */
	se->state = backup_state;

	sdei_map_unlock(map);

	return SDEI_EDENY;
}
415
416/* Enable SDEI event */
417static int sdei_event_enable(int ev_num)
418{
419 sdei_ev_map_t *map;
420 sdei_entry_t *se;
421 int ret, before, after;
422
423 /* Check if valid event number */
424 map = find_event_map(ev_num);
425 if (!map)
426 return SDEI_EINVAL;
427
428 se = get_event_entry(map);
429 ret = SDEI_EDENY;
430
431 if (is_event_shared(map))
432 sdei_map_lock(map);
433
434 before = GET_EV_STATE(se, ENABLED);
435 if (!can_sdei_state_trans(se, DO_ENABLE))
436 goto finish;
437 after = GET_EV_STATE(se, ENABLED);
438
439 /*
440 * Enable interrupt for bound events only if there's a change in enabled
441 * state.
442 */
443 if (is_map_bound(map) && (!before && after))
444 plat_ic_enable_interrupt(map->intr);
445
446 ret = 0;
447
448finish:
449 if (is_event_shared(map))
450 sdei_map_unlock(map);
451
452 return ret;
453}
454
455/* Disable SDEI event */
456static int sdei_event_disable(int ev_num)
457{
458 sdei_ev_map_t *map;
459 sdei_entry_t *se;
460 int ret, before, after;
461
462 /* Check if valid event number */
463 map = find_event_map(ev_num);
464 if (!map)
465 return SDEI_EINVAL;
466
467 se = get_event_entry(map);
468 ret = SDEI_EDENY;
469
470 if (is_event_shared(map))
471 sdei_map_lock(map);
472
473 before = GET_EV_STATE(se, ENABLED);
474 if (!can_sdei_state_trans(se, DO_DISABLE))
475 goto finish;
476 after = GET_EV_STATE(se, ENABLED);
477
478 /*
479 * Disable interrupt for bound events only if there's a change in
480 * enabled state.
481 */
482 if (is_map_bound(map) && (before && !after))
483 plat_ic_disable_interrupt(map->intr);
484
485 ret = 0;
486
487finish:
488 if (is_event_shared(map))
489 sdei_map_unlock(map);
490
491 return ret;
492}
493
/*
 * Query information about an SDEI event (SDEI_EVENT_GET_INFO).
 *
 * The mutable fields (registered flag, routing flags, affinity) are
 * sampled under the map lock for shared events, then the requested datum
 * is derived from the snapshot. Returns the datum, SDEI_EINVAL for bad
 * event/info, or SDEI_EDENY where registration is a prerequisite.
 */
static uint64_t sdei_event_get_info(int ev_num, int info)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	unsigned int flags, registered;
	uint64_t affinity;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Sample state under lock */
	registered = GET_EV_STATE(se, REGISTERED);
	flags = se->reg_flags;
	affinity = se->affinity;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	switch (info) {
	case SDEI_INFO_EV_TYPE:
		return is_event_shared(map);

	case SDEI_INFO_EV_NOT_SIGNALED:
		return !is_event_signalable(map);

	case SDEI_INFO_EV_PRIORITY:
		return is_event_critical(map);

	case SDEI_INFO_EV_ROUTING_MODE:
		/* Routing mode only makes sense for registered shared events */
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		return (flags == SDEI_REGF_RM_PE);

	case SDEI_INFO_EV_ROUTING_AFF:
		/* Affinity is only valid for shared events routed to one PE */
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		if (flags != SDEI_REGF_RM_PE)
			return SDEI_EINVAL;
		return affinity;

	default:
		return SDEI_EINVAL;
	}
}
551
/*
 * Unregister an SDEI event (SDEI_EVENT_UNREGISTER).
 *
 * For bound events, the interrupt is masked, pending triggers cleared
 * (except event 0's SGI), and ownership handed back to Non-secure.
 * Returns 0 on success, SDEI_EINVAL for an unknown event, SDEI_EPEND if
 * the handler is currently running (unregister stays pending), or
 * SDEI_EDENY if the state forbids it.
 */
static int sdei_event_unregister(int ev_num)
{
	int ret = 0;
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	/*
	 * Even though unregister operation is per-event (additionally for
	 * private events, unregistration is required individually), it has to
	 * be serialised with respect to bind/release, which are global
	 * operations. So we hold the lock throughout, unconditionally.
	 */
	sdei_map_lock(map);

	if (!can_sdei_state_trans(se, DO_UNREGISTER)) {
		/*
		 * Even if the call is invalid, and the handler is running (for
		 * example, having unregistered from a running handler earlier),
		 * return pending error code; otherwise, return deny.
		 */
		ret = GET_EV_STATE(se, RUNNING) ? SDEI_EPEND : SDEI_EDENY;

		goto finish;
	}

	map->reg_count--;
	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		/* Stop forwarding the interrupt before handing it back */
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Clear pending interrupt. Skip for SGIs as they may not be
		 * cleared on interrupt controllers.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		/* Return the interrupt to Non-secure at the lowest priority */
		assert(plat_ic_get_interrupt_type(map->intr) == INTR_TYPE_EL3);
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_NS);
		plat_ic_set_interrupt_priority(map->intr, LOWEST_INTR_PRIORITY);
	}

	clear_event_entries(se);

	/*
	 * If the handler is running at the time of unregister, return the
	 * pending error code.
	 */
	if (GET_EV_STATE(se, RUNNING))
		ret = SDEI_EPEND;

finish:
	sdei_map_unlock(map);

	return ret;
}
623
624/* Query status of an SDEI event */
625static int sdei_event_status(int ev_num)
626{
627 sdei_ev_map_t *map;
628 sdei_entry_t *se;
629 sdei_state_t state;
630
631 /* Check if valid event number */
632 map = find_event_map(ev_num);
633 if (!map)
634 return SDEI_EINVAL;
635
636 se = get_event_entry(map);
637
638 if (is_event_shared(map))
639 sdei_map_lock(map);
640
641 /* State value directly maps to the expected return format */
642 state = se->state;
643
644 if (is_event_shared(map))
645 sdei_map_unlock(map);
646
647 return state;
648}
649
/*
 * Bind a Non-secure interrupt to a dynamic SDEI event slot
 * (SDEI_INTERRUPT_BIND).
 *
 * The slot search and claim are retried in a loop because another PE may
 * concurrently claim the free slot found here; the claim itself is done
 * under the map lock. Returns the event number on success, SDEI_EINVAL
 * for SGIs or already-mapped platform interrupts, SDEI_ENOMEM when no
 * free dynamic slot exists, or SDEI_EDENY if the interrupt is not owned
 * by Non-secure.
 */
static int sdei_interrupt_bind(int intr_num)
{
	sdei_ev_map_t *map;
	int retry = 1, shared_mapping;

	/* SGIs are not allowed to be bound */
	if (plat_ic_is_sgi(intr_num))
		return SDEI_EINVAL;

	/* SPIs bind to shared slots, PPIs to private ones */
	shared_mapping = plat_ic_is_spi(intr_num);
	do {
		/*
		 * Bail out if there is already an event for this interrupt,
		 * either platform-defined or dynamic.
		 */
		map = find_event_map_by_intr(intr_num, shared_mapping);
		if (map) {
			if (is_map_dynamic(map)) {
				if (is_map_bound(map)) {
					/*
					 * Dynamic event, already bound. Return
					 * event number.
					 */
					return map->ev_num;
				}
			} else {
				/* Binding non-dynamic event */
				return SDEI_EINVAL;
			}
		}

		/*
		 * The interrupt is not bound yet. Try to find a free slot to
		 * bind it. Free dynamic mappings have their interrupt set as
		 * SDEI_DYN_IRQ.
		 */
		map = find_event_map_by_intr(SDEI_DYN_IRQ, shared_mapping);
		if (!map)
			return SDEI_ENOMEM;

		/* The returned mapping must be dynamic */
		assert(is_map_dynamic(map));

		/*
		 * We cannot assert for bound maps here, as we might be racing
		 * with another bind.
		 */

		/* The requested interrupt must already belong to NS */
		if (plat_ic_get_interrupt_type(intr_num) != INTR_TYPE_NS)
			return SDEI_EDENY;

		/*
		 * Interrupt programming and ownership transfer are deferred
		 * until register.
		 */

		/* Claim the slot under lock; retry if someone else got it first */
		sdei_map_lock(map);
		if (!is_map_bound(map)) {
			map->intr = intr_num;
			set_map_bound(map);
			retry = 0;
		}
		sdei_map_unlock(map);
	} while (retry);

	return map->ev_num;
}
719
/*
 * Release an interrupt previously bound to a dynamic SDEI event
 * (SDEI_INTERRUPT_RELEASE).
 *
 * The event must be unregistered everywhere (reg_count == 0), in a state
 * that permits release, and its interrupt must not be active. Returns 0
 * on success, SDEI_EINVAL for unknown/non-dynamic/unbound events,
 * SDEI_EDENY otherwise.
 */
static int sdei_interrupt_release(int ev_num)
{
	int ret = 0;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	/* Only dynamically-bound events can be released */
	if (!is_map_dynamic(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	/* Event must have been unregistered before release */
	if (map->reg_count != 0) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/*
	 * Interrupt release never causes the state to change. We only check
	 * whether it's permissible or not.
	 */
	if (!can_sdei_state_trans(se, DO_RELEASE)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	if (is_map_bound(map)) {
		/*
		 * Deny release if the interrupt is active, which means it's
		 * probably being acknowledged and handled elsewhere.
		 */
		if (plat_ic_get_interrupt_active(map->intr)) {
			ret = SDEI_EDENY;
			goto finish;
		}

		/*
		 * Interrupt programming and ownership transfer are already done
		 * during unregister.
		 */

		/* Mark the slot free again */
		map->intr = SDEI_DYN_IRQ;
		clr_map_bound(map);
	} else {
		SDEI_LOG("Error release bound:%d cnt:%d\n", is_map_bound(map),
				map->reg_count);
		ret = SDEI_EINVAL;
	}

finish:
	sdei_map_unlock(map);

	return ret;
}
782
783/* Perform reset of private SDEI events */
784static int sdei_private_reset(void)
785{
786 sdei_ev_map_t *map;
787 int ret = 0, final_ret = 0, i;
788
789 /* Unregister all private events */
790 for_each_private_map(i, map) {
791 /*
792 * The unregister can fail if the event is not registered, which
793 * is allowed, and a deny will be returned. But if the event is
794 * running or unregister pending, the call fails.
795 */
796 ret = sdei_event_unregister(map->ev_num);
797 if ((ret == SDEI_EPEND) && (final_ret == 0))
Jeenu Viswambharanc94cf262017-11-30 10:25:10 +0000798 final_ret = SDEI_EDENY;
Jeenu Viswambharan04e3a7f2017-10-16 08:43:14 +0100799 }
800
801 return final_ret;
802}
803
/*
 * Reset all shared SDEI events (SDEI_SHARED_RESET).
 *
 * First unregisters every shared event; if any unregister remains pending
 * (handler running), the reset fails with SDEI_EDENY before any bindings
 * are touched. Otherwise, all dynamic bindings -- both private and shared
 * -- are released. Returns 0 on success, or the first error encountered.
 */
static int sdei_shared_reset(void)
{
	const sdei_mapping_t *mapping;
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0, i, j;

	/* Unregister all shared events */
	for_each_shared_map(i, map) {
		/*
		 * The unregister can fail if the event is not registered, which
		 * is allowed, and a deny will be returned. But if the event is
		 * running or unregister pending, the call fails.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	/* Do not release any bindings if an unregister is still pending */
	if (final_ret != 0)
		return final_ret;

	/*
	 * Loop through both private and shared mappings, and release all
	 * bindings.
	 */
	for_each_mapping_type(i, mapping) {
		iterate_mapping(mapping, j, map) {
			/*
			 * Release bindings for mappings that are dynamic and
			 * bound.
			 */
			if (is_map_dynamic(map) && is_map_bound(map)) {
				/*
				 * Any failure to release would mean there is at
				 * least a PE registered for the event.
				 */
				ret = sdei_interrupt_release(map->ev_num);
				if ((ret != 0) && (final_ret == 0))
					final_ret = ret;
			}
		}
	}

	return final_ret;
}
850
851/* Send a signal to another SDEI client PE */
852int sdei_signal(int event, uint64_t target_pe)
853{
854 sdei_ev_map_t *map;
855
856 /* Only event 0 can be signalled */
857 if (event != SDEI_EVENT_0)
858 return SDEI_EINVAL;
859
860 /* Find mapping for event 0 */
861 map = find_event_map(SDEI_EVENT_0);
862 if (!map)
863 return SDEI_EINVAL;
864
865 /* The event must be signalable */
866 if (!is_event_signalable(map))
867 return SDEI_EINVAL;
868
869 /* Validate target */
870 if (plat_core_pos_by_mpidr(target_pe) < 0)
871 return SDEI_EINVAL;
872
873 /* Raise SGI. Platform will validate target_pe */
874 plat_ic_raise_el3_sgi(map->intr, (u_register_t) target_pe);
875
876 return 0;
877}
878
879/* Query SDEI dispatcher features */
880uint64_t sdei_features(unsigned int feature)
881{
882 if (feature == SDEI_FEATURE_BIND_SLOTS) {
883 return FEATURE_BIND_SLOTS(num_dyn_priv_slots,
884 num_dyn_shrd_slots);
885 }
886
887 return SDEI_EINVAL;
888}
889
/*
 * SDEI top level handler for servicing SMCs.
 *
 * Accepts calls only from the Non-secure world and only from the expected
 * client EL; everything else gets SMC_UNK. Each recognised function ID is
 * logged, dispatched to its implementation, and the result returned via
 * SMC_RET1 (which does not fall through -- the trailing breaks are for
 * form only).
 */
uint64_t sdei_smc_handler(uint32_t smc_fid,
		uint64_t x1,
		uint64_t x2,
		uint64_t x3,
		uint64_t x4,
		void *cookie,
		void *handle,
		uint64_t flags)
{

	uint64_t x5;
	int ss = get_interrupt_src_ss(flags);
	int64_t ret;
	unsigned int resume = 0;

	/* SDEI calls are only accepted from the Non-secure world */
	if (ss != NON_SECURE)
		SMC_RET1(handle, SMC_UNK);

	/* Verify the caller EL */
	if (GET_EL(read_spsr_el3()) != sdei_client_el())
		SMC_RET1(handle, SMC_UNK);

	switch (smc_fid) {
	case SDEI_VERSION:
		SDEI_LOG("> VER\n");
		ret = sdei_version();
		SDEI_LOG("< VER:%lx\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_EVENT_REGISTER:
		/* The fifth argument arrives in x5 of the saved context */
		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
		SDEI_LOG("> REG(n:%d e:%lx a:%lx f:%x m:%lx)\n", (int) x1,
				x2, x3, (int) x4, x5);
		ret = sdei_event_register(x1, x2, x3, x4, x5);
		SDEI_LOG("< REG:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_EVENT_ENABLE:
		SDEI_LOG("> ENABLE(n:%d)\n", (int) x1);
		ret = sdei_event_enable(x1);
		SDEI_LOG("< ENABLE:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_EVENT_DISABLE:
		SDEI_LOG("> DISABLE(n:%d)\n", (int) x1);
		ret = sdei_event_disable(x1);
		SDEI_LOG("< DISABLE:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_EVENT_CONTEXT:
		SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
		ret = sdei_event_context(handle, x1);
		SDEI_LOG("< CTX:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_EVENT_COMPLETE_AND_RESUME:
		resume = 1;
		/* Fall through */

	case SDEI_EVENT_COMPLETE:
		SDEI_LOG("> COMPLETE(r:%d sta/ep:%lx):%lx\n", resume, x1,
				read_mpidr_el1());
		ret = sdei_event_complete(resume, x1);
		SDEI_LOG("< COMPLETE:%lx\n", ret);

		/*
		 * Set error code only if the call failed. If the call
		 * succeeded, we discard the dispatched context, and restore the
		 * interrupted context to a pristine condition, and therefore
		 * shouldn't be modified. We don't return to the caller in this
		 * case anyway.
		 */
		if (ret)
			SMC_RET1(handle, ret);

		SMC_RET0(handle);
		break;

	case SDEI_EVENT_STATUS:
		SDEI_LOG("> STAT(n:%d)\n", (int) x1);
		ret = sdei_event_status(x1);
		SDEI_LOG("< STAT:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_EVENT_GET_INFO:
		SDEI_LOG("> INFO(n:%d, %d)\n", (int) x1, (int) x2);
		ret = sdei_event_get_info(x1, x2);
		SDEI_LOG("< INFO:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_EVENT_UNREGISTER:
		SDEI_LOG("> UNREG(n:%d)\n", (int) x1);
		ret = sdei_event_unregister(x1);
		SDEI_LOG("< UNREG:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_PE_UNMASK:
		SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1());
		sdei_pe_unmask();
		SDEI_LOG("< UNMASK:%d\n", 0);
		SMC_RET1(handle, 0);
		break;

	case SDEI_PE_MASK:
		SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
		ret = sdei_pe_mask();
		SDEI_LOG("< MASK:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_INTERRUPT_BIND:
		SDEI_LOG("> BIND(%d)\n", (int) x1);
		ret = sdei_interrupt_bind(x1);
		SDEI_LOG("< BIND:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_INTERRUPT_RELEASE:
		SDEI_LOG("> REL(%d)\n", (int) x1);
		ret = sdei_interrupt_release(x1);
		SDEI_LOG("< REL:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_SHARED_RESET:
		SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_shared_reset();
		SDEI_LOG("< S_RESET:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_PRIVATE_RESET:
		SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_private_reset();
		SDEI_LOG("< P_RESET:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_EVENT_ROUTING_SET:
		SDEI_LOG("> ROUTE_SET(n:%d f:%lx aff:%lx)\n", (int) x1, x2, x3);
		ret = sdei_event_routing_set(x1, x2, x3);
		SDEI_LOG("< ROUTE_SET:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_FEATURES:
		SDEI_LOG("> FTRS(f:%lx)\n", x1);
		ret = sdei_features(x1);
		SDEI_LOG("< FTRS:%lx\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_EVENT_SIGNAL:
		SDEI_LOG("> SIGNAL(e:%lx t:%lx)\n", x1, x2);
		ret = sdei_signal(x1, x2);
		SDEI_LOG("< SIGNAL:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;
	default:
		break;
	}

	WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid);
	SMC_RET1(handle, SMC_UNK);
}
1064
1065/* Subscribe to PSCI CPU on to initialize per-CPU SDEI configuration */
1066SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, sdei_cpu_on_init);