BL31: Add SDEI dispatcher

The implementation currently supports only interrupt-based SDEI events,
and implements all interfaces defined by the SDEI specification version
1.0 [1].

Introduce the build option SDEI_SUPPORT to include the SDEI dispatcher
in BL31.

Update user guide and porting guide. SDEI documentation to follow.

[1] http://infocenter.arm.com/help/topic/com.arm.doc.den0054a/ARM_DEN0054A_Software_Delegated_Exception_Interface.pdf

Change-Id: I758b733084e4ea3b27ac77d0259705565842241a
Co-authored-by: Yousuf A <yousuf.sait@arm.com>
Signed-off-by: Jeenu Viswambharan <jeenu.viswambharan@arm.com>
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 781e5af..336c295 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -36,6 +36,16 @@
 BL31_SOURCES		+=	bl31/ehf.c
 endif
 
+ifeq (${SDEI_SUPPORT},1)
+ifeq (${EL3_EXCEPTION_HANDLING},0)
+  $(error EL3_EXCEPTION_HANDLING must be 1 for SDEI support)
+endif
+BL31_SOURCES		+=	services/std_svc/sdei/sdei_event.c	\
+				services/std_svc/sdei/sdei_intr_mgmt.c	\
+				services/std_svc/sdei/sdei_main.c	\
+				services/std_svc/sdei/sdei_state.c
+endif
+
 BL31_LINKERFILE		:=	bl31/bl31.ld.S
 
 # Flag used to indicate if Crash reporting via console should be included
@@ -46,6 +56,8 @@
 
 $(eval $(call assert_boolean,CRASH_REPORTING))
 $(eval $(call assert_boolean,EL3_EXCEPTION_HANDLING))
+$(eval $(call assert_boolean,SDEI_SUPPORT))
 
 $(eval $(call add_define,CRASH_REPORTING))
 $(eval $(call add_define,EL3_EXCEPTION_HANDLING))
+$(eval $(call add_define,SDEI_SUPPORT))
diff --git a/docs/porting-guide.rst b/docs/porting-guide.rst
index af933fd..f020ec9 100644
--- a/docs/porting-guide.rst
+++ b/docs/porting-guide.rst
@@ -1904,6 +1904,74 @@
 assertion is raised if the value of the constant is not aligned to the cache
 line boundary.
 
+SDEI porting requirements
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The SDEI dispatcher requires the platform to provide the following macros and
+functions; some are mandatory, and others optional.
+
+Macros
+......
+
+Macro: PLAT_SDEI_NORMAL_PRI [mandatory]
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This macro must be defined to the EL3 exception priority level associated with
+Normal SDEI events on the platform. This must have a higher value (therefore of
+lower priority) than ``PLAT_SDEI_CRITICAL_PRI``.
+
+Macro: PLAT_SDEI_CRITICAL_PRI [mandatory]
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This macro must be defined to the EL3 exception priority level associated with
+Critical SDEI events on the platform. This must have a lower value (therefore of
+higher priority) than ``PLAT_SDEI_NORMAL_PRI``.
+
+It is recommended that SDEI exception priorities in general be assigned the
+lowest among Secure priorities. Among the SDEI exceptions, the Critical SDEI
+priority must be higher than the Normal SDEI priority.
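+
+For example, a platform might define the two priorities as follows (the values
+are illustrative only, and depend on the number of interrupt priority bits
+implemented by the platform's GIC)::
+
+  #define PLAT_SDEI_CRITICAL_PRI    0x60
+  #define PLAT_SDEI_NORMAL_PRI      0x70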
+
+Functions
+.........
+
+Function: int plat_sdei_validate_entry_point(uintptr_t ep, unsigned int client_mode) [optional]
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+::
+
+  Argument: uintptr_t
+  Argument: unsigned int
+  Return: int
+
+This function validates the address of client entry points provided for both
+event registration and *Complete and Resume* SDEI calls. The function takes two
+arguments: the address of the handler the SDEI client requested to register,
+and the exception level of the client. The function must return ``0`` for
+successful validation, or ``-1`` upon failure.
+
+The default implementation always returns ``0``. On ARM platforms, this
+function translates the entry point to a physical address, and further ensures
+that the address is located in Non-secure DRAM.
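+
+A minimal sketch of a platform override is shown below, assuming a hypothetical
+Non-secure DRAM window described by ``PLAT_NS_DRAM_BASE`` and
+``PLAT_NS_DRAM_SIZE`` (neither constant is defined by this patch)::
+
+  int plat_sdei_validate_entry_point(uintptr_t ep, unsigned int client_mode)
+  {
+          /* Accept only entry points within the Non-secure DRAM window */
+          if ((ep >= PLAT_NS_DRAM_BASE) &&
+                          (ep < (PLAT_NS_DRAM_BASE + PLAT_NS_DRAM_SIZE)))
+                  return 0;
+
+          return -1;
+  }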
+
+Function: void plat_sdei_handle_masked_trigger(uint64_t mpidr, unsigned int intr) [optional]
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+::
+
+  Argument: uint64_t
+  Argument: unsigned int
+  Return: void
+
+The SDEI specification requires that a PE come out of reset with SDEI events
+masked. The client is therefore expected to call ``PE_UNMASK`` to unmask SDEI
+events on the PE; no SDEI events can be dispatched until then.
+
+Should a PE receive an interrupt that was bound to an SDEI event while the
+events are masked on the PE, the dispatcher implementation invokes the function
+``plat_sdei_handle_masked_trigger``. The MPIDR of the PE that received the
+interrupt and the interrupt ID are passed as parameters.
+
+The default implementation only prints out a warning message.
+
 Power State Coordination Interface (in BL31)
 --------------------------------------------
 
diff --git a/docs/user-guide.rst b/docs/user-guide.rst
index 8ae5e9e..172e793 100644
--- a/docs/user-guide.rst
+++ b/docs/user-guide.rst
@@ -534,6 +534,12 @@
    optional. It is only needed if the platform makefile specifies that it
    is required in order to build the ``fwu_fip`` target.
 
+-  ``SDEI_SUPPORT``: Setting this to ``1`` enables support for the Software
+   Delegated Exception Interface in the BL31 image. This defaults to ``0``.
+
+   When set to ``1``, the build option ``EL3_EXCEPTION_HANDLING`` must also be
+   set to ``1``.
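+
+   For example, to build BL31 with SDEI support (the platform name here is
+   illustrative only)::
+
+       make PLAT=fvp EL3_EXCEPTION_HANDLING=1 SDEI_SUPPORT=1 bl31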
+
 -  ``SEPARATE_CODE_AND_RODATA``: Whether code and read-only data should be
    isolated on separate memory pages. This is a trade-off between security and
    memory usage. See "Isolating code and read-only data on separate memory
diff --git a/include/plat/common/platform.h b/include/plat/common/platform.h
index 086e5e6..f11bee9 100644
--- a/include/plat/common/platform.h
+++ b/include/plat/common/platform.h
@@ -114,6 +114,16 @@
 void bl1_platform_setup(void);
 struct meminfo *bl1_plat_sec_mem_layout(void);
 
+/*******************************************************************************
+ * Optional EL3 component functions in BL31
+ ******************************************************************************/
+
+/* SDEI platform functions */
+#if SDEI_SUPPORT
+int plat_sdei_validate_entry_point(uintptr_t ep, unsigned int client_mode);
+void plat_sdei_handle_masked_trigger(uint64_t mpidr, unsigned int intr);
+#endif
+
 /*
  * The following function is mandatory when the
  * firmware update feature is used.
diff --git a/include/services/sdei.h b/include/services/sdei.h
new file mode 100644
index 0000000..72eb6d7
--- /dev/null
+++ b/include/services/sdei.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SDEI_H__
+#define __SDEI_H__
+
+#include <spinlock.h>
+#include <utils_def.h>
+
+/* Range 0xC4000020 - 0xC400003F reserved for SDEI 64-bit SMC calls */
+#define SDEI_VERSION				0xC4000020
+#define SDEI_EVENT_REGISTER			0xC4000021
+#define SDEI_EVENT_ENABLE			0xC4000022
+#define SDEI_EVENT_DISABLE			0xC4000023
+#define SDEI_EVENT_CONTEXT			0xC4000024
+#define SDEI_EVENT_COMPLETE			0xC4000025
+#define SDEI_EVENT_COMPLETE_AND_RESUME		0xC4000026
+
+#define SDEI_EVENT_UNREGISTER			0xC4000027
+#define SDEI_EVENT_STATUS			0xC4000028
+#define SDEI_EVENT_GET_INFO			0xC4000029
+#define SDEI_EVENT_ROUTING_SET			0xC400002A
+#define SDEI_PE_MASK				0xC400002B
+#define SDEI_PE_UNMASK				0xC400002C
+
+#define SDEI_INTERRUPT_BIND			0xC400002D
+#define SDEI_INTERRUPT_RELEASE			0xC400002E
+#define SDEI_EVENT_SIGNAL			0xC400002F
+#define SDEI_FEATURES				0xC4000030
+#define SDEI_PRIVATE_RESET			0xC4000031
+#define SDEI_SHARED_RESET			0xC4000032
+
+/* SDEI_EVENT_REGISTER flags */
+#define SDEI_REGF_RM_ANY	0
+#define SDEI_REGF_RM_PE		1
+
+/* SDEI_EVENT_COMPLETE status flags */
+#define SDEI_EV_HANDLED		0
+#define SDEI_EV_FAILED		1
+
+/* SDEI event status values, as bit positions */
+#define SDEI_STATF_REGISTERED		0
+#define SDEI_STATF_ENABLED		1
+#define SDEI_STATF_RUNNING		2
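+
+/*
+ * For example, an event that is registered and enabled, but not currently
+ * running, has the composite status value 0x3 (bits 0 and 1 set).
+ */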
+
+/* Internal: SDEI flag bit positions */
+#define _SDEI_MAPF_DYNAMIC_SHIFT	1
+#define _SDEI_MAPF_BOUND_SHIFT		2
+#define _SDEI_MAPF_SIGNALABLE_SHIFT	3
+#define _SDEI_MAPF_PRIVATE_SHIFT	4
+#define _SDEI_MAPF_CRITICAL_SHIFT	5
+
+/* SDEI event 0 */
+#define SDEI_EVENT_0	0
+
+/* Placeholder interrupt for dynamic mapping */
+#define SDEI_DYN_IRQ	0
+
+/* SDEI flags */
+
+/*
+ * These flags determine whether or not an event can be associated with an
+ * interrupt. Static events are permanently associated with an interrupt, and
+ * the association can't be changed at runtime. The association of dynamic
+ * events with interrupts can be changed at run time using the
+ * SDEI_INTERRUPT_BIND and SDEI_INTERRUPT_RELEASE calls.
+ *
+ * SDEI_MAPF_DYNAMIC only indicates run time configurability, whereas
+ * SDEI_MAPF_BOUND indicates interrupt association. For example:
+ *
+ *  - A dynamic event on which SDEI_INTERRUPT_BIND has been called will have
+ *    both SDEI_MAPF_DYNAMIC and SDEI_MAPF_BOUND set.
+ *
+ *  - Statically-bound events will always have SDEI_MAPF_BOUND set, and neither
+ *    SDEI_INTERRUPT_BIND nor SDEI_INTERRUPT_RELEASE can be called on them.
+ *
+ * See also the is_map_bound() macro.
+ */
+#define SDEI_MAPF_DYNAMIC	BIT(_SDEI_MAPF_DYNAMIC_SHIFT)
+#define SDEI_MAPF_BOUND		BIT(_SDEI_MAPF_BOUND_SHIFT)
+
+#define SDEI_MAPF_SIGNALABLE	BIT(_SDEI_MAPF_SIGNALABLE_SHIFT)
+#define SDEI_MAPF_PRIVATE	BIT(_SDEI_MAPF_PRIVATE_SHIFT)
+#define SDEI_MAPF_CRITICAL	BIT(_SDEI_MAPF_CRITICAL_SHIFT)
+
+/* Indices of private and shared mappings */
+#define _SDEI_MAP_IDX_PRIV	0
+#define _SDEI_MAP_IDX_SHRD	1
+#define _SDEI_MAP_IDX_MAX	2
+
+/* The macros below are used to identify SDEI calls from the SMC function ID */
+#define SDEI_FID_MASK		U(0xffe0)
+#define SDEI_FID_VALUE		U(0x20)
+#define is_sdei_fid(_fid) \
+	((((_fid) & SDEI_FID_MASK) == SDEI_FID_VALUE) && \
+	 (((_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64))
+
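+/*
+ * For example, for SDEI_VERSION (0xC4000020), the function number masked with
+ * SDEI_FID_MASK yields SDEI_FID_VALUE (0x20), and the calling convention
+ * field indicates SMC64, so is_sdei_fid() evaluates to true.
+ */
+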
+#define SDEI_EVENT_MAP(_event, _intr, _flags) \
+	{ \
+		.ev_num = _event, \
+		.intr = _intr, \
+		.map_flags = _flags \
+	}
+
+#define SDEI_SHARED_EVENT(_event, _intr, _flags) \
+	SDEI_EVENT_MAP(_event, _intr, _flags)
+
+#define SDEI_PRIVATE_EVENT(_event, _intr, _flags) \
+	SDEI_EVENT_MAP(_event, _intr, _flags | SDEI_MAPF_PRIVATE)
+
+#define SDEI_DEFINE_EVENT_0(_intr) \
+	SDEI_PRIVATE_EVENT(SDEI_EVENT_0, _intr, SDEI_MAPF_SIGNALABLE)
+
+/*
+ * Declare private event entries for each core, and a single set of shared
+ * event entries. Also declare a global structure containing the private and
+ * shared mappings.
+ *
+ * This macro must be used in the same file in which the platform SDEI
+ * mappings are declared; only then would ARRAY_SIZE() yield a meaningful
+ * value.
+ */
+#define REGISTER_SDEI_MAP(_private, _shared) \
+	sdei_entry_t sdei_private_event_table \
+		[PLATFORM_CORE_COUNT * ARRAY_SIZE(_private)]; \
+	sdei_entry_t sdei_shared_event_table[ARRAY_SIZE(_shared)]; \
+	const sdei_mapping_t sdei_global_mappings[] = { \
+		[_SDEI_MAP_IDX_PRIV] = { \
+			.map = _private, \
+			.num_maps = ARRAY_SIZE(_private) \
+		}, \
+		[_SDEI_MAP_IDX_SHRD] = { \
+			.map = _shared, \
+			.num_maps = ARRAY_SIZE(_shared) \
+		}, \
+	}
+
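+/*
+ * Example usage (illustrative only: the event numbers, the SGI number used
+ * for event 0, and the array names below are hypothetical, not mandated by
+ * this interface):
+ *
+ *   static sdei_ev_map_t plat_private_sdei[] = {
+ *           SDEI_DEFINE_EVENT_0(8),
+ *           SDEI_PRIVATE_EVENT(100, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
+ *   };
+ *
+ *   static sdei_ev_map_t plat_shared_sdei[] = {
+ *           SDEI_SHARED_EVENT(200, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
+ *   };
+ *
+ *   REGISTER_SDEI_MAP(plat_private_sdei, plat_shared_sdei);
+ */
+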
+typedef uint8_t sdei_state_t;
+
+/* Runtime data of SDEI event */
+typedef struct sdei_entry {
+	uint64_t ep;		/* Entry point */
+	uint64_t arg;		/* Entry point argument */
+	uint64_t affinity;	/* Affinity of shared event */
+	unsigned int reg_flags;	/* Registration flags */
+
+	/* Event handler states: registered, enabled, running */
+	sdei_state_t state;
+} sdei_entry_t;
+
+/* Mapping of SDEI events to interrupts, and associated data */
+typedef struct sdei_ev_map {
+	int32_t ev_num;		/* Event number */
+	unsigned int intr;	/* Physical interrupt number for a bound map */
+	unsigned int map_flags;	/* Mapping flags, see SDEI_MAPF_* */
+	unsigned int reg_count;	/* Registration count */
+	spinlock_t lock;	/* Per-event lock */
+} sdei_ev_map_t;
+
+typedef struct sdei_mapping {
+	sdei_ev_map_t *map;
+	size_t num_maps;
+} sdei_mapping_t;
+
+/* Top-level handler for SDEI SMC calls */
+uint64_t sdei_smc_handler(uint32_t smc_fid,
+		uint64_t x1,
+		uint64_t x2,
+		uint64_t x3,
+		uint64_t x4,
+		void *cookie,
+		void *handle,
+		uint64_t flags);
+
+void sdei_init(void);
+
+#endif /* __SDEI_H__ */
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index 7299dc4..660e54e 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -114,6 +114,9 @@
 # For Chain of Trust
 SAVE_KEYS			:= 0
 
+# Software Delegated Exception support
+SDEI_SUPPORT			:= 0
+
 # Whether code and read-only data should be put on separate memory pages. The
 # platform Makefile is free to override this value.
 SEPARATE_CODE_AND_RODATA	:= 0
diff --git a/plat/common/aarch64/plat_common.c b/plat/common/aarch64/plat_common.c
index 05084e1..a87e7c6 100644
--- a/plat/common/aarch64/plat_common.c
+++ b/plat/common/aarch64/plat_common.c
@@ -3,6 +3,8 @@
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
+
+#include <arch_helpers.h>
 #include <assert.h>
 #include <console.h>
 #include <platform.h>
@@ -20,6 +22,11 @@
 #pragma weak plat_get_syscnt_freq2
 #endif /* ERROR_DEPRECATED */
 
+#if SDEI_SUPPORT
+#pragma weak plat_sdei_handle_masked_trigger
+#pragma weak plat_sdei_validate_entry_point
+#endif
+
 void bl31_plat_enable_mmu(uint32_t flags)
 {
 	enable_mmu_el3(flags);
@@ -64,3 +71,22 @@
 	return (unsigned int)freq;
 }
 #endif /* ERROR_DEPRECATED */
+
+#if SDEI_SUPPORT
+/*
+ * Default function that handles an SDEI interrupt received while events are
+ * masked on the PE; it only prints a warning.
+ */
+void plat_sdei_handle_masked_trigger(uint64_t mpidr, unsigned int intr)
+{
+	WARN("Spurious SDEI interrupt %u on masked PE %lx\n", intr, mpidr);
+}
+
+/*
+ * Default function to validate an SDEI entry point, which always returns
+ * success. Platforms may override this with their own validation mechanism.
+ */
+int plat_sdei_validate_entry_point(uintptr_t ep, unsigned int client_mode)
+{
+	return 0;
+}
+#endif
diff --git a/services/std_svc/sdei/sdei_event.c b/services/std_svc/sdei/sdei_event.c
new file mode 100644
index 0000000..bf0e779
--- /dev/null
+++ b/services/std_svc/sdei/sdei_event.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <utils.h>
+#include "sdei_private.h"
+
+#define MAP_OFF(_map, _mapping) ((_map) - (_mapping)->map)
+
+/*
+ * Get the SDEI entry for the given mapping: returns a pointer to the SDEI
+ * entry. This lookup cannot fail.
+ *
+ * Both shared and private maps are stored in single-dimensional arrays.
+ * Private event entries are kept for each PE, forming a 2D array.
+ */
+sdei_entry_t *get_event_entry(sdei_ev_map_t *map)
+{
+	const sdei_mapping_t *mapping;
+	sdei_entry_t *cpu_priv_base;
+	unsigned int idx, base_idx;
+
+	if (is_event_private(map)) {
+		/*
+		 * For a private map, find the index of the mapping in the
+		 * array.
+		 */
+		mapping = SDEI_PRIVATE_MAPPING();
+		idx = MAP_OFF(map, mapping);
+
+		/* Base of private mappings for this CPU */
+		base_idx = plat_my_core_pos() * mapping->num_maps;
+		cpu_priv_base = &sdei_private_event_table[base_idx];
+
+		/*
+		 * Return the address of the entry at the same index in the
+		 * per-CPU event entry.
+		 */
+		return &cpu_priv_base[idx];
+	} else {
+		mapping = SDEI_SHARED_MAPPING();
+		idx = MAP_OFF(map, mapping);
+
+		return &sdei_shared_event_table[idx];
+	}
+}
+
+/*
+ * Find event mapping for a given interrupt number: On success, returns pointer
+ * to the event mapping. On error, returns NULL.
+ */
+sdei_ev_map_t *find_event_map_by_intr(int intr_num, int shared)
+{
+	const sdei_mapping_t *mapping;
+	sdei_ev_map_t *map;
+	unsigned int i;
+
+	/*
+	 * Look for a match in the private or shared mappings, as requested.
+	 * This is a linear search; as the mappings are required to be sorted,
+	 * binary search could be considered for large maps.
+	 */
+	mapping = shared ? SDEI_SHARED_MAPPING() : SDEI_PRIVATE_MAPPING();
+	iterate_mapping(mapping, i, map) {
+		if (map->intr == intr_num)
+			return map;
+	}
+
+	return NULL;
+}
+
+/*
+ * Find event mapping for a given event number: On success returns pointer to
+ * the event mapping. On error, returns NULL.
+ */
+sdei_ev_map_t *find_event_map(int ev_num)
+{
+	const sdei_mapping_t *mapping;
+	sdei_ev_map_t *map;
+	unsigned int i, j;
+
+	/*
+	 * Iterate through mappings to find a match. This is a linear search;
+	 * as the mappings are required to be sorted, binary search could be
+	 * considered for large maps.
+	 */
+	for_each_mapping_type(i, mapping) {
+		iterate_mapping(mapping, j, map) {
+			if (map->ev_num == ev_num)
+				return map;
+		}
+	}
+
+	return NULL;
+}
diff --git a/services/std_svc/sdei/sdei_intr_mgmt.c b/services/std_svc/sdei/sdei_intr_mgmt.c
new file mode 100644
index 0000000..d7cf289
--- /dev/null
+++ b/services/std_svc/sdei/sdei_intr_mgmt.c
@@ -0,0 +1,590 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <cassert.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <ehf.h>
+#include <interrupt_mgmt.h>
+#include <runtime_svc.h>
+#include <sdei.h>
+#include <string.h>
+#include "sdei_private.h"
+
+#define PE_MASKED	1
+#define PE_NOT_MASKED	0
+
+/* x0-x17 GPREGS context */
+#define SDEI_SAVED_GPREGS	18
+
+/* Maximum preemption nesting levels: Critical priority and Normal priority */
+#define MAX_EVENT_NESTING	2
+
+/* Per-CPU SDEI state access macro */
+#define sdei_get_this_pe_state()	(&sdei_cpu_state[plat_my_core_pos()])
+
+/* Structure to store information about an outstanding dispatch */
+typedef struct sdei_dispatch_context {
+	sdei_ev_map_t *map;
+	unsigned int sec_state;
+	unsigned int intr_raw;
+	uint64_t x[SDEI_SAVED_GPREGS];
+
+	/* Exception state registers */
+	uint64_t elr_el3;
+	uint64_t spsr_el3;
+} sdei_dispatch_context_t;
+
+/* Per-CPU SDEI state data */
+typedef struct sdei_cpu_state {
+	sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
+	unsigned short stack_top; /* Empty ascending */
+	unsigned int pe_masked:1;
+	unsigned int pending_enables:1;
+} sdei_cpu_state_t;
+
+/* SDEI states for all cores in the system */
+static sdei_cpu_state_t sdei_cpu_state[PLATFORM_CORE_COUNT];
+
+unsigned int sdei_pe_mask(void)
+{
+	unsigned int ret;
+	sdei_cpu_state_t *state = sdei_get_this_pe_state();
+
+	/*
+	 * The return value indicates whether this call had any effect on the
+	 * mask status of this PE.
+	 */
+	ret = (state->pe_masked ^ PE_MASKED);
+	state->pe_masked = PE_MASKED;
+
+	return ret;
+}
+
+void sdei_pe_unmask(void)
+{
+	int i;
+	sdei_ev_map_t *map;
+	sdei_entry_t *se;
+	sdei_cpu_state_t *state = sdei_get_this_pe_state();
+	uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
+
+	/*
+	 * If there are pending enables, iterate through the private mappings
+	 * and enable those bound maps that are in enabled state. Also, iterate
+	 * through shared mappings and enable interrupts of events that are
+	 * targeted to this PE.
+	 */
+	if (state->pending_enables) {
+		for_each_private_map(i, map) {
+			se = get_event_entry(map);
+			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
+				plat_ic_enable_interrupt(map->intr);
+		}
+
+		for_each_shared_map(i, map) {
+			se = get_event_entry(map);
+
+			sdei_map_lock(map);
+			if (is_map_bound(map) &&
+					GET_EV_STATE(se, ENABLED) &&
+					(se->reg_flags == SDEI_REGF_RM_PE) &&
+					(se->affinity == my_mpidr)) {
+				plat_ic_enable_interrupt(map->intr);
+			}
+			sdei_map_unlock(map);
+		}
+	}
+
+	state->pending_enables = 0;
+	state->pe_masked = PE_NOT_MASKED;
+}
+
+/* Push a dispatch context to the dispatch stack */
+static sdei_dispatch_context_t *push_dispatch(void)
+{
+	sdei_cpu_state_t *state = sdei_get_this_pe_state();
+	sdei_dispatch_context_t *disp_ctx;
+
+	/* Cannot have more than max events */
+	assert(state->stack_top < MAX_EVENT_NESTING);
+
+	disp_ctx = &state->dispatch_stack[state->stack_top];
+	state->stack_top++;
+
+	return disp_ctx;
+}
+
+/* Pop a dispatch context from the dispatch stack */
+static sdei_dispatch_context_t *pop_dispatch(void)
+{
+	sdei_cpu_state_t *state = sdei_get_this_pe_state();
+
+	if (state->stack_top == 0)
+		return NULL;
+
+	assert(state->stack_top <= MAX_EVENT_NESTING);
+
+	state->stack_top--;
+
+	return &state->dispatch_stack[state->stack_top];
+}
+
+/* Retrieve the context at the top of dispatch stack */
+static sdei_dispatch_context_t *get_outstanding_dispatch(void)
+{
+	sdei_cpu_state_t *state = sdei_get_this_pe_state();
+
+	if (state->stack_top == 0)
+		return NULL;
+
+	assert(state->stack_top <= MAX_EVENT_NESTING);
+
+	return &state->dispatch_stack[state->stack_top - 1];
+}
+
+static void save_event_ctx(sdei_ev_map_t *map, void *tgt_ctx, int sec_state,
+		unsigned int intr_raw)
+{
+	sdei_dispatch_context_t *disp_ctx;
+	gp_regs_t *tgt_gpregs;
+	el3_state_t *tgt_el3;
+
+	assert(tgt_ctx);
+	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
+	tgt_el3 = get_el3state_ctx(tgt_ctx);
+
+	disp_ctx = push_dispatch();
+	assert(disp_ctx);
+	disp_ctx->sec_state = sec_state;
+	disp_ctx->map = map;
+	disp_ctx->intr_raw = intr_raw;
+
+	/* Save general purpose and exception registers */
+	memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
+	disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
+	disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);
+}
+
+static void restore_event_ctx(sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
+{
+	gp_regs_t *tgt_gpregs;
+	el3_state_t *tgt_el3;
+
+	assert(tgt_ctx);
+	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
+	tgt_el3 = get_el3state_ctx(tgt_ctx);
+
+	CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
+			sdei_saved_gpregs_size_mismatch);
+
+	/* Restore general purpose and exception registers */
+	memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
+	write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
+	write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);
+}
+
+static void save_secure_context(void)
+{
+	cm_el1_sysregs_context_save(SECURE);
+}
+
+/* Restore Secure context and arrange to resume it at the next ERET */
+static void restore_and_resume_secure_context(void)
+{
+	cm_el1_sysregs_context_restore(SECURE);
+	cm_set_next_eret_context(SECURE);
+}
+
+/*
+ * Restore Non-secure context and arrange to resume it at the next ERET. Return
+ * pointer to the Non-secure context.
+ */
+static cpu_context_t *restore_and_resume_ns_context(void)
+{
+	cpu_context_t *ns_ctx;
+
+	cm_el1_sysregs_context_restore(NON_SECURE);
+	cm_set_next_eret_context(NON_SECURE);
+
+	ns_ctx = cm_get_context(NON_SECURE);
+	assert(ns_ctx);
+
+	return ns_ctx;
+}
+
+/*
+ * Populate the Non-secure context so that the next ERET will dispatch to the
+ * SDEI client.
+ */
+static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
+		cpu_context_t *ctx, int sec_state_to_resume,
+		unsigned int intr_raw)
+{
+	el3_state_t *el3_ctx = get_el3state_ctx(ctx);
+
+	/* Push the event and context */
+	save_event_ctx(map, ctx, sec_state_to_resume, intr_raw);
+
+	/*
+	 * Setup handler arguments:
+	 *
+	 * - x0: Event number
+	 * - x1: Handler argument supplied at the time of event registration
+	 * - x2: Interrupted PC
+	 * - x3: Interrupted SPSR
+	 */
+	SMC_SET_GP(ctx, CTX_GPREG_X0, map->ev_num);
+	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
+	SMC_SET_GP(ctx, CTX_GPREG_X2, read_ctx_reg(el3_ctx, CTX_ELR_EL3));
+	SMC_SET_GP(ctx, CTX_GPREG_X3, read_ctx_reg(el3_ctx, CTX_SPSR_EL3));
+
+	/*
+	 * Prepare for ERET:
+	 *
+	 * - Set PC to the registered handler address
+	 * - Set SPSR to jump to client EL with exceptions masked
+	 */
+	cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep,
+			SPSR_64(sdei_client_el(), MODE_SP_ELX,
+				DISABLE_ALL_EXCEPTIONS));
+}
+
+/* Handle a triggered SDEI interrupt while events were masked on this PE */
+static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
+		sdei_cpu_state_t *state, unsigned int intr_raw)
+{
+	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
+	int disable = 0;
+
+	/* Nothing to do for event 0 */
+	if (map->ev_num == SDEI_EVENT_0)
+		return;
+
+	/*
+	 * For a private event, or for a shared event specifically routed to
+	 * this CPU, we disable interrupt, leave the interrupt pending, and do
+	 * EOI.
+	 */
+	if (is_event_private(map)) {
+		disable = 1;
+	} else if (se->reg_flags == SDEI_REGF_RM_PE) {
+		assert(se->affinity == my_mpidr);
+		disable = 1;
+	}
+
+	if (disable) {
+		plat_ic_disable_interrupt(map->intr);
+		plat_ic_set_interrupt_pending(map->intr);
+		plat_ic_end_of_interrupt(intr_raw);
+		state->pending_enables = 1;
+
+		return;
+	}
+
+	/*
+	 * We just received a shared event with routing set to ANY PE. The
+	 * interrupt can't be delegated on this PE as SDEI events are masked.
+	 * However, because its routing mode is ANY, it is possible that the
+	 * event can be delegated on any other PE that hasn't masked events.
+	 * Therefore, we set the interrupt back pending so as to give other
+	 * suitable PEs a chance of handling it.
+	 */
+	assert(plat_ic_is_spi(map->intr));
+	plat_ic_set_interrupt_pending(map->intr);
+
+	/*
+	 * Leaving the same interrupt pending also means that the same interrupt
+	 * can target this PE again as soon as this PE leaves EL3. Whether and
+	 * how often that happens depends on the implementation of GIC.
+	 *
+	 * We therefore call a platform handler to resolve this situation.
+	 */
+	plat_sdei_handle_masked_trigger(my_mpidr, map->intr);
+
+	/* This PE is masked. We EOI the interrupt, as it can't be delegated */
+	plat_ic_end_of_interrupt(intr_raw);
+}
+
+/* SDEI main interrupt handler */
+int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
+		void *cookie)
+{
+	sdei_entry_t *se;
+	cpu_context_t *ctx;
+	sdei_ev_map_t *map;
+	sdei_dispatch_context_t *disp_ctx;
+	unsigned int sec_state;
+	sdei_cpu_state_t *state;
+	uint32_t intr;
+
+	/*
+	 * To handle an event, the following conditions must be true:
+	 *
+	 * 1. Event must be signalled
+	 * 2. Event must be enabled
+	 * 3. This PE must be a target PE for the event
+	 * 4. PE must be unmasked for SDEI
+	 * 5. If this is a normal event, no event must be running
+	 * 6. If this is a critical event, no critical event must be running
+	 *
+	 * (1) and (2) are true when this function is running
+	 * (3) is enforced in GIC by selecting the appropriate routing option
+	 * (4) is satisfied by client calling PE_UNMASK
+	 * (5) and (6) are enforced using interrupt priority, the RPR, in GIC:
+	 *   - Normal SDEI events belong to the Normal SDEI priority class
+	 *   - Critical SDEI events belong to the Critical SDEI priority class
+	 *
+	 * The interrupt has already been acknowledged, and therefore is active,
+	 * so no other PE can handle this event while we are at it.
+	 *
+	 * Find out if this is an SDEI interrupt: there must be an event mapped
+	 * to this interrupt.
+	 */
+	intr = plat_ic_get_interrupt_id(intr_raw);
+	map = find_event_map_by_intr(intr, plat_ic_is_spi(intr));
+	if (!map) {
+		ERROR("No SDEI map for interrupt %u\n", intr);
+		panic();
+	}
+
+	/*
+	 * The received interrupt number must either correspond to event 0, or
+	 * must be a bound interrupt.
+	 */
+	assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));
+
+	se = get_event_entry(map);
+	state = sdei_get_this_pe_state();
+
+	if (state->pe_masked == PE_MASKED) {
+		/*
+		 * Interrupts received while this PE was masked can't be
+		 * dispatched.
+		 */
+		SDEI_LOG("interrupt %u on %lx while PE masked\n", map->intr,
+				read_mpidr_el1());
+		if (is_event_shared(map))
+			sdei_map_lock(map);
+
+		handle_masked_trigger(map, se, state, intr_raw);
+
+		if (is_event_shared(map))
+			sdei_map_unlock(map);
+
+		return 0;
+	}
+
+	/* Insert load barrier for signalled SDEI event */
+	if (map->ev_num == SDEI_EVENT_0)
+		dmbld();
+
+	if (is_event_shared(map))
+		sdei_map_lock(map);
+
+	/* Assert that a shared event routed to this PE was configured as such */
+	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
+		assert(se->affinity ==
+				(read_mpidr_el1() & MPIDR_AFFINITY_MASK));
+	}
+
+	if (!can_sdei_state_trans(se, DO_DISPATCH)) {
+		SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
+				map->ev_num, se->state);
+
+		/*
+		 * If the event is registered, leave the interrupt pending so
+		 * that it's delivered when the event is enabled.
+		 */
+		if (GET_EV_STATE(se, REGISTERED))
+			plat_ic_set_interrupt_pending(map->intr);
+
+		/*
+		 * The interrupt was disabled or unregistered after the handler
+		 * started to execute, which means now the interrupt is already
+		 * disabled and we just need to EOI the interrupt.
+		 */
+		plat_ic_end_of_interrupt(intr_raw);
+
+		if (is_event_shared(map))
+			sdei_map_unlock(map);
+
+		return 0;
+	}
+
+	disp_ctx = get_outstanding_dispatch();
+	if (is_event_critical(map)) {
+		/*
+		 * If this event is Critical, and if there's an outstanding
+		 * dispatch, assert the latter is a Normal dispatch. Critical
+		 * events can preempt an outstanding Normal event dispatch.
+		 */
+		if (disp_ctx)
+			assert(is_event_normal(disp_ctx->map));
+	} else {
+		/*
+		 * If this event is Normal, assert that there are no outstanding
+		 * dispatches. Normal events can't preempt any outstanding event
+		 * dispatches.
+		 */
+		assert(disp_ctx == NULL);
+	}
+
+	sec_state = get_interrupt_src_ss(flags);
+
+	if (is_event_shared(map))
+		sdei_map_unlock(map);
+
+	SDEI_LOG("ACK %lx, ev:%d ss:%d spsr:%lx ELR:%lx\n", read_mpidr_el1(),
+			map->ev_num, sec_state, read_spsr_el3(),
+			read_elr_el3());
+
+	ctx = handle;
+
+	/*
+	 * Check if we interrupted secure state. Perform a context switch so
+	 * that we can delegate to NS.
+	 */
+	if (sec_state == SECURE) {
+		save_secure_context();
+		ctx = restore_and_resume_ns_context();
+	}
+
+	setup_ns_dispatch(map, se, ctx, sec_state, intr_raw);
+
+	/*
+	 * End of interrupt is done in sdei_event_complete, when the client
+	 * signals completion.
+	 */
+	return 0;
+}
+
+int sdei_event_complete(int resume, uint64_t pc)
+{
+	sdei_dispatch_context_t *disp_ctx;
+	sdei_entry_t *se;
+	sdei_ev_map_t *map;
+	cpu_context_t *ctx;
+	sdei_action_t act;
+	unsigned int client_el = sdei_client_el();
+
+	/* Return error if called without an active event */
+	disp_ctx = pop_dispatch();
+	if (!disp_ctx)
+		return SDEI_EDENY;
+
+	/* Validate resumption point */
+	if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
+		return SDEI_EDENY;
+
+	map = disp_ctx->map;
+	assert(map);
+
+	se = get_event_entry(map);
+
+	SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
+			map->ev_num, read_spsr_el3(), read_elr_el3());
+
+	if (is_event_shared(map))
+		sdei_map_lock(map);
+
+	act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
+	if (!can_sdei_state_trans(se, act)) {
+		if (is_event_shared(map))
+			sdei_map_unlock(map);
+		return SDEI_EDENY;
+	}
+
+	/*
+	 * Restore the Non-secure context to how it was when originally
+	 * interrupted. Once done, it's up-to-date with the saved copy.
+	 */
+	ctx = cm_get_context(NON_SECURE);
+	restore_event_ctx(disp_ctx, ctx);
+
+	if (resume) {
+		/*
+		 * Complete-and-resume call. Prepare the Non-secure context
+		 * (currently active) for complete and resume.
+		 */
+		cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
+					MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));
+
+		/*
+		 * Make it look as if a synchronous exception were taken at the
+		 * supplied Non-secure resumption point. Populate SPSR and
+		 * ELR_ELx so that an ERET from there works as expected.
+		 *
+		 * The assumption is that the client, if necessary, would have
+		 * saved any live content in these registers before making this
+		 * call.
+		 */
+		if (client_el == MODE_EL2) {
+			write_elr_el2(disp_ctx->elr_el3);
+			write_spsr_el2(disp_ctx->spsr_el3);
+		} else {
+			/* EL1 */
+			write_elr_el1(disp_ctx->elr_el3);
+			write_spsr_el1(disp_ctx->spsr_el3);
+		}
+	}
+
+	/*
+	 * If the cause of dispatch originally interrupted the Secure world, and
+	 * if Non-secure world wasn't allowed to preempt Secure execution,
+	 * resume Secure.
+	 *
+	 * No need to save the Non-secure context ahead of a world switch: the
+	 * Non-secure context was fully saved before dispatch, and has been
+	 * returned to its pre-dispatch state.
+	 */
+	if ((disp_ctx->sec_state == SECURE) &&
+			(ehf_is_ns_preemption_allowed() == 0)) {
+		restore_and_resume_secure_context();
+	}
+
+	if ((map->ev_num == SDEI_EVENT_0) || is_map_bound(map)) {
+		/*
+		 * The event was dispatched after receiving an SDEI interrupt.
+		 * With the event handling completed, EOI the corresponding
+		 * interrupt.
+		 */
+		plat_ic_end_of_interrupt(disp_ctx->intr_raw);
+	}
+
+	if (is_event_shared(map))
+		sdei_map_unlock(map);
+
+	return 0;
+}
+
+int sdei_event_context(void *handle, unsigned int param)
+{
+	sdei_dispatch_context_t *disp_ctx;
+
+	if (param >= SDEI_SAVED_GPREGS)
+		return SDEI_EINVAL;
+
+	/* Get outstanding dispatch on this CPU */
+	disp_ctx = get_outstanding_dispatch();
+	if (!disp_ctx)
+		return SDEI_EDENY;
+
+	assert(disp_ctx->map);
+
+	if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
+		return SDEI_EDENY;
+
+	/*
+	 * No locking is required for the Running state, as this is the only
+	 * CPU which can complete the event.
+	 */
+
+	return disp_ctx->x[param];
+}
diff --git a/services/std_svc/sdei/sdei_main.c b/services/std_svc/sdei/sdei_main.c
new file mode 100644
index 0000000..c414bee
--- /dev/null
+++ b/services/std_svc/sdei/sdei_main.c
@@ -0,0 +1,1064 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl31.h>
+#include <bl_common.h>
+#include <cassert.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <ehf.h>
+#include <interrupt_mgmt.h>
+#include <platform.h>
+#include <pubsub.h>
+#include <runtime_svc.h>
+#include <sdei.h>
+#include <stddef.h>
+#include <string.h>
+#include <utils.h>
+#include "sdei_private.h"
+
+#define MAJOR_VERSION	1
+#define MINOR_VERSION	0
+#define VENDOR_VERSION	0
+
+#define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
+	((((unsigned long long)(_major)) << 48) | \
+	 (((unsigned long long)(_minor)) << 32) | \
+	 (_vendor))
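+/* For example, MAKE_SDEI_VERSION(1, 0, 0) yields 0x0001000000000000 */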
+
+#define LOWEST_INTR_PRIORITY		0xff
+
+#define is_valid_affinity(_mpidr)	(plat_core_pos_by_mpidr(_mpidr) >= 0)
+
+CASSERT(PLAT_SDEI_CRITICAL_PRI < PLAT_SDEI_NORMAL_PRI,
+		sdei_critical_must_have_higher_priority);
+
+static unsigned int num_dyn_priv_slots, num_dyn_shrd_slots;
+
+/* Initialise SDEI map entries */
+static void init_map(sdei_ev_map_t *map)
+{
+	map->reg_count = 0;
+}
+
+/* Convert mapping to SDEI class */
+sdei_class_t map_to_class(sdei_ev_map_t *map)
+{
+	return is_event_critical(map) ? SDEI_CRITICAL : SDEI_NORMAL;
+}
+
+/* Clear SDEI event entries except state */
+static void clear_event_entries(sdei_entry_t *se)
+{
+	se->ep = 0;
+	se->arg = 0;
+	se->affinity = 0;
+	se->reg_flags = 0;
+}
+
+/* Perform CPU-specific state initialisation */
+static void *sdei_cpu_on_init(const void *arg)
+{
+	int i;
+	sdei_ev_map_t *map;
+	sdei_entry_t *se;
+
+	/* Initialize private mappings on this CPU */
+	for_each_private_map(i, map) {
+		se = get_event_entry(map);
+		clear_event_entries(se);
+		se->state = 0;
+	}
+
+	SDEI_LOG("Private events initialized on %lx\n", read_mpidr_el1());
+
+	/* All PEs start with SDEI events masked */
+	sdei_pe_mask();
+
+	return 0;
+}
+
+/* Initialise an SDEI class */
+void sdei_class_init(sdei_class_t class)
+{
+	unsigned int i, zero_found __unused = 0;
+	int ev_num_so_far __unused;
+	sdei_ev_map_t *map;
+
+	/* Sanity check and configuration of shared events */
+	ev_num_so_far = -1;
+	for_each_shared_map(i, map) {
+#if ENABLE_ASSERTIONS
+		/* Ensure mappings are sorted */
+		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));
+
+		ev_num_so_far = map->ev_num;
+
+		/* Event 0 must not be shared */
+		assert(map->ev_num != SDEI_EVENT_0);
+
+		/* Check for valid event */
+		assert(map->ev_num >= 0);
+
+		/* Make sure it's a shared event */
+		assert(is_event_shared(map));
+
+		/* No shared mapping should have signalable property */
+		assert(!is_event_signalable(map));
+#endif
+
+		/* Skip mappings that are not of the class being initialised */
+		if (map_to_class(map) != class)
+			continue;
+
+		/* Platform events are always bound, so set the bound flag */
+		if (is_map_dynamic(map)) {
+			assert(map->intr == SDEI_DYN_IRQ);
+			num_dyn_shrd_slots++;
+		} else {
+			/* Shared mappings must be bound to shared interrupt */
+			assert(plat_ic_is_spi(map->intr));
+			set_map_bound(map);
+		}
+
+		init_map(map);
+	}
+
+	/* Sanity check and configuration of private events for this CPU */
+	ev_num_so_far = -1;
+	for_each_private_map(i, map) {
+#if ENABLE_ASSERTIONS
+		/* Ensure mappings are sorted */
+		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));
+
+		ev_num_so_far = map->ev_num;
+
+		if (map->ev_num == SDEI_EVENT_0) {
+			zero_found = 1;
+
+			/* Event 0 must be a Secure SGI */
+			assert(is_secure_sgi(map->intr));
+
+			/*
+			 * Event 0 can only have the signalable flag (apart
+			 * from being private).
+			 */
+			assert(map->map_flags == (SDEI_MAPF_SIGNALABLE |
+						SDEI_MAPF_PRIVATE));
+		} else {
+			/* No other mapping should have signalable property */
+			assert(!is_event_signalable(map));
+		}
+
+		/* Check for valid event */
+		assert(map->ev_num >= 0);
+
+		/* Make sure it's a private event */
+		assert(is_event_private(map));
+#endif
+
+		/* Skip mappings that are not of the class being initialised */
+		if (map_to_class(map) != class)
+			continue;
+
+		/* Platform events are always bound, so set the bound flag */
+		if (map->ev_num != SDEI_EVENT_0) {
+			if (is_map_dynamic(map)) {
+				assert(map->intr == SDEI_DYN_IRQ);
+				num_dyn_priv_slots++;
+			} else {
+				/*
+				 * Private mappings must be bound to private
+				 * interrupt.
+				 */
+				assert(plat_ic_is_ppi(map->intr));
+				set_map_bound(map);
+			}
+		}
+
+		init_map(map);
+	}
+
+	/* Ensure event 0 is in the mapping */
+	assert(zero_found);
+
+	sdei_cpu_on_init(NULL);
+}
+
+/* SDEI dispatcher initialisation */
+void sdei_init(void)
+{
+	sdei_class_init(SDEI_CRITICAL);
+	sdei_class_init(SDEI_NORMAL);
+
+	/* Register priority level handlers */
+	ehf_register_priority_handler(PLAT_SDEI_CRITICAL_PRI,
+			sdei_intr_handler);
+	ehf_register_priority_handler(PLAT_SDEI_NORMAL_PRI,
+			sdei_intr_handler);
+}
+
+/* Populate SDEI event entry */
+static void set_sdei_entry(sdei_entry_t *se, uint64_t ep, uint64_t arg,
+		unsigned int flags, uint64_t affinity)
+{
+	assert(se != NULL);
+
+	se->ep = ep;
+	se->arg = arg;
+	se->affinity = (affinity & MPIDR_AFFINITY_MASK);
+	se->reg_flags = flags;
+}
+
+static unsigned long long sdei_version(void)
+{
+	return MAKE_SDEI_VERSION(MAJOR_VERSION, MINOR_VERSION, VENDOR_VERSION);
+}
+
+/* Validate flags and MPIDR values for REGISTER and ROUTING_SET calls */
+static int validate_flags(uint64_t flags, uint64_t mpidr)
+{
+	/* Validate flags */
+	switch (flags) {
+	case SDEI_REGF_RM_PE:
+		if (!is_valid_affinity(mpidr))
+			return SDEI_EINVAL;
+		break;
+	case SDEI_REGF_RM_ANY:
+		break;
+	default:
+		/* Unknown flags */
+		return SDEI_EINVAL;
+	}
+
+	return 0;
+}
+
+/* Set routing of an SDEI event */
+static int sdei_event_routing_set(int ev_num, uint64_t flags, uint64_t mpidr)
+{
+	int ret, routing;
+	sdei_ev_map_t *map;
+	sdei_entry_t *se;
+
+	ret = validate_flags(flags, mpidr);
+	if (ret)
+		return ret;
+
+	/* Check if valid event number */
+	map = find_event_map(ev_num);
+	if (!map)
+		return SDEI_EINVAL;
+
+	/* The event must not be private */
+	if (is_event_private(map))
+		return SDEI_EINVAL;
+
+	se = get_event_entry(map);
+
+	sdei_map_lock(map);
+
+	if (!is_map_bound(map) || is_event_private(map)) {
+		ret = SDEI_EINVAL;
+		goto finish;
+	}
+
+	if (!can_sdei_state_trans(se, DO_ROUTING)) {
+		ret = SDEI_EDENY;
+		goto finish;
+	}
+
+	/* Choose appropriate routing */
+	routing = (flags == SDEI_REGF_RM_ANY) ? INTR_ROUTING_MODE_ANY :
+		INTR_ROUTING_MODE_PE;
+
+	/* Update event registration flag */
+	se->reg_flags = flags;
+
+	/*
+	 * ROUTING_SET is permissible only when event composite state is
+	 * 'registered, disabled, and not running'. This means that the
+	 * interrupt is currently disabled, and not active.
+	 */
+	plat_ic_set_spi_routing(map->intr, routing, (u_register_t) mpidr);
+
+finish:
+	sdei_map_unlock(map);
+
+	return ret;
+}
+
+/* Register handler and argument for an SDEI event */
+static int sdei_event_register(int ev_num, uint64_t ep, uint64_t arg,
+		uint64_t flags, uint64_t mpidr)
+{
+	int ret;
+	sdei_entry_t *se;
+	sdei_ev_map_t *map;
+	sdei_state_t backup_state;
+
+	if (!ep || (plat_sdei_validate_entry_point(ep, sdei_client_el()) != 0))
+		return SDEI_EINVAL;
+
+	ret = validate_flags(flags, mpidr);
+	if (ret)
+		return ret;
+
+	/* Check if valid event number */
+	map = find_event_map(ev_num);
+	if (!map)
+		return SDEI_EINVAL;
+
+	/* Private events always target the PE */
+	if (is_event_private(map))
+		flags = SDEI_REGF_RM_PE;
+
+	se = get_event_entry(map);
+
+	/*
+	 * Even though the register operation is per-event (and, for private
+	 * events, registration is required on each PE individually), it has to
+	 * be serialised with respect to bind/release, which are global
+	 * operations. So we hold the lock throughout, unconditionally.
+	 */
+	sdei_map_lock(map);
+
+	backup_state = se->state;
+	if (!can_sdei_state_trans(se, DO_REGISTER))
+		goto fallback;
+
+	/*
+	 * When registering for dynamic events, make sure it's been bound
+	 * already. This has to be the case as, without binding, the client
+	 * can't know about the event number to register for.
+	 */
+	if (is_map_dynamic(map) && !is_map_bound(map))
+		goto fallback;
+
+	if (is_event_private(map)) {
+		/* Multiple calls to register are possible for private events */
+		assert(map->reg_count >= 0);
+	} else {
+		/* Only a single call to register is possible for shared events */
+		assert(map->reg_count == 0);
+	}
+
+	if (is_map_bound(map)) {
+		/* Meanwhile, did any PE ACK the interrupt? */
+		if (plat_ic_get_interrupt_active(map->intr))
+			goto fallback;
+
+		/* The interrupt must currently be owned by Non-secure */
+		if (plat_ic_get_interrupt_type(map->intr) != INTR_TYPE_NS)
+			goto fallback;
+
+		/*
+		 * Disable forwarding of new interrupt triggers to CPU
+		 * interface.
+		 */
+		plat_ic_disable_interrupt(map->intr);
+
+		/*
+		 * Any events that are triggered after register and before
+		 * enable should remain pending. Clear any previous interrupt
+		 * triggers which are pending (except for SGIs). This has no
+		 * effect on level-triggered interrupts.
+		 */
+		if (ev_num != SDEI_EVENT_0)
+			plat_ic_clear_interrupt_pending(map->intr);
+
+		/* Map interrupt to EL3 and program the correct priority */
+		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_EL3);
+
+		/* Program the appropriate interrupt priority */
+		plat_ic_set_interrupt_priority(map->intr, sdei_event_priority(map));
+
+		/*
+		 * Set the routing mode for shared event as requested. We
+		 * already ensure that shared events get bound to SPIs.
+		 */
+		if (is_event_shared(map)) {
+			plat_ic_set_spi_routing(map->intr,
+					((flags == SDEI_REGF_RM_ANY) ?
+					 INTR_ROUTING_MODE_ANY :
+					 INTR_ROUTING_MODE_PE),
+					(u_register_t) mpidr);
+		}
+	}
+
+	/* Populate event entries */
+	set_sdei_entry(se, ep, arg, flags, mpidr);
+
+	/* Increment register count */
+	map->reg_count++;
+
+	sdei_map_unlock(map);
+
+	return 0;
+
+fallback:
+	/* Reinstate previous state */
+	se->state = backup_state;
+
+	sdei_map_unlock(map);
+
+	return SDEI_EDENY;
+}
+
+/* Enable SDEI event */
+static int sdei_event_enable(int ev_num)
+{
+	sdei_ev_map_t *map;
+	sdei_entry_t *se;
+	int ret, before, after;
+
+	/* Check if valid event number */
+	map = find_event_map(ev_num);
+	if (!map)
+		return SDEI_EINVAL;
+
+	se = get_event_entry(map);
+	ret = SDEI_EDENY;
+
+	if (is_event_shared(map))
+		sdei_map_lock(map);
+
+	before = GET_EV_STATE(se, ENABLED);
+	if (!can_sdei_state_trans(se, DO_ENABLE))
+		goto finish;
+	after = GET_EV_STATE(se, ENABLED);
+
+	/*
+	 * Enable interrupt for bound events only if there's a change in enabled
+	 * state.
+	 */
+	if (is_map_bound(map) && (!before && after))
+		plat_ic_enable_interrupt(map->intr);
+
+	ret = 0;
+
+finish:
+	if (is_event_shared(map))
+		sdei_map_unlock(map);
+
+	return ret;
+}
+
+/* Disable SDEI event */
+static int sdei_event_disable(int ev_num)
+{
+	sdei_ev_map_t *map;
+	sdei_entry_t *se;
+	int ret, before, after;
+
+	/* Check if valid event number */
+	map = find_event_map(ev_num);
+	if (!map)
+		return SDEI_EINVAL;
+
+	se = get_event_entry(map);
+	ret = SDEI_EDENY;
+
+	if (is_event_shared(map))
+		sdei_map_lock(map);
+
+	before = GET_EV_STATE(se, ENABLED);
+	if (!can_sdei_state_trans(se, DO_DISABLE))
+		goto finish;
+	after = GET_EV_STATE(se, ENABLED);
+
+	/*
+	 * Disable interrupt for bound events only if there's a change in
+	 * enabled state.
+	 */
+	if (is_map_bound(map) && (before && !after))
+		plat_ic_disable_interrupt(map->intr);
+
+	ret = 0;
+
+finish:
+	if (is_event_shared(map))
+		sdei_map_unlock(map);
+
+	return ret;
+}
+
+/* Query SDEI event information */
+static uint64_t sdei_event_get_info(int ev_num, int info)
+{
+	sdei_entry_t *se;
+	sdei_ev_map_t *map;
+
+	unsigned int flags, registered;
+	uint64_t affinity;
+
+	/* Check if valid event number */
+	map = find_event_map(ev_num);
+	if (!map)
+		return SDEI_EINVAL;
+
+	se = get_event_entry(map);
+
+	if (is_event_shared(map))
+		sdei_map_lock(map);
+
+	/* Sample state under lock */
+	registered = GET_EV_STATE(se, REGISTERED);
+	flags = se->reg_flags;
+	affinity = se->affinity;
+
+	if (is_event_shared(map))
+		sdei_map_unlock(map);
+
+	switch (info) {
+	case SDEI_INFO_EV_TYPE:
+		return is_event_shared(map);
+
+	case SDEI_INFO_EV_NOT_SIGNALED:
+		return !is_event_signalable(map);
+
+	case SDEI_INFO_EV_PRIORITY:
+		return is_event_critical(map);
+
+	case SDEI_INFO_EV_ROUTING_MODE:
+		if (!is_event_shared(map))
+			return SDEI_EINVAL;
+		if (!registered)
+			return SDEI_EDENY;
+		return (flags == SDEI_REGF_RM_PE);
+
+	case SDEI_INFO_EV_ROUTING_AFF:
+		if (!is_event_shared(map))
+			return SDEI_EINVAL;
+		if (!registered)
+			return SDEI_EDENY;
+		if (flags != SDEI_REGF_RM_PE)
+			return SDEI_EINVAL;
+		return affinity;
+
+	default:
+		return SDEI_EINVAL;
+	}
+}
+
+/* Unregister an SDEI event */
+static int sdei_event_unregister(int ev_num)
+{
+	int ret = 0;
+	sdei_entry_t *se;
+	sdei_ev_map_t *map;
+
+	/* Check if valid event number */
+	map = find_event_map(ev_num);
+	if (!map)
+		return SDEI_EINVAL;
+
+	se = get_event_entry(map);
+
+	/*
+	 * Even though the unregister operation is per-event (and, for private
+	 * events, unregistration is required on each PE individually), it has
+	 * to be serialised with respect to bind/release, which are global
+	 * operations. So we hold the lock throughout, unconditionally.
+	 */
+	sdei_map_lock(map);
+
+	if (!can_sdei_state_trans(se, DO_UNREGISTER)) {
+		/*
+		 * Even if the call is invalid, and the handler is running (for
+		 * example, having unregistered from a running handler earlier),
+		 * return pending error code; otherwise, return deny.
+		 */
+		ret = GET_EV_STATE(se, RUNNING) ? SDEI_EPEND : SDEI_EDENY;
+
+		goto finish;
+	}
+
+	map->reg_count--;
+	if (is_event_private(map)) {
+		/* Multiple calls to register are possible for private events */
+		assert(map->reg_count >= 0);
+	} else {
+		/* Only a single call to register is possible for shared events */
+		assert(map->reg_count == 0);
+	}
+
+	if (is_map_bound(map)) {
+		plat_ic_disable_interrupt(map->intr);
+
+		/*
+		 * Clear pending interrupt. Skip for SGIs as they may not be
+		 * cleared on interrupt controllers.
+		 */
+		if (ev_num != SDEI_EVENT_0)
+			plat_ic_clear_interrupt_pending(map->intr);
+
+		assert(plat_ic_get_interrupt_type(map->intr) == INTR_TYPE_EL3);
+		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_NS);
+		plat_ic_set_interrupt_priority(map->intr, LOWEST_INTR_PRIORITY);
+	}
+
+	clear_event_entries(se);
+
+	/*
+	 * If the handler is running at the time of unregister, return the
+	 * pending error code.
+	 */
+	if (GET_EV_STATE(se, RUNNING))
+		ret = SDEI_EPEND;
+
+finish:
+	sdei_map_unlock(map);
+
+	return ret;
+}
+
+/* Query status of an SDEI event */
+static int sdei_event_status(int ev_num)
+{
+	sdei_ev_map_t *map;
+	sdei_entry_t *se;
+	sdei_state_t state;
+
+	/* Check if valid event number */
+	map = find_event_map(ev_num);
+	if (!map)
+		return SDEI_EINVAL;
+
+	se = get_event_entry(map);
+
+	if (is_event_shared(map))
+		sdei_map_lock(map);
+
+	/* State value directly maps to the expected return format */
+	state = se->state;
+
+	if (is_event_shared(map))
+		sdei_map_unlock(map);
+
+	return state;
+}
+
+/* Bind an SDEI event to an interrupt */
+static int sdei_interrupt_bind(int intr_num)
+{
+	sdei_ev_map_t *map;
+	int retry = 1, shared_mapping;
+
+	/* SGIs are not allowed to be bound */
+	if (plat_ic_is_sgi(intr_num))
+		return SDEI_EINVAL;
+
+	shared_mapping = plat_ic_is_spi(intr_num);
+	do {
+		/*
+		 * Bail out if there is already an event for this interrupt,
+		 * either platform-defined or dynamic.
+		 */
+		map = find_event_map_by_intr(intr_num, shared_mapping);
+		if (map) {
+			if (is_map_dynamic(map)) {
+				if (is_map_bound(map)) {
+					/*
+					 * Dynamic event, already bound. Return
+					 * event number.
+					 */
+					return map->ev_num;
+				}
+			} else {
+				/* Binding non-dynamic event */
+				return SDEI_EINVAL;
+			}
+		}
+
+		/*
+		 * The interrupt is not bound yet. Try to find a free slot to
+		 * bind it. Free dynamic mappings have their interrupt set as
+		 * SDEI_DYN_IRQ.
+		 */
+		map = find_event_map_by_intr(SDEI_DYN_IRQ, shared_mapping);
+		if (!map)
+			return SDEI_ENOMEM;
+
+		/* The returned mapping must be dynamic */
+		assert(is_map_dynamic(map));
+
+		/*
+		 * We cannot assert for bound maps here, as we might be racing
+		 * with another bind.
+		 */
+
+		/* The requested interrupt must already belong to NS */
+		if (plat_ic_get_interrupt_type(intr_num) != INTR_TYPE_NS)
+			return SDEI_EDENY;
+
+		/*
+		 * Interrupt programming and ownership transfer are deferred
+		 * until register.
+		 */
+
+		sdei_map_lock(map);
+		if (!is_map_bound(map)) {
+			map->intr = intr_num;
+			set_map_bound(map);
+			retry = 0;
+		}
+		sdei_map_unlock(map);
+	} while (retry);
+
+	return map->ev_num;
+}
+
+/* Release an SDEI event previously bound to an interrupt */
+static int sdei_interrupt_release(int ev_num)
+{
+	int ret = 0;
+	sdei_ev_map_t *map;
+	sdei_entry_t *se;
+
+	/* Check if valid event number */
+	map = find_event_map(ev_num);
+	if (!map)
+		return SDEI_EINVAL;
+
+	if (!is_map_dynamic(map))
+		return SDEI_EINVAL;
+
+	se = get_event_entry(map);
+
+	sdei_map_lock(map);
+
+	/* Event must have been unregistered before release */
+	if (map->reg_count != 0) {
+		ret = SDEI_EDENY;
+		goto finish;
+	}
+
+	/*
+	 * Interrupt release never causes the state to change. We only check
+	 * whether it's permissible or not.
+	 */
+	if (!can_sdei_state_trans(se, DO_RELEASE)) {
+		ret = SDEI_EDENY;
+		goto finish;
+	}
+
+	if (is_map_bound(map)) {
+		/*
+		 * Deny release if the interrupt is active, which means it's
+		 * probably being acknowledged and handled elsewhere.
+		 */
+		if (plat_ic_get_interrupt_active(map->intr)) {
+			ret = SDEI_EDENY;
+			goto finish;
+		}
+
+		/*
+		 * Interrupt programming and ownership transfer are already done
+		 * during unregister.
+		 */
+
+		map->intr = SDEI_DYN_IRQ;
+		clr_map_bound(map);
+	} else {
+		SDEI_LOG("Error release bound:%d cnt:%d\n", is_map_bound(map),
+				map->reg_count);
+		ret = SDEI_EINVAL;
+	}
+
+finish:
+	sdei_map_unlock(map);
+
+	return ret;
+}
+
+/* Perform reset of private SDEI events */
+static int sdei_private_reset(void)
+{
+	sdei_ev_map_t *map;
+	int ret = 0, final_ret = 0, i;
+
+	/* Unregister all private events */
+	for_each_private_map(i, map) {
+		/*
+		 * The unregister can fail if the event is not registered,
+		 * which is allowed; the resulting deny error is ignored here.
+		 * But if the event is running or has its unregister pending,
+		 * the reset call fails with the pending error code.
+		 */
+		ret = sdei_event_unregister(map->ev_num);
+		if ((ret == SDEI_EPEND) && (final_ret == 0))
+			final_ret = ret;
+	}
+
+	return final_ret;
+}
+
+/* Perform reset of shared SDEI events */
+static int sdei_shared_reset(void)
+{
+	const sdei_mapping_t *mapping;
+	sdei_ev_map_t *map;
+	int ret = 0, final_ret = 0, i, j;
+
+	/* Unregister all shared events */
+	for_each_shared_map(i, map) {
+		/*
+		 * The unregister can fail if the event is not registered,
+		 * which is allowed; the resulting deny error is ignored here.
+		 * But if the event is running or has its unregister pending,
+		 * the reset call fails with the pending error code.
+		 */
+		ret = sdei_event_unregister(map->ev_num);
+		if ((ret == SDEI_EPEND) && (final_ret == 0))
+			final_ret = ret;
+	}
+
+	if (final_ret != 0)
+		return final_ret;
+
+	/*
+	 * Loop through both private and shared mappings, and release all
+	 * bindings.
+	 */
+	for_each_mapping_type(i, mapping) {
+		iterate_mapping(mapping, j, map) {
+			/*
+			 * Release bindings for mappings that are dynamic and
+			 * bound.
+			 */
+			if (is_map_dynamic(map) && is_map_bound(map)) {
+				/*
+				 * Any failure to release would mean there is
+				 * at least one PE registered for the event.
+				 */
+				ret = sdei_interrupt_release(map->ev_num);
+				if ((ret != 0) && (final_ret == 0))
+					final_ret = ret;
+			}
+		}
+	}
+
+	return final_ret;
+}
+
+/* Send a signal to another SDEI client PE */
+int sdei_signal(int event, uint64_t target_pe)
+{
+	sdei_ev_map_t *map;
+
+	/* Only event 0 can be signalled */
+	if (event != SDEI_EVENT_0)
+		return SDEI_EINVAL;
+
+	/* Find mapping for event 0 */
+	map = find_event_map(SDEI_EVENT_0);
+	if (!map)
+		return SDEI_EINVAL;
+
+	/* The event must be signalable */
+	if (!is_event_signalable(map))
+		return SDEI_EINVAL;
+
+	/* Validate target */
+	if (plat_core_pos_by_mpidr(target_pe) < 0)
+		return SDEI_EINVAL;
+
+	/* Raise SGI. Platform will validate target_pe */
+	plat_ic_raise_el3_sgi(map->intr, (u_register_t) target_pe);
+
+	return 0;
+}
+
+/* Query SDEI dispatcher features */
+uint64_t sdei_features(unsigned int feature)
+{
+	if (feature == SDEI_FEATURE_BIND_SLOTS) {
+		return FEATURE_BIND_SLOTS(num_dyn_priv_slots,
+				num_dyn_shrd_slots);
+	}
+
+	return SDEI_EINVAL;
+}
+
+/* SDEI top level handler for servicing SMCs */
+uint64_t sdei_smc_handler(uint32_t smc_fid,
+			  uint64_t x1,
+			  uint64_t x2,
+			  uint64_t x3,
+			  uint64_t x4,
+			  void *cookie,
+			  void *handle,
+			  uint64_t flags)
+{
+
+	uint64_t x5;
+	int ss = get_interrupt_src_ss(flags);
+	int64_t ret;
+	unsigned int resume = 0;
+
+	if (ss != NON_SECURE)
+		SMC_RET1(handle, SMC_UNK);
+
+	/* Verify the caller EL */
+	if (GET_EL(read_spsr_el3()) != sdei_client_el())
+		SMC_RET1(handle, SMC_UNK);
+
+	switch (smc_fid) {
+	case SDEI_VERSION:
+		SDEI_LOG("> VER\n");
+		ret = sdei_version();
+		SDEI_LOG("< VER:%lx\n", ret);
+		SMC_RET1(handle, ret);
+		break;
+
+	case SDEI_EVENT_REGISTER:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		SDEI_LOG("> REG(n:%d e:%lx a:%lx f:%x m:%lx)\n", (int) x1,
+				x2, x3, (int) x4, x5);
+		ret = sdei_event_register(x1, x2, x3, x4, x5);
+		SDEI_LOG("< REG:%ld\n", ret);
+		SMC_RET1(handle, ret);
+		break;
+
+	case SDEI_EVENT_ENABLE:
+		SDEI_LOG("> ENABLE(n:%d)\n", (int) x1);
+		ret = sdei_event_enable(x1);
+		SDEI_LOG("< ENABLE:%ld\n", ret);
+		SMC_RET1(handle, ret);
+		break;
+
+	case SDEI_EVENT_DISABLE:
+		SDEI_LOG("> DISABLE(n:%d)\n", (int) x1);
+		ret = sdei_event_disable(x1);
+		SDEI_LOG("< DISABLE:%ld\n", ret);
+		SMC_RET1(handle, ret);
+		break;
+
+	case SDEI_EVENT_CONTEXT:
+		SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
+		ret = sdei_event_context(handle, x1);
+		SDEI_LOG("< CTX:%ld\n", ret);
+		SMC_RET1(handle, ret);
+		break;
+
+	case SDEI_EVENT_COMPLETE_AND_RESUME:
+		resume = 1;
+		/* Fall through */
+
+	case SDEI_EVENT_COMPLETE:
+		SDEI_LOG("> COMPLETE(r:%d sta/ep:%lx):%lx\n", resume, x1,
+				read_mpidr_el1());
+		ret = sdei_event_complete(resume, x1);
+		SDEI_LOG("< COMPLETE:%lx\n", ret);
+
+		/*
+		 * Set the error code only if the call failed. If the call
+		 * succeeded, we discard the dispatched context and restore the
+		 * interrupted context to a pristine condition, which therefore
+		 * shouldn't be modified. We don't return to the caller in this
+		 * case anyway.
+		 */
+		if (ret)
+			SMC_RET1(handle, ret);
+
+		SMC_RET0(handle);
+		break;
+
+	case SDEI_EVENT_STATUS:
+		SDEI_LOG("> STAT(n:%d)\n", (int) x1);
+		ret = sdei_event_status(x1);
+		SDEI_LOG("< STAT:%ld\n", ret);
+		SMC_RET1(handle, ret);
+		break;
+
+	case SDEI_EVENT_GET_INFO:
+		SDEI_LOG("> INFO(n:%d, %d)\n", (int) x1, (int) x2);
+		ret = sdei_event_get_info(x1, x2);
+		SDEI_LOG("< INFO:%ld\n", ret);
+		SMC_RET1(handle, ret);
+		break;
+
+	case SDEI_EVENT_UNREGISTER:
+		SDEI_LOG("> UNREG(n:%d)\n", (int) x1);
+		ret = sdei_event_unregister(x1);
+		SDEI_LOG("< UNREG:%ld\n", ret);
+		SMC_RET1(handle, ret);
+		break;
+
+	case SDEI_PE_UNMASK:
+		SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1());
+		sdei_pe_unmask();
+		SDEI_LOG("< UNMASK:%ld\n", 0);
+		SMC_RET1(handle, 0);
+		break;
+
+	case SDEI_PE_MASK:
+		SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
+		ret = sdei_pe_mask();
+		SDEI_LOG("< MASK:%ld\n", ret);
+		SMC_RET1(handle, ret);
+		break;
+
+	case SDEI_INTERRUPT_BIND:
+		SDEI_LOG("> BIND(%d)\n", (int) x1);
+		ret = sdei_interrupt_bind(x1);
+		SDEI_LOG("< BIND:%ld\n", ret);
+		SMC_RET1(handle, ret);
+		break;
+
+	case SDEI_INTERRUPT_RELEASE:
+		SDEI_LOG("> REL(%d)\n", (int) x1);
+		ret = sdei_interrupt_release(x1);
+		SDEI_LOG("< REL:%ld\n", ret);
+		SMC_RET1(handle, ret);
+		break;
+
+	case SDEI_SHARED_RESET:
+		SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
+		ret = sdei_shared_reset();
+		SDEI_LOG("< S_RESET:%ld\n", ret);
+		SMC_RET1(handle, ret);
+		break;
+
+	case SDEI_PRIVATE_RESET:
+		SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
+		ret = sdei_private_reset();
+		SDEI_LOG("< P_RESET:%ld\n", ret);
+		SMC_RET1(handle, ret);
+		break;
+
+	case SDEI_EVENT_ROUTING_SET:
+		SDEI_LOG("> ROUTE_SET(n:%d f:%lx aff:%lx)\n", (int) x1, x2, x3);
+		ret = sdei_event_routing_set(x1, x2, x3);
+		SDEI_LOG("< ROUTE_SET:%ld\n", ret);
+		SMC_RET1(handle, ret);
+		break;
+
+	case SDEI_FEATURES:
+		SDEI_LOG("> FTRS(f:%lx)\n", x1);
+		ret = sdei_features(x1);
+		SDEI_LOG("< FTRS:%lx\n", ret);
+		SMC_RET1(handle, ret);
+		break;
+
+	case SDEI_EVENT_SIGNAL:
+		SDEI_LOG("> SIGNAL(e:%lx t:%lx)\n", x1, x2);
+		ret = sdei_signal(x1, x2);
+		SDEI_LOG("< SIGNAL:%ld\n", ret);
+		SMC_RET1(handle, ret);
+		break;
+	default:
+		break;
+	}
+
+	WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid);
+	SMC_RET1(handle, SMC_UNK);
+}
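+
+/*
+ * A minimal sketch of how a client at the expected EL reaches this handler
+ * (hypothetical pseudo-assembly; 0xC4000020 is the SDEI_VERSION function ID
+ * per the SDEI v1.0 specification):
+ *
+ *   mov_imm x0, 0xC4000020    // SDEI_VERSION
+ *   smc     #0
+ *   // x0 now holds the SDEI version, or SMC_UNK on failure
+ */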
+
+/* Subscribe to PSCI CPU on to initialize per-CPU SDEI configuration */
+SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, sdei_cpu_on_init);
diff --git a/services/std_svc/sdei/sdei_private.h b/services/std_svc/sdei/sdei_private.h
new file mode 100644
index 0000000..44db419
--- /dev/null
+++ b/services/std_svc/sdei/sdei_private.h
@@ -0,0 +1,234 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SDEI_PRIVATE_H__
+#define __SDEI_PRIVATE_H__
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <errno.h>
+#include <interrupt_mgmt.h>
+#include <platform.h>
+#include <sdei.h>
+#include <spinlock.h>
+#include <stdbool.h>
+#include <types.h>
+#include <utils_def.h>
+
+#ifdef AARCH32
+# error SDEI is implemented only for AArch64 systems
+#endif
+
+#ifndef PLAT_SDEI_CRITICAL_PRI
+# error Platform must define SDEI critical priority value
+#endif
+
+#ifndef PLAT_SDEI_NORMAL_PRI
+# error Platform must define SDEI normal priority value
+#endif
+
+/* Output SDEI logs as verbose */
+#define SDEI_LOG(...)	VERBOSE("SDEI: " __VA_ARGS__)
+
+/* SDEI handler unregistered state. This is the default state. */
+#define SDEI_STATE_UNREGISTERED		0
+
+/* SDEI event status values, as bit positions */
+#define SDEI_STATF_REGISTERED		0
+#define SDEI_STATF_ENABLED		1
+#define SDEI_STATF_RUNNING		2
+
+/* SDEI SMC error codes */
+#define	SDEI_EINVAL	(-2)
+#define	SDEI_EDENY	(-3)
+#define	SDEI_EPEND	(-5)
+#define	SDEI_ENOMEM	(-10)
+
+/*
+ * 'info' parameter to SDEI_EVENT_GET_INFO SMC.
+ *
+ * Note that the SDEI v1.0 specification mistakenly enumerates the
+ * SDEI_INFO_EV_SIGNALED as SDEI_INFO_SIGNALED. This will be corrected in a
+ * future version.
+ */
+#define SDEI_INFO_EV_TYPE		0
+#define SDEI_INFO_EV_NOT_SIGNALED	1
+#define SDEI_INFO_EV_PRIORITY		2
+#define SDEI_INFO_EV_ROUTING_MODE	3
+#define SDEI_INFO_EV_ROUTING_AFF	4
+
+#define SDEI_PRIVATE_MAPPING()	(&sdei_global_mappings[_SDEI_MAP_IDX_PRIV])
+#define SDEI_SHARED_MAPPING()	(&sdei_global_mappings[_SDEI_MAP_IDX_SHRD])
+
+#define for_each_mapping_type(_i, _mapping) \
+	for (_i = 0, _mapping = &sdei_global_mappings[_i]; \
+			_i < _SDEI_MAP_IDX_MAX; \
+			_i++, _mapping = &sdei_global_mappings[_i])
+
+#define iterate_mapping(_mapping, _i, _map) \
+	for (_map = (_mapping)->map, _i = 0; \
+			_i < (_mapping)->num_maps; \
+			_i++, _map++)
+
+#define for_each_private_map(_i, _map) \
+	iterate_mapping(SDEI_PRIVATE_MAPPING(), _i, _map)
+
+#define for_each_shared_map(_i, _map) \
+	iterate_mapping(SDEI_SHARED_MAPPING(), _i, _map)
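+
+/*
+ * A minimal usage sketch of the iterators above (the loop body is
+ * hypothetical):
+ *
+ *   int i;
+ *   sdei_ev_map_t *map;
+ *
+ *   for_each_private_map(i, map) {
+ *           if (is_map_bound(map))
+ *                   SDEI_LOG("event %d bound to interrupt %d\n",
+ *                                   map->ev_num, map->intr);
+ *   }
+ */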
+
+/* SDEI_FEATURES */
+#define SDEI_FEATURE_BIND_SLOTS		0
+#define BIND_SLOTS_MASK			0xffff
+#define FEATURES_SHARED_SLOTS_SHIFT	16
+#define FEATURES_PRIVATE_SLOTS_SHIFT	0
+#define FEATURE_BIND_SLOTS(_priv, _shrd) \
+	((((_priv) & BIND_SLOTS_MASK) << FEATURES_PRIVATE_SLOTS_SHIFT) | \
+	 (((_shrd) & BIND_SLOTS_MASK) << FEATURES_SHARED_SLOTS_SHIFT))
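+
+/*
+ * For illustration, with (hypothetical) 3 dynamic private slots and 2 dynamic
+ * shared slots, FEATURE_BIND_SLOTS(3, 2) packs the private count into bits
+ * [15:0] and the shared count into bits [31:16], yielding 0x00020003.
+ */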
+
+#define GET_EV_STATE(_e, _s)	get_ev_state_bit(_e, SDEI_STATF_##_s)
+#define SET_EV_STATE(_e, _s)	set_ev_state_bit(_e, SDEI_STATF_##_s)
+
+static inline int is_event_private(sdei_ev_map_t *map)
+{
+	return ((map->map_flags & BIT(_SDEI_MAPF_PRIVATE_SHIFT)) != 0);
+}
+
+static inline int is_event_shared(sdei_ev_map_t *map)
+{
+	return !is_event_private(map);
+}
+
+static inline int is_event_critical(sdei_ev_map_t *map)
+{
+	return ((map->map_flags & BIT(_SDEI_MAPF_CRITICAL_SHIFT)) != 0);
+}
+
+static inline int is_event_normal(sdei_ev_map_t *map)
+{
+	return !is_event_critical(map);
+}
+
+static inline int is_event_signalable(sdei_ev_map_t *map)
+{
+	return ((map->map_flags & BIT(_SDEI_MAPF_SIGNALABLE_SHIFT)) != 0);
+}
+
+static inline int is_map_dynamic(sdei_ev_map_t *map)
+{
+	return ((map->map_flags & BIT(_SDEI_MAPF_DYNAMIC_SHIFT)) != 0);
+}
+
+/*
+ * Check whether an event is associated with an interrupt. For static events,
+ * this always returns true; for dynamic events, it returns whether
+ * SDEI_INTERRUPT_BIND has been called on them. It can therefore be used on
+ * both static and dynamic events to check for an associated interrupt.
+ */
+static inline int is_map_bound(sdei_ev_map_t *map)
+{
+	return ((map->map_flags & BIT(_SDEI_MAPF_BOUND_SHIFT)) != 0);
+}
+
+static inline void set_map_bound(sdei_ev_map_t *map)
+{
+	map->map_flags |= BIT(_SDEI_MAPF_BOUND_SHIFT);
+}
+
+static inline void clr_map_bound(sdei_ev_map_t *map)
+{
+	map->map_flags &= ~(BIT(_SDEI_MAPF_BOUND_SHIFT));
+}
+
+static inline int is_secure_sgi(unsigned int intr)
+{
+	return (plat_ic_is_sgi(intr) &&
+			(plat_ic_get_interrupt_type(intr) == INTR_TYPE_EL3));
+}
+
+/*
+ * Determine the EL of the SDEI client. If SCR_EL3.HCE is set, EL2 is
+ * implemented and enabled for the client, which is therefore deemed to run at
+ * EL2; otherwise, it's deemed to run at EL1.
+ */
+static inline unsigned int sdei_client_el(void)
+{
+	return read_scr_el3() & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;
+}
+
+static inline unsigned int sdei_event_priority(sdei_ev_map_t *map)
+{
+	return is_event_critical(map) ? PLAT_SDEI_CRITICAL_PRI :
+		PLAT_SDEI_NORMAL_PRI;
+}
+
+static inline int get_ev_state_bit(sdei_entry_t *se, unsigned int bit_no)
+{
+	return ((se->state & BIT(bit_no)) != 0);
+}
+
+static inline void set_ev_state_bit(sdei_entry_t *se, unsigned int bit_no)
+{
+	se->state |= BIT(bit_no);
+}
+
+static inline void clr_ev_state_bit(sdei_entry_t *se, unsigned int bit_no)
+{
+	se->state &= ~BIT(bit_no);
+}
+
+/* SDEI actions for state transition */
+typedef enum {
+	/*
+	 * Actions resulting from client requests. These directly map to SMC
+	 * calls. Note that the state table columns are listed in this order
+	 * too.
+	 */
+	DO_REGISTER = 0,
+	DO_RELEASE = 1,
+	DO_ENABLE = 2,
+	DO_DISABLE = 3,
+	DO_UNREGISTER = 4,
+	DO_ROUTING = 5,
+	DO_CONTEXT = 6,
+	DO_COMPLETE = 7,
+	DO_COMPLETE_RESUME = 8,
+
+	/* Action for event dispatch */
+	DO_DISPATCH = 9,
+
+	DO_MAX,
+} sdei_action_t;
+
+typedef enum {
+	SDEI_NORMAL,
+	SDEI_CRITICAL
+} sdei_class_t;
+
+static inline void sdei_map_lock(sdei_ev_map_t *map)
+{
+	spin_lock(&map->lock);
+}
+
+static inline void sdei_map_unlock(sdei_ev_map_t *map)
+{
+	spin_unlock(&map->lock);
+}
+
+extern const sdei_mapping_t sdei_global_mappings[];
+extern sdei_entry_t sdei_private_event_table[];
+extern sdei_entry_t sdei_shared_event_table[];
+
+void init_sdei_state(void);
+
+sdei_ev_map_t *find_event_map_by_intr(int intr_num, int shared);
+sdei_ev_map_t *find_event_map(int ev_num);
+sdei_entry_t *get_event_entry(sdei_ev_map_t *map);
+
+int sdei_event_context(void *handle, unsigned int param);
+int sdei_event_complete(int resume, uint64_t arg);
+
+void sdei_pe_unmask(void);
+unsigned int sdei_pe_mask(void);
+
+int sdei_intr_handler(uint32_t intr, uint32_t flags, void *handle,
+		void *cookie);
+bool can_sdei_state_trans(sdei_entry_t *se, sdei_action_t act);
+
+#endif /* __SDEI_PRIVATE_H__ */
diff --git a/services/std_svc/sdei/sdei_state.c b/services/std_svc/sdei/sdei_state.c
new file mode 100644
index 0000000..3f60dfd
--- /dev/null
+++ b/services/std_svc/sdei/sdei_state.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <cassert.h>
+#include <stdbool.h>
+#include "sdei_private.h"
+
+/* Aliases for SDEI handler states: 'R'unning, 'E'nabled, and re'G'istered */
+#define r_		0
+#define R_		(1u << SDEI_STATF_RUNNING)
+
+#define e_		0
+#define E_		(1u << SDEI_STATF_ENABLED)
+
+#define g_		0
+#define G_		(1u << SDEI_STATF_REGISTERED)
+
+/* All possible composite handler states */
+#define reg_		(r_ | e_ | g_)
+#define reG_		(r_ | e_ | G_)
+#define rEg_		(r_ | E_ | g_)
+#define rEG_		(r_ | E_ | G_)
+#define Reg_		(R_ | e_ | g_)
+#define ReG_		(R_ | e_ | G_)
+#define REg_		(R_ | E_ | g_)
+#define REG_		(R_ | E_ | G_)
+
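+/*
+ * For example, rEG_ describes a handler that is registered and enabled, but
+ * not running: rEG_ == (E_ | G_) == 0x3, i.e. the SDEI_STATF_ENABLED and
+ * SDEI_STATF_REGISTERED bits set.
+ */
+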
+#define MAX_STATES	(REG_ + 1)
+
+/* Invalid state */
+#define	SDEI_STATE_INVALID	((sdei_state_t) (-1))
+
+/* No change in state */
+#define	SDEI_STATE_NOP		((sdei_state_t) (-2))
+
+#define X___		SDEI_STATE_INVALID
+#define NOP_		SDEI_STATE_NOP
+
+/* Ensure special states don't overlap with valid ones */
+CASSERT(X___ > REG_, sdei_state_overlap_invalid);
+CASSERT(NOP_ > REG_, sdei_state_overlap_nop);
+
+/*
+ * SDEI handler state machine: refer to sections 6.1 and 6.1.2 of the SDEI v1.0
+ * specification:
+ *
+ * http://infocenter.arm.com/help/topic/com.arm.doc.den0054a/ARM_DEN0054A_Software_Delegated_Exception_Interface.pdf
+ *
+ * Not all calls cause a handler state transition. The table is also used to
+ * validate whether a call is permissible in a given handler state:
+ *
+ *  - X___ denotes a forbidden transition;
+ *  - NOP_ denotes a permitted transition, but there's no change in state;
+ *  - Otherwise, XXX_ gives the new state.
+ *
+ * DISP[atch] is a transition added for this implementation; it is not
+ * mentioned in the spec.
+ *
+ * Calls that the spec permits at any time do not appear in this table.
+ */
+
+static const sdei_state_t sdei_state_table[MAX_STATES][DO_MAX] = {
+/*
+ *	Action:		REG     REL	ENA	DISA	UREG	ROUT	CTX	COMP	COMPR	DISP
+ *	Notes:			[3]			[1]	[3]	[3][4]			[2]
+ */
+	/* Handler unregistered, disabled, and not running. This is the default state. */
+/* 0 */	[reg_] = {	reG_,	NOP_,	X___,	X___,	X___,	X___,	X___,	X___,	X___,	X___,	},
+
+	/* Handler unregistered and running */
+/* 4 */	[Reg_] = {	X___,	X___,	X___,	X___,	X___,	X___,	NOP_,	reg_,	reg_,	X___,	},
+
+	/* Handler registered */
+/* 1 */	[reG_] = {	X___,	X___,	rEG_,	NOP_,	reg_,	NOP_,	X___,	X___,	X___,	X___,	},
+
+	/* Handler registered and running */
+/* 5 */	[ReG_] = {	X___,	X___,	REG_,	NOP_,	Reg_,	X___,	NOP_,	reG_,	reG_,	X___,	},
+
+	/* Handler registered and enabled */
+/* 3 */	[rEG_] = {	X___,	X___,	NOP_,	reG_,	reg_,	X___,	X___,	X___,	X___,	REG_,	},
+
+	/* Handler registered, enabled, and running */
+/* 7 */	[REG_] = {	X___,	X___,	NOP_,	ReG_,	Reg_,	X___,	NOP_,	rEG_,	rEG_,	X___,	},
+
+	/*
+	 * Invalid states: no valid transition would leave the handler in these
+	 * states; and no transition from these states is possible either.
+	 */
+
+	/*
+	 * Handler can't be enabled without being registered. I.e., XEg is
+	 * impossible.
+	 */
+/* 2 */	[rEg_] = {	X___,	X___,	X___,	X___,	X___,	X___,	X___,	X___,	X___,	X___,	},
+/* 6 */	[REg_] = {	X___,	X___,	X___,	X___,	X___,	X___,	X___,	X___,	X___,	X___,	},
+};
+
+/*
+ * [1] Unregister will always also disable the event, so the new state will have
+ *     Xeg.
+ * [2] Event is considered for dispatch only when it's both registered and
+ *     enabled.
+ * [3] Never causes change in state.
+ * [4] Only allowed when running.
+ */
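+
+/*
+ * Worked example of reading the table: a registered handler (reG_) receiving
+ * DO_ENABLE moves to rEG_; once it's also running (REG_), DO_COMPLETE takes it
+ * back to rEG_. An unregistered handler (reg_) receiving DO_ENABLE yields
+ * X___, so the corresponding SDEI_EVENT_ENABLE call is denied.
+ */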
+
+/*
+ * Given an action, transition the state of an event by looking up the state
+ * table above:
+ *
+ *  - Return false for an invalid transition;
+ *  - Return true for a valid transition that causes no change in state;
+ *  - Otherwise, update the state and return true.
+ *
+ * This function assumes that the caller holds the necessary locks. If the
+ * transition has constraints beyond those the state table describes, the
+ * caller is expected to restore the previous state. See sdei_event_register()
+ * for an example.
+ */
+bool can_sdei_state_trans(sdei_entry_t *se, sdei_action_t act)
+{
+	sdei_state_t next;
+
+	assert(act < DO_MAX);
+	if (se->state >= MAX_STATES) {
+		WARN(" event state invalid: %x\n", se->state);
+		return false;
+	}
+
+	next = sdei_state_table[se->state][act];
+	switch (next) {
+	case SDEI_STATE_INVALID:
+		return false;
+
+	case SDEI_STATE_NOP:
+		return true;
+
+	default:
+		/* Valid transition. Update state. */
+		SDEI_LOG(" event state 0x%x => 0x%x\n", se->state, next);
+		se->state = next;
+
+		return true;
+	}
+}
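+
+/*
+ * A minimal caller sketch (hypothetical; the real call sites are in
+ * sdei_main.c, which also selects the appropriate lock for private vs. shared
+ * events):
+ *
+ *   sdei_map_lock(map);
+ *   se = get_event_entry(map);
+ *   if (!can_sdei_state_trans(se, DO_ENABLE)) {
+ *           sdei_map_unlock(map);
+ *           return SDEI_EDENY;
+ *   }
+ *   plat_ic_enable_interrupt(map->intr);
+ *   sdei_map_unlock(map);
+ *   return 0;
+ */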
diff --git a/services/std_svc/std_svc_setup.c b/services/std_svc/std_svc_setup.c
index 977ed7f..ffc3471 100644
--- a/services/std_svc/std_svc_setup.c
+++ b/services/std_svc/std_svc_setup.c
@@ -11,6 +11,7 @@
 #include <psci.h>
 #include <runtime_instr.h>
 #include <runtime_svc.h>
+#include <sdei.h>
 #include <smcc_helpers.h>
 #include <spm_svc.h>
 #include <std_svc.h>
@@ -45,6 +46,11 @@
 	}
 #endif
 
+#if SDEI_SUPPORT
+	/* SDEI initialisation */
+	sdei_init();
+#endif
+
 	return ret;
 }
 
@@ -92,7 +98,6 @@
 		SMC_RET1(handle, ret);
 	}
 
-
 #if ENABLE_SPM
 	/*
 	 * Dispatch SPM calls to SPM SMC handler and return its return
@@ -104,6 +109,13 @@
 	}
 #endif
 
+#if SDEI_SUPPORT
+	if (is_sdei_fid(smc_fid)) {
+		return sdei_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
+				flags);
+	}
+#endif
+
 	switch (smc_fid) {
 	case ARM_STD_SVC_CALL_COUNT:
 		/*