Merge changes from topic "spmd" into integration

* changes:
  SPMD: enable SPM dispatcher support
  SPMD: hook SPMD into standard services framework
  SPMD: add SPM dispatcher based upon SPCI Beta 0 spec
  SPMD: add support to run BL32 in TDRAM and BL31 in secure DRAM on Arm FVP
  SPMD: add support for an example SPM core manifest
  SPMD: add SPCI Beta 0 specification header file
diff --git a/Makefile b/Makefile
index 5167d2e..0d8ddd8 100644
--- a/Makefile
+++ b/Makefile
@@ -418,11 +418,20 @@
         $(warning "SPD and EL3_PAYLOAD_BASE are incompatible build options.")
         $(warning "The SPD and its BL32 companion will be present but ignored.")
 endif
-        # We expect to locate an spd.mk under the specified SPD directory
-        SPD_MAKE	:=	$(wildcard services/spd/${SPD}/${SPD}.mk)
+	ifeq (${SPD},spmd)
+		# SPMD is located in std_svc directory
+		SPD_DIR := std_svc
+	else
+		# All other SPDs in spd directory
+		SPD_DIR := spd
+	endif
+
+	# We expect to locate an spd.mk under the specified SPD directory
+	SPD_MAKE	:=	$(wildcard services/${SPD_DIR}/${SPD}/${SPD}.mk)
+
 
         ifeq (${SPD_MAKE},)
-                $(error Error: No services/spd/${SPD}/${SPD}.mk located)
+                $(error Error: No services/${SPD_DIR}/${SPD}/${SPD}.mk located)
         endif
         $(info Including ${SPD_MAKE})
         include ${SPD_MAKE}
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 58909e8..0948e94 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -31,6 +31,7 @@
 				services/arm_arch_svc/arm_arch_svc_setup.c	\
 				services/std_svc/std_svc_setup.c		\
 				${PSCI_LIB_SOURCES}				\
+				${SPMD_SOURCES}					\
 				${SPM_SOURCES}
 
 
diff --git a/common/desc_image_load.c b/common/desc_image_load.c
index b483597..0769226 100644
--- a/common/desc_image_load.c
+++ b/common/desc_image_load.c
@@ -215,6 +215,9 @@
 	bl_params_node_t *params_node;
 	unsigned int fw_config_id;
 	uintptr_t hw_config_base = 0, fw_config_base;
+#if defined(SPD_spmd)
+	uint32_t fw_config_size = 0;
+#endif
 	bl_mem_params_node_t *mem_params;
 
 	assert(bl2_to_next_bl_params != NULL);
@@ -249,10 +252,14 @@
 
 		if (fw_config_id != INVALID_IMAGE_ID) {
 			mem_params = get_bl_mem_params_node(fw_config_id);
-			if (mem_params != NULL)
+			if (mem_params != NULL) {
 				fw_config_base = mem_params->image_info.image_base;
+#if defined(SPD_spmd)
+				fw_config_size =
+					mem_params->image_info.image_size;
+#endif
+			}
 		}
-
 		/*
 		 * Pass hw and tb_fw config addresses to next images. NOTE - for
 		 * EL3 runtime images (BL31 for AArch64 and BL32 for AArch32),
@@ -273,6 +280,11 @@
 			if (params_node->ep_info->args.arg1 == 0U)
 				params_node->ep_info->args.arg1 =
 								hw_config_base;
+#if defined(SPD_spmd)
+			if (params_node->ep_info->args.arg2 == 0U)
+				params_node->ep_info->args.arg2 =
+								fw_config_size;
+#endif
 		}
 	}
 }
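
Note on the hunk above: spmd_setup() later in this patch reads the SPMC manifest (tos_fw_config) base address from arg0 of the BL32 entry point and, with this change, its size from arg2. A minimal sketch of that consumer side (the helper name is illustrative and not part of the patch):

    #include <stddef.h>
    #include <common/bl_common.h>
    #include <plat/common/platform.h>

    /* Sketch: recover the SPMC manifest location handed over by BL2. */
    static void example_get_spmc_manifest(void **base, size_t *size)
    {
    	entry_point_info_t *ep = bl31_plat_get_next_image_ep_info(SECURE);

    	/* spmd_setup() additionally checks ep != NULL and arg0/arg2 != 0 */
    	*base = (void *)ep->args.arg0;	/* tos_fw_config base address */
    	*size = (size_t)ep->args.arg2;	/* tos_fw_config size in bytes */
    }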
diff --git a/include/plat/arm/common/arm_def.h b/include/plat/arm/common/arm_def.h
index c825bf4..7df6b0d 100644
--- a/include/plat/arm/common/arm_def.h
+++ b/include/plat/arm/common/arm_def.h
@@ -229,6 +229,14 @@
 						ARM_EL3_TZC_DRAM1_SIZE,	\
 						MT_MEMORY | MT_RW | MT_SECURE)
 
+#if defined(SPD_spmd)
+#define ARM_MAP_TRUSTED_DRAM		MAP_REGION_FLAT(		    \
+						PLAT_ARM_TRUSTED_DRAM_BASE, \
+						PLAT_ARM_TRUSTED_DRAM_SIZE, \
+						MT_MEMORY | MT_RW | MT_SECURE)
+#endif
+
+
 /*
  * Mapping for the BL1 RW region. This mapping is needed by BL2 in order to
  * share the Mbed TLS heap. Since the heap is allocated inside BL1, it resides
@@ -477,6 +485,12 @@
 #  define BL32_BASE			(ARM_AP_TZC_DRAM1_BASE + ULL(0x200000))
 #  define BL32_LIMIT			(ARM_AP_TZC_DRAM1_BASE +	\
 						ARM_AP_TZC_DRAM1_SIZE)
+# elif defined(SPD_spmd)
+#  define TSP_SEC_MEM_BASE		(ARM_AP_TZC_DRAM1_BASE + ULL(0x200000))
+#  define TSP_SEC_MEM_SIZE		(ARM_AP_TZC_DRAM1_SIZE - ULL(0x200000))
+#  define BL32_BASE			PLAT_ARM_TRUSTED_DRAM_BASE
+#  define BL32_LIMIT			(PLAT_ARM_TRUSTED_DRAM_BASE	\
+						+ (UL(1) << 21))
 # elif ARM_BL31_IN_DRAM
 #  define TSP_SEC_MEM_BASE		(ARM_AP_TZC_DRAM1_BASE +	\
 						PLAT_ARM_MAX_BL31_SIZE)
@@ -511,12 +525,12 @@
 
 /*
  * BL32 is mandatory in AArch32. In AArch64, undefine BL32_BASE if there is no
- * SPD and no SPM, as they are the only ones that can be used as BL32.
+ * SPD and no SPM-MM, as they are the only ones that can be used as BL32.
  */
 #if defined(__aarch64__) && !JUNO_AARCH32_EL3_RUNTIME
 # if defined(SPD_none) && !SPM_MM
 #  undef BL32_BASE
-# endif /* defined(SPD_none) && !SPM_MM*/
+# endif /* defined(SPD_none) && !SPM_MM */
 #endif /* defined(__aarch64__) && !JUNO_AARCH32_EL3_RUNTIME */
 
 /*******************************************************************************
diff --git a/include/plat/common/platform.h b/include/plat/common/platform.h
index 332cfca..f5bd298 100644
--- a/include/plat/common/platform.h
+++ b/include/plat/common/platform.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,9 @@
 #include <stdint.h>
 
 #include <lib/psci/psci.h>
+#if defined(SPD_spmd)
+#include <services/spm_core_manifest.h>
+#endif
 
 /*******************************************************************************
  * Forward declarations
@@ -272,7 +275,11 @@
 int plat_spm_sp_rd_load(struct sp_res_desc *rd, const void *ptr, size_t size);
 int plat_spm_sp_get_next_address(void **sp_base, size_t *sp_size,
 				 void **rd_base, size_t *rd_size);
-
+#if defined(SPD_spmd)
+int plat_spm_core_manifest_load(spmc_manifest_sect_attribute_t *manifest,
+				const void *ptr,
+				size_t size);
+#endif
 /*******************************************************************************
  * Mandatory BL image load functions(may be overridden).
  ******************************************************************************/
diff --git a/include/services/spci_svc.h b/include/services/spci_svc.h
new file mode 100644
index 0000000..49ba408
--- /dev/null
+++ b/include/services/spci_svc.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPCI_SVC_H
+#define SPCI_SVC_H
+
+#include <lib/smccc.h>
+#include <lib/utils_def.h>
+#include <tools_share/uuid.h>
+
+/* SPCI error codes. */
+#define SPCI_ERROR_NOT_SUPPORTED	-1
+#define SPCI_ERROR_INVALID_PARAMETER	-2
+#define SPCI_ERROR_NO_MEMORY		-3
+#define SPCI_ERROR_BUSY			-4
+#define SPCI_ERROR_INTERRUPTED		-5
+#define SPCI_ERROR_DENIED		-6
+#define SPCI_ERROR_RETRY		-7
+
+/* The macros below are used to identify SPCI calls from the SMC function ID */
+#define SPCI_FNUM_MIN_VALUE	U(0x60)
+#define SPCI_FNUM_MAX_VALUE	U(0x7f)
+#define is_spci_fid(fid) __extension__ ({		\
+	__typeof__(fid) _fid = (fid);			\
+	((GET_SMC_NUM(_fid) >= SPCI_FNUM_MIN_VALUE) &&	\
+	 (GET_SMC_NUM(_fid) <= SPCI_FNUM_MAX_VALUE)); })
+
+/* SPCI_VERSION helpers */
+#define SPCI_VERSION_MAJOR		U(0)
+#define SPCI_VERSION_MAJOR_SHIFT	16
+#define SPCI_VERSION_MAJOR_MASK		U(0x7FFF)
+#define SPCI_VERSION_MINOR		U(9)
+#define SPCI_VERSION_MINOR_SHIFT	0
+#define SPCI_VERSION_MINOR_MASK		U(0xFFFF)
+
+#define MAKE_SPCI_VERSION(major, minor) \
+	((((major) & SPCI_VERSION_MAJOR_MASK) <<  SPCI_VERSION_MAJOR_SHIFT) | \
+	 (((minor) & SPCI_VERSION_MINOR_MASK) << SPCI_VERSION_MINOR_SHIFT))
+#define SPCI_VERSION_COMPILED		MAKE_SPCI_VERSION(SPCI_VERSION_MAJOR, \
+							  SPCI_VERSION_MINOR)
+
+/* SPCI_MSG_SEND helpers */
+#define SPCI_MSG_SEND_ATTRS_BLK_SHIFT	U(0)
+#define SPCI_MSG_SEND_ATTRS_BLK_MASK	U(0x1)
+#define SPCI_MSG_SEND_ATTRS_BLK		U(0)
+#define SPCI_MSG_SEND_ATTRS_BLK_NOT	U(1)
+#define SPCI_MSG_SEND_ATTRS(blk)		\
+	(((blk) & SPCI_MSG_SEND_ATTRS_BLK_MASK) \
+	<< SPCI_MSG_SEND_ATTRS_BLK_SHIFT)
+
+/* Get SPCI fastcall std FID from function number */
+#define SPCI_FID(smc_cc, func_num)			\
+		((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) |	\
+		 ((smc_cc) << FUNCID_CC_SHIFT) |	\
+		 (OEN_STD_START << FUNCID_OEN_SHIFT) |	\
+		 ((func_num) << FUNCID_NUM_SHIFT))
+
+/* SPCI function numbers */
+#define SPCI_FNUM_ERROR			U(0x60)
+#define SPCI_FNUM_SUCCESS		U(0x61)
+#define SPCI_FNUM_INTERRUPT		U(0x62)
+#define SPCI_FNUM_VERSION		U(0x63)
+#define SPCI_FNUM_FEATURES		U(0x64)
+#define SPCI_FNUM_RX_RELEASE		U(0x65)
+#define SPCI_FNUM_RXTX_MAP		U(0x66)
+#define SPCI_FNUM_RXTX_UNMAP		U(0x67)
+#define SPCI_FNUM_PARTITION_INFO_GET	U(0x68)
+#define SPCI_FNUM_ID_GET		U(0x69)
+#define SPCI_FNUM_MSG_POLL		U(0x6A)
+#define SPCI_FNUM_MSG_WAIT		U(0x6B)
+#define SPCI_FNUM_MSG_YIELD		U(0x6C)
+#define SPCI_FNUM_MSG_RUN		U(0x6D)
+#define SPCI_FNUM_MSG_SEND		U(0x6E)
+#define SPCI_FNUM_MSG_SEND_DIRECT_REQ	U(0x6F)
+#define SPCI_FNUM_MSG_SEND_DIRECT_RESP	U(0x70)
+#define SPCI_FNUM_MEM_DONATE		U(0x71)
+#define SPCI_FNUM_MEM_LEND		U(0x72)
+#define SPCI_FNUM_MEM_SHARE		U(0x73)
+#define SPCI_FNUM_MEM_RETRIEVE_REQ	U(0x74)
+#define SPCI_FNUM_MEM_RETRIEVE_RESP	U(0x75)
+#define SPCI_FNUM_MEM_RELINQUISH	U(0x76)
+#define SPCI_FNUM_MEM_RECLAIM		U(0x77)
+
+/* SPCI SMC32 FIDs */
+#define SPCI_ERROR		SPCI_FID(SMC_32, SPCI_FNUM_ERROR)
+#define SPCI_SUCCESS_SMC32	SPCI_FID(SMC_32, SPCI_FNUM_SUCCESS)
+#define SPCI_INTERRUPT		SPCI_FID(SMC_32, SPCI_FNUM_INTERRUPT)
+#define SPCI_VERSION		SPCI_FID(SMC_32, SPCI_FNUM_VERSION)
+#define SPCI_FEATURES		SPCI_FID(SMC_32, SPCI_FNUM_FEATURES)
+#define SPCI_RX_RELEASE		SPCI_FID(SMC_32, SPCI_FNUM_RX_RELEASE)
+#define SPCI_RXTX_MAP_SMC32	SPCI_FID(SMC_32, SPCI_FNUM_RXTX_MAP)
+#define SPCI_RXTX_UNMAP		SPCI_FID(SMC_32, SPCI_FNUM_RXTX_UNMAP)
+#define SPCI_PARTITION_INFO_GET	SPCI_FID(SMC_32, SPCI_FNUM_PARTITION_INFO_GET)
+#define SPCI_ID_GET		SPCI_FID(SMC_32, SPCI_FNUM_ID_GET)
+#define SPCI_MSG_POLL		SPCI_FID(SMC_32, SPCI_FNUM_MSG_POLL)
+#define SPCI_MSG_WAIT		SPCI_FID(SMC_32, SPCI_FNUM_MSG_WAIT)
+#define SPCI_MSG_YIELD		SPCI_FID(SMC_32, SPCI_FNUM_MSG_YIELD)
+#define SPCI_MSG_RUN		SPCI_FID(SMC_32, SPCI_FNUM_MSG_RUN)
+#define SPCI_MSG_SEND		SPCI_FID(SMC_32, SPCI_FNUM_MSG_SEND)
+#define SPCI_MSG_SEND_DIRECT_REQ_SMC32 \
+	SPCI_FID(SMC_32, SPCI_FNUM_MSG_SEND_DIRECT_REQ)
+#define SPCI_MSG_SEND_DIRECT_RESP_SMC32	\
+	SPCI_FID(SMC_32, SPCI_FNUM_MSG_SEND_DIRECT_RESP)
+#define SPCI_MEM_DONATE_SMC32	SPCI_FID(SMC_32, SPCI_FNUM_MEM_DONATE)
+#define SPCI_MEM_LEND_SMC32	SPCI_FID(SMC_32, SPCI_FNUM_MEM_LEND)
+#define SPCI_MEM_SHARE_SMC32	SPCI_FID(SMC_32, SPCI_FNUM_MEM_SHARE)
+#define SPCI_MEM_RETRIEVE_REQ_SMC32 \
+	SPCI_FID(SMC_32, SPCI_FNUM_MEM_RETRIEVE_REQ)
+#define SPCI_MEM_RETRIEVE_RESP	SPCI_FID(SMC_32, SPCI_FNUM_MEM_RETRIEVE_RESP)
+#define SPCI_MEM_RELINQUISH	SPCI_FID(SMC_32, SPCI_FNUM_MEM_RELINQUISH)
+#define SPCI_MEM_RECLAIM	SPCI_FID(SMC_32, SPCI_FNUM_MEM_RECLAIM)
+
+/* SPCI SMC64 FIDs */
+#define SPCI_SUCCESS_SMC64	SPCI_FID(SMC_64, SPCI_FNUM_SUCCESS)
+#define SPCI_RXTX_MAP_SMC64	SPCI_FID(SMC_64, SPCI_FNUM_RXTX_MAP)
+#define SPCI_MSG_SEND_DIRECT_REQ_SMC64 \
+	SPCI_FID(SMC_64, SPCI_FNUM_MSG_SEND_DIRECT_REQ)
+#define SPCI_MSG_SEND_DIRECT_RESP_SMC64	\
+	SPCI_FID(SMC_64, SPCI_FNUM_MSG_SEND_DIRECT_RESP)
+#define SPCI_MEM_DONATE_SMC64	SPCI_FID(SMC_64, SPCI_FNUM_MEM_DONATE)
+#define SPCI_MEM_LEND_SMC64	SPCI_FID(SMC_64, SPCI_FNUM_MEM_LEND)
+#define SPCI_MEM_SHARE_SMC64	SPCI_FID(SMC_64, SPCI_FNUM_MEM_SHARE)
+#define SPCI_MEM_RETRIEVE_REQ_SMC64 \
+	SPCI_FID(SMC_64, SPCI_FNUM_MEM_RETRIEVE_REQ)
+
+/*
+ * Reserve a special value for traffic targeted to the Hypervisor or SPM.
+ */
+#define SPCI_TARGET_INFO_MBZ		U(0x0)
+
+/*
+ * Reserve a special value for MBZ parameters.
+ */
+#define SPCI_PARAM_MBZ			U(0x0)
+
+#endif /* SPCI_SVC_H */
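
As a cross-check of how the macros above compose (a sketch only, not part of the patch, assuming the standard SMCCC field layout from lib/smccc.h): the SMC32 FID for SPCI_VERSION works out to 0x84000063, and version 0.9 packs to 0x9.

    #include <assert.h>
    #include <services/spci_svc.h>

    /* Illustrative sanity checks of the FID and version helpers. */
    static void spci_macro_sanity_check(void)
    {
    	/* Fast call bit | SMC32 | OEN 4 (Standard Service) | func 0x63 */
    	assert(SPCI_VERSION == 0x84000063U);

    	/* (major << 16) | minor */
    	assert(MAKE_SPCI_VERSION(0, 9) == U(0x9));

    	/* Function numbers 0x60-0x7f are recognised as SPCI calls */
    	assert(is_spci_fid(SPCI_VERSION));
    }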
diff --git a/include/services/spm_core_manifest.h b/include/services/spm_core_manifest.h
new file mode 100644
index 0000000..06ecc13
--- /dev/null
+++ b/include/services/spm_core_manifest.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPMC_MANIFEST_H
+#define SPMC_MANIFEST_H
+
+#include <stdint.h>
+
+/*******************************************************************************
+ * Attribute Section
+ ******************************************************************************/
+
+typedef struct spm_core_manifest_sect_attribute {
+	/*
+	 * SPCI version (mandatory).
+	 */
+	uint32_t major_version;
+	uint32_t minor_version;
+
+	/*
+	 * Run-Time Exception Level (mandatory):
+	 * - 1: SEL1
+	 * - 2: SEL2
+	 */
+	uint32_t runtime_el;
+
+	/*
+	 * Run-Time Execution state (optional):
+	 * - 0: AArch64 (default)
+	 * - 1: AArch32
+	 */
+	uint32_t exec_state;
+
+	/*
+	 * Address of the binary image containing the SPM core (optional).
+	 */
+	uint64_t load_address;
+
+	/*
+	 * Offset from the base of the partition's binary image to the entry
+	 * point of the partition.
+	 */
+	uint64_t entrypoint;
+
+	/*
+	 * Size of binary image containing SPM core in bytes (mandatory).
+	 */
+	uint32_t binary_size;
+
+} spmc_manifest_sect_attribute_t;
+
+#endif /* SPMC_MANIFEST_H */
diff --git a/include/services/spmd_svc.h b/include/services/spmd_svc.h
new file mode 100644
index 0000000..6e4caf2
--- /dev/null
+++ b/include/services/spmd_svc.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPMD_SVC_H
+#define SPMD_SVC_H
+
+#ifndef __ASSEMBLER__
+#include <services/spci_svc.h>
+#include <stdint.h>
+
+int32_t spmd_setup(void);
+uint64_t spmd_smc_handler(uint32_t smc_fid,
+			  uint64_t x1,
+			  uint64_t x2,
+			  uint64_t x3,
+			  uint64_t x4,
+			  void *cookie,
+			  void *handle,
+			  uint64_t flags);
+#endif /* __ASSEMBLER__ */
+
+#endif /* SPMD_SVC_H */
diff --git a/plat/arm/board/fvp/fdts/fvp_spmc_manifest.dts b/plat/arm/board/fvp/fdts/fvp_spmc_manifest.dts
new file mode 100644
index 0000000..e1c106f
--- /dev/null
+++ b/plat/arm/board/fvp/fdts/fvp_spmc_manifest.dts
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/dts-v1/;
+
+/ {
+	compatible = "arm,spci-core-manifest-1.0";
+
+	attribute {
+		maj_ver = <0x0>;
+		min_ver = <0x9>;
+		runtime_el = <0x1>;
+		exec_state = <0x0>;
+		load_address = <0x0 0x6000000>;
+		entrypoint = <0x0 0x6000000>;
+	};
+};
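
For illustration, the attribute node above maps onto the spmc_manifest_sect_attribute_t structure introduced earlier roughly as follows (hypothetical initializer, not part of the patch; binary_size is left unset because the example manifest omits it):

    #include <services/spm_core_manifest.h>

    /* What plat_spm_core_manifest_load() would fill in from this manifest. */
    static const spmc_manifest_sect_attribute_t fvp_spmc_attrs_example = {
    	.major_version = 0,		/* maj_ver */
    	.minor_version = 9,		/* min_ver */
    	.runtime_el    = 1,		/* S-EL1 */
    	.exec_state    = 0,		/* AArch64 */
    	.load_address  = 0x6000000ULL,
    	.entrypoint    = 0x6000000ULL,
    };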
diff --git a/plat/arm/board/fvp/fvp_common.c b/plat/arm/board/fvp/fvp_common.c
index ffaa93d..2c880fc 100644
--- a/plat/arm/board/fvp/fvp_common.c
+++ b/plat/arm/board/fvp/fvp_common.c
@@ -86,6 +86,9 @@
 #ifdef __aarch64__
 	ARM_MAP_DRAM2,
 #endif
+#if defined(SPD_spmd)
+	ARM_MAP_TRUSTED_DRAM,
+#endif
 #ifdef SPD_tspd
 	ARM_MAP_TSP_SEC_MEM,
 #endif
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index 6fb34c4..4cc3bc6 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -222,6 +222,14 @@
 $(eval $(call TOOL_ADD_PAYLOAD,${FVP_TOS_FW_CONFIG},--tos-fw-config))
 endif
 
+ifeq (${SPD},spmd)
+FDT_SOURCES		+=	plat/arm/board/fvp/fdts/${PLAT}_spmc_manifest.dts
+FVP_TOS_FW_CONFIG	:=	${BUILD_PLAT}/fdts/${PLAT}_spmc_manifest.dtb
+
+# Add the TOS_FW_CONFIG to FIP and specify the same to certtool
+$(eval $(call TOOL_ADD_PAYLOAD,${FVP_TOS_FW_CONFIG},--tos-fw-config))
+endif
+
 # Add the TB_FW_CONFIG to FIP and specify the same to certtool
 $(eval $(call TOOL_ADD_PAYLOAD,${FVP_TB_FW_CONFIG},--tb-fw-config))
 # Add the SOC_FW_CONFIG to FIP and specify the same to certtool
diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk
index 9f4bc21..d2578b7 100644
--- a/plat/arm/common/arm_common.mk
+++ b/plat/arm/common/arm_common.mk
@@ -265,6 +265,13 @@
 				lib/extensions/pauth/pauth_helpers.S
 endif
 
+ifeq (${SPD},spmd)
+BL31_SOURCES		+=	plat/common/plat_spmd_manifest.c	\
+				common/fdt_wrappers.c			\
+				${LIBFDT_SRCS}
+
+endif
+
 ifneq (${TRUSTED_BOARD_BOOT},0)
 
     # Include common TBB sources
diff --git a/plat/arm/common/arm_dyn_cfg.c b/plat/arm/common/arm_dyn_cfg.c
index e6c5a73..fdc3ef3 100644
--- a/plat/arm/common/arm_dyn_cfg.c
+++ b/plat/arm/common/arm_dyn_cfg.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -197,8 +197,8 @@
 			HW_CONFIG_ID,
 			SOC_FW_CONFIG_ID,
 			NT_FW_CONFIG_ID,
-#ifdef SPD_tspd
-			/* Currently tos_fw_config is only present for TSP */
+#if defined(SPD_tspd) || defined(SPD_spmd)
+			/* tos_fw_config is only present for TSPD/SPMD */
 			TOS_FW_CONFIG_ID
 #endif
 	};
diff --git a/plat/common/plat_spmd_manifest.c b/plat/common/plat_spmd_manifest.c
new file mode 100644
index 0000000..4c78979
--- /dev/null
+++ b/plat/common/plat_spmd_manifest.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+#include <libfdt.h>
+
+#include <common/debug.h>
+#include <common/fdt_wrappers.h>
+#include <errno.h>
+#include <platform_def.h>
+#include <services/spm_core_manifest.h>
+
+/*******************************************************************************
+ * Attribute section handler
+ ******************************************************************************/
+static int manifest_parse_attribute(spmc_manifest_sect_attribute_t *attr,
+				    const void *fdt,
+				    int node)
+{
+	int rc = 0;
+
+	assert(attr && fdt);
+
+	rc = fdtw_read_cells(fdt, node, "maj_ver", 1, &attr->major_version);
+	if (rc) {
+		ERROR("Missing SPCI major version in SPM core manifest.\n");
+		return -ENOENT;
+	}
+
+	rc = fdtw_read_cells(fdt, node, "min_ver", 1, &attr->minor_version);
+	if (rc) {
+		ERROR("Missing SPCI minor version in SPM core manifest.\n");
+		return -ENOENT;
+	}
+
+	rc = fdtw_read_cells(fdt, node, "runtime_el", 1, &attr->runtime_el);
+	if (rc) {
+		ERROR("Missing SPM core runtime EL in manifest.\n");
+		return -ENOENT;
+	}
+
+	rc = fdtw_read_cells(fdt, node, "exec_state", 1, &attr->exec_state);
+	if (rc)
+		NOTICE("Execution state not specified in SPM core manifest.\n");
+
+	rc = fdtw_read_cells(fdt, node, "binary_size", 1, &attr->binary_size);
+	if (rc)
+		NOTICE("Binary size not specified in SPM core manifest.\n");
+
+	rc = fdtw_read_cells(fdt, node, "load_address", 2, &attr->load_address);
+	if (rc)
+		NOTICE("Load address not specified in SPM core manifest.\n");
+
+	rc = fdtw_read_cells(fdt, node, "entrypoint", 2, &attr->entrypoint);
+	if (rc)
+		NOTICE("Entrypoint not specified in SPM core manifest.\n");
+
+	VERBOSE("SPM core manifest attribute section:\n");
+	VERBOSE("  version: %x.%x\n", attr->major_version, attr->minor_version);
+	VERBOSE("  runtime_el: 0x%x\n", attr->runtime_el);
+	VERBOSE("  binary_size: 0x%x\n", attr->binary_size);
+	VERBOSE("  load_address: 0x%llx\n", attr->load_address);
+	VERBOSE("  entrypoint: 0x%llx\n", attr->entrypoint);
+
+	return 0;
+}
+
+/*******************************************************************************
+ * Root node handler
+ ******************************************************************************/
+static int manifest_parse_root(spmc_manifest_sect_attribute_t *manifest,
+				const void *fdt,
+				int root)
+{
+	int node;
+	char *str;
+
+	str = "attribute";
+	node = fdt_subnode_offset_namelen(fdt, root, str, strlen(str));
+	if (node < 0) {
+		ERROR("Root node doesn't contain subnode '%s'\n", str);
+		return -ENOENT;
+	}
+
+	return manifest_parse_attribute(manifest, fdt, node);
+}
+
+/*******************************************************************************
+ * Platform handler to parse a SPM core manifest.
+ ******************************************************************************/
+int plat_spm_core_manifest_load(spmc_manifest_sect_attribute_t *manifest,
+				const void *ptr,
+				size_t size)
+{
+	int rc;
+	int root_node;
+
+	assert(manifest != NULL);
+	assert(ptr != NULL);
+
+	INFO("Reading SPM core manifest at address %p\n", ptr);
+
+	rc = fdt_check_header(ptr);
+	if (rc != 0) {
+		ERROR("Wrong format for SPM core manifest (%d).\n", rc);
+		return -EINVAL;
+	}
+
+	root_node = fdt_node_offset_by_compatible(ptr, -1,
+				"arm,spci-core-manifest-1.0");
+	if (root_node < 0) {
+		ERROR("Unrecognized SPM core manifest\n");
+		return -ENOENT;
+	}
+
+	return manifest_parse_root(manifest, ptr, root_node);
+}
diff --git a/services/std_svc/spmd/aarch64/spmd_helpers.S b/services/std_svc/spmd/aarch64/spmd_helpers.S
new file mode 100644
index 0000000..d7bffca
--- /dev/null
+++ b/services/std_svc/spmd/aarch64/spmd_helpers.S
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include "../spmd_private.h"
+
+	.global spmd_spm_core_enter
+	.global spmd_spm_core_exit
+
+	/* ---------------------------------------------------------------------
+	 * This function is called with SP_EL0 as stack. Here we stash our EL3
+	 * callee-saved registers on to the stack as a part of saving the C
+	 * runtime and enter the secure payload.
+	 * 'x0' contains a pointer to the memory where the address of the C
+	 *  runtime context is to be saved.
+	 * ---------------------------------------------------------------------
+	 */
+func spmd_spm_core_enter
+	/* Save the current SP and make space for the registers we will save */
+	mov	x3, sp
+	str	x3, [x0, #0]
+	sub	sp, sp, #SPMD_C_RT_CTX_SIZE
+
+	/* Save callee-saved registers on to the stack */
+	stp	x19, x20, [sp, #SPMD_C_RT_CTX_X19]
+	stp	x21, x22, [sp, #SPMD_C_RT_CTX_X21]
+	stp	x23, x24, [sp, #SPMD_C_RT_CTX_X23]
+	stp	x25, x26, [sp, #SPMD_C_RT_CTX_X25]
+	stp	x27, x28, [sp, #SPMD_C_RT_CTX_X27]
+	stp	x29, x30, [sp, #SPMD_C_RT_CTX_X29]
+
+	/* ---------------------------------------------------------------------
+	 * Everything is set up now. el3_exit() will use the secure context to
+	 * restore the general purpose and EL3 system registers and ERET
+	 * into the secure payload.
+	 * ---------------------------------------------------------------------
+	 */
+	b	el3_exit
+endfunc spmd_spm_core_enter
+
+	/* ---------------------------------------------------------------------
+	 * This function is called with 'x0' pointing to a C runtime context.
+	 * It restores the saved registers and jumps to that runtime with 'x0'
+	 * as the new SP register. This destroys the C runtime context that had
+	 * been built on the stack below the saved context by the caller. Later
+	 * been built on the stack below the saved context by the caller. Finally,
+	 * the second parameter 'x1' is passed as the return value to the caller.
+	 */
+func spmd_spm_core_exit
+	/* Restore the previous stack */
+	mov	sp, x0
+
+	/* Restore callee-saved registers from the stack */
+	ldp	x19, x20, [x0, #(SPMD_C_RT_CTX_X19 - SPMD_C_RT_CTX_SIZE)]
+	ldp	x21, x22, [x0, #(SPMD_C_RT_CTX_X21 - SPMD_C_RT_CTX_SIZE)]
+	ldp	x23, x24, [x0, #(SPMD_C_RT_CTX_X23 - SPMD_C_RT_CTX_SIZE)]
+	ldp	x25, x26, [x0, #(SPMD_C_RT_CTX_X25 - SPMD_C_RT_CTX_SIZE)]
+	ldp	x27, x28, [x0, #(SPMD_C_RT_CTX_X27 - SPMD_C_RT_CTX_SIZE)]
+	ldp	x29, x30, [x0, #(SPMD_C_RT_CTX_X29 - SPMD_C_RT_CTX_SIZE)]
+
+	/* ---------------------------------------------------------------------
+	 * This should take us back to the instruction after the call to the
+	 * last spmd_spm_core_enter(). Place the second parameter in x0
+	 * so that the caller will see it as a return value from the original
+	 * entry call.
+	 * ---------------------------------------------------------------------
+	 */
+	mov	x0, x1
+	ret
+endfunc spmd_spm_core_exit
diff --git a/services/std_svc/spmd/spmd.mk b/services/std_svc/spmd/spmd.mk
new file mode 100644
index 0000000..38d43f1
--- /dev/null
+++ b/services/std_svc/spmd/spmd.mk
@@ -0,0 +1,21 @@
+#
+# Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${ARCH},aarch64)
+        $(error "Error: SPMD is only supported on aarch64.")
+endif
+
+SPMD_SOURCES	+=	$(addprefix services/std_svc/spmd/,	\
+			${ARCH}/spmd_helpers.S			\
+			spmd_main.c)
+
+# Let the top-level Makefile know that we intend to include a BL32 image
+NEED_BL32		:=	yes
+
+# Enable dynamic memory mapping
+# The SPMD component maps the SPMC DTB within BL31 virtual space.
+PLAT_XLAT_TABLES_DYNAMIC :=	1
+$(eval $(call add_define,PLAT_XLAT_TABLES_DYNAMIC))
diff --git a/services/std_svc/spmd/spmd_main.c b/services/std_svc/spmd/spmd_main.c
new file mode 100644
index 0000000..677f639
--- /dev/null
+++ b/services/std_svc/spmd/spmd_main.c
@@ -0,0 +1,487 @@
+/*
+ * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <bl31/bl31.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/smccc.h>
+#include <lib/spinlock.h>
+#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/common_def.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+#include <services/spci_svc.h>
+#include <services/spmd_svc.h>
+#include <smccc_helpers.h>
+#include "spmd_private.h"
+
+/*******************************************************************************
+ * SPM Core context information.
+ ******************************************************************************/
+spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * SPM Core attribute information read from its manifest.
+ ******************************************************************************/
+spmc_manifest_sect_attribute_t spmc_attrs;
+
+/*******************************************************************************
+ * This function takes an SP context pointer and performs a synchronous entry
+ * into it.
+ ******************************************************************************/
+uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
+{
+	uint64_t rc;
+
+	assert(spmc_ctx != NULL);
+
+	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);
+
+	/* Restore the context assigned above */
+	cm_el1_sysregs_context_restore(SECURE);
+	cm_set_next_eret_context(SECURE);
+
+	/* Invalidate TLBs at EL1. */
+	tlbivmalle1();
+	dsbish();
+
+	/* Enter Secure Partition */
+	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);
+
+	/* Save secure state */
+	cm_el1_sysregs_context_save(SECURE);
+
+	return rc;
+}
+
+/*******************************************************************************
+ * This function returns to the place where spm_sp_synchronous_entry() was
+ * called originally.
+ ******************************************************************************/
+__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
+{
+	spmd_spm_core_context_t *ctx = &spm_core_context[plat_my_core_pos()];
+
+	/* Get context of the SP in use by this CPU. */
+	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));
+
+	/*
+	 * The SPMD must have initiated the original request through a
+	 * synchronous entry into SPMC. Jump back to the original C runtime
+	 * context with the value of 'rc' in x0.
+	 */
+	spmd_spm_core_exit(ctx->c_rt_ctx, rc);
+
+	panic();
+}
+
+/*******************************************************************************
+ * Jump to the SPM core for the first time.
+ ******************************************************************************/
+static int32_t spmd_init(void)
+{
+	uint64_t rc = 0;
+	spmd_spm_core_context_t *ctx = &spm_core_context[plat_my_core_pos()];
+
+	INFO("SPM Core init start.\n");
+	ctx->state = SPMC_STATE_RESET;
+
+	rc = spmd_spm_core_sync_entry(ctx);
+	if (rc) {
+		ERROR("SPMC initialisation failed 0x%llx\n", rc);
+		panic();
+	}
+
+	ctx->state = SPMC_STATE_IDLE;
+	INFO("SPM Core init end.\n");
+
+	return 1;
+}
+
+/*******************************************************************************
+ * Initialize context of SPM core.
+ ******************************************************************************/
+int32_t spmd_setup(void)
+{
+	int rc;
+	void *rd_base;
+	size_t rd_size;
+	entry_point_info_t *spmc_ep_info;
+	uintptr_t rd_base_align;
+	uintptr_t rd_size_align;
+	uint32_t ep_attr;
+
+	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+	if (!spmc_ep_info) {
+		WARN("No SPM core image provided by BL2 boot loader. Booting "
+		     "device without SP initialization. SMCs destined for SPM "
+		     "core will return SMC_UNK\n");
+		return 1;
+	}
+
+	/* Under no circumstances will this parameter be 0 */
+	assert(spmc_ep_info->pc != 0U);
+
+	/*
+	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
+	 * be used as a manifest for the SPM core at the next lower EL/mode.
+	 */
+	if (spmc_ep_info->args.arg0 == 0U || spmc_ep_info->args.arg2 == 0U) {
+		ERROR("Invalid or absent SPM core manifest\n");
+		panic();
+	}
+
+	/* Obtain whereabouts of SPM core manifest */
+	rd_base = (void *) spmc_ep_info->args.arg0;
+	rd_size = spmc_ep_info->args.arg2;
+
+	rd_base_align = page_align((uintptr_t) rd_base, DOWN);
+	rd_size_align = page_align((uintptr_t) rd_size, UP);
+
+	/* Map the manifest in the SPMD translation regime first */
+	VERBOSE("SPM core manifest base : 0x%lx\n", rd_base_align);
+	VERBOSE("SPM core manifest size : 0x%lx\n", rd_size_align);
+	rc = mmap_add_dynamic_region((unsigned long long) rd_base_align,
+				     (uintptr_t) rd_base_align,
+				     rd_size_align,
+				     MT_RO_DATA);
+	if (rc < 0) {
+		ERROR("Error while mapping SPM core manifest (%d).\n", rc);
+		panic();
+	}
+
+	/* Load the SPM core manifest */
+	rc = plat_spm_core_manifest_load(&spmc_attrs, rd_base, rd_size);
+	if (rc < 0) {
+		WARN("No or invalid SPM core manifest image provided by BL2 "
+		     "boot loader.\n");
+		goto error;
+	}
+
+	/*
+	 * Ensure that the SPM core version is compatible with the SPM
+	 * dispatcher version
+	 */
+	if ((spmc_attrs.major_version != SPCI_VERSION_MAJOR) ||
+	    (spmc_attrs.minor_version > SPCI_VERSION_MINOR)) {
+		WARN("Unsupported SPCI version (%x.%x) specified in SPM core "
+		     "manifest image provided by BL2 boot loader.\n",
+		     spmc_attrs.major_version, spmc_attrs.minor_version);
+		goto error;
+	}
+
+	INFO("SPCI version (%x.%x).\n", spmc_attrs.major_version,
+	     spmc_attrs.minor_version);
+
+	/* Validate the SPM core runtime EL */
+	if ((spmc_attrs.runtime_el != MODE_EL1) &&
+	    (spmc_attrs.runtime_el != MODE_EL2)) {
+		WARN("Unsupported SPM core run time EL%x specified in "
+		     "manifest image provided by BL2 boot loader.\n",
+		     spmc_attrs.runtime_el);
+		goto error;
+	}
+
+	INFO("SPM core run time EL%x.\n", spmc_attrs.runtime_el);
+
+	/* Validate the SPM core execution state */
+	if ((spmc_attrs.exec_state != MODE_RW_64) &&
+	    (spmc_attrs.exec_state != MODE_RW_32)) {
+		WARN("Unsupported SPM core execution state %x specified in "
+		     "manifest image provided by BL2 boot loader.\n",
+		     spmc_attrs.exec_state);
+		goto error;
+	}
+
+	INFO("SPM core execution state %x.\n", spmc_attrs.exec_state);
+
+	/* Ensure manifest has not requested S-EL2 in AArch32 state */
+	if ((spmc_attrs.exec_state == MODE_RW_32) &&
+	    (spmc_attrs.runtime_el == MODE_EL2)) {
+		WARN("Invalid combination of SPM core execution state (%x) "
+		     "and run time EL (%x).\n", spmc_attrs.exec_state,
+		     spmc_attrs.runtime_el);
+		goto error;
+	}
+
+	/*
+	 * Check if S-EL2 is supported on this system if S-EL2
+	 * is required for SPM
+	 */
+	if (spmc_attrs.runtime_el == MODE_EL2) {
+		uint64_t sel2 = read_id_aa64pfr0_el1();
+
+		sel2 >>= ID_AA64PFR0_SEL2_SHIFT;
+		sel2 &= ID_AA64PFR0_SEL2_MASK;
+
+		if (!sel2) {
+			WARN("SPM core run time EL: S-EL%x is not supported "
+			     "but specified in manifest image provided by "
+			     "BL2 boot loader.\n", spmc_attrs.runtime_el);
+			goto error;
+		}
+	}
+
+	/* Initialise an entrypoint to set up the CPU context */
+	ep_attr = SECURE | EP_ST_ENABLE;
+	if (read_sctlr_el3() & SCTLR_EE_BIT)
+		ep_attr |= EP_EE_BIG;
+	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);
+	assert(spmc_ep_info->pc == BL32_BASE);
+
+	/*
+	 * Populate SPSR for SPM core based upon validated parameters from the
+	 * manifest
+	 */
+	if (spmc_attrs.exec_state == MODE_RW_32) {
+		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
+						 SPSR_E_LITTLE,
+						 DAIF_FIQ_BIT |
+						 DAIF_IRQ_BIT |
+						 DAIF_ABT_BIT);
+	} else {
+		spmc_ep_info->spsr = SPSR_64(spmc_attrs.runtime_el,
+					     MODE_SP_ELX,
+					     DISABLE_ALL_EXCEPTIONS);
+	}
+
+	/* Initialise SPM core context with this entry point information */
+	cm_setup_context(&(spm_core_context[plat_my_core_pos()].cpu_ctx),
+			 spmc_ep_info);
+
+	INFO("SPM core setup done.\n");
+
+	/* Register init function for deferred init.  */
+	bl31_register_bl32_init(&spmd_init);
+
+	return 0;
+
+error:
+	WARN("Booting device without SPM initialization. "
+	     "SPCI SMCs destined for SPM core will return "
+	     "ENOTSUPPORTED\n");
+
+	rc = mmap_remove_dynamic_region(rd_base_align, rd_size_align);
+	if (rc < 0) {
+		ERROR("Error while unmapping SPM core manifest (%d).\n",
+		      rc);
+		panic();
+	}
+
+	return 1;
+}
+
+/*******************************************************************************
+ * This function handles all SMCs in the range reserved for SPCI. Each call is
+ * either forwarded to the other security state or handled by the SPM dispatcher
+ ******************************************************************************/
+uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
+			  uint64_t x3, uint64_t x4, void *cookie, void *handle,
+			  uint64_t flags)
+{
+	uint32_t in_sstate;
+	uint32_t out_sstate;
+	int32_t ret;
+	spmd_spm_core_context_t *ctx = &spm_core_context[plat_my_core_pos()];
+
+	/* Determine which security state this SMC originated from */
+	if (is_caller_secure(flags)) {
+		in_sstate = SECURE;
+		out_sstate = NON_SECURE;
+	} else {
+		in_sstate = NON_SECURE;
+		out_sstate = SECURE;
+	}
+
+	INFO("SPM: 0x%x, 0x%llx, 0x%llx, 0x%llx, 0x%llx, "
+	     "0x%llx, 0x%llx, 0x%llx\n",
+	     smc_fid, x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5),
+	     SMC_GET_GP(handle, CTX_GPREG_X6),
+	     SMC_GET_GP(handle, CTX_GPREG_X7));
+
+	switch (smc_fid) {
+	case SPCI_ERROR:
+		/*
+		 * Check if this is the first invocation of this interface on
+		 * this CPU. If so, then indicate that the SPM core initialised
+		 * unsuccessfully.
+		 */
+		if ((in_sstate == SECURE) && (ctx->state == SPMC_STATE_RESET))
+			spmd_spm_core_sync_exit(x2);
+
+		/* Save incoming security state */
+		cm_el1_sysregs_context_save(in_sstate);
+
+		/* Restore outgoing security state */
+		cm_el1_sysregs_context_restore(out_sstate);
+		cm_set_next_eret_context(out_sstate);
+
+		SMC_RET8(cm_get_context(out_sstate), smc_fid, x1, x2, x3, x4,
+			 SMC_GET_GP(handle, CTX_GPREG_X5),
+			 SMC_GET_GP(handle, CTX_GPREG_X6),
+			 SMC_GET_GP(handle, CTX_GPREG_X7));
+		break; /* not reached */
+
+	case SPCI_VERSION:
+		/*
+		 * TODO: As an optimization, the version information from the
+		 * SPM core manifest is returned directly by the SPM dispatcher.
+		 * It might be a better idea to simply forward this call to the
+		 * SPM core and wash our hands completely.
+		 */
+		ret = MAKE_SPCI_VERSION(spmc_attrs.major_version,
+					spmc_attrs.minor_version);
+		SMC_RET8(handle, SPCI_SUCCESS_SMC32, SPCI_TARGET_INFO_MBZ, ret,
+			 SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, SPCI_PARAM_MBZ,
+			 SPCI_PARAM_MBZ, SPCI_PARAM_MBZ);
+		break; /* not reached */
+
+	case SPCI_FEATURES:
+		/*
+		 * This is an optional interface. Do the minimal checks and
+		 * forward to SPM core which will handle it if implemented.
+		 */
+
+		/*
+		 * Check if w1 holds a valid SPCI fid. This is an
+		 * optimization.
+		 */
+		if (!is_spci_fid(x1))
+			SMC_RET8(handle, SPCI_ERROR,
+				 SPCI_TARGET_INFO_MBZ, SPCI_ERROR_NOT_SUPPORTED,
+				 SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, SPCI_PARAM_MBZ,
+				 SPCI_PARAM_MBZ, SPCI_PARAM_MBZ);
+
+		/* Forward SMC from Normal world to the SPM core */
+		if (in_sstate == NON_SECURE) {
+			/* Save incoming security state */
+			cm_el1_sysregs_context_save(in_sstate);
+
+			/* Restore outgoing security state */
+			cm_el1_sysregs_context_restore(out_sstate);
+			cm_set_next_eret_context(out_sstate);
+
+			SMC_RET8(cm_get_context(out_sstate), smc_fid,
+				 x1, x2, x3, x4,
+				 SMC_GET_GP(handle, CTX_GPREG_X5),
+				 SMC_GET_GP(handle, CTX_GPREG_X6),
+				 SMC_GET_GP(handle, CTX_GPREG_X7));
+		} else {
+			/*
+			 * Return success if call was from secure world i.e. all
+			 * SPCI functions are supported. This is essentially a
+			 * nop.
+			 */
+			SMC_RET8(handle, SPCI_SUCCESS_SMC32, x1, x2, x3, x4,
+				 SMC_GET_GP(handle, CTX_GPREG_X5),
+				 SMC_GET_GP(handle, CTX_GPREG_X6),
+				 SMC_GET_GP(handle, CTX_GPREG_X7));
+		}
+		break; /* not reached */
+
+	case SPCI_RX_RELEASE:
+	case SPCI_RXTX_MAP_SMC32:
+	case SPCI_RXTX_MAP_SMC64:
+	case SPCI_RXTX_UNMAP:
+	case SPCI_MSG_RUN:
+		/* This interface must be invoked only by the Normal world */
+		if (in_sstate == SECURE) {
+			SMC_RET8(handle, SPCI_ERROR,
+				 SPCI_TARGET_INFO_MBZ, SPCI_ERROR_NOT_SUPPORTED,
+				 SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, SPCI_PARAM_MBZ,
+				 SPCI_PARAM_MBZ, SPCI_PARAM_MBZ);
+		}
+
+		/* Fall through to forward the call to the other world */
+
+	case SPCI_PARTITION_INFO_GET:
+	case SPCI_MSG_SEND:
+	case SPCI_MSG_SEND_DIRECT_REQ_SMC32:
+	case SPCI_MSG_SEND_DIRECT_REQ_SMC64:
+	case SPCI_MSG_SEND_DIRECT_RESP_SMC32:
+	case SPCI_MSG_SEND_DIRECT_RESP_SMC64:
+	case SPCI_MEM_DONATE_SMC32:
+	case SPCI_MEM_DONATE_SMC64:
+	case SPCI_MEM_LEND_SMC32:
+	case SPCI_MEM_LEND_SMC64:
+	case SPCI_MEM_SHARE_SMC32:
+	case SPCI_MEM_SHARE_SMC64:
+	case SPCI_MEM_RETRIEVE_REQ_SMC32:
+	case SPCI_MEM_RETRIEVE_REQ_SMC64:
+	case SPCI_MEM_RETRIEVE_RESP:
+	case SPCI_MEM_RELINQUISH:
+	case SPCI_MEM_RECLAIM:
+	case SPCI_SUCCESS_SMC32:
+	case SPCI_SUCCESS_SMC64:
+		/*
+		 * TODO: Assume that no requests originate from EL3 at the
+		 * moment. This will change if an SP service is required in
+		 * response to secure interrupts targeted to EL3. Until then
+		 * simply forward the call to the other world.
+		 */
+
+		/* Save incoming security state */
+		cm_el1_sysregs_context_save(in_sstate);
+
+		/* Restore outgoing security state */
+		cm_el1_sysregs_context_restore(out_sstate);
+		cm_set_next_eret_context(out_sstate);
+
+		SMC_RET8(cm_get_context(out_sstate), smc_fid, x1, x2, x3, x4,
+			 SMC_GET_GP(handle, CTX_GPREG_X5),
+			 SMC_GET_GP(handle, CTX_GPREG_X6),
+			 SMC_GET_GP(handle, CTX_GPREG_X7));
+		break; /* not reached */
+
+	case SPCI_MSG_WAIT:
+		/*
+		 * Check if this is the first invocation of this interface on
+		 * this CPU from the Secure world. If so, then indicate that the
+		 * SPM core initialised successfully.
+		 */
+		if ((in_sstate == SECURE) && (ctx->state == SPMC_STATE_RESET)) {
+			spmd_spm_core_sync_exit(0);
+		}
+
+		/* Intentional fall-through */
+
+	case SPCI_MSG_YIELD:
+		/* This interface must be invoked only by the Secure world */
+		if (in_sstate == NON_SECURE) {
+			SMC_RET8(handle, SPCI_ERROR,
+				 SPCI_TARGET_INFO_MBZ, SPCI_ERROR_NOT_SUPPORTED,
+				 SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, SPCI_PARAM_MBZ,
+				 SPCI_PARAM_MBZ, SPCI_PARAM_MBZ);
+		}
+
+		/* Save incoming security state */
+		cm_el1_sysregs_context_save(in_sstate);
+
+		/* Restore outgoing security state */
+		cm_el1_sysregs_context_restore(out_sstate);
+		cm_set_next_eret_context(out_sstate);
+
+		SMC_RET8(cm_get_context(out_sstate), smc_fid, x1, x2, x3, x4,
+			 SMC_GET_GP(handle, CTX_GPREG_X5),
+			 SMC_GET_GP(handle, CTX_GPREG_X6),
+			 SMC_GET_GP(handle, CTX_GPREG_X7));
+		break; /* not reached */
+
+	default:
+		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
+		SMC_RET8(handle, SPCI_ERROR,
+			 SPCI_TARGET_INFO_MBZ, SPCI_ERROR_NOT_SUPPORTED,
+			 SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, SPCI_PARAM_MBZ,
+			 SPCI_PARAM_MBZ, SPCI_PARAM_MBZ);
+	}
+}
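
Note that the SPCI_VERSION case above is answered entirely in EL3 from the parsed manifest, without entering the SPM core. A hypothetical helper showing the packed value that ends up in x2 (with the FVP manifest in this patch it is MAKE_SPCI_VERSION(0, 9) == 0x9):

    #include <services/spci_svc.h>
    #include <services/spm_core_manifest.h>

    extern spmc_manifest_sect_attribute_t spmc_attrs;

    /* Illustration only: the value returned in x2 by the SPCI_VERSION case. */
    static inline uint32_t spmd_example_reported_version(void)
    {
    	return MAKE_SPCI_VERSION(spmc_attrs.major_version,
    				 spmc_attrs.minor_version);
    }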
diff --git a/services/std_svc/spmd/spmd_private.h b/services/std_svc/spmd/spmd_private.h
new file mode 100644
index 0000000..61b479a
--- /dev/null
+++ b/services/std_svc/spmd/spmd_private.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPMD_PRIVATE_H
+#define SPMD_PRIVATE_H
+
+#include <context.h>
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define SPMD_C_RT_CTX_X19		0x0
+#define SPMD_C_RT_CTX_X20		0x8
+#define SPMD_C_RT_CTX_X21		0x10
+#define SPMD_C_RT_CTX_X22		0x18
+#define SPMD_C_RT_CTX_X23		0x20
+#define SPMD_C_RT_CTX_X24		0x28
+#define SPMD_C_RT_CTX_X25		0x30
+#define SPMD_C_RT_CTX_X26		0x38
+#define SPMD_C_RT_CTX_X27		0x40
+#define SPMD_C_RT_CTX_X28		0x48
+#define SPMD_C_RT_CTX_X29		0x50
+#define SPMD_C_RT_CTX_X30		0x58
+
+#define SPMD_C_RT_CTX_SIZE		0x60
+#define SPMD_C_RT_CTX_ENTRIES		(SPMD_C_RT_CTX_SIZE >> DWORD_SHIFT)
+
+#ifndef __ASSEMBLER__
+#include <services/spci_svc.h>
+#include <stdint.h>
+
+/*
+ * Convert a function no. in a FID to a bit position. Only the lowest five
+ * bits of the function no. are used, so bit positions lie between 0 and 0x1f.
+ */
+#define SPCI_FNO_TO_BIT_POS(_fid)	(1 << ((_fid) & U(0x1f)))
+
+typedef enum spmc_state {
+	SPMC_STATE_RESET = 0,
+	SPMC_STATE_IDLE
+} spmc_state_t;
+
+/*
+ * Data structure used by the SPM dispatcher (SPMD) in EL3 to track context of
+ * the SPM core (SPMC) at the next lower EL.
+ */
+typedef struct spmd_spm_core_context {
+	uint64_t c_rt_ctx;
+	cpu_context_t cpu_ctx;
+	spmc_state_t state;
+} spmd_spm_core_context_t;
+
+/*
+ * Data structure used by the SPM dispatcher (SPMD) in EL3 to track sequence of
+ * SPCI calls from lower ELs.
+ *
+ * next_smc_bit_map: Per-cpu bit map of SMCs from each world that are expected
+ *                   next.
+ */
+typedef struct spmd_spci_context {
+	uint32_t next_smc_bit_map[2];
+} spmd_spci_context_t;
+
+/* Functions used to enter/exit a Secure Partition synchronously */
+uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *ctx);
+__dead2 void spmd_spm_core_sync_exit(uint64_t rc);
+
+/* Assembly helpers */
+uint64_t spmd_spm_core_enter(uint64_t *c_rt_ctx);
+void __dead2 spmd_spm_core_exit(uint64_t c_rt_ctx, uint64_t ret);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* SPMD_PRIVATE_H */
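
The SPMD_C_RT_CTX_* offsets above describe the 0x60-byte callee-saved frame that spmd_spm_core_enter() builds on the SP_EL0 stack. Laid out as a C structure it would look like this (illustration only, not part of the patch):

    #include <stdint.h>

    /* Mirror of the frame saved/restored in spmd_helpers.S. */
    typedef struct spmd_c_rt_frame {
    	uint64_t x19, x20;	/* SPMD_C_RT_CTX_X19 / _X20 */
    	uint64_t x21, x22;	/* SPMD_C_RT_CTX_X21 / _X22 */
    	uint64_t x23, x24;	/* SPMD_C_RT_CTX_X23 / _X24 */
    	uint64_t x25, x26;	/* SPMD_C_RT_CTX_X25 / _X26 */
    	uint64_t x27, x28;	/* SPMD_C_RT_CTX_X27 / _X28 */
    	uint64_t x29, x30;	/* frame pointer and link register */
    } spmd_c_rt_frame_t;	/* sizeof() == SPMD_C_RT_CTX_SIZE (0x60) */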
diff --git a/services/std_svc/std_svc_setup.c b/services/std_svc/std_svc_setup.c
index 7787a2f..895fd29 100644
--- a/services/std_svc/std_svc_setup.c
+++ b/services/std_svc/std_svc_setup.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -15,6 +15,7 @@
 #include <lib/runtime_instr.h>
 #include <services/sdei.h>
 #include <services/spm_mm_svc.h>
+#include <services/spmd_svc.h>
 #include <services/std_svc.h>
 #include <smccc_helpers.h>
 #include <tools_share/uuid.h>
@@ -51,6 +52,12 @@
 	}
 #endif
 
+#if defined(SPD_spmd)
+	if (spmd_setup() != 0) {
+		ret = 1;
+	}
+#endif
+
 #if SDEI_SUPPORT
 	/* SDEI initialisation */
 	sdei_init();
@@ -114,6 +121,17 @@
 	}
 #endif
 
+#if defined(SPD_spmd)
+	/*
+	 * Dispatch SPCI calls to the SPCI SMC handler implemented by the SPM
+	 * dispatcher and return its return value
+	 */
+	if (is_spci_fid(smc_fid)) {
+		return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
+					handle, flags);
+	}
+#endif
+
 #if SDEI_SUPPORT
 	if (is_sdei_fid(smc_fid)) {
 		return sdei_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,