SPMD: handle SPMC message to register secondary core entry point

Upon booting, the SPMC running on the primary core shall register with
the SPMD the entry points within the SPMC to which a given secondary
core, when woken up, shall jump. The current implementation assumes the
SPMC calls a registration service implemented in the SPMD for each core,
identified by its MPIDR. This can typically happen in a simple loop in
the early SPMC initialization routines, passing each core identifier
along with an entry point address and context information.
This service is implemented on top of a more generic SPMC<=>SPMD
interface using direct request/response message passing as defined by
the FF-A specification.

Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
Signed-off-by: Max Shvetsov <maksims.svecovs@arm.com>
Change-Id: I1f70163b6b5cee0880bd2004e1fec41e3780ba35
diff --git a/services/std_svc/spmd/spmd_main.c b/services/std_svc/spmd/spmd_main.c
index cdbb9ca..3050957 100644
--- a/services/std_svc/spmd/spmd_main.c
+++ b/services/std_svc/spmd/spmd_main.c
@@ -140,7 +140,7 @@
 	ctx->state = SPMC_STATE_ON_PENDING;
 
 	/* Set the SPMC context state on other CPUs to OFF */
-	for (core_id = 0; core_id < PLATFORM_CORE_COUNT; core_id++) {
+	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
 		if (core_id != linear_id) {
 			spm_core_context[core_id].state = SPMC_STATE_OFF;
 		}
@@ -355,6 +355,17 @@
 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
 }
 
+/*******************************************************************************
+ * spmd_check_address_in_binary_image
+ ******************************************************************************/
+bool spmd_check_address_in_binary_image(uint64_t address)
+{
+	assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));
+
+	return ((address >= spmc_attrs.load_address) &&
+		(address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
+}
+
 /******************************************************************************
  * spmd_is_spmc_message
  *****************************************************************************/
@@ -364,6 +375,26 @@
 		&& (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
 }
 
+/******************************************************************************
+ * spmd_handle_spmc_message
+ *****************************************************************************/
+static int32_t spmd_handle_spmc_message(uint64_t msg, uint64_t parm1,
+					uint64_t parm2, uint64_t parm3,
+					uint64_t parm4)
+{
+	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
+		msg, parm1, parm2, parm3, parm4);
+
+	switch (msg) {
+	case SPMD_DIRECT_MSG_SET_ENTRY_POINT:
+		return spmd_pm_secondary_core_set_ep(parm1, parm2, parm3);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
 /*******************************************************************************
  * This function handles all SMCs in the range reserved for FFA. Each call is
  * either forwarded to the other security state or handled by the SPM dispatcher
@@ -481,6 +512,35 @@
 
 		break; /* not reached */
 
+	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
+		if (secure_origin && spmd_is_spmc_message(x1)) {
+			ret = spmd_handle_spmc_message(x3, x4,
+				SMC_GET_GP(handle, CTX_GPREG_X5),
+				SMC_GET_GP(handle, CTX_GPREG_X6),
+				SMC_GET_GP(handle, CTX_GPREG_X7));
+
+			SMC_RET8(handle, FFA_SUCCESS_SMC32,
+				FFA_TARGET_INFO_MBZ, ret,
+				FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+				FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+				FFA_PARAM_MBZ);
+		} else {
+			/* Forward direct message to the other world */
+			return spmd_smc_forward(smc_fid, secure_origin,
+				x1, x2, x3, x4, handle);
+		}
+		break; /* Not reached */
+
+	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
+		if (secure_origin && spmd_is_spmc_message(x1)) {
+			spmd_spm_core_sync_exit(0);
+		} else {
+			/* Forward direct message to the other world */
+			return spmd_smc_forward(smc_fid, secure_origin,
+				x1, x2, x3, x4, handle);
+		}
+		break; /* Not reached */
+
 	case FFA_RX_RELEASE:
 	case FFA_RXTX_MAP_SMC32:
 	case FFA_RXTX_MAP_SMC64:
@@ -496,9 +556,7 @@
 
 	case FFA_PARTITION_INFO_GET:
 	case FFA_MSG_SEND:
-	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
-	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
 	case FFA_MEM_DONATE_SMC32:
 	case FFA_MEM_DONATE_SMC64:
diff --git a/services/std_svc/spmd/spmd_pm.c b/services/std_svc/spmd/spmd_pm.c
index eff59ad..13c638c 100644
--- a/services/std_svc/spmd/spmd_pm.c
+++ b/services/std_svc/spmd/spmd_pm.c
@@ -5,8 +5,56 @@
  */
 
 #include <assert.h>
+#include <errno.h>
 #include "spmd_private.h"
 
+struct spmd_pm_secondary_ep_t {
+	uintptr_t entry_point;
+	uintptr_t context;
+	bool locked;
+};
+
+static struct spmd_pm_secondary_ep_t spmd_pm_secondary_ep[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * spmd_pm_secondary_core_set_ep
+ ******************************************************************************/
+int32_t spmd_pm_secondary_core_set_ep(uint64_t mpidr, uintptr_t entry_point,
+				      uint64_t context)
+{
+	int id = plat_core_pos_by_mpidr(mpidr);
+
+	if ((id < 0) || (id >= PLATFORM_CORE_COUNT)) {
+		ERROR("%s inconsistent MPIDR (%llx)\n", __func__, mpidr);
+		return -EINVAL;
+	}
+
+	if (spmd_pm_secondary_ep[id].locked) {
+		ERROR("%s entry locked (%llx)\n", __func__, mpidr);
+		return -EINVAL;
+	}
+
+	/*
+	 * Check entry_point address is a PA within
+	 * load_address <= entry_point < load_address + binary_size
+	 */
+	if (!spmd_check_address_in_binary_image(entry_point)) {
+		ERROR("%s entry point is not within image boundaries (%llx)\n",
+		      __func__, mpidr);
+		return -EINVAL;
+	}
+
+	/* Fill new entry to corresponding secondary core id and lock it */
+	spmd_pm_secondary_ep[id].entry_point = entry_point;
+	spmd_pm_secondary_ep[id].context = context;
+	spmd_pm_secondary_ep[id].locked = true;
+
+	VERBOSE("%s %d %llx %lx %llx\n",
+		__func__, id, mpidr, entry_point, context);
+
+	return 0;
+}
+
 /*******************************************************************************
  * This CPU has been turned on. Enter SPMC to initialise S-EL1 or S-EL2. As part
  * of the SPMC initialization path, they will initialize any SPs that they
diff --git a/services/std_svc/spmd/spmd_private.h b/services/std_svc/spmd/spmd_private.h
index 7d5f476..0d78f53 100644
--- a/services/std_svc/spmd/spmd_private.h
+++ b/services/std_svc/spmd/spmd_private.h
@@ -81,6 +81,10 @@
 /* SPMC context on current CPU get helper */
 spmd_spm_core_context_t *spmd_get_context(void);
 
+int32_t spmd_pm_secondary_core_set_ep(uint64_t mpidr, uintptr_t entry_point,
+				      uint64_t context);
+bool spmd_check_address_in_binary_image(uint64_t address);
+
 #endif /* __ASSEMBLER__ */
 
 #endif /* SPMD_PRIVATE_H */