feat(el3-spmc): synchronize access to the S-EL0 SP context

An S-EL0 SP is a UP partition, so its single execution context can be
targeted concurrently from multiple cores. This patch takes the new
per-partition rt_state_lock spinlock while the runtime state and model
of an S-EL0 SP execution context are checked and updated, avoiding
races on this global state.
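
The same acquire/check/update/release pattern is applied in each FF-A
handler touched below; the following condensed sketch is taken from
the direct request hunk (the state values and error code differ at the
other call sites):

    if (sp->runtime_el == S_EL0) {
            spin_lock(&sp->rt_state_lock);
    }

    /* Check and update the execution context state under the lock. */
    if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
            if (sp->runtime_el == S_EL0) {
                    spin_unlock(&sp->rt_state_lock);
            }
            return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
    }
    sp->ec[idx].rt_state = RT_STATE_RUNNING;
    sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;

    if (sp->runtime_el == S_EL0) {
            spin_unlock(&sp->rt_state_lock);
    }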

Signed-off-by: Achin Gupta <achin.gupta@arm.com>
Signed-off-by: Nishant Sharma <nishant.sharma@arm.com>
Change-Id: I427657050574c189cbaf82c1371e3ee44bc1663e
diff --git a/services/std_svc/spm/el3_spmc/spmc.h b/services/std_svc/spm/el3_spmc/spmc.h
index c8515e5..e093a82 100644
--- a/services/std_svc/spm/el3_spmc/spmc.h
+++ b/services/std_svc/spm/el3_spmc/spmc.h
@@ -168,6 +168,9 @@
 	/* Mailbox tracking. */
 	struct mailbox mailbox;
 
+	/* Lock to protect the runtime state of a S-EL0 SP execution context. */
+	spinlock_t rt_state_lock;
+
 	/* Pointer to translation table context of a S-EL0 SP. */
 	xlat_ctx_t *xlat_ctx_handle;
 
diff --git a/services/std_svc/spm/el3_spmc/spmc_main.c b/services/std_svc/spm/el3_spmc/spmc_main.c
index d661e8a..42747bf 100644
--- a/services/std_svc/spm/el3_spmc/spmc_main.c
+++ b/services/std_svc/spm/el3_spmc/spmc_main.c
@@ -401,6 +401,11 @@
 					     FFA_ERROR_INVALID_PARAMETER);
 	}
 
+	/* Protect the runtime state of a UP S-EL0 SP with a lock. */
+	if (sp->runtime_el == S_EL0) {
+		spin_lock(&sp->rt_state_lock);
+	}
+
 	/*
 	 * Check that the target execution context is in a waiting state before
 	 * forwarding the direct request to it.
@@ -409,6 +414,11 @@
 	if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
 		VERBOSE("SP context on core%u is not waiting (%u).\n",
 			idx, sp->ec[idx].rt_model);
+
+		if (sp->runtime_el == S_EL0) {
+			spin_unlock(&sp->rt_state_lock);
+		}
+
 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
 	}
 
@@ -419,6 +429,11 @@
 	sp->ec[idx].rt_state = RT_STATE_RUNNING;
 	sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
 	sp->ec[idx].dir_req_origin_id = src_id;
+
+	if (sp->runtime_el == S_EL0) {
+		spin_unlock(&sp->rt_state_lock);
+	}
+
 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
 			       handle, cookie, flags, dst_id);
 }
@@ -473,6 +488,10 @@
 					     FFA_ERROR_INVALID_PARAMETER);
 	}
 
+	if (sp->runtime_el == S_EL0) {
+		spin_lock(&sp->rt_state_lock);
+	}
+
 	/* Sanity check state is being tracked correctly in the SPMC. */
 	idx = get_ec_index(sp);
 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
@@ -481,12 +500,18 @@
 	if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
 		VERBOSE("SP context on core%u not handling direct req (%u).\n",
 			idx, sp->ec[idx].rt_model);
+		if (sp->runtime_el == S_EL0) {
+			spin_unlock(&sp->rt_state_lock);
+		}
 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
 	}
 
 	if (sp->ec[idx].dir_req_origin_id != dst_id) {
 		WARN("Invalid direct resp partition ID 0x%x != 0x%x on core%u.\n",
 		     dst_id, sp->ec[idx].dir_req_origin_id, idx);
+		if (sp->runtime_el == S_EL0) {
+			spin_unlock(&sp->rt_state_lock);
+		}
 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
 	}
 
@@ -496,6 +521,10 @@
 	/* Clear the ongoing direct request ID. */
 	sp->ec[idx].dir_req_origin_id = INV_SP_ID;
 
+	if (sp->runtime_el == S_EL0) {
+		spin_unlock(&sp->rt_state_lock);
+	}
+
 	/*
 	 * If the receiver is not the SPMC then forward the response to the
 	 * Normal world.
@@ -547,9 +576,15 @@
 	 * Get the execution context of the SP that invoked FFA_MSG_WAIT.
 	 */
 	idx = get_ec_index(sp);
+	if (sp->runtime_el == S_EL0) {
+		spin_lock(&sp->rt_state_lock);
+	}
 
 	/* Ensure SP execution context was in the right runtime model. */
 	if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
+		if (sp->runtime_el == S_EL0) {
+			spin_unlock(&sp->rt_state_lock);
+		}
 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
 	}
 
@@ -561,6 +596,9 @@
 	 * state is updated after the exit.
 	 */
 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
+		if (sp->runtime_el == S_EL0) {
+			spin_unlock(&sp->rt_state_lock);
+		}
 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
 		/* Should not get here */
 		panic();
@@ -578,9 +616,19 @@
 		cm_el1_sysregs_context_save(secure_state_in);
 		cm_el1_sysregs_context_restore(secure_state_out);
 		cm_set_next_eret_context(secure_state_out);
+
+		if (sp->runtime_el == S_EL0) {
+			spin_unlock(&sp->rt_state_lock);
+		}
+
 		SMC_RET0(cm_get_context(secure_state_out));
 	}
 
+	/* Relinquish the lock protecting the runtime state of a S-EL0 SP. */
+	if (sp->runtime_el == S_EL0) {
+		spin_unlock(&sp->rt_state_lock);
+	}
+
 	/* Forward the response to the Normal world. */
 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
 			       handle, cookie, flags, FFA_NWD_ID);
@@ -1354,14 +1402,21 @@
 	}
 
 	idx = get_ec_index(sp);
+
 	if (idx != vcpu_id) {
 		ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
 		return spmc_ffa_error_return(handle,
 					     FFA_ERROR_INVALID_PARAMETER);
 	}
+	if (sp->runtime_el == S_EL0) {
+		spin_lock(&sp->rt_state_lock);
+	}
 	rt_state = &((sp->ec[idx]).rt_state);
 	rt_model = &((sp->ec[idx]).rt_model);
 	if (*rt_state == RT_STATE_RUNNING) {
+		if (sp->runtime_el == S_EL0) {
+			spin_unlock(&sp->rt_state_lock);
+		}
 		ERROR("Partition (0x%x) is already running.\n", target_id);
 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
 	}
@@ -1388,6 +1443,10 @@
 	 */
 	*rt_state = RT_STATE_RUNNING;
 
+	if (sp->runtime_el == S_EL0) {
+		spin_unlock(&sp->rt_state_lock);
+	}
+
 	return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
 			       handle, cookie, flags, target_id);
 }