feat(tsp): enable test cases for EL3 SPMC

Introduce initial test cases to the TSP, designed to be
exercised by the FF-A Test Driver in the Normal World and
to test the basic functionality of the EL3 SPMC.

These tests currently exercise the following functionality:
  - Partition discovery.
  - Direct messaging (sketched below).
  - Communication with a Logical SP.
  - Memory Sharing and Lending ABIs.
  - Sharing of contiguous and non-contiguous memory regions.
  - Memory region descriptors spread over multiple
    invocations.
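
As a rough, non-normative sketch, the direct messaging test is
driven from the Normal World as follows, where driver_id and
tsp_id are the respective FF-A endpoint IDs:

  x0 = FFA_MSG_SEND_DIRECT_REQ      (SMC32 or SMC64)
  x1 = (driver_id << 16) | tsp_id   (source | destination)
  x3 = FF_A_ECHO_MESSAGE            (test ID, see enum message_t)
  x4 = <value to echo>

The TSP replies with FFA_MSG_SEND_DIRECT_RESP and returns the
test status (here, the echoed value) in x3.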

Signed-off-by: Marc Bonnici <marc.bonnici@arm.com>
Signed-off-by: Shruti Gupta <shruti.gupta@arm.com>
Change-Id: Iaee4180aa18d6b7ac7b53685c6589f0ab306e876
diff --git a/bl32/tsp/ffa_helpers.c b/bl32/tsp/ffa_helpers.c
index 296957e..3639c22 100644
--- a/bl32/tsp/ffa_helpers.c
+++ b/bl32/tsp/ffa_helpers.c
@@ -9,6 +9,48 @@
 #include <services/ffa_svc.h>
 #include "tsp_private.h"
 
+/*******************************************************************************
+ * Wrapper function to send a direct request.
+ ******************************************************************************/
+smc_args_t ffa_msg_send_direct_req(ffa_endpoint_id16_t sender,
+				   ffa_endpoint_id16_t receiver,
+				   uint32_t arg3,
+				   uint32_t arg4,
+				   uint32_t arg5,
+				   uint32_t arg6,
+				   uint32_t arg7)
+{
+	uint32_t src_dst_ids = (sender << FFA_DIRECT_MSG_SOURCE_SHIFT) |
+			       (receiver << FFA_DIRECT_MSG_DESTINATION_SHIFT);
+
+	/* Send Direct Request. */
+	return smc_helper(FFA_MSG_SEND_DIRECT_REQ_SMC64, src_dst_ids,
+			0, arg3, arg4, arg5, arg6, arg7);
+}
+
+/*******************************************************************************
+ * Wrapper function to send a direct response.
+ ******************************************************************************/
+smc_args_t *ffa_msg_send_direct_resp(ffa_endpoint_id16_t sender,
+				     ffa_endpoint_id16_t receiver,
+				     uint32_t arg3,
+				     uint32_t arg4,
+				     uint32_t arg5,
+				     uint32_t arg6,
+				     uint32_t arg7)
+{
+	uint32_t src_dst_ids = (sender << FFA_DIRECT_MSG_SOURCE_SHIFT) |
+			       (receiver << FFA_DIRECT_MSG_DESTINATION_SHIFT);
+
+	return set_smc_args(FFA_MSG_SEND_DIRECT_RESP_SMC64, src_dst_ids,
+			    0, arg3, arg4, arg5, arg6, arg7);
+}
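+
+/*
+ * Note: arg3..arg7 of these wrappers map directly to x3..x7 of the direct
+ * message, while x2 (the framework message flags) is left at zero since
+ * these are partition messages.
+ */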
+
+/*******************************************************************************
+ * Memory Management Helpers.
+ ******************************************************************************/
+
 /**
  * Initialises the header of the given `ffa_mtd`, not including the
  * composite memory region offset.
@@ -49,7 +91,7 @@
  *
  * Returns the size of the descriptor written.
  */
-uint32_t ffa_memory_retrieve_request_init(
+static uint32_t ffa_memory_retrieve_request_init(
 	struct ffa_mtd *memory_region, uint64_t handle,
 	ffa_endpoint_id16_t sender, ffa_endpoint_id16_t *receivers, uint32_t receiver_count,
 	uint64_t tag, ffa_mtd_flag32_t flags,
@@ -98,6 +140,89 @@
 		       0, 0, 0, 0);
 }
 
+bool memory_retrieve(struct mailbox *mb,
+			    struct ffa_mtd **retrieved,
+			    uint64_t handle, ffa_endpoint_id16_t sender,
+			    ffa_endpoint_id16_t *receivers, uint32_t receiver_count,
+			    ffa_mtd_flag32_t flags, uint32_t *frag_length,
+			    uint32_t *total_length)
+{
+	smc_args_t ret;
+	uint32_t descriptor_size;
+	struct ffa_mtd *memory_region;
+
+	if (retrieved == NULL || mb == NULL) {
+		ERROR("Invalid parameters!\n");
+		return false;
+	}
+
+	memory_region = (struct ffa_mtd *)mb->tx_buffer;
+
+	/* Clear TX buffer. */
+	memset(memory_region, 0, PAGE_SIZE);
+
+	/* Clear local buffer. */
+	memset(mem_region_buffer, 0, REGION_BUF_SIZE);
+
+	descriptor_size = ffa_memory_retrieve_request_init(
+	    memory_region, handle, sender, receivers, receiver_count, 0, flags,
+	    FFA_MEM_PERM_RW | FFA_MEM_PERM_NX,
+	    FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB |
+	    FFA_MEM_ATTR_INNER_SHAREABLE);
+
+	ret = ffa_mem_retrieve_req(descriptor_size, descriptor_size);
+
+	if (ffa_func_id(ret) == FFA_ERROR) {
+		ERROR("Couldn't retrieve the memory page. Error: %x\n",
+		      ffa_error_code(ret));
+		return false;
+	}
+
+	/*
+	 * total_length and frag_length track the state of the transaction:
+	 * once the sum of all received fragment lengths equals total_length,
+	 * the memory transaction is complete.
+	 */
+	*total_length = ret._regs[1];
+	*frag_length = ret._regs[2];
+
+	/* Validate frag_length against total_length and the mailbox size. */
+	if (*frag_length == 0U || *total_length == 0U ||
+	    *frag_length > *total_length || *frag_length > (mb->rxtx_page_count * PAGE_SIZE)) {
+		ERROR("Invalid parameters!\n");
+		return false;
+	}
+
+	/* Copy response to local buffer. */
+	memcpy(mem_region_buffer, mb->rx_buffer, *frag_length);
+
+	if (ffa_rx_release()) {
+		ERROR("Failed to release buffer!\n");
+		return false;
+	}
+
+	*retrieved = (struct ffa_mtd *) mem_region_buffer;
+
+	if ((*retrieved)->emad_count > MAX_MEM_SHARE_RECIPIENTS) {
+		VERBOSE("SPMC memory sharing supports max of %u receivers!\n",
+			MAX_MEM_SHARE_RECIPIENTS);
+		return false;
+	}
+
+	/*
+	 * The memory was shared from the normal world, therefore validate
+	 * that the NS bit was set by the SPMC.
+	 */
+	if (((*retrieved)->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) == 0U) {
+		ERROR("SPMC has not set the NS bit! 0x%x\n",
+		      (*retrieved)->memory_region_attributes);
+		return false;
+	}
+
+	VERBOSE("Memory Descriptor Retrieved!\n");
+
+	return true;
+}
+
 /* Relinquish the memory region. */
 bool memory_relinquish(struct ffa_mem_relinquish_descriptor *m, uint64_t handle,
 		       ffa_endpoint_id16_t id)
diff --git a/bl32/tsp/ffa_helpers.h b/bl32/tsp/ffa_helpers.h
index e8bd51d..e650a07 100644
--- a/bl32/tsp/ffa_helpers.h
+++ b/bl32/tsp/ffa_helpers.h
@@ -25,6 +25,9 @@
 	return (uint32_t) val._regs[2];
 }
 
+extern uint8_t mem_region_buffer[4096 * 2]  __aligned(PAGE_SIZE);
+#define REGION_BUF_SIZE sizeof(mem_region_buffer)
+
 /** The maximum number of recipients a memory region may be sent to. */
 #define MAX_MEM_SHARE_RECIPIENTS	2U
 
@@ -83,25 +86,31 @@
 	return ((perm >> FFA_MEM_PERM_DATA_OFFSET) & FFA_MEM_PERM_DATA_MASK);
 }
 
-/**
- * Initialises the given `ffa_mtd` to be used for an
- * `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
- *
- * Returns the size of the message written.
- */
-uint32_t ffa_memory_retrieve_request_init(
-	struct ffa_mtd *memory_region, uint64_t handle,
-	ffa_endpoint_id16_t sender, ffa_endpoint_id16_t *test_receivers, uint32_t receiver_count,
-	uint64_t tag, ffa_mtd_flag32_t flags,
-	ffa_mem_perm8_t permissions, ffa_mem_attr16_t attributes);
-
 smc_args_t ffa_mem_frag_rx(uint64_t handle, uint32_t recv_length);
-smc_args_t ffa_mem_retrieve_req(uint32_t descriptor_length,
-				uint32_t fragment_length);
 bool ffa_mem_relinquish(void);
 bool ffa_rx_release(void);
 bool memory_relinquish(struct ffa_mem_relinquish_descriptor *m, uint64_t handle,
 		       ffa_endpoint_id16_t id);
 bool ffa_rxtx_map(uintptr_t send, uintptr_t recv, uint32_t pages);
+bool memory_retrieve(struct mailbox *mb,
+		     struct ffa_mtd **retrieved,
+		     uint64_t handle, ffa_endpoint_id16_t sender,
+		     ffa_endpoint_id16_t *receivers, uint32_t receiver_count,
+		     ffa_mtd_flag32_t flags, uint32_t *frag_length,
+		     uint32_t *total_length);
 
+smc_args_t ffa_msg_send_direct_req(ffa_endpoint_id16_t sender,
+				   ffa_endpoint_id16_t receiver,
+				   uint32_t arg3,
+				   uint32_t arg4,
+				   uint32_t arg5,
+				   uint32_t arg6,
+				   uint32_t arg7);
+smc_args_t *ffa_msg_send_direct_resp(ffa_endpoint_id16_t sender,
+				     ffa_endpoint_id16_t receiver,
+				     uint32_t arg3,
+				     uint32_t arg4,
+				     uint32_t arg5,
+				     uint32_t arg6,
+				     uint32_t arg7);
 #endif /* FFA_HELPERS_H */
diff --git a/bl32/tsp/tsp_common.c b/bl32/tsp/tsp_common.c
index 028421e..908b4ff 100644
--- a/bl32/tsp/tsp_common.c
+++ b/bl32/tsp/tsp_common.c
@@ -21,11 +21,6 @@
 #include <platform_def.h>
 
 /*******************************************************************************
- * Lock to control access to the console
- ******************************************************************************/
-spinlock_t console_lock;
-
-/*******************************************************************************
  * Per cpu data structure to populate parameters for an SMC in C code and use
  * a pointer to this structure in assembler code to populate x0-x7.
  ******************************************************************************/
@@ -105,14 +100,10 @@
 	tsp_stats[linear_id].smc_count++;
 	tsp_stats[linear_id].eret_count++;
 
-#if LOG_LEVEL >= LOG_LEVEL_INFO
-	spin_lock(&console_lock);
 	INFO("TSP: cpu 0x%lx SYSTEM_OFF request\n", read_mpidr());
 	INFO("TSP: cpu 0x%lx: %d smcs, %d erets requests\n", read_mpidr(),
 	     tsp_stats[linear_id].smc_count,
 	     tsp_stats[linear_id].eret_count);
-	spin_unlock(&console_lock);
-#endif
 
 	/* Indicate to the SPD that we have completed this request. */
 	return set_smc_args(TSP_SYSTEM_OFF_DONE, 0, 0, 0, 0, 0, 0, 0);
@@ -137,14 +128,10 @@
 	tsp_stats[linear_id].smc_count++;
 	tsp_stats[linear_id].eret_count++;
 
-#if LOG_LEVEL >= LOG_LEVEL_INFO
-	spin_lock(&console_lock);
 	INFO("TSP: cpu 0x%lx SYSTEM_RESET request\n", read_mpidr());
 	INFO("TSP: cpu 0x%lx: %d smcs, %d erets requests\n", read_mpidr(),
 	     tsp_stats[linear_id].smc_count,
 	     tsp_stats[linear_id].eret_count);
-	spin_unlock(&console_lock);
-#endif
 
 	/* Indicate to the SPD that we have completed this request. */
 	return set_smc_args(TSP_SYSTEM_RESET_DONE, 0, 0, 0, 0, 0, 0, 0);
diff --git a/bl32/tsp/tsp_ffa_main.c b/bl32/tsp/tsp_ffa_main.c
index edd6c24..53dbd03 100644
--- a/bl32/tsp/tsp_ffa_main.c
+++ b/bl32/tsp/tsp_ffa_main.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -15,8 +15,11 @@
 #include <bl32/tsp/tsp.h>
 #include <common/bl_common.h>
 #include <common/debug.h>
+#include "ffa_helpers.h"
 #include <lib/psci/psci.h>
 #include <lib/spinlock.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
 #include <plat/common/platform.h>
 #include <platform_tsp.h>
 #include <services/ffa_svc.h>
@@ -24,15 +27,245 @@
 
 #include <platform_def.h>
 
+static ffa_endpoint_id16_t tsp_id, spmc_id;
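+
+/*
+ * Buffer used to hold retrieved memory region descriptors, including those
+ * spread over multiple fragments (see memory_retrieve() and
+ * test_memory_send()).
+ */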
+uint8_t mem_region_buffer[4096 * 2]  __aligned(PAGE_SIZE);
+
+/* Partition Mailbox. */
+static uint8_t send_page[PAGE_SIZE] __aligned(PAGE_SIZE);
+static uint8_t recv_page[PAGE_SIZE] __aligned(PAGE_SIZE);
+
+/*
+ * Declare a global mailbox for use within the TSP.
+ * This will be initialized appropriately when the buffers
+ * are mapped with the SPMC.
+ */
+static struct mailbox mailbox;
+
+/*******************************************************************************
+ * This enum is used to handle test cases driven from the FF-A Test Driver.
+ ******************************************************************************/
+/* Keep in Sync with FF-A Test Driver. */
+enum message_t {
+	/* Partition Only Messages. */
+	FF_A_RELAY_MESSAGE = 0,
+
+	/* Basic Functionality. */
+	FF_A_ECHO_MESSAGE,
+	FF_A_RELAY_MESSAGE_EL3,
+
+	/* Memory Sharing. */
+	FF_A_MEMORY_SHARE,
+	FF_A_MEMORY_SHARE_FRAGMENTED,
+	FF_A_MEMORY_LEND,
+	FF_A_MEMORY_LEND_FRAGMENTED,
+
+	FF_A_MEMORY_SHARE_MULTI_ENDPOINT,
+	FF_A_MEMORY_LEND_MULTI_ENDPOINT,
+
+	LAST,
+	FF_A_RUN_ALL = 255,
+	FF_A_OP_MAX = 256
+};
+
+#if SPMC_AT_EL3
 extern void tsp_cpu_on_entry(void);
+#endif
 
-static ffa_endpoint_id16_t tsp_id, spmc_id;
+/*******************************************************************************
+ * Test Functions.
+ ******************************************************************************/
+
+/*******************************************************************************
+ * Enable the TSP to forward the received message to another partition and ask
+ * it to echo the value back in order to validate direct messaging functionality.
+ ******************************************************************************/
+static int ffa_test_relay(uint64_t arg0,
+			  uint64_t arg1,
+			  uint64_t arg2,
+			  uint64_t arg3,
+			  uint64_t arg4,
+			  uint64_t arg5,
+			  uint64_t arg6,
+			  uint64_t arg7)
+{
+	smc_args_t ffa_forward_result;
+	ffa_endpoint_id16_t receiver = arg5;
+
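+	/*
+	 * The endpoint to relay to is provided by the driver in x5; the value
+	 * to be echoed travels in x4 of the forwarded direct request.
+	 */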
+	ffa_forward_result = ffa_msg_send_direct_req(ffa_endpoint_source(arg1),
+						     receiver,
+						     FF_A_ECHO_MESSAGE, arg4,
+						     0, 0, 0);
+	return ffa_forward_result._regs[3];
+}
+
+/*******************************************************************************
+ * This function handles memory management tests, currently share and lend.
+ * This test supports the use of FRAG_RX to use memory descriptors that do not
+ * fit in a single 4KB buffer.
+ ******************************************************************************/
+static int test_memory_send(ffa_endpoint_id16_t sender, uint64_t handle,
+			    ffa_mtd_flag32_t flags, bool multi_endpoint)
+{
+	struct ffa_mtd *m;
+	struct ffa_emad_v1_0 *receivers;
+	struct ffa_comp_mrd *composite;
+	int ret, status = 0;
+	unsigned int mem_attrs;
+	char *ptr;
+	ffa_endpoint_id16_t source = sender;
+	uint32_t total_length, recv_length = 0;
+
+	/*
+	 * When testing multiple endpoints, choose a second partition ID that
+	 * resides in the normal world so that the SPMC won't reject it as
+	 * invalid.
+	 * TODO: Should get endpoint receiver id and flag as input from NWd.
+	 */
+	uint32_t receiver_count = multi_endpoint ? 2 : 1;
+	ffa_endpoint_id16_t test_receivers[2] = { tsp_id, 0x10 };
+
+	/* Ensure that the sender ID resides in the normal world. */
+	if (ffa_is_secure_world_id(sender)) {
+		ERROR("Invalid sender ID 0x%x.\n", sender);
+		return FFA_ERROR_DENIED;
+	}
+
+	if (!memory_retrieve(&mailbox, &m, handle, source, test_receivers,
+			     receiver_count, flags, &recv_length,
+			     &total_length)) {
+		return FFA_ERROR_INVALID_PARAMETER;
+	}
+
+	receivers = (struct ffa_emad_v1_0 *)
+		    ((uint8_t *) m + m->emad_offset);
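+
+	/*
+	 * If the descriptor did not fit in a single message, keep fetching
+	 * fragments with FFA_MEM_FRAG_RX until the full descriptor has been
+	 * received.
+	 */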
+	while (total_length != recv_length) {
+		smc_args_t ffa_return;
+		uint32_t frag_length;
+
+		ffa_return = ffa_mem_frag_rx(handle, recv_length);
+
+		if (ffa_return._regs[0] == FFA_ERROR) {
+			WARN("TSP: failed to resume mem with handle %lx\n",
+			     handle);
+			return ffa_return._regs[2];
+		}
+		frag_length = ffa_return._regs[3];
+
+		/* Validate frag_length against total_length and the mailbox size. */
+		if (frag_length > total_length ||
+				frag_length > (mailbox.rxtx_page_count * PAGE_SIZE)) {
+			ERROR("Invalid parameters!\n");
+			return FFA_ERROR_INVALID_PARAMETER;
+		}
+
+		/* Validate frag_length is less than remaining mem_region_buffer size. */
+		if (frag_length + recv_length >= REGION_BUF_SIZE) {
+			ERROR("Out of memory!\n");
+			return FFA_ERROR_INVALID_PARAMETER;
+		}
+
+		memcpy(&mem_region_buffer[recv_length], mailbox.rx_buffer,
+		       frag_length);
+
+		if (ffa_rx_release()) {
+			ERROR("Failed to release buffer!\n");
+			return FFA_ERROR_DENIED;
+		}
+
+		recv_length += frag_length;
+
+		assert(recv_length <= total_length);
+	}
+
+	composite = ffa_memory_region_get_composite(m, 0);
+	if (composite == NULL) {
+		WARN("Failed to get composite descriptor!\n");
+		return FFA_ERROR_INVALID_PARAMETER;
+	}
+
+	VERBOSE("Address: %p; page_count: %x %lx\n",
+		(void *)composite->address_range_array[0].address,
+		composite->address_range_array[0].page_count, PAGE_SIZE);
+
+	/* This test is only concerned with RW permissions. */
+	if (ffa_get_data_access_attr(
+	    receivers[0].mapd.memory_access_permissions) != FFA_MEM_PERM_RW) {
+		ERROR("Data permission in retrieve response %x does not match share/lend %x!\n",
+		      ffa_get_data_access_attr(receivers[0].mapd.memory_access_permissions),
+		      FFA_MEM_PERM_RW);
+		return FFA_ERROR_INVALID_PARAMETER;
+	}
+
+	mem_attrs = MT_RW_DATA | MT_EXECUTE_NEVER;
+
+	/* Only expecting to be sent memory from NWd so map accordingly. */
+	mem_attrs |= MT_NS;
+
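+	/*
+	 * Map each constituent memory range, update its contents to confirm
+	 * the mapping is usable, then remove the mappings again below.
+	 */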
+	for (uint32_t i = 0U; i < composite->address_range_count; i++) {
+		size_t size = composite->address_range_array[i].page_count * PAGE_SIZE;
+
+		ptr = (char *) composite->address_range_array[i].address;
+		ret = mmap_add_dynamic_region(
+				(uint64_t)ptr,
+				(uint64_t)ptr,
+				size, mem_attrs);
+
+		if (ret != 0) {
+			ERROR("Failed [%u] mmap_add_dynamic_region %u (%lx) (%lx) (%x)!\n",
+				i, ret,
+				(uint64_t)composite->address_range_array[i].address,
+				size, mem_attrs);
+
+			/* Remove mappings created in this transaction. */
+			while (i-- > 0U) {
+				ret = mmap_remove_dynamic_region(
+					(uint64_t)composite->address_range_array[i].address,
+					composite->address_range_array[i].page_count * PAGE_SIZE);
+
+				if (ret != 0) {
+					ERROR("Failed [%d] mmap_remove_dynamic_region!\n", i);
+					panic();
+				}
+			}
+			return FFA_ERROR_NO_MEMORY;
+		}
+
+		/* Increment memory region for validation purposes. */
+		++(*ptr);
+
+		/*
+		 * Read initial magic number from memory region for
+		 * validation purposes.
+		 */
+		if (!i) {
+			status = *ptr;
+		}
+	}
+
+	for (uint32_t i = 0U; i < composite->address_range_count; i++) {
+		ret = mmap_remove_dynamic_region(
+			(uint64_t)composite->address_range_array[i].address,
+			composite->address_range_array[i].page_count * PAGE_SIZE);
+
+		if (ret != 0) {
+			ERROR("Failed [%d] mmap_remove_dynamic_region!\n", i);
+			return FFA_ERROR_NO_MEMORY;
+		}
+	}
+
+	if (!memory_relinquish((struct ffa_mem_relinquish_descriptor *)mailbox.tx_buffer,
+				m->handle, tsp_id)) {
+		ERROR("Failed to relinquish memory region!\n");
+		return FFA_ERROR_INVALID_PARAMETER;
+	}
+	return status;
+}
 
 static smc_args_t *send_ffa_pm_success(void)
 {
 	return set_smc_args(FFA_MSG_SEND_DIRECT_RESP_SMC32,
-			    tsp_id << FFA_DIRECT_MSG_SOURCE_SHIFT |
-			    spmc_id,
+			    ((tsp_id & FFA_DIRECT_MSG_ENDPOINT_ID_MASK)
+			    << FFA_DIRECT_MSG_SOURCE_SHIFT) | spmc_id,
 			    FFA_FWK_MSG_BIT |
 			    (FFA_PM_MSG_PM_RESP & FFA_FWK_MSG_MASK),
 			    0, 0, 0, 0, 0);
@@ -65,16 +298,12 @@
 	tsp_stats[linear_id].eret_count++;
 	tsp_stats[linear_id].cpu_off_count++;
 
-#if LOG_LEVEL >= LOG_LEVEL_INFO
-	spin_lock(&console_lock);
 	INFO("TSP: cpu 0x%lx off request\n", read_mpidr());
 	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu off requests\n",
 		read_mpidr(),
 		tsp_stats[linear_id].smc_count,
 		tsp_stats[linear_id].eret_count,
 		tsp_stats[linear_id].cpu_off_count);
-	spin_unlock(&console_lock);
-#endif
 
 	return send_ffa_pm_success();
 }
@@ -107,15 +336,11 @@
 	tsp_stats[linear_id].eret_count++;
 	tsp_stats[linear_id].cpu_suspend_count++;
 
-#if LOG_LEVEL >= LOG_LEVEL_INFO
-	spin_lock(&console_lock);
 	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu suspend requests\n",
 		read_mpidr(),
 		tsp_stats[linear_id].smc_count,
 		tsp_stats[linear_id].eret_count,
 		tsp_stats[linear_id].cpu_suspend_count);
-	spin_unlock(&console_lock);
-#endif
 
 	return send_ffa_pm_success();
 }
@@ -144,8 +369,6 @@
 	tsp_stats[linear_id].eret_count++;
 	tsp_stats[linear_id].cpu_resume_count++;
 
-#if LOG_LEVEL >= LOG_LEVEL_INFO
-	spin_lock(&console_lock);
 	INFO("TSP: cpu 0x%lx resumed. maximum off power level %" PRId64 "\n",
 	     read_mpidr(), max_off_pwrlvl);
 	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu resume requests\n",
@@ -153,8 +376,6 @@
 		tsp_stats[linear_id].smc_count,
 		tsp_stats[linear_id].eret_count,
 		tsp_stats[linear_id].cpu_resume_count);
-	spin_unlock(&console_lock);
-#endif
 
 	return send_ffa_pm_success();
 }
@@ -200,6 +421,62 @@
 }
 
 /*******************************************************************************
+ * Handles partition messages. Exercised from the FF-A Test Driver.
+ ******************************************************************************/
+static smc_args_t *handle_partition_message(uint64_t arg0,
+					    uint64_t arg1,
+					    uint64_t arg2,
+					    uint64_t arg3,
+					    uint64_t arg4,
+					    uint64_t arg5,
+					    uint64_t arg6,
+					    uint64_t arg7)
+{
+	uint16_t sender = ffa_endpoint_source(arg1);
+	uint16_t receiver = ffa_endpoint_destination(arg1);
+	int status = -1;
+	const bool multi_endpoint = true;
+
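+	/*
+	 * arg3 holds the test ID (enum message_t) and arg4 the test payload
+	 * (e.g. the echo value or a memory transaction handle); the resulting
+	 * status is returned in x3 of the direct response below.
+	 */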
+	switch (arg3) {
+	case FF_A_MEMORY_SHARE:
+		INFO("TSP Tests: Memory Share Request--\n");
+		status = test_memory_send(sender, arg4, FFA_FLAG_SHARE_MEMORY, !multi_endpoint);
+		break;
+
+	case FF_A_MEMORY_LEND:
+		INFO("TSP Tests: Memory Lend Request--\n");
+		status = test_memory_send(sender, arg4, FFA_FLAG_LEND_MEMORY, !multi_endpoint);
+		break;
+
+	case FF_A_MEMORY_SHARE_MULTI_ENDPOINT:
+		INFO("TSP Tests: Multi Endpoint Memory Share Request--\n");
+		status = test_memory_send(sender, arg4, FFA_FLAG_SHARE_MEMORY, multi_endpoint);
+		break;
+
+	case FF_A_MEMORY_LEND_MULTI_ENDPOINT:
+		INFO("TSP Tests: Multi Endpoint Memory Lend Request--\n");
+		status = test_memory_send(sender, arg4, FFA_FLAG_LEND_MEMORY, multi_endpoint);
+		break;
+	case FF_A_RELAY_MESSAGE:
+		INFO("TSP Tests: Relaying message--\n");
+		status = ffa_test_relay(arg0, arg1, arg2, arg3, arg4,
+					arg5, arg6, arg7);
+		break;
+
+	case FF_A_ECHO_MESSAGE:
+		INFO("TSP Tests: echo message--\n");
+		status = arg4;
+		break;
+
+	default:
+		INFO("TSP Tests: Unknown request ID %d--\n", (int) arg3);
+	}
+
+	/* Swap the sender and receiver in the response. */
+	return ffa_msg_send_direct_resp(receiver, sender, status, 0, 0, 0, 0);
+}
+
+/*******************************************************************************
  * This function implements the event loop for handling FF-A ABI invocations.
  ******************************************************************************/
 static smc_args_t *tsp_event_loop(uint64_t smc_fid,
@@ -226,14 +503,15 @@
 		 */
 		return set_smc_args(FFA_MSG_WAIT, 0, 0, 0, 0, 0, 0, 0);
 
+	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
 		/* Check if a framework message, handle accordingly. */
 		if ((arg2 & FFA_FWK_MSG_BIT)) {
 			return handle_framework_message(smc_fid, arg1, arg2, arg3,
 							arg4, arg5, arg6, arg7);
 		}
-	default:
-		break;
+		return handle_partition_message(smc_fid, arg1, arg2, arg3,
+							arg4, arg5, arg6, arg7);
 	}
 
 	ERROR("%s: Unsupported FF-A FID (0x%lx)\n", __func__, smc_fid);
@@ -249,7 +527,7 @@
 		 * Mask FIQ interrupts to avoid preemption
 		 * in case EL3 SPMC delegates an IRQ next or a
 		 * managed exit. Lastly, unmask IRQs so that
-		 * they can be handled immediately upon re-entry
+		 * they can be handled immediately upon re-entry.
 		 *  ---------------------------------------------
 		 */
 		write_daifset(DAIF_FIQ_BIT);
@@ -258,8 +536,8 @@
 			       args->_regs[3], args->_regs[4], args->_regs[5],
 			       args->_regs[6], args->_regs[7]);
 		args = tsp_event_loop(ret._regs[0], ret._regs[1], ret._regs[2],
-				      ret._regs[3], ret._regs[4], ret._regs[5],
-				      ret._regs[6], ret._regs[7]);
+				ret._regs[3], ret._regs[4], ret._regs[5],
+				ret._regs[6], ret._regs[7]);
 	} while (1);
 
 	/* Not Reached. */
@@ -296,7 +574,7 @@
 				smc_args._regs[2]);
 		panic();
 	}
-	/* Get TSP's endpoint id */
+	/* Get TSP's endpoint id. */
 	smc_args = smc_helper(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0);
 	if (smc_args._regs[SMC_ARG0] != FFA_SUCCESS_SMC32) {
 		ERROR("TSP could not get own ID (0x%lx) on core%d\n",
@@ -306,7 +584,8 @@
 
 	tsp_id = smc_args._regs[2];
 	INFO("TSP FF-A endpoint id = 0x%x\n", tsp_id);
-	/* Get the SPMC ID */
+
+	/* Get the SPMC ID. */
 	smc_args = smc_helper(FFA_SPM_ID_GET, 0, 0, 0, 0, 0, 0, 0);
 	if (smc_args._regs[SMC_ARG0] != FFA_SUCCESS_SMC32) {
 		ERROR("TSP could not get SPMC ID (0x%lx) on core%d\n",
@@ -316,7 +595,18 @@
 
 	spmc_id = smc_args._regs[2];
 
-	/* Update this cpu's statistics */
+	/* Call RXTX_MAP to map 4KB RX and TX buffers. */
+	if (ffa_rxtx_map((uintptr_t) send_page,
+			 (uintptr_t) recv_page, 1)) {
+		ERROR("TSP could not map its RX/TX buffers\n");
+		panic();
+	}
+
+	mailbox.tx_buffer = send_page;
+	mailbox.rx_buffer = recv_page;
+	mailbox.rxtx_page_count = 1;
+
+	/* Update this cpu's statistics. */
 	tsp_stats[linear_id].smc_count++;
 	tsp_stats[linear_id].eret_count++;
 	tsp_stats[linear_id].cpu_on_count++;
@@ -350,16 +640,12 @@
 	tsp_stats[linear_id].smc_count++;
 	tsp_stats[linear_id].eret_count++;
 	tsp_stats[linear_id].cpu_on_count++;
-#if LOG_LEVEL >= LOG_LEVEL_INFO
-	spin_lock(&console_lock);
 	INFO("TSP: cpu 0x%lx turned on\n", read_mpidr());
 	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
 			read_mpidr(),
 			tsp_stats[linear_id].smc_count,
 			tsp_stats[linear_id].eret_count,
 			tsp_stats[linear_id].cpu_on_count);
-	spin_unlock(&console_lock);
-#endif
 	/* ---------------------------------------------
 	 * Jump to the main event loop to return to EL3
 	 * and be ready for the next request on this cpu.
diff --git a/bl32/tsp/tsp_interrupt.c b/bl32/tsp/tsp_interrupt.c
index 6644c50..a847b6c 100644
--- a/bl32/tsp/tsp_interrupt.c
+++ b/bl32/tsp/tsp_interrupt.c
@@ -35,8 +35,6 @@
 	if (type == TSP_HANDLE_SEL1_INTR_AND_RETURN)
 		tsp_stats[linear_id].sync_sel1_intr_ret_count++;
 
-#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
-	spin_lock(&console_lock);
 	VERBOSE("TSP: cpu 0x%lx sync s-el1 interrupt request from 0x%" PRIx64 "\n",
 		read_mpidr(), elr_el3);
 	VERBOSE("TSP: cpu 0x%lx: %d sync s-el1 interrupt requests,"
@@ -44,8 +42,6 @@
 		read_mpidr(),
 		tsp_stats[linear_id].sync_sel1_intr_count,
 		tsp_stats[linear_id].sync_sel1_intr_ret_count);
-	spin_unlock(&console_lock);
-#endif
 }
 
 /******************************************************************************
@@ -58,12 +54,8 @@
 	uint32_t linear_id = plat_my_core_pos();
 
 	tsp_stats[linear_id].preempt_intr_count++;
-#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
-	spin_lock(&console_lock);
 	VERBOSE("TSP: cpu 0x%lx: %d preempt interrupt requests\n",
 		read_mpidr(), tsp_stats[linear_id].preempt_intr_count);
-	spin_unlock(&console_lock);
-#endif
 	return TSP_PREEMPTED;
 }
 
diff --git a/bl32/tsp/tsp_main.c b/bl32/tsp/tsp_main.c
index 051080e..df9903b 100644
--- a/bl32/tsp/tsp_main.c
+++ b/bl32/tsp/tsp_main.c
@@ -45,15 +45,11 @@
 	tsp_stats[linear_id].eret_count++;
 	tsp_stats[linear_id].cpu_on_count++;
 
-#if LOG_LEVEL >= LOG_LEVEL_INFO
-	spin_lock(&console_lock);
 	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
 	     read_mpidr(),
 	     tsp_stats[linear_id].smc_count,
 	     tsp_stats[linear_id].eret_count,
 	     tsp_stats[linear_id].cpu_on_count);
-	spin_unlock(&console_lock);
-#endif
 	return (uint64_t) &tsp_vector_table;
 }
 
@@ -74,16 +70,12 @@
 	tsp_stats[linear_id].eret_count++;
 	tsp_stats[linear_id].cpu_on_count++;
 
-#if LOG_LEVEL >= LOG_LEVEL_INFO
-	spin_lock(&console_lock);
 	INFO("TSP: cpu 0x%lx turned on\n", read_mpidr());
 	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
 		read_mpidr(),
 		tsp_stats[linear_id].smc_count,
 		tsp_stats[linear_id].eret_count,
 		tsp_stats[linear_id].cpu_on_count);
-	spin_unlock(&console_lock);
-#endif
 	/* Indicate to the SPD that we have completed turned ourselves on */
 	return set_smc_args(TSP_ON_DONE, 0, 0, 0, 0, 0, 0, 0);
 }
@@ -115,16 +107,12 @@
 	tsp_stats[linear_id].eret_count++;
 	tsp_stats[linear_id].cpu_off_count++;
 
-#if LOG_LEVEL >= LOG_LEVEL_INFO
-	spin_lock(&console_lock);
 	INFO("TSP: cpu 0x%lx off request\n", read_mpidr());
 	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu off requests\n",
 		read_mpidr(),
 		tsp_stats[linear_id].smc_count,
 		tsp_stats[linear_id].eret_count,
 		tsp_stats[linear_id].cpu_off_count);
-	spin_unlock(&console_lock);
-#endif
 
 	/* Indicate to the SPD that we have completed this request */
 	return set_smc_args(TSP_OFF_DONE, 0, 0, 0, 0, 0, 0, 0);
@@ -158,15 +146,11 @@
 	tsp_stats[linear_id].eret_count++;
 	tsp_stats[linear_id].cpu_suspend_count++;
 
-#if LOG_LEVEL >= LOG_LEVEL_INFO
-	spin_lock(&console_lock);
 	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu suspend requests\n",
 		read_mpidr(),
 		tsp_stats[linear_id].smc_count,
 		tsp_stats[linear_id].eret_count,
 		tsp_stats[linear_id].cpu_suspend_count);
-	spin_unlock(&console_lock);
-#endif
 
 	/* Indicate to the SPD that we have completed this request */
 	return set_smc_args(TSP_SUSPEND_DONE, 0, 0, 0, 0, 0, 0, 0);
@@ -196,8 +180,6 @@
 	tsp_stats[linear_id].eret_count++;
 	tsp_stats[linear_id].cpu_resume_count++;
 
-#if LOG_LEVEL >= LOG_LEVEL_INFO
-	spin_lock(&console_lock);
 	INFO("TSP: cpu 0x%lx resumed. maximum off power level %" PRId64 "\n",
 	     read_mpidr(), max_off_pwrlvl);
 	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu resume requests\n",
@@ -205,8 +187,6 @@
 		tsp_stats[linear_id].smc_count,
 		tsp_stats[linear_id].eret_count,
 		tsp_stats[linear_id].cpu_resume_count);
-	spin_unlock(&console_lock);
-#endif
 	/* Indicate to the SPD that we have completed this request */
 	return set_smc_args(TSP_RESUME_DONE, 0, 0, 0, 0, 0, 0, 0);
 }
@@ -237,16 +217,12 @@
 	tsp_stats[linear_id].smc_count++;
 	tsp_stats[linear_id].eret_count++;
 
-#if LOG_LEVEL >= LOG_LEVEL_INFO
-	spin_lock(&console_lock);
 	INFO("TSP: cpu 0x%lx received %s smc 0x%" PRIx64 "\n", read_mpidr(),
 		((func >> 31) & 1) == 1 ? "fast" : "yielding",
 		func);
 	INFO("TSP: cpu 0x%lx: %d smcs, %d erets\n", read_mpidr(),
 		tsp_stats[linear_id].smc_count,
 		tsp_stats[linear_id].eret_count);
-	spin_unlock(&console_lock);
-#endif
 
 	/* Render secure services and obtain results here */
 	results[0] = arg1;
@@ -288,11 +264,7 @@
 		break;
 	case TSP_CHECK_DIT:
 		if (!is_armv8_4_dit_present()) {
-#if LOG_LEVEL >= LOG_LEVEL_ERROR
-			spin_lock(&console_lock);
 			ERROR("DIT not supported\n");
-			spin_unlock(&console_lock);
-#endif
 			results[0] = 0;
 			results[1] = 0xffff;
 			break;
diff --git a/bl32/tsp/tsp_private.h b/bl32/tsp/tsp_private.h
index 30df1f0..66873e2 100644
--- a/bl32/tsp/tsp_private.h
+++ b/bl32/tsp/tsp_private.h
@@ -94,7 +94,6 @@
 
 
 /* Data structure to keep track of TSP statistics */
-extern spinlock_t console_lock;
 extern work_statistics_t tsp_stats[PLATFORM_CORE_COUNT];
 
 /* Vector table of jumps */