Add BL31 support common across Broadcom platforms

Add the common BL31 platform setup for Broadcom platforms along with
the shared CCN interconnect, GICv3, MHU mailbox and SCPI drivers.

Signed-off-by: Sheetal Tigadoli <sheetal.tigadoli@broadcom.com>
Change-Id: Ic1a392a633b447935fa3a7528326c97845f5b1bc
diff --git a/plat/brcm/common/brcm_bl31_setup.c b/plat/brcm/common/brcm_bl31_setup.c
new file mode 100644
index 0000000..d3fa83d
--- /dev/null
+++ b/plat/brcm/common/brcm_bl31_setup.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <drivers/arm/sp804_delay_timer.h>
+#include <lib/utils.h>
+#include <plat/common/platform.h>
+
+#include <bcm_console.h>
+#include <plat_brcm.h>
+#include <platform_def.h>
+
+#ifdef BL33_SHARED_DDR_BASE
+struct bl33_info *bl33_info = (struct bl33_info *)BL33_SHARED_DDR_BASE;
+#endif
+
+/*
+ * Placeholder variables for copying the arguments that have been passed to
+ * BL31 from BL2.
+ */
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+/* Weak definitions may be overridden by specific BRCM platforms */
+#pragma weak plat_bcm_bl31_early_platform_setup
+#pragma weak plat_brcm_pwrc_setup
+#pragma weak plat_brcm_security_setup
+
+void plat_brcm_security_setup(void)
+{
+
+}
+
+void plat_brcm_pwrc_setup(void)
+{
+
+}
+
+void plat_bcm_bl31_early_platform_setup(void *from_bl2,
+				   bl_params_t *plat_params_from_bl2)
+{
+
+}
+
+/*******************************************************************************
+ * Return a pointer to the 'entry_point_info' structure of the next image for
+ * the security state specified. BL33 corresponds to the non-secure image type
+ * while BL32 corresponds to the secure image type. A NULL pointer is returned
+ * if the image does not exist.
+ ******************************************************************************/
+struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+	entry_point_info_t *next_image_info;
+
+	assert(sec_state_is_valid(type));
+	next_image_info = (type == NON_SECURE)
+			? &bl33_image_ep_info : &bl32_image_ep_info;
+	/*
+	 * None of the images on Broadcom platforms can have 0x0 as the
+	 * entrypoint.
+	 */
+	if (next_image_info->pc)
+		return next_image_info;
+	else
+		return NULL;
+}
+
+/*******************************************************************************
+ * Perform any BL31 early platform setup common to Broadcom platforms.
+ * Here is an opportunity to copy parameters passed by the calling EL (S-EL1
+ * in BL2 & EL3 in BL1) before they are lost (potentially). This needs to be
+ * done before the MMU is initialized so that the memory layout can be used
+ * while creating page tables. BL2 has flushed this information to memory, so
+ * we are guaranteed to pick up good data.
+ ******************************************************************************/
+void __init brcm_bl31_early_platform_setup(void *from_bl2,
+					  uintptr_t soc_fw_config,
+					  uintptr_t hw_config,
+					  void *plat_params_from_bl2)
+{
+	/* Initialize the console to provide early debug support */
+	bcm_console_boot_init();
+
+	/* Initialize delay timer driver using SP804 dual timer 0 */
+	sp804_timer_init(SP804_TIMER0_BASE,
+			 SP804_TIMER0_CLKMULT, SP804_TIMER0_CLKDIV);
+
+#if RESET_TO_BL31
+	/* There are no parameters from BL2 if BL31 is a reset vector */
+	assert(from_bl2 == NULL);
+	assert(plat_params_from_bl2 == NULL);
+
+# ifdef BL32_BASE
+	/* Populate entry point information for BL32 */
+	SET_PARAM_HEAD(&bl32_image_ep_info,
+		       PARAM_EP,
+		       VERSION_1,
+		       0);
+	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
+	bl32_image_ep_info.pc = BL32_BASE;
+	bl32_image_ep_info.spsr = brcm_get_spsr_for_bl32_entry();
+# endif /* BL32_BASE */
+
+	/* Populate entry point information for BL33 */
+	SET_PARAM_HEAD(&bl33_image_ep_info,
+		       PARAM_EP,
+		       VERSION_1,
+		       0);
+	/*
+	 * Tell BL31 where the non-trusted software image is located and what
+	 * its entry state information is.
+	 */
+	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
+
+	bl33_image_ep_info.spsr = brcm_get_spsr_for_bl33_entry();
+	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+
+# if ARM_LINUX_KERNEL_AS_BL33
+	/*
+	 * According to the file ``Documentation/arm64/booting.txt`` of the
+	 * Linux kernel tree, Linux expects the physical address of the device
+	 * tree blob (DTB) in x0, while x1-x3 are reserved for future use and
+	 * must be 0.
+	 */
+	bl33_image_ep_info.args.arg0 = (u_register_t)PRELOADED_DTB_BASE;
+	bl33_image_ep_info.args.arg1 = 0U;
+	bl33_image_ep_info.args.arg2 = 0U;
+	bl33_image_ep_info.args.arg3 = 0U;
+# endif
+
+#else /* RESET_TO_BL31 */
+
+	/*
+	 * In debug builds, we pass a special value in 'plat_params_from_bl2'
+	 * to verify platform parameters from BL2 to BL31.
+	 * In release builds, it's not used.
+	 */
+	assert(((unsigned long long)plat_params_from_bl2) ==
+		BRCM_BL31_PLAT_PARAM_VAL);
+
+	/*
+	 * Check that the params passed from BL2 are not NULL.
+	 */
+	bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
+
+	assert(params_from_bl2 != NULL);
+	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
+	assert(params_from_bl2->h.version >= VERSION_2);
+
+	bl_params_node_t *bl_params = params_from_bl2->head;
+
+	/*
+	 * Copy the entry point information for BL33 and BL32 (if present).
+	 * It is stored in Secure RAM, in BL2's address space.
+	 */
+	while (bl_params != NULL) {
+		if (bl_params->image_id == BL32_IMAGE_ID &&
+		    bl_params->image_info->h.attr != IMAGE_ATTRIB_SKIP_LOADING)
+			bl32_image_ep_info = *bl_params->ep_info;
+
+		if (bl_params->image_id == BL33_IMAGE_ID)
+			bl33_image_ep_info = *bl_params->ep_info;
+
+		bl_params = bl_params->next_params_info;
+	}
+
+	if (bl33_image_ep_info.pc == 0U)
+		panic();
+#endif /* RESET_TO_BL31 */
+
+#ifdef BL33_SHARED_DDR_BASE
+	/* Pass information to BL33 through x0 */
+	bl33_image_ep_info.args.arg0 = (u_register_t)BL33_SHARED_DDR_BASE;
+	bl33_image_ep_info.args.arg1 = 0ULL;
+	bl33_image_ep_info.args.arg2 = 0ULL;
+	bl33_image_ep_info.args.arg3 = 0ULL;
+#endif
+}
+
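+/*
+ * BL31 early setup entry point. arg0 holds the BL2 parameter list, arg1/arg2
+ * the SoC firmware and hardware config addresses, and arg3 the platform
+ * parameter; they are forwarded to the common and platform-specific early
+ * setup routines.
+ */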
+void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
+		u_register_t arg2, u_register_t arg3)
+{
+#ifdef BL31_LOG_LEVEL
+	SET_LOG_LEVEL(BL31_LOG_LEVEL);
+#endif
+
+	brcm_bl31_early_platform_setup((void *)arg0, arg1, arg2, (void *)arg3);
+
+	plat_bcm_bl31_early_platform_setup((void *)arg0, (void *)arg3);
+
+#ifdef DRIVER_CC_ENABLE
+	/*
+	 * Initialize Interconnect for this cluster during cold boot.
+	 * No need for locks as no other CPU is active.
+	 */
+	plat_brcm_interconnect_init();
+
+	/*
+	 * Enable Interconnect coherency for the primary CPU's cluster.
+	 * Earlier bootloader stages might already do this (e.g. Trusted
+	 * Firmware's BL1 does it) but we can't assume so. There is no harm in
+	 * executing this code twice anyway.
+	 * Platform specific PSCI code will enable coherency for other
+	 * clusters.
+	 */
+	plat_brcm_interconnect_enter_coherency();
+#endif
+}
+
+/*******************************************************************************
+ * Perform any BL31 platform setup common to Broadcom platforms
+ ******************************************************************************/
+void brcm_bl31_platform_setup(void)
+{
+	/* Initialize the GIC driver, cpu and distributor interfaces */
+	plat_brcm_gic_driver_init();
+	plat_brcm_gic_init();
+
+	/* Initialize power controller before setting up topology */
+	plat_brcm_pwrc_setup();
+}
+
+/*******************************************************************************
+ * Perform any BL31 platform runtime setup prior to BL31 exit, common to
+ * Broadcom platforms.
+ ******************************************************************************/
+void brcm_bl31_plat_runtime_setup(void)
+{
+	console_switch_state(CONSOLE_FLAG_RUNTIME);
+
+	/* Initialize the runtime console */
+	bcm_console_runtime_init();
+}
+
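+/* BL31 platform setup hook: common setup followed by security setup */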
+void bl31_platform_setup(void)
+{
+	brcm_bl31_platform_setup();
+
+	/* Initialize the secure environment */
+	plat_brcm_security_setup();
+}
+
+void bl31_plat_runtime_setup(void)
+{
+	brcm_bl31_plat_runtime_setup();
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup shared between
+ * Broadcom platforms. This only does basic initialization. Later
+ * architectural setup (bl31_arch_setup()) does not do anything platform
+ * specific.
+ ******************************************************************************/
+void __init brcm_bl31_plat_arch_setup(void)
+{
+#ifndef MMU_DISABLED
+	const mmap_region_t bl_regions[] = {
+		MAP_REGION_FLAT(BL31_BASE, BL31_END - BL31_BASE,
+				MT_MEMORY | MT_RW | MT_SECURE),
+		MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE,
+				MT_CODE | MT_SECURE),
+		MAP_REGION_FLAT(BL_RO_DATA_BASE,
+				BL_RO_DATA_END - BL_RO_DATA_BASE,
+				MT_RO_DATA | MT_SECURE),
+#if USE_COHERENT_MEM
+		MAP_REGION_FLAT(BL_COHERENT_RAM_BASE,
+				BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE,
+				MT_DEVICE | MT_RW | MT_SECURE),
+#endif
+		{0}
+	};
+
+	setup_page_tables(bl_regions, plat_brcm_get_mmap());
+
+	enable_mmu_el3(0);
+#endif
+}
+
+void __init bl31_plat_arch_setup(void)
+{
+	brcm_bl31_plat_arch_setup();
+}
diff --git a/plat/brcm/common/brcm_ccn.c b/plat/brcm/common/brcm_ccn.c
new file mode 100644
index 0000000..9396aaa
--- /dev/null
+++ b/plat/brcm/common/brcm_ccn.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <drivers/arm/ccn.h>
+
+#include <platform_def.h>
+
+static const unsigned char master_to_rn_id_map[] = {
+	PLAT_BRCM_CLUSTER_TO_CCN_ID_MAP
+};
+
+static const ccn_desc_t bcm_ccn_desc = {
+	.periphbase = PLAT_BRCM_CCN_BASE,
+	.num_masters = ARRAY_SIZE(master_to_rn_id_map),
+	.master_to_rn_id_map = master_to_rn_id_map
+};
+
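+/* Initialize the Arm CCN driver with the platform's cluster-to-CCN-node map */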
+void plat_brcm_interconnect_init(void)
+{
+	ccn_init(&bcm_ccn_desc);
+}
+
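+/*
+ * Add the calling core's cluster (affinity level 1 of its MPIDR) to the CCN
+ * snoop/DVM domains so it takes part in hardware coherency.
+ */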
+void plat_brcm_interconnect_enter_coherency(void)
+{
+	ccn_enter_snoop_dvm_domain(1 << MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
+
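+/* Remove the calling core's cluster from the CCN snoop/DVM domains */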
+void plat_brcm_interconnect_exit_coherency(void)
+{
+	ccn_exit_snoop_dvm_domain(1 << MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
diff --git a/plat/brcm/common/brcm_gicv3.c b/plat/brcm/common/brcm_gicv3.c
new file mode 100644
index 0000000..c4137c0
--- /dev/null
+++ b/plat/brcm/common/brcm_gicv3.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <drivers/arm/gicv3.h>
+#include <plat/common/platform.h>
+
+#include <platform_def.h>
+
+/* The GICv3 driver only needs to be initialized in EL3 */
+static uintptr_t brcm_rdistif_base_addrs[PLATFORM_CORE_COUNT];
+
+static const interrupt_prop_t brcm_interrupt_props[] = {
+	/* G1S interrupts */
+	PLAT_BRCM_G1S_IRQ_PROPS(INTR_GROUP1S),
+	/* G0 interrupts */
+	PLAT_BRCM_G0_IRQ_PROPS(INTR_GROUP0)
+};
+
+/*
+ * MPIDR hashing function for translating MPIDRs read from GICR_TYPER register
+ * to core position.
+ *
+ * Calculating core position is dependent on MPIDR_EL1.MT bit. However, affinity
+ * values read from GICR_TYPER don't have an MT field. To reuse the same
+ * translation used for CPUs, we insert MT bit read from the PE's MPIDR into
+ * that read from GICR_TYPER.
+ *
+ * Assumptions:
+ *
+ *   - All CPUs implemented in the system have MPIDR_EL1.MT bit set;
+ *   - No CPUs implemented in the system use affinity level 3.
+ */
+static unsigned int brcm_gicv3_mpidr_hash(u_register_t mpidr)
+{
+	mpidr |= (read_mpidr_el1() & MPIDR_MT_MASK);
+	return plat_core_pos_by_mpidr(mpidr);
+}
+
+static const gicv3_driver_data_t brcm_gic_data = {
+	.gicd_base = PLAT_BRCM_GICD_BASE,
+	.gicr_base = PLAT_BRCM_GICR_BASE,
+	.interrupt_props = brcm_interrupt_props,
+	.interrupt_props_num = ARRAY_SIZE(brcm_interrupt_props),
+	.rdistif_num = PLATFORM_CORE_COUNT,
+	.rdistif_base_addrs = brcm_rdistif_base_addrs,
+	.mpidr_to_core_pos = brcm_gicv3_mpidr_hash
+};
+
+void plat_brcm_gic_driver_init(void)
+{
+	/*
+	 * TODO: Check whether this initialization is required here; the
+	 * driver is already initialized in EL3, so does it need to be
+	 * re-initialized in S-EL1?
+	 */
+	gicv3_driver_init(&brcm_gic_data);
+}
+
+void plat_brcm_gic_init(void)
+{
+	gicv3_distif_init();
+	gicv3_rdistif_init(plat_my_core_pos());
+	gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+void plat_brcm_gic_cpuif_enable(void)
+{
+	gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+void plat_brcm_gic_cpuif_disable(void)
+{
+	gicv3_cpuif_disable(plat_my_core_pos());
+}
+
+void plat_brcm_gic_pcpu_init(void)
+{
+	gicv3_rdistif_init(plat_my_core_pos());
+}
+
+void plat_brcm_gic_redistif_on(void)
+{
+	gicv3_rdistif_on(plat_my_core_pos());
+}
+
+void plat_brcm_gic_redistif_off(void)
+{
+	gicv3_rdistif_off(plat_my_core_pos());
+}
diff --git a/plat/brcm/common/brcm_mhu.c b/plat/brcm/common/brcm_mhu.c
new file mode 100644
index 0000000..56f44e0
--- /dev/null
+++ b/plat/brcm/common/brcm_mhu.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <drivers/delay_timer.h>
+#include <lib/bakery_lock.h>
+
+#include <brcm_mhu.h>
+#include <platform_def.h>
+
+#include "m0_ipc.h"
+
+#define PLAT_MHU_INTR_REG	AP_TO_SCP_MAILBOX1
+
+/* SCP MHU secure channel registers */
+#define SCP_INTR_S_STAT		CRMU_IHOST_SW_PERSISTENT_REG11
+#define SCP_INTR_S_SET		CRMU_IHOST_SW_PERSISTENT_REG11
+#define SCP_INTR_S_CLEAR	CRMU_IHOST_SW_PERSISTENT_REG11
+
+/* CPU MHU secure channel registers */
+#define CPU_INTR_S_STAT		CRMU_IHOST_SW_PERSISTENT_REG10
+#define CPU_INTR_S_SET		CRMU_IHOST_SW_PERSISTENT_REG10
+#define CPU_INTR_S_CLEAR	CRMU_IHOST_SW_PERSISTENT_REG10
+
+static DEFINE_BAKERY_LOCK(bcm_lock);
+
+/*
+ * Slot 31 is reserved because the MHU hardware uses this register bit to
+ * indicate a non-secure access attempt. The total number of available slots is
+ * therefore 31 [30:0].
+ */
+#define MHU_MAX_SLOT_ID		30
+
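+/*
+ * Acquire the secure MHU channel and poll, with a bounded retry count, until
+ * any previous command in the given slot has completed.
+ */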
+void mhu_secure_message_start(unsigned int slot_id)
+{
+	int iter = 1000000;
+
+	assert(slot_id <= MHU_MAX_SLOT_ID);
+
+	bakery_lock_get(&bcm_lock);
+	/* Make sure any previous command has finished */
+	do {
+		if (!(mmio_read_32(PLAT_BRCM_MHU_BASE + CPU_INTR_S_STAT) &
+		   (1 << slot_id)))
+			break;
+
+		 udelay(1);
+
+	} while (--iter);
+
+	assert(iter != 0);
+}
+
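+/*
+ * Set the slot bit in the CPU->SCP register, post the SCPI command code to
+ * the CRMU mailbox, ring the MHU doorbell, then poll until the IPC layer
+ * acknowledges the command.
+ */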
+void mhu_secure_message_send(unsigned int slot_id)
+{
+	uint32_t response, iter = 1000000;
+
+	assert(slot_id <= MHU_MAX_SLOT_ID);
+	assert(!(mmio_read_32(PLAT_BRCM_MHU_BASE + CPU_INTR_S_STAT) &
+							(1 << slot_id)));
+
+	/* Send command to SCP */
+	mmio_setbits_32(PLAT_BRCM_MHU_BASE + CPU_INTR_S_SET, 1 << slot_id);
+	mmio_write_32(CRMU_MAIL_BOX0, MCU_IPC_MCU_CMD_SCPI);
+	mmio_write_32(PLAT_BRCM_MHU_BASE + PLAT_MHU_INTR_REG, 0x1);
+
+	/* Wait until IPC transport acknowledges reception of SCP command */
+	do {
+		response = mmio_read_32(CRMU_MAIL_BOX0);
+		if ((response & ~MCU_IPC_CMD_REPLY_MASK) ==
+		   (MCU_IPC_CMD_DONE_MASK | MCU_IPC_MCU_CMD_SCPI))
+			break;
+
+		udelay(1);
+
+	} while (--iter);
+
+	assert(iter != 0);
+}
+
+uint32_t mhu_secure_message_wait(void)
+{
+	/* Wait for response from SCP */
+	uint32_t response, iter = 1000000;
+
+	do {
+		response = mmio_read_32(PLAT_BRCM_MHU_BASE + SCP_INTR_S_STAT);
+		if (!response)
+			break;
+
+		udelay(1);
+	} while (--iter);
+	assert(iter != 0);
+
+	return response;
+}
+
+void mhu_secure_message_end(unsigned int slot_id)
+{
+	assert(slot_id <= MHU_MAX_SLOT_ID);
+
+	/*
+	 * Clear any response we got by clearing the relevant slot bit in the
+	 * CLEAR register.
+	 */
+	mmio_clrbits_32(PLAT_BRCM_MHU_BASE + SCP_INTR_S_CLEAR, 1 << slot_id);
+	bakery_lock_release(&bcm_lock);
+}
+
+void mhu_secure_init(void)
+{
+	bakery_lock_init(&bcm_lock);
+
+	/*
+	 * The STAT register resets to zero. Ensure it is in the expected state,
+	 * as a stale or garbage value would make us think it's a message we've
+	 * already sent.
+	 */
+	mmio_write_32(PLAT_BRCM_MHU_BASE + CPU_INTR_S_STAT, 0);
+	mmio_write_32(PLAT_BRCM_MHU_BASE + SCP_INTR_S_STAT, 0);
+}
+
+void plat_brcm_pwrc_setup(void)
+{
+	mhu_secure_init();
+}
diff --git a/plat/brcm/common/brcm_mhu.h b/plat/brcm/common/brcm_mhu.h
new file mode 100644
index 0000000..6c89a34
--- /dev/null
+++ b/plat/brcm/common/brcm_mhu.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef BRCM_MHU_H
+#define BRCM_MHU_H
+
+#include <stdint.h>
+
+void mhu_secure_message_start(unsigned int slot_id);
+void mhu_secure_message_send(unsigned int slot_id);
+uint32_t mhu_secure_message_wait(void);
+void mhu_secure_message_end(unsigned int slot_id);
+
+void mhu_secure_init(void);
+
+#endif	/* BRCM_MHU_H */
diff --git a/plat/brcm/common/brcm_scpi.c b/plat/brcm/common/brcm_scpi.c
new file mode 100644
index 0000000..0a703cb
--- /dev/null
+++ b/plat/brcm/common/brcm_scpi.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <lib/utils.h>
+#include <plat/common/platform.h>
+
+#include <brcm_mhu.h>
+#include <brcm_scpi.h>
+#include <platform_def.h>
+
+#define SCPI_SHARED_MEM_SCP_TO_AP	(PLAT_SCP_COM_SHARED_MEM_BASE)
+#define SCPI_SHARED_MEM_AP_TO_SCP	(PLAT_SCP_COM_SHARED_MEM_BASE \
+								 + 0x100)
+
+/* Header and payload addresses for commands from AP to SCP */
+#define SCPI_CMD_HEADER_AP_TO_SCP		\
+	((scpi_cmd_t *) SCPI_SHARED_MEM_AP_TO_SCP)
+#define SCPI_CMD_PAYLOAD_AP_TO_SCP		\
+	((void *) (SCPI_SHARED_MEM_AP_TO_SCP + sizeof(scpi_cmd_t)))
+
+/* Header and payload addresses for responses from SCP to AP */
+#define SCPI_RES_HEADER_SCP_TO_AP \
+	((scpi_cmd_t *) SCPI_SHARED_MEM_SCP_TO_AP)
+#define SCPI_RES_PAYLOAD_SCP_TO_AP \
+	((void *) (SCPI_SHARED_MEM_SCP_TO_AP + sizeof(scpi_cmd_t)))
+
+/* ID of the MHU slot used for the SCPI protocol */
+#define SCPI_MHU_SLOT_ID		0
+
+static void scpi_secure_message_start(void)
+{
+	mhu_secure_message_start(SCPI_MHU_SLOT_ID);
+}
+
+static void scpi_secure_message_send(size_t payload_size)
+{
+	/*
+	 * Ensure that any write to the SCPI payload area is seen by SCP before
+	 * we write to the MHU register. If these 2 writes were reordered by
+	 * the CPU then SCP would read stale payload data
+	 */
+	dmbst();
+
+	mhu_secure_message_send(SCPI_MHU_SLOT_ID);
+}
+
+static void scpi_secure_message_receive(scpi_cmd_t *cmd)
+{
+	uint32_t mhu_status;
+
+	assert(cmd != NULL);
+
+	mhu_status = mhu_secure_message_wait();
+
+	/* Expect an SCPI message, reject any other protocol */
+	if (mhu_status != (1 << SCPI_MHU_SLOT_ID)) {
+		ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
+		      mhu_status);
+		panic();
+	}
+
+	/*
+	 * Ensure that any read to the SCPI payload area is done after reading
+	 * the MHU register. If these 2 reads were reordered then the CPU would
+	 * read invalid payload data
+	 */
+	dmbld();
+
+	memcpy(cmd, (void *) SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd));
+}
+
+static void scpi_secure_message_end(void)
+{
+	mhu_secure_message_end(SCPI_MHU_SLOT_ID);
+}
+
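+/*
+ * Boot-time handshake with the SCP: receive the SCP_READY command, validate
+ * its ID and size, then echo the header back with an updated status field.
+ */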
+int scpi_wait_ready(void)
+{
+	scpi_cmd_t scpi_cmd;
+
+	VERBOSE("Waiting for SCP_READY command...\n");
+
+	/* Get a message from the SCP */
+	scpi_secure_message_start();
+	scpi_secure_message_receive(&scpi_cmd);
+	scpi_secure_message_end();
+
+	/* We are expecting 'SCP Ready', produce correct error if it's not */
+	scpi_status_t status = SCP_OK;
+
+	if (scpi_cmd.id != SCPI_CMD_SCP_READY) {
+		ERROR("Unexpected SCP command: expected #%u, received #%u\n",
+		      SCPI_CMD_SCP_READY, scpi_cmd.id);
+		status = SCP_E_SUPPORT;
+	} else if (scpi_cmd.size != 0) {
+		ERROR("SCP_READY cmd has incorrect size: expected 0, got %u\n",
+		      scpi_cmd.size);
+		status = SCP_E_SIZE;
+	}
+
+	VERBOSE("Sending response for SCP_READY command\n");
+
+	/*
+	 * Send our response back to SCP.
+	 * We are using the same SCPI header, just update the status field.
+	 */
+	scpi_cmd.status = status;
+	scpi_secure_message_start();
+	memcpy((void *) SCPI_SHARED_MEM_AP_TO_SCP, &scpi_cmd, sizeof(scpi_cmd));
+	scpi_secure_message_send(0);
+	scpi_secure_message_end();
+
+	return status == SCP_OK ? 0 : -1;
+}
+
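+/*
+ * Pack the CPU ID, cluster ID and the requested CPU/cluster/system power
+ * states into a SET_POWER_STATE command and send it to the SCP. No reply is
+ * expected for this command.
+ */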
+void scpi_set_brcm_power_state(unsigned int mpidr,
+		scpi_power_state_t cpu_state, scpi_power_state_t cluster_state,
+		scpi_power_state_t brcm_state)
+{
+	scpi_cmd_t *cmd;
+	uint32_t state = 0;
+	uint32_t *payload_addr;
+
+#if ARM_PLAT_MT
+	/*
+	 * The current SCPI driver only caters for single-threaded platforms.
+	 * Hence we ignore the thread ID (which is always 0) for such platforms.
+	 */
+	state |= (mpidr >> MPIDR_AFF1_SHIFT) & 0x0f;	/* CPU ID */
+	state |= ((mpidr >> MPIDR_AFF2_SHIFT) & 0x0f) << 4;	/* Cluster ID */
+#else
+	state |= mpidr & 0x0f;	/* CPU ID */
+	state |= (mpidr & 0xf00) >> 4;	/* Cluster ID */
+#endif /* ARM_PLAT_MT */
+
+	state |= cpu_state << 8;
+	state |= cluster_state << 12;
+	state |= brcm_state << 16;
+
+	scpi_secure_message_start();
+
+	/* Populate the command header */
+	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
+	cmd->id = SCPI_CMD_SET_POWER_STATE;
+	cmd->set = SCPI_SET_NORMAL;
+	cmd->sender = 0;
+	cmd->size = sizeof(state);
+	/* Populate the command payload */
+	payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
+	*payload_addr = state;
+	scpi_secure_message_send(sizeof(state));
+
+	/*
+	 * SCP does not reply to this command in order to avoid MHU interrupts
+	 * from the sender, which could interfere with its power state request.
+	 */
+	scpi_secure_message_end();
+}
+
+/*
+ * Query and obtain power state from SCP.
+ *
+ * In response to the query, SCP returns power states of all CPUs in all
+ * clusters of the system. The returned response is then filtered based on the
+ * supplied MPIDR. Power states of the requested cluster and the CPUs within
+ * it are updated via the supplied non-NULL pointer arguments.
+ *
+ * Returns 0 on success, or -1 on errors.
+ */
+int scpi_get_brcm_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
+		unsigned int *cluster_state_p)
+{
+	scpi_cmd_t *cmd;
+	scpi_cmd_t response;
+	int power_state, cpu, cluster, rc = -1;
+
+	/*
+	 * Extract CPU and cluster membership of the given MPIDR. SCPI caters
+	 * for only up to 0xf clusters, and 8 CPUs per cluster
+	 */
+	cpu = mpidr & MPIDR_AFFLVL_MASK;
+	cluster = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+	if (cpu >= 8 || cluster >= 0xf)
+		return -1;
+
+	scpi_secure_message_start();
+
+	/* Populate request headers */
+	zeromem(SCPI_CMD_HEADER_AP_TO_SCP, sizeof(*cmd));
+	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
+	cmd->id = SCPI_CMD_GET_POWER_STATE;
+
+	/*
+	 * Send message and wait for SCP's response
+	 */
+	scpi_secure_message_send(0);
+	scpi_secure_message_receive(&response);
+
+	if (response.status != SCP_OK)
+		goto exit;
+
+	/* Validate SCP response */
+	if (!CHECK_RESPONSE(response, cluster))
+		goto exit;
+
+	/* Extract power states for required cluster */
+	power_state = *(((uint16_t *) SCPI_RES_PAYLOAD_SCP_TO_AP) + cluster);
+	if (CLUSTER_ID(power_state) != cluster)
+		goto exit;
+
+	/* Update power state via pointers */
+	if (cluster_state_p)
+		*cluster_state_p = CLUSTER_POWER_STATE(power_state);
+	if (cpu_state_p)
+		*cpu_state_p = CPU_POWER_STATE(power_state);
+	rc = 0;
+
+exit:
+	scpi_secure_message_end();
+	return rc;
+}
+
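+/*
+ * Request a system-level power state change (shutdown, reboot or reset) from
+ * the SCP via the SYS_POWER_STATE command.
+ */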
+uint32_t scpi_sys_power_state(scpi_system_state_t system_state)
+{
+	scpi_cmd_t *cmd;
+	uint8_t *payload_addr;
+
+	scpi_secure_message_start();
+
+	/* Populate the command header */
+	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
+	cmd->id = SCPI_CMD_SYS_POWER_STATE;
+	cmd->set = 0;
+	cmd->sender = 0;
+	cmd->size = sizeof(*payload_addr);
+	/* Populate the command payload */
+	payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
+	*payload_addr = system_state & 0xff;
+	scpi_secure_message_send(sizeof(*payload_addr));
+
+	scpi_secure_message_end();
+
+	return SCP_OK;
+}
diff --git a/plat/brcm/common/brcm_scpi.h b/plat/brcm/common/brcm_scpi.h
new file mode 100644
index 0000000..f3b658f
--- /dev/null
+++ b/plat/brcm/common/brcm_scpi.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef BRCM_SCPI_H
+#define BRCM_SCPI_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*
+ * An SCPI command consists of a header and a payload.
+ * The following structure describes the header. It is 64 bits long.
+ */
+typedef struct {
+	/* Command ID */
+	uint32_t id		: 7;
+	/* Set ID. Identifies whether this is a standard or extended command. */
+	uint32_t set		: 1;
+	/* Sender ID to match a reply. The value is sender specific. */
+	uint32_t sender		: 8;
+	/* Size of the payload in bytes (0 - 511) */
+	uint32_t size		: 9;
+	uint32_t reserved	: 7;
+	/*
+	 * Status indicating the success of a command.
+	 * See the enum below.
+	 */
+	uint32_t status;
+} scpi_cmd_t;
+
+typedef enum {
+	SCPI_SET_NORMAL = 0,	/* Normal SCPI commands */
+	SCPI_SET_EXTENDED	/* Extended SCPI commands */
+} scpi_set_t;
+
+enum {
+	SCP_OK = 0,	/* Success */
+	SCP_E_PARAM,	/* Invalid parameter(s) */
+	SCP_E_ALIGN,	/* Invalid alignment */
+	SCP_E_SIZE,	/* Invalid size */
+	SCP_E_HANDLER,	/* Invalid handler or callback */
+	SCP_E_ACCESS,	/* Invalid access or permission denied */
+	SCP_E_RANGE,	/* Value out of range */
+	SCP_E_TIMEOUT,	/* Timeout has occurred */
+	SCP_E_NOMEM,	/* Invalid memory area or pointer */
+	SCP_E_PWRSTATE,	/* Invalid power state */
+	SCP_E_SUPPORT,	/* Feature not supported or disabled */
+	SCPI_E_DEVICE,	/* Device error */
+	SCPI_E_BUSY,	/* Device is busy */
+};
+
+typedef uint32_t scpi_status_t;
+typedef enum {
+	SCPI_CMD_SCP_READY = 0x01,
+	SCPI_CMD_SET_POWER_STATE = 0x03,
+	SCPI_CMD_GET_POWER_STATE = 0x04,
+	SCPI_CMD_SYS_POWER_STATE = 0x05
+} scpi_command_t;
+
+/*
+ * Macros to parse SCP response to GET_POWER_STATE command
+ *
+ *   [3:0] : cluster ID
+ *   [7:4] : cluster state: 0 = on; 3 = off; rest are reserved
+ *   [15:8]: on/off state for individual CPUs in the cluster
+ *
+ * Payload is in little-endian
+ */
+#define CLUSTER_ID(_resp)		((_resp) & 0xf)
+#define CLUSTER_POWER_STATE(_resp)	(((_resp) >> 4) & 0xf)
+
+/* Result is a bit mask of CPU on/off states in the cluster */
+#define CPU_POWER_STATE(_resp)		(((_resp) >> 8) & 0xff)
+
+/*
+ * For GET_POWER_STATE, SCP returns the power states of every cluster. The
+ * size of response depends on the number of clusters in the system. The
+ * SCP-to-AP payload contains 2 bytes per cluster. Make sure the response is
+ * large enough to contain power states of a given cluster
+ */
+#define CHECK_RESPONSE(_resp, _clus)  (_resp.size >= (((_clus) + 1) * 2))
+
+typedef enum {
+	scpi_power_on = 0,
+	scpi_power_retention = 1,
+	scpi_power_off = 3,
+} scpi_power_state_t;
+
+typedef enum {
+	scpi_system_shutdown = 0,
+	scpi_system_reboot = 1,
+	scpi_system_reset = 2
+} scpi_system_state_t;
+
+extern int scpi_wait_ready(void);
+extern void scpi_set_brcm_power_state(unsigned int mpidr,
+				      scpi_power_state_t cpu_state,
+				      scpi_power_state_t cluster_state,
+				      scpi_power_state_t css_state);
+int scpi_get_brcm_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
+			      unsigned int *cluster_state_p);
+uint32_t scpi_sys_power_state(scpi_system_state_t system_state);
+
+#endif	/* BRCM_SCPI_H */