Add BL31 support common across Broadcom platforms

Add the common BL31 port for Broadcom platforms: the BL31 memory map
and build support, iHost cluster power-on and PLL configuration,
satellite timer synchronization, PSCI power-management handlers (with
and without an SCP), the power domain topology and TrustZone/TZPC
configuration for the Stingray SoC.

Signed-off-by: Sheetal Tigadoli <sheetal.tigadoli@broadcom.com>
Change-Id: Ic1a392a633b447935fa3a7528326c97845f5b1bc
diff --git a/plat/brcm/board/common/board_common.c b/plat/brcm/board/common/board_common.c
index e7b5e47..2f764ab 100644
--- a/plat/brcm/board/common/board_common.c
+++ b/plat/brcm/board/common/board_common.c
@@ -36,6 +36,38 @@
 };
 #endif
 
+#if IMAGE_BL31
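+/*
+ * BL31 memory map. The HSLS region is always mapped; the peripheral, USB,
+ * DRAM/SRAM and CRMU SRAM regions are mapped only when the platform
+ * defines them.
+ */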
+const mmap_region_t plat_brcm_mmap[] = {
+	HSLS_REGION,
+#ifdef PERIPH0_REGION
+	PERIPH0_REGION,
+#endif
+#ifdef PERIPH1_REGION
+	PERIPH1_REGION,
+#endif
+#ifdef PERIPH2_REGION
+	PERIPH2_REGION,
+#endif
+#ifdef USB_REGION
+	USB_REGION,
+#endif
+#ifdef USE_DDR
+	BRCM_MAP_NS_DRAM1,
+#ifdef BRCM_MAP_NS_SHARED_DRAM
+	BRCM_MAP_NS_SHARED_DRAM,
+#endif
+#else
+#ifdef BRCM_MAP_EXT_SRAM
+	BRCM_MAP_EXT_SRAM,
+#endif
+#endif
+#if defined(USE_CRMU_SRAM) && defined(CRMU_SRAM_BASE)
+	CRMU_SRAM_REGION,
+#endif
+	{0}
+};
+#endif
+
 CASSERT((ARRAY_SIZE(plat_brcm_mmap) - 1) <= PLAT_BRCM_MMAP_ENTRIES,
 	assert_plat_brcm_mmap_mismatch);
 CASSERT((PLAT_BRCM_MMAP_ENTRIES + BRCM_BL_REGIONS) <= MAX_MMAP_REGIONS,
diff --git a/plat/brcm/board/common/board_common.mk b/plat/brcm/board/common/board_common.mk
index 9ac26af..7c9cf77 100644
--- a/plat/brcm/board/common/board_common.mk
+++ b/plat/brcm/board/common/board_common.mk
@@ -28,6 +28,13 @@
 $(eval $(call add_define,SYSCNT_FREQ))
 endif
 
+# Process ARM_BL31_IN_DRAM flag
+ifeq (${ARM_BL31_IN_DRAM},)
+ARM_BL31_IN_DRAM		:=	0
+endif
+$(eval $(call assert_boolean,ARM_BL31_IN_DRAM))
+$(eval $(call add_define,ARM_BL31_IN_DRAM))
+
 ifeq (${STANDALONE_BL2},yes)
 $(eval $(call add_define,MMU_DISABLED))
 endif
@@ -50,7 +57,8 @@
 # Use generic OID definition (tbbr_oid.h)
 USE_TBBR_DEFS			:=	1
 
-PLAT_INCLUDES		+=	-Iplat/brcm/board/common
+PLAT_INCLUDES		+=	-Iplat/brcm/board/common \
+				-Iinclude/drivers/brcm
 
 PLAT_BL_COMMON_SOURCES	+=	plat/brcm/common/brcm_common.c \
 				plat/brcm/board/common/cmn_sec.c \
@@ -72,6 +80,24 @@
 
 BL2_SOURCES		+= 	plat/brcm/common/brcm_bl2_setup.c
 
+BL31_SOURCES		+=	plat/brcm/common/brcm_bl31_setup.c
+
+# M0 runtime firmware
+ifdef SCP_BL2
+$(eval $(call add_define,NEED_SCP_BL2))
+SCP_CFG_DIR=$(dir ${SCP_BL2})
+PLAT_INCLUDES += -I${SCP_CFG_DIR}
+endif
+
+ifneq (${NEED_BL33},yes)
+# If there is no BL33, BL31 will jump to this address.
+ifeq (${USE_DDR},yes)
+PRELOADED_BL33_BASE := 0x80000000
+else
+PRELOADED_BL33_BASE := 0x74000000
+endif
+endif
+
 # Use translation tables library v1 by default
 ARM_XLAT_TABLES_LIB_V1		:=	1
 ifeq (${ARM_XLAT_TABLES_LIB_V1}, 1)
diff --git a/plat/brcm/board/common/timer_sync.c b/plat/brcm/board/common/timer_sync.c
new file mode 100644
index 0000000..7e33a94
--- /dev/null
+++ b/plat/brcm/board/common/timer_sync.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2015 - 2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <lib/mmio.h>
+
+#include <platform_def.h>
+#include <timer_sync.h>
+
+/*******************************************************************************
+ * Defines related to time sync and satellite timers
+ ******************************************************************************/
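+/*
+ * Write-enable key (assumed) that must occupy bits [31:8] when writing
+ * the satellite timer enable register below.
+ */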
+#define TIME_SYNC_WR_ENA	((uint32_t)0xACCE55 << 8)
+#define IHOST_STA_TMR_CTRL	0x1800
+#define IHOST_SAT_TMR_INC_L	0x1814
+#define IHOST_SAT_TMR_INC_H	0x1818
+
+#define SAT_TMR_CYCLE_DELAY	2
+#define SAT_TMR_32BIT_WRAP_VAL	(BIT_64(32) - SAT_TMR_CYCLE_DELAY)
+
+void ihost_enable_satellite_timer(unsigned int cluster_id)
+{
+	uintptr_t ihost_base;
+	uint32_t time_lx, time_h;
+	uintptr_t ihost_enable;
+
+	VERBOSE("Program iHost%u satellite timer\n", cluster_id);
+	ihost_base = IHOST0_BASE + cluster_id * IHOST_ADDR_SPACE;
+
+	/* this read starts the satellite timer counting from 0 */
+	ihost_enable = CENTRAL_TIMER_GET_IHOST_ENA_BASE + cluster_id * 4;
+	time_lx = mmio_read_32(ihost_enable);
+
+	/*
+	 * Increment the satellite timer by the central timer plus 2
+	 * to account for a 1-cycle delay through the NOC
+	 * plus the counter starting from 0.
+	 */
+	mmio_write_32(ihost_base + IHOST_SAT_TMR_INC_L,
+		      time_lx + SAT_TMR_CYCLE_DELAY);
+
+	/*
+	 * Read the latched upper word; if adding 2 causes the low word to
+	 * wrap, propagate the carry into the upper word.
+	 */
+	time_h = mmio_read_32(CENTRAL_TIMER_GET_H);
+	if (time_lx >= SAT_TMR_32BIT_WRAP_VAL)
+		mmio_write_32(ihost_base + IHOST_SAT_TMR_INC_H, time_h + 1);
+	else
+		mmio_write_32(ihost_base + IHOST_SAT_TMR_INC_H, time_h);
+}
+
+void brcm_timer_sync_init(void)
+{
+	unsigned int cluster_id;
+
+	/* Get the Time Sync module out of reset */
+	mmio_setbits_32(CDRU_MISC_RESET_CONTROL,
+			BIT(CDRU_MISC_RESET_CONTROL_TS_RESET_N));
+
+	/* Deassert the Central Timer TIMER_EN signal for all modules */
+	mmio_write_32(CENTRAL_TIMER_SAT_TMR_ENA, TIME_SYNC_WR_ENA);
+
+	/* Enable/program the satellite timer of this CPU's cluster */
+	cluster_id = MPIDR_AFFLVL1_VAL(read_mpidr());
+	ihost_enable_satellite_timer(cluster_id);
+}
diff --git a/plat/brcm/board/stingray/driver/ihost_pll_config.c b/plat/brcm/board/stingray/driver/ihost_pll_config.c
new file mode 100644
index 0000000..1184928
--- /dev/null
+++ b/plat/brcm/board/stingray/driver/ihost_pll_config.c
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2016-2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdint.h>
+
+#include <common/debug.h>
+#include <lib/mmio.h>
+
+#include <dmu.h>
+
+#define IHOST0_CONFIG_ROOT	0x66000000
+#define IHOST1_CONFIG_ROOT	0x66002000
+#define IHOST2_CONFIG_ROOT	0x66004000
+#define IHOST3_CONFIG_ROOT	0x66006000
+#define A72_CRM_PLL_PWR_ON	0x00000070
+#define A72_CRM_PLL_PWR_ON__PLL0_RESETB_R	4
+#define A72_CRM_PLL_PWR_ON__PLL0_POST_RESETB_R	5
+#define A72_CRM_PLL_CHNL_BYPS_EN		0x000000ac
+#define A72_CRM_PLL_CHNL_BYPS_EN__PLL_0_CHNL_0_BYPS_EN_R	0
+#define A72_CRM_PLL_CHNL_BYPS_EN_DATAMASK	0x0000ec1f
+#define A72_CRM_PLL_CMD				0x00000080
+#define A72_CRM_PLL_CMD__UPDATE_PLL0_FREQUENCY_VCO_R		0
+#define A72_CRM_PLL_CMD__UPDATE_PLL0_FREQUENCY_POST_R		1
+#define A72_CRM_PLL_STATUS			0x00000084
+#define A72_CRM_PLL_STATUS__PLL0_LOCK_R		9
+#define A72_CRM_PLL0_CTRL1			0x00000100
+#define A72_CRM_PLL0_CTRL2 			0x00000104
+#define A72_CRM_PLL0_CTRL3 			0x00000108
+#define A72_CRM_PLL0_CTRL3__PLL0_PDIV_R 12
+#define A72_CRM_PLL0_CTRL4 			0x0000010c
+#define A72_CRM_PLL0_CTRL4__PLL0_KP_R		0
+#define A72_CRM_PLL0_CTRL4__PLL0_KI_R		4
+#define A72_CRM_PLL0_CTRL4__PLL0_KA_R		7
+#define A72_CRM_PLL0_CTRL4__PLL0_FREFEFF_INFO_R	10
+
+#define PLL_MODE_VCO		0x0
+#define PLL_MODE_BYPASS		0x1
+#define PLL_RESET_TYPE_PLL	0x1
+#define PLL_RESET_TYPE_POST	0x2
+#define PLL_VCO			0x1
+#define PLL_POSTDIV		0x2
+#define ARM_FREQ_3G		PLL_FREQ_FULL
+#define ARM_FREQ_1P5G		PLL_FREQ_HALF
+#define ARM_FREQ_750M		PLL_FREQ_QRTR
+
+static unsigned int ARMCOE_crm_getBaseAddress(unsigned int cluster_num)
+{
+	unsigned int ihostx_config_root;
+
+	switch (cluster_num) {
+	case 0:
+	default:
+		ihostx_config_root = IHOST0_CONFIG_ROOT;
+		break;
+	case 1:
+		ihostx_config_root = IHOST1_CONFIG_ROOT;
+		break;
+	case 2:
+		ihostx_config_root = IHOST2_CONFIG_ROOT;
+		break;
+	case 3:
+		ihostx_config_root = IHOST3_CONFIG_ROOT;
+		break;
+	}
+
+	return ihostx_config_root;
+}
+
+static void ARMCOE_crm_pllAssertReset(unsigned int cluster_num,
+				      unsigned int reset_type)
+{
+	unsigned long ihostx_config_root;
+	unsigned int pll_rst_ctrl;
+
+	ihostx_config_root = ARMCOE_crm_getBaseAddress(cluster_num);
+	pll_rst_ctrl = mmio_read_32(ihostx_config_root + A72_CRM_PLL_PWR_ON);
+
+	// PLL reset
+	if (reset_type & PLL_RESET_TYPE_PLL) {
+		pll_rst_ctrl &= ~(0x1<<A72_CRM_PLL_PWR_ON__PLL0_RESETB_R);
+	}
+	// post-div channel reset
+	if (reset_type & PLL_RESET_TYPE_POST) {
+		pll_rst_ctrl &= ~(0x1<<A72_CRM_PLL_PWR_ON__PLL0_POST_RESETB_R);
+	}
+
+	mmio_write_32(ihostx_config_root + A72_CRM_PLL_PWR_ON, pll_rst_ctrl);
+}
+
+static void ARMCOE_crm_pllSetMode(unsigned int cluster_num, unsigned int mode)
+{
+	unsigned long ihostx_config_root;
+	unsigned int pll_byp_ctrl;
+
+	ihostx_config_root = ARMCOE_crm_getBaseAddress(cluster_num);
+	pll_byp_ctrl = mmio_read_32(ihostx_config_root +
+				    A72_CRM_PLL_CHNL_BYPS_EN);
+
+	if (mode == PLL_MODE_VCO) {
+		// use PLL DCO output
+		pll_byp_ctrl &=
+			~BIT(A72_CRM_PLL_CHNL_BYPS_EN__PLL_0_CHNL_0_BYPS_EN_R);
+	} else {
+		// use PLL bypass sources
+		pll_byp_ctrl |=
+			BIT(A72_CRM_PLL_CHNL_BYPS_EN__PLL_0_CHNL_0_BYPS_EN_R);
+	}
+
+	mmio_write_32(ihostx_config_root + A72_CRM_PLL_CHNL_BYPS_EN,
+		      pll_byp_ctrl);
+}
+
+static void ARMCOE_crm_pllFreqSet(unsigned int cluster_num,
+				  unsigned int ihost_pll_freq_sel,
+				  unsigned int pdiv)
+{
+	unsigned int ndiv_int;
+	unsigned int ndiv_frac_low, ndiv_frac_high;
+	unsigned long ihostx_config_root;
+
+	ndiv_frac_low = 0x0;
+	ndiv_frac_high = 0x0;
+
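+	/*
+	 * ndiv_int encodes the frequency selection: 0x78 (120) for 3 GHz,
+	 * 0x3c (60) for 1.5 GHz and 0x1e (30) for 750 MHz.
+	 */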
+	if (ihost_pll_freq_sel == ARM_FREQ_3G) {
+		ndiv_int = 0x78;
+	} else if (ihost_pll_freq_sel == ARM_FREQ_1P5G) {
+		ndiv_int = 0x3c;
+	} else if (ihost_pll_freq_sel == ARM_FREQ_750M) {
+		ndiv_int = 0x1e;
+	} else {
+		return;
+	}
+
+	ndiv_int &= 0x3FF;                // low 10 bits
+	ndiv_frac_low &= 0x3FF;
+	ndiv_frac_high &= 0x3FF;
+
+	ihostx_config_root = ARMCOE_crm_getBaseAddress(cluster_num);
+
+	mmio_write_32(ihostx_config_root+A72_CRM_PLL0_CTRL1, ndiv_frac_low);
+	mmio_write_32(ihostx_config_root+A72_CRM_PLL0_CTRL2, ndiv_frac_high);
+	mmio_write_32(ihostx_config_root+A72_CRM_PLL0_CTRL3,
+		      ndiv_int |
+		      ((pdiv << A72_CRM_PLL0_CTRL3__PLL0_PDIV_R) & 0xF000));
+
+	mmio_write_32(ihostx_config_root + A72_CRM_PLL0_CTRL4,
+			/* From Section 10 of PLL spec */
+			(3 << A72_CRM_PLL0_CTRL4__PLL0_KP_R) |
+			/* From Section 10 of PLL spec */
+			(2 << A72_CRM_PLL0_CTRL4__PLL0_KI_R) |
+			/* Normal mode (i.e. not fast-locking) */
+			(0 << A72_CRM_PLL0_CTRL4__PLL0_KA_R) |
+			/* 50 MHz */
+			(50 << A72_CRM_PLL0_CTRL4__PLL0_FREFEFF_INFO_R));
+}
+
+static void ARMCOE_crm_pllDeassertReset(unsigned int cluster_num,
+					unsigned int reset_type)
+{
+	unsigned long ihostx_config_root;
+	unsigned int pll_rst_ctrl;
+
+	ihostx_config_root = ARMCOE_crm_getBaseAddress(cluster_num);
+	pll_rst_ctrl = mmio_read_32(ihostx_config_root + A72_CRM_PLL_PWR_ON);
+
+	// PLL reset
+	if (reset_type & PLL_RESET_TYPE_PLL) {
+		pll_rst_ctrl |= (0x1 << A72_CRM_PLL_PWR_ON__PLL0_RESETB_R);
+	}
+
+	// post-div channel reset
+	if (reset_type & PLL_RESET_TYPE_POST) {
+		pll_rst_ctrl |= (0x1 << A72_CRM_PLL_PWR_ON__PLL0_POST_RESETB_R);
+	}
+
+	mmio_write_32(ihostx_config_root + A72_CRM_PLL_PWR_ON, pll_rst_ctrl);
+}
+
+static void ARMCOE_crm_pllUpdate(unsigned int cluster_num, unsigned int type)
+{
+	unsigned long ihostx_config_root;
+	unsigned int pll_cmd;
+
+	ihostx_config_root = ARMCOE_crm_getBaseAddress(cluster_num);
+	pll_cmd = mmio_read_32(ihostx_config_root + A72_CRM_PLL_CMD);
+
+	// VCO update
+	if (type & PLL_VCO) {
+		pll_cmd |= BIT(A72_CRM_PLL_CMD__UPDATE_PLL0_FREQUENCY_VCO_R);
+	}
+	// post-div channel update
+	if (type & PLL_POSTDIV) {
+		pll_cmd |= BIT(A72_CRM_PLL_CMD__UPDATE_PLL0_FREQUENCY_POST_R);
+	}
+
+	mmio_write_32(ihostx_config_root+A72_CRM_PLL_CMD, pll_cmd);
+}
+
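+/* Crude CPU busy-wait; the loop count is not calibrated to wall-clock time */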
+static void insert_delay(unsigned int delay)
+{
+	volatile unsigned int index;
+
+	for (index = 0; index < delay; index++)
+		;
+}
+
+
+/*
+ * Returns 1 if the PLL locked within a certain interval, 0 otherwise
+ */
+static unsigned int ARMCOE_crm_pllIsLocked(unsigned int cluster_num)
+{
+	unsigned long ihostx_config_root;
+	unsigned int lock_status;
+	unsigned int i;
+
+	ihostx_config_root = ARMCOE_crm_getBaseAddress(cluster_num);
+
+	/* wait a while for pll to lock before returning from this function */
+	for (i = 0; i < 1500; i++) {
+		insert_delay(256);
+		lock_status = mmio_read_32(ihostx_config_root +
+					   A72_CRM_PLL_STATUS);
+		if (lock_status & BIT(A72_CRM_PLL_STATUS__PLL0_LOCK_R))
+			return 1;
+	}
+
+	ERROR("PLL of Cluster #%u failed to lock\n", cluster_num);
+	return 0;
+}
+
+/*
+ * ihost PLL Variable Frequency Configuration
+ *
+ * Frequency Limit {VCO,ARM} (GHz):
+ *	0 - no limit,
+ *	1 - {3.0,1.5},
+ *	2 - {4.0,2.0},
+ *	3 - {5.0,2.5}
+ */
+uint32_t bcm_set_ihost_pll_freq(uint32_t cluster_num, int ihost_pll_freq_sel)
+{
+	NOTICE("cluster: %u, freq_sel:0x%x\n", cluster_num, ihost_pll_freq_sel);
+
+	//bypass PLL
+	ARMCOE_crm_pllSetMode(cluster_num, PLL_MODE_BYPASS);
+	//assert reset
+	ARMCOE_crm_pllAssertReset(cluster_num,
+				  PLL_RESET_TYPE_PLL | PLL_RESET_TYPE_POST);
+	//set ndiv_int for different freq
+	ARMCOE_crm_pllFreqSet(cluster_num, ihost_pll_freq_sel, 0x1);
+	//de-assert reset
+	ARMCOE_crm_pllDeassertReset(cluster_num, PLL_RESET_TYPE_PLL);
+	ARMCOE_crm_pllUpdate(cluster_num, PLL_VCO);
+	//waiting for PLL lock
+	ARMCOE_crm_pllIsLocked(cluster_num);
+	ARMCOE_crm_pllDeassertReset(cluster_num, PLL_RESET_TYPE_POST);
+	//disable bypass PLL
+	ARMCOE_crm_pllSetMode(cluster_num, PLL_MODE_VCO);
+
+	return 0;
+}
+
+uint32_t bcm_get_ihost_pll_freq(uint32_t cluster_num)
+{
+	unsigned long ihostx_config_root;
+	uint32_t ndiv_int;
+	uint32_t ihost_pll_freq_sel;
+
+	ihostx_config_root = ARMCOE_crm_getBaseAddress(cluster_num);
+	ndiv_int = mmio_read_32(ihostx_config_root+A72_CRM_PLL0_CTRL3) & 0x3FF;
+
+	if (ndiv_int == 0x78) {
+		ihost_pll_freq_sel = ARM_FREQ_3G;
+	} else if (ndiv_int == 0x3c) {
+		ihost_pll_freq_sel = ARM_FREQ_1P5G;
+	} else if (ndiv_int == 0x1e) {
+		ihost_pll_freq_sel = ARM_FREQ_750M;
+	} else {
+		/* return "no limit" otherwise */
+		ihost_pll_freq_sel = 0;
+	}
+	return ihost_pll_freq_sel;
+}
diff --git a/plat/brcm/board/stingray/include/ihost_pm.h b/plat/brcm/board/stingray/include/ihost_pm.h
new file mode 100644
index 0000000..83493ab
--- /dev/null
+++ b/plat/brcm/board/stingray/include/ihost_pm.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2016 - 2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef IHOST_PM
+#define IHOST_PM
+
+#include <stdint.h>
+
+#define CLUSTER_POWER_ON	0x1
+#define CLUSTER_POWER_OFF	0x0
+
+void ihost_power_on_cluster(u_register_t mpidr);
+void ihost_power_on_secondary_core(u_register_t mpidr, uint64_t rvbar);
+void ihost_enable_satellite_timer(unsigned int cluster_id);
+
+#endif
diff --git a/plat/brcm/board/stingray/include/platform_def.h b/plat/brcm/board/stingray/include/platform_def.h
index 950c66b..d61a737 100644
--- a/plat/brcm/board/stingray/include/platform_def.h
+++ b/plat/brcm/board/stingray/include/platform_def.h
@@ -29,16 +29,14 @@
 #define PLATFORM_CLUSTER1_CORE_COUNT	2
 #define PLATFORM_CLUSTER2_CORE_COUNT	2
 #define PLATFORM_CLUSTER3_CORE_COUNT	2
-#define PLATFORM_CLUSTER4_CORE_COUNT	2
 
 #define BRCM_SYSTEM_COUNT 1
-#define BRCM_CLUSTER_COUNT 5
+#define BRCM_CLUSTER_COUNT 4
 
 #define PLATFORM_CORE_COUNT	(PLATFORM_CLUSTER0_CORE_COUNT + \
 					PLATFORM_CLUSTER1_CORE_COUNT+ \
 					PLATFORM_CLUSTER2_CORE_COUNT+ \
-					PLATFORM_CLUSTER3_CORE_COUNT+ \
-					PLATFORM_CLUSTER4_CORE_COUNT)
+					PLATFORM_CLUSTER3_CORE_COUNT)
 
 #define PLAT_NUM_PWR_DOMAINS	(BRCM_SYSTEM_COUNT + \
 				 BRCM_CLUSTER_COUNT + \
diff --git a/plat/brcm/board/stingray/include/timer_sync.h b/plat/brcm/board/stingray/include/timer_sync.h
new file mode 100644
index 0000000..1f15bb0
--- /dev/null
+++ b/plat/brcm/board/stingray/include/timer_sync.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2016 - 2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TIMER_SYNC_H
+#define TIMER_SYNC_H
+
+void brcm_timer_sync_init(void);
+
+#endif
diff --git a/plat/brcm/board/stingray/platform.mk b/plat/brcm/board/stingray/platform.mk
index 1b9cc3b..83e502d 100644
--- a/plat/brcm/board/stingray/platform.mk
+++ b/plat/brcm/board/stingray/platform.mk
@@ -11,6 +11,9 @@
 DRIVER_CC_ENABLE := 1
 $(eval $(call add_define,DRIVER_CC_ENABLE))
 
+# BL31 is in DRAM
+ARM_BL31_IN_DRAM	:=	1
+
 USE_CRMU_SRAM := yes
 
 # Use single cluster
@@ -23,6 +26,12 @@
 BOARD_CFG := bcm958742k
 endif
 
+# BL31 build for standalone mode
+ifeq (${STANDALONE_BL31},yes)
+RESET_TO_BL31 := 1
+$(info Using RESET_TO_BL31)
+endif
+
 # For testing purposes, use memsys stubs.  Remove once memsys is fully tested.
 USE_MEMSYS_STUBS := yes
 
@@ -45,4 +54,36 @@
 PLAT_BL_COMMON_SOURCES	+=	lib/cpus/aarch64/cortex_a72.S \
 				plat/${SOC_DIR}/aarch64/plat_helpers.S \
 				drivers/ti/uart/aarch64/16550_console.S \
-				drivers/arm/tzc/tzc400.c
+				plat/${SOC_DIR}/src/tz_sec.c \
+				drivers/arm/tzc/tzc400.c \
+				plat/${SOC_DIR}/src/topology.c
+
+
+# Include GICv3 driver files
+include drivers/arm/gic/v3/gicv3.mk
+
+BRCM_GIC_SOURCES	:=	${GICV3_SOURCES}		\
+				plat/common/plat_gicv3.c	\
+				plat/brcm/common/brcm_gicv3.c
+
+BL31_SOURCES		+=	\
+				drivers/arm/ccn/ccn.c \
+				plat/brcm/board/common/timer_sync.c \
+				plat/brcm/common/brcm_ccn.c \
+				plat/common/plat_psci_common.c \
+				plat/${SOC_DIR}/driver/ihost_pll_config.c \
+				${BRCM_GIC_SOURCES}
+
+ifdef SCP_BL2
+PLAT_INCLUDES		+=	-Iplat/brcm/common/
+
+BL31_SOURCES		+=	plat/brcm/common/brcm_mhu.c \
+				plat/brcm/common/brcm_scpi.c \
+				plat/${SOC_DIR}/src/brcm_pm_ops.c
+else
+BL31_SOURCES		+=	plat/${SOC_DIR}/src/ihost_pm.c \
+				plat/${SOC_DIR}/src/pm.c
+endif
+
+# Do not execute the startup code on warm reset.
+PROGRAMMABLE_RESET_ADDRESS	:=	1
diff --git a/plat/brcm/board/stingray/src/brcm_pm_ops.c b/plat/brcm/board/stingray/src/brcm_pm_ops.c
new file mode 100644
index 0000000..81d2ccf
--- /dev/null
+++ b/plat/brcm/board/stingray/src/brcm_pm_ops.c
@@ -0,0 +1,393 @@
+/*
+ * Copyright (c) 2017 - 2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/ccn.h>
+#include <lib/bakery_lock.h>
+#include <lib/mmio.h>
+#include <lib/psci/psci.h>
+#include <lib/spinlock.h>
+
+#include <brcm_scpi.h>
+#include <cmn_plat_util.h>
+#include <plat_brcm.h>
+#include <platform_def.h>
+
+#include "m0_cfg.h"
+
+
+#define CORE_PWR_STATE(state)	((state)->pwr_domain_state[MPIDR_AFFLVL0])
+#define CLUSTER_PWR_STATE(state)	\
+			((state)->pwr_domain_state[MPIDR_AFFLVL1])
+#define SYSTEM_PWR_STATE(state)	((state)->pwr_domain_state[MPIDR_AFFLVL2])
+
+#define VENDOR_RST_TYPE_SHIFT	4
+
+#if HW_ASSISTED_COHERENCY
+/*
+ * On systems where participant CPUs are cache-coherent, we can use spinlocks
+ * instead of bakery locks.
+ */
+spinlock_t event_lock;
+#define event_lock_get(_lock) spin_lock(&_lock)
+#define event_lock_release(_lock) spin_unlock(&_lock)
+
+#else
+/*
+ * Use bakery locks for state coordination as not all participants are
+ * cache coherent now.
+ */
+DEFINE_BAKERY_LOCK(event_lock);
+#define event_lock_get(_lock) bakery_lock_get(&_lock)
+#define event_lock_release(_lock) bakery_lock_release(&_lock)
+#endif
+
+static int brcm_pwr_domain_on(u_register_t mpidr)
+{
+	/*
+	 * SCP takes care of powering up parent power domains so we
+	 * only need to care about level 0
+	 */
+	scpi_set_brcm_power_state(mpidr, scpi_power_on, scpi_power_on,
+				  scpi_power_on);
+
+	return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Handler called when a power level has just been powered on after
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from. This handler would never be invoked with
+ * the system power domain uninitialized as either the primary would have taken
+ * care of it as part of cold boot or the first core awakened from system
+ * suspend would have already initialized it.
+ ******************************************************************************/
+static void brcm_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	unsigned long cluster_id = MPIDR_AFFLVL1_VAL(read_mpidr());
+
+	/* Assert that the system power domain need not be initialized */
+	assert(SYSTEM_PWR_STATE(target_state) == PLAT_LOCAL_STATE_RUN);
+
+	assert(CORE_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF);
+
+	/*
+	 * Perform the common cluster specific operations i.e enable coherency
+	 * if this cluster was off.
+	 */
+	if (CLUSTER_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF) {
+		INFO("Cluster #%lu entering snoop/dvm domain\n", cluster_id);
+		ccn_enter_snoop_dvm_domain(1 << cluster_id);
+	}
+
+	/* Program the gic per-cpu distributor or re-distributor interface */
+	plat_brcm_gic_pcpu_init();
+
+	/* Enable the gic cpu interface */
+	plat_brcm_gic_cpuif_enable();
+}
+
+static void brcm_power_down_common(void)
+{
+	unsigned int standbywfil2, standbywfi;
+	uint64_t mpidr = read_mpidr_el1();
+
+	switch (MPIDR_AFFLVL1_VAL(mpidr)) {
+	case 0x0:
+		standbywfi = CDRU_PROC_EVENT_CLEAR__IH0_CDRU_STANDBYWFI;
+		standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH0_CDRU_STANDBYWFIL2;
+		break;
+	case 0x1:
+		standbywfi = CDRU_PROC_EVENT_CLEAR__IH1_CDRU_STANDBYWFI;
+		standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH1_CDRU_STANDBYWFIL2;
+		break;
+	case 0x2:
+		standbywfi = CDRU_PROC_EVENT_CLEAR__IH2_CDRU_STANDBYWFI;
+		standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH2_CDRU_STANDBYWFIL2;
+		break;
+	case 0x3:
+		standbywfi = CDRU_PROC_EVENT_CLEAR__IH3_CDRU_STANDBYWFI;
+		standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH3_CDRU_STANDBYWFIL2;
+		break;
+	default:
+		ERROR("Invalid cluster #%llx\n", MPIDR_AFFLVL1_VAL(mpidr));
+		return;
+	}
+	/* Clear the WFI status bit */
+	event_lock_get(event_lock);
+	mmio_setbits_32(CDRU_PROC_EVENT_CLEAR,
+			(1 << (standbywfi + MPIDR_AFFLVL0_VAL(mpidr))) |
+			(1 << standbywfil2));
+	event_lock_release(event_lock);
+}
+
+/*
+ * Helper function to inform power down state to SCP.
+ */
+static void brcm_scp_suspend(const psci_power_state_t *target_state)
+{
+	uint32_t cluster_state = scpi_power_on;
+	uint32_t system_state = scpi_power_on;
+
+	/* Check if power down at system power domain level is requested */
+	if (SYSTEM_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF)
+		system_state = scpi_power_retention;
+
+	/* Check if Cluster is to be turned off */
+	if (CLUSTER_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF)
+		cluster_state = scpi_power_off;
+
+	/*
+	 * Ask the SCP to power down the appropriate components depending upon
+	 * their state.
+	 */
+	scpi_set_brcm_power_state(read_mpidr_el1(),
+				  scpi_power_off,
+				  cluster_state,
+				  system_state);
+}
+
+/*
+ * Helper function to turn off a CPU power domain and its parent power domains
+ * if applicable. Since SCPI doesn't differentiate between OFF and suspend, we
+ * call the suspend helper here.
+ */
+static void brcm_scp_off(const psci_power_state_t *target_state)
+{
+	brcm_scp_suspend(target_state);
+}
+
+static void brcm_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	unsigned long cluster_id = MPIDR_AFFLVL1_VAL(read_mpidr_el1());
+
+	assert(CORE_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF);
+	/* Prevent interrupts from spuriously waking up this cpu */
+	plat_brcm_gic_cpuif_disable();
+
+	/* Turn redistributor off */
+	plat_brcm_gic_redistif_off();
+
+	/* If Cluster is to be turned off, disable coherency */
+	if (CLUSTER_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF)
+		ccn_exit_snoop_dvm_domain(1 << cluster_id);
+
+	brcm_power_down_common();
+
+	brcm_scp_off(target_state);
+}
+
+/*******************************************************************************
+ * Handler called when the CPU power domain is about to enter standby.
+ ******************************************************************************/
+static void brcm_cpu_standby(plat_local_state_t cpu_state)
+{
+	unsigned int scr;
+
+	assert(cpu_state == PLAT_LOCAL_STATE_RET);
+
+	scr = read_scr_el3();
+	/*
+	 * Enable the Non secure interrupt to wake the CPU.
+	 * In GICv3 affinity routing mode, the non secure group1 interrupts use
+	 * the PhysicalFIQ at EL3 whereas in GICv2, it uses the PhysicalIRQ.
+	 * Enabling both the bits works for both GICv2 mode and GICv3 affinity
+	 * routing mode.
+	 */
+	write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
+	isb();
+	dsb();
+	wfi();
+
+	/*
+	 * Restore SCR to the original value, synchronisation of scr_el3 is
+	 * done by eret while el3_exit to save some execution cycles.
+	 */
+	write_scr_el3(scr);
+}
+
+/*
+ * Helper function to shutdown the system via SCPI.
+ */
+static void __dead2 brcm_scp_sys_shutdown(void)
+{
+	/*
+	 * Disable GIC CPU interface to prevent pending interrupt
+	 * from waking up the AP from WFI.
+	 */
+	plat_brcm_gic_cpuif_disable();
+
+	/* Flush and invalidate data cache */
+	dcsw_op_all(DCCISW);
+
+	/* Bring the cluster out of the coherency domain as it is going to die */
+	plat_brcm_interconnect_exit_coherency();
+
+	brcm_power_down_common();
+
+	/* Send the power down request to the SCP */
+	scpi_sys_power_state(scpi_system_shutdown);
+
+	wfi();
+	ERROR("BRCM System Off: operation not handled.\n");
+	panic();
+}
+
+/*
+ * Helper function to reset the system
+ */
+static void __dead2 brcm_scp_sys_reset(unsigned int reset_type)
+{
+	/*
+	 * Disable GIC CPU interface to prevent pending interrupt
+	 * from waking up the AP from WFI.
+	 */
+	plat_brcm_gic_cpuif_disable();
+
+	/* Flush and invalidate data cache */
+	dcsw_op_all(DCCISW);
+
+	/* Bring the cluster out of the coherency domain as it is going to die */
+	plat_brcm_interconnect_exit_coherency();
+
+	brcm_power_down_common();
+
+	/* Send the system reset request to the SCP.
+	 *
+	 * As per the PSCI spec, the system power state can be:
+	 * 0 -> Shutdown
+	 * 1 -> Reboot - board-level reset
+	 * 2 -> Reset  - SoC-level reset
+	 *
+	 * The spec allocates 8 bits (two nibbles) for this. One nibble is
+	 * sufficient for sending the state, so we use the second nibble for
+	 * the vendor-defined reset type.
+	 */
+	scpi_sys_power_state((reset_type << VENDOR_RST_TYPE_SHIFT) |
+			     scpi_system_reboot);
+
+	wfi();
+	ERROR("BRCM System Reset: operation not handled.\n");
+	panic();
+}
+
+static void __dead2 brcm_system_reset(void)
+{
+	brcm_scp_sys_reset(SOFT_SYS_RESET_L1);
+}
+
+static int brcm_system_reset2(int is_vendor, int reset_type,
+		      u_register_t cookie)
+{
+	if (!is_vendor) {
+		/* Architectural warm boot: only warm reset is supported */
+		reset_type = SOFT_RESET_L3;
+	}
+	brcm_scp_sys_reset(reset_type);
+
+	/*
+	 * brcm_scp_sys_reset cannot return (it is a __dead function),
+	 * but brcm_system_reset2 has to return some value, even in
+	 * this case.
+	 */
+	return 0;
+}
+
+static int brcm_validate_ns_entrypoint(uintptr_t entrypoint)
+{
+	/*
+	 * Check if the non secure entrypoint lies within the non
+	 * secure DRAM.
+	 */
+	if ((entrypoint >= BRCM_NS_DRAM1_BASE) &&
+	    (entrypoint < (BRCM_NS_DRAM1_BASE + BRCM_NS_DRAM1_SIZE)))
+		return PSCI_E_SUCCESS;
+#ifndef AARCH32
+	if ((entrypoint >= BRCM_DRAM2_BASE) &&
+	    (entrypoint < (BRCM_DRAM2_BASE + BRCM_DRAM2_SIZE)))
+		return PSCI_E_SUCCESS;
+
+	if ((entrypoint >= BRCM_DRAM3_BASE) &&
+	    (entrypoint < (BRCM_DRAM3_BASE + BRCM_DRAM3_SIZE)))
+		return PSCI_E_SUCCESS;
+#endif
+
+	return PSCI_E_INVALID_ADDRESS;
+}
+
+/*******************************************************************************
+ * ARM standard platform handler called to check the validity of the power state
+ * parameter.
+ ******************************************************************************/
+static int brcm_validate_power_state(unsigned int power_state,
+			    psci_power_state_t *req_state)
+{
+	int pstate = psci_get_pstate_type(power_state);
+	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
+	int i;
+
+	assert(req_state);
+
+	if (pwr_lvl > PLAT_MAX_PWR_LVL)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Sanity check the requested state */
+	if (pstate == PSTATE_TYPE_STANDBY) {
+		/*
+		 * It is possible to enter standby only at power level 0.
+		 * Ignore any other power level.
+		 */
+		if (pwr_lvl != MPIDR_AFFLVL0)
+			return PSCI_E_INVALID_PARAMS;
+
+		req_state->pwr_domain_state[MPIDR_AFFLVL0] =
+					PLAT_LOCAL_STATE_RET;
+	} else {
+		for (i = MPIDR_AFFLVL0; i <= pwr_lvl; i++)
+			req_state->pwr_domain_state[i] =
+					PLAT_LOCAL_STATE_OFF;
+	}
+
+	/*
+	 * We expect the 'state id' to be zero.
+	 */
+	if (psci_get_pstate_id(power_state))
+		return PSCI_E_INVALID_PARAMS;
+
+	return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Export the platform handlers via plat_brcm_psci_pm_ops. The ARM Standard
+ * platform will take care of registering the handlers with PSCI.
+ ******************************************************************************/
+plat_psci_ops_t plat_brcm_psci_pm_ops = {
+	.pwr_domain_on		= brcm_pwr_domain_on,
+	.pwr_domain_on_finish	= brcm_pwr_domain_on_finish,
+	.pwr_domain_off		= brcm_pwr_domain_off,
+	.cpu_standby		= brcm_cpu_standby,
+	.system_off		= brcm_scp_sys_shutdown,
+	.system_reset		= brcm_system_reset,
+	.system_reset2		= brcm_system_reset2,
+	.validate_ns_entrypoint = brcm_validate_ns_entrypoint,
+	.validate_power_state	= brcm_validate_power_state,
+};
+
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const struct plat_psci_ops **psci_ops)
+{
+	*psci_ops = &plat_brcm_psci_pm_ops;
+
+	/* Setup mailbox with entry point. */
+	mmio_write_64(CRMU_CFG_BASE + offsetof(M0CFG, core_cfg.rvbar),
+		      sec_entrypoint);
+
+	return 0;
+}
diff --git a/plat/brcm/board/stingray/src/ihost_pm.c b/plat/brcm/board/stingray/src/ihost_pm.c
new file mode 100644
index 0000000..9141d3e
--- /dev/null
+++ b/plat/brcm/board/stingray/src/ihost_pm.c
@@ -0,0 +1,355 @@
+/*
+ * Copyright (c) 2016 - 2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+
+#include <dmu.h>
+#include <ihost_pm.h>
+#include <platform_def.h>
+
+#define CDRU_CCN_REGISTER_CONTROL_1__D2XS_PD_IHOST1			2
+#define CDRU_CCN_REGISTER_CONTROL_1__D2XS_PD_IHOST2			1
+#define CDRU_CCN_REGISTER_CONTROL_1__D2XS_PD_IHOST3			0
+#define CDRU_MISC_RESET_CONTROL__CDRU_IH1_RESET				9
+#define CDRU_MISC_RESET_CONTROL__CDRU_IH2_RESET				8
+#define CDRU_MISC_RESET_CONTROL__CDRU_IH3_RESET				7
+#define A72_CRM_SOFTRESETN_0						0x480
+#define A72_CRM_SOFTRESETN_1						0x484
+#define A72_CRM_DOMAIN_4_CONTROL					0x810
+#define A72_CRM_DOMAIN_4_CONTROL__DOMAIN_4_ISO_DFT			3
+#define A72_CRM_DOMAIN_4_CONTROL__DOMAIN_4_ISO_MEM			6
+#define A72_CRM_DOMAIN_4_CONTROL__DOMAIN_4_ISO_I_O			0
+#define A72_CRM_SUBSYSTEM_MEMORY_CONTROL_3				0xB4C
+#define MEMORY_PDA_HI_SHIFT						0x0
+#define A72_CRM_PLL_PWR_ON						0x70
+#define A72_CRM_PLL_PWR_ON__PLL0_ISO_PLLOUT				4
+#define A72_CRM_PLL_PWR_ON__PLL0_PWRON_LDO				1
+#define A72_CRM_PLL_PWR_ON__PLL0_PWRON_PLL				0
+#define A72_CRM_SUBSYSTEM_MEMORY_CONTROL_2				0xB48
+#define A72_CRM_PLL_INTERRUPT_STATUS					0x8c
+#define A72_CRM_PLL_INTERRUPT_STATUS__PLL0_LOCK_LOST_STATUS		8
+#define A72_CRM_PLL_INTERRUPT_STATUS__PLL0_LOCK_STATUS			9
+#define A72_CRM_INTERRUPT_ENABLE					0x4
+#define A72_CRM_INTERRUPT_ENABLE__PLL0_INT_ENABLE			4
+#define A72_CRM_PLL_INTERRUPT_ENABLE					0x88
+#define A72_CRM_PLL_INTERRUPT_ENABLE__PLL0_LOCK_STATUS_INT_ENB		9
+#define A72_CRM_PLL_INTERRUPT_ENABLE__PLL0_LOCK_LOST_STATUS_INT_ENB	8
+#define A72_CRM_PLL0_CFG0_CTRL						0x120
+#define A72_CRM_PLL0_CFG1_CTRL						0x124
+#define A72_CRM_PLL0_CFG2_CTRL						0x128
+#define A72_CRM_PLL0_CFG3_CTRL						0x12C
+#define A72_CRM_CORE_CONFIG_DBGCTRL__DBGROMADDRV			0
+#define A72_CRM_CORE_CONFIG_DBGCTRL					0xD50
+#define A72_CRM_CORE_CONFIG_DBGROM_LO					0xD54
+#define A72_CRM_CORE_CONFIG_DBGROM_HI					0xD58
+#define A72_CRM_SUBSYSTEM_CONFIG_1__DBGL1RSTDISABLE			2
+#define A72_CRM_SOFTRESETN_0__CRYSTAL26_SOFTRESETN			0
+#define A72_CRM_SOFTRESETN_0__CRM_PLL0_SOFTRESETN			1
+#define A72_CRM_AXI_CLK_DESC						0x304
+#define A72_CRM_ACP_CLK_DESC						0x308
+#define A72_CRM_ATB_CLK_DESC						0x30C
+#define A72_CRM_PCLKDBG_DESC						0x310
+#define A72_CRM_CLOCK_MODE_CONTROL					0x40
+#define A72_CRM_CLOCK_MODE_CONTROL__CLK_CHANGE_TRIGGER			0
+#define A72_CRM_CLOCK_CONTROL_0						0x200
+#define A72_CRM_CLOCK_CONTROL_0__ARM_HW_SW_ENABLE_SEL			0
+#define A72_CRM_CLOCK_CONTROL_0__AXI_HW_SW_ENABLE_SEL			2
+#define A72_CRM_CLOCK_CONTROL_0__ACP_HW_SW_ENABLE_SEL			4
+#define A72_CRM_CLOCK_CONTROL_0__ATB_HW_SW_ENABLE_SEL			6
+#define A72_CRM_CLOCK_CONTROL_0__PCLKDBG_HW_SW_ENA_SEL			8
+#define A72_CRM_CLOCK_CONTROL_1						0x204
+#define A72_CRM_CLOCK_CONTROL_1__TMON_HW_SW_ENABLE_SEL			6
+#define A72_CRM_CLOCK_CONTROL_1__APB_HW_SW_ENABLE_SEL			8
+#define A72_CRM_SOFTRESETN_0__CRYSTAL26_SOFTRESETN			0
+#define A72_CRM_SOFTRESETN_0__CRM_PLL0_SOFTRESETN			1
+#define A72_CRM_SOFTRESETN_0__AXI_SOFTRESETN				9
+#define A72_CRM_SOFTRESETN_0__ACP_SOFTRESETN				10
+#define A72_CRM_SOFTRESETN_0__ATB_SOFTRESETN				11
+#define A72_CRM_SOFTRESETN_0__PCLKDBG_SOFTRESETN			12
+#define A72_CRM_SOFTRESETN_0__TMON_SOFTRESETN				15
+#define A72_CRM_SOFTRESETN_0__L2_SOFTRESETN				3
+#define A72_CRM_SOFTRESETN_1__APB_SOFTRESETN				8
+
+/* core related regs */
+#define A72_CRM_DOMAIN_0_CONTROL					0x800
+#define A72_CRM_DOMAIN_0_CONTROL__DOMAIN_0_ISO_MEM			0x6
+#define A72_CRM_DOMAIN_0_CONTROL__DOMAIN_0_ISO_I_O			0x0
+#define A72_CRM_DOMAIN_1_CONTROL					0x804
+#define A72_CRM_DOMAIN_1_CONTROL__DOMAIN_1_ISO_MEM			0x6
+#define A72_CRM_DOMAIN_1_CONTROL__DOMAIN_1_ISO_I_O			0x0
+#define A72_CRM_CORE_CONFIG_RVBA0_LO					0xD10
+#define A72_CRM_CORE_CONFIG_RVBA0_MID					0xD14
+#define A72_CRM_CORE_CONFIG_RVBA0_HI					0xD18
+#define A72_CRM_CORE_CONFIG_RVBA1_LO					0xD20
+#define A72_CRM_CORE_CONFIG_RVBA1_MID					0xD24
+#define A72_CRM_CORE_CONFIG_RVBA1_HI					0xD28
+#define A72_CRM_SUBSYSTEM_CONFIG_0					0xC80
+#define A72_CRM_SUBSYSTEM_CONFIG_0__DBGPWRDUP_CFG_SHIFT			4
+#define A72_CRM_SOFTRESETN_0__COREPOR0_SOFTRESETN			4
+#define A72_CRM_SOFTRESETN_0__COREPOR1_SOFTRESETN			5
+#define A72_CRM_SOFTRESETN_1__CORE0_SOFTRESETN				0
+#define A72_CRM_SOFTRESETN_1__DEBUG0_SOFTRESETN				4
+#define A72_CRM_SOFTRESETN_1__CORE1_SOFTRESETN				1
+#define A72_CRM_SOFTRESETN_1__DEBUG1_SOFTRESETN				5
+
+#define SPROC_MEMORY_BISR 0
+
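+/* Cluster 0 holds the boot CPU and is already on; other clusters start powered off */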
+static int cluster_power_status[PLAT_BRCM_CLUSTER_COUNT] = {CLUSTER_POWER_ON,
+							   CLUSTER_POWER_OFF,
+							   CLUSTER_POWER_OFF,
+							   CLUSTER_POWER_OFF};
+
+void ihost_power_on_cluster(u_register_t mpidr)
+{
+	uint32_t rst, d2xs;
+	uint32_t cluster_id;
+	uint32_t ihost_base;
+#if SPROC_MEMORY_BISR
+	uint32_t bisr, cnt;
+#endif
+	cluster_id = MPIDR_AFFLVL1_VAL(mpidr);
+	uint32_t cluster0_freq_sel;
+
+	if (cluster_power_status[cluster_id] == CLUSTER_POWER_ON)
+		return;
+
+	cluster_power_status[cluster_id] = CLUSTER_POWER_ON;
+	INFO("enabling Cluster #%u\n", cluster_id);
+
+	switch (cluster_id) {
+	case 1:
+		rst = (1 << CDRU_MISC_RESET_CONTROL__CDRU_IH1_RESET);
+		d2xs = (1 << CDRU_CCN_REGISTER_CONTROL_1__D2XS_PD_IHOST1);
+#if SPROC_MEMORY_BISR
+		bisr = CRMU_BISR_PDG_MASK__CRMU_BISR_IHOST1;
+#endif
+		break;
+	case 2:
+		rst = (1 << CDRU_MISC_RESET_CONTROL__CDRU_IH2_RESET);
+		d2xs = (1 << CDRU_CCN_REGISTER_CONTROL_1__D2XS_PD_IHOST2);
+#if SPROC_MEMORY_BISR
+		bisr = CRMU_BISR_PDG_MASK__CRMU_BISR_IHOST2;
+#endif
+		break;
+	case 3:
+		rst = (1 << CDRU_MISC_RESET_CONTROL__CDRU_IH3_RESET);
+		d2xs = (1 << CDRU_CCN_REGISTER_CONTROL_1__D2XS_PD_IHOST3);
+#if SPROC_MEMORY_BISR
+		bisr = CRMU_BISR_PDG_MASK__CRMU_BISR_IHOST3;
+#endif
+		break;
+	default:
+		ERROR("Invalid cluster: %u\n", cluster_id);
+		return;
+	}
+
+	/* Releasing ihost resets */
+	mmio_setbits_32(CDRU_MISC_RESET_CONTROL, rst);
+
+	/* calculate cluster/ihost base address */
+	ihost_base = IHOST0_BASE + cluster_id * IHOST_ADDR_SPACE;
+
+	/* Remove Cluster IO isolation */
+	mmio_clrsetbits_32(ihost_base + A72_CRM_DOMAIN_4_CONTROL,
+		       (1 << A72_CRM_DOMAIN_4_CONTROL__DOMAIN_4_ISO_I_O),
+		       (1 << A72_CRM_DOMAIN_4_CONTROL__DOMAIN_4_ISO_DFT) |
+		       (1 << A72_CRM_DOMAIN_4_CONTROL__DOMAIN_4_ISO_MEM));
+
+	/*
+	 * The BISR sequence requires that all cores of the cluster have
+	 * their I/O isolation removed, so do the same here.
+	 */
+	/* Remove core0 memory IO isolations */
+	mmio_clrsetbits_32(ihost_base + A72_CRM_DOMAIN_0_CONTROL,
+			  (1 << A72_CRM_DOMAIN_0_CONTROL__DOMAIN_0_ISO_I_O),
+			  (1 << A72_CRM_DOMAIN_0_CONTROL__DOMAIN_0_ISO_MEM));
+
+	/* Remove core1 memory IO isolations */
+	mmio_clrsetbits_32(ihost_base + A72_CRM_DOMAIN_1_CONTROL,
+			  (1 << A72_CRM_DOMAIN_1_CONTROL__DOMAIN_1_ISO_I_O),
+			  (1 << A72_CRM_DOMAIN_1_CONTROL__DOMAIN_1_ISO_MEM));
+
+#if SPROC_MEMORY_BISR
+	mmio_setbits_32(CRMU_BISR_PDG_MASK, (1 << bisr));
+
+	if (!(mmio_read_32(CDRU_CHIP_STRAP_DATA_LSW) &
+		       (1 << CDRU_CHIP_STRAP_DATA_LSW__BISR_BYPASS_MODE))) {
+		/* BISR completion would take max 2 usec */
+		cnt = 0;
+		while (cnt < 2) {
+			udelay(1);
+			if (mmio_read_32(CRMU_CHIP_OTPC_STATUS) &
+			(1 << CRMU_CHIP_OTPC_STATUS__OTP_BISR_LOAD_DONE))
+				break;
+			cnt++;
+		}
+	}
+
+	/* If BISR has not completed, this needs to be checked with the ASIC team */
+	if (((mmio_read_32(CRMU_CHIP_OTPC_STATUS)) &
+	   (1 << CRMU_CHIP_OTPC_STATUS__OTP_BISR_LOAD_DONE)) == 0) {
+		WARN("BISR did not complete and needs to be addressed\n");
+	}
+#endif
+
+	/* PLL Power up. supply is already on. Turn on PLL LDO/PWR */
+	mmio_write_32(ihost_base + A72_CRM_PLL_PWR_ON,
+		     (1 << A72_CRM_PLL_PWR_ON__PLL0_ISO_PLLOUT) |
+		     (1 << A72_CRM_PLL_PWR_ON__PLL0_PWRON_LDO) |
+		     (1 << A72_CRM_PLL_PWR_ON__PLL0_PWRON_PLL));
+
+	/* 1 us in spec; doubling it to be safe */
+	udelay(2);
+
+	/* Remove PLL output ISO */
+	mmio_write_32(ihost_base + A72_CRM_PLL_PWR_ON,
+		     (1 << A72_CRM_PLL_PWR_ON__PLL0_PWRON_LDO) |
+		     (1 << A72_CRM_PLL_PWR_ON__PLL0_PWRON_PLL));
+
+	/*
+	 * PLL0 Configuration Control Register
+	 * these 4 registers drive the i_pll_ctrl[63:0] input of pll
+	 * (16b per register).
+	 * the values are derived from the spec (sections 8 and 10).
+	 */
+
+	mmio_write_32(ihost_base + A72_CRM_PLL0_CFG0_CTRL, 0x00000000);
+	mmio_write_32(ihost_base + A72_CRM_PLL0_CFG1_CTRL, 0x00008400);
+	mmio_write_32(ihost_base + A72_CRM_PLL0_CFG2_CTRL, 0x00000001);
+	mmio_write_32(ihost_base + A72_CRM_PLL0_CFG3_CTRL, 0x00000000);
+
+	/* Read the freq_sel from cluster 0, which is up already */
+	cluster0_freq_sel = bcm_get_ihost_pll_freq(0);
+	bcm_set_ihost_pll_freq(cluster_id, cluster0_freq_sel);
+
+	udelay(1);
+
+	/* Release clock source reset */
+	mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_0,
+		       (1 << A72_CRM_SOFTRESETN_0__CRYSTAL26_SOFTRESETN) |
+		       (1 << A72_CRM_SOFTRESETN_0__CRM_PLL0_SOFTRESETN));
+
+	udelay(1);
+
+	/*
+	 * Integer clock dividers (divider value = n + 1).
+	 * These divide the ARM PLL clock frequency.
+	 */
+	mmio_write_32(ihost_base + A72_CRM_AXI_CLK_DESC, 0x00000001);
+	mmio_write_32(ihost_base + A72_CRM_ACP_CLK_DESC, 0x00000001);
+	mmio_write_32(ihost_base + A72_CRM_ATB_CLK_DESC, 0x00000004);
+	mmio_write_32(ihost_base + A72_CRM_PCLKDBG_DESC, 0x0000000b);
+
+	/*
+	 * clock change trigger - must set to take effect after clock
+	 * source change
+	 */
+	mmio_setbits_32(ihost_base + A72_CRM_CLOCK_MODE_CONTROL,
+		       (1 << A72_CRM_CLOCK_MODE_CONTROL__CLK_CHANGE_TRIGGER));
+
+	/* turn on functional clocks */
+	mmio_setbits_32(ihost_base + A72_CRM_CLOCK_CONTROL_0,
+		       (3 << A72_CRM_CLOCK_CONTROL_0__ARM_HW_SW_ENABLE_SEL) |
+		       (3 << A72_CRM_CLOCK_CONTROL_0__AXI_HW_SW_ENABLE_SEL) |
+		       (3 << A72_CRM_CLOCK_CONTROL_0__ACP_HW_SW_ENABLE_SEL) |
+		       (3 << A72_CRM_CLOCK_CONTROL_0__ATB_HW_SW_ENABLE_SEL) |
+		       (3 << A72_CRM_CLOCK_CONTROL_0__PCLKDBG_HW_SW_ENA_SEL));
+
+	mmio_setbits_32(ihost_base + A72_CRM_CLOCK_CONTROL_1,
+		       (3 << A72_CRM_CLOCK_CONTROL_1__TMON_HW_SW_ENABLE_SEL) |
+		       (3 << A72_CRM_CLOCK_CONTROL_1__APB_HW_SW_ENABLE_SEL));
+
+	/* Program D2XS Power Down Registers */
+	mmio_setbits_32(CDRU_CCN_REGISTER_CONTROL_1, d2xs);
+
+	/* Program Core Config Debug ROM Address Registers */
+	/* mark valid for Debug ROM base address */
+	mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_DBGCTRL,
+		     (1 << A72_CRM_CORE_CONFIG_DBGCTRL__DBGROMADDRV));
+
+	/* Program Lo and HI address of coresight DBG rom address */
+	mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_DBGROM_LO,
+		     (CORESIGHT_BASE_ADDR >> 12) & 0xffff);
+	mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_DBGROM_HI,
+		     (CORESIGHT_BASE_ADDR >> 28) & 0xffff);
+
+	/*
+	 * Release soft resets of different components.
+	 * Order: Bus clocks --> PERIPH --> L2 --> cores
+	 */
+
+	/* Bus clocks soft resets */
+	mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_0,
+		       (1 << A72_CRM_SOFTRESETN_0__CRYSTAL26_SOFTRESETN) |
+		       (1 << A72_CRM_SOFTRESETN_0__CRM_PLL0_SOFTRESETN) |
+		       (1 << A72_CRM_SOFTRESETN_0__AXI_SOFTRESETN) |
+		       (1 << A72_CRM_SOFTRESETN_0__ACP_SOFTRESETN) |
+		       (1 << A72_CRM_SOFTRESETN_0__ATB_SOFTRESETN) |
+		       (1 << A72_CRM_SOFTRESETN_0__PCLKDBG_SOFTRESETN));
+
+	mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_1,
+		       (1 << A72_CRM_SOFTRESETN_1__APB_SOFTRESETN));
+
+	/* Periph component softreset */
+	mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_0,
+		       (1 << A72_CRM_SOFTRESETN_0__TMON_SOFTRESETN));
+
+	/* L2 softreset */
+	mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_0,
+		       (1 << A72_CRM_SOFTRESETN_0__L2_SOFTRESETN));
+
+	/* Enable and program Satellite timer */
+	ihost_enable_satellite_timer(cluster_id);
+}
+
+void ihost_power_on_secondary_core(u_register_t mpidr, uint64_t rvbar)
+{
+	uint32_t ihost_base;
+	uint32_t coreid = MPIDR_AFFLVL0_VAL(mpidr);
+	uint32_t cluster_id = MPIDR_AFFLVL1_VAL(mpidr);
+
+	ihost_base = IHOST0_BASE + cluster_id * IHOST_ADDR_SPACE;
+	INFO("programming core #%u\n", coreid);
+
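+	/*
+	 * The reset vector (rvbar) is programmed 16 bits at a time through
+	 * the LO/MID/HI core-config registers of the selected core.
+	 */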
+	if (coreid) {
+		/* program the entry point for core1 */
+		mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_RVBA1_LO,
+			      rvbar & 0xFFFF);
+		mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_RVBA1_MID,
+			     (rvbar >> 16) & 0xFFFF);
+		mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_RVBA1_HI,
+			     (rvbar >> 32) & 0xFFFF);
+	} else {
+		/* program the entry point for core0 */
+		mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_RVBA0_LO,
+			      rvbar & 0xFFFF);
+		mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_RVBA0_MID,
+			     (rvbar >> 16) & 0xFFFF);
+		mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_RVBA0_HI,
+			     (rvbar >> 32) & 0xFFFF);
+	}
+
+	/* Tell debug logic which processor is up */
+	mmio_setbits_32(ihost_base + A72_CRM_SUBSYSTEM_CONFIG_0,
+		       (coreid ?
+		       (2 << A72_CRM_SUBSYSTEM_CONFIG_0__DBGPWRDUP_CFG_SHIFT) :
+		       (1 << A72_CRM_SUBSYSTEM_CONFIG_0__DBGPWRDUP_CFG_SHIFT)));
+
+	/* releasing soft resets for IHOST core */
+	mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_0,
+		       (coreid ?
+		       (1 << A72_CRM_SOFTRESETN_0__COREPOR1_SOFTRESETN) :
+		       (1 << A72_CRM_SOFTRESETN_0__COREPOR0_SOFTRESETN)));
+
+	mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_1,
+		       (coreid ?
+		       ((1 << A72_CRM_SOFTRESETN_1__CORE1_SOFTRESETN) |
+		       (1 << A72_CRM_SOFTRESETN_1__DEBUG1_SOFTRESETN)) :
+		       ((1 << A72_CRM_SOFTRESETN_1__CORE0_SOFTRESETN) |
+		       (1 << A72_CRM_SOFTRESETN_1__DEBUG0_SOFTRESETN))));
+}
diff --git a/plat/brcm/board/stingray/src/pm.c b/plat/brcm/board/stingray/src/pm.c
new file mode 100644
index 0000000..a5ac2e7
--- /dev/null
+++ b/plat/brcm/board/stingray/src/pm.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2015 - 2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/ccn.h>
+#include <drivers/delay_timer.h>
+#include <lib/bakery_lock.h>
+#include <lib/mmio.h>
+#include <lib/psci/psci.h>
+#include <lib/spinlock.h>
+#include <plat/common/platform.h>
+
+#ifdef USE_PAXC
+#include <chimp.h>
+#endif
+#include <cmn_plat_util.h>
+#include <ihost_pm.h>
+#include <plat_brcm.h>
+#include <platform_def.h>
+
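+/*
+ * Secondary entry point saved at PSCI setup time; it is programmed as the
+ * reset vector when a secondary core is powered on.
+ */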
+static uint64_t plat_sec_entrypoint;
+
+/*******************************************************************************
+ * SR handler called when a power domain is about to be turned on. The
+ * mpidr determines the CPU to be turned on.
+ ******************************************************************************/
+static int brcm_pwr_domain_on(u_register_t mpidr)
+{
+	int cpuid;
+
+	cpuid = plat_brcm_calc_core_pos(mpidr);
+	INFO("mpidr: %lu, cpuid: %d\n", mpidr, cpuid);
+
+#ifdef USE_SINGLE_CLUSTER
+	if (cpuid > 1)
+		return PSCI_E_INTERN_FAIL;
+#endif
+
+	ihost_power_on_cluster(mpidr);
+
+	ihost_power_on_secondary_core(mpidr, plat_sec_entrypoint);
+
+	return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * SR handler called when a power domain has just been powered on after
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from.
+ ******************************************************************************/
+static void brcm_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	unsigned long cluster_id = MPIDR_AFFLVL1_VAL(read_mpidr());
+
+	assert(target_state->pwr_domain_state[MPIDR_AFFLVL0] ==
+					PLAT_LOCAL_STATE_OFF);
+
+	if (target_state->pwr_domain_state[MPIDR_AFFLVL1] ==
+					PLAT_LOCAL_STATE_OFF) {
+		INFO("Cluster #%lu entering snoop/dvm domain\n", cluster_id);
+		ccn_enter_snoop_dvm_domain(1 << cluster_id);
+	}
+
+	/* Program the gic per-cpu distributor or re-distributor interface */
+	plat_brcm_gic_pcpu_init();
+
+	/* Enable the gic cpu interface */
+	plat_brcm_gic_cpuif_enable();
+
+	INFO("GIC initialization done for this affinity instance\n");
+}
+
+static void __dead2 brcm_system_reset(void)
+{
+	uint32_t reset_type = SOFT_SYS_RESET_L1;
+
+#ifdef USE_PAXC
+	if (bcm_chimp_is_nic_mode())
+		reset_type = SOFT_RESET_L3;
+#endif
+	INFO("System rebooting - L%d...\n", reset_type);
+
+	plat_soft_reset(reset_type);
+
+	/* This function must not return, as required by the __dead2 attribute */
+	while (1)
+		;
+}
+
+static int brcm_system_reset2(int is_vendor, int reset_type,
+			      u_register_t cookie)
+{
+	INFO("System rebooting - L%d...\n", reset_type);
+
+	plat_soft_reset(reset_type);
+
+	/*
+	 * plat_soft_reset cannot return (it is a __dead function),
+	 * but brcm_system_reset2 has to return some value, even in
+	 * this case.
+	 */
+	return 0;
+}
+
+/*******************************************************************************
+ * Export the platform handlers via plat_brcm_psci_pm_ops. The ARM Standard
+ * platform will take care of registering the handlers with PSCI.
+ ******************************************************************************/
+const plat_psci_ops_t plat_brcm_psci_pm_ops = {
+	.pwr_domain_on		= brcm_pwr_domain_on,
+	.pwr_domain_on_finish	= brcm_pwr_domain_on_finish,
+	.system_reset		= brcm_system_reset,
+	.system_reset2		= brcm_system_reset2
+};
+
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const plat_psci_ops_t **psci_ops)
+{
+	*psci_ops = &plat_brcm_psci_pm_ops;
+	plat_sec_entrypoint = sec_entrypoint;
+
+	return 0;
+}
diff --git a/plat/brcm/board/stingray/src/topology.c b/plat/brcm/board/stingray/src/topology.c
new file mode 100644
index 0000000..24718e5
--- /dev/null
+++ b/plat/brcm/board/stingray/src/topology.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2019-2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <stdint.h>
+
+#include <plat_brcm.h>
+#include <platform_def.h>
+
+/*
+ * On Stingray, the system power level is the highest power level.
+ * The first entry in the power domain descriptor specifies the
+ * number of system power domains i.e. 1.
+ */
+#define SR_PWR_DOMAINS_AT_MAX_PWR_LVL	 1
+
+/*
+ * The Stingray power domain tree descriptor. The cluster power domains
+ * are arranged so that when the PSCI generic code creates the power
+ * domain tree, the indices of the CPU power domain nodes it allocates
+ * match the linear indices returned by plat_core_pos_by_mpidr()
+ * i.e. CLUSTER0 CPUs are allocated indices from 0 to 1 and the higher
+ * indices for other Cluster CPUs.
+ */
+const unsigned char sr_power_domain_tree_desc[] = {
+	/* No of root nodes */
+	SR_PWR_DOMAINS_AT_MAX_PWR_LVL,
+	/* No of children for the root node */
+	BRCM_CLUSTER_COUNT,
+	/* No of children for the first cluster node */
+	PLATFORM_CLUSTER0_CORE_COUNT,
+	/* No of children for the second cluster node */
+	PLATFORM_CLUSTER1_CORE_COUNT,
+	/* No of children for the third cluster node */
+	PLATFORM_CLUSTER2_CORE_COUNT,
+	/* No of children for the fourth cluster node */
+	PLATFORM_CLUSTER3_CORE_COUNT,
+};
+
+/*******************************************************************************
+ * This function returns the Stingray topology tree information.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+	return sr_power_domain_tree_desc;
+}
+
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+	return plat_brcm_calc_core_pos(mpidr);
+}
diff --git a/plat/brcm/board/stingray/src/tz_sec.c b/plat/brcm/board/stingray/src/tz_sec.c
new file mode 100644
index 0000000..07b12a7
--- /dev/null
+++ b/plat/brcm/board/stingray/src/tz_sec.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2016 - 2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/debug.h>
+#include <drivers/arm/tzc400.h>
+#include <lib/mmio.h>
+
+#include <cmn_sec.h>
+#include <platform_def.h>
+
+/*
+ * Trust Zone controllers
+ */
+#define TZC400_FS_SRAM_ROOT	0x66d84000
+
+/*
+ * TZPC Master configure registers
+ */
+
+/* TZPC_TZPCDECPROT0set */
+#define TZPC0_MASTER_NS_BASE		0x68b40804
+#define TZPC0_SATA3_BIT			5
+#define TZPC0_SATA2_BIT			4
+#define TZPC0_SATA1_BIT			3
+#define TZPC0_SATA0_BIT			2
+#define TZPC0_USB3H1_BIT		1
+#define TZPC0_USB3H0_BIT		0
+#define TZPC0_MASTER_SEC_DEFAULT	0
+
+/* TZPC_TZPCDECPROT1set */
+#define TZPC1_MASTER_NS_BASE		0x68b40810
+#define TZPC1_SDIO1_BIT			6
+#define TZPC1_SDIO0_BIT			5
+#define TZPC1_AUDIO0_BIT		4
+#define TZPC1_USB2D_BIT			3
+#define TZPC1_USB2H1_BIT		2
+#define TZPC1_USB2H0_BIT		1
+#define TZPC1_AMAC0_BIT			0
+#define TZPC1_MASTER_SEC_DEFAULT	0
+
+
+struct tz_sec_desc {
+	uintptr_t addr;
+	uint32_t val;
+};
+
+static const struct tz_sec_desc tz_master_defaults[] = {
+{ TZPC0_MASTER_NS_BASE, TZPC0_MASTER_SEC_DEFAULT },
+{ TZPC1_MASTER_NS_BASE, TZPC1_MASTER_SEC_DEFAULT }
+};
+
+/*
+ * Initialize the TrustZone Controller for SRAM partitioning.
+ */
+static void bcm_tzc_setup(void)
+{
+	VERBOSE("Configuring SRAM TrustZone Controller\n");
+
+	/* Init the TZASC controller */
+	tzc400_init(TZC400_FS_SRAM_ROOT);
+
+	/*
+	 * Close the entire SRAM space
+	 * Region 0 covers the entire SRAM space
+	 * None of the NS device can access it.
+	 */
+	tzc400_configure_region0(TZC_REGION_S_RDWR, 0);
+
+	/* Raise an exception if an NS device tries to access secure memory */
+	tzc400_set_action(TZC_ACTION_ERR);
+}
+
+/*
+ * Configure TZ Master as NS_MASTER or SECURE_MASTER
+ * To set a Master to non-secure, use *_SET registers
+ * To set a Master to secure, use *_CLR registers (set + 0x4 address)
+ */
+static void tz_master_set(uint32_t base, uint32_t value, uint32_t ns)
+{
+	if (ns == SECURE_MASTER) {
+		mmio_write_32(base + 4, value);
+	} else {
+		mmio_write_32(base, value);
+	}
+}
+
+/*
+ * Initialize the secure environment for sdio.
+ */
+void plat_tz_sdio_ns_master_set(uint32_t ns)
+{
+	tz_master_set(TZPC1_MASTER_NS_BASE,
+			1 << TZPC1_SDIO0_BIT,
+			ns);
+}
+
+/*
+ * Initialize the secure environment for usb.
+ */
+void plat_tz_usb_ns_master_set(uint32_t ns)
+{
+	tz_master_set(TZPC1_MASTER_NS_BASE,
+			1 << TZPC1_USB2H0_BIT,
+			ns);
+}
+
+/*
+ * Set masters to default configuration.
+ *
+ * DMA security settings are programmed into the PL-330 controller and
+ * are not set by iProc TZPC registers.
+ * DMA always comes up as secure master (*NS bit is 0).
+ *
+ * Because the default reset values of TZPC are 0 (== Secure),
+ * ARM Verilog code makes all masters, including PCIe, come up as
+ * secure.
+ * However, SOTP has a bit called SOTP_ALLMASTER_NS that overrides
+ * TZPC and makes all masters non-secure for AB devices.
+ *
+ * Hence we first set all the TZPC bits to program all masters,
+ * including PCIe, as non-secure, then set the CLEAR_ALLMASTER_NS bit
+ * so that the SOTP_ALLMASTER_NS cannot override TZPC.
+ * Security settings for each master then come from TZPC
+ * (which makes all masters other than DMA non-secure).
+ *
+ * During the boot, all masters other than DMA Ctrlr + list
+ * are non-secure in an AB Prod/AB Dev/AB Pending device.
+ *
+ */
+void plat_tz_master_default_cfg(void)
+{
+	int i;
+
+	/* Configure default secure and non-secure TZ Masters */
+	for (i = 0; i < ARRAY_SIZE(tz_master_defaults); i++) {
+		tz_master_set(tz_master_defaults[i].addr,
+			      tz_master_defaults[i].val,
+			      SECURE_MASTER);
+		tz_master_set(tz_master_defaults[i].addr,
+			      ~tz_master_defaults[i].val,
+			      NS_MASTER);
+	}
+
+	/* Clear all master NS */
+	mmio_setbits_32(SOTP_CHIP_CTRL,
+			1 << SOTP_CLEAR_SYSCTRL_ALL_MASTER_NS);
+
+	/* Initialize TZ controller and Set SRAM to secure */
+	bcm_tzc_setup();
+}
diff --git a/plat/brcm/common/brcm_bl31_setup.c b/plat/brcm/common/brcm_bl31_setup.c
new file mode 100644
index 0000000..d3fa83d
--- /dev/null
+++ b/plat/brcm/common/brcm_bl31_setup.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <drivers/arm/sp804_delay_timer.h>
+#include <lib/utils.h>
+#include <plat/common/platform.h>
+
+#include <bcm_console.h>
+#include <plat_brcm.h>
+#include <platform_def.h>
+
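+/*
+ * Optional shared block in DRAM used to pass information from BL31 to BL33;
+ * its address is handed to BL33 in x0 (see below).
+ */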
+#ifdef BL33_SHARED_DDR_BASE
+struct bl33_info *bl33_info = (struct bl33_info *)BL33_SHARED_DDR_BASE;
+#endif
+
+/*
+ * Placeholder variables for copying the arguments that have been passed to
+ * BL31 from BL2.
+ */
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+/* Weak definitions may be overridden in specific BRCM platform */
+#pragma weak plat_bcm_bl31_early_platform_setup
+#pragma weak plat_brcm_pwrc_setup
+#pragma weak plat_brcm_security_setup
+
+void plat_brcm_security_setup(void)
+{
+
+}
+
+void plat_brcm_pwrc_setup(void)
+{
+
+}
+
+void plat_bcm_bl31_early_platform_setup(void *from_bl2,
+				   bl_params_t *plat_params_from_bl2)
+{
+
+}
+
+/*******************************************************************************
+ * Return a pointer to the 'entry_point_info' structure of the next image for
+ * the security state specified. BL33 corresponds to the non-secure image type
+ * while BL32 corresponds to the secure image type. A NULL pointer is returned
+ * if the image does not exist.
+ ******************************************************************************/
+struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+	entry_point_info_t *next_image_info;
+
+	assert(sec_state_is_valid(type));
+	next_image_info = (type == NON_SECURE)
+			? &bl33_image_ep_info : &bl32_image_ep_info;
+	/*
+	 * None of the images on the ARM development platforms can have 0x0
+	 * as the entrypoint
+	 */
+	if (next_image_info->pc)
+		return next_image_info;
+	else
+		return NULL;
+}
+
+/*******************************************************************************
+ * Perform any BL31 early platform setup common to ARM standard platforms.
+ * Here is an opportunity to copy parameters passed by the calling EL (S-EL1
+ * in BL2 & EL3 in BL1) before they are lost (potentially). This needs to be
+ * done before the MMU is initialized so that the memory layout can be used
+ * while creating page tables. BL2 has flushed this information to memory, so
+ * we are guaranteed to pick up good data.
+ ******************************************************************************/
+void __init brcm_bl31_early_platform_setup(void *from_bl2,
+					  uintptr_t soc_fw_config,
+					  uintptr_t hw_config,
+					  void *plat_params_from_bl2)
+{
+	/* Initialize the console to provide early debug support */
+	bcm_console_boot_init();
+
+	/* Initialize delay timer driver using SP804 dual timer 0 */
+	sp804_timer_init(SP804_TIMER0_BASE,
+			 SP804_TIMER0_CLKMULT, SP804_TIMER0_CLKDIV);
+
+#if RESET_TO_BL31
+	/* There are no parameters from BL2 if BL31 is a reset vector */
+	assert(from_bl2 == NULL);
+	assert(plat_params_from_bl2 == NULL);
+
+# ifdef BL32_BASE
+	/* Populate entry point information for BL32 */
+	SET_PARAM_HEAD(&bl32_image_ep_info,
+		       PARAM_EP,
+		       VERSION_1,
+		       0);
+	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
+	bl32_image_ep_info.pc = BL32_BASE;
+	bl32_image_ep_info.spsr = brcm_get_spsr_for_bl32_entry();
+# endif /* BL32_BASE */
+
+	/* Populate entry point information for BL33 */
+	SET_PARAM_HEAD(&bl33_image_ep_info,
+		       PARAM_EP,
+		       VERSION_1,
+		       0);
+	/*
+	 * Tell BL31 where the non-trusted software image
+	 * is located and the entry state information
+	 */
+	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
+
+	bl33_image_ep_info.spsr = brcm_get_spsr_for_bl33_entry();
+	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+
+# if ARM_LINUX_KERNEL_AS_BL33
+	/*
+	 * According to the file ``Documentation/arm64/booting.txt`` of the
+	 * Linux kernel tree, Linux expects the physical address of the device
+	 * tree blob (DTB) in x0, while x1-x3 are reserved for future use and
+	 * must be 0.
+	 */
+	bl33_image_ep_info.args.arg0 = (u_register_t)PRELOADED_DTB_BASE;
+	bl33_image_ep_info.args.arg1 = 0U;
+	bl33_image_ep_info.args.arg2 = 0U;
+	bl33_image_ep_info.args.arg3 = 0U;
+# endif
+
+#else /* RESET_TO_BL31 */
+
+	/*
+	 * In debug builds, BL2 passes the magic value
+	 * BRCM_BL31_PLAT_PARAM_VAL in 'plat_params_from_bl2' so that BL31
+	 * can sanity-check the handover. In release builds the assertion is
+	 * compiled out and the value is unused.
+	 */
+	assert(((unsigned long long)plat_params_from_bl2) ==
+		BRCM_BL31_PLAT_PARAM_VAL);
+
+	/*
+	 * Check that the params passed from BL2 are not NULL.
+	 */
+	bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
+
+	assert(params_from_bl2 != NULL);
+	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
+	assert(params_from_bl2->h.version >= VERSION_2);
+
+	bl_params_node_t *bl_params = params_from_bl2->head;
+
+	/*
+	 * Copy BL33 and BL32 (if present), entry point information.
+	 * They are stored in Secure RAM, in BL2's address space.
+	 */
+	while (bl_params != NULL) {
+		if (bl_params->image_id == BL32_IMAGE_ID &&
+		    bl_params->image_info->h.attr != IMAGE_ATTRIB_SKIP_LOADING)
+			bl32_image_ep_info = *bl_params->ep_info;
+
+		if (bl_params->image_id == BL33_IMAGE_ID)
+			bl33_image_ep_info = *bl_params->ep_info;
+
+		bl_params = bl_params->next_params_info;
+	}
+
+	if (bl33_image_ep_info.pc == 0U)
+		panic();
+#endif /* RESET_TO_BL31 */
+
+#ifdef BL33_SHARED_DDR_BASE
+	/* Pass information to BL33 through x0 */
+	bl33_image_ep_info.args.arg0 = (u_register_t)BL33_SHARED_DDR_BASE;
+	bl33_image_ep_info.args.arg1 = 0ULL;
+	bl33_image_ep_info.args.arg2 = 0ULL;
+	bl33_image_ep_info.args.arg3 = 0ULL;
+#endif
+}
+
+void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
+		u_register_t arg2, u_register_t arg3)
+{
+#ifdef BL31_LOG_LEVEL
+	SET_LOG_LEVEL(BL31_LOG_LEVEL);
+#endif
+
+	brcm_bl31_early_platform_setup((void *)arg0, arg1, arg2, (void *)arg3);
+
+	plat_bcm_bl31_early_platform_setup((void *)arg0, (void *)arg3);
+
+#ifdef DRIVER_CC_ENABLE
+	/*
+	 * Initialize Interconnect for this cluster during cold boot.
+	 * No need for locks as no other CPU is active.
+	 */
+	plat_brcm_interconnect_init();
+
+	/*
+	 * Enable Interconnect coherency for the primary CPU's cluster.
+	 * Earlier bootloader stages might already do this (e.g. Trusted
+	 * Firmware's BL1 does it) but we can't assume so. There is no harm in
+	 * executing this code twice anyway.
+	 * Platform specific PSCI code will enable coherency for other
+	 * clusters.
+	 */
+	plat_brcm_interconnect_enter_coherency();
+#endif
+}
+
+/*******************************************************************************
+ * Perform any BL31 platform setup common to Broadcom platforms
+ ******************************************************************************/
+void brcm_bl31_platform_setup(void)
+{
+	/* Initialize the GIC driver, cpu and distributor interfaces */
+	plat_brcm_gic_driver_init();
+	plat_brcm_gic_init();
+
+	/* Initialize power controller before setting up topology */
+	plat_brcm_pwrc_setup();
+}
+
+/*******************************************************************************
+ * Perform any BL31 platform runtime setup common to Broadcom platforms,
+ * prior to exit from BL31.
+ ******************************************************************************/
+void brcm_bl31_plat_runtime_setup(void)
+{
+	console_switch_state(CONSOLE_FLAG_RUNTIME);
+
+	/* Initialize the runtime console */
+	bcm_console_runtime_init();
+}
+
+void bl31_platform_setup(void)
+{
+	brcm_bl31_platform_setup();
+
+	/* Initialize the secure environment */
+	plat_brcm_security_setup();
+}
+
+void bl31_plat_runtime_setup(void)
+{
+	brcm_bl31_plat_runtime_setup();
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup shared between
+ * Broadcom platforms. This only does basic initialization. Later
+ * architectural setup (bl31_arch_setup()) does not do anything platform
+ * specific.
+ ******************************************************************************/
+void __init brcm_bl31_plat_arch_setup(void)
+{
+#ifndef MMU_DISABLED
+	const mmap_region_t bl_regions[] = {
+		MAP_REGION_FLAT(BL31_BASE, BL31_END - BL31_BASE,
+				MT_MEMORY | MT_RW | MT_SECURE),
+		MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE,
+				MT_CODE | MT_SECURE),
+		MAP_REGION_FLAT(BL_RO_DATA_BASE,
+				BL_RO_DATA_END - BL_RO_DATA_BASE,
+				MT_RO_DATA | MT_SECURE),
+#if USE_COHERENT_MEM
+		MAP_REGION_FLAT(BL_COHERENT_RAM_BASE,
+				BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE,
+				MT_DEVICE | MT_RW | MT_SECURE),
+#endif
+		{0}
+	};
+
+	setup_page_tables(bl_regions, plat_brcm_get_mmap());
+
+	enable_mmu_el3(0);
+#endif
+}
+
+void __init bl31_plat_arch_setup(void)
+{
+	brcm_bl31_plat_arch_setup();
+}
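
The three plat_* hooks above are declared weak precisely so that an individual Broadcom board can override them. As a minimal sketch only (the file path and the body are hypothetical, not part of this patch), a board port could hook its early setup like so:

/* plat/brcm/board/<board>/src/bl31_setup.c - hypothetical override */
#include <common/bl_common.h>
#include <common/debug.h>

#include <plat_brcm.h>
#include <platform_def.h>

void plat_bcm_bl31_early_platform_setup(void *from_bl2,
					bl_params_t *plat_params_from_bl2)
{
	/*
	 * Board-specific early setup would go here, e.g. board straps,
	 * pinmux or early clock configuration.
	 */
	INFO("BL31: board-specific early setup\n");
}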
diff --git a/plat/brcm/common/brcm_ccn.c b/plat/brcm/common/brcm_ccn.c
new file mode 100644
index 0000000..9396aaa
--- /dev/null
+++ b/plat/brcm/common/brcm_ccn.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <drivers/arm/ccn.h>
+
+#include <platform_def.h>
+
+static const unsigned char master_to_rn_id_map[] = {
+	PLAT_BRCM_CLUSTER_TO_CCN_ID_MAP
+};
+
+static const ccn_desc_t bcm_ccn_desc = {
+	.periphbase = PLAT_BRCM_CCN_BASE,
+	.num_masters = ARRAY_SIZE(master_to_rn_id_map),
+	.master_to_rn_id_map = master_to_rn_id_map
+};
+
+void plat_brcm_interconnect_init(void)
+{
+	ccn_init(&bcm_ccn_desc);
+}
+
+void plat_brcm_interconnect_enter_coherency(void)
+{
+	ccn_enter_snoop_dvm_domain(1 << MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
+
+void plat_brcm_interconnect_exit_coherency(void)
+{
+	ccn_exit_snoop_dvm_domain(1 << MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
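
As noted in the BL31 early setup above, only the primary CPU's cluster is brought into coherency during cold boot; secondary clusters are expected to be handled by the platform PSCI code. A minimal sketch of that side, with hypothetical handler names (the PSCI glue itself is not part of this patch):

/*
 * Hypothetical PSCI helpers: a secondary cluster joins the CCN snoop/DVM
 * domains when it is powered on, and leaves them again on the power-down
 * path.
 */
static void brcm_cluster_on_finish(void)
{
	plat_brcm_interconnect_enter_coherency();
}

static void brcm_cluster_off(void)
{
	plat_brcm_interconnect_exit_coherency();
}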
diff --git a/plat/brcm/common/brcm_gicv3.c b/plat/brcm/common/brcm_gicv3.c
new file mode 100644
index 0000000..c4137c0
--- /dev/null
+++ b/plat/brcm/common/brcm_gicv3.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <drivers/arm/gicv3.h>
+#include <plat/common/platform.h>
+
+#include <platform_def.h>
+
+/* The GICv3 driver only needs to be initialized in EL3 */
+static uintptr_t brcm_rdistif_base_addrs[PLATFORM_CORE_COUNT];
+
+static const interrupt_prop_t brcm_interrupt_props[] = {
+	/* G1S interrupts */
+	PLAT_BRCM_G1S_IRQ_PROPS(INTR_GROUP1S),
+	/* G0 interrupts */
+	PLAT_BRCM_G0_IRQ_PROPS(INTR_GROUP0)
+};
+
+/*
+ * MPIDR hashing function for translating MPIDRs read from GICR_TYPER register
+ * to core position.
+ *
+ * Calculating the core position depends on the MPIDR_EL1.MT bit. However,
+ * affinity values read from GICR_TYPER don't have an MT field. To reuse the
+ * same translation used for CPUs, we insert the MT bit read from the PE's
+ * MPIDR into the value read from GICR_TYPER.
+ *
+ * Assumptions:
+ *
+ *   - All CPUs implemented in the system have MPIDR_EL1.MT bit set;
+ *   - No CPUs implemented in the system use affinity level 3.
+ */
+static unsigned int brcm_gicv3_mpidr_hash(u_register_t mpidr)
+{
+	mpidr |= (read_mpidr_el1() & MPIDR_MT_MASK);
+	return plat_core_pos_by_mpidr(mpidr);
+}
+
+static const gicv3_driver_data_t brcm_gic_data = {
+	.gicd_base = PLAT_BRCM_GICD_BASE,
+	.gicr_base = PLAT_BRCM_GICR_BASE,
+	.interrupt_props = brcm_interrupt_props,
+	.interrupt_props_num = ARRAY_SIZE(brcm_interrupt_props),
+	.rdistif_num = PLATFORM_CORE_COUNT,
+	.rdistif_base_addrs = brcm_rdistif_base_addrs,
+	.mpidr_to_core_pos = brcm_gicv3_mpidr_hash
+};
+
+void plat_brcm_gic_driver_init(void)
+{
+	/*
+	 * TODO: the driver is already initialized in EL3; check whether it
+	 * needs to be re-initialized here when running in S-EL1.
+	 */
+	gicv3_driver_init(&brcm_gic_data);
+}
+
+void plat_brcm_gic_init(void)
+{
+	gicv3_distif_init();
+	gicv3_rdistif_init(plat_my_core_pos());
+	gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+void plat_brcm_gic_cpuif_enable(void)
+{
+	gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+void plat_brcm_gic_cpuif_disable(void)
+{
+	gicv3_cpuif_disable(plat_my_core_pos());
+}
+
+void plat_brcm_gic_pcpu_init(void)
+{
+	gicv3_rdistif_init(plat_my_core_pos());
+}
+
+void plat_brcm_gic_redistif_on(void)
+{
+	gicv3_rdistif_on(plat_my_core_pos());
+}
+
+void plat_brcm_gic_redistif_off(void)
+{
+	gicv3_rdistif_off(plat_my_core_pos());
+}
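
These wrappers keep the GICv3 driver details out of the rest of the platform code; the PSCI handlers only need the call order below. A condensed sketch with hypothetical handler names (not defined by this patch):

/* Hypothetical PSCI handlers showing the usual GIC call order. */
static void brcm_cpu_off_prepare(void)
{
	/* Quiesce the CPU interface before the core is powered down. */
	plat_brcm_gic_cpuif_disable();
	plat_brcm_gic_redistif_off();
}

static void brcm_cpu_on_finish(void)
{
	/* Bring the redistributor and CPU interface back up on wake-up. */
	plat_brcm_gic_redistif_on();
	plat_brcm_gic_pcpu_init();
	plat_brcm_gic_cpuif_enable();
}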
diff --git a/plat/brcm/common/brcm_mhu.c b/plat/brcm/common/brcm_mhu.c
new file mode 100644
index 0000000..56f44e0
--- /dev/null
+++ b/plat/brcm/common/brcm_mhu.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <drivers/delay_timer.h>
+#include <lib/bakery_lock.h>
+
+#include <brcm_mhu.h>
+#include <platform_def.h>
+
+#include "m0_ipc.h"
+
+#define PLAT_MHU_INTR_REG	AP_TO_SCP_MAILBOX1
+
+/* SCP MHU secure channel registers */
+#define SCP_INTR_S_STAT		CRMU_IHOST_SW_PERSISTENT_REG11
+#define SCP_INTR_S_SET		CRMU_IHOST_SW_PERSISTENT_REG11
+#define SCP_INTR_S_CLEAR	CRMU_IHOST_SW_PERSISTENT_REG11
+
+/* CPU MHU secure channel registers */
+#define CPU_INTR_S_STAT		CRMU_IHOST_SW_PERSISTENT_REG10
+#define CPU_INTR_S_SET		CRMU_IHOST_SW_PERSISTENT_REG10
+#define CPU_INTR_S_CLEAR	CRMU_IHOST_SW_PERSISTENT_REG10
+
+static DEFINE_BAKERY_LOCK(bcm_lock);
+
+/*
+ * Slot 31 is reserved because the MHU hardware uses this register bit to
+ * indicate a non-secure access attempt. The total number of available slots is
+ * therefore 31 [30:0].
+ */
+#define MHU_MAX_SLOT_ID		30
+
+void mhu_secure_message_start(unsigned int slot_id)
+{
+	int iter = 1000000;
+
+	assert(slot_id <= MHU_MAX_SLOT_ID);
+
+	bakery_lock_get(&bcm_lock);
+	/* Make sure any previous command has finished */
+	do {
+		if (!(mmio_read_32(PLAT_BRCM_MHU_BASE + CPU_INTR_S_STAT) &
+		   (1 << slot_id)))
+			break;
+
+		udelay(1);
+
+	} while (--iter);
+
+	assert(iter != 0);
+}
+
+void mhu_secure_message_send(unsigned int slot_id)
+{
+	uint32_t response, iter = 1000000;
+
+	assert(slot_id <= MHU_MAX_SLOT_ID);
+	assert(!(mmio_read_32(PLAT_BRCM_MHU_BASE + CPU_INTR_S_STAT) &
+							(1 << slot_id)));
+
+	/* Send command to SCP */
+	mmio_setbits_32(PLAT_BRCM_MHU_BASE + CPU_INTR_S_SET, 1 << slot_id);
+	mmio_write_32(CRMU_MAIL_BOX0, MCU_IPC_MCU_CMD_SCPI);
+	mmio_write_32(PLAT_BRCM_MHU_BASE + PLAT_MHU_INTR_REG, 0x1);
+
+	/* Wait until IPC transport acknowledges reception of SCP command */
+	do {
+		response = mmio_read_32(CRMU_MAIL_BOX0);
+		if ((response & ~MCU_IPC_CMD_REPLY_MASK) ==
+		   (MCU_IPC_CMD_DONE_MASK | MCU_IPC_MCU_CMD_SCPI))
+			break;
+
+		udelay(1);
+
+	} while (--iter);
+
+	assert(iter != 0);
+}
+
+uint32_t mhu_secure_message_wait(void)
+{
+	/* Wait for response from SCP */
+	uint32_t response, iter = 1000000;
+
+	do {
+		response = mmio_read_32(PLAT_BRCM_MHU_BASE + SCP_INTR_S_STAT);
+		if (response != 0U)
+			break;
+
+		udelay(1);
+	} while (--iter);
+	assert(iter != 0);
+
+	return response;
+}
+
+void mhu_secure_message_end(unsigned int slot_id)
+{
+	assert(slot_id <= MHU_MAX_SLOT_ID);
+
+	/*
+	 * Clear any response we got by writing one in the relevant slot bit to
+	 * the CLEAR register
+	 */
+	mmio_clrbits_32(PLAT_BRCM_MHU_BASE + SCP_INTR_S_CLEAR, 1 << slot_id);
+	bakery_lock_release(&bcm_lock);
+}
+
+void mhu_secure_init(void)
+{
+	bakery_lock_init(&bcm_lock);
+
+	/*
+	 * The STAT register resets to zero. Ensure it is in the expected state,
+	 * as a stale or garbage value would make us think it's a message we've
+	 * already sent.
+	 */
+	mmio_write_32(PLAT_BRCM_MHU_BASE + CPU_INTR_S_STAT, 0);
+	mmio_write_32(PLAT_BRCM_MHU_BASE + SCP_INTR_S_STAT, 0);
+}
+
+void plat_brcm_pwrc_setup(void)
+{
+	mhu_secure_init();
+}
diff --git a/plat/brcm/common/brcm_mhu.h b/plat/brcm/common/brcm_mhu.h
new file mode 100644
index 0000000..6c89a34
--- /dev/null
+++ b/plat/brcm/common/brcm_mhu.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef BRCM_MHU_H
+#define BRCM_MHU_H
+
+#include <stdint.h>
+
+void mhu_secure_message_start(unsigned int slot_id);
+void mhu_secure_message_send(unsigned int slot_id);
+uint32_t mhu_secure_message_wait(void);
+void mhu_secure_message_end(unsigned int slot_id);
+
+void mhu_secure_init(void);
+
+#endif	/* BRCM_MHU_H */
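
The four message calls above always bracket one transaction: start acquires the lock and waits for any previous command to drain, the caller fills the shared-memory payload, send raises the doorbell, wait (when a reply is expected) polls for the SCP's response, and end clears the response and releases the lock. brcm_scpi.c below follows exactly this pattern; a condensed sketch with the payload write elided:

/* Condensed sketch of one secure MHU transaction on slot 0. */
static uint32_t brcm_mhu_transaction_sketch(void)
{
	uint32_t status;

	mhu_secure_message_start(0);
	/* ... write the command and payload into AP-to-SCP shared memory ... */
	mhu_secure_message_send(0);
	status = mhu_secure_message_wait();	/* SCP_INTR_S_STAT contents */
	mhu_secure_message_end(0);

	return status;
}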
diff --git a/plat/brcm/common/brcm_scpi.c b/plat/brcm/common/brcm_scpi.c
new file mode 100644
index 0000000..0a703cb
--- /dev/null
+++ b/plat/brcm/common/brcm_scpi.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <lib/utils.h>
+#include <plat/common/platform.h>
+
+#include <brcm_mhu.h>
+#include <brcm_scpi.h>
+#include <platform_def.h>
+
+#define SCPI_SHARED_MEM_SCP_TO_AP	(PLAT_SCP_COM_SHARED_MEM_BASE)
+#define SCPI_SHARED_MEM_AP_TO_SCP	(PLAT_SCP_COM_SHARED_MEM_BASE \
+								 + 0x100)
+
+/* Header and payload addresses for commands from AP to SCP */
+#define SCPI_CMD_HEADER_AP_TO_SCP		\
+	((scpi_cmd_t *) SCPI_SHARED_MEM_AP_TO_SCP)
+#define SCPI_CMD_PAYLOAD_AP_TO_SCP		\
+	((void *) (SCPI_SHARED_MEM_AP_TO_SCP + sizeof(scpi_cmd_t)))
+
+/* Header and payload addresses for responses from SCP to AP */
+#define SCPI_RES_HEADER_SCP_TO_AP \
+	((scpi_cmd_t *) SCPI_SHARED_MEM_SCP_TO_AP)
+#define SCPI_RES_PAYLOAD_SCP_TO_AP \
+	((void *) (SCPI_SHARED_MEM_SCP_TO_AP + sizeof(scpi_cmd_t)))
+
+/* ID of the MHU slot used for the SCPI protocol */
+#define SCPI_MHU_SLOT_ID		0
+
+static void scpi_secure_message_start(void)
+{
+	mhu_secure_message_start(SCPI_MHU_SLOT_ID);
+}
+
+static void scpi_secure_message_send(size_t payload_size)
+{
+	/*
+	 * Ensure that any write to the SCPI payload area is seen by SCP before
+	 * we write to the MHU register. If these 2 writes were reordered by
+	 * the CPU then SCP would read stale payload data
+	 */
+	dmbst();
+
+	mhu_secure_message_send(SCPI_MHU_SLOT_ID);
+}
+
+static void scpi_secure_message_receive(scpi_cmd_t *cmd)
+{
+	uint32_t mhu_status;
+
+	assert(cmd != NULL);
+
+	mhu_status = mhu_secure_message_wait();
+
+	/* Expect an SCPI message, reject any other protocol */
+	if (mhu_status != (1 << SCPI_MHU_SLOT_ID)) {
+		ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
+		      mhu_status);
+		panic();
+	}
+
+	/*
+	 * Ensure that any read to the SCPI payload area is done after reading
+	 * the MHU register. If these 2 reads were reordered then the CPU would
+	 * read invalid payload data
+	 */
+	dmbld();
+
+	memcpy(cmd, (void *) SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd));
+}
+
+static void scpi_secure_message_end(void)
+{
+	mhu_secure_message_end(SCPI_MHU_SLOT_ID);
+}
+
+int scpi_wait_ready(void)
+{
+	scpi_cmd_t scpi_cmd;
+
+	VERBOSE("Waiting for SCP_READY command...\n");
+
+	/* Get a message from the SCP */
+	scpi_secure_message_start();
+	scpi_secure_message_receive(&scpi_cmd);
+	scpi_secure_message_end();
+
+	/* We are expecting 'SCP Ready'; produce the appropriate error if not */
+	scpi_status_t status = SCP_OK;
+
+	if (scpi_cmd.id != SCPI_CMD_SCP_READY) {
+		ERROR("Unexpected SCP command: expected #%u, received #%u\n",
+		      SCPI_CMD_SCP_READY, scpi_cmd.id);
+		status = SCP_E_SUPPORT;
+	} else if (scpi_cmd.size != 0) {
+		ERROR("SCP_READY cmd has incorrect size: expected 0, got %u\n",
+		      scpi_cmd.size);
+		status = SCP_E_SIZE;
+	}
+
+	VERBOSE("Sending response for SCP_READY command\n");
+
+	/*
+	 * Send our response back to SCP.
+	 * We are using the same SCPI header, just update the status field.
+	 */
+	scpi_cmd.status = status;
+	scpi_secure_message_start();
+	memcpy((void *) SCPI_SHARED_MEM_AP_TO_SCP, &scpi_cmd, sizeof(scpi_cmd));
+	scpi_secure_message_send(0);
+	scpi_secure_message_end();
+
+	return status == SCP_OK ? 0 : -1;
+}
+
+void scpi_set_brcm_power_state(unsigned int mpidr,
+		scpi_power_state_t cpu_state, scpi_power_state_t cluster_state,
+		scpi_power_state_t brcm_state)
+{
+	scpi_cmd_t *cmd;
+	uint32_t state = 0;
+	uint32_t *payload_addr;
+
+#if ARM_PLAT_MT
+	/*
+	 * On multi-threaded platforms, affinity level 0 is the thread ID.
+	 * The SCPI driver only caters for single-threaded cores, so the
+	 * thread ID (always 0) is ignored and the CPU/cluster IDs are taken
+	 * from affinity levels 1 and 2.
+	 */
+	state |= (mpidr >> MPIDR_AFF1_SHIFT) & 0x0f;	/* CPU ID */
+	state |= ((mpidr >> MPIDR_AFF2_SHIFT) & 0x0f) << 4;	/* Cluster ID */
+#else
+	state |= mpidr & 0x0f;	/* CPU ID */
+	state |= (mpidr & 0xf00) >> 4;	/* Cluster ID */
+#endif /* ARM_PLAT_MT */
+
+	state |= cpu_state << 8;
+	state |= cluster_state << 12;
+	state |= brcm_state << 16;
+
+	scpi_secure_message_start();
+
+	/* Populate the command header */
+	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
+	cmd->id = SCPI_CMD_SET_POWER_STATE;
+	cmd->set = SCPI_SET_NORMAL;
+	cmd->sender = 0;
+	cmd->size = sizeof(state);
+	/* Populate the command payload */
+	payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
+	*payload_addr = state;
+	scpi_secure_message_send(sizeof(state));
+
+	/*
+	 * SCP does not reply to this command in order to avoid MHU interrupts
+	 * from the sender, which could interfere with its power state request.
+	 */
+	scpi_secure_message_end();
+}
+
+/*
+ * Query and obtain power state from SCP.
+ *
+ * In response to the query, SCP returns power states of all CPUs in all
+ * clusters of the system. The returned response is then filtered based on the
+ * supplied MPIDR. Power states of the requested cluster and the CPUs within it
+ * are updated via the supplied non-NULL pointer arguments.
+ *
+ * Returns 0 on success, or -1 on errors.
+ */
+int scpi_get_brcm_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
+		unsigned int *cluster_state_p)
+{
+	scpi_cmd_t *cmd;
+	scpi_cmd_t response;
+	int power_state, cpu, cluster, rc = -1;
+
+	/*
+	 * Extract CPU and cluster membership of the given MPIDR. SCPI caters
+	 * for only up to 0xf clusters, and 8 CPUs per cluster
+	 */
+	cpu = mpidr & MPIDR_AFFLVL_MASK;
+	cluster = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+	if (cpu >= 8 || cluster >= 0xf)
+		return -1;
+
+	scpi_secure_message_start();
+
+	/* Populate request headers */
+	zeromem(SCPI_CMD_HEADER_AP_TO_SCP, sizeof(*cmd));
+	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
+	cmd->id = SCPI_CMD_GET_POWER_STATE;
+
+	/*
+	 * Send message and wait for SCP's response
+	 */
+	scpi_secure_message_send(0);
+	scpi_secure_message_receive(&response);
+
+	if (response.status != SCP_OK)
+		goto exit;
+
+	/* Validate SCP response */
+	if (!CHECK_RESPONSE(response, cluster))
+		goto exit;
+
+	/* Extract power states for required cluster */
+	power_state = *(((uint16_t *) SCPI_RES_PAYLOAD_SCP_TO_AP) + cluster);
+	if (CLUSTER_ID(power_state) != cluster)
+		goto exit;
+
+	/* Update power states via the supplied pointers */
+	if (cluster_state_p)
+		*cluster_state_p = CLUSTER_POWER_STATE(power_state);
+	if (cpu_state_p)
+		*cpu_state_p = CPU_POWER_STATE(power_state);
+	rc = 0;
+
+exit:
+	scpi_secure_message_end();
+	return rc;
+}
+
+uint32_t scpi_sys_power_state(scpi_system_state_t system_state)
+{
+	scpi_cmd_t *cmd;
+	uint8_t *payload_addr;
+
+	scpi_secure_message_start();
+
+	/* Populate the command header */
+	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
+	cmd->id = SCPI_CMD_SYS_POWER_STATE;
+	cmd->set = 0;
+	cmd->sender = 0;
+	cmd->size = sizeof(*payload_addr);
+	/* Populate the command payload */
+	payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
+	*payload_addr = system_state & 0xff;
+	scpi_secure_message_send(sizeof(*payload_addr));
+
+	scpi_secure_message_end();
+
+	return SCP_OK;
+}
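
For orientation, the SET_POWER_STATE command above is what a Broadcom PSCI backend would issue when taking a core offline. A minimal sketch under that assumption (the handler name is hypothetical; the real PSCI glue is not part of this patch):

/*
 * Hypothetical PSCI power-down hook: ask the SCP to power off the calling
 * CPU while keeping its cluster and the system on.
 */
static void brcm_cpu_pwr_down(void)
{
	scpi_set_brcm_power_state(read_mpidr_el1(), scpi_power_off,
				  scpi_power_on, scpi_power_on);
}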
diff --git a/plat/brcm/common/brcm_scpi.h b/plat/brcm/common/brcm_scpi.h
new file mode 100644
index 0000000..f3b658f
--- /dev/null
+++ b/plat/brcm/common/brcm_scpi.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef BRCM_SCPI_H
+#define BRCM_SCPI_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*
+ * An SCPI command consists of a header and a payload.
+ * The following structure describes the header. It is 64-bit long.
+ */
+typedef struct {
+	/* Command ID */
+	uint32_t id		: 7;
+	/* Set ID. Identifies whether this is a standard or extended command. */
+	uint32_t set		: 1;
+	/* Sender ID to match a reply. The value is sender specific. */
+	uint32_t sender		: 8;
+	/* Size of the payload in bytes (0 - 511) */
+	uint32_t size		: 9;
+	uint32_t reserved	: 7;
+	/*
+	 * Status indicating the success of a command.
+	 * See the enum below.
+	 */
+	uint32_t status;
+} scpi_cmd_t;
+
+typedef enum {
+	SCPI_SET_NORMAL = 0,	/* Normal SCPI commands */
+	SCPI_SET_EXTENDED	/* Extended SCPI commands */
+} scpi_set_t;
+
+enum {
+	SCP_OK = 0,	/* Success */
+	SCP_E_PARAM,	/* Invalid parameter(s) */
+	SCP_E_ALIGN,	/* Invalid alignment */
+	SCP_E_SIZE,	/* Invalid size */
+	SCP_E_HANDLER,	/* Invalid handler or callback */
+	SCP_E_ACCESS,	/* Invalid access or permission denied */
+	SCP_E_RANGE,	/* Value out of range */
+	SCP_E_TIMEOUT,	/* Timeout has occurred */
+	SCP_E_NOMEM,	/* Invalid memory area or pointer */
+	SCP_E_PWRSTATE,	/* Invalid power state */
+	SCP_E_SUPPORT,	/* Feature not supported or disabled */
+	SCPI_E_DEVICE,	/* Device error */
+	SCPI_E_BUSY,	/* Device is busy */
+};
+
+typedef uint32_t scpi_status_t;
+typedef enum {
+	SCPI_CMD_SCP_READY = 0x01,
+	SCPI_CMD_SET_POWER_STATE = 0x03,
+	SCPI_CMD_GET_POWER_STATE = 0x04,
+	SCPI_CMD_SYS_POWER_STATE = 0x05
+} scpi_command_t;
+
+/*
+ * Macros to parse SCP response to GET_POWER_STATE command
+ *
+ *   [3:0] : cluster ID
+ *   [7:4] : cluster state: 0 = on; 3 = off; rest are reserved
+ *   [15:8]: on/off state for individual CPUs in the cluster
+ *
+ * The payload is little-endian
+ */
+#define CLUSTER_ID(_resp)		((_resp) & 0xf)
+#define CLUSTER_POWER_STATE(_resp)	(((_resp) >> 4) & 0xf)
+
+/* Result is a bit mask of CPU on/off states in the cluster */
+#define CPU_POWER_STATE(_resp)		(((_resp) >> 8) & 0xff)
+
+/*
+ * For GET_POWER_STATE, SCP returns the power states of every cluster. The
+ * size of response depends on the number of clusters in the system. The
+ * SCP-to-AP payload contains 2 bytes per cluster. Make sure the response is
+ * large enough to contain power states of a given cluster
+ */
+#define CHECK_RESPONSE(_resp, _clus)  ((_resp).size >= (((_clus) + 1) * 2))
+
+typedef enum {
+	scpi_power_on = 0,
+	scpi_power_retention = 1,
+	scpi_power_off = 3,
+} scpi_power_state_t;
+
+typedef enum {
+	scpi_system_shutdown = 0,
+	scpi_system_reboot = 1,
+	scpi_system_reset = 2
+} scpi_system_state_t;
+
+int scpi_wait_ready(void);
+void scpi_set_brcm_power_state(unsigned int mpidr,
+			       scpi_power_state_t cpu_state,
+			       scpi_power_state_t cluster_state,
+			       scpi_power_state_t brcm_state);
+int scpi_get_brcm_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
+			      unsigned int *cluster_state_p);
+uint32_t scpi_sys_power_state(scpi_system_state_t system_state);
+
+#endif	/* BRCM_SCPI_H */
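
To make the response layout concrete: a payload half-word of 0x0330 for cluster 0 decodes to cluster ID 0, cluster state 3 (off) and a per-CPU mask of 0x03. A small host-side check, purely illustrative and assuming this header is on the include path:

#include <assert.h>
#include <stdint.h>

#include <brcm_scpi.h>

int main(void)
{
	/* Example GET_POWER_STATE payload half-word for cluster 0 */
	uint16_t power_state = 0x0330;

	assert(CLUSTER_ID(power_state) == 0);
	assert(CLUSTER_POWER_STATE(power_state) == scpi_power_off);
	assert(CPU_POWER_STATE(power_state) == 0x03);
	return 0;
}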