plat: marvell: Add common ARMADA platform components

Add common Marvell ARMADA platform components.
This patch also includes common components for Marvell
ARMADA 8K platforms.

Change-Id: I42192fdc6525a42e46b3ac2ad63c83db9bcbfeaf
Signed-off-by: Hanna Hawa <hannah@marvell.com>
Signed-off-by: Konstantin Porotchkin <kostap@marvell.com>
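
For reference, with these makefiles in place a flash image for an a8k-based
platform is expected to be built roughly as follows (the platform name and
SCP_BL2 path below are illustrative placeholders, not part of this patch):

    make PLAT=a80x0 SCP_BL2=/path/to/mss_fw.bin mrvl_flash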
diff --git a/plat/marvell/a8k/common/a8k_common.mk b/plat/marvell/a8k/common/a8k_common.mk
new file mode 100644
index 0000000..3bcce96
--- /dev/null
+++ b/plat/marvell/a8k/common/a8k_common.mk
@@ -0,0 +1,122 @@
+#
+# Copyright (C) 2016 - 2018 Marvell International Ltd.
+#
+# SPDX-License-Identifier:     BSD-3-Clause
+# https://spdx.org/licenses
+
+include tools/doimage/doimage.mk
+
+PLAT_FAMILY		:= a8k
+PLAT_FAMILY_BASE	:= plat/marvell/$(PLAT_FAMILY)
+PLAT_INCLUDE_BASE	:= include/plat/marvell/$(PLAT_FAMILY)
+PLAT_COMMON_BASE	:= $(PLAT_FAMILY_BASE)/common
+MARVELL_DRV_BASE	:= drivers/marvell
+MARVELL_COMMON_BASE	:= plat/marvell/common
+
+ERRATA_A72_859971	:= 1
+
+# Enable MSS support for a8k family
+MSS_SUPPORT		:= 1
+
+# Disable EL3 cache for power management
+BL31_CACHE_DISABLE	:= 1
+$(eval $(call add_define,BL31_CACHE_DISABLE))
+
+$(eval $(call add_define,PCI_EP_SUPPORT))
+$(eval $(call assert_boolean,PCI_EP_SUPPORT))
+
+DOIMAGEPATH		?=	tools/doimage
+DOIMAGETOOL		?=	${DOIMAGEPATH}/doimage
+
+ROM_BIN_EXT ?= $(BUILD_PLAT)/ble.bin
+DOIMAGE_FLAGS	+= -b $(ROM_BIN_EXT) $(NAND_DOIMAGE_FLAGS) $(DOIMAGE_SEC_FLAGS)
+
+# This define specifies DDR type for BLE
+$(eval $(call add_define,CONFIG_DDR4))
+
+MARVELL_GIC_SOURCES	:=	drivers/arm/gic/common/gic_common.c	\
+				drivers/arm/gic/v2/gicv2_main.c		\
+				drivers/arm/gic/v2/gicv2_helpers.c	\
+				plat/common/plat_gicv2.c
+
+ATF_INCLUDES		:=	-Iinclude/common/tbbr
+
+PLAT_INCLUDES		:=	-I$(PLAT_FAMILY_BASE)/$(PLAT)		\
+				-I$(PLAT_COMMON_BASE)/include		\
+				-I$(PLAT_INCLUDE_BASE)/common		\
+				-Iinclude/drivers/marvell		\
+				-Iinclude/drivers/marvell/mochi		\
+				$(ATF_INCLUDES)
+
+PLAT_BL_COMMON_SOURCES	:=	$(PLAT_COMMON_BASE)/aarch64/a8k_common.c \
+				drivers/console/aarch64/console.S	 \
+				drivers/ti/uart/aarch64/16550_console.S
+
+BLE_PORTING_SOURCES	:=	$(PLAT_FAMILY_BASE)/$(PLAT)/board/dram_port.c \
+				$(PLAT_FAMILY_BASE)/$(PLAT)/board/marvell_plat_config.c
+
+MARVELL_MOCHI_DRV	+=	$(MARVELL_DRV_BASE)/mochi/cp110_setup.c
+
+BLE_SOURCES		:=	$(PLAT_COMMON_BASE)/plat_ble_setup.c	\
+				$(MARVELL_MOCHI_DRV)			\
+				$(MARVELL_DRV_BASE)/i2c/a8k_i2c.c	\
+				$(PLAT_COMMON_BASE)/plat_pm.c		\
+				$(MARVELL_DRV_BASE)/thermal.c		\
+				$(PLAT_COMMON_BASE)/plat_thermal.c	\
+				$(BLE_PORTING_SOURCES)			\
+				$(MARVELL_DRV_BASE)/ccu.c		\
+				$(MARVELL_DRV_BASE)/io_win.c
+
+BL1_SOURCES		+=	$(PLAT_COMMON_BASE)/aarch64/plat_helpers.S \
+				lib/cpus/aarch64/cortex_a72.S
+
+MARVELL_DRV		:= 	$(MARVELL_DRV_BASE)/io_win.c	\
+				$(MARVELL_DRV_BASE)/iob.c	\
+				$(MARVELL_DRV_BASE)/mci.c	\
+				$(MARVELL_DRV_BASE)/amb_adec.c	\
+				$(MARVELL_DRV_BASE)/ccu.c	\
+				$(MARVELL_DRV_BASE)/cache_llc.c	\
+				$(MARVELL_DRV_BASE)/comphy/phy-comphy-cp110.c
+
+BL31_PORTING_SOURCES	:=	$(PLAT_FAMILY_BASE)/$(PLAT)/board/marvell_plat_config.c
+
+BL31_SOURCES		+=	lib/cpus/aarch64/cortex_a72.S		       \
+				$(PLAT_COMMON_BASE)/aarch64/plat_helpers.S     \
+				$(PLAT_COMMON_BASE)/aarch64/plat_arch_config.c \
+				$(PLAT_COMMON_BASE)/plat_pm.c		       \
+				$(PLAT_COMMON_BASE)/plat_bl31_setup.c	       \
+				$(MARVELL_COMMON_BASE)/marvell_gicv2.c	       \
+				$(MARVELL_COMMON_BASE)/mrvl_sip_svc.c	       \
+				$(MARVELL_COMMON_BASE)/marvell_ddr_info.c      \
+				$(BL31_PORTING_SOURCES)			       \
+				$(MARVELL_DRV)				       \
+				$(MARVELL_MOCHI_DRV)			       \
+				$(MARVELL_GIC_SOURCES)
+
+# Add trace functionality for PM
+BL31_SOURCES		+=	$(PLAT_COMMON_BASE)/plat_pm_trace.c
+
+# Disable the PSCI platform compatibility layer (which allows porting
+# from the old platform APIs to the new APIs).
+# It is not needed, since the Marvell platforms already use the new platform APIs.
+ENABLE_PLAT_COMPAT	:= 	0
+
+# The SCP_BL2 (MSS firmware) image is mandatory for a8k family builds
+ifndef SCP_BL2
+ $(error "Error: SCP_BL2 image is mandatory for a8k family")
+endif
+
+# MSS (SCP) build
+include $(PLAT_COMMON_BASE)/mss/mss_a8k.mk
+
+# BLE (ROM context execution code, AKA binary extension)
+BLE_PATH	?=  ble
+
+include ${BLE_PATH}/ble.mk
+$(eval $(call MAKE_BL,e))
+
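+# Flash image assembly: pad BL1 to a 128KB boundary, append the FIP to form
+# the boot image, then let doimage wrap the result with the image header
+# expected by the BootROM to produce the final flash image.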
+mrvl_flash: ${BUILD_PLAT}/${FIP_NAME} ${DOIMAGETOOL} ${BUILD_PLAT}/ble.bin
+	$(shell truncate -s %128K ${BUILD_PLAT}/bl1.bin)
+	$(shell cat ${BUILD_PLAT}/bl1.bin ${BUILD_PLAT}/${FIP_NAME} > ${BUILD_PLAT}/${BOOT_IMAGE})
+	${DOIMAGETOOL} ${DOIMAGE_FLAGS} ${BUILD_PLAT}/${BOOT_IMAGE} ${BUILD_PLAT}/${FLASH_IMAGE}
+
diff --git a/plat/marvell/a8k/common/aarch64/a8k_common.c b/plat/marvell/a8k/common/aarch64/a8k_common.c
new file mode 100644
index 0000000..7c2bf31
--- /dev/null
+++ b/plat/marvell/a8k/common/aarch64/a8k_common.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <plat_marvell.h>
+
+
+/* MMU entry for internal (register) space access */
+#define MAP_DEVICE0	MAP_REGION_FLAT(DEVICE0_BASE,			\
+					DEVICE0_SIZE,			\
+					MT_DEVICE | MT_RW | MT_SECURE)
+
+/*
+ * Table of regions for various BL stages to map using the MMU.
+ */
+#if IMAGE_BL1
+const mmap_region_t plat_marvell_mmap[] = {
+	MARVELL_MAP_SHARED_RAM,
+	MAP_DEVICE0,
+	{0}
+};
+#endif
+#if IMAGE_BL2
+const mmap_region_t plat_marvell_mmap[] = {
+	MARVELL_MAP_SHARED_RAM,
+	MAP_DEVICE0,
+	MARVELL_MAP_DRAM,
+	{0}
+};
+#endif
+
+#if IMAGE_BL2U
+const mmap_region_t plat_marvell_mmap[] = {
+	MAP_DEVICE0,
+	{0}
+};
+#endif
+
+#if IMAGE_BLE
+const mmap_region_t plat_marvell_mmap[] = {
+	MAP_DEVICE0,
+	{0}
+};
+#endif
+
+#if IMAGE_BL31
+const mmap_region_t plat_marvell_mmap[] = {
+	MARVELL_MAP_SHARED_RAM,
+	MAP_DEVICE0,
+	MARVELL_MAP_DRAM,
+	{0}
+};
+#endif
+#if IMAGE_BL32
+const mmap_region_t plat_marvell_mmap[] = {
+	MAP_DEVICE0,
+	{0}
+};
+#endif
+
+MARVELL_CASSERT_MMAP;
diff --git a/plat/marvell/a8k/common/aarch64/plat_arch_config.c b/plat/marvell/a8k/common/aarch64/plat_arch_config.c
new file mode 100644
index 0000000..8667331
--- /dev/null
+++ b/plat/marvell/a8k/common/aarch64/plat_arch_config.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <platform.h>
+#include <arch_helpers.h>
+#include <mmio.h>
+#include <debug.h>
+#include <cache_llc.h>
+
+
+#define CCU_HTC_ASET			(MVEBU_CCU_BASE(MVEBU_AP0) + 0x264)
+#define MVEBU_IO_AFFINITY		(0xF00)
+
+
+static void plat_enable_affinity(void)
+{
+	int cluster_id;
+	int affinity;
+
+	/* set CPU Affinity */
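+	/* Note: the write to the CCU HTC "add set" register below appears to
+	 * register this cluster (plus the IO affinity bits) in the hardware
+	 * coherency domain.
+	 */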
+	cluster_id = plat_my_core_pos() / PLAT_MARVELL_CLUSTER_CORE_COUNT;
+	affinity = (MVEBU_IO_AFFINITY | (1 << cluster_id));
+	mmio_write_32(CCU_HTC_ASET, affinity);
+
+	/* set barrier */
+	isb();
+}
+
+void marvell_psci_arch_init(int die_index)
+{
+#if LLC_ENABLE
+	/* Check that LLC is in exclusive mode,
+	 * as L2 is configured to UniqueClean eviction
+	 * (in the a8k reset handler)
+	 */
+	if (llc_is_exclusive(0) == 0)
+		ERROR("LLC should be configured to exclusive mode\n");
+#endif
+
+	/* Enable Affinity */
+	plat_enable_affinity();
+}
diff --git a/plat/marvell/a8k/common/aarch64/plat_helpers.S b/plat/marvell/a8k/common/aarch64/plat_helpers.S
new file mode 100644
index 0000000..fadc4c2
--- /dev/null
+++ b/plat/marvell/a8k/common/aarch64/plat_helpers.S
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <asm_macros.S>
+#include <platform_def.h>
+#include <marvell_pm.h>
+
+	.globl	plat_secondary_cold_boot_setup
+	.globl	plat_get_my_entrypoint
+	.globl	plat_is_my_cpu_primary
+	.globl  plat_reset_handler
+
+	/* -----------------------------------------------------
+	 * void plat_secondary_cold_boot_setup (void);
+	 *
+	 * This function performs any platform specific actions
+	 * needed for a secondary cpu after a cold reset. Right
+	 * now this is a stub function.
+	 * -----------------------------------------------------
+	 */
+func plat_secondary_cold_boot_setup
+	mov	x0, #0
+	ret
+endfunc plat_secondary_cold_boot_setup
+
+	/* ---------------------------------------------------------------------
+	 * unsigned long plat_get_my_entrypoint (void);
+	 *
+	 * Main job of this routine is to distinguish
+	 * between a cold and warm boot
+	 * For a cold boot, return 0.
+	 * For a warm boot, read the mailbox and return the address it contains.
+	 *
+	 * ---------------------------------------------------------------------
+	 */
+func plat_get_my_entrypoint
+	/* Read first word and compare it with magic num */
+	mov_imm x0, PLAT_MARVELL_MAILBOX_BASE
+	ldr     x1, [x0]
+	mov_imm x2, MVEBU_MAILBOX_MAGIC_NUM
+	cmp     x1, x2
+	beq     warm_boot  /* Magic number match indicates a warm boot */
+	mov     x0, #0     /* Otherwise this is a cold boot - return 0 */
+	ret
+warm_boot:
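+	/*
+	 * The arithmetic below computes the byte offset of the mailbox
+	 * entry holding the secondary entry point; with the current
+	 * MBOX_IDX_SEC_ADDR value it resolves to mailbox[MBOX_IDX_SEC_ADDR].
+	 */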
+	mov_imm x1, MBOX_IDX_SEC_ADDR		/* Get the jump address */
+	subs	x1, x1, #1
+	mov	x2, #(MBOX_IDX_SEC_ADDR * 8)
+	lsl	x3, x2, x1
+	add     x0, x0, x3
+	ldr     x0, [x0]
+	ret
+endfunc plat_get_my_entrypoint
+
+	/* -----------------------------------------------------
+	 * unsigned int plat_is_my_cpu_primary (void);
+	 *
+	 * Find out whether the current cpu is the primary
+	 * cpu.
+	 * -----------------------------------------------------
+	 */
+func plat_is_my_cpu_primary
+	mrs	x0, mpidr_el1
+	and	x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
+	cmp	x0, #MVEBU_PRIMARY_CPU
+	cset	w0, eq
+	ret
+endfunc plat_is_my_cpu_primary
+
+	/* -----------------------------------------------------
+	 * void plat_reset_handler (void);
+	 *
+	 * Platform specific configuration right after the CPU
+	 * comes out of reset.
+	 *
+	 * The plat_reset_handler can clobber x0 - x18, x30.
+	 * -----------------------------------------------------
+	 */
+func plat_reset_handler
+	/*
+	 * Note: the configuration below must be done before the MMU,
+	 *	 I-Cache and L2 are enabled.
+	 *	 The reset handler is executed right after reset
+	 *	 and before the caches are enabled.
+	 */
+
+	/* Enable L1/L2 ECC and Parity */
+	mrs x5, s3_1_c11_c0_2  /* L2 Ctrl */
+	orr x5, x5, #(1 << 21) /* Enable L1/L2 cache ECC & Parity */
+	msr s3_1_c11_c0_2, x5  /* L2 Ctrl */
+
+#if LLC_ENABLE
+	/*
+	 * Enable L2 UniqueClean evictions
+	 *  Note: this configuration assumes that LLC is configured
+	 *	  in exclusive mode.
+	 *	  Later on in the code this assumption will be validated
+	 */
+	mrs x5, s3_1_c15_c0_0  /* L2 Ctrl */
+	orr x5, x5, #(1 << 14) /* Enable UniqueClean evictions with data */
+	msr s3_1_c15_c0_0, x5  /* L2 Ctrl */
+#endif
+
+	/* Instruction Barrier to allow msr command completion */
+	isb
+
+	ret
+endfunc plat_reset_handler
diff --git a/plat/marvell/a8k/common/include/a8k_plat_def.h b/plat/marvell/a8k/common/include/a8k_plat_def.h
new file mode 100644
index 0000000..4ed8c7e
--- /dev/null
+++ b/plat/marvell/a8k/common/include/a8k_plat_def.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __A8K_PLAT_DEF_H__
+#define __A8K_PLAT_DEF_H__
+
+#include <marvell_def.h>
+
+#define MVEBU_PRIMARY_CPU		0x0
+#define MVEBU_AP0			0x0
+
+/* APN806 revision ID */
+#define MVEBU_CSS_GWD_CTRL_IIDR2_REG	(MVEBU_REGS_BASE + 0x610FCC)
+#define GWD_IIDR2_REV_ID_OFFSET		12
+#define GWD_IIDR2_REV_ID_MASK		0xF
+#define GWD_IIDR2_CHIP_ID_OFFSET	20
+#define GWD_IIDR2_CHIP_ID_MASK		(0xFFF << GWD_IIDR2_CHIP_ID_OFFSET)
+
+#define CHIP_ID_AP806			0x806
+#define CHIP_ID_AP807			0x807
+
+#define COUNTER_FREQUENCY		25000000
+
+#define MVEBU_REGS_BASE			0xF0000000
+#define MVEBU_REGS_BASE_MASK		0xF0000000
+#define MVEBU_REGS_BASE_AP(ap)		MVEBU_REGS_BASE
+#define MVEBU_CP_REGS_BASE(cp_index)	(0xF2000000 + (cp_index) * 0x2000000)
+#define MVEBU_RFU_BASE			(MVEBU_REGS_BASE + 0x6F0000)
+#define MVEBU_IO_WIN_BASE(ap_index)	(MVEBU_RFU_BASE)
+#define MVEBU_IO_WIN_GCR_OFFSET		(0x70)
+#define MVEBU_IO_WIN_MAX_WINS		(7)
+
+/* Misc SoC configurations Base */
+#define MVEBU_MISC_SOC_BASE		(MVEBU_REGS_BASE + 0x6F4300)
+
+#define MVEBU_CCU_BASE(ap_index)	(MVEBU_REGS_BASE + 0x4000)
+#define MVEBU_CCU_MAX_WINS		(8)
+
+#define MVEBU_LLC_BASE(ap_index)	(MVEBU_REGS_BASE + 0x8000)
+#define MVEBU_DRAM_MAC_BASE		(MVEBU_REGS_BASE + 0x20000)
+#define MVEBU_DRAM_PHY_BASE		(MVEBU_REGS_BASE + 0x20000)
+#define MVEBU_SMMU_BASE			(MVEBU_REGS_BASE + 0x100000)
+#define MVEBU_CP_MPP_REGS(cp_index, n)	(MVEBU_CP_REGS_BASE(cp_index) + \
+						0x440000 + ((n) << 2))
+#define MVEBU_PM_MPP_REGS(cp_index, n)	(MVEBU_CP_REGS_BASE(cp_index) + \
+						0x440000 + ((n / 8) << 2))
+#define MVEBU_CP_GPIO_DATA_OUT(cp_index, n) \
+					(MVEBU_CP_REGS_BASE(cp_index) + \
+					0x440100 + ((n > 32) ? 0x40 : 0x00))
+#define MVEBU_CP_GPIO_DATA_OUT_EN(cp_index, n) \
+					(MVEBU_CP_REGS_BASE(cp_index) + \
+					0x440104 + ((n > 32) ? 0x40 : 0x00))
+#define MVEBU_CP_GPIO_DATA_IN(cp_index, n) (MVEBU_CP_REGS_BASE(cp_index) + \
+					0x440110 + ((n > 32) ? 0x40 : 0x00))
+#define MVEBU_AP_MPP_REGS(n)		(MVEBU_RFU_BASE + 0x4000 + ((n) << 2))
+#define MVEBU_AP_GPIO_REGS		(MVEBU_RFU_BASE + 0x5040)
+#define MVEBU_AP_GPIO_DATA_IN		(MVEBU_AP_GPIO_REGS + 0x10)
+#define MVEBU_AP_I2C_BASE		(MVEBU_REGS_BASE + 0x511000)
+#define MVEBU_CP0_I2C_BASE		(MVEBU_CP_REGS_BASE(0) + 0x701000)
+#define MVEBU_AP_EXT_TSEN_BASE		(MVEBU_RFU_BASE + 0x8084)
+
+#define MVEBU_AP_MC_TRUSTZONE_REG_LOW(ap, win)	(MVEBU_REGS_BASE_AP(ap) + \
+							0x20080 + ((win) * 0x8))
+#define MVEBU_AP_MC_TRUSTZONE_REG_HIGH(ap, win)	(MVEBU_REGS_BASE_AP(ap) + \
+							0x20084 + ((win) * 0x8))
+
+/* MCI indirect access definitions */
+#define MCI_MAX_UNIT_ID				2
+/* SoC RFU / IHBx4 Control */
+#define MCIX4_REG_START_ADDRESS_REG(unit_id)	(MVEBU_RFU_BASE + \
+						0x4218 + (unit_id * 0x20))
+#define MCI_REMAP_OFF_SHIFT			8
+
+#define MVEBU_MCI_REG_BASE_REMAP(index)		(0xFD000000 + \
+						((index) * 0x1000000))
+
+#define MVEBU_PCIE_X4_MAC_BASE(x)	(MVEBU_CP_REGS_BASE(x) + 0x600000)
+#define MVEBU_COMPHY_BASE(x)		(MVEBU_CP_REGS_BASE(x) + 0x441000)
+#define MVEBU_HPIPE_BASE(x)		(MVEBU_CP_REGS_BASE(x) + 0x120000)
+#define MVEBU_CP_DFX_OFFSET		(0x400200)
+
+/*****************************************************************************
+ * MVEBU memory map related constants
+ *****************************************************************************
+ */
+/* Aggregate of all devices in the first GB */
+#define DEVICE0_BASE			MVEBU_REGS_BASE
+#define DEVICE0_SIZE			0x10000000
+
+/*****************************************************************************
+ * GIC-400 & interrupt handling related constants
+ *****************************************************************************
+ */
+/* Base MVEBU compatible GIC memory map */
+#define MVEBU_GICD_BASE			0x210000
+#define MVEBU_GICC_BASE			0x220000
+
+
+/*****************************************************************************
+ * AXI Configuration
+ *****************************************************************************
+ */
+#define MVEBU_AXI_ATTR_ARCACHE_OFFSET		4
+#define MVEBU_AXI_ATTR_ARCACHE_MASK		(0xF << \
+						 MVEBU_AXI_ATTR_ARCACHE_OFFSET)
+#define MVEBU_AXI_ATTR_ARDOMAIN_OFFSET		12
+#define MVEBU_AXI_ATTR_ARDOMAIN_MASK		(0x3 << \
+						 MVEBU_AXI_ATTR_ARDOMAIN_OFFSET)
+#define MVEBU_AXI_ATTR_AWCACHE_OFFSET		20
+#define MVEBU_AXI_ATTR_AWCACHE_MASK		(0xF << \
+						 MVEBU_AXI_ATTR_AWCACHE_OFFSET)
+#define MVEBU_AXI_ATTR_AWDOMAIN_OFFSET		28
+#define MVEBU_AXI_ATTR_AWDOMAIN_MASK		(0x3 << \
+						 MVEBU_AXI_ATTR_AWDOMAIN_OFFSET)
+
+/* SATA MBUS to AXI configuration */
+#define MVEBU_SATA_M2A_AXI_ARCACHE_OFFSET	1
+#define MVEBU_SATA_M2A_AXI_ARCACHE_MASK		(0xF << \
+					MVEBU_SATA_M2A_AXI_ARCACHE_OFFSET)
+#define MVEBU_SATA_M2A_AXI_AWCACHE_OFFSET	5
+#define MVEBU_SATA_M2A_AXI_AWCACHE_MASK		(0xF << \
+					MVEBU_SATA_M2A_AXI_AWCACHE_OFFSET)
+
+/* ARM cache attributes */
+#define CACHE_ATTR_BUFFERABLE			0x1
+#define CACHE_ATTR_CACHEABLE			0x2
+#define CACHE_ATTR_READ_ALLOC			0x4
+#define CACHE_ATTR_WRITE_ALLOC			0x8
+/* Domain */
+#define DOMAIN_NON_SHAREABLE			0x0
+#define DOMAIN_INNER_SHAREABLE			0x1
+#define DOMAIN_OUTER_SHAREABLE			0x2
+#define DOMAIN_SYSTEM_SHAREABLE			0x3
+
+/************************************************************************
+ * Required platform porting definitions common to all
+ * Management Compute SubSystems (MSS)
+ ************************************************************************
+ */
+/*
+ * Load address of SCP_BL2
+ * SCP_BL2 is loaded to the same place as BL31.
+ * Once SCP_BL2 is transferred to the SCP,
+ * it is discarded and BL31 is loaded over the top.
+ */
+#ifdef SCP_IMAGE
+#define SCP_BL2_BASE                    BL31_BASE
+#endif
+
+#ifndef __ASSEMBLER__
+enum ap806_sar_target_dev {
+	SAR_PIDI_MCIX2		= 0x0,
+	SAR_MCIX4		= 0x1,
+	SAR_SPI			= 0x2,
+	SAR_SD			= 0x3,
+	SAR_PIDI_MCIX2_BD	= 0x4, /* BootRom disabled */
+	SAR_MCIX4_DB		= 0x5, /* BootRom disabled */
+	SAR_SPI_DB		= 0x6, /* BootRom disabled */
+	SAR_EMMC		= 0x7
+};
+
+enum io_win_target_ids {
+	MCI_0_TID	 = 0x0,
+	MCI_1_TID	 = 0x1,
+	MCI_2_TID	 = 0x2,
+	PIDI_TID	 = 0x3,
+	SPI_TID		 = 0x4,
+	STM_TID		 = 0x5,
+	BOOTROM_TID	 = 0x6,
+	IO_WIN_MAX_TID
+};
+
+enum ccu_target_ids {
+	IO_0_TID        = 0x00,
+	DRAM_0_TID      = 0x03,
+	IO_1_TID        = 0x0F,
+	CFG_REG_TID     = 0x10,
+	RAR_TID         = 0x20,
+	SRAM_TID        = 0x40,
+	DRAM_1_TID      = 0xC0,
+	CCU_MAX_TID,
+	INVALID_TID     = 0xFF
+};
+#endif /* __ASSEMBLER__ */
+
+#endif /* __A8K_PLAT_DEF_H__ */
diff --git a/plat/marvell/a8k/common/include/ddr_info.h b/plat/marvell/a8k/common/include/ddr_info.h
new file mode 100644
index 0000000..e19036a
--- /dev/null
+++ b/plat/marvell/a8k/common/include/ddr_info.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#define DRAM_MAX_IFACE			1
+#define DRAM_CH0_MMAP_LOW_OFFSET	0x20200
diff --git a/plat/marvell/a8k/common/include/plat_macros.S b/plat/marvell/a8k/common/include/plat_macros.S
new file mode 100644
index 0000000..2a6ccf2
--- /dev/null
+++ b/plat/marvell/a8k/common/include/plat_macros.S
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __PLAT_MACROS_S__
+#define __PLAT_MACROS_S__
+
+#include <marvell_macros.S>
+
+/*
+ * Required platform porting macros
+ * (Provided by included headers)
+ */
+.macro plat_crash_print_regs
+.endm
+
+#endif /* __PLAT_MACROS_S__ */
diff --git a/plat/marvell/a8k/common/include/platform_def.h b/plat/marvell/a8k/common/include/platform_def.h
new file mode 100644
index 0000000..f7bd23f
--- /dev/null
+++ b/plat/marvell/a8k/common/include/platform_def.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <board_marvell_def.h>
+#include <gic_common.h>
+#include <interrupt_props.h>
+#include <mvebu_def.h>
+#ifndef __ASSEMBLY__
+#include <stdio.h>
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Most platform porting definitions provided by included headers
+ */
+
+/*
+ * DRAM Memory layout:
+ *		+-----------------------+
+ *		:			:
+ *		:	Linux		:
+ * 0x04X00000-->+-----------------------+
+ *		|	BL3-3(u-boot)	|>>}>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
+ *		|-----------------------|  }				       |
+ *		|	BL3-[0,1, 2]	|  }---------------------------------> |
+ *		|-----------------------|  }				||     |
+ *		|	BL2		|  }->FIP (loaded by            ||     |
+ *		|-----------------------|  }       BootROM to DRAM)     ||     |
+ *		|	FIP_TOC		|  }                            ||     |
+ * 0x04120000-->|-----------------------|				||     |
+ *		|	BL1 (RO)	|				||     |
+ * 0x04100000-->+-----------------------+				||     |
+ *		:			:				||     |
+ *		: Trusted SRAM section	:				\/     |
+ * 0x04040000-->+-----------------------+  Replaced by BL2  +----------------+ |
+ *		|	BL1 (RW)	|  <<<<<<<<<<<<<<<< | BL3-1 NOBITS   | |
+ * 0x04037000-->|-----------------------|  <<<<<<<<<<<<<<<< |----------------| |
+ *		|			|  <<<<<<<<<<<<<<<< | BL3-1 PROGBITS | |
+ * 0x04023000-->|-----------------------|		    +----------------+ |
+ *		|	BL2		|				       |
+ *		|-----------------------|				       |
+ *		|			|				       |
+ * 0x04001000-->|-----------------------|				       |
+ *		|	Shared		|				       |
+ * 0x04000000-->+-----------------------+				       |
+ *		:			:				       |
+ *		:	Linux		:				       |
+ *		:			:				       |
+ *		|-----------------------|				       |
+ *		|			|	U-Boot(BL3-3) Loaded by BL2    |
+ *		|	U-Boot		|	<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
+ * 0x00000000-->+-----------------------+
+ *
+ * Trusted SRAM section 0x4000000..0x4200000:
+ * ----------------------------------------
+ * SRAM_BASE		= 0x4001000
+ * BL2_BASE			= 0x4006000
+ * BL2_LIMIT		= BL31_BASE
+ * BL31_BASE		= 0x4023000 = (64MB + 256KB - 0x1D000)
+ * BL31_PROGBITS_LIMIT	= BL1_RW_BASE
+ * BL1_RW_BASE		= 0x4037000 = (64MB + 256KB - 0x9000)
+ * BL1_RW_LIMIT		= BL31_LIMIT = 0x4040000
+ *
+ *
+ * PLAT_MARVELL_FIP_BASE	= 0x4120000
+ */
+
+/*
+ * Since BL33 is loaded by BL2 (and validated by BL31) to DRAM offset 0,
+ * it is allowed to load/copy images to 'NULL' pointers
+ */
+#if defined(IMAGE_BL2) || defined(IMAGE_BL31)
+#define PLAT_ALLOW_ZERO_ADDR_COPY
+#endif
+
+#define PLAT_MARVELL_SRAM_BASE			0xFFE1C048
+#define PLAT_MARVELL_SRAM_END			0xFFE78000
+
+#define PLAT_MARVELL_ATF_BASE			0x4000000
+#define PLAT_MARVELL_ATF_LOAD_ADDR		(PLAT_MARVELL_ATF_BASE + \
+								0x100000)
+
+#define PLAT_MARVELL_FIP_BASE			(PLAT_MARVELL_ATF_LOAD_ADDR + \
+								0x20000)
+#define PLAT_MARVELL_FIP_MAX_SIZE		0x4000000
+
+#define PLAT_MARVELL_NORTHB_COUNT		1
+
+#define PLAT_MARVELL_CLUSTER_COUNT		2
+#define PLAT_MARVELL_CLUSTER_CORE_COUNT		2
+
+#define PLAT_MARVELL_CORE_COUNT			(PLAT_MARVELL_CLUSTER_COUNT * \
+						PLAT_MARVELL_CLUSTER_CORE_COUNT)
+
+/* DRAM[2MB..66MB] is used as Trusted ROM */
+#define PLAT_MARVELL_TRUSTED_ROM_BASE		PLAT_MARVELL_ATF_LOAD_ADDR
+/* 64 MB TODO: reduce this to minimum needed according to fip image size */
+#define PLAT_MARVELL_TRUSTED_ROM_SIZE		0x04000000
+/* Reserve 16M of Trusted DRAM for the Secure Payload (BL32) */
+#define PLAT_MARVELL_TRUSTED_DRAM_BASE		0x04400000
+#define PLAT_MARVELL_TRUSTED_DRAM_SIZE		0x01000000	/* 16 MB */
+
+/*
+ * PLAT_ARM_MAX_BL1_RW_SIZE is calculated using the current BL1 RW debug size
+ * plus a little space for growth.
+ */
+#define PLAT_MARVELL_MAX_BL1_RW_SIZE		0xA000
+
+/*
+ * PLAT_MARVELL_MAX_BL2_SIZE is calculated using the current BL2 debug size
+ * plus a little space for growth.
+ */
+#define PLAT_MARVELL_MAX_BL2_SIZE		0xF000
+
+/*
+ * PLAT_MARVEL_MAX_BL31_SIZE is calculated using the current BL31 debug size
+ * plus a little space for growth.
+ */
+#define PLAT_MARVEL_MAX_BL31_SIZE		0x5D000
+
+#define PLAT_MARVELL_CPU_ENTRY_ADDR		BL1_RO_BASE
+
+/* GIC related definitions */
+#define PLAT_MARVELL_GICD_BASE		(MVEBU_REGS_BASE + MVEBU_GICD_BASE)
+#define PLAT_MARVELL_GICC_BASE		(MVEBU_REGS_BASE + MVEBU_GICC_BASE)
+
+#define PLAT_MARVELL_G0_IRQ_PROPS(grp) \
+	INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_0, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL)
+
+#define PLAT_MARVELL_G1S_IRQ_PROPS(grp) \
+	INTR_PROP_DESC(MARVELL_IRQ_SEC_PHY_TIMER, GIC_HIGHEST_SEC_PRIORITY, \
+			grp, GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_1, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_2, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_3, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_4, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_5, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_7, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL)
+
+#define PLAT_MARVELL_SHARED_RAM_CACHED		1
+
+/*
+ * Load address of BL3-3 for this platform port
+ */
+#define PLAT_MARVELL_NS_IMAGE_OFFSET		0x0
+
+/* System Reference Clock */
+#define PLAT_REF_CLK_IN_HZ			COUNTER_FREQUENCY
+
+/*
+ * PL011 related constants
+ */
+#define PLAT_MARVELL_BOOT_UART_BASE		(MVEBU_REGS_BASE + 0x512000)
+#define PLAT_MARVELL_BOOT_UART_CLK_IN_HZ	200000000
+
+#define PLAT_MARVELL_CRASH_UART_BASE		PLAT_MARVELL_BOOT_UART_BASE
+#define PLAT_MARVELL_CRASH_UART_CLK_IN_HZ	PLAT_MARVELL_BOOT_UART_CLK_IN_HZ
+
+#define PLAT_MARVELL_BL31_RUN_UART_BASE		PLAT_MARVELL_BOOT_UART_BASE
+#define PLAT_MARVELL_BL31_RUN_UART_CLK_IN_HZ	PLAT_MARVELL_BOOT_UART_CLK_IN_HZ
+
+/* Recovery image enable */
+#define PLAT_RECOVERY_IMAGE_ENABLE		0
+
+/* Required platform porting definitions */
+#define PLAT_MAX_PWR_LVL			MPIDR_AFFLVL1
+
+/* System timer related constants */
+#define PLAT_MARVELL_NSTIMER_FRAME_ID		1
+
+/* Mailbox base address (note the lower memory space
+ * is reserved for BLE data)
+ */
+#define PLAT_MARVELL_MAILBOX_BASE		(MARVELL_TRUSTED_SRAM_BASE \
+							+ 0x400)
+#define PLAT_MARVELL_MAILBOX_SIZE		0x100
+#define PLAT_MARVELL_MAILBOX_MAGIC_NUM		0x6D72766C	/* mrvl */
+
+/* Security related definitions */
+#define IRQ_SEC_OS_TICK_INT			MARVELL_IRQ_SEC_PHY_TIMER
+
+#define TRUSTED_DRAM_BASE			PLAT_MARVELL_TRUSTED_DRAM_BASE
+#define TRUSTED_DRAM_SIZE			PLAT_MARVELL_TRUSTED_DRAM_SIZE
+
+#define BL32_BASE				TRUSTED_DRAM_BASE
+
+#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/marvell/a8k/common/mss/mss_a8k.mk b/plat/marvell/a8k/common/mss/mss_a8k.mk
new file mode 100644
index 0000000..58f23d8
--- /dev/null
+++ b/plat/marvell/a8k/common/mss/mss_a8k.mk
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2018 Marvell International Ltd.
+#
+# SPDX-License-Identifier:	BSD-3-Clause
+# https://spdx.org/licenses
+#
+
+PLAT_MARVELL		:=	plat/marvell
+A8K_MSS_SOURCE		:=	$(PLAT_MARVELL)/a8k/common/mss
+
+BL2_SOURCES		+=	$(A8K_MSS_SOURCE)/mss_bl2_setup.c
+
+BL31_SOURCES		+=	$(A8K_MSS_SOURCE)/mss_pm_ipc.c
+
+PLAT_INCLUDES		+=	-I$(A8K_MSS_SOURCE)
+
+ifneq (${SCP_BL2},)
+# This define is used to indicate that the SCP image is present
+$(eval $(call add_define,SCP_IMAGE))
+endif
diff --git a/plat/marvell/a8k/common/mss/mss_bl2_setup.c b/plat/marvell/a8k/common/mss/mss_bl2_setup.c
new file mode 100644
index 0000000..6688551
--- /dev/null
+++ b/plat/marvell/a8k/common/mss/mss_bl2_setup.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <a8k_common.h>
+#include <bl_common.h>
+#include <ccu.h>
+#include <cp110_setup.h>
+#include <debug.h>
+#include <marvell_plat_priv.h> /* timer functionality */
+#include <mmio.h>
+#include <platform_def.h>
+
+#include "mss_scp_bootloader.h"
+
+/* IO windows configuration */
+#define IOW_GCR_OFFSET		(0x70)
+
+/* MSS windows configuration */
+#define MSS_AEBR(base)			(base + 0x160)
+#define MSS_AIBR(base)			(base + 0x164)
+#define MSS_AEBR_MASK			0xFFF
+#define MSS_AIBR_MASK			0xFFF
+
+#define MSS_EXTERNAL_SPACE		0x50000000
+#define MSS_EXTERNAL_ACCESS_BIT		28
+#define MSS_EXTERNAL_ADDR_MASK		0xfffffff
+#define MSS_INTERNAL_ACCESS_BIT		28
+
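+/* Single CCU window: 64MB of IO space starting at the CP0 register base,
+ * which also covers the adjacent CP1 register space.
+ */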
+struct addr_map_win ccu_mem_map[] = {
+	{MVEBU_CP_REGS_BASE(0), 0x4000000, IO_0_TID}
+};
+
+/* Since the scp_bl2 image can contain firmware for the cp0 and cp1
+ * coprocessors, access to both cp0 and cp1 needs to be provided.
+ * More precisely it is required to:
+ *  - get the device id information, which is stored in CP0 registers
+ *    (to distinguish between a cp0 + cp1 setup and a standalone cp0)
+ *  - get access to the cp that the firmware for cp0/cp1 is loaded to
+ * This function configures the ccu windows accordingly.
+ *
+ * Note: there is no need to restore the previous ccu configuration, since in
+ * the next phase (BL31) init_ccu will be called (via apn806_init/
+ * bl31_plat_arch_setup) and the ccu configuration will be overwritten.
+ */
+static int bl2_plat_mmap_init(void)
+{
+	int cfg_num, win_id, cfg_idx;
+
+	cfg_num =  ARRAY_SIZE(ccu_mem_map);
+
+	/* CCU window-0 should not be counted - it's already used */
+	if (cfg_num > (MVEBU_CCU_MAX_WINS - 1)) {
+		ERROR("BL2: %s: trying to open too many windows\n", __func__);
+		return -1;
+	}
+
+	/* Enable required CCU windows
+	 * Do not touch CCU window 0,
+	 * it's used for the internal registers access
+	 */
+	for (cfg_idx = 0, win_id = 1; cfg_idx < cfg_num; cfg_idx++, win_id++) {
+		/* Enable required CCU windows */
+		ccu_win_check(&ccu_mem_map[cfg_idx]);
+		ccu_enable_win(MVEBU_AP0, &ccu_mem_map[cfg_idx], win_id);
+	}
+
+	/* Set the default target id to PIDI */
+	mmio_write_32(MVEBU_IO_WIN_BASE(MVEBU_AP0) + IOW_GCR_OFFSET, PIDI_TID);
+
+	return 0;
+}
+
+/*****************************************************************************
+ * Transfer SCP_BL2 from Trusted RAM using the SCP Download protocol.
+ * Return 0 on success, -1 otherwise.
+ *****************************************************************************
+ */
+int bl2_plat_handle_scp_bl2(image_info_t *scp_bl2_image_info)
+{
+	int ret;
+
+	INFO("BL2: Initiating SCP_BL2 transfer to SCP\n");
+
+	/* initialize the timer (for delay functionality) */
+	plat_delay_timer_init();
+
+	ret = bl2_plat_mmap_init();
+	if (ret != 0)
+		return ret;
+
+	ret = scp_bootloader_transfer((void *)scp_bl2_image_info->image_base,
+		scp_bl2_image_info->image_size);
+
+	if (ret == 0)
+		INFO("BL2: SCP_BL2 transferred to SCP\n");
+	else
+		ERROR("BL2: SCP_BL2 transfer failure\n");
+
+	return ret;
+}
+
+uintptr_t bl2_plat_get_cp_mss_regs(int ap_idx, int cp_idx)
+{
+	return MVEBU_CP_REGS_BASE(cp_idx) + 0x280000;
+}
+
+uintptr_t bl2_plat_get_ap_mss_regs(int ap_idx)
+{
+	return MVEBU_REGS_BASE + 0x580000;
+}
+
+uint32_t bl2_plat_get_cp_count(int ap_idx)
+{
+	uint32_t revision = cp110_device_id_get(MVEBU_CP_REGS_BASE(0));
+	/* A8040: two CPs.
+	 * A7040: one CP.
+	 */
+	if (revision == MVEBU_80X0_DEV_ID ||
+	    revision == MVEBU_80X0_CP115_DEV_ID)
+		return 2;
+	else
+		return 1;
+}
+
+uint32_t bl2_plat_get_ap_count(void)
+{
+	/* A8040 and A7040 have only one AP */
+	return 1;
+}
+
+void bl2_plat_configure_mss_windows(uintptr_t mss_regs)
+{
+	/* set AXI External and Internal Address Bus extension */
+	mmio_write_32(MSS_AEBR(mss_regs),
+		      ((0x0 >> MSS_EXTERNAL_ACCESS_BIT) & MSS_AEBR_MASK));
+	mmio_write_32(MSS_AIBR(mss_regs),
+		      ((mss_regs >> MSS_INTERNAL_ACCESS_BIT) & MSS_AIBR_MASK));
+}
diff --git a/plat/marvell/a8k/common/mss/mss_pm_ipc.c b/plat/marvell/a8k/common/mss/mss_pm_ipc.c
new file mode 100644
index 0000000..6ff4abc
--- /dev/null
+++ b/plat/marvell/a8k/common/mss/mss_pm_ipc.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <debug.h>
+#include <mmio.h>
+#include <psci.h>
+#include <string.h>
+
+#include <mss_pm_ipc.h>
+
+/*
+ * SISR is 32 bit interrupt register representing 32 interrupts
+ *
+ * +======+=============+=============+
+ * + Bits + 31          + 30 - 00     +
+ * +======+=============+=============+
+ * + Desc + MSS Msg Int + Reserved    +
+ * +======+=============+=============+
+ */
+#define MSS_SISR		(MVEBU_REGS_BASE + 0x5800D0)
+#define MSS_SISTR		(MVEBU_REGS_BASE + 0x5800D8)
+
+#define MSS_MSG_INT_MASK	(0x80000000)
+#define MSS_TIMER_BASE		(MVEBU_REGS_BASE_MASK + 0x580110)
+#define MSS_TRIGGER_TIMEOUT	(1000)
+
+/*****************************************************************************
+ * mss_pm_ipc_msg_send
+ *
+ * DESCRIPTION: create and transmit IPC message
+ *****************************************************************************
+ */
+int mss_pm_ipc_msg_send(unsigned int channel_id, unsigned int msg_id,
+			const psci_power_state_t *target_state)
+{
+	/* Transmit IPC message */
+#ifndef DISABLE_CLUSTER_LEVEL
+	mv_pm_ipc_msg_tx(channel_id, msg_id,
+			 (unsigned int)target_state->pwr_domain_state[
+					MPIDR_AFFLVL1]);
+#else
+	mv_pm_ipc_msg_tx(channel_id, msg_id, 0);
+#endif
+
+	return 0;
+}
+
+/*****************************************************************************
+ * mss_pm_ipc_msg_trigger
+ *
+ * DESCRIPTION: Trigger IPC message interrupt to MSS
+ *****************************************************************************
+ */
+int mss_pm_ipc_msg_trigger(void)
+{
+	unsigned int timeout;
+	unsigned int t_end;
+	unsigned int t_start = mmio_read_32(MSS_TIMER_BASE);
+
+	mmio_write_32(MSS_SISR, MSS_MSG_INT_MASK);
+
+	do {
+		/* wait while the SCP processes the incoming interrupt */
+		if (mmio_read_32(MSS_SISTR) != MSS_MSG_INT_MASK)
+			break;
+
+		/* check timeout */
+		t_end = mmio_read_32(MSS_TIMER_BASE);
+
+		timeout = ((t_start > t_end) ?
+			   (t_start - t_end) : (t_end - t_start));
+		if (timeout > MSS_TRIGGER_TIMEOUT) {
+			ERROR("PM MSG Trigger Timeout\n");
+			break;
+		}
+
+	} while (1);
+
+	return 0;
+}
diff --git a/plat/marvell/a8k/common/mss/mss_pm_ipc.h b/plat/marvell/a8k/common/mss/mss_pm_ipc.h
new file mode 100644
index 0000000..0f69457
--- /dev/null
+++ b/plat/marvell/a8k/common/mss/mss_pm_ipc.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __MSS_PM_IPC_H
+#define __MSS_PM_IPC_H
+
+#include <mss_ipc_drv.h>
+
+/* Currently MSS does not support Cluster level Power Down */
+#define DISABLE_CLUSTER_LEVEL
+
+
+/*****************************************************************************
+ * mss_pm_ipc_msg_send
+ *
+ * DESCRIPTION: create and transmit IPC message
+ *****************************************************************************
+ */
+int mss_pm_ipc_msg_send(unsigned int channel_id, unsigned int msg_id,
+			const psci_power_state_t *target_state);
+
+/*****************************************************************************
+ * mss_pm_ipc_msg_trigger
+ *
+ * DESCRIPTION: Trigger IPC message interrupt to MSS
+ *****************************************************************************
+ */
+int mss_pm_ipc_msg_trigger(void);
+
+
+#endif /* __MSS_PM_IPC_H */
diff --git a/plat/marvell/a8k/common/plat_bl1_setup.c b/plat/marvell/a8k/common/plat_bl1_setup.c
new file mode 100644
index 0000000..5d85102
--- /dev/null
+++ b/plat/marvell/a8k/common/plat_bl1_setup.c
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <mmio.h>
+#include <plat_marvell.h>
+
+void marvell_bl1_setup_mpps(void)
+{
+	/* Enable UART MPPs.
+	 * In a normal system, this is done by the BootROM.
+	 */
+	mmio_write_32(MVEBU_AP_MPP_REGS(1), 0x3000);
+	mmio_write_32(MVEBU_AP_MPP_REGS(2), 0x3000);
+}
diff --git a/plat/marvell/a8k/common/plat_bl31_setup.c b/plat/marvell/a8k/common/plat_bl31_setup.c
new file mode 100644
index 0000000..6c85fcc
--- /dev/null
+++ b/plat/marvell/a8k/common/plat_bl31_setup.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <a8k_common.h>
+#include <ap_setup.h>
+#include <cp110_setup.h>
+#include <debug.h>
+#include <marvell_plat_priv.h>
+#include <marvell_pm.h>
+#include <mmio.h>
+#include <mci.h>
+#include <plat_marvell.h>
+
+#include <mss_ipc_drv.h>
+#include <mss_mem.h>
+
+/* In the Armada-8k family (AP806/AP807), CP0 is connected to PIDI
+ * and CP1 is connected to IHB via MCI #0
+ */
+#define MVEBU_MCI0		0
+
+static _Bool pm_fw_running;
+
+/* Set a weak stub for platforms that don't need to configure GPIO */
+#pragma weak marvell_gpio_config
+int marvell_gpio_config(void)
+{
+	return 0;
+}
+
+static void marvell_bl31_mpp_init(int cp)
+{
+	uint32_t reg;
+
+	/* needed for CP#0 only */
+	if (cp)
+		return;
+
+
+	/*
+	 * Enable CP0 I2C MPPs (MPP: 37-38)
+	 * U-Boot relies on proper MPP settings for I2C EEPROM usage
+	 * (only for CP0)
+	 */
+	reg = mmio_read_32(MVEBU_CP_MPP_REGS(0, 4));
+	mmio_write_32(MVEBU_CP_MPP_REGS(0, 4), reg | 0x2200000);
+}
+
+void marvell_bl31_mss_init(void)
+{
+	struct mss_pm_ctrl_block *mss_pm_crtl =
+			(struct mss_pm_ctrl_block *)MSS_SRAM_PM_CONTROL_BASE;
+
+	/* Check that the image was loaded successfully */
+	if (mss_pm_crtl->handshake != HOST_ACKNOWLEDGMENT) {
+		NOTICE("MSS PM is not supported in this build\n");
+		return;
+	}
+
+	/* If we got here it means that the PM firmware is running */
+	pm_fw_running = 1;
+
+	INFO("MSS IPC init\n");
+
+	if (mss_pm_crtl->ipc_state == IPC_INITIALIZED)
+		mv_pm_ipc_init(mss_pm_crtl->ipc_base_address | MVEBU_REGS_BASE);
+}
+
+_Bool is_pm_fw_running(void)
+{
+	return pm_fw_running;
+}
+
+/* This function overrides the same function in marvell_bl31_setup.c */
+void bl31_plat_arch_setup(void)
+{
+	int cp;
+	uintptr_t *mailbox = (void *)PLAT_MARVELL_MAILBOX_BASE;
+
+	/* initialize the timer for mdelay/udelay functionality */
+	plat_delay_timer_init();
+
+	/* configure apn806 */
+	ap_init();
+
+	/* In marvell_bl31_plat_arch_setup, the EL3 MMU is configured.
+	 * The EL3 MMU configuration MUST be done after apn806_init; if not,
+	 * it will cause a hang in init_io_win
+	 * (after setting the IO windows GCR values).
+	 */
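+	/* When the mailbox holds the magic and suspend-state values, this is
+	 * a warm boot from suspend and the full arch setup is assumed to be
+	 * already in place, so it is skipped.
+	 */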
+	if (mailbox[MBOX_IDX_MAGIC] != MVEBU_MAILBOX_MAGIC_NUM ||
+	    mailbox[MBOX_IDX_SUSPEND_MAGIC] != MVEBU_MAILBOX_SUSPEND_STATE)
+		marvell_bl31_plat_arch_setup();
+
+	for (cp = 0; cp < CP_COUNT; cp++) {
+		/* Initialize MCI before accessing CP1 */
+		if (cp == 1)
+			mci_initialize(MVEBU_MCI0);
+
+		/* Configure the CP110 (CP0/CP1) */
+		cp110_init(MVEBU_CP_REGS_BASE(cp),
+			   STREAM_ID_BASE + (cp * MAX_STREAM_ID_PER_CP));
+
+		/* Should be called only after setting the IOB windows */
+		marvell_bl31_mpp_init(cp);
+	}
+
+	/* initialize IPC between MSS and ATF */
+	if (mailbox[MBOX_IDX_MAGIC] != MVEBU_MAILBOX_MAGIC_NUM ||
+	    mailbox[MBOX_IDX_SUSPEND_MAGIC] != MVEBU_MAILBOX_SUSPEND_STATE)
+		marvell_bl31_mss_init();
+
+	/* Configure GPIO */
+	marvell_gpio_config();
+}
diff --git a/plat/marvell/a8k/common/plat_ble_setup.c b/plat/marvell/a8k/common/plat_ble_setup.c
new file mode 100644
index 0000000..0cd62cb
--- /dev/null
+++ b/plat/marvell/a8k/common/plat_ble_setup.c
@@ -0,0 +1,570 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <a8k_common.h>
+#include <ap_setup.h>
+#include <aro.h>
+#include <ccu.h>
+#include <cp110_setup.h>
+#include <debug.h>
+#include <io_win.h>
+#include <mv_ddr_if.h>
+#include <mvebu_def.h>
+#include <plat_marvell.h>
+
+/* Register for skip image use */
+#define SCRATCH_PAD_REG2		0xF06F00A8
+#define SCRATCH_PAD_SKIP_VAL		0x01
+#define NUM_OF_GPIO_PER_REG 32
+
+#define MMAP_SAVE_AND_CONFIG	0
+#define MMAP_RESTORE_SAVED	1
+
+/* SAR clock settings */
+#define MVEBU_AP_GEN_MGMT_BASE		(MVEBU_RFU_BASE + 0x8000)
+#define MVEBU_AP_SAR_REG_BASE(r)	(MVEBU_AP_GEN_MGMT_BASE + 0x200 +\
+								((r) << 2))
+
+#define SAR_CLOCK_FREQ_MODE_OFFSET	(0)
+#define SAR_CLOCK_FREQ_MODE_MASK	(0x1f << SAR_CLOCK_FREQ_MODE_OFFSET)
+#define SAR_PIDI_LOW_SPEED_OFFSET	(20)
+#define SAR_PIDI_LOW_SPEED_MASK		(1 << SAR_PIDI_LOW_SPEED_OFFSET)
+#define SAR_PIDI_LOW_SPEED_SHIFT	(15)
+#define SAR_PIDI_LOW_SPEED_SET		(1 << SAR_PIDI_LOW_SPEED_SHIFT)
+
+#define FREQ_MODE_AP_SAR_REG_NUM	(0)
+#define SAR_CLOCK_FREQ_MODE(v)		(((v) & SAR_CLOCK_FREQ_MODE_MASK) >> \
+					SAR_CLOCK_FREQ_MODE_OFFSET)
+
+#define AVS_EN_CTRL_REG			(MVEBU_AP_GEN_MGMT_BASE + 0x130)
+#define AVS_ENABLE_OFFSET		(0)
+#define AVS_SOFT_RESET_OFFSET		(2)
+#define AVS_LOW_VDD_LIMIT_OFFSET	(4)
+#define AVS_HIGH_VDD_LIMIT_OFFSET	(12)
+#define AVS_TARGET_DELTA_OFFSET		(21)
+#define AVS_VDD_LOW_LIMIT_MASK	        (0xFF << AVS_LOW_VDD_LIMIT_OFFSET)
+#define AVS_VDD_HIGH_LIMIT_MASK	        (0xFF << AVS_HIGH_VDD_LIMIT_OFFSET)
+/* VDD limit is 0.9V for A70x0 @ CPU frequency < 1600MHz */
+#define AVS_A7K_LOW_CLK_VALUE		((0x80 << AVS_TARGET_DELTA_OFFSET) | \
+					 (0x1A << AVS_HIGH_VDD_LIMIT_OFFSET) | \
+					 (0x1A << AVS_LOW_VDD_LIMIT_OFFSET) | \
+					 (0x1 << AVS_SOFT_RESET_OFFSET) | \
+					 (0x1 << AVS_ENABLE_OFFSET))
+/* VDD limit is 1.0V for all A80x0 devices */
+#define AVS_A8K_CLK_VALUE		((0x80 << AVS_TARGET_DELTA_OFFSET) | \
+					 (0x24 << AVS_HIGH_VDD_LIMIT_OFFSET) | \
+					 (0x24 << AVS_LOW_VDD_LIMIT_OFFSET) | \
+					 (0x1 << AVS_SOFT_RESET_OFFSET) | \
+					 (0x1 << AVS_ENABLE_OFFSET))
+
+#define AVS_A3900_CLK_VALUE		((0x80 << 24) | \
+					 (0x2c2 << 13) | \
+					 (0x2c2 << 3) | \
+					 (0x1 << AVS_SOFT_RESET_OFFSET) | \
+					 (0x1 << AVS_ENABLE_OFFSET))
+
+#define MVEBU_AP_EFUSE_SRV_CTRL_REG	(MVEBU_AP_GEN_MGMT_BASE + 0x8)
+#define EFUSE_SRV_CTRL_LD_SELECT_OFFS	6
+#define EFUSE_SRV_CTRL_LD_SEL_USER_MASK	(1 << EFUSE_SRV_CTRL_LD_SELECT_OFFS)
+
+/* AP807 CPU ARO clock control registers */
+#define AP807_CPU_ARO_0_CTRL_0		(MVEBU_RFU_BASE + 0x82A8)
+#define AP807_CPU_ARO_1_CTRL_0		(MVEBU_RFU_BASE + 0x8D00)
+
+/* 0 - ARO clock is enabled, 1 - ARO clock is disabled */
+#define AP807_CPU_ARO_CLK_EN_OFFSET	0
+#define AP807_CPU_ARO_CLK_EN_MASK	(0x1 << AP807_CPU_ARO_CLK_EN_OFFSET)
+
+/* 0 - ARO is the clock source, 1 - PLL is the clock source */
+#define AP807_CPU_ARO_SEL_PLL_OFFSET	5
+#define AP807_CPU_ARO_SEL_PLL_MASK	(0x1 << AP807_CPU_ARO_SEL_PLL_OFFSET)
+
+/*
+ * - AVS work points in the LD0 eFuse:
+ *	SVC1 work point:     LD0[88:81]
+ *	SVC2 work point:     LD0[96:89]
+ *	SVC3 work point:     LD0[104:97]
+ *	SVC4 work point:     LD0[112:105]
+ * - Identification information in the LD-0 eFuse:
+ *	DRO:           LD0[74:65] - Not used by the SW
+ *	Revision:      LD0[78:75] - Not used by the SW
+ *	Bin:           LD0[80:79] - Not used by the SW
+ *	SW Revision:   LD0[115:113]
+ *	Cluster 1 PWR: LD0[193] - if set to 1, power down CPU Cluster-1
+ *				  resulting in 2 CPUs active only (7020)
+ */
+#define MVEBU_AP_LD_EFUSE_BASE		(MVEBU_AP_GEN_MGMT_BASE + 0xF00)
+/* Bits [94:63] - 32 data bits total */
+#define MVEBU_AP_LD0_94_63_EFUSE_OFFS	(MVEBU_AP_LD_EFUSE_BASE + 0x8)
+/* Bits [125:95] - 31 data bits total, 32nd bit is parity for bits [125:63] */
+#define MVEBU_AP_LD0_125_95_EFUSE_OFFS	(MVEBU_AP_LD_EFUSE_BASE + 0xC)
+/* Bits [220:189] - 32 data bits total */
+#define MVEBU_AP_LD0_220_189_EFUSE_OFFS	(MVEBU_AP_LD_EFUSE_BASE + 0x18)
+/* Offsets for the above 2 fields combined into single 64-bit value [125:63] */
+#define EFUSE_AP_LD0_DRO_OFFS		2		/* LD0[74:65] */
+#define EFUSE_AP_LD0_DRO_MASK		0x3FF
+#define EFUSE_AP_LD0_REVID_OFFS		12		/* LD0[78:75] */
+#define EFUSE_AP_LD0_REVID_MASK		0xF
+#define EFUSE_AP_LD0_BIN_OFFS		16		/* LD0[80:79] */
+#define EFUSE_AP_LD0_BIN_MASK		0x3
+#define EFUSE_AP_LD0_SWREV_OFFS		50		/* LD0[115:113] */
+#define EFUSE_AP_LD0_SWREV_MASK		0x7
+
+#define EFUSE_AP_LD0_SVC1_OFFS		18		/* LD0[88:81] */
+#define EFUSE_AP_LD0_SVC2_OFFS		26		/* LD0[96:89] */
+#define EFUSE_AP_LD0_SVC3_OFFS		34		/* LD0[104:97] */
+#define EFUSE_AP_LD0_SVC4_OFFS		42		/* LD0[112:105] */
+#define EFUSE_AP_LD0_WP_MASK		0xFF
+
+#define EFUSE_AP_LD0_CLUSTER_DOWN_OFFS	4
+
+/* Return the AP chip ID (AP806/AP807) */
+static unsigned int ble_get_ap_type(void)
+{
+	unsigned int chip_rev_id;
+
+	chip_rev_id = mmio_read_32(MVEBU_CSS_GWD_CTRL_IIDR2_REG);
+	chip_rev_id = ((chip_rev_id & GWD_IIDR2_CHIP_ID_MASK) >>
+			GWD_IIDR2_CHIP_ID_OFFSET);
+
+	return chip_rev_id;
+}
+
+/******************************************************************************
+ * This routine saves the CCU and IO windows configuration during DRAM setup
+ * and restores it afterwards, before exiting the BLE stage.
+ * Such window configuration is required since not all default settings coming
+ * from the HW and the BootROM allow access to peripherals connected to
+ * all available CPn components.
+ * For instance, when the boot device is located on CP0, the IO window to CP1
+ * is not opened automatically by the HW, and if the DRAM SPD is located on the
+ * CP1 i2c channel, it cannot be read at the BLE stage.
+ * Therefore the DRAM init procedure has to provide access to all available
+ * CPn peripherals during the BLE stage by setting the CCU IO window to all
+ * CPn peripheral addresses and by enabling the IO windows accordingly.
+ * Additionally this function configures the CCU GCR to DRAM, which allows
+ * usage of more DRAM than the 4GB covered by the default CCU DRAM window.
+ *
+ * IN:
+ *	MMAP_SAVE_AND_CONFIG	- save the existing configuration and update it
+ *	MMAP_RESTORE_SAVED	- restore saved configuration
+ * OUT:
+ *	NONE
+ ****************************************************************************
+ */
+static void ble_plat_mmap_config(int restore)
+{
+	if (restore == MMAP_RESTORE_SAVED) {
+		/* Restore the original CCU settings modified by the BLE stage */
+		ccu_restore_win_all(MVEBU_AP0);
+		/* Restore the original IO windows configuration */
+		iow_restore_win_all(MVEBU_AP0);
+		return;
+	}
+
+	/* Store the original CCU configuration */
+	ccu_save_win_all(MVEBU_AP0);
+	/* Store the original IO windows configuration */
+	iow_save_win_all(MVEBU_AP0);
+
+	/* The configuration is saved, now all the changes can be done */
+	init_ccu(MVEBU_AP0);
+	init_io_win(MVEBU_AP0);
+}
+
+/****************************************************************************
+ * Setup Adaptive Voltage Switching - this is required for some platforms
+ ****************************************************************************
+ */
+static void ble_plat_avs_config(void)
+{
+	uint32_t reg_val, device_id;
+
+	/* Check which SoC is running and act accordingly */
+	if (ble_get_ap_type() == CHIP_ID_AP807) {
+		VERBOSE("AVS: Setting AP807 AVS CTRL to 0x%x\n",
+			AVS_A3900_CLK_VALUE);
+		mmio_write_32(AVS_EN_CTRL_REG, AVS_A3900_CLK_VALUE);
+		return;
+	}
+
+	/* Check which SoC is running and act accordingly */
+	device_id = cp110_device_id_get(MVEBU_CP_REGS_BASE(0));
+	switch (device_id) {
+	case MVEBU_80X0_DEV_ID:
+	case MVEBU_80X0_CP115_DEV_ID:
+		/* Set the new AVS value - fix the default one on A80x0 */
+		mmio_write_32(AVS_EN_CTRL_REG, AVS_A8K_CLK_VALUE);
+		break;
+	case MVEBU_70X0_DEV_ID:
+	case MVEBU_70X0_CP115_DEV_ID:
+		/* Only fix AVS for CPU clocks lower than 1600MHz on A70x0 */
+		reg_val = mmio_read_32(MVEBU_AP_SAR_REG_BASE(
+						FREQ_MODE_AP_SAR_REG_NUM));
+		reg_val &= SAR_CLOCK_FREQ_MODE_MASK;
+		reg_val >>= SAR_CLOCK_FREQ_MODE_OFFSET;
+		if ((reg_val > CPU_1600_DDR_900_RCLK_900_2) &&
+		    (reg_val < CPU_DDR_RCLK_INVALID))
+			mmio_write_32(AVS_EN_CTRL_REG, AVS_A7K_LOW_CLK_VALUE);
+		break;
+	default:
+		ERROR("Unsupported Device ID 0x%x\n", device_id);
+	}
+}
+
+/****************************************************************************
+ * SVC flow - v0.10
+ * The feature is intended to configure AVS value according to eFuse values
+ * that are burned individually for each SoC during the test process.
+ * Primary AVS value is stored in HD efuse and processed on power on
+ * by the HW engine
+ * Secondary AVS value is located in LD efuse and contains 4 work points for
+ * various CPU frequencies.
+ * The Secondary AVS value is only taken into account if the SW Revision stored
+ * in the efuse is greater than 0 and the CPU is running at a certain speed.
+ ****************************************************************************
+ */
+static void ble_plat_svc_config(void)
+{
+	uint32_t reg_val, avs_workpoint, freq_pidi_mode;
+	uint64_t efuse;
+	uint32_t device_id, single_cluster;
+	uint8_t  svc[4], perr[4], i, sw_ver;
+
+	/* Due to a bug in the A3900 device_id, skip the SVC config.
+	 * TODO: add the SVC config once it is decided for A3900.
+	 */
+	if (ble_get_ap_type() == CHIP_ID_AP807) {
+		NOTICE("SVC: SVC is not supported on AP807\n");
+		ble_plat_avs_config();
+		return;
+	}
+
+	/* Set access to LD0 */
+	reg_val = mmio_read_32(MVEBU_AP_EFUSE_SRV_CTRL_REG);
+	reg_val &= ~EFUSE_SRV_CTRL_LD_SELECT_OFFS;
+	mmio_write_32(MVEBU_AP_EFUSE_SRV_CTRL_REG, reg_val);
+
+	/* Obtain the value of LD0[125:63] */
+	efuse = mmio_read_32(MVEBU_AP_LD0_125_95_EFUSE_OFFS);
+	efuse <<= 32;
+	efuse |= mmio_read_32(MVEBU_AP_LD0_94_63_EFUSE_OFFS);
+
+	/* SW Revision:
+	 * Starting from SW revision 1 the SVC flow is supported.
+	 * SW version 0 (efuse not programmed) should follow the
+	 * regular AVS update flow.
+	 */
+	sw_ver = (efuse >> EFUSE_AP_LD0_SWREV_OFFS) & EFUSE_AP_LD0_SWREV_MASK;
+	if (sw_ver < 1) {
+		NOTICE("SVC: SW Revision 0x%x. SVC is not supported\n", sw_ver);
+		ble_plat_avs_config();
+		return;
+	}
+
+	/* Frequency mode from SAR */
+	freq_pidi_mode = SAR_CLOCK_FREQ_MODE(
+				mmio_read_32(
+					MVEBU_AP_SAR_REG_BASE(
+						FREQ_MODE_AP_SAR_REG_NUM)));
+
+	/* Decode all SVC work points */
+	svc[0] = (efuse >> EFUSE_AP_LD0_SVC1_OFFS) & EFUSE_AP_LD0_WP_MASK;
+	svc[1] = (efuse >> EFUSE_AP_LD0_SVC2_OFFS) & EFUSE_AP_LD0_WP_MASK;
+	svc[2] = (efuse >> EFUSE_AP_LD0_SVC3_OFFS) & EFUSE_AP_LD0_WP_MASK;
+	svc[3] = (efuse >> EFUSE_AP_LD0_SVC4_OFFS) & EFUSE_AP_LD0_WP_MASK;
+	INFO("SVC: Efuse WP: [0]=0x%x, [1]=0x%x, [2]=0x%x, [3]=0x%x\n",
+		svc[0], svc[1], svc[2], svc[3]);
+
+	/* Validate parity of SVC workpoint values */
+	for (i = 0; i < 4; i++) {
+		uint8_t parity, bit;
+
+		perr[i] = 0;
+
+		for (bit = 1, parity = svc[i] & 1; bit < 7; bit++)
+			parity ^= (svc[i] >> bit) & 1;
+
+		/* Starting from SW version 2, the parity check is mandatory */
+		if ((sw_ver > 1) && (parity != ((svc[i] >> 7) & 1)))
+			perr[i] = 1; /* register the error */
+	}
+
+	single_cluster = mmio_read_32(MVEBU_AP_LD0_220_189_EFUSE_OFFS);
+	single_cluster = (single_cluster >> EFUSE_AP_LD0_CLUSTER_DOWN_OFFS) & 1;
+
+	device_id = cp110_device_id_get(MVEBU_CP_REGS_BASE(0));
+	if (device_id == MVEBU_80X0_DEV_ID ||
+	    device_id == MVEBU_80X0_CP115_DEV_ID) {
+		/* A8040/A8020 */
+		NOTICE("SVC: DEV ID: %s, FREQ Mode: 0x%x\n",
+			single_cluster == 0 ? "8040" : "8020", freq_pidi_mode);
+		switch (freq_pidi_mode) {
+		case CPU_1800_DDR_1200_RCLK_1200:
+		case CPU_1800_DDR_1050_RCLK_1050:
+			if (perr[1])
+				goto perror;
+			avs_workpoint = svc[1];
+			break;
+		case CPU_1600_DDR_1050_RCLK_1050:
+		case CPU_1600_DDR_900_RCLK_900_2:
+			if (perr[2])
+				goto perror;
+			avs_workpoint = svc[2];
+			break;
+		case CPU_1300_DDR_800_RCLK_800:
+		case CPU_1300_DDR_650_RCLK_650:
+			if (perr[3])
+				goto perror;
+			avs_workpoint = svc[3];
+			break;
+		case CPU_2000_DDR_1200_RCLK_1200:
+		case CPU_2000_DDR_1050_RCLK_1050:
+		default:
+			if (perr[0])
+				goto perror;
+			avs_workpoint = svc[0];
+			break;
+		}
+	} else if (device_id == MVEBU_70X0_DEV_ID ||
+		   device_id == MVEBU_70X0_CP115_DEV_ID) {
+		/* A7040/A7020/A6040 */
+		NOTICE("SVC: DEV ID: %s, FREQ Mode: 0x%x\n",
+			single_cluster == 0 ? "7040" : "7020", freq_pidi_mode);
+		switch (freq_pidi_mode) {
+		case CPU_1400_DDR_800_RCLK_800:
+			if (single_cluster) {/* 7020 */
+				if (perr[1])
+					goto perror;
+				avs_workpoint = svc[1];
+			} else {
+				if (perr[0])
+					goto perror;
+				avs_workpoint = svc[0];
+			}
+			break;
+		case CPU_1200_DDR_800_RCLK_800:
+			if (single_cluster) {/* 7020 */
+				if (perr[2])
+					goto perror;
+				avs_workpoint = svc[2];
+			} else {
+				if (perr[1])
+					goto perror;
+				avs_workpoint = svc[1];
+			}
+			break;
+		case CPU_800_DDR_800_RCLK_800:
+		case CPU_1000_DDR_800_RCLK_800:
+			if (single_cluster) {/* 7020 */
+				if (perr[3])
+					goto perror;
+				avs_workpoint = svc[3];
+			} else {
+				if (perr[2])
+					goto perror;
+				avs_workpoint = svc[2];
+			}
+			break;
+		case CPU_600_DDR_800_RCLK_800:
+			if (perr[3])
+				goto perror;
+			avs_workpoint = svc[3]; /* Same for 6040 and 7020 */
+			break;
+		case CPU_1600_DDR_800_RCLK_800: /* 7020 only */
+		default:
+			if (single_cluster) {/* 7020 */
+				if (perr[0])
+					goto perror;
+				avs_workpoint = svc[0];
+			} else
+				avs_workpoint = 0;
+			break;
+		}
+	} else {
+		ERROR("SVC: Unsupported Device ID 0x%x\n", device_id);
+		return;
+	}
+
+	/* Set AVS control if needed */
+	if (avs_workpoint == 0) {
+		ERROR("SVC: AVS work point not changed\n");
+		return;
+	}
+
+	/* Remove parity bit */
+	avs_workpoint &= 0x7F;
+
+	reg_val  = mmio_read_32(AVS_EN_CTRL_REG);
+	NOTICE("SVC: AVS work point changed from 0x%x to 0x%x\n",
+		(reg_val & AVS_VDD_LOW_LIMIT_MASK) >> AVS_LOW_VDD_LIMIT_OFFSET,
+		avs_workpoint);
+	reg_val &= ~(AVS_VDD_LOW_LIMIT_MASK | AVS_VDD_HIGH_LIMIT_MASK);
+	reg_val |= 0x1 << AVS_ENABLE_OFFSET;
+	reg_val |= avs_workpoint << AVS_HIGH_VDD_LIMIT_OFFSET;
+	reg_val |= avs_workpoint << AVS_LOW_VDD_LIMIT_OFFSET;
+	mmio_write_32(AVS_EN_CTRL_REG, reg_val);
+	return;
+
+perror:
+	ERROR("Failed SVC WP[%d] parity check!\n", i);
+	ERROR("Ignoring the WP values\n");
+}
+
+#if PLAT_RECOVERY_IMAGE_ENABLE
+static int ble_skip_image_i2c(struct skip_image *skip_im)
+{
+	ERROR("skipping image using i2c is not supported\n");
+	/* not supported */
+	return 0;
+}
+
+static int ble_skip_image_other(struct skip_image *skip_im)
+{
+	ERROR("implementation missing for skip image request\n");
+	/* not supported, make your own implementation */
+	return 0;
+}
+
+static int ble_skip_image_gpio(struct skip_image *skip_im)
+{
+	unsigned int val;
+	unsigned int mpp_address = 0;
+	unsigned int offset = 0;
+
+	switch (skip_im->info.test.cp_ap) {
+	case(CP):
+		mpp_address = MVEBU_CP_GPIO_DATA_IN(skip_im->info.test.cp_index,
+						    skip_im->info.gpio.num);
+		if (skip_im->info.gpio.num > NUM_OF_GPIO_PER_REG)
+			offset = skip_im->info.gpio.num - NUM_OF_GPIO_PER_REG;
+		else
+			offset = skip_im->info.gpio.num;
+		break;
+	case(AP):
+		mpp_address = MVEBU_AP_GPIO_DATA_IN;
+		offset = skip_im->info.gpio.num;
+		break;
+	}
+
+	val = mmio_read_32(mpp_address);
+	val &= (1 << offset);
+	if ((!val && skip_im->info.gpio.button_state == HIGH) ||
+	    (val && skip_im->info.gpio.button_state == LOW)) {
+		mmio_write_32(SCRATCH_PAD_REG2, SCRATCH_PAD_SKIP_VAL);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * This function checks if there is a skip image request:
+ * return values:
+ * 1: (true) a skip image request has been made.
+ * 0: (false) no skip image request has been made.
+ */
+static int ble_skip_current_image(void)
+{
+	struct skip_image *skip_im;
+
+	/* fetching skip image info */
+	skip_im = (struct skip_image *)plat_marvell_get_skip_image_data();
+
+	if (skip_im == NULL)
+		return 0;
+
+	/* check if skipping image request has already been made */
+	if (mmio_read_32(SCRATCH_PAD_REG2) == SCRATCH_PAD_SKIP_VAL)
+		return 0;
+
+	switch (skip_im->detection_method) {
+	case GPIO:
+		return ble_skip_image_gpio(skip_im);
+	case I2C:
+		return ble_skip_image_i2c(skip_im);
+	case USER_DEFINED:
+		return ble_skip_image_other(skip_im);
+	}
+
+	return 0;
+}
+#endif
+
+/* Switch from ARO to PLL in AP807 */
+static void aro_to_pll(void)
+{
+	unsigned int reg;
+
+	/* switch from ARO to PLL */
+	reg = mmio_read_32(AP807_CPU_ARO_0_CTRL_0);
+	reg |= AP807_CPU_ARO_SEL_PLL_MASK;
+	mmio_write_32(AP807_CPU_ARO_0_CTRL_0, reg);
+
+	reg = mmio_read_32(AP807_CPU_ARO_1_CTRL_0);
+	reg |= AP807_CPU_ARO_SEL_PLL_MASK;
+	mmio_write_32(AP807_CPU_ARO_1_CTRL_0, reg);
+
+	mdelay(1000);
+
+	/* disable ARO clk driver */
+	reg = mmio_read_32(AP807_CPU_ARO_0_CTRL_0);
+	reg |= (AP807_CPU_ARO_CLK_EN_MASK);
+	mmio_write_32(AP807_CPU_ARO_0_CTRL_0, reg);
+
+	reg = mmio_read_32(AP807_CPU_ARO_1_CTRL_0);
+	reg |= (AP807_CPU_ARO_CLK_EN_MASK);
+	mmio_write_32(AP807_CPU_ARO_1_CTRL_0, reg);
+}
+
+int ble_plat_setup(int *skip)
+{
+	int ret;
+
+	/* Power down unused CPUs */
+	plat_marvell_early_cpu_powerdown();
+
+	/*
+	 * Save the current CCU configuration and make required changes:
+	 * - Allow access to DRAM larger than 4GB
+	 * - Open memory access to all CPn peripherals
+	 */
+	ble_plat_mmap_config(MMAP_SAVE_AND_CONFIG);
+
+#if PLAT_RECOVERY_IMAGE_ENABLE
+	/* Check if there is a request to skip to the BootROM recovery image */
+	if (ble_skip_current_image()) {
+		/* close memory access to all CPn peripherals. */
+		ble_plat_mmap_config(MMAP_RESTORE_SAVED);
+		*skip = 1;
+		return 0;
+	}
+#endif
+	/* Do required CP-110 setups for BLE stage */
+	cp110_ble_init(MVEBU_CP_REGS_BASE(0));
+
+	/* Setup AVS */
+	ble_plat_svc_config();
+
+	/* work with PLL clock driver in AP807 */
+	if (ble_get_ap_type() == CHIP_ID_AP807)
+		aro_to_pll();
+
+	/* Do required AP setups for BLE stage */
+	ap_ble_init();
+
+	/* Update DRAM topology (scan DIMM SPDs) */
+	plat_marvell_dram_update_topology();
+
+	/* Kick it in */
+	ret = dram_init();
+
+	/* Restore the original CCU configuration before exit from BLE */
+	ble_plat_mmap_config(MMAP_RESTORE_SAVED);
+
+	return ret;
+}
diff --git a/plat/marvell/a8k/common/plat_pm.c b/plat/marvell/a8k/common/plat_pm.c
new file mode 100644
index 0000000..c716ee0
--- /dev/null
+++ b/plat/marvell/a8k/common/plat_pm.c
@@ -0,0 +1,829 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <a8k_common.h>
+#include <assert.h>
+#include <bakery_lock.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <cache_llc.h>
+#include <console.h>
+#include <gicv2.h>
+#include <marvell_pm.h>
+#include <mmio.h>
+#include <mss_pm_ipc.h>
+#include <plat_marvell.h>
+#include <platform.h>
+#include <plat_pm_trace.h>
+#include <platform.h>
+
+#define MVEBU_PRIVATE_UID_REG		0x30
+#define MVEBU_RFU_GLOBL_SW_RST		0x84
+#define MVEBU_CCU_RVBAR(cpu)		(MVEBU_REGS_BASE + 0x640 + (cpu * 4))
+#define MVEBU_CCU_CPU_UN_RESET(cpu)	(MVEBU_REGS_BASE + 0x650 + (cpu * 4))
+
+#define MPIDR_CPU_GET(mpidr)		((mpidr) & MPIDR_CPU_MASK)
+#define MPIDR_CLUSTER_GET(mpidr)	MPIDR_AFFLVL1_VAL((mpidr))
+
+#define MVEBU_GPIO_MASK(index)		(1 << (index % 32))
+#define MVEBU_MPP_MASK(index)		(0xF << (4 * (index % 8)))
+#define MVEBU_GPIO_VALUE(index, value)	(value << (index % 32))
+
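+/* DRAM controller registers used to enter self-refresh before power-off */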
+#define MVEBU_USER_CMD_0_REG		(MVEBU_DRAM_MAC_BASE + 0x20)
+#define MVEBU_USER_CMD_CH0_OFFSET	28
+#define MVEBU_USER_CMD_CH0_MASK		(1 << MVEBU_USER_CMD_CH0_OFFSET)
+#define MVEBU_USER_CMD_CH0_EN		(1 << MVEBU_USER_CMD_CH0_OFFSET)
+#define MVEBU_USER_CMD_CS_OFFSET	24
+#define MVEBU_USER_CMD_CS_MASK		(0xF << MVEBU_USER_CMD_CS_OFFSET)
+#define MVEBU_USER_CMD_CS_ALL		(0xF << MVEBU_USER_CMD_CS_OFFSET)
+#define MVEBU_USER_CMD_SR_OFFSET	6
+#define MVEBU_USER_CMD_SR_MASK		(0x3 << MVEBU_USER_CMD_SR_OFFSET)
+#define MVEBU_USER_CMD_SR_ENTER		(0x1 << MVEBU_USER_CMD_SR_OFFSET)
+#define MVEBU_MC_PWR_CTRL_REG		(MVEBU_DRAM_MAC_BASE + 0x54)
+#define MVEBU_MC_AC_ON_DLY_OFFSET	8
+#define MVEBU_MC_AC_ON_DLY_MASK		(0xF << MVEBU_MC_AC_ON_DLY_OFFSET)
+#define MVEBU_MC_AC_ON_DLY_DEF_VAR	(8 << MVEBU_MC_AC_ON_DLY_OFFSET)
+#define MVEBU_MC_AC_OFF_DLY_OFFSET	4
+#define MVEBU_MC_AC_OFF_DLY_MASK	(0xF << MVEBU_MC_AC_OFF_DLY_OFFSET)
+#define MVEBU_MC_AC_OFF_DLY_DEF_VAR	(0xC << MVEBU_MC_AC_OFF_DLY_OFFSET)
+#define MVEBU_MC_PHY_AUTO_OFF_OFFSET	0
+#define MVEBU_MC_PHY_AUTO_OFF_MASK	(1 << MVEBU_MC_PHY_AUTO_OFF_OFFSET)
+#define MVEBU_MC_PHY_AUTO_OFF_EN	(1 << MVEBU_MC_PHY_AUTO_OFF_OFFSET)
+
+/* This lock synchronizes access to the MSS from multiple AP cores */
+DEFINE_BAKERY_LOCK(pm_sys_lock);
+
+/* Weak definitions may be overridden by the specific board port */
+#pragma weak plat_marvell_get_pm_cfg
+
+/* AP806 CPU power down /power up definitions */
+enum CPU_ID {
+	CPU0,
+	CPU1,
+	CPU2,
+	CPU3
+};
+
+#define REG_WR_VALIDATE_TIMEOUT		(2000)
+
+#define FEATURE_DISABLE_STATUS_REG			\
+			(MVEBU_REGS_BASE + 0x6F8230)
+#define FEATURE_DISABLE_STATUS_CPU_CLUSTER_OFFSET	4
+#define FEATURE_DISABLE_STATUS_CPU_CLUSTER_MASK		\
+			(0x1 << FEATURE_DISABLE_STATUS_CPU_CLUSTER_OFFSET)
+
+#ifdef MVEBU_SOC_AP807
+	#define PWRC_CPUN_CR_PWR_DN_RQ_OFFSET		1
+	#define PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET	0
+#else
+	#define PWRC_CPUN_CR_PWR_DN_RQ_OFFSET		0
+	#define PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET	31
+#endif
+
+#define PWRC_CPUN_CR_REG(cpu_id)		\
+			(MVEBU_REGS_BASE + 0x680000 + (cpu_id * 0x10))
+#define PWRC_CPUN_CR_PWR_DN_RQ_MASK		\
+			(0x1 << PWRC_CPUN_CR_PWR_DN_RQ_OFFSET)
+#define PWRC_CPUN_CR_ISO_ENABLE_OFFSET		16
+#define PWRC_CPUN_CR_ISO_ENABLE_MASK		\
+			(0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET)
+#define PWRC_CPUN_CR_LDO_BYPASS_RDY_MASK	\
+			(0x1 << PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET)
+
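+/* Per-CPU power-on reset control register in the CCU;
+ * one 32-bit register per core, grouped two cores per 0x400 block
+ */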
+#define CCU_B_PRCRN_REG(cpu_id)			\
+			(MVEBU_REGS_BASE + 0x1A50 + \
+			((cpu_id / 2) * (0x400)) + ((cpu_id % 2) * 4))
+#define CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET	0
+#define CCU_B_PRCRN_CPUPORESET_STATIC_MASK	\
+			(0x1 << CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET)
+
+/* power switch fingers */
+#define AP807_PWRC_LDO_CR0_REG			\
+			(MVEBU_REGS_BASE + 0x680000 + 0x100)
+#define AP807_PWRC_LDO_CR0_OFFSET		16
+#define AP807_PWRC_LDO_CR0_MASK			\
+			(0xff << AP807_PWRC_LDO_CR0_OFFSET)
+#define AP807_PWRC_LDO_CR0_VAL			0xfd
+
+/*
+ * Power down CPU:
+ * Used to reduce power consumption and avoid an unnecessary SoC temperature rise.
+ */
+static int plat_marvell_cpu_powerdown(int cpu_id)
+{
+	uint32_t	reg_val;
+	int		exit_loop = REG_WR_VALIDATE_TIMEOUT;
+
+	INFO("Powering down CPU%d\n", cpu_id);
+
+	/* 1. Isolation enable */
+	reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+	reg_val |= 0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET;
+	mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);
+
+	/* 2. Read and check Isolation enabled - verify bit set to 1 */
+	do {
+		reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+		exit_loop--;
+	} while (!(reg_val & (0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET)) &&
+		 exit_loop > 0);
+
+	/* 3. Switch off CPU power */
+	reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+	reg_val &= ~PWRC_CPUN_CR_PWR_DN_RQ_MASK;
+	mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);
+
+	/* 4. Read and check Switch Off - verify bit set to 0 */
+	exit_loop = REG_WR_VALIDATE_TIMEOUT;
+	do {
+		reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+		exit_loop--;
+	} while (reg_val & PWRC_CPUN_CR_PWR_DN_RQ_MASK && exit_loop > 0);
+
+	if (exit_loop <= 0)
+		goto cpu_poweroff_error;
+
+	/* 5. De-Assert power ready */
+	reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+	reg_val &= ~PWRC_CPUN_CR_LDO_BYPASS_RDY_MASK;
+	mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);
+
+	/* 6. Assert CPU POR reset */
+	reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
+	reg_val &= ~CCU_B_PRCRN_CPUPORESET_STATIC_MASK;
+	mmio_write_32(CCU_B_PRCRN_REG(cpu_id), reg_val);
+
+	/* 7. Read and check the CPU POR reset was asserted - verify bit set to 0 */
+	exit_loop = REG_WR_VALIDATE_TIMEOUT;
+	do {
+		reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
+		exit_loop--;
+	} while (reg_val & CCU_B_PRCRN_CPUPORESET_STATIC_MASK && exit_loop > 0);
+
+	if (exit_loop <= 0)
+		goto cpu_poweroff_error;
+
+	INFO("Successfully powered down CPU%d\n", cpu_id);
+
+	return 0;
+
+cpu_poweroff_error:
+	ERROR("ERROR: Can't power down CPU%d\n", cpu_id);
+	return -1;
+}
+
+/*
+ * Power down CPUs 1-3 at early boot stage,
+ * to reduce power consumption and SoC temperature.
+ * This is triggered by BLE prior to DDR initialization.
+ *
+ * Note:
+ * All CPUs will be powered up by plat_marvell_cpu_powerup on Linux boot stage,
+ * which is triggered by PSCI ops (pwr_domain_on).
+ */
+int plat_marvell_early_cpu_powerdown(void)
+{
+	uint32_t cpu_cluster_status =
+		mmio_read_32(FEATURE_DISABLE_STATUS_REG) &
+			     FEATURE_DISABLE_STATUS_CPU_CLUSTER_MASK;
+	/* If the cpu_cluster_status bit is set,
+	 * the SoC has only a single cluster
+	 */
+	int cluster_count = cpu_cluster_status ? 1 : 2;
+
+	INFO("Powering off unused CPUs\n");
+
+	/* CPU1 is in AP806 cluster-0, which always exists, so power it down */
+	if (plat_marvell_cpu_powerdown(CPU1) == -1)
+		return -1;
+
+	/*
+	 * CPU2-3 are in the AP806 2nd cluster (cluster-1),
+	 * which does not exist in dual-core systems,
+	 * so check whether this is a dual-core (single cluster)
+	 * or quad-core (2 clusters) system
+	 */
+	if (cluster_count == 2) {
+		/* CPU2-3 are part of 2nd cluster */
+		if (plat_marvell_cpu_powerdown(CPU2) == -1)
+			return -1;
+		if (plat_marvell_cpu_powerdown(CPU3) == -1)
+			return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Power up CPU - part of Linux boot stage
+ */
+static int plat_marvell_cpu_powerup(u_register_t mpidr)
+{
+	uint32_t	reg_val;
+	int	cpu_id = MPIDR_CPU_GET(mpidr),
+		cluster = MPIDR_CLUSTER_GET(mpidr);
+	int	exit_loop = REG_WR_VALIDATE_TIMEOUT;
+
+	/* calculate absolute CPU ID */
+	cpu_id = cluster * PLAT_MARVELL_CLUSTER_CORE_COUNT + cpu_id;
+
+	INFO("Powering on CPU%d\n", cpu_id);
+
+#ifdef MVEBU_SOC_AP807
+	/* Activate 2 power switch fingers */
+	reg_val = mmio_read_32(AP807_PWRC_LDO_CR0_REG);
+	reg_val &= ~(AP807_PWRC_LDO_CR0_MASK);
+	reg_val |= (AP807_PWRC_LDO_CR0_VAL << AP807_PWRC_LDO_CR0_OFFSET);
+	mmio_write_32(AP807_PWRC_LDO_CR0_REG, reg_val);
+	udelay(100);
+#endif
+
+	/* 1. Switch CPU power ON */
+	reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+	reg_val |= 0x1 << PWRC_CPUN_CR_PWR_DN_RQ_OFFSET;
+	mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);
+
+	/* 2. Wait for CPU on, up to 100 uSec: */
+	udelay(100);
+
+	/* 3. Assert power ready */
+	reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+	reg_val |= 0x1 << PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET;
+	mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);
+
+	/* 4. Read & Validate power ready
+	 * used in order to generate 16 Host CPU cycles
+	 */
+	do {
+		reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+		exit_loop--;
+	} while (!(reg_val & (0x1 << PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET)) &&
+		 exit_loop > 0);
+
+	if (exit_loop <= 0)
+		goto cpu_poweron_error;
+
+	/* 5. Isolation disable */
+	reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+	reg_val &= ~PWRC_CPUN_CR_ISO_ENABLE_MASK;
+	mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);
+
+	/* 6. Read and check Isolation disabled - verify bit set to 0 */
+	exit_loop = REG_WR_VALIDATE_TIMEOUT;
+	do {
+		reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+		exit_loop--;
+	} while ((reg_val & (0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET)) &&
+		 exit_loop > 0);
+
+	/* 7. De-assert CPU POR reset & Core reset */
+	reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
+	reg_val |= 0x1 << CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET;
+	mmio_write_32(CCU_B_PRCRN_REG(cpu_id), reg_val);
+
+	/* 8. Read & validate the CPU POR reset was de-asserted - verify bit set to 1 */
+	exit_loop = REG_WR_VALIDATE_TIMEOUT;
+	do {
+		reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
+		exit_loop--;
+	} while (!(reg_val & (0x1 << CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET)) &&
+		 exit_loop > 0);
+
+	if (exit_loop <= 0)
+		goto cpu_poweron_error;
+
+	INFO("Successfully powered on CPU%d\n", cpu_id);
+
+	return 0;
+
+cpu_poweron_error:
+	ERROR("ERROR: Can't power up CPU%d\n", cpu_id);
+	return -1;
+}
+
+static int plat_marvell_cpu_on(u_register_t mpidr)
+{
+	int cpu_id;
+	int cluster;
+
+	/* Set barrier */
+	dsbsy();
+
+	/* Get cpu number - use CPU ID */
+	cpu_id =  MPIDR_CPU_GET(mpidr);
+
+	/* Get cluster number - use affinity level 1 */
+	cluster = MPIDR_CLUSTER_GET(mpidr);
+
+	/* Set CPU private UID */
+	mmio_write_32(MVEBU_REGS_BASE + MVEBU_PRIVATE_UID_REG, cluster + 0x4);
+
+	/* Set the cpu start address to BL1 entry point (align to 0x10000) */
+	mmio_write_32(MVEBU_CCU_RVBAR(cpu_id),
+		      PLAT_MARVELL_CPU_ENTRY_ADDR >> 16);
+
+	/* Get the cpu out of reset */
+	mmio_write_32(MVEBU_CCU_CPU_UN_RESET(cpu_id), 0x10001);
+
+	return 0;
+}
+
+/*****************************************************************************
+ * A8K handler called to check the validity of the power state
+ * parameter.
+ *****************************************************************************
+ */
+static int a8k_validate_power_state(unsigned int power_state,
+			    psci_power_state_t *req_state)
+{
+	int pstate = psci_get_pstate_type(power_state);
+	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
+	int i;
+
+	if (pwr_lvl > PLAT_MAX_PWR_LVL)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Sanity check the requested state */
+	if (pstate == PSTATE_TYPE_STANDBY) {
+		/*
+		 * It is possible to enter standby only at power level 0.
+		 * Ignore any other power level.
+		 */
+		if (pwr_lvl != MARVELL_PWR_LVL0)
+			return PSCI_E_INVALID_PARAMS;
+
+		req_state->pwr_domain_state[MARVELL_PWR_LVL0] =
+					MARVELL_LOCAL_STATE_RET;
+	} else {
+		for (i = MARVELL_PWR_LVL0; i <= pwr_lvl; i++)
+			req_state->pwr_domain_state[i] =
+					MARVELL_LOCAL_STATE_OFF;
+	}
+
+	/*
+	 * We expect the 'state id' to be zero.
+	 */
+	if (psci_get_pstate_id(power_state))
+		return PSCI_E_INVALID_PARAMS;
+
+	return PSCI_E_SUCCESS;
+}
+
+/*****************************************************************************
+ * A8K handler called when a CPU is about to enter standby.
+ *****************************************************************************
+ */
+static void a8k_cpu_standby(plat_local_state_t cpu_state)
+{
+	ERROR("%s: needs to be implemented\n", __func__);
+	panic();
+}
+
+/*****************************************************************************
+ * A8K handler called when a power domain is about to be turned on. The
+ * mpidr determines the CPU to be turned on.
+ *****************************************************************************
+ */
+static int a8k_pwr_domain_on(u_register_t mpidr)
+{
+	/* Power up CPU (CPUs 1-3 are powered off at start of BLE) */
+	plat_marvell_cpu_powerup(mpidr);
+
+	if (is_pm_fw_running()) {
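+		/* Linear core index: AFF0 + AFF1 * 2 (two cores per cluster) */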
+		unsigned int target =
+				((mpidr & 0xFF) + (((mpidr >> 8) & 0xFF) * 2));
+
+		/*
+		 * pm system synchronization - used to synchronize
+		 * multiple core access to MSS
+		 */
+		bakery_lock_get(&pm_sys_lock);
+
+		/* send CPU ON IPC Message to MSS */
+		mss_pm_ipc_msg_send(target, PM_IPC_MSG_CPU_ON, 0);
+
+		/* trigger IPC message to MSS */
+		mss_pm_ipc_msg_trigger();
+
+		/* pm system synchronization */
+		bakery_lock_release(&pm_sys_lock);
+
+		/* trace message */
+		PM_TRACE(TRACE_PWR_DOMAIN_ON | target);
+	} else {
+		/* proprietary CPU ON execution flow */
+		plat_marvell_cpu_on(mpidr);
+	}
+
+	return 0;
+}
+
+/*****************************************************************************
+ * A8K handler called to validate the entry point.
+ *****************************************************************************
+ */
+static int a8k_validate_ns_entrypoint(uintptr_t entrypoint)
+{
+	return PSCI_E_SUCCESS;
+}
+
+/*****************************************************************************
+ * A8K handler called when a power domain is about to be turned off. The
+ * target_state encodes the power state that each level should transition to.
+ *****************************************************************************
+ */
+static void a8k_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	if (is_pm_fw_running()) {
+		unsigned int idx = plat_my_core_pos();
+
+		/* Prevent interrupts from spuriously waking up this cpu */
+		gicv2_cpuif_disable();
+
+		/* pm system synchronization - used to synchronize multiple
+		 * core access to MSS
+		 */
+		bakery_lock_get(&pm_sys_lock);
+
+		/* send CPU OFF IPC Message to MSS */
+		mss_pm_ipc_msg_send(idx, PM_IPC_MSG_CPU_OFF, target_state);
+
+		/* trigger IPC message to MSS */
+		mss_pm_ipc_msg_trigger();
+
+		/* pm system synchronization */
+		bakery_lock_release(&pm_sys_lock);
+
+		/* trace message */
+		PM_TRACE(TRACE_PWR_DOMAIN_OFF);
+	} else {
+		INFO("%s: is not supported without SCP\n", __func__);
+	}
+}
+
+/* Get PM config to power off the SoC */
+void *plat_marvell_get_pm_cfg(void)
+{
+	return NULL;
+}
+
+/*
+ * This function should be called on restore from the
+ * "suspend to RAM" state, when the execution flow
+ * has to bypass the BootROM image-to-RAM copy and speed up
+ * system recovery
+ */
+static void plat_marvell_exit_bootrom(void)
+{
+	marvell_exit_bootrom(PLAT_MARVELL_TRUSTED_ROM_BASE);
+}
+
+/*
+ * Prepare for the power off of the system via GPIO
+ */
+static void plat_marvell_power_off_gpio(struct power_off_method *pm_cfg,
+					register_t *gpio_addr,
+					register_t *gpio_data)
+{
+	unsigned int gpio;
+	unsigned int idx;
+	unsigned int shift;
+	unsigned int reg;
+	unsigned int addr;
+	gpio_info_t *info;
+	unsigned int tog_bits;
+
+	assert((pm_cfg->cfg.gpio.pin_count < PMIC_GPIO_MAX_NUMBER) &&
+	       (pm_cfg->cfg.gpio.step_count < PMIC_GPIO_MAX_TOGGLE_STEP));
+
+	/* Prepare GPIOs for PMIC */
+	for (gpio = 0; gpio < pm_cfg->cfg.gpio.pin_count; gpio++) {
+		info = &pm_cfg->cfg.gpio.info[gpio];
+		/* Set PMIC GPIO to output mode */
+		reg = mmio_read_32(MVEBU_CP_GPIO_DATA_OUT_EN(
+				   info->cp_index, info->gpio_index));
+		mmio_write_32(MVEBU_CP_GPIO_DATA_OUT_EN(
+			      info->cp_index, info->gpio_index),
+			      reg & ~MVEBU_GPIO_MASK(info->gpio_index));
+
+		/* Set the appropriate MPP to GPIO mode */
+		reg = mmio_read_32(MVEBU_PM_MPP_REGS(info->cp_index,
+						     info->gpio_index));
+		mmio_write_32(MVEBU_PM_MPP_REGS(info->cp_index,
+						info->gpio_index),
+			reg & ~MVEBU_MPP_MASK(info->gpio_index));
+	}
+
+	/* Wait for MPP & GPIO pre-configurations done */
+	mdelay(pm_cfg->cfg.gpio.delay_ms);
+
+	/* Toggle the GPIO values, and leave the final step to be triggered
+	 * after DDR self-refresh is enabled
+	 */
+	for (idx = 0; idx < pm_cfg->cfg.gpio.step_count; idx++) {
+		tog_bits = pm_cfg->cfg.gpio.seq[idx];
+
+		/* The GPIOs must be within the same GPIO register,
+		 * so the original value can be read via the first GPIO
+		 */
+		info = &pm_cfg->cfg.gpio.info[0];
+		reg = mmio_read_32(MVEBU_CP_GPIO_DATA_OUT(
+				   info->cp_index, info->gpio_index));
+		addr = MVEBU_CP_GPIO_DATA_OUT(info->cp_index, info->gpio_index);
+
+		for (gpio = 0; gpio < pm_cfg->cfg.gpio.pin_count; gpio++) {
+			shift = pm_cfg->cfg.gpio.info[gpio].gpio_index % 32;
+			if (GPIO_LOW == (tog_bits & (1 << gpio)))
+				reg &= ~(1 << shift);
+			else
+				reg |= (1 << shift);
+		}
+
+		/* Set the GPIO register; for the last step just store the
+		 * register address and value to be issued later
+		 */
+		if (idx < pm_cfg->cfg.gpio.step_count - 1) {
+			mmio_write_32(MVEBU_CP_GPIO_DATA_OUT(
+				      info->cp_index, info->gpio_index), reg);
+			mdelay(pm_cfg->cfg.gpio.delay_ms);
+		} else {
+			/* Save GPIO register and address values for
+			 * finishing the power down operation later
+			 */
+			*gpio_addr = addr;
+			*gpio_data = reg;
+		}
+	}
+}
+
+/*
+ * Prepare for the power off of the system
+ */
+static void plat_marvell_power_off_prepare(struct power_off_method *pm_cfg,
+					   register_t *addr, register_t *data)
+{
+	switch (pm_cfg->type) {
+	case PMIC_GPIO:
+		plat_marvell_power_off_gpio(pm_cfg, addr, data);
+		break;
+	default:
+		break;
+	}
+}
+
+/*****************************************************************************
+ * A8K handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
+ *****************************************************************************
+ */
+static void a8k_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+	if (is_pm_fw_running()) {
+		unsigned int idx;
+
+		/* Prevent interrupts from spuriously waking up this cpu */
+		gicv2_cpuif_disable();
+
+		idx = plat_my_core_pos();
+
+		/* pm system synchronization - used to synchronize multiple
+		 * core access to MSS
+		 */
+		bakery_lock_get(&pm_sys_lock);
+
+		/* send CPU Suspend IPC Message to MSS */
+		mss_pm_ipc_msg_send(idx, PM_IPC_MSG_CPU_SUSPEND, target_state);
+
+		/* trigger IPC message to MSS */
+		mss_pm_ipc_msg_trigger();
+
+		/* pm system synchronization */
+		bakery_lock_release(&pm_sys_lock);
+
+		/* trace message */
+		PM_TRACE(TRACE_PWR_DOMAIN_SUSPEND);
+	} else {
+		uintptr_t *mailbox = (void *)PLAT_MARVELL_MAILBOX_BASE;
+
+		INFO("Suspending to RAM\n");
+
+		/* Prevent interrupts from spuriously waking up this cpu */
+		gicv2_cpuif_disable();
+
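+		/* Store the suspend-to-RAM marker and the BootROM bypass
+		 * entry point in the shared mailbox for the wakeup path
+		 */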
+		mailbox[MBOX_IDX_SUSPEND_MAGIC] = MVEBU_MAILBOX_SUSPEND_STATE;
+		mailbox[MBOX_IDX_ROM_EXIT_ADDR] = (uintptr_t)&plat_marvell_exit_bootrom;
+
+#if PLAT_MARVELL_SHARED_RAM_CACHED
+		flush_dcache_range(PLAT_MARVELL_MAILBOX_BASE +
+		MBOX_IDX_SUSPEND_MAGIC * sizeof(uintptr_t),
+		2 * sizeof(uintptr_t));
+#endif
+		/* Flush and disable LLC before going off-power */
+		llc_disable(0);
+
+		isb();
+		/*
+		 * Do not halt here!
+		 * The function must return to allow the caller,
+		 * psci_power_up_finish(), to do the proper context saving and
+		 * to release the CPU lock.
+		 */
+	}
+}
+
+/*****************************************************************************
+ * A8K handler called when a power domain has just been powered on after
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from.
+ *****************************************************************************
+ */
+static void a8k_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	/* arch specific configuration */
+	marvell_psci_arch_init(0);
+
+	/* Interrupt initialization */
+	gicv2_pcpu_distif_init();
+	gicv2_cpuif_enable();
+
+	if (is_pm_fw_running()) {
+		/* trace message */
+		PM_TRACE(TRACE_PWR_DOMAIN_ON_FINISH);
+	}
+}
+
+/*****************************************************************************
+ * A8K handler called when a power domain has just been powered on after
+ * having been suspended earlier. The target_state encodes the low power state
+ * that each level has woken up from.
+ * TODO: At the moment we reuse the on finisher and reinitialize the secure
+ * context. Need to implement a separate suspend finisher.
+ *****************************************************************************
+ */
+static void a8k_pwr_domain_suspend_finish(
+					const psci_power_state_t *target_state)
+{
+	if (is_pm_fw_running()) {
+		/* arch specific configuration */
+		marvell_psci_arch_init(0);
+
+		/* Interrupt initialization */
+		gicv2_cpuif_enable();
+
+		/* trace message */
+		PM_TRACE(TRACE_PWR_DOMAIN_SUSPEND_FINISH);
+	} else {
+		uintptr_t *mailbox = (void *)PLAT_MARVELL_MAILBOX_BASE;
+
+		/* Only the primary CPU requires platform init */
+		if (!plat_my_core_pos()) {
+			/* Initialize the console to provide
+			 * early debug support
+			 */
+			console_init(PLAT_MARVELL_BOOT_UART_BASE,
+			PLAT_MARVELL_BOOT_UART_CLK_IN_HZ,
+			MARVELL_CONSOLE_BAUDRATE);
+
+			bl31_plat_arch_setup();
+			marvell_bl31_platform_setup();
+			/*
+			 * Remove the suspend-to-RAM marker from the mailbox
+			 * so that a regular reset is treated as a cold boot
+			 */
+			mailbox[MBOX_IDX_SUSPEND_MAGIC] = 0;
+			mailbox[MBOX_IDX_ROM_EXIT_ADDR] = 0;
+#if PLAT_MARVELL_SHARED_RAM_CACHED
+			flush_dcache_range(PLAT_MARVELL_MAILBOX_BASE +
+			MBOX_IDX_SUSPEND_MAGIC * sizeof(uintptr_t),
+			2 * sizeof(uintptr_t));
+#endif
+		}
+	}
+}
+
+/*****************************************************************************
+ * This handler is called by the PSCI implementation during the `SYSTEM_SUSPEND`
+ * call to get the `power_state` parameter. This allows the platform to encode
+ * the appropriate State-ID field within the `power_state` parameter which can
+ * be utilized in `pwr_domain_suspend()` to suspend to system affinity level.
+ *****************************************************************************
+ */
+static void a8k_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+	/* lower affinities use PLAT_MAX_OFF_STATE */
+	for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
+		req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
+}
+
+static void
+__dead2 a8k_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_state)
+{
+	struct power_off_method *pm_cfg;
+	unsigned int srcmd;
+	unsigned int sdram_reg;
+	register_t gpio_data = 0, gpio_addr = 0;
+
+	if (is_pm_fw_running()) {
+		psci_power_down_wfi();
+		panic();
+	}
+
+	pm_cfg = (struct power_off_method *)plat_marvell_get_pm_cfg();
+
+	/* Prepare for power off */
+	plat_marvell_power_off_prepare(pm_cfg, &gpio_addr, &gpio_data);
+
+	/* First step to enable DDR self-refresh
+	 * to keep the data during suspend
+	 */
+	mmio_write_32(MVEBU_MC_PWR_CTRL_REG,
+		      MVEBU_MC_AC_ON_DLY_DEF_VAR |
+		      MVEBU_MC_AC_OFF_DLY_DEF_VAR |
+		      MVEBU_MC_PHY_AUTO_OFF_EN); /* = 0x8C1 */
+
+	/* Save DDR self-refresh second step register
+	 * and value to be issued later
+	 */
+	sdram_reg = MVEBU_USER_CMD_0_REG;
+	srcmd = mmio_read_32(sdram_reg);
+	srcmd &= ~(MVEBU_USER_CMD_CH0_MASK | MVEBU_USER_CMD_CS_MASK |
+		 MVEBU_USER_CMD_SR_MASK);
+	srcmd |= (MVEBU_USER_CMD_CH0_EN | MVEBU_USER_CMD_CS_ALL |
+		 MVEBU_USER_CMD_SR_ENTER);
+
+	/*
+	 * The wait for the DRAM is done using register accesses only.
+	 * At this stage any access to DRAM (e.g. a procedure call) would
+	 * release it from self-refresh mode
+	 */
+	__asm__ volatile (
+		/* Align to a cache line */
+		"	.balign 64\n\t"
+
+		/* Enter self refresh */
+		"	str %[srcmd], [%[sdram_reg]]\n\t"
+
+		/*
+		 * Wait 100 cycles for the DDR to enter self-refresh
+		 * by executing two instructions 50 times.
+		 */
+		"	mov x1, #50\n\t"
+		"1:	subs x1, x1, #1\n\t"
+		"	bne 1b\n\t"
+
+		/* Issue the command to trigger the SoC power off */
+		"	str	%[gpio_data], [%[gpio_addr]]\n\t"
+
+		/* Trap the processor */
+		"	b .\n\t"
+		: : [srcmd] "r" (srcmd), [sdram_reg] "r" (sdram_reg),
+		    [gpio_addr] "r" (gpio_addr),  [gpio_data] "r" (gpio_data)
+		: "x1");
+
+	panic();
+}
+
+/*****************************************************************************
+ * A8K handlers to shutdown/reboot the system
+ *****************************************************************************
+ */
+static void __dead2 a8k_system_off(void)
+{
+	ERROR("%s:  needs to be implemented\n", __func__);
+	panic();
+}
+
+void plat_marvell_system_reset(void)
+{
+	mmio_write_32(MVEBU_RFU_BASE + MVEBU_RFU_GLOBL_SW_RST, 0x0);
+}
+
+static void __dead2 a8k_system_reset(void)
+{
+	plat_marvell_system_reset();
+
+	/* we shouldn't get to this point */
+	panic();
+}
+
+/*****************************************************************************
+ * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
+ * platform layer will take care of registering the handlers with PSCI.
+ *****************************************************************************
+ */
+const plat_psci_ops_t plat_arm_psci_pm_ops = {
+	.cpu_standby = a8k_cpu_standby,
+	.pwr_domain_on = a8k_pwr_domain_on,
+	.pwr_domain_off = a8k_pwr_domain_off,
+	.pwr_domain_suspend = a8k_pwr_domain_suspend,
+	.pwr_domain_on_finish = a8k_pwr_domain_on_finish,
+	.get_sys_suspend_power_state = a8k_get_sys_suspend_power_state,
+	.pwr_domain_suspend_finish = a8k_pwr_domain_suspend_finish,
+	.pwr_domain_pwr_down_wfi = a8k_pwr_domain_pwr_down_wfi,
+	.system_off = a8k_system_off,
+	.system_reset = a8k_system_reset,
+	.validate_power_state = a8k_validate_power_state,
+	.validate_ns_entrypoint = a8k_validate_ns_entrypoint
+};
diff --git a/plat/marvell/a8k/common/plat_pm_trace.c b/plat/marvell/a8k/common/plat_pm_trace.c
new file mode 100644
index 0000000..683e56f
--- /dev/null
+++ b/plat/marvell/a8k/common/plat_pm_trace.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <mmio.h>
+#include <mss_mem.h>
+#include <platform.h>
+#include <plat_pm_trace.h>
+
+#ifdef PM_TRACE_ENABLE
+
+/* core trace APIs */
+core_trace_func funcTbl[PLATFORM_CORE_COUNT] = {
+	pm_core_0_trace,
+	pm_core_1_trace,
+	pm_core_2_trace,
+	pm_core_3_trace};
+
+/*****************************************************************************
+ * pm_core_0_trace
+ * pm_core_1_trace
+ * pm_core_2_trace
+ * pm_core_3_trace
+ *
+ * These functions write trace info into the per-core cyclic trace queue in
+ * the MSS SRAM memory space
+ *****************************************************************************
+ */
+void pm_core_0_trace(unsigned int trace)
+{
+	unsigned int current_position_core_0 =
+			mmio_read_32(AP_MSS_ATF_CORE_0_CTRL_BASE);
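+	/* Each trace entry holds a timestamp and a trace value; the CTRL
+	 * register keeps the cyclic write index, wrapped by
+	 * AP_MSS_ATF_TRACE_SIZE_MASK
+	 */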
+	mmio_write_32((AP_MSS_ATF_CORE_0_INFO_BASE  +
+		     (current_position_core_0 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+		     mmio_read_32(AP_MSS_TIMER_BASE));
+	mmio_write_32((AP_MSS_ATF_CORE_0_INFO_TRACE +
+		     (current_position_core_0 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+		     trace);
+	mmio_write_32(AP_MSS_ATF_CORE_0_CTRL_BASE,
+		     ((current_position_core_0 + 1) &
+		     AP_MSS_ATF_TRACE_SIZE_MASK));
+}
+
+void pm_core_1_trace(unsigned int trace)
+{
+	unsigned int current_position_core_1 =
+			mmio_read_32(AP_MSS_ATF_CORE_1_CTRL_BASE);
+	mmio_write_32((AP_MSS_ATF_CORE_1_INFO_BASE +
+		     (current_position_core_1 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+		     mmio_read_32(AP_MSS_TIMER_BASE));
+	mmio_write_32((AP_MSS_ATF_CORE_1_INFO_TRACE +
+		     (current_position_core_1 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+		     trace);
+	mmio_write_32(AP_MSS_ATF_CORE_1_CTRL_BASE,
+		     ((current_position_core_1 + 1) &
+		     AP_MSS_ATF_TRACE_SIZE_MASK));
+}
+
+void pm_core_2_trace(unsigned int trace)
+{
+	unsigned int current_position_core_2 =
+			mmio_read_32(AP_MSS_ATF_CORE_2_CTRL_BASE);
+	mmio_write_32((AP_MSS_ATF_CORE_2_INFO_BASE +
+		     (current_position_core_2 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+		     mmio_read_32(AP_MSS_TIMER_BASE));
+	mmio_write_32((AP_MSS_ATF_CORE_2_INFO_TRACE +
+		     (current_position_core_2 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+		     trace);
+	mmio_write_32(AP_MSS_ATF_CORE_2_CTRL_BASE,
+		     ((current_position_core_2 + 1) &
+		     AP_MSS_ATF_TRACE_SIZE_MASK));
+}
+
+void pm_core_3_trace(unsigned int trace)
+{
+	unsigned int current_position_core_3 =
+			mmio_read_32(AP_MSS_ATF_CORE_3_CTRL_BASE);
+	mmio_write_32((AP_MSS_ATF_CORE_3_INFO_BASE +
+		     (current_position_core_3 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+		     mmio_read_32(AP_MSS_TIMER_BASE));
+	mmio_write_32((AP_MSS_ATF_CORE_3_INFO_TRACE +
+		     (current_position_core_3 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+		     trace);
+	mmio_write_32(AP_MSS_ATF_CORE_3_CTRL_BASE,
+		     ((current_position_core_3 + 1) &
+		     AP_MSS_ATF_TRACE_SIZE_MASK));
+}
+#endif /* PM_TRACE_ENABLE */
diff --git a/plat/marvell/a8k/common/plat_thermal.c b/plat/marvell/a8k/common/plat_thermal.c
new file mode 100644
index 0000000..02fe820
--- /dev/null
+++ b/plat/marvell/a8k/common/plat_thermal.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <debug.h>
+#include <delay_timer.h>
+#include <mmio.h>
+#include <mvebu_def.h>
+#include <thermal.h>
+
+#define THERMAL_TIMEOUT					1200
+
+#define THERMAL_SEN_CTRL_LSB_STRT_OFFSET		0
+#define THERMAL_SEN_CTRL_LSB_STRT_MASK			\
+				(0x1 << THERMAL_SEN_CTRL_LSB_STRT_OFFSET)
+#define THERMAL_SEN_CTRL_LSB_RST_OFFSET			1
+#define THERMAL_SEN_CTRL_LSB_RST_MASK			\
+				(0x1 << THERMAL_SEN_CTRL_LSB_RST_OFFSET)
+#define THERMAL_SEN_CTRL_LSB_EN_OFFSET			2
+#define THERMAL_SEN_CTRL_LSB_EN_MASK			\
+				(0x1 << THERMAL_SEN_CTRL_LSB_EN_OFFSET)
+
+#define THERMAL_SEN_CTRL_STATS_VALID_OFFSET		16
+#define THERMAL_SEN_CTRL_STATS_VALID_MASK		\
+				(0x1 << THERMAL_SEN_CTRL_STATS_VALID_OFFSET)
+#define THERMAL_SEN_CTRL_STATS_TEMP_OUT_OFFSET		0
+#define THERMAL_SEN_CTRL_STATS_TEMP_OUT_MASK		\
+			(0x3FF << THERMAL_SEN_CTRL_STATS_TEMP_OUT_OFFSET)
+
+#define THERMAL_SEN_OUTPUT_MSB				512
+#define THERMAL_SEN_OUTPUT_COMP				1024
+
+struct tsen_regs {
+	uint32_t ext_tsen_ctrl_lsb;
+	uint32_t ext_tsen_ctrl_msb;
+	uint32_t ext_tsen_status;
+};
+
+static int ext_tsen_probe(struct tsen_config *tsen_cfg)
+{
+	uint32_t reg, timeout = 0;
+	struct tsen_regs *base;
+
+	if (tsen_cfg == NULL && tsen_cfg->regs_base == NULL) {
+		ERROR("initial thermal sensor configuration is missing\n");
+		return -1;
+	}
+	base = (struct tsen_regs *)tsen_cfg->regs_base;
+
+	INFO("initializing thermal sensor\n");
+
+	/* initialize thermal sensor hardware reset once */
+	reg = mmio_read_32((uintptr_t)&base->ext_tsen_ctrl_lsb);
+	reg &= ~THERMAL_SEN_CTRL_LSB_RST_OFFSET; /* de-assert TSEN_RESET */
+	reg |= THERMAL_SEN_CTRL_LSB_EN_MASK; /* set TSEN_EN to 1 */
+	reg |= THERMAL_SEN_CTRL_LSB_STRT_MASK; /* set TSEN_START to 1 */
+	mmio_write_32((uintptr_t)&base->ext_tsen_ctrl_lsb, reg);
+
+	reg = mmio_read_32((uintptr_t)&base->ext_tsen_status);
+	while ((reg & THERMAL_SEN_CTRL_STATS_VALID_MASK) == 0 &&
+	       timeout < THERMAL_TIMEOUT) {
+		udelay(100);
+		reg = mmio_read_32((uintptr_t)&base->ext_tsen_status);
+		timeout++;
+	}
+
+	if ((reg & THERMAL_SEN_CTRL_STATS_VALID_MASK) == 0) {
+		ERROR("thermal sensor is not ready\n");
+		return -1;
+	}
+
+	tsen_cfg->tsen_ready = 1;
+
+	VERBOSE("thermal sensor was initialized\n");
+
+	return 0;
+}
+
+static int ext_tsen_read(struct tsen_config *tsen_cfg, int *temp)
+{
+	uint32_t reg;
+	struct tsen_regs *base;
+
+	if (tsen_cfg == NULL && !tsen_cfg->tsen_ready) {
+		ERROR("thermal sensor was not initialized\n");
+		return -1;
+	}
+	base = (struct tsen_regs *)tsen_cfg->regs_base;
+
+	reg = mmio_read_32((uintptr_t)&base->ext_tsen_status);
+	reg = ((reg & THERMAL_SEN_CTRL_STATS_TEMP_OUT_MASK) >>
+		THERMAL_SEN_CTRL_STATS_TEMP_OUT_OFFSET);
+
+	/*
+	 * The TSEN output is a signed two's complement number
+	 * ranging from -512 to +511. When the MSB is set, the
+	 * complement value must be calculated
+	 */
+	if (reg >= THERMAL_SEN_OUTPUT_MSB)
+		reg -= THERMAL_SEN_OUTPUT_COMP;
+
+	if (tsen_cfg->tsen_divisor == 0) {
+		ERROR("thermal sensor divisor cannot be zero\n");
+		return -1;
+	}
+
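+	/* Convert the raw reading to a temperature using the calibration
+	 * gain, offset and divisor from the sensor configuration
+	 */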
+	*temp = ((tsen_cfg->tsen_gain * ((int)reg)) +
+		 tsen_cfg->tsen_offset) / tsen_cfg->tsen_divisor;
+
+	return 0;
+}
+
+static struct tsen_config tsen_cfg = {
+	.tsen_offset = 153400,
+	.tsen_gain = 425,
+	.tsen_divisor = 1000,
+	.tsen_ready = 0,
+	.regs_base = (void *)MVEBU_AP_EXT_TSEN_BASE,
+	.ptr_tsen_probe = ext_tsen_probe,
+	.ptr_tsen_read = ext_tsen_read
+};
+
+struct tsen_config *marvell_thermal_config_get(void)
+{
+	return &tsen_cfg;
+}