plat: marvell: Add common ARMADA platform components
Add common Marvell ARMADA platform components.
This patch also includes common components for Marvell
ARMADA 8K platforms.
Change-Id: I42192fdc6525a42e46b3ac2ad63c83db9bcbfeaf
Signed-off-by: Hanna Hawa <hannah@marvell.com>
Signed-off-by: Konstantin Porotchkin <kostap@marvell.com>
diff --git a/include/drivers/marvell/aro.h b/include/drivers/marvell/aro.h
new file mode 100644
index 0000000..3627a20
--- /dev/null
+++ b/include/drivers/marvell/aro.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2017 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+#ifndef _ARO_H_
+#define _ARO_H_
+
+enum hws_freq {
+ CPU_FREQ_2000,
+ CPU_FREQ_1800,
+ CPU_FREQ_1600,
+ CPU_FREQ_1400,
+ CPU_FREQ_1300,
+ CPU_FREQ_1200,
+ CPU_FREQ_1000,
+ CPU_FREQ_600,
+ CPU_FREQ_800,
+ DDR_FREQ_LAST,
+ DDR_FREQ_SAR
+};
+
+enum cpu_clock_freq_mode {
+ CPU_2000_DDR_1200_RCLK_1200 = 0x0,
+ CPU_2000_DDR_1050_RCLK_1050 = 0x1,
+ CPU_1600_DDR_800_RCLK_800 = 0x4,
+ CPU_1800_DDR_1200_RCLK_1200 = 0x6,
+ CPU_1800_DDR_1050_RCLK_1050 = 0x7,
+ CPU_1600_DDR_900_RCLK_900 = 0x0B,
+ CPU_1600_DDR_1050_RCLK_1050 = 0x0D,
+ CPU_1600_DDR_900_RCLK_900_2 = 0x0E,
+ CPU_1000_DDR_650_RCLK_650 = 0x13,
+ CPU_1300_DDR_800_RCLK_800 = 0x14,
+ CPU_1300_DDR_650_RCLK_650 = 0x17,
+ CPU_1200_DDR_800_RCLK_800 = 0x19,
+ CPU_1400_DDR_800_RCLK_800 = 0x1a,
+ CPU_600_DDR_800_RCLK_800 = 0x1B,
+ CPU_800_DDR_800_RCLK_800 = 0x1C,
+ CPU_1000_DDR_800_RCLK_800 = 0x1D,
+ CPU_DDR_RCLK_INVALID
+};
+
+int init_aro(void);
+
+#endif /* _ARO_H_ */
diff --git a/include/plat/marvell/a8k/common/a8k_common.h b/include/plat/marvell/a8k/common/a8k_common.h
new file mode 100644
index 0000000..e727467
--- /dev/null
+++ b/include/plat/marvell/a8k/common/a8k_common.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __A8K_COMMON_H__
+#define __A8K_COMMON_H__
+
+#include <amb_adec.h>
+#include <io_win.h>
+#include <iob.h>
+#include <ccu.h>
+
+/*
+ * This struct supports the skip image request
+ * detection_method: the method used to detect the request "signal".
+ * info:
+ *    gpio:
+ *        num: button MPP number.
+ *        button_state: HIGH (pressed button) or LOW (released button).
+ *    i2c:
+ *        i2c_addr: the address of the chosen I2C device.
+ *        i2c_reg: the chosen I2C register.
+ *    test:
+ *        cp_ap: the die the button is connected to (AP or CP).
+ *        cp_index: in case of CP, 0 for CP0 and 1 for CP1.
+ */
+struct skip_image {
+ enum {
+ GPIO,
+ I2C,
+ USER_DEFINED
+ } detection_method;
+
+ struct {
+ struct {
+ int num;
+ enum {
+ HIGH,
+ LOW
+ } button_state;
+
+ } gpio;
+
+ struct {
+ int i2c_addr;
+ int i2c_reg;
+ } i2c;
+
+ struct {
+ enum {
+ CP,
+ AP
+ } cp_ap;
+ int cp_index;
+ } test;
+ } info;
+};
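+
+/*
+ * Illustrative example only (the values below are hypothetical, not taken
+ * from any specific board): a board porting file could describe a skip-image
+ * request signalled by a pressed button on MPP 33 as:
+ *
+ *    struct skip_image skip_im = {
+ *        .detection_method = GPIO,
+ *        .info.gpio.num = 33,
+ *        .info.gpio.button_state = HIGH,
+ *    };
+ */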
+
+/*
+ * This struct supports the SoC power off method
+ * type: the method used to power off the SoC
+ * cfg:
+ *    PMIC_GPIO:
+ *    pin_count: number of GPIO pins used for toggling the signal that
+ *               notifies the external PMIC
+ *    info: holds the GPIO information; CP GPIOs should be used and
+ *          all GPIOs should be within the same GPIO config. register
+ *    step_count: number of steps used to toggle the GPIOs for the PMIC
+ *    seq: GPIO toggling values in sequence, each bit represents a GPIO.
+ *         For example, bit0 represents the first GPIO used for toggling.
+ *         The last step in the sequence is used to trigger the power off
+ *         signal.
+ *    delay_ms: transition interval for the GPIO setting to take effect,
+ *              in units of ms
+ */
+/* Max GPIO number used to notify PMIC to power off the SoC */
+#define PMIC_GPIO_MAX_NUMBER 8
+/* Max GPIO toggling steps in sequence to power off the SoC */
+#define PMIC_GPIO_MAX_TOGGLE_STEP 8
+
+enum gpio_output_state {
+ GPIO_LOW = 0,
+ GPIO_HIGH
+};
+
+typedef struct gpio_info {
+ int cp_index;
+ int gpio_index;
+} gpio_info_t;
+
+struct power_off_method {
+ enum {
+ PMIC_GPIO,
+ } type;
+
+ struct {
+ struct {
+ int pin_count;
+ struct gpio_info info[PMIC_GPIO_MAX_NUMBER];
+ int step_count;
+ uint32_t seq[PMIC_GPIO_MAX_TOGGLE_STEP];
+ int delay_ms;
+ } gpio;
+ } cfg;
+};
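+
+/*
+ * Illustrative example only (hypothetical values): a single CP0 GPIO (pin 35)
+ * toggled in two steps, where the last step drives the level that triggers
+ * the external PMIC power off:
+ *
+ *    struct power_off_method pm_cfg = {
+ *        .type = PMIC_GPIO,
+ *        .cfg.gpio.pin_count = 1,
+ *        .cfg.gpio.info = { {0, 35} },
+ *        .cfg.gpio.step_count = 2,
+ *        .cfg.gpio.seq = { 0, 1 },
+ *        .cfg.gpio.delay_ms = 10,
+ *    };
+ */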
+
+int marvell_gpio_config(void);
+uint32_t marvell_get_io_win_gcr_target(int ap_idx);
+uint32_t marvell_get_ccu_gcr_target(int ap_idx);
+
+
+/*
+ * The functions below are defined as Weak and may be overridden
+ * in specific Marvell standard platform
+ */
+int marvell_get_amb_memory_map(struct addr_map_win **win,
+ uint32_t *size, uintptr_t base);
+int marvell_get_io_win_memory_map(int ap_idx, struct addr_map_win **win,
+ uint32_t *size);
+int marvell_get_iob_memory_map(struct addr_map_win **win,
+ uint32_t *size, uintptr_t base);
+int marvell_get_ccu_memory_map(int ap_idx, struct addr_map_win **win,
+ uint32_t *size);
+
+#endif /* __A8K_COMMON_H__ */
diff --git a/include/plat/marvell/a8k/common/board_marvell_def.h b/include/plat/marvell/a8k/common/board_marvell_def.h
new file mode 100644
index 0000000..b1054db
--- /dev/null
+++ b/include/plat/marvell/a8k/common/board_marvell_def.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __BOARD_MARVELL_DEF_H__
+#define __BOARD_MARVELL_DEF_H__
+
+/*
+ * Required platform porting definitions common to all ARM
+ * development platforms
+ */
+
+/* Size of cacheable stacks */
+#if DEBUG_XLAT_TABLE
+# define PLATFORM_STACK_SIZE 0x800
+#elif IMAGE_BL1
+#if TRUSTED_BOARD_BOOT
+# define PLATFORM_STACK_SIZE 0x1000
+#else
+# define PLATFORM_STACK_SIZE 0x440
+#endif
+#elif IMAGE_BL2
+# if TRUSTED_BOARD_BOOT
+# define PLATFORM_STACK_SIZE 0x1000
+# else
+# define PLATFORM_STACK_SIZE 0x400
+# endif
+#elif IMAGE_BL31
+# define PLATFORM_STACK_SIZE 0x400
+#elif IMAGE_BL32
+# define PLATFORM_STACK_SIZE 0x440
+#endif
+
+/*
+ * PLAT_MARVELL_MMAP_ENTRIES depends on the number of entries in the
+ * plat_arm_mmap array defined for each BL stage.
+ */
+#if IMAGE_BLE
+# define PLAT_MARVELL_MMAP_ENTRIES 3
+#endif
+#if IMAGE_BL1
+# if TRUSTED_BOARD_BOOT
+# define PLAT_MARVELL_MMAP_ENTRIES 7
+# else
+# define PLAT_MARVELL_MMAP_ENTRIES 6
+# endif /* TRUSTED_BOARD_BOOT */
+#endif
+#if IMAGE_BL2
+# define PLAT_MARVELL_MMAP_ENTRIES 8
+#endif
+#if IMAGE_BL31
+#define PLAT_MARVELL_MMAP_ENTRIES 5
+#endif
+
+/*
+ * Platform specific page table and MMU setup constants
+ */
+#if IMAGE_BL1
+#define MAX_XLAT_TABLES 4
+#elif IMAGE_BLE
+# define MAX_XLAT_TABLES 4
+#elif IMAGE_BL2
+# define MAX_XLAT_TABLES 4
+#elif IMAGE_BL31
+# define MAX_XLAT_TABLES 4
+#elif IMAGE_BL32
+# define MAX_XLAT_TABLES 4
+#endif
+
+#define MAX_IO_DEVICES 3
+#define MAX_IO_HANDLES 4
+
+#define PLAT_MARVELL_TRUSTED_SRAM_SIZE 0x80000 /* 512 KB */
+
+
+#endif /* __BOARD_MARVELL_DEF_H__ */
diff --git a/include/plat/marvell/a8k/common/marvell_def.h b/include/plat/marvell/a8k/common/marvell_def.h
new file mode 100644
index 0000000..7dacf82
--- /dev/null
+++ b/include/plat/marvell/a8k/common/marvell_def.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __MARVELL_DEF_H__
+#define __MARVELL_DEF_H__
+
+#include <arch.h>
+#include <common_def.h>
+#include <platform_def.h>
+#include <tbbr_img_def.h>
+#include <xlat_tables.h>
+
+
+/******************************************************************************
+ * Definitions common to all MARVELL standard platforms
+ *****************************************************************************/
+
+/* Special value used to verify platform parameters from BL2 to BL31 */
+#define MARVELL_BL31_PLAT_PARAM_VAL 0x0f1e2d3c4b5a6978ULL
+
+
+#define MARVELL_CACHE_WRITEBACK_SHIFT 6
+
+/*
+ * Macros mapping the MPIDR Affinity levels to MARVELL Platform Power levels.
+ * The power levels have a 1:1 mapping with the MPIDR affinity levels.
+ */
+#define MARVELL_PWR_LVL0 MPIDR_AFFLVL0
+#define MARVELL_PWR_LVL1 MPIDR_AFFLVL1
+#define MARVELL_PWR_LVL2 MPIDR_AFFLVL2
+
+/*
+ * Macros for local power states in Marvell platforms encoded by
+ * State-ID field within the power-state parameter.
+ */
+/* Local power state for power domains in Run state. */
+#define MARVELL_LOCAL_STATE_RUN 0
+/* Local power state for retention. Valid only for CPU power domains */
+#define MARVELL_LOCAL_STATE_RET 1
+/*
+ * Local power state for OFF/power-down. Valid for CPU
+ * and cluster power domains
+ */
+#define MARVELL_LOCAL_STATE_OFF 2
+
+/* The first 4KB of Trusted SRAM are used as shared memory */
+#define MARVELL_TRUSTED_SRAM_BASE PLAT_MARVELL_ATF_BASE
+#define MARVELL_SHARED_RAM_BASE MARVELL_TRUSTED_SRAM_BASE
+#define MARVELL_SHARED_RAM_SIZE 0x00001000 /* 4 KB */
+
+/* The remaining Trusted SRAM is used to load the BL images */
+#define MARVELL_BL_RAM_BASE (MARVELL_SHARED_RAM_BASE + \
+ MARVELL_SHARED_RAM_SIZE)
+#define MARVELL_BL_RAM_SIZE (PLAT_MARVELL_TRUSTED_SRAM_SIZE - \
+ MARVELL_SHARED_RAM_SIZE)
+/* Non-shared DRAM */
+#define MARVELL_DRAM_BASE ULL(0x0)
+#define MARVELL_DRAM_SIZE ULL(0x80000000)
+#define MARVELL_DRAM_END (MARVELL_DRAM_BASE + \
+ MARVELL_DRAM_SIZE - 1)
+
+#define MARVELL_IRQ_SEC_PHY_TIMER 29
+
+#define MARVELL_IRQ_SEC_SGI_0 8
+#define MARVELL_IRQ_SEC_SGI_1 9
+#define MARVELL_IRQ_SEC_SGI_2 10
+#define MARVELL_IRQ_SEC_SGI_3 11
+#define MARVELL_IRQ_SEC_SGI_4 12
+#define MARVELL_IRQ_SEC_SGI_5 13
+#define MARVELL_IRQ_SEC_SGI_6 14
+#define MARVELL_IRQ_SEC_SGI_7 15
+
+#define MARVELL_MAP_SHARED_RAM MAP_REGION_FLAT( \
+ MARVELL_SHARED_RAM_BASE,\
+ MARVELL_SHARED_RAM_SIZE,\
+ MT_MEMORY | MT_RW | MT_SECURE)
+
+#define MARVELL_MAP_DRAM MAP_REGION_FLAT( \
+ MARVELL_DRAM_BASE, \
+ MARVELL_DRAM_SIZE, \
+ MT_MEMORY | MT_RW | MT_NS)
+
+
+/*
+ * The number of regions like RO(code), coherent and data required by
+ * different BL stages which need to be mapped in the MMU.
+ */
+#if USE_COHERENT_MEM
+#define MARVELL_BL_REGIONS 3
+#else
+#define MARVELL_BL_REGIONS 2
+#endif
+
+#define MAX_MMAP_REGIONS (PLAT_MARVELL_MMAP_ENTRIES + \
+ MARVELL_BL_REGIONS)
+
+#define MARVELL_CONSOLE_BAUDRATE 115200
+
+/******************************************************************************
+ * Required platform porting definitions common to all MARVELL std. platforms
+ *****************************************************************************/
+
+#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 32)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 32)
+
+/*
+ * This macro defines the deepest retention state possible. A higher state
+ * id will represent an invalid or a power down state.
+ */
+#define PLAT_MAX_RET_STATE MARVELL_LOCAL_STATE_RET
+
+/*
+ * This macro defines the deepest power down states possible. Any state ID
+ * higher than this is invalid.
+ */
+#define PLAT_MAX_OFF_STATE MARVELL_LOCAL_STATE_OFF
+
+
+#define PLATFORM_CORE_COUNT PLAT_MARVELL_CORE_COUNT
+#define PLAT_NUM_PWR_DOMAINS (PLAT_MARVELL_CLUSTER_COUNT + \
+ PLATFORM_CORE_COUNT)
+
+/*
+ * Some data must be aligned on the biggest cache line size in the platform.
+ * This is known only to the platform as it might have a combination of
+ * integrated and external caches.
+ */
+#define CACHE_WRITEBACK_GRANULE (1 << MARVELL_CACHE_WRITEBACK_SHIFT)
+
+
+/*******************************************************************************
+ * BL1 specific defines.
+ * BL1 RW data is relocated from ROM to RAM at runtime so we need 2 sets of
+ * addresses.
+ ******************************************************************************/
+#define BL1_RO_BASE PLAT_MARVELL_TRUSTED_ROM_BASE
+#define BL1_RO_LIMIT (PLAT_MARVELL_TRUSTED_ROM_BASE \
+ + PLAT_MARVELL_TRUSTED_ROM_SIZE)
+/*
+ * Put BL1 RW at the top of the Trusted SRAM.
+ */
+#define BL1_RW_BASE (MARVELL_BL_RAM_BASE + \
+ MARVELL_BL_RAM_SIZE - \
+ PLAT_MARVELL_MAX_BL1_RW_SIZE)
+#define BL1_RW_LIMIT (MARVELL_BL_RAM_BASE + MARVELL_BL_RAM_SIZE)
+
+/*******************************************************************************
+ * BLE specific defines.
+ ******************************************************************************/
+#define BLE_BASE PLAT_MARVELL_SRAM_BASE
+#define BLE_LIMIT PLAT_MARVELL_SRAM_END
+
+/*******************************************************************************
+ * BL2 specific defines.
+ ******************************************************************************/
+/*
+ * Put BL2 just below BL31.
+ */
+#define BL2_BASE (BL31_BASE - PLAT_MARVELL_MAX_BL2_SIZE)
+#define BL2_LIMIT BL31_BASE
+
+/*******************************************************************************
+ * BL31 specific defines.
+ ******************************************************************************/
+/*
+ * Put BL31 at the top of the Trusted SRAM.
+ */
+#define BL31_BASE (MARVELL_BL_RAM_BASE + \
+ MARVELL_BL_RAM_SIZE - \
+ PLAT_MARVEL_MAX_BL31_SIZE)
+#define BL31_PROGBITS_LIMIT BL1_RW_BASE
+#define BL31_LIMIT (MARVELL_BL_RAM_BASE + \
+ MARVELL_BL_RAM_SIZE)
+
+
+#endif /* __MARVELL_DEF_H__ */
diff --git a/include/plat/marvell/a8k/common/plat_marvell.h b/include/plat/marvell/a8k/common/plat_marvell.h
new file mode 100644
index 0000000..aad5da7
--- /dev/null
+++ b/include/plat/marvell/a8k/common/plat_marvell.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __PLAT_MARVELL_H__
+#define __PLAT_MARVELL_H__
+
+#include <cassert.h>
+#include <cpu_data.h>
+#include <stdint.h>
+#include <utils.h>
+#include <xlat_tables.h>
+
+/*
+ * Extern declarations common to Marvell standard platforms
+ */
+extern const mmap_region_t plat_marvell_mmap[];
+
+#define MARVELL_CASSERT_MMAP \
+ CASSERT((ARRAY_SIZE(plat_marvell_mmap) + MARVELL_BL_REGIONS) \
+ <= MAX_MMAP_REGIONS, \
+ assert_max_mmap_regions)
+
+/*
+ * Utility functions common to Marvell standard platforms
+ */
+void marvell_setup_page_tables(uintptr_t total_base,
+ size_t total_size,
+ uintptr_t code_start,
+ uintptr_t code_limit,
+ uintptr_t rodata_start,
+ uintptr_t rodata_limit
+#if USE_COHERENT_MEM
+ , uintptr_t coh_start,
+ uintptr_t coh_limit
+#endif
+);
+
+/* IO storage utility functions */
+void marvell_io_setup(void);
+
+/* System timer utility function */
+void marvell_configure_sys_timer(void);
+
+/* Topology utility function */
+int marvell_check_mpidr(u_register_t mpidr);
+
+/* BLE utility functions */
+int ble_plat_setup(int *skip);
+void plat_marvell_dram_update_topology(void);
+void ble_plat_pcie_ep_setup(void);
+struct pci_hw_cfg *plat_get_pcie_hw_data(void);
+
+/* BL1 utility functions */
+void marvell_bl1_early_platform_setup(void);
+void marvell_bl1_platform_setup(void);
+void marvell_bl1_plat_arch_setup(void);
+
+/* BL2 utility functions */
+void marvell_bl2_early_platform_setup(meminfo_t *mem_layout);
+void marvell_bl2_platform_setup(void);
+void marvell_bl2_plat_arch_setup(void);
+uint32_t marvell_get_spsr_for_bl32_entry(void);
+uint32_t marvell_get_spsr_for_bl33_entry(void);
+
+/* BL31 utility functions */
+void marvell_bl31_early_platform_setup(bl31_params_t *from_bl2,
+ void *plat_params_from_bl2);
+void marvell_bl31_platform_setup(void);
+void marvell_bl31_plat_runtime_setup(void);
+void marvell_bl31_plat_arch_setup(void);
+
+/* Power management config to power off the SoC */
+void *plat_marvell_get_pm_cfg(void);
+
+/* Check if MSS AP CM3 firmware contains PM support */
+_Bool is_pm_fw_running(void);
+
+/* Bootrom image recovery utility functions */
+void *plat_marvell_get_skip_image_data(void);
+
+/* FIP TOC validity check */
+int marvell_io_is_toc_valid(void);
+
+/*
+ * PSCI functionality
+ */
+void marvell_psci_arch_init(int ap_idx);
+void plat_marvell_system_reset(void);
+
+/*
+ * Optional functions required in Marvell standard platforms
+ */
+void plat_marvell_io_setup(void);
+int plat_marvell_get_alt_image_source(
+ unsigned int image_id,
+ uintptr_t *dev_handle,
+ uintptr_t *image_spec);
+unsigned int plat_marvell_calc_core_pos(u_register_t mpidr);
+
+const mmap_region_t *plat_marvell_get_mmap(void);
+void marvell_ble_prepare_exit(void);
+void marvell_exit_bootrom(uintptr_t base);
+
+int plat_marvell_early_cpu_powerdown(void);
+#endif /* __PLAT_MARVELL_H__ */
diff --git a/include/plat/marvell/a8k/common/plat_pm_trace.h b/include/plat/marvell/a8k/common/plat_pm_trace.h
new file mode 100644
index 0000000..0878959
--- /dev/null
+++ b/include/plat/marvell/a8k/common/plat_pm_trace.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __PLAT_PM_TRACE_H
+#define __PLAT_PM_TRACE_H
+
+
+/*
+ * PM Trace is for debug purposes only!
+ * It should not be enabled during normal system runtime.
+ */
+#undef PM_TRACE_ENABLE
+
+
+/* trace entry */
+struct pm_trace_entry {
+ /* trace entry time stamp */
+ unsigned int timestamp;
+
+ /* trace info
+ * [16-31] - API Trace Id
+ * [00-15] - API Step Id
+ */
+ unsigned int trace_info;
+};
+
+struct pm_trace_ctrl {
+ /* trace pointer - points to next free entry in trace cyclic queue */
+ unsigned int trace_pointer;
+
+ /* trace count - number of entries in the queue, clear upon read */
+ unsigned int trace_count;
+};
+
+/* trace size definition */
+#define AP_MSS_ATF_CORE_INFO_SIZE (256)
+#define AP_MSS_ATF_CORE_ENTRY_SIZE (8)
+#define AP_MSS_ATF_TRACE_SIZE_MASK (0xFF)
+
+/* trace address definition */
+#define AP_MSS_TIMER_BASE (MVEBU_REGS_BASE_MASK + 0x580110)
+
+#define AP_MSS_ATF_CORE_0_CTRL_BASE (MVEBU_REGS_BASE_MASK + 0x520140)
+#define AP_MSS_ATF_CORE_1_CTRL_BASE (MVEBU_REGS_BASE_MASK + 0x520150)
+#define AP_MSS_ATF_CORE_2_CTRL_BASE (MVEBU_REGS_BASE_MASK + 0x520160)
+#define AP_MSS_ATF_CORE_3_CTRL_BASE (MVEBU_REGS_BASE_MASK + 0x520170)
+#define AP_MSS_ATF_CORE_CTRL_BASE (AP_MSS_ATF_CORE_0_CTRL_BASE)
+
+#define AP_MSS_ATF_CORE_0_INFO_BASE (MVEBU_REGS_BASE_MASK + 0x5201C0)
+#define AP_MSS_ATF_CORE_0_INFO_TRACE (MVEBU_REGS_BASE_MASK + 0x5201C4)
+#define AP_MSS_ATF_CORE_1_INFO_BASE (MVEBU_REGS_BASE_MASK + 0x5209C0)
+#define AP_MSS_ATF_CORE_1_INFO_TRACE (MVEBU_REGS_BASE_MASK + 0x5209C4)
+#define AP_MSS_ATF_CORE_2_INFO_BASE (MVEBU_REGS_BASE_MASK + 0x5211C0)
+#define AP_MSS_ATF_CORE_2_INFO_TRACE (MVEBU_REGS_BASE_MASK + 0x5211C4)
+#define AP_MSS_ATF_CORE_3_INFO_BASE (MVEBU_REGS_BASE_MASK + 0x5219C0)
+#define AP_MSS_ATF_CORE_3_INFO_TRACE (MVEBU_REGS_BASE_MASK + 0x5219C4)
+#define AP_MSS_ATF_CORE_INFO_BASE (AP_MSS_ATF_CORE_0_INFO_BASE)
+
+/* trace info definition */
+#define TRACE_PWR_DOMAIN_OFF (0x10000)
+#define TRACE_PWR_DOMAIN_SUSPEND (0x20000)
+#define TRACE_PWR_DOMAIN_SUSPEND_FINISH (0x30000)
+#define TRACE_PWR_DOMAIN_ON (0x40000)
+#define TRACE_PWR_DOMAIN_ON_FINISH (0x50000)
+
+#define TRACE_PWR_DOMAIN_ON_MASK (0xFF)
+
+#ifdef PM_TRACE_ENABLE
+
+/* trace API definition */
+void pm_core_0_trace(unsigned int trace);
+void pm_core_1_trace(unsigned int trace);
+void pm_core_2_trace(unsigned int trace);
+void pm_core_3_trace(unsigned int trace);
+
+typedef void (*core_trace_func)(unsigned int);
+
+extern core_trace_func funcTbl[PLATFORM_CORE_COUNT];
+
+#define PM_TRACE(trace) funcTbl[plat_my_core_pos()](trace)
+
+#else
+
+#define PM_TRACE(trace)
+
+#endif
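+
+/*
+ * Usage sketch (illustrative only): a caller can combine one of the
+ * TRACE_PWR_DOMAIN_* trace IDs (bits [16-31]) with a step ID (bits [00-15]),
+ * for example:
+ *
+ *    PM_TRACE(TRACE_PWR_DOMAIN_OFF | 0x1);
+ *
+ * which compiles to nothing unless PM_TRACE_ENABLE is defined.
+ */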
+
+/*******************************************************************************
+ * pm_trace_add
+ *
+ * DESCRIPTION: Add PM trace
+ ******************************************************************************
+ */
+void pm_trace_add(unsigned int trace, unsigned int core);
+
+#endif /* __PLAT_PM_TRACE_H */
diff --git a/include/plat/marvell/common/aarch64/cci_macros.S b/include/plat/marvell/common/aarch64/cci_macros.S
new file mode 100644
index 0000000..d6080cf
--- /dev/null
+++ b/include/plat/marvell/common/aarch64/cci_macros.S
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __CCI_MACROS_S__
+#define __CCI_MACROS_S__
+
+#include <cci.h>
+#include <platform_def.h>
+
+.section .rodata.cci_reg_name, "aS"
+cci_iface_regs:
+ .asciz "cci_snoop_ctrl_cluster0", "cci_snoop_ctrl_cluster1" , ""
+
+ /* ------------------------------------------------
+ * The below required platform porting macro prints
+ * out relevant interconnect registers whenever an
+ * unhandled exception is taken in BL31.
+ * Clobbers: x0 - x9, sp
+ * ------------------------------------------------
+ */
+ .macro print_cci_regs
+ adr x6, cci_iface_regs
+ /* Store in x7 the base address of the first interface */
+ mov_imm x7, (PLAT_MARVELL_CCI_BASE + SLAVE_IFACE_OFFSET( \
+ PLAT_MARVELL_CCI_CLUSTER0_SL_IFACE_IX))
+ ldr w8, [x7, #SNOOP_CTRL_REG]
+ /* Store in x7 the base address of the second interface */
+ mov_imm x7, (PLAT_MARVELL_CCI_BASE + SLAVE_IFACE_OFFSET( \
+ PLAT_MARVELL_CCI_CLUSTER1_SL_IFACE_IX))
+ ldr w9, [x7, #SNOOP_CTRL_REG]
+ /* Store to the crash buf and print to console */
+ bl str_in_crash_buf_print
+ .endm
+
+#endif /* __CCI_MACROS_S__ */
diff --git a/include/plat/marvell/common/aarch64/marvell_macros.S b/include/plat/marvell/common/aarch64/marvell_macros.S
new file mode 100644
index 0000000..0102af0
--- /dev/null
+++ b/include/plat/marvell/common/aarch64/marvell_macros.S
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __MARVELL_MACROS_S__
+#define __MARVELL_MACROS_S__
+
+#include <cci.h>
+#include <gic_common.h>
+#include <gicv2.h>
+#include <gicv3.h>
+#include <platform_def.h>
+
+/*
+ * These Macros are required by ATF
+ */
+
+.section .rodata.gic_reg_name, "aS"
+/* Applicable only to GICv2 and GICv3 with SRE disabled (legacy mode) */
+gicc_regs:
+ .asciz "gicc_hppir", "gicc_ahppir", "gicc_ctlr", ""
+
+#ifdef USE_CCI
+/* Applicable only to GICv3 with SRE enabled */
+icc_regs:
+ .asciz "icc_hppir0_el1", "icc_hppir1_el1", "icc_ctlr_el3", ""
+#endif
+/* Registers common to both GICv2 and GICv3 */
+gicd_pend_reg:
+ .asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n" \
+ " Offset:\t\t\tvalue\n"
+newline:
+ .asciz "\n"
+spacer:
+ .asciz ":\t\t0x"
+
+ /* ---------------------------------------------
+ * The below utility macro prints out relevant GIC
+ * registers whenever an unhandled exception is
+ * taken in BL31 on ARM standard platforms.
+ * Expects: GICD base in x16, GICC base in x17
+ * Clobbers: x0 - x10, sp
+ * ---------------------------------------------
+ */
+ .macro arm_print_gic_regs
+ /* Check for GICv3 system register access */
+ mrs x7, id_aa64pfr0_el1
+ ubfx x7, x7, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_WIDTH
+ cmp x7, #1
+ b.ne print_gicv2
+
+ /* Check for SRE enable */
+ mrs x8, ICC_SRE_EL3
+ tst x8, #ICC_SRE_SRE_BIT
+ b.eq print_gicv2
+
+#ifdef USE_CCI
+ /* Load the icc reg list to x6 */
+ adr x6, icc_regs
+ /* Load the icc regs to gp regs used by str_in_crash_buf_print */
+ mrs x8, ICC_HPPIR0_EL1
+ mrs x9, ICC_HPPIR1_EL1
+ mrs x10, ICC_CTLR_EL3
+ /* Store to the crash buf and print to console */
+ bl str_in_crash_buf_print
+#endif
+ b print_gic_common
+
+print_gicv2:
+ /* Load the gicc reg list to x6 */
+ adr x6, gicc_regs
+ /* Load the gicc regs to gp regs used by str_in_crash_buf_print */
+ ldr w8, [x17, #GICC_HPPIR]
+ ldr w9, [x17, #GICC_AHPPIR]
+ ldr w10, [x17, #GICC_CTLR]
+ /* Store to the crash buf and print to console */
+ bl str_in_crash_buf_print
+
+print_gic_common:
+ /* Print the GICD_ISPENDR regs */
+ add x7, x16, #GICD_ISPENDR
+ adr x4, gicd_pend_reg
+ bl asm_print_str
+gicd_ispendr_loop:
+ sub x4, x7, x16
+ cmp x4, #0x280
+ b.eq exit_print_gic_regs
+ bl asm_print_hex
+
+ adr x4, spacer
+ bl asm_print_str
+
+ ldr x4, [x7], #8
+ bl asm_print_hex
+
+ adr x4, newline
+ bl asm_print_str
+ b gicd_ispendr_loop
+exit_print_gic_regs:
+ .endm
+
+
+.section .rodata.cci_reg_name, "aS"
+cci_iface_regs:
+ .asciz "cci_snoop_ctrl_cluster0", "cci_snoop_ctrl_cluster1" , ""
+
+ /* ------------------------------------------------
+ * The below required platform porting macro prints
+ * out relevant interconnect registers whenever an
+ * unhandled exception is taken in BL31.
+ * Clobbers: x0 - x9, sp
+ * ------------------------------------------------
+ */
+ .macro print_cci_regs
+#ifdef USE_CCI
+ adr x6, cci_iface_regs
+ /* Store in x7 the base address of the first interface */
+ mov_imm x7, (PLAT_MARVELL_CCI_BASE + SLAVE_IFACE_OFFSET( \
+ PLAT_MARVELL_CCI_CLUSTER0_SL_IFACE_IX))
+ ldr w8, [x7, #SNOOP_CTRL_REG]
+ /* Store in x7 the base address of the second interface */
+ mov_imm x7, (PLAT_MARVELL_CCI_BASE + SLAVE_IFACE_OFFSET( \
+ PLAT_MARVELL_CCI_CLUSTER1_SL_IFACE_IX))
+ ldr w9, [x7, #SNOOP_CTRL_REG]
+ /* Store to the crash buf and print to console */
+ bl str_in_crash_buf_print
+#endif
+ .endm
+
+
+#endif /* __MARVELL_MACROS_S__ */
diff --git a/include/plat/marvell/common/marvell_plat_priv.h b/include/plat/marvell/common/marvell_plat_priv.h
new file mode 100644
index 0000000..c1dad0e
--- /dev/null
+++ b/include/plat/marvell/common/marvell_plat_priv.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __MARVELL_PLAT_PRIV_H__
+#define __MARVELL_PLAT_PRIV_H__
+
+#include <utils.h>
+
+/*****************************************************************************
+ * Function and variable prototypes
+ *****************************************************************************
+ */
+void plat_delay_timer_init(void);
+
+uint64_t mvebu_get_dram_size(uint64_t ap_base_addr);
+
+/*
+ * GIC operation, mandatory functions required in Marvell standard platforms
+ */
+void plat_marvell_gic_driver_init(void);
+void plat_marvell_gic_init(void);
+void plat_marvell_gic_cpuif_enable(void);
+void plat_marvell_gic_cpuif_disable(void);
+void plat_marvell_gic_pcpu_init(void);
+void plat_marvell_gic_irq_save(void);
+void plat_marvell_gic_irq_restore(void);
+void plat_marvell_gic_irq_pcpu_save(void);
+void plat_marvell_gic_irq_pcpu_restore(void);
+
+#endif /* __MARVELL_PLAT_PRIV_H__ */
diff --git a/include/plat/marvell/common/marvell_pm.h b/include/plat/marvell/common/marvell_pm.h
new file mode 100644
index 0000000..2817a46
--- /dev/null
+++ b/include/plat/marvell/common/marvell_pm.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef _MARVELL_PM_H_
+#define _MARVELL_PM_H_
+
+#define MVEBU_MAILBOX_MAGIC_NUM PLAT_MARVELL_MAILBOX_MAGIC_NUM
+#define MVEBU_MAILBOX_SUSPEND_STATE 0xb007de7c
+
+/* Mailbox entry indexes */
+/* Magic number for validity check */
+#define MBOX_IDX_MAGIC 0
+/* Recovery from suspend entry point */
+#define MBOX_IDX_SEC_ADDR 1
+/* Suspend state magic number */
+#define MBOX_IDX_SUSPEND_MAGIC 2
+/* Recovery jump address for ROM bypass */
+#define MBOX_IDX_ROM_EXIT_ADDR 3
+/* BLE execution start counter value */
+#define MBOX_IDX_START_CNT 4
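+
+/*
+ * Layout sketch (an assumption based on the indexes above and the 8-byte
+ * stride used when the mailbox is read at warm boot): the mailbox is treated
+ * as an array of 64-bit entries at PLAT_MARVELL_MAILBOX_BASE, e.g.:
+ *
+ *    uint64_t *mbox = (uint64_t *)PLAT_MARVELL_MAILBOX_BASE;
+ *    uint64_t warm_entry = mbox[MBOX_IDX_SEC_ADDR];
+ */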
+
+#endif /* _MARVELL_PM_H_ */
diff --git a/include/plat/marvell/common/mvebu.h b/include/plat/marvell/common/mvebu.h
new file mode 100644
index 0000000..a20e538
--- /dev/null
+++ b/include/plat/marvell/common/mvebu.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef _MVEBU_H_
+#define _MVEBU_H_
+
+/* Use these macros only when printf is allowed */
+#define debug_enter() VERBOSE("----> Enter %s\n", __func__)
+#define debug_exit() VERBOSE("<---- Exit %s\n", __func__)
+
+/* Macro for testing alignment. Non-zero if the number is NOT aligned */
+#define IS_NOT_ALIGN(number, align) ((number) & ((align) - 1))
+
+/* Macro for alignment up. For example, ALIGN_UP(0x0330, 0x20) = 0x0340 */
+#define ALIGN_UP(number, align) (((number) & ((align) - 1)) ? \
+ (((number) + (align)) & ~((align)-1)) : (number))
+
+/* Macro for testing whether a number is a power of 2. Non-zero if so */
+#define IS_POWER_OF_2(number) ((number) != 0 && \
+ (((number) & ((number) - 1)) == 0))
+
+/*
+ * Macro for rounding up to the next power of 2.
+ * It counts leading zeros (the clz instruction) to find the MSB set bit,
+ * then shifts 1 left by the remaining bit count to get a power of 2.
+ * Note: this macro is for 32-bit numbers only.
+ */
+#define ROUND_UP_TO_POW_OF_2(number) (1 << \
+ (32 - __builtin_clz((number) - 1)))
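+
+/*
+ * Illustrative values for the macros above (computed by hand, for reference):
+ *    IS_NOT_ALIGN(0x1004, 0x1000)  -> non-zero (not aligned)
+ *    ALIGN_UP(0x1004, 0x1000)      -> 0x2000
+ *    IS_POWER_OF_2(0x40)           -> 1
+ *    ROUND_UP_TO_POW_OF_2(0x21)    -> 0x40
+ */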
+
+#define _1MB_ (1024ULL*1024ULL)
+#define _1GB_ (_1MB_*1024ULL)
+
+#endif /* _MVEBU_H_ */
diff --git a/plat/marvell/a8k/common/a8k_common.mk b/plat/marvell/a8k/common/a8k_common.mk
new file mode 100644
index 0000000..3bcce96
--- /dev/null
+++ b/plat/marvell/a8k/common/a8k_common.mk
@@ -0,0 +1,122 @@
+#
+# Copyright (C) 2016 - 2018 Marvell International Ltd.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+# https://spdx.org/licenses
+
+include tools/doimage/doimage.mk
+
+PLAT_FAMILY := a8k
+PLAT_FAMILY_BASE := plat/marvell/$(PLAT_FAMILY)
+PLAT_INCLUDE_BASE := include/plat/marvell/$(PLAT_FAMILY)
+PLAT_COMMON_BASE := $(PLAT_FAMILY_BASE)/common
+MARVELL_DRV_BASE := drivers/marvell
+MARVELL_COMMON_BASE := plat/marvell/common
+
+ERRATA_A72_859971 := 1
+
+# Enable MSS support for a8k family
+MSS_SUPPORT := 1
+
+# Disable EL3 cache for power management
+BL31_CACHE_DISABLE := 1
+$(eval $(call add_define,BL31_CACHE_DISABLE))
+
+$(eval $(call add_define,PCI_EP_SUPPORT))
+$(eval $(call assert_boolean,PCI_EP_SUPPORT))
+
+DOIMAGEPATH ?= tools/doimage
+DOIMAGETOOL ?= ${DOIMAGEPATH}/doimage
+
+ROM_BIN_EXT ?= $(BUILD_PLAT)/ble.bin
+DOIMAGE_FLAGS += -b $(ROM_BIN_EXT) $(NAND_DOIMAGE_FLAGS) $(DOIMAGE_SEC_FLAGS)
+
+# This define specifies DDR type for BLE
+$(eval $(call add_define,CONFIG_DDR4))
+
+MARVELL_GIC_SOURCES := drivers/arm/gic/common/gic_common.c \
+ drivers/arm/gic/v2/gicv2_main.c \
+ drivers/arm/gic/v2/gicv2_helpers.c \
+ plat/common/plat_gicv2.c
+
+ATF_INCLUDES := -Iinclude/common/tbbr
+
+PLAT_INCLUDES := -I$(PLAT_FAMILY_BASE)/$(PLAT) \
+ -I$(PLAT_COMMON_BASE)/include \
+ -I$(PLAT_INCLUDE_BASE)/common \
+ -Iinclude/drivers/marvell \
+ -Iinclude/drivers/marvell/mochi \
+ $(ATF_INCLUDES)
+
+PLAT_BL_COMMON_SOURCES := $(PLAT_COMMON_BASE)/aarch64/a8k_common.c \
+ drivers/console/aarch64/console.S \
+ drivers/ti/uart/aarch64/16550_console.S
+
+BLE_PORTING_SOURCES := $(PLAT_FAMILY_BASE)/$(PLAT)/board/dram_port.c \
+ $(PLAT_FAMILY_BASE)/$(PLAT)/board/marvell_plat_config.c
+
+MARVELL_MOCHI_DRV += $(MARVELL_DRV_BASE)/mochi/cp110_setup.c
+
+BLE_SOURCES := $(PLAT_COMMON_BASE)/plat_ble_setup.c \
+ $(MARVELL_MOCHI_DRV) \
+ $(MARVELL_DRV_BASE)/i2c/a8k_i2c.c \
+ $(PLAT_COMMON_BASE)/plat_pm.c \
+ $(MARVELL_DRV_BASE)/thermal.c \
+ $(PLAT_COMMON_BASE)/plat_thermal.c \
+ $(BLE_PORTING_SOURCES) \
+ $(MARVELL_DRV_BASE)/ccu.c \
+ $(MARVELL_DRV_BASE)/io_win.c
+
+BL1_SOURCES += $(PLAT_COMMON_BASE)/aarch64/plat_helpers.S \
+ lib/cpus/aarch64/cortex_a72.S
+
+MARVELL_DRV := $(MARVELL_DRV_BASE)/io_win.c \
+ $(MARVELL_DRV_BASE)/iob.c \
+ $(MARVELL_DRV_BASE)/mci.c \
+ $(MARVELL_DRV_BASE)/amb_adec.c \
+ $(MARVELL_DRV_BASE)/ccu.c \
+ $(MARVELL_DRV_BASE)/cache_llc.c \
+ $(MARVELL_DRV_BASE)/comphy/phy-comphy-cp110.c
+
+BL31_PORTING_SOURCES := $(PLAT_FAMILY_BASE)/$(PLAT)/board/marvell_plat_config.c
+
+BL31_SOURCES += lib/cpus/aarch64/cortex_a72.S \
+ $(PLAT_COMMON_BASE)/aarch64/plat_helpers.S \
+ $(PLAT_COMMON_BASE)/aarch64/plat_arch_config.c \
+ $(PLAT_COMMON_BASE)/plat_pm.c \
+ $(PLAT_COMMON_BASE)/plat_bl31_setup.c \
+ $(MARVELL_COMMON_BASE)/marvell_gicv2.c \
+ $(MARVELL_COMMON_BASE)/mrvl_sip_svc.c \
+ $(MARVELL_COMMON_BASE)/marvell_ddr_info.c \
+ $(BL31_PORTING_SOURCES) \
+ $(MARVELL_DRV) \
+ $(MARVELL_MOCHI_DRV) \
+ $(MARVELL_GIC_SOURCES)
+
+# Add trace functionality for PM
+BL31_SOURCES += $(PLAT_COMMON_BASE)/plat_pm_trace.c
+
+# Disable the PSCI platform compatibility layer (it allows porting
+# from the old platform APIs to the new APIs).
+# It is not needed since the Marvell platforms already use the new platform APIs.
+ENABLE_PLAT_COMPAT := 0
+
+# Force builds with the SCP_BL2 image on a8k platforms
+ifndef SCP_BL2
+ $(error "Error: SCP_BL2 image is mandatory for a8k family")
+endif
+
+# MSS (SCP) build
+include $(PLAT_COMMON_BASE)/mss/mss_a8k.mk
+
+# BLE (ROM context execution code, AKA binary extension)
+BLE_PATH ?= ble
+
+include ${BLE_PATH}/ble.mk
+$(eval $(call MAKE_BL,e))
+
+mrvl_flash: ${BUILD_PLAT}/${FIP_NAME} ${DOIMAGETOOL} ${BUILD_PLAT}/ble.bin
+ $(shell truncate -s %128K ${BUILD_PLAT}/bl1.bin)
+ $(shell cat ${BUILD_PLAT}/bl1.bin ${BUILD_PLAT}/${FIP_NAME} > ${BUILD_PLAT}/${BOOT_IMAGE})
+ ${DOIMAGETOOL} ${DOIMAGE_FLAGS} ${BUILD_PLAT}/${BOOT_IMAGE} ${BUILD_PLAT}/${FLASH_IMAGE}
+
diff --git a/plat/marvell/a8k/common/aarch64/a8k_common.c b/plat/marvell/a8k/common/aarch64/a8k_common.c
new file mode 100644
index 0000000..7c2bf31
--- /dev/null
+++ b/plat/marvell/a8k/common/aarch64/a8k_common.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <plat_marvell.h>
+
+
+/* MMU entry for internal (register) space access */
+#define MAP_DEVICE0 MAP_REGION_FLAT(DEVICE0_BASE, \
+ DEVICE0_SIZE, \
+ MT_DEVICE | MT_RW | MT_SECURE)
+
+/*
+ * Table of regions for various BL stages to map using the MMU.
+ */
+#if IMAGE_BL1
+const mmap_region_t plat_marvell_mmap[] = {
+ MARVELL_MAP_SHARED_RAM,
+ MAP_DEVICE0,
+ {0}
+};
+#endif
+#if IMAGE_BL2
+const mmap_region_t plat_marvell_mmap[] = {
+ MARVELL_MAP_SHARED_RAM,
+ MAP_DEVICE0,
+ MARVELL_MAP_DRAM,
+ {0}
+};
+#endif
+
+#if IMAGE_BL2U
+const mmap_region_t plat_marvell_mmap[] = {
+ MAP_DEVICE0,
+ {0}
+};
+#endif
+
+#if IMAGE_BLE
+const mmap_region_t plat_marvell_mmap[] = {
+ MAP_DEVICE0,
+ {0}
+};
+#endif
+
+#if IMAGE_BL31
+const mmap_region_t plat_marvell_mmap[] = {
+ MARVELL_MAP_SHARED_RAM,
+ MAP_DEVICE0,
+ MARVELL_MAP_DRAM,
+ {0}
+};
+#endif
+#if IMAGE_BL32
+const mmap_region_t plat_marvell_mmap[] = {
+ MAP_DEVICE0,
+ {0}
+};
+#endif
+
+MARVELL_CASSERT_MMAP;
diff --git a/plat/marvell/a8k/common/aarch64/plat_arch_config.c b/plat/marvell/a8k/common/aarch64/plat_arch_config.c
new file mode 100644
index 0000000..8667331
--- /dev/null
+++ b/plat/marvell/a8k/common/aarch64/plat_arch_config.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <platform.h>
+#include <arch_helpers.h>
+#include <mmio.h>
+#include <debug.h>
+#include <cache_llc.h>
+
+
+#define CCU_HTC_ASET (MVEBU_CCU_BASE(MVEBU_AP0) + 0x264)
+#define MVEBU_IO_AFFINITY (0xF00)
+
+
+static void plat_enable_affinity(void)
+{
+ int cluster_id;
+ int affinity;
+
+ /* set CPU Affinity */
+ cluster_id = plat_my_core_pos() / PLAT_MARVELL_CLUSTER_CORE_COUNT;
+ affinity = (MVEBU_IO_AFFINITY | (1 << cluster_id));
+ mmio_write_32(CCU_HTC_ASET, affinity);
+
+ /* set barrier */
+ isb();
+}
+
+void marvell_psci_arch_init(int die_index)
+{
+#if LLC_ENABLE
+ /* check if LLC is in exclusive mode
+ * as L2 is configured to UniqueClean eviction
+ * (in a8k reset handler)
+ */
+ if (llc_is_exclusive(0) == 0)
+ ERROR("LLC should be configured to exclusice mode\n");
+#endif
+
+ /* Enable Affinity */
+ plat_enable_affinity();
+}
diff --git a/plat/marvell/a8k/common/aarch64/plat_helpers.S b/plat/marvell/a8k/common/aarch64/plat_helpers.S
new file mode 100644
index 0000000..fadc4c2
--- /dev/null
+++ b/plat/marvell/a8k/common/aarch64/plat_helpers.S
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <asm_macros.S>
+#include <platform_def.h>
+#include <marvell_pm.h>
+
+ .globl plat_secondary_cold_boot_setup
+ .globl plat_get_my_entrypoint
+ .globl plat_is_my_cpu_primary
+ .globl plat_reset_handler
+
+ /* -----------------------------------------------------
+ * void plat_secondary_cold_boot_setup (void);
+ *
+ * This function performs any platform specific actions
+ * needed for a secondary cpu after a cold reset. Right
+ * now this is a stub function.
+ * -----------------------------------------------------
+ */
+func plat_secondary_cold_boot_setup
+ mov x0, #0
+ ret
+endfunc plat_secondary_cold_boot_setup
+
+ /* ---------------------------------------------------------------------
+ * unsigned long plat_get_my_entrypoint (void);
+ *
+ * Main job of this routine is to distinguish
+ * between a cold and warm boot
+ * For a cold boot, return 0.
+ * For a warm boot, read the mailbox and return the address it contains.
+ *
+ * ---------------------------------------------------------------------
+ */
+func plat_get_my_entrypoint
+ /* Read first word and compare it with magic num */
+ mov_imm x0, PLAT_MARVELL_MAILBOX_BASE
+ ldr x1, [x0]
+ mov_imm x2, MVEBU_MAILBOX_MAGIC_NUM
+ cmp x1, x2
+ beq warm_boot
+ /* If the compare failed, return 0, i.e. cold boot */
+ mov x0, #0
+ ret
+warm_boot:
+ mov_imm x1, MBOX_IDX_SEC_ADDR /* Get the jump address */
+ subs x1, x1, #1
+ mov x2, #(MBOX_IDX_SEC_ADDR * 8)
+ lsl x3, x2, x1
+ add x0, x0, x3
+ ldr x0, [x0]
+ ret
+endfunc plat_get_my_entrypoint
+
+ /* -----------------------------------------------------
+ * unsigned int plat_is_my_cpu_primary (void);
+ *
+ * Find out whether the current cpu is the primary
+ * cpu.
+ * -----------------------------------------------------
+ */
+func plat_is_my_cpu_primary
+ mrs x0, mpidr_el1
+ and x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
+ cmp x0, #MVEBU_PRIMARY_CPU
+ cset w0, eq
+ ret
+endfunc plat_is_my_cpu_primary
+
+ /* -----------------------------------------------------
+ * void plat_reset_handler (void);
+ *
+ * Platform specific configuration right after the CPU
+ * is out of reset.
+ *
+ * The plat_reset_handler can clobber x0 - x18, x30.
+ * -----------------------------------------------------
+ */
+func plat_reset_handler
+ /*
+ * Note: the configurations below should be done before MMU,
+ *	  I-Cache and L2 are enabled.
+ * The reset handler is executed right after reset
+ * and before Caches are enabled.
+ */
+
+ /* Enable L1/L2 ECC and Parity */
+ mrs x5, s3_1_c11_c0_2 /* L2 Ctrl */
+ orr x5, x5, #(1 << 21) /* Enable L1/L2 cache ECC & Parity */
+ msr s3_1_c11_c0_2, x5 /* L2 Ctrl */
+
+#if LLC_ENABLE
+ /*
+ * Enable L2 UniqueClean evictions
+ * Note: this configuration assumes that LLC is configured
+ * in exclusive mode.
+ * Later on in the code this assumption will be validated
+ */
+ mrs x5, s3_1_c15_c0_0 /* L2 Ctrl */
+ orr x5, x5, #(1 << 14) /* Enable UniqueClean evictions with data */
+ msr s3_1_c15_c0_0, x5 /* L2 Ctrl */
+#endif
+
+ /* Instruction Barrier to allow msr command completion */
+ isb
+
+ ret
+endfunc plat_reset_handler
diff --git a/plat/marvell/a8k/common/include/a8k_plat_def.h b/plat/marvell/a8k/common/include/a8k_plat_def.h
new file mode 100644
index 0000000..4ed8c7e
--- /dev/null
+++ b/plat/marvell/a8k/common/include/a8k_plat_def.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __A8K_PLAT_DEF_H__
+#define __A8K_PLAT_DEF_H__
+
+#include <marvell_def.h>
+
+#define MVEBU_PRIMARY_CPU 0x0
+#define MVEBU_AP0 0x0
+
+/* APN806 revision ID */
+#define MVEBU_CSS_GWD_CTRL_IIDR2_REG (MVEBU_REGS_BASE + 0x610FCC)
+#define GWD_IIDR2_REV_ID_OFFSET 12
+#define GWD_IIDR2_REV_ID_MASK 0xF
+#define GWD_IIDR2_CHIP_ID_OFFSET 20
+#define GWD_IIDR2_CHIP_ID_MASK (0xFFF << GWD_IIDR2_CHIP_ID_OFFSET)
+
+#define CHIP_ID_AP806 0x806
+#define CHIP_ID_AP807 0x807
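+
+/*
+ * Illustrative read of the chip/revision ID fields (sketch only, using the
+ * masks defined above):
+ *
+ *    reg = mmio_read_32(MVEBU_CSS_GWD_CTRL_IIDR2_REG);
+ *    chip_id = (reg & GWD_IIDR2_CHIP_ID_MASK) >> GWD_IIDR2_CHIP_ID_OFFSET;
+ *    rev_id = (reg >> GWD_IIDR2_REV_ID_OFFSET) & GWD_IIDR2_REV_ID_MASK;
+ */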
+
+#define COUNTER_FREQUENCY 25000000
+
+#define MVEBU_REGS_BASE 0xF0000000
+#define MVEBU_REGS_BASE_MASK 0xF0000000
+#define MVEBU_REGS_BASE_AP(ap) MVEBU_REGS_BASE
+#define MVEBU_CP_REGS_BASE(cp_index) (0xF2000000 + (cp_index) * 0x2000000)
+#define MVEBU_RFU_BASE (MVEBU_REGS_BASE + 0x6F0000)
+#define MVEBU_IO_WIN_BASE(ap_index) (MVEBU_RFU_BASE)
+#define MVEBU_IO_WIN_GCR_OFFSET (0x70)
+#define MVEBU_IO_WIN_MAX_WINS (7)
+
+/* Misc SoC configurations Base */
+#define MVEBU_MISC_SOC_BASE (MVEBU_REGS_BASE + 0x6F4300)
+
+#define MVEBU_CCU_BASE(ap_index) (MVEBU_REGS_BASE + 0x4000)
+#define MVEBU_CCU_MAX_WINS (8)
+
+#define MVEBU_LLC_BASE(ap_index) (MVEBU_REGS_BASE + 0x8000)
+#define MVEBU_DRAM_MAC_BASE (MVEBU_REGS_BASE + 0x20000)
+#define MVEBU_DRAM_PHY_BASE (MVEBU_REGS_BASE + 0x20000)
+#define MVEBU_SMMU_BASE (MVEBU_REGS_BASE + 0x100000)
+#define MVEBU_CP_MPP_REGS(cp_index, n) (MVEBU_CP_REGS_BASE(cp_index) + \
+ 0x440000 + ((n) << 2))
+#define MVEBU_PM_MPP_REGS(cp_index, n) (MVEBU_CP_REGS_BASE(cp_index) + \
+ 0x440000 + ((n / 8) << 2))
+#define MVEBU_CP_GPIO_DATA_OUT(cp_index, n) \
+ (MVEBU_CP_REGS_BASE(cp_index) + \
+ 0x440100 + ((n > 32) ? 0x40 : 0x00))
+#define MVEBU_CP_GPIO_DATA_OUT_EN(cp_index, n) \
+ (MVEBU_CP_REGS_BASE(cp_index) + \
+ 0x440104 + ((n > 32) ? 0x40 : 0x00))
+#define MVEBU_CP_GPIO_DATA_IN(cp_index, n) (MVEBU_CP_REGS_BASE(cp_index) + \
+ 0x440110 + ((n > 32) ? 0x40 : 0x00))
+#define MVEBU_AP_MPP_REGS(n) (MVEBU_RFU_BASE + 0x4000 + ((n) << 2))
+#define MVEBU_AP_GPIO_REGS (MVEBU_RFU_BASE + 0x5040)
+#define MVEBU_AP_GPIO_DATA_IN (MVEBU_AP_GPIO_REGS + 0x10)
+#define MVEBU_AP_I2C_BASE (MVEBU_REGS_BASE + 0x511000)
+#define MVEBU_CP0_I2C_BASE (MVEBU_CP_REGS_BASE(0) + 0x701000)
+#define MVEBU_AP_EXT_TSEN_BASE (MVEBU_RFU_BASE + 0x8084)
+
+#define MVEBU_AP_MC_TRUSTZONE_REG_LOW(ap, win) (MVEBU_REGS_BASE_AP(ap) + \
+ 0x20080 + ((win) * 0x8))
+#define MVEBU_AP_MC_TRUSTZONE_REG_HIGH(ap, win) (MVEBU_REGS_BASE_AP(ap) + \
+ 0x20084 + ((win) * 0x8))
+
+/* MCI indirect access definitions */
+#define MCI_MAX_UNIT_ID 2
+/* SoC RFU / IHBx4 Control */
+#define MCIX4_REG_START_ADDRESS_REG(unit_id) (MVEBU_RFU_BASE + \
+ 0x4218 + (unit_id * 0x20))
+#define MCI_REMAP_OFF_SHIFT 8
+
+#define MVEBU_MCI_REG_BASE_REMAP(index) (0xFD000000 + \
+ ((index) * 0x1000000))
+
+#define MVEBU_PCIE_X4_MAC_BASE(x) (MVEBU_CP_REGS_BASE(x) + 0x600000)
+#define MVEBU_COMPHY_BASE(x) (MVEBU_CP_REGS_BASE(x) + 0x441000)
+#define MVEBU_HPIPE_BASE(x) (MVEBU_CP_REGS_BASE(x) + 0x120000)
+#define MVEBU_CP_DFX_OFFSET (0x400200)
+
+/*****************************************************************************
+ * MVEBU memory map related constants
+ *****************************************************************************
+ */
+/* Aggregate of all devices in the first GB */
+#define DEVICE0_BASE MVEBU_REGS_BASE
+#define DEVICE0_SIZE 0x10000000
+
+/*****************************************************************************
+ * GIC-400 & interrupt handling related constants
+ *****************************************************************************
+ */
+/* Base MVEBU compatible GIC memory map */
+#define MVEBU_GICD_BASE 0x210000
+#define MVEBU_GICC_BASE 0x220000
+
+
+/*****************************************************************************
+ * AXI Configuration
+ *****************************************************************************
+ */
+#define MVEBU_AXI_ATTR_ARCACHE_OFFSET 4
+#define MVEBU_AXI_ATTR_ARCACHE_MASK (0xF << \
+ MVEBU_AXI_ATTR_ARCACHE_OFFSET)
+#define MVEBU_AXI_ATTR_ARDOMAIN_OFFSET 12
+#define MVEBU_AXI_ATTR_ARDOMAIN_MASK (0x3 << \
+ MVEBU_AXI_ATTR_ARDOMAIN_OFFSET)
+#define MVEBU_AXI_ATTR_AWCACHE_OFFSET 20
+#define MVEBU_AXI_ATTR_AWCACHE_MASK (0xF << \
+ MVEBU_AXI_ATTR_AWCACHE_OFFSET)
+#define MVEBU_AXI_ATTR_AWDOMAIN_OFFSET 28
+#define MVEBU_AXI_ATTR_AWDOMAIN_MASK (0x3 << \
+ MVEBU_AXI_ATTR_AWDOMAIN_OFFSET)
+
+/* SATA MBUS to AXI configuration */
+#define MVEBU_SATA_M2A_AXI_ARCACHE_OFFSET 1
+#define MVEBU_SATA_M2A_AXI_ARCACHE_MASK (0xF << \
+ MVEBU_SATA_M2A_AXI_ARCACHE_OFFSET)
+#define MVEBU_SATA_M2A_AXI_AWCACHE_OFFSET 5
+#define MVEBU_SATA_M2A_AXI_AWCACHE_MASK (0xF << \
+ MVEBU_SATA_M2A_AXI_AWCACHE_OFFSET)
+
+/* ARM cache attributes */
+#define CACHE_ATTR_BUFFERABLE 0x1
+#define CACHE_ATTR_CACHEABLE 0x2
+#define CACHE_ATTR_READ_ALLOC 0x4
+#define CACHE_ATTR_WRITE_ALLOC 0x8
+/* Domain */
+#define DOMAIN_NON_SHAREABLE 0x0
+#define DOMAIN_INNER_SHAREABLE 0x1
+#define DOMAIN_OUTER_SHAREABLE 0x2
+#define DOMAIN_SYSTEM_SHAREABLE 0x3
+
+/************************************************************************
+ * Required platform porting definitions common to all
+ * Management Compute SubSystems (MSS)
+ ************************************************************************
+ */
+/*
+ * Load address of SCP_BL2
+ * SCP_BL2 is loaded to the same place as BL31.
+ * Once SCP_BL2 is transferred to the SCP,
+ * it is discarded and BL31 is loaded over the top.
+ */
+#ifdef SCP_IMAGE
+#define SCP_BL2_BASE BL31_BASE
+#endif
+
+#ifndef __ASSEMBLER__
+enum ap806_sar_target_dev {
+ SAR_PIDI_MCIX2 = 0x0,
+ SAR_MCIX4 = 0x1,
+ SAR_SPI = 0x2,
+ SAR_SD = 0x3,
+ SAR_PIDI_MCIX2_BD = 0x4, /* BootRom disabled */
+ SAR_MCIX4_DB = 0x5, /* BootRom disabled */
+ SAR_SPI_DB = 0x6, /* BootRom disabled */
+ SAR_EMMC = 0x7
+};
+
+enum io_win_target_ids {
+ MCI_0_TID = 0x0,
+ MCI_1_TID = 0x1,
+ MCI_2_TID = 0x2,
+ PIDI_TID = 0x3,
+ SPI_TID = 0x4,
+ STM_TID = 0x5,
+ BOOTROM_TID = 0x6,
+ IO_WIN_MAX_TID
+};
+
+enum ccu_target_ids {
+ IO_0_TID = 0x00,
+ DRAM_0_TID = 0x03,
+ IO_1_TID = 0x0F,
+ CFG_REG_TID = 0x10,
+ RAR_TID = 0x20,
+ SRAM_TID = 0x40,
+ DRAM_1_TID = 0xC0,
+ CCU_MAX_TID,
+ INVALID_TID = 0xFF
+};
+#endif /* __ASSEMBLER__ */
+
+#endif /* __A8K_PLAT_DEF_H__ */
diff --git a/plat/marvell/a8k/common/include/ddr_info.h b/plat/marvell/a8k/common/include/ddr_info.h
new file mode 100644
index 0000000..e19036a
--- /dev/null
+++ b/plat/marvell/a8k/common/include/ddr_info.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#define DRAM_MAX_IFACE 1
+#define DRAM_CH0_MMAP_LOW_OFFSET 0x20200
diff --git a/plat/marvell/a8k/common/include/plat_macros.S b/plat/marvell/a8k/common/include/plat_macros.S
new file mode 100644
index 0000000..2a6ccf2
--- /dev/null
+++ b/plat/marvell/a8k/common/include/plat_macros.S
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __PLAT_MACROS_S__
+#define __PLAT_MACROS_S__
+
+#include <marvell_macros.S>
+
+/*
+ * Required platform porting macros
+ * (Provided by included headers)
+ */
+.macro plat_crash_print_regs
+.endm
+
+#endif /* __PLAT_MACROS_S__ */
diff --git a/plat/marvell/a8k/common/include/platform_def.h b/plat/marvell/a8k/common/include/platform_def.h
new file mode 100644
index 0000000..f7bd23f
--- /dev/null
+++ b/plat/marvell/a8k/common/include/platform_def.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <board_marvell_def.h>
+#include <gic_common.h>
+#include <interrupt_props.h>
+#include <mvebu_def.h>
+#ifndef __ASSEMBLY__
+#include <stdio.h>
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Most platform porting definitions provided by included headers
+ */
+
+/*
+ * DRAM Memory layout:
+ * +-----------------------+
+ * : :
+ * : Linux :
+ * 0x04X00000-->+-----------------------+
+ * | BL3-3(u-boot) |>>}>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
+ * |-----------------------| } |
+ * | BL3-[0,1, 2] | }---------------------------------> |
+ * |-----------------------| } || |
+ * | BL2 | }->FIP (loaded by || |
+ * |-----------------------| } BootROM to DRAM) || |
+ * | FIP_TOC | } || |
+ * 0x04120000-->|-----------------------| || |
+ * | BL1 (RO) | || |
+ * 0x04100000-->+-----------------------+ || |
+ * : : || |
+ * : Trusted SRAM section : \/ |
+ * 0x04040000-->+-----------------------+ Replaced by BL2 +----------------+ |
+ * | BL1 (RW) | <<<<<<<<<<<<<<<< | BL3-1 NOBITS | |
+ * 0x04037000-->|-----------------------| <<<<<<<<<<<<<<<< |----------------| |
+ * | | <<<<<<<<<<<<<<<< | BL3-1 PROGBITS | |
+ * 0x04023000-->|-----------------------| +----------------+ |
+ * | BL2 | |
+ * |-----------------------| |
+ * | | |
+ * 0x04001000-->|-----------------------| |
+ * | Shared | |
+ * 0x04000000-->+-----------------------+ |
+ * : : |
+ * : Linux : |
+ * : : |
+ * |-----------------------| |
+ * | | U-Boot(BL3-3) Loaded by BL2 |
+ * | U-Boot | <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
+ * 0x00000000-->+-----------------------+
+ *
+ * Trusted SRAM section 0x4000000..0x4200000:
+ * ----------------------------------------
+ * SRAM_BASE = 0x4001000
+ * BL2_BASE = 0x4006000
+ * BL2_LIMIT = BL31_BASE
+ * BL31_BASE = 0x4023000 = (64MB + 256KB - 0x1D000)
+ * BL31_PROGBITS_LIMIT = BL1_RW_BASE
+ * BL1_RW_BASE = 0x4037000 = (64MB + 256KB - 0x9000)
+ * BL1_RW_LIMIT = BL31_LIMIT = 0x4040000
+ *
+ *
+ * PLAT_MARVELL_FIP_BASE = 0x4120000
+ */
+
+/*
+ * Since BL33 is loaded by BL2 (and validated by BL31) to DRAM offset 0,
+ * it is allowed to load/copy images to 'NULL' pointers
+ */
+#if defined(IMAGE_BL2) || defined(IMAGE_BL31)
+#define PLAT_ALLOW_ZERO_ADDR_COPY
+#endif
+
+#define PLAT_MARVELL_SRAM_BASE 0xFFE1C048
+#define PLAT_MARVELL_SRAM_END 0xFFE78000
+
+#define PLAT_MARVELL_ATF_BASE 0x4000000
+#define PLAT_MARVELL_ATF_LOAD_ADDR (PLAT_MARVELL_ATF_BASE + \
+ 0x100000)
+
+#define PLAT_MARVELL_FIP_BASE (PLAT_MARVELL_ATF_LOAD_ADDR + \
+ 0x20000)
+#define PLAT_MARVELL_FIP_MAX_SIZE 0x4000000
+
+#define PLAT_MARVELL_NORTHB_COUNT 1
+
+#define PLAT_MARVELL_CLUSTER_COUNT 2
+#define PLAT_MARVELL_CLUSTER_CORE_COUNT 2
+
+#define PLAT_MARVELL_CORE_COUNT (PLAT_MARVELL_CLUSTER_COUNT * \
+ PLAT_MARVELL_CLUSTER_CORE_COUNT)
+
+/* DRAM[2MB..66MB] is used as Trusted ROM */
+#define PLAT_MARVELL_TRUSTED_ROM_BASE PLAT_MARVELL_ATF_LOAD_ADDR
+/* 64 MB TODO: reduce this to minimum needed according to fip image size */
+#define PLAT_MARVELL_TRUSTED_ROM_SIZE 0x04000000
+/* Reserve 16M for SCP (Secure PayLoad) Trusted DRAM */
+#define PLAT_MARVELL_TRUSTED_DRAM_BASE 0x04400000
+#define PLAT_MARVELL_TRUSTED_DRAM_SIZE 0x01000000 /* 16 MB */
+
+/*
+ * PLAT_MARVELL_MAX_BL1_RW_SIZE is calculated using the current BL1 RW debug size
+ * plus a little space for growth.
+ */
+#define PLAT_MARVELL_MAX_BL1_RW_SIZE 0xA000
+
+/*
+ * PLAT_MARVELL_MAX_BL2_SIZE is calculated using the current BL2 debug size plus a
+ * little space for growth.
+ */
+#define PLAT_MARVELL_MAX_BL2_SIZE 0xF000
+
+/*
+ * PLAT_MARVEL_MAX_BL31_SIZE is calculated using the current BL31 debug size plus a
+ * little space for growth.
+ */
+#define PLAT_MARVEL_MAX_BL31_SIZE 0x5D000
+
+#define PLAT_MARVELL_CPU_ENTRY_ADDR BL1_RO_BASE
+
+/* GIC related definitions */
+#define PLAT_MARVELL_GICD_BASE (MVEBU_REGS_BASE + MVEBU_GICD_BASE)
+#define PLAT_MARVELL_GICC_BASE (MVEBU_REGS_BASE + MVEBU_GICC_BASE)
+
+#define PLAT_MARVELL_G0_IRQ_PROPS(grp) \
+ INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_0, GIC_HIGHEST_SEC_PRIORITY, grp, \
+ GIC_INTR_CFG_LEVEL), \
+ INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, grp, \
+ GIC_INTR_CFG_LEVEL)
+
+#define PLAT_MARVELL_G1S_IRQ_PROPS(grp) \
+ INTR_PROP_DESC(MARVELL_IRQ_SEC_PHY_TIMER, GIC_HIGHEST_SEC_PRIORITY, \
+ grp, GIC_INTR_CFG_LEVEL), \
+ INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_1, GIC_HIGHEST_SEC_PRIORITY, grp, \
+ GIC_INTR_CFG_LEVEL), \
+ INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_2, GIC_HIGHEST_SEC_PRIORITY, grp, \
+ GIC_INTR_CFG_LEVEL), \
+ INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_3, GIC_HIGHEST_SEC_PRIORITY, grp, \
+ GIC_INTR_CFG_LEVEL), \
+ INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_4, GIC_HIGHEST_SEC_PRIORITY, grp, \
+ GIC_INTR_CFG_LEVEL), \
+ INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_5, GIC_HIGHEST_SEC_PRIORITY, grp, \
+ GIC_INTR_CFG_LEVEL), \
+ INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_7, GIC_HIGHEST_SEC_PRIORITY, grp, \
+ GIC_INTR_CFG_LEVEL)
+
+#define PLAT_MARVELL_SHARED_RAM_CACHED 1
+
+/*
+ * Load address of BL3-3 for this platform port
+ */
+#define PLAT_MARVELL_NS_IMAGE_OFFSET 0x0
+
+/* System Reference Clock */
+#define PLAT_REF_CLK_IN_HZ COUNTER_FREQUENCY
+
+/*
+ * UART (16550-compatible) related constants
+ */
+#define PLAT_MARVELL_BOOT_UART_BASE (MVEBU_REGS_BASE + 0x512000)
+#define PLAT_MARVELL_BOOT_UART_CLK_IN_HZ 200000000
+
+#define PLAT_MARVELL_CRASH_UART_BASE PLAT_MARVELL_BOOT_UART_BASE
+#define PLAT_MARVELL_CRASH_UART_CLK_IN_HZ PLAT_MARVELL_BOOT_UART_CLK_IN_HZ
+
+#define PLAT_MARVELL_BL31_RUN_UART_BASE PLAT_MARVELL_BOOT_UART_BASE
+#define PLAT_MARVELL_BL31_RUN_UART_CLK_IN_HZ PLAT_MARVELL_BOOT_UART_CLK_IN_HZ
+
+/* Recovery image enable */
+#define PLAT_RECOVERY_IMAGE_ENABLE 0
+
+/* Required platform porting definitions */
+#define PLAT_MAX_PWR_LVL MPIDR_AFFLVL1
+
+/* System timer related constants */
+#define PLAT_MARVELL_NSTIMER_FRAME_ID 1
+
+/* Mailbox base address (note the lower memory space
+ * is reserved for BLE data)
+ */
+#define PLAT_MARVELL_MAILBOX_BASE (MARVELL_TRUSTED_SRAM_BASE \
+ + 0x400)
+#define PLAT_MARVELL_MAILBOX_SIZE 0x100
+#define PLAT_MARVELL_MAILBOX_MAGIC_NUM 0x6D72766C /* mrvl */
+
+/* Securities */
+#define IRQ_SEC_OS_TICK_INT MARVELL_IRQ_SEC_PHY_TIMER
+
+#define TRUSTED_DRAM_BASE PLAT_MARVELL_TRUSTED_DRAM_BASE
+#define TRUSTED_DRAM_SIZE PLAT_MARVELL_TRUSTED_DRAM_SIZE
+
+#define BL32_BASE TRUSTED_DRAM_BASE
+
+#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/marvell/a8k/common/mss/mss_a8k.mk b/plat/marvell/a8k/common/mss/mss_a8k.mk
new file mode 100644
index 0000000..58f23d8
--- /dev/null
+++ b/plat/marvell/a8k/common/mss/mss_a8k.mk
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2018 Marvell International Ltd.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+# https://spdx.org/licenses
+#
+
+PLAT_MARVELL := plat/marvell
+A8K_MSS_SOURCE := $(PLAT_MARVELL)/a8k/common/mss
+
+BL2_SOURCES += $(A8K_MSS_SOURCE)/mss_bl2_setup.c
+
+BL31_SOURCES += $(A8K_MSS_SOURCE)/mss_pm_ipc.c
+
+PLAT_INCLUDES += -I$(A8K_MSS_SOURCE)
+
+ifneq (${SCP_BL2},)
+# This define is used to indicate that the SCP image is present
+$(eval $(call add_define,SCP_IMAGE))
+endif
diff --git a/plat/marvell/a8k/common/mss/mss_bl2_setup.c b/plat/marvell/a8k/common/mss/mss_bl2_setup.c
new file mode 100644
index 0000000..6688551
--- /dev/null
+++ b/plat/marvell/a8k/common/mss/mss_bl2_setup.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <a8k_common.h>
+#include <bl_common.h>
+#include <ccu.h>
+#include <cp110_setup.h>
+#include <debug.h>
+#include <marvell_plat_priv.h> /* timer functionality */
+#include <mmio.h>
+#include <platform_def.h>
+
+#include "mss_scp_bootloader.h"
+
+/* IO windows configuration */
+#define IOW_GCR_OFFSET (0x70)
+
+/* MSS windows configuration */
+#define MSS_AEBR(base) (base + 0x160)
+#define MSS_AIBR(base) (base + 0x164)
+#define MSS_AEBR_MASK 0xFFF
+#define MSS_AIBR_MASK 0xFFF
+
+#define MSS_EXTERNAL_SPACE 0x50000000
+#define MSS_EXTERNAL_ACCESS_BIT 28
+#define MSS_EXTERNAL_ADDR_MASK 0xfffffff
+#define MSS_INTERNAL_ACCESS_BIT 28
+
+struct addr_map_win ccu_mem_map[] = {
+ {MVEBU_CP_REGS_BASE(0), 0x4000000, IO_0_TID}
+};
+
+/* Since the scp_bl2 image can contain firmware for the cp0 and cp1
+ * coprocessors, access to both cp0 and cp1 needs to be provided. More
+ * precisely, it is required to:
+ * - get the device id information, which is stored in CP0 registers
+ *   (to distinguish between the cp0+cp1 case and the standalone cp0 case)
+ * - get access to the CPs, which is needed for loading firmware to the
+ *   cp0/cp1 coprocessors
+ * This function configures the CCU windows accordingly.
+ *
+ * Note: there is no need to restore the previous CCU configuration, since in
+ * the next phase (BL31) init_ccu will be called (via apn806_init/
+ * bl31_plat_arch_setup) and the CCU configuration will be overwritten.
+ */
+static int bl2_plat_mmap_init(void)
+{
+ int cfg_num, win_id, cfg_idx;
+
+ cfg_num = ARRAY_SIZE(ccu_mem_map);
+
+ /* CCU window-0 should not be counted - it's already used */
+ if (cfg_num > (MVEBU_CCU_MAX_WINS - 1)) {
+ ERROR("BL2: %s: trying to open too many windows\n", __func__);
+ return -1;
+ }
+
+ /* Enable required CCU windows
+ * Do not touch CCU window 0,
+ * it's used for the internal registers access
+ */
+ for (cfg_idx = 0, win_id = 1; cfg_idx < cfg_num; cfg_idx++, win_id++) {
+ /* Enable required CCU windows */
+ ccu_win_check(&ccu_mem_map[cfg_idx]);
+ ccu_enable_win(MVEBU_AP0, &ccu_mem_map[cfg_idx], win_id);
+ }
+
+ /* Set the default target id to PIDI */
+ mmio_write_32(MVEBU_IO_WIN_BASE(MVEBU_AP0) + IOW_GCR_OFFSET, PIDI_TID);
+
+ return 0;
+}
+
+/*****************************************************************************
+ * Transfer SCP_BL2 from Trusted RAM using the SCP Download protocol.
+ * Return 0 on success, -1 otherwise.
+ *****************************************************************************
+ */
+int bl2_plat_handle_scp_bl2(image_info_t *scp_bl2_image_info)
+{
+ int ret;
+
+ INFO("BL2: Initiating SCP_BL2 transfer to SCP\n");
+ printf("BL2: Initiating SCP_BL2 transfer to SCP\n");
+
+ /* initialize the timer (for delay functionality) */
+ plat_delay_timer_init();
+
+ ret = bl2_plat_mmap_init();
+ if (ret != 0)
+ return ret;
+
+ ret = scp_bootloader_transfer((void *)scp_bl2_image_info->image_base,
+ scp_bl2_image_info->image_size);
+
+ if (ret == 0)
+ INFO("BL2: SCP_BL2 transferred to SCP\n");
+ else
+ ERROR("BL2: SCP_BL2 transfer failure\n");
+
+ return ret;
+}
+
+uintptr_t bl2_plat_get_cp_mss_regs(int ap_idx, int cp_idx)
+{
+ return MVEBU_CP_REGS_BASE(cp_idx) + 0x280000;
+}
+
+uintptr_t bl2_plat_get_ap_mss_regs(int ap_idx)
+{
+ return MVEBU_REGS_BASE + 0x580000;
+}
+
+uint32_t bl2_plat_get_cp_count(int ap_idx)
+{
+ uint32_t revision = cp110_device_id_get(MVEBU_CP_REGS_BASE(0));
+ /* A8040: two CPs.
+ * A7040: one CP.
+ */
+ if (revision == MVEBU_80X0_DEV_ID ||
+ revision == MVEBU_80X0_CP115_DEV_ID)
+ return 2;
+ else
+ return 1;
+}
+
+uint32_t bl2_plat_get_ap_count(void)
+{
+ /* A8040 and A7040 have only one AP */
+ return 1;
+}
+
+void bl2_plat_configure_mss_windows(uintptr_t mss_regs)
+{
+ /* set AXI External and Internal Address Bus extension */
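+ /* The AEBR/AIBR registers hold address bits [39:28] of the external and
+ * internal base addresses: bits [39:28] of address 0x0 for the external
+ * space and bits [39:28] of the MSS register base for the internal space
+ */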
+ mmio_write_32(MSS_AEBR(mss_regs),
+ ((0x0 >> MSS_EXTERNAL_ACCESS_BIT) & MSS_AEBR_MASK));
+ mmio_write_32(MSS_AIBR(mss_regs),
+ ((mss_regs >> MSS_INTERNAL_ACCESS_BIT) & MSS_AIBR_MASK));
+}
diff --git a/plat/marvell/a8k/common/mss/mss_pm_ipc.c b/plat/marvell/a8k/common/mss/mss_pm_ipc.c
new file mode 100644
index 0000000..6ff4abc
--- /dev/null
+++ b/plat/marvell/a8k/common/mss/mss_pm_ipc.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <debug.h>
+#include <mmio.h>
+#include <psci.h>
+#include <string.h>
+
+#include <mss_pm_ipc.h>
+
+/*
+ * SISR is 32 bit interrupt register representing 32 interrupts
+ *
+ * +======+=============+=============+
+ * + Bits + 31 + 30 - 00 +
+ * +======+=============+=============+
+ * + Desc + MSS Msg Int + Reserved +
+ * +======+=============+=============+
+ */
+#define MSS_SISR (MVEBU_REGS_BASE + 0x5800D0)
+#define MSS_SISTR (MVEBU_REGS_BASE + 0x5800D8)
+
+#define MSS_MSG_INT_MASK (0x80000000)
+#define MSS_TIMER_BASE (MVEBU_REGS_BASE_MASK + 0x580110)
+#define MSS_TRIGGER_TIMEOUT (1000)
+
+/*****************************************************************************
+ * mss_pm_ipc_msg_send
+ *
+ * DESCRIPTION: create and transmit IPC message
+ *****************************************************************************
+ */
+int mss_pm_ipc_msg_send(unsigned int channel_id, unsigned int msg_id,
+ const psci_power_state_t *target_state)
+{
+ /* Transmit IPC message */
+#ifndef DISABLE_CLUSTER_LEVEL
+ mv_pm_ipc_msg_tx(channel_id, msg_id,
+ (unsigned int)target_state->pwr_domain_state[
+ MPIDR_AFFLVL1]);
+#else
+ mv_pm_ipc_msg_tx(channel_id, msg_id, 0);
+#endif
+
+ return 0;
+}
+
+/*****************************************************************************
+ * mss_pm_ipc_msg_trigger
+ *
+ * DESCRIPTION: Trigger IPC message interrupt to MSS
+ *****************************************************************************
+ */
+int mss_pm_ipc_msg_trigger(void)
+{
+ unsigned int timeout;
+ unsigned int t_end;
+ unsigned int t_start = mmio_read_32(MSS_TIMER_BASE);
+
+ mmio_write_32(MSS_SISR, MSS_MSG_INT_MASK);
+
+ do {
+ /* wait while the SCP processes the incoming interrupt */
+ if (mmio_read_32(MSS_SISTR) != MSS_MSG_INT_MASK)
+ break;
+
+ /* check timeout */
+ t_end = mmio_read_32(MSS_TIMER_BASE);
+
+ timeout = ((t_start > t_end) ?
+ (t_start - t_end) : (t_end - t_start));
+ if (timeout > MSS_TRIGGER_TIMEOUT) {
+ ERROR("PM MSG Trigger Timeout\n");
+ break;
+ }
+
+ } while (1);
+
+ return 0;
+}
diff --git a/plat/marvell/a8k/common/mss/mss_pm_ipc.h b/plat/marvell/a8k/common/mss/mss_pm_ipc.h
new file mode 100644
index 0000000..0f69457
--- /dev/null
+++ b/plat/marvell/a8k/common/mss/mss_pm_ipc.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __MSS_PM_IPC_H
+#define __MSS_PM_IPC_H
+
+#include <mss_ipc_drv.h>
+
+/* Currently MSS does not support Cluster level Power Down */
+#define DISABLE_CLUSTER_LEVEL
+
+
+/*****************************************************************************
+ * mss_pm_ipc_msg_send
+ *
+ * DESCRIPTION: create and transmit IPC message
+ *****************************************************************************
+ */
+int mss_pm_ipc_msg_send(unsigned int channel_id, unsigned int msg_id,
+ const psci_power_state_t *target_state);
+
+/*****************************************************************************
+ * mss_pm_ipc_msg_trigger
+ *
+ * DESCRIPTION: Trigger IPC message interrupt to MSS
+ *****************************************************************************
+ */
+int mss_pm_ipc_msg_trigger(void);
+
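+/*
+ * Typical usage (as in plat_pm.c): the callers serialize access to the MSS
+ * with a bakery lock, send the message and then trigger the doorbell, e.g.
+ *
+ * bakery_lock_get(&pm_sys_lock);
+ * mss_pm_ipc_msg_send(core_idx, PM_IPC_MSG_CPU_OFF, target_state);
+ * mss_pm_ipc_msg_trigger();
+ * bakery_lock_release(&pm_sys_lock);
+ */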
+
+#endif /* __MSS_PM_IPC_H */
diff --git a/plat/marvell/a8k/common/plat_bl1_setup.c b/plat/marvell/a8k/common/plat_bl1_setup.c
new file mode 100644
index 0000000..5d85102
--- /dev/null
+++ b/plat/marvell/a8k/common/plat_bl1_setup.c
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <mmio.h>
+#include <plat_marvell.h>
+
+void marvell_bl1_setup_mpps(void)
+{
+ /* Enable UART MPPs.
+ * In a normal system, this is done by the BootROM.
+ */
+ mmio_write_32(MVEBU_AP_MPP_REGS(1), 0x3000);
+ mmio_write_32(MVEBU_AP_MPP_REGS(2), 0x3000);
+}
diff --git a/plat/marvell/a8k/common/plat_bl31_setup.c b/plat/marvell/a8k/common/plat_bl31_setup.c
new file mode 100644
index 0000000..6c85fcc
--- /dev/null
+++ b/plat/marvell/a8k/common/plat_bl31_setup.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <a8k_common.h>
+#include <ap_setup.h>
+#include <cp110_setup.h>
+#include <debug.h>
+#include <marvell_plat_priv.h>
+#include <marvell_pm.h>
+#include <mmio.h>
+#include <mci.h>
+#include <plat_marvell.h>
+
+#include <mss_ipc_drv.h>
+#include <mss_mem.h>
+
+/* In the Armada-8k family (AP806/AP807), CP0 is connected to the PIDI port
+ * and CP1 is connected to the IHB via MCI #0
+ */
+#define MVEBU_MCI0 0
+
+static _Bool pm_fw_running;
+
+/* Set a weak stub for platforms that don't need to configure GPIO */
+#pragma weak marvell_gpio_config
+int marvell_gpio_config(void)
+{
+ return 0;
+}
+
+static void marvell_bl31_mpp_init(int cp)
+{
+ uint32_t reg;
+
+ /* Needed for CP#0 only */
+ if (cp)
+ return;
+
+ /*
+ * Enable CP0 I2C MPPs (MPP: 37-38)
+ * U-Boot relies on proper MPP settings for I2C EEPROM usage
+ * (only for CP0)
+ */
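+ /* MPP control register 4 covers MPPs 32-39, 4 bits per pin;
+ * 0x2200000 sets MPP37 (bits [23:20]) and MPP38 (bits [27:24])
+ * to function 0x2 (I2C)
+ */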
+ reg = mmio_read_32(MVEBU_CP_MPP_REGS(0, 4));
+ mmio_write_32(MVEBU_CP_MPP_REGS(0, 4), reg | 0x2200000);
+}
+
+void marvell_bl31_mss_init(void)
+{
+ struct mss_pm_ctrl_block *mss_pm_crtl =
+ (struct mss_pm_ctrl_block *)MSS_SRAM_PM_CONTROL_BASE;
+
+ /* Check that the image was loaded successfully */
+ if (mss_pm_crtl->handshake != HOST_ACKNOWLEDGMENT) {
+ NOTICE("MSS PM is not supported in this build\n");
+ return;
+ }
+
+ /* If we got here it means that the PM firmware is running */
+ pm_fw_running = 1;
+
+ INFO("MSS IPC init\n");
+
+ if (mss_pm_crtl->ipc_state == IPC_INITIALIZED)
+ mv_pm_ipc_init(mss_pm_crtl->ipc_base_address | MVEBU_REGS_BASE);
+}
+
+_Bool is_pm_fw_running(void)
+{
+ return pm_fw_running;
+}
+
+/* This function overrides the function of the same name in marvell_bl31_setup.c */
+void bl31_plat_arch_setup(void)
+{
+ int cp;
+ uintptr_t *mailbox = (void *)PLAT_MARVELL_MAILBOX_BASE;
+
+ /* initialize the timer for mdelay/udelay functionality */
+ plat_delay_timer_init();
+
+ /* configure apn806 */
+ ap_init();
+
+ /* In marvell_bl31_plat_arch_setup, the EL3 MMU is configured.
+ * The EL3 MMU configuration MUST be done after apn806_init, otherwise
+ * it will cause a hang in init_io_win
+ * (after setting the IO windows GCR values).
+ */
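+ /* The mailbox magic and suspend marker are both present only when
+ * resuming from suspend-to-RAM (see a8k_pwr_domain_suspend); in that
+ * case the EL3 MMU setup is skipped here
+ */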
+ if (mailbox[MBOX_IDX_MAGIC] != MVEBU_MAILBOX_MAGIC_NUM ||
+ mailbox[MBOX_IDX_SUSPEND_MAGIC] != MVEBU_MAILBOX_SUSPEND_STATE)
+ marvell_bl31_plat_arch_setup();
+
+ for (cp = 0; cp < CP_COUNT; cp++) {
+ /* The MCI link must be initialized before accessing CP1 */
+ if (cp == 1)
+ mci_initialize(MVEBU_MCI0);
+
+ /* Configure the CP110 */
+ cp110_init(MVEBU_CP_REGS_BASE(cp),
+ STREAM_ID_BASE + (cp * MAX_STREAM_ID_PER_CP));
+
+ /* Should be called only after setting IOB windows */
+ marvell_bl31_mpp_init(cp);
+ }
+
+ /* initialize IPC between MSS and ATF */
+ if (mailbox[MBOX_IDX_MAGIC] != MVEBU_MAILBOX_MAGIC_NUM ||
+ mailbox[MBOX_IDX_SUSPEND_MAGIC] != MVEBU_MAILBOX_SUSPEND_STATE)
+ marvell_bl31_mss_init();
+
+ /* Configure GPIO */
+ marvell_gpio_config();
+}
diff --git a/plat/marvell/a8k/common/plat_ble_setup.c b/plat/marvell/a8k/common/plat_ble_setup.c
new file mode 100644
index 0000000..0cd62cb
--- /dev/null
+++ b/plat/marvell/a8k/common/plat_ble_setup.c
@@ -0,0 +1,570 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <a8k_common.h>
+#include <ap_setup.h>
+#include <aro.h>
+#include <ccu.h>
+#include <cp110_setup.h>
+#include <debug.h>
+#include <io_win.h>
+#include <mv_ddr_if.h>
+#include <mvebu_def.h>
+#include <plat_marvell.h>
+
+/* Register for skip image use */
+#define SCRATCH_PAD_REG2 0xF06F00A8
+#define SCRATCH_PAD_SKIP_VAL 0x01
+#define NUM_OF_GPIO_PER_REG 32
+
+#define MMAP_SAVE_AND_CONFIG 0
+#define MMAP_RESTORE_SAVED 1
+
+/* SAR clock settings */
+#define MVEBU_AP_GEN_MGMT_BASE (MVEBU_RFU_BASE + 0x8000)
+#define MVEBU_AP_SAR_REG_BASE(r) (MVEBU_AP_GEN_MGMT_BASE + 0x200 +\
+ ((r) << 2))
+
+#define SAR_CLOCK_FREQ_MODE_OFFSET (0)
+#define SAR_CLOCK_FREQ_MODE_MASK (0x1f << SAR_CLOCK_FREQ_MODE_OFFSET)
+#define SAR_PIDI_LOW_SPEED_OFFSET (20)
+#define SAR_PIDI_LOW_SPEED_MASK (1 << SAR_PIDI_LOW_SPEED_OFFSET)
+#define SAR_PIDI_LOW_SPEED_SHIFT (15)
+#define SAR_PIDI_LOW_SPEED_SET (1 << SAR_PIDI_LOW_SPEED_SHIFT)
+
+#define FREQ_MODE_AP_SAR_REG_NUM (0)
+#define SAR_CLOCK_FREQ_MODE(v) (((v) & SAR_CLOCK_FREQ_MODE_MASK) >> \
+ SAR_CLOCK_FREQ_MODE_OFFSET)
+
+#define AVS_EN_CTRL_REG (MVEBU_AP_GEN_MGMT_BASE + 0x130)
+#define AVS_ENABLE_OFFSET (0)
+#define AVS_SOFT_RESET_OFFSET (2)
+#define AVS_LOW_VDD_LIMIT_OFFSET (4)
+#define AVS_HIGH_VDD_LIMIT_OFFSET (12)
+#define AVS_TARGET_DELTA_OFFSET (21)
+#define AVS_VDD_LOW_LIMIT_MASK (0xFF << AVS_LOW_VDD_LIMIT_OFFSET)
+#define AVS_VDD_HIGH_LIMIT_MASK (0xFF << AVS_HIGH_VDD_LIMIT_OFFSET)
+/* VDD limit is 0.9V for A70x0 @ CPU frequency < 1600MHz */
+#define AVS_A7K_LOW_CLK_VALUE ((0x80 << AVS_TARGET_DELTA_OFFSET) | \
+ (0x1A << AVS_HIGH_VDD_LIMIT_OFFSET) | \
+ (0x1A << AVS_LOW_VDD_LIMIT_OFFSET) | \
+ (0x1 << AVS_SOFT_RESET_OFFSET) | \
+ (0x1 << AVS_ENABLE_OFFSET))
+/* VDD limit is 1.0V for all A80x0 devices */
+#define AVS_A8K_CLK_VALUE ((0x80 << AVS_TARGET_DELTA_OFFSET) | \
+ (0x24 << AVS_HIGH_VDD_LIMIT_OFFSET) | \
+ (0x24 << AVS_LOW_VDD_LIMIT_OFFSET) | \
+ (0x1 << AVS_SOFT_RESET_OFFSET) | \
+ (0x1 << AVS_ENABLE_OFFSET))
+
+#define AVS_A3900_CLK_VALUE ((0x80 << 24) | \
+ (0x2c2 << 13) | \
+ (0x2c2 << 3) | \
+ (0x1 << AVS_SOFT_RESET_OFFSET) | \
+ (0x1 << AVS_ENABLE_OFFSET))
+
+#define MVEBU_AP_EFUSE_SRV_CTRL_REG (MVEBU_AP_GEN_MGMT_BASE + 0x8)
+#define EFUSE_SRV_CTRL_LD_SELECT_OFFS 6
+#define EFUSE_SRV_CTRL_LD_SEL_USER_MASK (1 << EFUSE_SRV_CTRL_LD_SELECT_OFFS)
+
+/* Notify bootloader on DRAM setup */
+#define AP807_CPU_ARO_0_CTRL_0 (MVEBU_RFU_BASE + 0x82A8)
+#define AP807_CPU_ARO_1_CTRL_0 (MVEBU_RFU_BASE + 0x8D00)
+
+/* 0 - ARO clock is enabled, 1 - ARO clock is disabled */
+#define AP807_CPU_ARO_CLK_EN_OFFSET 0
+#define AP807_CPU_ARO_CLK_EN_MASK (0x1 << AP807_CPU_ARO_CLK_EN_OFFSET)
+
+/* 0 - ARO is the clock source, 1 - PLL is the clock source */
+#define AP807_CPU_ARO_SEL_PLL_OFFSET 5
+#define AP807_CPU_ARO_SEL_PLL_MASK (0x1 << AP807_CPU_ARO_SEL_PLL_OFFSET)
+
+/*
+ * - AVS work points in the LD0 eFuse:
+ * SVC1 work point: LD0[88:81]
+ * SVC2 work point: LD0[96:89]
+ * SVC3 work point: LD0[104:97]
+ * SVC4 work point: LD0[112:105]
+ * - Identification information in the LD-0 eFuse:
+ * DRO: LD0[74:65] - Not used by the SW
+ * Revision: LD0[78:75] - Not used by the SW
+ * Bin: LD0[80:79] - Not used by the SW
+ * SW Revision: LD0[115:113]
+ * Cluster 1 PWR: LD0[193] - if set to 1, power down CPU Cluster-1
+ * resulting in 2 CPUs active only (7020)
+ */
+#define MVEBU_AP_LD_EFUSE_BASE (MVEBU_AP_GEN_MGMT_BASE + 0xF00)
+/* Bits [94:63] - 32 data bits total */
+#define MVEBU_AP_LD0_94_63_EFUSE_OFFS (MVEBU_AP_LD_EFUSE_BASE + 0x8)
+/* Bits [125:95] - 31 data bits total, 32nd bit is parity for bits [125:63] */
+#define MVEBU_AP_LD0_125_95_EFUSE_OFFS (MVEBU_AP_LD_EFUSE_BASE + 0xC)
+/* Bits [220:189] - 32 data bits total */
+#define MVEBU_AP_LD0_220_189_EFUSE_OFFS (MVEBU_AP_LD_EFUSE_BASE + 0x18)
+/* Offsets for the above 2 fields combined into single 64-bit value [125:63] */
+#define EFUSE_AP_LD0_DRO_OFFS 2 /* LD0[74:65] */
+#define EFUSE_AP_LD0_DRO_MASK 0x3FF
+#define EFUSE_AP_LD0_REVID_OFFS 12 /* LD0[78:75] */
+#define EFUSE_AP_LD0_REVID_MASK 0xF
+#define EFUSE_AP_LD0_BIN_OFFS 16 /* LD0[80:79] */
+#define EFUSE_AP_LD0_BIN_MASK 0x3
+#define EFUSE_AP_LD0_SWREV_OFFS 50 /* LD0[115:113] */
+#define EFUSE_AP_LD0_SWREV_MASK 0x7
+
+#define EFUSE_AP_LD0_SVC1_OFFS 18 /* LD0[88:81] */
+#define EFUSE_AP_LD0_SVC2_OFFS 26 /* LD0[96:89] */
+#define EFUSE_AP_LD0_SVC3_OFFS 34 /* LD0[104:97] */
+#define EFUSE_AP_LD0_SVC4_OFFS 42 /* LD0[112:105] */
+#define EFUSE_AP_LD0_WP_MASK 0xFF
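+/* The *_OFFS values above are relative to bit 63 of LD0, i.e. the first bit
+ * of the combined [125:63] value (for example LD0[81] -> 81 - 63 = 18)
+ */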
+
+#define EFUSE_AP_LD0_CLUSTER_DOWN_OFFS 4
+
+/* Return the AP revision of the chip */
+static unsigned int ble_get_ap_type(void)
+{
+ unsigned int chip_rev_id;
+
+ chip_rev_id = mmio_read_32(MVEBU_CSS_GWD_CTRL_IIDR2_REG);
+ chip_rev_id = ((chip_rev_id & GWD_IIDR2_CHIP_ID_MASK) >>
+ GWD_IIDR2_CHIP_ID_OFFSET);
+
+ return chip_rev_id;
+}
+
+/******************************************************************************
+ * This routine saves the CCU and IO windows configuration during DRAM
+ * setup and restores them afterwards, before exiting the BLE stage.
+ * Such window configuration is required since not all default settings coming
+ * from the HW and the BootROM allow access to peripherals connected to
+ * all available CPn components.
+ * For instance, when the boot device is located on CP0, the IO window to CP1
+ * is not opened automatically by the HW, and if the DRAM SPD is located on the
+ * CP1 i2c channel, it cannot be read at the BLE stage.
+ * Therefore the DRAM init procedure has to provide access to all available
+ * CPn peripherals during the BLE stage by setting the CCU IO window to all
+ * CPn peripheral addresses and by enabling the IO windows accordingly.
+ * Additionally this function configures the CCU GCR to DRAM, which allows
+ * usage of more than the 4GB DRAM covered by the default CCU DRAM window.
+ *
+ * IN:
+ * MMAP_SAVE_AND_CONFIG - save the existing configuration and update it
+ * MMAP_RESTORE_SAVED - restore saved configuration
+ * OUT:
+ * NONE
+ ****************************************************************************
+ */
+static void ble_plat_mmap_config(int restore)
+{
+ if (restore == MMAP_RESTORE_SAVED) {
+ /* Restore the original CCU settings modified by the BLE stage */
+ ccu_restore_win_all(MVEBU_AP0);
+ /* Restore the original IO windows configuration */
+ iow_restore_win_all(MVEBU_AP0);
+ return;
+ }
+
+ /* Store the original CCU configuration */
+ ccu_save_win_all(MVEBU_AP0);
+ /* Store the original IO windows configuration */
+ iow_save_win_all(MVEBU_AP0);
+
+ /* The configuration is saved, now all the required changes can be done */
+ init_ccu(MVEBU_AP0);
+ init_io_win(MVEBU_AP0);
+}
+
+/****************************************************************************
+ * Setup Adaptive Voltage Switching - this is required for some platforms
+ ****************************************************************************
+ */
+static void ble_plat_avs_config(void)
+{
+ uint32_t reg_val, device_id;
+
+ /* Check which SoC is running and act accordingly */
+ if (ble_get_ap_type() == CHIP_ID_AP807) {
+ VERBOSE("AVS: Setting AP807 AVS CTRL to 0x%x\n",
+ AVS_A3900_CLK_VALUE);
+ mmio_write_32(AVS_EN_CTRL_REG, AVS_A3900_CLK_VALUE);
+ return;
+ }
+
+ /* Check which SoC is running and act accordingly */
+ device_id = cp110_device_id_get(MVEBU_CP_REGS_BASE(0));
+ switch (device_id) {
+ case MVEBU_80X0_DEV_ID:
+ case MVEBU_80X0_CP115_DEV_ID:
+ /* Set the new AVS value - fix the default one on A80x0 */
+ mmio_write_32(AVS_EN_CTRL_REG, AVS_A8K_CLK_VALUE);
+ break;
+ case MVEBU_70X0_DEV_ID:
+ case MVEBU_70X0_CP115_DEV_ID:
+ /* Only fix AVS for CPU clocks lower than 1600MHz on A70x0 */
+ reg_val = mmio_read_32(MVEBU_AP_SAR_REG_BASE(
+ FREQ_MODE_AP_SAR_REG_NUM));
+ reg_val &= SAR_CLOCK_FREQ_MODE_MASK;
+ reg_val >>= SAR_CLOCK_FREQ_MODE_OFFSET;
+ if ((reg_val > CPU_1600_DDR_900_RCLK_900_2) &&
+ (reg_val < CPU_DDR_RCLK_INVALID))
+ mmio_write_32(AVS_EN_CTRL_REG, AVS_A7K_LOW_CLK_VALUE);
+ break;
+ default:
+ ERROR("Unsupported Device ID 0x%x\n", device_id);
+ }
+}
+
+/****************************************************************************
+ * SVC flow - v0.10
+ * The feature is intended to configure AVS value according to eFuse values
+ * that are burned individually for each SoC during the test process.
+ * Primary AVS value is stored in HD efuse and processed on power on
+ * by the HW engine
+ * Secondary AVS value is located in LD efuse and contains 4 work points for
+ * various CPU frequencies.
+ * The Secondary AVS value is only taken into account if the SW Revision stored
+ * in the efuse is greater than 0 and the CPU is running at a certain speed.
+ ****************************************************************************
+ */
+static void ble_plat_svc_config(void)
+{
+ uint32_t reg_val, avs_workpoint, freq_pidi_mode;
+ uint64_t efuse;
+ uint32_t device_id, single_cluster;
+ uint8_t svc[4], perr[4], i, sw_ver;
+
+ /* Due to a bug in the A3900 device_id, skip the SVC configuration.
+ * TODO: add the SVC configuration once it is decided for A3900
+ */
+ if (ble_get_ap_type() == CHIP_ID_AP807) {
+ NOTICE("SVC: SVC is not supported on AP807\n");
+ ble_plat_avs_config();
+ return;
+ }
+
+ /* Set access to LD0 */
+ reg_val = mmio_read_32(MVEBU_AP_EFUSE_SRV_CTRL_REG);
+ reg_val &= ~EFUSE_SRV_CTRL_LD_SEL_USER_MASK;
+ mmio_write_32(MVEBU_AP_EFUSE_SRV_CTRL_REG, reg_val);
+
+ /* Obtain the value of LD0[125:63] */
+ efuse = mmio_read_32(MVEBU_AP_LD0_125_95_EFUSE_OFFS);
+ efuse <<= 32;
+ efuse |= mmio_read_32(MVEBU_AP_LD0_94_63_EFUSE_OFFS);
+
+ /* SW Revision:
+ * Starting from SW revision 1 the SVC flow is supported.
+ * SW version 0 (efuse not programmed) should follow the
+ * regular AVS update flow.
+ */
+ sw_ver = (efuse >> EFUSE_AP_LD0_SWREV_OFFS) & EFUSE_AP_LD0_SWREV_MASK;
+ if (sw_ver < 1) {
+ NOTICE("SVC: SW Revision 0x%x. SVC is not supported\n", sw_ver);
+ ble_plat_avs_config();
+ return;
+ }
+
+ /* Frequency mode from SAR */
+ freq_pidi_mode = SAR_CLOCK_FREQ_MODE(
+ mmio_read_32(
+ MVEBU_AP_SAR_REG_BASE(
+ FREQ_MODE_AP_SAR_REG_NUM)));
+
+ /* Decode all SVC work points */
+ svc[0] = (efuse >> EFUSE_AP_LD0_SVC1_OFFS) & EFUSE_AP_LD0_WP_MASK;
+ svc[1] = (efuse >> EFUSE_AP_LD0_SVC2_OFFS) & EFUSE_AP_LD0_WP_MASK;
+ svc[2] = (efuse >> EFUSE_AP_LD0_SVC3_OFFS) & EFUSE_AP_LD0_WP_MASK;
+ svc[3] = (efuse >> EFUSE_AP_LD0_SVC4_OFFS) & EFUSE_AP_LD0_WP_MASK;
+ INFO("SVC: Efuse WP: [0]=0x%x, [1]=0x%x, [2]=0x%x, [3]=0x%x\n",
+ svc[0], svc[1], svc[2], svc[3]);
+
+ /* Validate parity of SVC workpoint values */
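+ /* Each work point holds 7 data bits in [6:0] and an even parity bit
+ * in bit 7 (the XOR of bits [6:0]); a mismatch marks the work point
+ * as invalid
+ */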
+ for (i = 0; i < 4; i++) {
+ uint8_t parity, bit;
+
+ perr[i] = 0;
+
+ for (bit = 1, parity = svc[i] & 1; bit < 7; bit++)
+ parity ^= (svc[i] >> bit) & 1;
+
+ /* Starting from SW version 2, the parity check is mandatory */
+ if ((sw_ver > 1) && (parity != ((svc[i] >> 7) & 1)))
+ perr[i] = 1; /* register the error */
+ }
+
+ single_cluster = mmio_read_32(MVEBU_AP_LD0_220_189_EFUSE_OFFS);
+ single_cluster = (single_cluster >> EFUSE_AP_LD0_CLUSTER_DOWN_OFFS) & 1;
+
+ device_id = cp110_device_id_get(MVEBU_CP_REGS_BASE(0));
+ if (device_id == MVEBU_80X0_DEV_ID ||
+ device_id == MVEBU_80X0_CP115_DEV_ID) {
+ /* A8040/A8020 */
+ NOTICE("SVC: DEV ID: %s, FREQ Mode: 0x%x\n",
+ single_cluster == 0 ? "8040" : "8020", freq_pidi_mode);
+ switch (freq_pidi_mode) {
+ case CPU_1800_DDR_1200_RCLK_1200:
+ case CPU_1800_DDR_1050_RCLK_1050:
+ if (perr[1])
+ goto perror;
+ avs_workpoint = svc[1];
+ break;
+ case CPU_1600_DDR_1050_RCLK_1050:
+ case CPU_1600_DDR_900_RCLK_900_2:
+ if (perr[2])
+ goto perror;
+ avs_workpoint = svc[2];
+ break;
+ case CPU_1300_DDR_800_RCLK_800:
+ case CPU_1300_DDR_650_RCLK_650:
+ if (perr[3])
+ goto perror;
+ avs_workpoint = svc[3];
+ break;
+ case CPU_2000_DDR_1200_RCLK_1200:
+ case CPU_2000_DDR_1050_RCLK_1050:
+ default:
+ if (perr[0])
+ goto perror;
+ avs_workpoint = svc[0];
+ break;
+ }
+ } else if (device_id == MVEBU_70X0_DEV_ID ||
+ device_id == MVEBU_70X0_CP115_DEV_ID) {
+ /* A7040/A7020/A6040 */
+ NOTICE("SVC: DEV ID: %s, FREQ Mode: 0x%x\n",
+ single_cluster == 0 ? "7040" : "7020", freq_pidi_mode);
+ switch (freq_pidi_mode) {
+ case CPU_1400_DDR_800_RCLK_800:
+ if (single_cluster) {/* 7020 */
+ if (perr[1])
+ goto perror;
+ avs_workpoint = svc[1];
+ } else {
+ if (perr[0])
+ goto perror;
+ avs_workpoint = svc[0];
+ }
+ break;
+ case CPU_1200_DDR_800_RCLK_800:
+ if (single_cluster) {/* 7020 */
+ if (perr[2])
+ goto perror;
+ avs_workpoint = svc[2];
+ } else {
+ if (perr[1])
+ goto perror;
+ avs_workpoint = svc[1];
+ }
+ break;
+ case CPU_800_DDR_800_RCLK_800:
+ case CPU_1000_DDR_800_RCLK_800:
+ if (single_cluster) {/* 7020 */
+ if (perr[3])
+ goto perror;
+ avs_workpoint = svc[3];
+ } else {
+ if (perr[2])
+ goto perror;
+ avs_workpoint = svc[2];
+ }
+ break;
+ case CPU_600_DDR_800_RCLK_800:
+ if (perr[3])
+ goto perror;
+ avs_workpoint = svc[3]; /* Same for 6040 and 7020 */
+ break;
+ case CPU_1600_DDR_800_RCLK_800: /* 7020 only */
+ default:
+ if (single_cluster) {/* 7020 */
+ if (perr[0])
+ goto perror;
+ avs_workpoint = svc[0];
+ } else
+ avs_workpoint = 0;
+ break;
+ }
+ } else {
+ ERROR("SVC: Unsupported Device ID 0x%x\n", device_id);
+ return;
+ }
+
+ /* Set AVS control if needed */
+ if (avs_workpoint == 0) {
+ ERROR("SVC: AVS work point not changed\n");
+ return;
+ }
+
+ /* Remove parity bit */
+ avs_workpoint &= 0x7F;
+
+ reg_val = mmio_read_32(AVS_EN_CTRL_REG);
+ NOTICE("SVC: AVS work point changed from 0x%x to 0x%x\n",
+ (reg_val & AVS_VDD_LOW_LIMIT_MASK) >> AVS_LOW_VDD_LIMIT_OFFSET,
+ avs_workpoint);
+ reg_val &= ~(AVS_VDD_LOW_LIMIT_MASK | AVS_VDD_HIGH_LIMIT_MASK);
+ reg_val |= 0x1 << AVS_ENABLE_OFFSET;
+ reg_val |= avs_workpoint << AVS_HIGH_VDD_LIMIT_OFFSET;
+ reg_val |= avs_workpoint << AVS_LOW_VDD_LIMIT_OFFSET;
+ mmio_write_32(AVS_EN_CTRL_REG, reg_val);
+ return;
+
+perror:
+ ERROR("Failed SVC WP[%d] parity check!\n", i);
+ ERROR("Ignoring the WP values\n");
+}
+
+#if PLAT_RECOVERY_IMAGE_ENABLE
+static int ble_skip_image_i2c(struct skip_image *skip_im)
+{
+ ERROR("skipping image using i2c is not supported\n");
+ /* not supported */
+ return 0;
+}
+
+static int ble_skip_image_other(struct skip_image *skip_im)
+{
+ ERROR("implementation missing for skip image request\n");
+ /* not supported, make your own implementation */
+ return 0;
+}
+
+static int ble_skip_image_gpio(struct skip_image *skip_im)
+{
+ unsigned int val;
+ unsigned int mpp_address = 0;
+ unsigned int offset = 0;
+
+ switch (skip_im->info.test.cp_ap) {
+ case(CP):
+ mpp_address = MVEBU_CP_GPIO_DATA_IN(skip_im->info.test.cp_index,
+ skip_im->info.gpio.num);
+ if (skip_im->info.gpio.num > NUM_OF_GPIO_PER_REG)
+ offset = skip_im->info.gpio.num - NUM_OF_GPIO_PER_REG;
+ else
+ offset = skip_im->info.gpio.num;
+ break;
+ case(AP):
+ mpp_address = MVEBU_AP_GPIO_DATA_IN;
+ offset = skip_im->info.gpio.num;
+ break;
+ }
+
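+ /* Read the GPIO data-in register and isolate the requested pin;
+ * the request is valid when the pin level matches the configured
+ * active button state
+ */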
+ val = mmio_read_32(mpp_address);
+ val &= (1 << offset);
+ if ((!val && skip_im->info.gpio.button_state == HIGH) ||
+ (val && skip_im->info.gpio.button_state == LOW)) {
+ mmio_write_32(SCRATCH_PAD_REG2, SCRATCH_PAD_SKIP_VAL);
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * This function checks if there's a skip image request:
+ * return values:
+ * 1: (true) an image skip request has been made.
+ * 0: (false) no image skip request has been made.
+ */
+static int ble_skip_current_image(void)
+{
+ struct skip_image *skip_im;
+
+ /* Fetch the skip image info */
+ skip_im = (struct skip_image *)plat_marvell_get_skip_image_data();
+
+ if (skip_im == NULL)
+ return 0;
+
+ /* check if skipping image request has already been made */
+ if (mmio_read_32(SCRATCH_PAD_REG2) == SCRATCH_PAD_SKIP_VAL)
+ return 0;
+
+ switch (skip_im->detection_method) {
+ case GPIO:
+ return ble_skip_image_gpio(skip_im);
+ case I2C:
+ return ble_skip_image_i2c(skip_im);
+ case USER_DEFINED:
+ return ble_skip_image_other(skip_im);
+ }
+
+ return 0;
+}
+#endif
+
+/* Switch the AP807 CPU clock source from ARO to PLL */
+static void aro_to_pll(void)
+{
+ unsigned int reg;
+
+ /* switch from ARO to PLL */
+ reg = mmio_read_32(AP807_CPU_ARO_0_CTRL_0);
+ reg |= AP807_CPU_ARO_SEL_PLL_MASK;
+ mmio_write_32(AP807_CPU_ARO_0_CTRL_0, reg);
+
+ reg = mmio_read_32(AP807_CPU_ARO_1_CTRL_0);
+ reg |= AP807_CPU_ARO_SEL_PLL_MASK;
+ mmio_write_32(AP807_CPU_ARO_1_CTRL_0, reg);
+
+ mdelay(1000);
+
+ /* disable ARO clk driver */
+ reg = mmio_read_32(AP807_CPU_ARO_0_CTRL_0);
+ reg |= (AP807_CPU_ARO_CLK_EN_MASK);
+ mmio_write_32(AP807_CPU_ARO_0_CTRL_0, reg);
+
+ reg = mmio_read_32(AP807_CPU_ARO_1_CTRL_0);
+ reg |= (AP807_CPU_ARO_CLK_EN_MASK);
+ mmio_write_32(AP807_CPU_ARO_1_CTRL_0, reg);
+}
+
+int ble_plat_setup(int *skip)
+{
+ int ret;
+
+ /* Power down unused CPUs */
+ plat_marvell_early_cpu_powerdown();
+
+ /*
+ * Save the current CCU configuration and make required changes:
+ * - Allow access to DRAM larger than 4GB
+ * - Open memory access to all CPn peripherals
+ */
+ ble_plat_mmap_config(MMAP_SAVE_AND_CONFIG);
+
+#if PLAT_RECOVERY_IMAGE_ENABLE
+ /* Check if there is a request to skip to the BootROM recovery image */
+ if (ble_skip_current_image()) {
+ /* Close memory access to all CPn peripherals */
+ ble_plat_mmap_config(MMAP_RESTORE_SAVED);
+ *skip = 1;
+ return 0;
+ }
+#endif
+ /* Do required CP-110 setups for BLE stage */
+ cp110_ble_init(MVEBU_CP_REGS_BASE(0));
+
+ /* Setup the SVC/AVS configuration */
+ ble_plat_svc_config();
+
+ /* Switch the AP807 CPU clock from ARO to the PLL */
+ if (ble_get_ap_type() == CHIP_ID_AP807)
+ aro_to_pll();
+
+ /* Do required AP setups for BLE stage */
+ ap_ble_init();
+
+ /* Update DRAM topology (scan DIMM SPDs) */
+ plat_marvell_dram_update_topology();
+
+ /* Kick it in */
+ ret = dram_init();
+
+ /* Restore the original CCU configuration before exit from BLE */
+ ble_plat_mmap_config(MMAP_RESTORE_SAVED);
+
+ return ret;
+}
diff --git a/plat/marvell/a8k/common/plat_pm.c b/plat/marvell/a8k/common/plat_pm.c
new file mode 100644
index 0000000..c716ee0
--- /dev/null
+++ b/plat/marvell/a8k/common/plat_pm.c
@@ -0,0 +1,829 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <a8k_common.h>
+#include <assert.h>
+#include <bakery_lock.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <cache_llc.h>
+#include <console.h>
+#include <gicv2.h>
+#include <marvell_pm.h>
+#include <mmio.h>
+#include <mss_pm_ipc.h>
+#include <plat_marvell.h>
+#include <platform.h>
+#include <plat_pm_trace.h>
+#include <platform.h>
+
+#define MVEBU_PRIVATE_UID_REG 0x30
+#define MVEBU_RFU_GLOBL_SW_RST 0x84
+#define MVEBU_CCU_RVBAR(cpu) (MVEBU_REGS_BASE + 0x640 + (cpu * 4))
+#define MVEBU_CCU_CPU_UN_RESET(cpu) (MVEBU_REGS_BASE + 0x650 + (cpu * 4))
+
+#define MPIDR_CPU_GET(mpidr) ((mpidr) & MPIDR_CPU_MASK)
+#define MPIDR_CLUSTER_GET(mpidr) MPIDR_AFFLVL1_VAL((mpidr))
+
+#define MVEBU_GPIO_MASK(index) (1 << (index % 32))
+#define MVEBU_MPP_MASK(index) (0xF << (4 * (index % 8)))
+#define MVEBU_GPIO_VALUE(index, value) (value << (index % 32))
+
+#define MVEBU_USER_CMD_0_REG (MVEBU_DRAM_MAC_BASE + 0x20)
+#define MVEBU_USER_CMD_CH0_OFFSET 28
+#define MVEBU_USER_CMD_CH0_MASK (1 << MVEBU_USER_CMD_CH0_OFFSET)
+#define MVEBU_USER_CMD_CH0_EN (1 << MVEBU_USER_CMD_CH0_OFFSET)
+#define MVEBU_USER_CMD_CS_OFFSET 24
+#define MVEBU_USER_CMD_CS_MASK (0xF << MVEBU_USER_CMD_CS_OFFSET)
+#define MVEBU_USER_CMD_CS_ALL (0xF << MVEBU_USER_CMD_CS_OFFSET)
+#define MVEBU_USER_CMD_SR_OFFSET 6
+#define MVEBU_USER_CMD_SR_MASK (0x3 << MVEBU_USER_CMD_SR_OFFSET)
+#define MVEBU_USER_CMD_SR_ENTER (0x1 << MVEBU_USER_CMD_SR_OFFSET)
+#define MVEBU_MC_PWR_CTRL_REG (MVEBU_DRAM_MAC_BASE + 0x54)
+#define MVEBU_MC_AC_ON_DLY_OFFSET 8
+#define MVEBU_MC_AC_ON_DLY_MASK (0xF << MVEBU_MC_AC_ON_DLY_OFFSET)
+#define MVEBU_MC_AC_ON_DLY_DEF_VAR (8 << MVEBU_MC_AC_ON_DLY_OFFSET)
+#define MVEBU_MC_AC_OFF_DLY_OFFSET 4
+#define MVEBU_MC_AC_OFF_DLY_MASK (0xF << MVEBU_MC_AC_OFF_DLY_OFFSET)
+#define MVEBU_MC_AC_OFF_DLY_DEF_VAR (0xC << MVEBU_MC_AC_OFF_DLY_OFFSET)
+#define MVEBU_MC_PHY_AUTO_OFF_OFFSET 0
+#define MVEBU_MC_PHY_AUTO_OFF_MASK (1 << MVEBU_MC_PHY_AUTO_OFF_OFFSET)
+#define MVEBU_MC_PHY_AUTO_OFF_EN (1 << MVEBU_MC_PHY_AUTO_OFF_OFFSET)
+
+/* This lock synchronizes the execution of multiple AP cores with the MSS */
+DEFINE_BAKERY_LOCK(pm_sys_lock);
+
+/* Weak definitions may be overridden in a specific board port */
+#pragma weak plat_marvell_get_pm_cfg
+
+/* AP806 CPU power down / power up definitions */
+enum CPU_ID {
+ CPU0,
+ CPU1,
+ CPU2,
+ CPU3
+};
+
+#define REG_WR_VALIDATE_TIMEOUT (2000)
+
+#define FEATURE_DISABLE_STATUS_REG \
+ (MVEBU_REGS_BASE + 0x6F8230)
+#define FEATURE_DISABLE_STATUS_CPU_CLUSTER_OFFSET 4
+#define FEATURE_DISABLE_STATUS_CPU_CLUSTER_MASK \
+ (0x1 << FEATURE_DISABLE_STATUS_CPU_CLUSTER_OFFSET)
+
+#ifdef MVEBU_SOC_AP807
+ #define PWRC_CPUN_CR_PWR_DN_RQ_OFFSET 1
+ #define PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET 0
+#else
+ #define PWRC_CPUN_CR_PWR_DN_RQ_OFFSET 0
+ #define PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET 31
+#endif
+
+#define PWRC_CPUN_CR_REG(cpu_id) \
+ (MVEBU_REGS_BASE + 0x680000 + (cpu_id * 0x10))
+#define PWRC_CPUN_CR_PWR_DN_RQ_MASK \
+ (0x1 << PWRC_CPUN_CR_PWR_DN_RQ_OFFSET)
+#define PWRC_CPUN_CR_ISO_ENABLE_OFFSET 16
+#define PWRC_CPUN_CR_ISO_ENABLE_MASK \
+ (0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET)
+#define PWRC_CPUN_CR_LDO_BYPASS_RDY_MASK \
+ (0x1 << PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET)
+
+#define CCU_B_PRCRN_REG(cpu_id) \
+ (MVEBU_REGS_BASE + 0x1A50 + \
+ ((cpu_id / 2) * (0x400)) + ((cpu_id % 2) * 4))
+#define CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET 0
+#define CCU_B_PRCRN_CPUPORESET_STATIC_MASK \
+ (0x1 << CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET)
+
+/* power switch fingers */
+#define AP807_PWRC_LDO_CR0_REG \
+ (MVEBU_REGS_BASE + 0x680000 + 0x100)
+#define AP807_PWRC_LDO_CR0_OFFSET 16
+#define AP807_PWRC_LDO_CR0_MASK \
+ (0xff << AP807_PWRC_LDO_CR0_OFFSET)
+#define AP807_PWRC_LDO_CR0_VAL 0xfd
+
+/*
+ * Power down CPU:
+ * Used to reduce power consumption and avoid unnecessary SoC temperature rise.
+ */
+static int plat_marvell_cpu_powerdown(int cpu_id)
+{
+ uint32_t reg_val;
+ int exit_loop = REG_WR_VALIDATE_TIMEOUT;
+
+ INFO("Powering down CPU%d\n", cpu_id);
+
+ /* 1. Isolation enable */
+ reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+ reg_val |= 0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET;
+ mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);
+
+ /* 2. Read and check Isolation enabled - verify bit set to 1 */
+ do {
+ reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+ exit_loop--;
+ } while (!(reg_val & (0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET)) &&
+ exit_loop > 0);
+
+ /* 3. Switch off CPU power */
+ reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+ reg_val &= ~PWRC_CPUN_CR_PWR_DN_RQ_MASK;
+ mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);
+
+ /* 4. Read and check Switch Off - verify bit set to 0 */
+ exit_loop = REG_WR_VALIDATE_TIMEOUT;
+ do {
+ reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+ exit_loop--;
+ } while (reg_val & PWRC_CPUN_CR_PWR_DN_RQ_MASK && exit_loop > 0);
+
+ if (exit_loop <= 0)
+ goto cpu_poweroff_error;
+
+ /* 5. De-Assert power ready */
+ reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+ reg_val &= ~PWRC_CPUN_CR_LDO_BYPASS_RDY_MASK;
+ mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);
+
+ /* 6. Assert CPU POR reset */
+ reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
+ reg_val &= ~CCU_B_PRCRN_CPUPORESET_STATIC_MASK;
+ mmio_write_32(CCU_B_PRCRN_REG(cpu_id), reg_val);
+
+ /* 7. Read and poll to validate that the CPU POR reset was asserted */
+ exit_loop = REG_WR_VALIDATE_TIMEOUT;
+ do {
+ reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
+ exit_loop--;
+ } while (reg_val & CCU_B_PRCRN_CPUPORESET_STATIC_MASK && exit_loop > 0);
+
+ if (exit_loop <= 0)
+ goto cpu_poweroff_error;
+
+ INFO("Successfully powered down CPU%d\n", cpu_id);
+
+ return 0;
+
+cpu_poweroff_error:
+ ERROR("ERROR: Can't power down CPU%d\n", cpu_id);
+ return -1;
+}
+
+/*
+ * Power down CPUs 1-3 at early boot stage,
+ * to reduce power consumption and SoC temperature.
+ * This is triggered by BLE prior to DDR initialization.
+ *
+ * Note:
+ * All CPUs will be powered up by plat_marvell_cpu_powerup at the Linux boot stage,
+ * which is triggered by PSCI ops (pwr_domain_on).
+ */
+int plat_marvell_early_cpu_powerdown(void)
+{
+ uint32_t cpu_cluster_status =
+ mmio_read_32(FEATURE_DISABLE_STATUS_REG) &
+ FEATURE_DISABLE_STATUS_CPU_CLUSTER_MASK;
+ /* If the cpu_cluster_status bit is set,
+ * only a single cluster is present
+ */
+ int cluster_count = cpu_cluster_status ? 1 : 2;
+
+ INFO("Powering off unused CPUs\n");
+
+ /* CPU1 is in AP806 cluster-0, which always exists, so power it down */
+ if (plat_marvell_cpu_powerdown(CPU1) == -1)
+ return -1;
+
+ /*
+ * CPU2-3 are in the AP806 2nd cluster (cluster-1),
+ * which doesn't exist in dual-core systems,
+ * so check whether we have a dual-core (single cluster)
+ * or a quad-core (2 clusters) part.
+ */
+ if (cluster_count == 2) {
+ /* CPU2-3 are part of 2nd cluster */
+ if (plat_marvell_cpu_powerdown(CPU2) == -1)
+ return -1;
+ if (plat_marvell_cpu_powerdown(CPU3) == -1)
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Power up CPU - part of Linux boot stage
+ */
+static int plat_marvell_cpu_powerup(u_register_t mpidr)
+{
+ uint32_t reg_val;
+ int cpu_id = MPIDR_CPU_GET(mpidr),
+ cluster = MPIDR_CLUSTER_GET(mpidr);
+ int exit_loop = REG_WR_VALIDATE_TIMEOUT;
+
+ /* calculate absolute CPU ID */
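+ /* e.g. with two cores per cluster, MPIDR cluster 1 / core 1 maps
+ * to absolute CPU ID 3
+ */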
+ cpu_id = cluster * PLAT_MARVELL_CLUSTER_CORE_COUNT + cpu_id;
+
+ INFO("Powering on CPU%d\n", cpu_id);
+
+#ifdef MVEBU_SOC_AP807
+ /* Activate 2 power switch fingers */
+ reg_val = mmio_read_32(AP807_PWRC_LDO_CR0_REG);
+ reg_val &= ~(AP807_PWRC_LDO_CR0_MASK);
+ reg_val |= (AP807_PWRC_LDO_CR0_VAL << AP807_PWRC_LDO_CR0_OFFSET);
+ mmio_write_32(AP807_PWRC_LDO_CR0_REG, reg_val);
+ udelay(100);
+#endif
+
+ /* 1. Switch CPU power ON */
+ reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+ reg_val |= 0x1 << PWRC_CPUN_CR_PWR_DN_RQ_OFFSET;
+ mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);
+
+ /* 2. Wait for CPU on, up to 100 uSec: */
+ udelay(100);
+
+ /* 3. Assert power ready */
+ reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+ reg_val |= 0x1 << PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET;
+ mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);
+
+ /* 4. Read and validate power ready
+ * (the read is also used to generate 16 host CPU cycles)
+ */
+ do {
+ reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+ exit_loop--;
+ } while (!(reg_val & (0x1 << PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET)) &&
+ exit_loop > 0);
+
+ if (exit_loop <= 0)
+ goto cpu_poweron_error;
+
+ /* 5. Isolation disable */
+ reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+ reg_val &= ~PWRC_CPUN_CR_ISO_ENABLE_MASK;
+ mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);
+
+ /* 6. Read and check Isolation disabled - verify bit set to 0 */
+ exit_loop = REG_WR_VALIDATE_TIMEOUT;
+ do {
+ reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
+ exit_loop--;
+ } while ((reg_val & (0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET)) &&
+ exit_loop > 0);
+
+ /* 7. De-assert CPU POR reset & Core reset */
+ reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
+ reg_val |= 0x1 << CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET;
+ mmio_write_32(CCU_B_PRCRN_REG(cpu_id), reg_val);
+
+ /* 8. Read and validate that the CPU POR reset was de-asserted */
+ exit_loop = REG_WR_VALIDATE_TIMEOUT;
+ do {
+ reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
+ exit_loop--;
+ } while (!(reg_val & (0x1 << CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET)) &&
+ exit_loop > 0);
+
+ if (exit_loop <= 0)
+ goto cpu_poweron_error;
+
+ INFO("Successfully powered on CPU%d\n", cpu_id);
+
+ return 0;
+
+cpu_poweron_error:
+ ERROR("ERROR: Can't power up CPU%d\n", cpu_id);
+ return -1;
+}
+
+static int plat_marvell_cpu_on(u_register_t mpidr)
+{
+ int cpu_id;
+ int cluster;
+
+ /* Set barrier */
+ dsbsy();
+
+ /* Get cpu number - use CPU ID */
+ cpu_id = MPIDR_CPU_GET(mpidr);
+
+ /* Get cluster number - use affinity level 1 */
+ cluster = MPIDR_CLUSTER_GET(mpidr);
+
+ /* Set CPU private UID */
+ mmio_write_32(MVEBU_REGS_BASE + MVEBU_PRIVATE_UID_REG, cluster + 0x4);
+
+ /* Set the cpu start address to BL1 entry point (align to 0x10000) */
+ mmio_write_32(MVEBU_CCU_RVBAR(cpu_id),
+ PLAT_MARVELL_CPU_ENTRY_ADDR >> 16);
+
+ /* Get the cpu out of reset */
+ mmio_write_32(MVEBU_CCU_CPU_UN_RESET(cpu_id), 0x10001);
+
+ return 0;
+}
+
+/*****************************************************************************
+ * A8K handler called to check the validity of the power state
+ * parameter.
+ *****************************************************************************
+ */
+static int a8k_validate_power_state(unsigned int power_state,
+ psci_power_state_t *req_state)
+{
+ int pstate = psci_get_pstate_type(power_state);
+ int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
+ int i;
+
+ if (pwr_lvl > PLAT_MAX_PWR_LVL)
+ return PSCI_E_INVALID_PARAMS;
+
+ /* Sanity check the requested state */
+ if (pstate == PSTATE_TYPE_STANDBY) {
+ /*
+ * It's possible to enter standby only on power level 0
+ * Ignore any other power level.
+ */
+ if (pwr_lvl != MARVELL_PWR_LVL0)
+ return PSCI_E_INVALID_PARAMS;
+
+ req_state->pwr_domain_state[MARVELL_PWR_LVL0] =
+ MARVELL_LOCAL_STATE_RET;
+ } else {
+ for (i = MARVELL_PWR_LVL0; i <= pwr_lvl; i++)
+ req_state->pwr_domain_state[i] =
+ MARVELL_LOCAL_STATE_OFF;
+ }
+
+ /*
+ * We expect the 'state id' to be zero.
+ */
+ if (psci_get_pstate_id(power_state))
+ return PSCI_E_INVALID_PARAMS;
+
+ return PSCI_E_SUCCESS;
+}
+
+/*****************************************************************************
+ * A8K handler called when a CPU is about to enter standby.
+ *****************************************************************************
+ */
+static void a8k_cpu_standby(plat_local_state_t cpu_state)
+{
+ ERROR("%s: needs to be implemented\n", __func__);
+ panic();
+}
+
+/*****************************************************************************
+ * A8K handler called when a power domain is about to be turned on. The
+ * mpidr determines the CPU to be turned on.
+ *****************************************************************************
+ */
+static int a8k_pwr_domain_on(u_register_t mpidr)
+{
+ /* Power up CPU (CPUs 1-3 are powered off at start of BLE) */
+ plat_marvell_cpu_powerup(mpidr);
+
+ if (is_pm_fw_running()) {
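+ /* The MSS addresses cores by a linear index:
+ * cluster * 2 + core (two cores per AP806 cluster)
+ */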
+ unsigned int target =
+ ((mpidr & 0xFF) + (((mpidr >> 8) & 0xFF) * 2));
+
+ /*
+ * pm system synchronization - used to synchronize
+ * multiple core access to MSS
+ */
+ bakery_lock_get(&pm_sys_lock);
+
+ /* send CPU ON IPC Message to MSS */
+ mss_pm_ipc_msg_send(target, PM_IPC_MSG_CPU_ON, 0);
+
+ /* trigger IPC message to MSS */
+ mss_pm_ipc_msg_trigger();
+
+ /* pm system synchronization */
+ bakery_lock_release(&pm_sys_lock);
+
+ /* trace message */
+ PM_TRACE(TRACE_PWR_DOMAIN_ON | target);
+ } else {
+ /* proprietary CPU ON execution flow */
+ plat_marvell_cpu_on(mpidr);
+ }
+
+ return 0;
+}
+
+/*****************************************************************************
+ * A8K handler called to validate the entry point.
+ *****************************************************************************
+ */
+static int a8k_validate_ns_entrypoint(uintptr_t entrypoint)
+{
+ return PSCI_E_SUCCESS;
+}
+
+/*****************************************************************************
+ * A8K handler called when a power domain is about to be turned off. The
+ * target_state encodes the power state that each level should transition to.
+ *****************************************************************************
+ */
+static void a8k_pwr_domain_off(const psci_power_state_t *target_state)
+{
+ if (is_pm_fw_running()) {
+ unsigned int idx = plat_my_core_pos();
+
+ /* Prevent interrupts from spuriously waking up this cpu */
+ gicv2_cpuif_disable();
+
+ /* pm system synchronization - used to synchronize multiple
+ * core access to MSS
+ */
+ bakery_lock_get(&pm_sys_lock);
+
+ /* send CPU OFF IPC Message to MSS */
+ mss_pm_ipc_msg_send(idx, PM_IPC_MSG_CPU_OFF, target_state);
+
+ /* trigger IPC message to MSS */
+ mss_pm_ipc_msg_trigger();
+
+ /* pm system synchronization */
+ bakery_lock_release(&pm_sys_lock);
+
+ /* trace message */
+ PM_TRACE(TRACE_PWR_DOMAIN_OFF);
+ } else {
+ INFO("%s: is not supported without SCP\n", __func__);
+ }
+}
+
+/* Get PM config to power off the SoC */
+void *plat_marvell_get_pm_cfg(void)
+{
+ return NULL;
+}
+
+/*
+ * This function should be called on restore from the
+ * "suspend to RAM" state, when the execution flow
+ * has to bypass the BootROM image-to-RAM copy and speed up
+ * system recovery
+ */
+static void plat_marvell_exit_bootrom(void)
+{
+ marvell_exit_bootrom(PLAT_MARVELL_TRUSTED_ROM_BASE);
+}
+
+/*
+ * Prepare for the power off of the system via GPIO
+ */
+static void plat_marvell_power_off_gpio(struct power_off_method *pm_cfg,
+ register_t *gpio_addr,
+ register_t *gpio_data)
+{
+ unsigned int gpio;
+ unsigned int idx;
+ unsigned int shift;
+ unsigned int reg;
+ unsigned int addr;
+ gpio_info_t *info;
+ unsigned int tog_bits;
+
+ assert((pm_cfg->cfg.gpio.pin_count < PMIC_GPIO_MAX_NUMBER) &&
+ (pm_cfg->cfg.gpio.step_count < PMIC_GPIO_MAX_TOGGLE_STEP));
+
+ /* Prepare GPIOs for PMIC */
+ for (gpio = 0; gpio < pm_cfg->cfg.gpio.pin_count; gpio++) {
+ info = &pm_cfg->cfg.gpio.info[gpio];
+ /* Set PMIC GPIO to output mode */
+ reg = mmio_read_32(MVEBU_CP_GPIO_DATA_OUT_EN(
+ info->cp_index, info->gpio_index));
+ mmio_write_32(MVEBU_CP_GPIO_DATA_OUT_EN(
+ info->cp_index, info->gpio_index),
+ reg & ~MVEBU_GPIO_MASK(info->gpio_index));
+
+ /* Set the appropriate MPP to GPIO mode */
+ reg = mmio_read_32(MVEBU_PM_MPP_REGS(info->cp_index,
+ info->gpio_index));
+ mmio_write_32(MVEBU_PM_MPP_REGS(info->cp_index,
+ info->gpio_index),
+ reg & ~MVEBU_MPP_MASK(info->gpio_index));
+ }
+
+ /* Wait for MPP & GPIO pre-configurations done */
+ mdelay(pm_cfg->cfg.gpio.delay_ms);
+
+ /* Toggle the GPIO values, and leave final step to be triggered
+ * after DDR self-refresh is enabled
+ */
+ for (idx = 0; idx < pm_cfg->cfg.gpio.step_count; idx++) {
+ tog_bits = pm_cfg->cfg.gpio.seq[idx];
+
+ /* The GPIOs must be within the same GPIO register,
+ * thus the original value can be read via the first GPIO
+ */
+ info = &pm_cfg->cfg.gpio.info[0];
+ reg = mmio_read_32(MVEBU_CP_GPIO_DATA_OUT(
+ info->cp_index, info->gpio_index));
+ addr = MVEBU_CP_GPIO_DATA_OUT(info->cp_index, info->gpio_index);
+
+ for (gpio = 0; gpio < pm_cfg->cfg.gpio.pin_count; gpio++) {
+ shift = pm_cfg->cfg.gpio.info[gpio].gpio_index % 32;
+ if (GPIO_LOW == (tog_bits & (1 << gpio)))
+ reg &= ~(1 << shift);
+ else
+ reg |= (1 << shift);
+ }
+
+ /* Set the GPIO register; for the last step just store the
+ * register address and value for use by the final
+ * power-off sequence
+ */
+ if (idx < pm_cfg->cfg.gpio.step_count - 1) {
+ mmio_write_32(MVEBU_CP_GPIO_DATA_OUT(
+ info->cp_index, info->gpio_index), reg);
+ mdelay(pm_cfg->cfg.gpio.delay_ms);
+ } else {
+ /* Save GPIO register and address values for
+ * finishing the power down operation later
+ */
+ *gpio_addr = addr;
+ *gpio_data = reg;
+ }
+ }
+}
+
+/*
+ * Prepare for the power off of the system
+ */
+static void plat_marvell_power_off_prepare(struct power_off_method *pm_cfg,
+ register_t *addr, register_t *data)
+{
+ switch (pm_cfg->type) {
+ case PMIC_GPIO:
+ plat_marvell_power_off_gpio(pm_cfg, addr, data);
+ break;
+ default:
+ break;
+ }
+}
+
+/*****************************************************************************
+ * A8K handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
+ *****************************************************************************
+ */
+static void a8k_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+ if (is_pm_fw_running()) {
+ unsigned int idx;
+
+ /* Prevent interrupts from spuriously waking up this cpu */
+ gicv2_cpuif_disable();
+
+ idx = plat_my_core_pos();
+
+ /* pm system synchronization - used to synchronize multiple
+ * core access to MSS
+ */
+ bakery_lock_get(&pm_sys_lock);
+
+ /* send CPU Suspend IPC Message to MSS */
+ mss_pm_ipc_msg_send(idx, PM_IPC_MSG_CPU_SUSPEND, target_state);
+
+ /* trigger IPC message to MSS */
+ mss_pm_ipc_msg_trigger();
+
+ /* pm system synchronization */
+ bakery_lock_release(&pm_sys_lock);
+
+ /* trace message */
+ PM_TRACE(TRACE_PWR_DOMAIN_SUSPEND);
+ } else {
+ uintptr_t *mailbox = (void *)PLAT_MARVELL_MAILBOX_BASE;
+
+ INFO("Suspending to RAM\n");
+
+ /* Prevent interrupts from spuriously waking up this cpu */
+ gicv2_cpuif_disable();
+
+ mailbox[MBOX_IDX_SUSPEND_MAGIC] = MVEBU_MAILBOX_SUSPEND_STATE;
+ mailbox[MBOX_IDX_ROM_EXIT_ADDR] = (uintptr_t)&plat_marvell_exit_bootrom;
+
+#if PLAT_MARVELL_SHARED_RAM_CACHED
+ flush_dcache_range(PLAT_MARVELL_MAILBOX_BASE +
+ MBOX_IDX_SUSPEND_MAGIC * sizeof(uintptr_t),
+ 2 * sizeof(uintptr_t));
+#endif
+ /* Flush and disable LLC before going off-power */
+ llc_disable(0);
+
+ isb();
+ /*
+ * Do not halt here!
+ * The function must return to allow the caller,
+ * psci_power_up_finish(), to do the proper context saving and
+ * to release the CPU lock.
+ */
+ }
+}
+
+/*****************************************************************************
+ * A8K handler called when a power domain has just been powered on after
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from.
+ *****************************************************************************
+ */
+static void a8k_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+ /* arch specific configuration */
+ marvell_psci_arch_init(0);
+
+ /* Interrupt initialization */
+ gicv2_pcpu_distif_init();
+ gicv2_cpuif_enable();
+
+ if (is_pm_fw_running()) {
+ /* trace message */
+ PM_TRACE(TRACE_PWR_DOMAIN_ON_FINISH);
+ }
+}
+
+/*****************************************************************************
+ * A8K handler called when a power domain has just been powered on after
+ * having been suspended earlier. The target_state encodes the low power state
+ * that each level has woken up from.
+ * TODO: At the moment we reuse the on finisher and reinitialize the secure
+ * context. Need to implement a separate suspend finisher.
+ *****************************************************************************
+ */
+static void a8k_pwr_domain_suspend_finish(
+ const psci_power_state_t *target_state)
+{
+ if (is_pm_fw_running()) {
+ /* arch specific configuration */
+ marvell_psci_arch_init(0);
+
+ /* Interrupt initialization */
+ gicv2_cpuif_enable();
+
+ /* trace message */
+ PM_TRACE(TRACE_PWR_DOMAIN_SUSPEND_FINISH);
+ } else {
+ uintptr_t *mailbox = (void *)PLAT_MARVELL_MAILBOX_BASE;
+
+ /* Only the primary CPU requires platform init */
+ if (!plat_my_core_pos()) {
+ /* Initialize the console to provide
+ * early debug support
+ */
+ console_init(PLAT_MARVELL_BOOT_UART_BASE,
+ PLAT_MARVELL_BOOT_UART_CLK_IN_HZ,
+ MARVELL_CONSOLE_BAUDRATE);
+
+ bl31_plat_arch_setup();
+ marvell_bl31_platform_setup();
+ /*
+ * Remove suspend to RAM marker from the mailbox
+ * for treating a regular reset as a cold boot
+ */
+ mailbox[MBOX_IDX_SUSPEND_MAGIC] = 0;
+ mailbox[MBOX_IDX_ROM_EXIT_ADDR] = 0;
+#if PLAT_MARVELL_SHARED_RAM_CACHED
+ flush_dcache_range(PLAT_MARVELL_MAILBOX_BASE +
+ MBOX_IDX_SUSPEND_MAGIC * sizeof(uintptr_t),
+ 2 * sizeof(uintptr_t));
+#endif
+ }
+ }
+}
+
+/*****************************************************************************
+ * This handler is called by the PSCI implementation during the `SYSTEM_SUSPEND`
+ * call to get the `power_state` parameter. This allows the platform to encode
+ * the appropriate State-ID field within the `power_state` parameter which can
+ * be utilized in `pwr_domain_suspend()` to suspend to system affinity level.
+ *****************************************************************************
+ */
+static void a8k_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+ /* lower affinities use PLAT_MAX_OFF_STATE */
+ for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
+ req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
+}
+
+static void
+__dead2 a8k_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_state)
+{
+ struct power_off_method *pm_cfg;
+ unsigned int srcmd;
+ unsigned int sdram_reg;
+ register_t gpio_data = 0, gpio_addr = 0;
+
+ if (is_pm_fw_running()) {
+ psci_power_down_wfi();
+ panic();
+ }
+
+ pm_cfg = (struct power_off_method *)plat_marvell_get_pm_cfg();
+
+ /* Prepare for power off */
+ plat_marvell_power_off_prepare(pm_cfg, &gpio_addr, &gpio_data);
+
+ /* First step to enable DDR self-refresh
+ * to keep the data during suspend
+ */
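+ /* 0x8C1 = MVEBU_MC_AC_ON_DLY_DEF_VAR | MVEBU_MC_AC_OFF_DLY_DEF_VAR |
+ * MVEBU_MC_PHY_AUTO_OFF_EN
+ */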
+ mmio_write_32(MVEBU_MC_PWR_CTRL_REG, 0x8C1);
+
+ /* Save DDR self-refresh second step register
+ * and value to be issued later
+ */
+ sdram_reg = MVEBU_USER_CMD_0_REG;
+ srcmd = mmio_read_32(sdram_reg);
+ srcmd &= ~(MVEBU_USER_CMD_CH0_MASK | MVEBU_USER_CMD_CS_MASK |
+ MVEBU_USER_CMD_SR_MASK);
+ srcmd |= (MVEBU_USER_CMD_CH0_EN | MVEBU_USER_CMD_CS_ALL |
+ MVEBU_USER_CMD_SR_ENTER);
+
+ /*
+ * The wait for the DRAM to enter self-refresh is done using
+ * register accesses only. At this stage any access to DRAM
+ * (e.g. a procedure call) would release it from self-refresh mode.
+ */
+ __asm__ volatile (
+ /* Align to a cache line */
+ " .balign 64\n\t"
+
+ /* Enter self refresh */
+ " str %[srcmd], [%[sdram_reg]]\n\t"
+
+ /*
+ * Wait 100 cycles for DDR to enter self refresh, by
+ * doing 50 times two instructions.
+ */
+ " mov x1, #50\n\t"
+ "1: subs x1, x1, #1\n\t"
+ " bne 1b\n\t"
+
+ /* Issue the command to trigger the SoC power off */
+ " str %[gpio_data], [%[gpio_addr]]\n\t"
+
+ /* Trap the processor */
+ " b .\n\t"
+ : : [srcmd] "r" (srcmd), [sdram_reg] "r" (sdram_reg),
+ [gpio_addr] "r" (gpio_addr), [gpio_data] "r" (gpio_data)
+ : "x1");
+
+ panic();
+}
+
+/*****************************************************************************
+ * A8K handlers to shutdown/reboot the system
+ *****************************************************************************
+ */
+static void __dead2 a8k_system_off(void)
+{
+ ERROR("%s: needs to be implemented\n", __func__);
+ panic();
+}
+
+void plat_marvell_system_reset(void)
+{
+ mmio_write_32(MVEBU_RFU_BASE + MVEBU_RFU_GLOBL_SW_RST, 0x0);
+}
+
+static void __dead2 a8k_system_reset(void)
+{
+ plat_marvell_system_reset();
+
+ /* we shouldn't get to this point */
+ panic();
+}
+
+/*****************************************************************************
+ * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
+ * platform layer will take care of registering the handlers with PSCI.
+ *****************************************************************************
+ */
+const plat_psci_ops_t plat_arm_psci_pm_ops = {
+ .cpu_standby = a8k_cpu_standby,
+ .pwr_domain_on = a8k_pwr_domain_on,
+ .pwr_domain_off = a8k_pwr_domain_off,
+ .pwr_domain_suspend = a8k_pwr_domain_suspend,
+ .pwr_domain_on_finish = a8k_pwr_domain_on_finish,
+ .get_sys_suspend_power_state = a8k_get_sys_suspend_power_state,
+ .pwr_domain_suspend_finish = a8k_pwr_domain_suspend_finish,
+ .pwr_domain_pwr_down_wfi = a8k_pwr_domain_pwr_down_wfi,
+ .system_off = a8k_system_off,
+ .system_reset = a8k_system_reset,
+ .validate_power_state = a8k_validate_power_state,
+ .validate_ns_entrypoint = a8k_validate_ns_entrypoint
+};
diff --git a/plat/marvell/a8k/common/plat_pm_trace.c b/plat/marvell/a8k/common/plat_pm_trace.c
new file mode 100644
index 0000000..683e56f
--- /dev/null
+++ b/plat/marvell/a8k/common/plat_pm_trace.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <mmio.h>
+#include <mss_mem.h>
+#include <platform.h>
+#include <plat_pm_trace.h>
+
+#ifdef PM_TRACE_ENABLE
+
+/* core trace APIs */
+core_trace_func funcTbl[PLATFORM_CORE_COUNT] = {
+ pm_core_0_trace,
+ pm_core_1_trace,
+ pm_core_2_trace,
+ pm_core_3_trace};
+
+/*****************************************************************************
+ * pm_core_0_trace
+ * pm_core_1_trace
+ * pm_core_2_trace
+ * pm_core_3_trace
+ *
+ * These functions write trace info into the per-core cyclic trace queue in
+ * the MSS SRAM memory space
+ *****************************************************************************
+ */
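+/* Each core owns a cyclic queue in MSS SRAM: the CTRL word holds the next
+ * write index, each entry stores the MSS timer value and the trace ID, and
+ * the index wraps according to AP_MSS_ATF_TRACE_SIZE_MASK
+ */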
+void pm_core_0_trace(unsigned int trace)
+{
+ unsigned int current_position_core_0 =
+ mmio_read_32(AP_MSS_ATF_CORE_0_CTRL_BASE);
+ mmio_write_32((AP_MSS_ATF_CORE_0_INFO_BASE +
+ (current_position_core_0 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+ mmio_read_32(AP_MSS_TIMER_BASE));
+ mmio_write_32((AP_MSS_ATF_CORE_0_INFO_TRACE +
+ (current_position_core_0 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+ trace);
+ mmio_write_32(AP_MSS_ATF_CORE_0_CTRL_BASE,
+ ((current_position_core_0 + 1) &
+ AP_MSS_ATF_TRACE_SIZE_MASK));
+}
+
+void pm_core_1_trace(unsigned int trace)
+{
+ unsigned int current_position_core_1 =
+ mmio_read_32(AP_MSS_ATF_CORE_1_CTRL_BASE);
+ mmio_write_32((AP_MSS_ATF_CORE_1_INFO_BASE +
+ (current_position_core_1 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+ mmio_read_32(AP_MSS_TIMER_BASE));
+ mmio_write_32((AP_MSS_ATF_CORE_1_INFO_TRACE +
+ (current_position_core_1 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+ trace);
+ mmio_write_32(AP_MSS_ATF_CORE_1_CTRL_BASE,
+ ((current_position_core_1 + 1) &
+ AP_MSS_ATF_TRACE_SIZE_MASK));
+}
+
+void pm_core_2_trace(unsigned int trace)
+{
+ unsigned int current_position_core_2 =
+ mmio_read_32(AP_MSS_ATF_CORE_2_CTRL_BASE);
+ mmio_write_32((AP_MSS_ATF_CORE_2_INFO_BASE +
+ (current_position_core_2 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+ mmio_read_32(AP_MSS_TIMER_BASE));
+ mmio_write_32((AP_MSS_ATF_CORE_2_INFO_TRACE +
+ (current_position_core_2 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+ trace);
+ mmio_write_32(AP_MSS_ATF_CORE_2_CTRL_BASE,
+ ((current_position_core_2 + 1) &
+ AP_MSS_ATF_TRACE_SIZE_MASK));
+}
+
+void pm_core_3_trace(unsigned int trace)
+{
+ unsigned int current_position_core_3 =
+ mmio_read_32(AP_MSS_ATF_CORE_3_CTRL_BASE);
+ mmio_write_32((AP_MSS_ATF_CORE_3_INFO_BASE +
+ (current_position_core_3 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+ mmio_read_32(AP_MSS_TIMER_BASE));
+ mmio_write_32((AP_MSS_ATF_CORE_3_INFO_TRACE +
+ (current_position_core_3 * AP_MSS_ATF_CORE_ENTRY_SIZE)),
+ trace);
+ mmio_write_32(AP_MSS_ATF_CORE_3_CTRL_BASE,
+ ((current_position_core_3 + 1) &
+ AP_MSS_ATF_TRACE_SIZE_MASK));
+}
+#endif /* PM_TRACE_ENABLE */
diff --git a/plat/marvell/a8k/common/plat_thermal.c b/plat/marvell/a8k/common/plat_thermal.c
new file mode 100644
index 0000000..02fe820
--- /dev/null
+++ b/plat/marvell/a8k/common/plat_thermal.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <debug.h>
+#include <delay_timer.h>
+#include <mmio.h>
+#include <mvebu_def.h>
+#include <thermal.h>
+
+#define THERMAL_TIMEOUT 1200
+
+#define THERMAL_SEN_CTRL_LSB_STRT_OFFSET 0
+#define THERMAL_SEN_CTRL_LSB_STRT_MASK \
+ (0x1 << THERMAL_SEN_CTRL_LSB_STRT_OFFSET)
+#define THERMAL_SEN_CTRL_LSB_RST_OFFSET 1
+#define THERMAL_SEN_CTRL_LSB_RST_MASK \
+ (0x1 << THERMAL_SEN_CTRL_LSB_RST_OFFSET)
+#define THERMAL_SEN_CTRL_LSB_EN_OFFSET 2
+#define THERMAL_SEN_CTRL_LSB_EN_MASK \
+ (0x1 << THERMAL_SEN_CTRL_LSB_EN_OFFSET)
+
+#define THERMAL_SEN_CTRL_STATS_VALID_OFFSET 16
+#define THERMAL_SEN_CTRL_STATS_VALID_MASK \
+ (0x1 << THERMAL_SEN_CTRL_STATS_VALID_OFFSET)
+#define THERMAL_SEN_CTRL_STATS_TEMP_OUT_OFFSET 0
+#define THERMAL_SEN_CTRL_STATS_TEMP_OUT_MASK \
+ (0x3FF << THERMAL_SEN_CTRL_STATS_TEMP_OUT_OFFSET)
+
+#define THERMAL_SEN_OUTPUT_MSB 512
+#define THERMAL_SEN_OUTPUT_COMP 1024
+
+struct tsen_regs {
+ uint32_t ext_tsen_ctrl_lsb;
+ uint32_t ext_tsen_ctrl_msb;
+ uint32_t ext_tsen_status;
+};
+
+static int ext_tsen_probe(struct tsen_config *tsen_cfg)
+{
+ uint32_t reg, timeout = 0;
+ struct tsen_regs *base;
+
+ if (tsen_cfg == NULL || tsen_cfg->regs_base == NULL) {
+ ERROR("initial thermal sensor configuration is missing\n");
+ return -1;
+ }
+ base = (struct tsen_regs *)tsen_cfg->regs_base;
+
+ INFO("initializing thermal sensor\n");
+
+ /* initialize thermal sensor hardware reset once */
+ reg = mmio_read_32((uintptr_t)&base->ext_tsen_ctrl_lsb);
+ reg &= ~THERMAL_SEN_CTRL_LSB_RST_MASK; /* de-assert TSEN_RESET */
+ reg |= THERMAL_SEN_CTRL_LSB_EN_MASK; /* set TSEN_EN to 1 */
+ reg |= THERMAL_SEN_CTRL_LSB_STRT_MASK; /* set TSEN_START to 1 */
+ mmio_write_32((uintptr_t)&base->ext_tsen_ctrl_lsb, reg);
+
+ reg = mmio_read_32((uintptr_t)&base->ext_tsen_status);
+ while ((reg & THERMAL_SEN_CTRL_STATS_VALID_MASK) == 0 &&
+ timeout < THERMAL_TIMEOUT) {
+ udelay(100);
+ reg = mmio_read_32((uintptr_t)&base->ext_tsen_status);
+ timeout++;
+ }
+
+ if ((reg & THERMAL_SEN_CTRL_STATS_VALID_MASK) == 0) {
+ ERROR("thermal sensor is not ready\n");
+ return -1;
+ }
+
+ tsen_cfg->tsen_ready = 1;
+
+ VERBOSE("thermal sensor was initialized\n");
+
+ return 0;
+}
+
+static int ext_tsen_read(struct tsen_config *tsen_cfg, int *temp)
+{
+ uint32_t reg;
+ struct tsen_regs *base;
+
+	if (tsen_cfg == NULL || !tsen_cfg->tsen_ready) {
+ ERROR("thermal sensor was not initialized\n");
+ return -1;
+ }
+ base = (struct tsen_regs *)tsen_cfg->regs_base;
+
+ reg = mmio_read_32((uintptr_t)&base->ext_tsen_status);
+ reg = ((reg & THERMAL_SEN_CTRL_STATS_TEMP_OUT_MASK) >>
+ THERMAL_SEN_CTRL_STATS_TEMP_OUT_OFFSET);
+
+ /*
+	 * The TSEN output is a signed two's complement number
+	 * ranging from -512 to +511. When the MSB is set, the
+	 * complement must be calculated.
+ */
+ if (reg >= THERMAL_SEN_OUTPUT_MSB)
+ reg -= THERMAL_SEN_OUTPUT_COMP;
+
+ if (tsen_cfg->tsen_divisor == 0) {
+ ERROR("thermal sensor divisor cannot be zero\n");
+ return -1;
+ }
+
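+	/*
+	 * Scale the (sign-converted) raw reading into a temperature using
+	 * the configured coefficients. Purely as an illustration of the
+	 * arithmetic, with the default coefficients defined in tsen_cfg
+	 * below, a raw reading of 200 yields
+	 * (425 * 200 + 153400) / 1000 = 238.
+	 */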
+ *temp = ((tsen_cfg->tsen_gain * ((int)reg)) +
+ tsen_cfg->tsen_offset) / tsen_cfg->tsen_divisor;
+
+ return 0;
+}
+
+static struct tsen_config tsen_cfg = {
+ .tsen_offset = 153400,
+ .tsen_gain = 425,
+ .tsen_divisor = 1000,
+ .tsen_ready = 0,
+ .regs_base = (void *)MVEBU_AP_EXT_TSEN_BASE,
+ .ptr_tsen_probe = ext_tsen_probe,
+ .ptr_tsen_read = ext_tsen_read
+};
+
+struct tsen_config *marvell_thermal_config_get(void)
+{
+ return &tsen_cfg;
+}
diff --git a/plat/marvell/common/aarch64/marvell_common.c b/plat/marvell/common/aarch64/marvell_common.c
new file mode 100644
index 0000000..abc501a
--- /dev/null
+++ b/plat/marvell/common/aarch64/marvell_common.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <mmio.h>
+#include <plat_marvell.h>
+#include <platform_def.h>
+#include <xlat_tables.h>
+
+
+/* Weak definitions may be overridden in specific Marvell standard platform */
+#pragma weak plat_get_ns_image_entrypoint
+#pragma weak plat_marvell_get_mmap
+
+/*
+ * Set up the page tables for the generic and platform-specific memory regions.
+ * The extents of the generic memory regions are specified by the function
+ * arguments and consist of:
+ * - Trusted SRAM seen by the BL image;
+ * - Code section;
+ * - Read-only data section;
+ * - Coherent memory region, if applicable.
+ */
+void marvell_setup_page_tables(uintptr_t total_base,
+ size_t total_size,
+ uintptr_t code_start,
+ uintptr_t code_limit,
+ uintptr_t rodata_start,
+ uintptr_t rodata_limit
+#if USE_COHERENT_MEM
+ ,
+ uintptr_t coh_start,
+ uintptr_t coh_limit
+#endif
+ )
+{
+ /*
+ * Map the Trusted SRAM with appropriate memory attributes.
+ * Subsequent mappings will adjust the attributes for specific regions.
+ */
+ VERBOSE("Trusted SRAM seen by this BL image: %p - %p\n",
+ (void *) total_base, (void *) (total_base + total_size));
+ mmap_add_region(total_base, total_base,
+ total_size,
+ MT_MEMORY | MT_RW | MT_SECURE);
+
+ /* Re-map the code section */
+ VERBOSE("Code region: %p - %p\n",
+ (void *) code_start, (void *) code_limit);
+ mmap_add_region(code_start, code_start,
+ code_limit - code_start,
+ MT_CODE | MT_SECURE);
+
+ /* Re-map the read-only data section */
+ VERBOSE("Read-only data region: %p - %p\n",
+ (void *) rodata_start, (void *) rodata_limit);
+ mmap_add_region(rodata_start, rodata_start,
+ rodata_limit - rodata_start,
+ MT_RO_DATA | MT_SECURE);
+
+#if USE_COHERENT_MEM
+ /* Re-map the coherent memory region */
+ VERBOSE("Coherent region: %p - %p\n",
+ (void *) coh_start, (void *) coh_limit);
+ mmap_add_region(coh_start, coh_start,
+ coh_limit - coh_start,
+ MT_DEVICE | MT_RW | MT_SECURE);
+#endif
+
+ /* Now (re-)map the platform-specific memory regions */
+ mmap_add(plat_marvell_get_mmap());
+
+ /* Create the page tables to reflect the above mappings */
+ init_xlat_tables();
+}
+
+unsigned long plat_get_ns_image_entrypoint(void)
+{
+ return PLAT_MARVELL_NS_IMAGE_OFFSET;
+}
+
+/*****************************************************************************
+ * Gets SPSR for BL32 entry
+ *****************************************************************************
+ */
+uint32_t marvell_get_spsr_for_bl32_entry(void)
+{
+ /*
+ * The Secure Payload Dispatcher service is responsible for
+ * setting the SPSR prior to entry into the BL32 image.
+ */
+ return 0;
+}
+
+/*****************************************************************************
+ * Gets SPSR for BL33 entry
+ *****************************************************************************
+ */
+uint32_t marvell_get_spsr_for_bl33_entry(void)
+{
+ unsigned long el_status;
+ unsigned int mode;
+ uint32_t spsr;
+
+ /* Figure out what mode we enter the non-secure world in */
+ el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+ el_status &= ID_AA64PFR0_ELX_MASK;
+
+ mode = (el_status) ? MODE_EL2 : MODE_EL1;
+
+ /*
+ * TODO: Consider the possibility of specifying the SPSR in
+ * the FIP ToC and allowing the platform to have a say as
+ * well.
+ */
+ spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+ return spsr;
+}
+
+/*****************************************************************************
+ * Returns ARM platform specific memory map regions.
+ *****************************************************************************
+ */
+const mmap_region_t *plat_marvell_get_mmap(void)
+{
+ return plat_marvell_mmap;
+}
+
diff --git a/plat/marvell/common/aarch64/marvell_helpers.S b/plat/marvell/common/aarch64/marvell_helpers.S
new file mode 100644
index 0000000..a3dc917
--- /dev/null
+++ b/plat/marvell/common/aarch64/marvell_helpers.S
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <asm_macros.S>
+#include <cortex_a72.h>
+#include <marvell_def.h>
+#include <platform_def.h>
+#ifndef PLAT_a3700
+#include <ccu.h>
+#include <cache_llc.h>
+#endif
+
+ .weak plat_marvell_calc_core_pos
+ .weak plat_my_core_pos
+ .globl plat_crash_console_init
+ .globl plat_crash_console_putc
+ .globl platform_mem_init
+ .globl disable_mmu_dcache
+ .globl invalidate_tlb_all
+ .globl platform_unmap_sram
+ .globl disable_sram
+ .globl disable_icache
+ .globl invalidate_icache_all
+ .globl marvell_exit_bootrom
+ .globl ca72_l2_enable_unique_clean
+
+ /* -----------------------------------------------------
+ * unsigned int plat_my_core_pos(void)
+ * This function uses the plat_marvell_calc_core_pos()
+ * definition to get the index of the calling CPU.
+ * -----------------------------------------------------
+ */
+func plat_my_core_pos
+ mrs x0, mpidr_el1
+ b plat_marvell_calc_core_pos
+endfunc plat_my_core_pos
+
+ /* -----------------------------------------------------
+ * unsigned int plat_marvell_calc_core_pos(uint64_t mpidr)
+ * Helper function to calculate the core position.
+ * With this function: CorePos = (ClusterId * 2) +
+ * CoreId
+ * -----------------------------------------------------
+ */
+func plat_marvell_calc_core_pos
+ and x1, x0, #MPIDR_CPU_MASK
+ and x0, x0, #MPIDR_CLUSTER_MASK
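+	/*
+	 * The cluster ID occupies MPIDR[15:8], so shifting it right by 7
+	 * yields ClusterId * 2. Illustrative example: MPIDR 0x0101
+	 * (cluster 1, core 1) gives (0x100 >> 7) + 1 = 3.
+	 */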
+ add x0, x1, x0, LSR #7
+ ret
+endfunc plat_marvell_calc_core_pos
+
+ /* ---------------------------------------------
+ * int plat_crash_console_init(void)
+ * Function to initialize the crash console
+ * without a C Runtime to print crash report.
+ * Clobber list : x0, x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_init
+ mov_imm x0, PLAT_MARVELL_CRASH_UART_BASE
+ mov_imm x1, PLAT_MARVELL_CRASH_UART_CLK_IN_HZ
+ mov_imm x2, MARVELL_CONSOLE_BAUDRATE
+ b console_core_init
+endfunc plat_crash_console_init
+
+ /* ---------------------------------------------
+ * int plat_crash_console_putc(int c)
+ * Function to print a character on the crash
+ * console without a C Runtime.
+ * Clobber list : x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_putc
+ mov_imm x1, PLAT_MARVELL_CRASH_UART_BASE
+ b console_core_putc
+endfunc plat_crash_console_putc
+
+ /* ---------------------------------------------------------------------
+	 * We don't need to carry out any memory initialization on Marvell
+ * platforms. The Secure RAM is accessible straight away.
+ * ---------------------------------------------------------------------
+ */
+func platform_mem_init
+ ret
+endfunc platform_mem_init
+
+ /* -----------------------------------------------------
+	 * Disable the MMU and data cache
+ * -----------------------------------------------------
+ */
+func disable_mmu_dcache
+ mrs x0, sctlr_el3
+ bic x0, x0, 0x1 /* M bit - MMU */
+ bic x0, x0, 0x4 /* C bit - Dcache L1 & L2 */
+ msr sctlr_el3, x0
+ isb
+ b mmu_off
+mmu_off:
+ ret
+endfunc disable_mmu_dcache
+
+ /* -----------------------------------------------------
+	 * Invalidate all TLB entries
+ * -----------------------------------------------------
+ */
+func invalidate_tlb_all
+ tlbi alle3
+ dsb sy
+ isb
+ ret
+endfunc invalidate_tlb_all
+
+ /* -----------------------------------------------------
+ * Disable the i cache
+	 * Disable the instruction cache
+ */
+func disable_icache
+ mrs x0, sctlr_el3
+ bic x0, x0, 0x1000 /* I bit - Icache L1 & L2 */
+ msr sctlr_el3, x0
+ isb
+ ret
+endfunc disable_icache
+
+ /* -----------------------------------------------------
+ * Disable all of the i caches
+	 * Invalidate all of the instruction caches
+ */
+func invalidate_icache_all
+ ic ialluis
+ isb sy
+ ret
+endfunc invalidate_icache_all
+
+ /* -----------------------------------------------------
+ * Clear the SRAM enabling bit to unmap SRAM
+ * -----------------------------------------------------
+ */
+func platform_unmap_sram
+ ldr x0, =CCU_SRAM_WIN_CR
+ str wzr, [x0]
+ ret
+endfunc platform_unmap_sram
+
+ /* -----------------------------------------------------
+ * Disable the SRAM
+ * -----------------------------------------------------
+ */
+func disable_sram
+	/* Disable the line lockings. They must be disabled explicitly
+	 * or the OS will have problems using the cache */
+ ldr x1, =MASTER_LLC_TC0_LOCK
+ str wzr, [x1]
+
+ /* Invalidate all ways */
+ ldr w1, =LLC_WAY_MASK
+ ldr x0, =MASTER_L2X0_INV_WAY
+ str w1, [x0]
+
+ /* Finally disable LLC */
+ ldr x0, =MASTER_LLC_CTRL
+ str wzr, [x0]
+
+ ret
+endfunc disable_sram
+
+ /* -----------------------------------------------------
+	 * Operations to perform when exiting the bootROM:
+ * Disable the MMU
+ * Disable and invalidate the dcache
+ * Unmap and disable the SRAM
+ * Disable and invalidate the icache
+ * -----------------------------------------------------
+ */
+func marvell_exit_bootrom
+ /* Save the system restore address */
+ mov x28, x0
+
+ /* Close the caches and MMU */
+ bl disable_mmu_dcache
+
+ /*
+ * There is nothing important in the caches now,
+ * so invalidate them instead of cleaning.
+ */
+ adr x0, __RW_START__
+ adr x1, __RW_END__
+ sub x1, x1, x0
+ bl inv_dcache_range
+ bl invalidate_tlb_all
+
+ /*
+	 * Remove the SRAM memory mapping; the DDR mapping will remain
+	 * so that the boot image can execute
+ */
+ bl platform_unmap_sram
+
+ /* Disable the SRAM */
+ bl disable_sram
+
+ /* Disable and invalidate icache */
+ bl disable_icache
+ bl invalidate_icache_all
+
+ mov x0, x28
+ br x0
+endfunc marvell_exit_bootrom
+
+ /*
+ * Enable L2 UniqueClean evictions with data
+ */
+func ca72_l2_enable_unique_clean
+
+ mrs x0, CORTEX_A72_L2ACTLR_EL1
+ orr x0, x0, #CORTEX_A72_L2ACTLR_ENABLE_UNIQUE_CLEAN
+ msr CORTEX_A72_L2ACTLR_EL1, x0
+
+ ret
+endfunc ca72_l2_enable_unique_clean
diff --git a/plat/marvell/common/marvell_bl1_setup.c b/plat/marvell/common/marvell_bl1_setup.c
new file mode 100644
index 0000000..981cfbe
--- /dev/null
+++ b/plat/marvell/common/marvell_bl1_setup.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <bl1.h>
+#include <bl1/bl1_private.h>
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <plat_marvell.h>
+#include <sp805.h>
+
+/* Weak definitions may be overridden in specific Marvell standard platform */
+#pragma weak bl1_early_platform_setup
+#pragma weak bl1_plat_arch_setup
+#pragma weak bl1_platform_setup
+#pragma weak bl1_plat_sec_mem_layout
+
+
+/* Data structure which holds the extents of the RAM for BL1*/
+static meminfo_t bl1_ram_layout;
+
+meminfo_t *bl1_plat_sec_mem_layout(void)
+{
+ return &bl1_ram_layout;
+}
+
+/*
+ * BL1 specific platform actions shared between Marvell standard platforms.
+ */
+void marvell_bl1_early_platform_setup(void)
+{
+ const size_t bl1_size = BL1_RAM_LIMIT - BL1_RAM_BASE;
+
+ /* Initialize the console to provide early debug support */
+ console_init(PLAT_MARVELL_BOOT_UART_BASE,
+ PLAT_MARVELL_BOOT_UART_CLK_IN_HZ,
+ MARVELL_CONSOLE_BAUDRATE);
+
+ /* Allow BL1 to see the whole Trusted RAM */
+ bl1_ram_layout.total_base = MARVELL_BL_RAM_BASE;
+ bl1_ram_layout.total_size = MARVELL_BL_RAM_SIZE;
+
+ /* Calculate how much RAM BL1 is using and how much remains free */
+ bl1_ram_layout.free_base = MARVELL_BL_RAM_BASE;
+ bl1_ram_layout.free_size = MARVELL_BL_RAM_SIZE;
+ reserve_mem(&bl1_ram_layout.free_base,
+ &bl1_ram_layout.free_size,
+ BL1_RAM_BASE,
+ bl1_size);
+}
+
+void bl1_early_platform_setup(void)
+{
+ marvell_bl1_early_platform_setup();
+}
+
+/*
+ * Perform the very early platform specific architecture setup shared between
+ * MARVELL standard platforms. This only does basic initialization. Later
+ * architectural setup (bl1_arch_setup()) does not do anything platform
+ * specific.
+ */
+void marvell_bl1_plat_arch_setup(void)
+{
+ marvell_setup_page_tables(bl1_ram_layout.total_base,
+ bl1_ram_layout.total_size,
+ BL1_RO_BASE,
+ BL1_RO_LIMIT,
+ BL1_RO_DATA_BASE,
+ BL1_RO_DATA_END
+#if USE_COHERENT_MEM
+ , BL_COHERENT_RAM_BASE,
+ BL_COHERENT_RAM_END
+#endif
+ );
+ enable_mmu_el3(0);
+}
+
+void bl1_plat_arch_setup(void)
+{
+ marvell_bl1_plat_arch_setup();
+}
+
+/*
+ * Perform the platform specific architecture setup shared between
+ * MARVELL standard platforms.
+ */
+void marvell_bl1_platform_setup(void)
+{
+ /* Initialise the IO layer and register platform IO devices */
+ plat_marvell_io_setup();
+}
+
+void bl1_platform_setup(void)
+{
+ marvell_bl1_platform_setup();
+}
+
+void bl1_plat_prepare_exit(entry_point_info_t *ep_info)
+{
+#ifdef EL3_PAYLOAD_BASE
+ /*
+ * Program the EL3 payload's entry point address into the CPUs mailbox
+ * in order to release secondary CPUs from their holding pen and make
+ * them jump there.
+ */
+ marvell_program_trusted_mailbox(ep_info->pc);
+ dsbsy();
+ sev();
+#endif
+}
diff --git a/plat/marvell/common/marvell_bl2_setup.c b/plat/marvell/common/marvell_bl2_setup.c
new file mode 100644
index 0000000..7c87ce3
--- /dev/null
+++ b/plat/marvell/common/marvell_bl2_setup.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <arch_helpers.h>
+#include <bl_common.h>
+#include <console.h>
+#include <marvell_def.h>
+#include <platform_def.h>
+#include <plat_marvell.h>
+#include <string.h>
+
+/* Data structure which holds the extents of the trusted SRAM for BL2 */
+static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
+
+
+/*****************************************************************************
+ * This structure represents the superset of information that is passed to
+ * BL31, e.g. while passing control to it from BL2, bl31_params
+ * and other platform specific parameters
+ *****************************************************************************
+ */
+typedef struct bl2_to_bl31_params_mem {
+ bl31_params_t bl31_params;
+ image_info_t bl31_image_info;
+ image_info_t bl32_image_info;
+ image_info_t bl33_image_info;
+ entry_point_info_t bl33_ep_info;
+ entry_point_info_t bl32_ep_info;
+ entry_point_info_t bl31_ep_info;
+} bl2_to_bl31_params_mem_t;
+
+
+static bl2_to_bl31_params_mem_t bl31_params_mem;
+
+
+/* Weak definitions may be overridden in specific MARVELL standard platform */
+#pragma weak bl2_early_platform_setup
+#pragma weak bl2_platform_setup
+#pragma weak bl2_plat_arch_setup
+#pragma weak bl2_plat_sec_mem_layout
+#pragma weak bl2_plat_get_bl31_params
+#pragma weak bl2_plat_get_bl31_ep_info
+#pragma weak bl2_plat_flush_bl31_params
+#pragma weak bl2_plat_set_bl31_ep_info
+#pragma weak bl2_plat_get_scp_bl2_meminfo
+#pragma weak bl2_plat_get_bl32_meminfo
+#pragma weak bl2_plat_set_bl32_ep_info
+#pragma weak bl2_plat_get_bl33_meminfo
+#pragma weak bl2_plat_set_bl33_ep_info
+
+
+meminfo_t *bl2_plat_sec_mem_layout(void)
+{
+ return &bl2_tzram_layout;
+}
+
+/*****************************************************************************
+ * This function assigns a pointer to the memory that the platform has kept
+ * aside to pass platform specific and trusted firmware related information
+ * to BL31. This memory is provided by the statically allocated
+ * bl2_to_bl31_params_mem_t structure, which is a superset of all the
+ * structures whose information is passed to BL31.
+ * NOTE: This function should be called only once, before generating the
+ * params for BL31.
+ *****************************************************************************
+ */
+bl31_params_t *bl2_plat_get_bl31_params(void)
+{
+ bl31_params_t *bl2_to_bl31_params;
+
+ /*
+	 * Initialise the memory for all the arguments that need to
+ * be passed to BL31
+ */
+ memset(&bl31_params_mem, 0, sizeof(bl2_to_bl31_params_mem_t));
+
+ /* Assign memory for TF related information */
+ bl2_to_bl31_params = &bl31_params_mem.bl31_params;
+ SET_PARAM_HEAD(bl2_to_bl31_params, PARAM_BL31, VERSION_1, 0);
+
+ /* Fill BL31 related information */
+ bl2_to_bl31_params->bl31_image_info = &bl31_params_mem.bl31_image_info;
+ SET_PARAM_HEAD(bl2_to_bl31_params->bl31_image_info, PARAM_IMAGE_BINARY,
+ VERSION_1, 0);
+
+ /* Fill BL32 related information if it exists */
+#if BL32_BASE
+ bl2_to_bl31_params->bl32_ep_info = &bl31_params_mem.bl32_ep_info;
+ SET_PARAM_HEAD(bl2_to_bl31_params->bl32_ep_info, PARAM_EP,
+ VERSION_1, 0);
+ bl2_to_bl31_params->bl32_image_info = &bl31_params_mem.bl32_image_info;
+ SET_PARAM_HEAD(bl2_to_bl31_params->bl32_image_info, PARAM_IMAGE_BINARY,
+ VERSION_1, 0);
+#endif
+
+ /* Fill BL33 related information */
+ bl2_to_bl31_params->bl33_ep_info = &bl31_params_mem.bl33_ep_info;
+ SET_PARAM_HEAD(bl2_to_bl31_params->bl33_ep_info,
+ PARAM_EP, VERSION_1, 0);
+
+ /* BL33 expects to receive the primary CPU MPID (through x0) */
+ bl2_to_bl31_params->bl33_ep_info->args.arg0 = 0xffff & read_mpidr();
+
+ bl2_to_bl31_params->bl33_image_info = &bl31_params_mem.bl33_image_info;
+ SET_PARAM_HEAD(bl2_to_bl31_params->bl33_image_info, PARAM_IMAGE_BINARY,
+ VERSION_1, 0);
+
+ return bl2_to_bl31_params;
+}
+
+/* Flush the TF params and the TF plat params */
+void bl2_plat_flush_bl31_params(void)
+{
+ flush_dcache_range((unsigned long)&bl31_params_mem,
+ sizeof(bl2_to_bl31_params_mem_t));
+}
+
+/*****************************************************************************
+ * This function returns a pointer to the shared memory that the platform
+ * has kept to point to entry point information of BL31 to BL2
+ *****************************************************************************
+ */
+struct entry_point_info *bl2_plat_get_bl31_ep_info(void)
+{
+#if DEBUG
+ bl31_params_mem.bl31_ep_info.args.arg1 = MARVELL_BL31_PLAT_PARAM_VAL;
+#endif
+
+ return &bl31_params_mem.bl31_ep_info;
+}
+
+/*****************************************************************************
+ * BL1 has passed the extents of the trusted SRAM that should be visible to BL2
+ * in x0. This memory layout is sitting at the base of the free trusted SRAM.
+ * Copy it to a safe location before it is reclaimed by later BL2 functionality.
+ *****************************************************************************
+ */
+void marvell_bl2_early_platform_setup(meminfo_t *mem_layout)
+{
+ /* Initialize the console to provide early debug support */
+ console_init(PLAT_MARVELL_BOOT_UART_BASE,
+ PLAT_MARVELL_BOOT_UART_CLK_IN_HZ,
+ MARVELL_CONSOLE_BAUDRATE);
+
+ /* Setup the BL2 memory layout */
+ bl2_tzram_layout = *mem_layout;
+
+ /* Initialise the IO layer and register platform IO devices */
+ plat_marvell_io_setup();
+}
+
+void bl2_early_platform_setup(meminfo_t *mem_layout)
+{
+ marvell_bl2_early_platform_setup(mem_layout);
+}
+
+void bl2_platform_setup(void)
+{
+ /* Nothing to do */
+}
+
+/*****************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU in a quick and dirty way.
+ *****************************************************************************
+ */
+void marvell_bl2_plat_arch_setup(void)
+{
+ marvell_setup_page_tables(bl2_tzram_layout.total_base,
+ bl2_tzram_layout.total_size,
+ BL_CODE_BASE,
+ BL_CODE_END,
+ BL_RO_DATA_BASE,
+ BL_RO_DATA_END
+#if USE_COHERENT_MEM
+ , BL_COHERENT_RAM_BASE,
+ BL_COHERENT_RAM_END
+#endif
+ );
+ enable_mmu_el1(0);
+}
+
+void bl2_plat_arch_setup(void)
+{
+ marvell_bl2_plat_arch_setup();
+}
+
+/*****************************************************************************
+ * Populate the extents of memory available for loading SCP_BL2 (if used),
+ * i.e. anywhere in trusted RAM as long as it doesn't overwrite BL2.
+ *****************************************************************************
+ */
+void bl2_plat_get_scp_bl2_meminfo(meminfo_t *scp_bl2_meminfo)
+{
+ *scp_bl2_meminfo = bl2_tzram_layout;
+}
+
+/*****************************************************************************
+ * Before calling this function BL31 is loaded in memory and its entrypoint
+ * is set by load_image. This is a placeholder for the platform to change
+ * the entrypoint of BL31 and set SPSR and security state.
+ * On MARVELL std. platforms we only set the security state of the entrypoint
+ *****************************************************************************
+ */
+void bl2_plat_set_bl31_ep_info(image_info_t *bl31_image_info,
+ entry_point_info_t *bl31_ep_info)
+{
+ SET_SECURITY_STATE(bl31_ep_info->h.attr, SECURE);
+ bl31_ep_info->spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS);
+}
+
+/*****************************************************************************
+ * Populate the extents of memory available for loading BL32
+ *****************************************************************************
+ */
+#ifdef BL32_BASE
+void bl2_plat_get_bl32_meminfo(meminfo_t *bl32_meminfo)
+{
+ /*
+ * Populate the extents of memory available for loading BL32.
+ */
+ bl32_meminfo->total_base = BL32_BASE;
+ bl32_meminfo->free_base = BL32_BASE;
+ bl32_meminfo->total_size =
+ (TRUSTED_DRAM_BASE + TRUSTED_DRAM_SIZE) - BL32_BASE;
+ bl32_meminfo->free_size =
+ (TRUSTED_DRAM_BASE + TRUSTED_DRAM_SIZE) - BL32_BASE;
+}
+#endif
+
+/*****************************************************************************
+ * Before calling this function BL32 is loaded in memory and its entrypoint
+ * is set by load_image. This is a placeholder for the platform to change
+ * the entrypoint of BL32 and set SPSR and security state.
+ * On MARVELL std. platforms we only set the security state of the entrypoint
+ *****************************************************************************
+ */
+void bl2_plat_set_bl32_ep_info(image_info_t *bl32_image_info,
+ entry_point_info_t *bl32_ep_info)
+{
+ SET_SECURITY_STATE(bl32_ep_info->h.attr, SECURE);
+ bl32_ep_info->spsr = marvell_get_spsr_for_bl32_entry();
+}
+
+/*****************************************************************************
+ * Before calling this function BL33 is loaded in memory and its entrypoint
+ * is set by load_image. This is a placeholder for the platform to change
+ * the entrypoint of BL33 and set SPSR and security state.
+ * On MARVELL std. platforms we only set the security state of the entrypoint
+ *****************************************************************************
+ */
+void bl2_plat_set_bl33_ep_info(image_info_t *image,
+ entry_point_info_t *bl33_ep_info)
+{
+
+ SET_SECURITY_STATE(bl33_ep_info->h.attr, NON_SECURE);
+ bl33_ep_info->spsr = marvell_get_spsr_for_bl33_entry();
+}
+
+/*****************************************************************************
+ * Populate the extents of memory available for loading BL33
+ *****************************************************************************
+ */
+void bl2_plat_get_bl33_meminfo(meminfo_t *bl33_meminfo)
+{
+ bl33_meminfo->total_base = MARVELL_DRAM_BASE;
+ bl33_meminfo->total_size = MARVELL_DRAM_SIZE;
+ bl33_meminfo->free_base = MARVELL_DRAM_BASE;
+ bl33_meminfo->free_size = MARVELL_DRAM_SIZE;
+}
diff --git a/plat/marvell/common/marvell_bl31_setup.c b/plat/marvell/common/marvell_bl31_setup.c
new file mode 100644
index 0000000..a74816b
--- /dev/null
+++ b/plat/marvell/common/marvell_bl31_setup.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <arch.h>
+#include <assert.h>
+#include <console.h>
+#include <debug.h>
+#include <marvell_def.h>
+#include <marvell_plat_priv.h>
+#include <plat_marvell.h>
+#include <platform.h>
+
+#ifdef USE_CCI
+#include <cci.h>
+#endif
+
+/*
+ * The BL31_END constant below identifies the limit of the BL31 image. This
+ * address is used by the MMU setup code and must therefore be page-aligned.
+ * It is the responsibility of the linker script to ensure that the
+ * __BL31_END__ linker symbol refers to a page-aligned address.
+ */
+#define BL31_END (unsigned long)(&__BL31_END__)
+
+/*
+ * Placeholder variables for copying the arguments that have been passed to
+ * BL31 from BL2.
+ */
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+/* Weak definitions may be overridden in specific Marvell standard platform */
+#pragma weak bl31_early_platform_setup
+#pragma weak bl31_platform_setup
+#pragma weak bl31_plat_arch_setup
+#pragma weak bl31_plat_get_next_image_ep_info
+#pragma weak plat_get_syscnt_freq2
+
+/*****************************************************************************
+ * Return a pointer to the 'entry_point_info' structure of the next image for
+ * the security state specified. BL33 corresponds to the non-secure image type
+ * while BL32 corresponds to the secure image type. A NULL pointer is returned
+ * if the image does not exist.
+ *****************************************************************************
+ */
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+ entry_point_info_t *next_image_info;
+
+ assert(sec_state_is_valid(type));
+ next_image_info = (type == NON_SECURE)
+ ? &bl33_image_ep_info : &bl32_image_ep_info;
+
+ return next_image_info;
+}
+
+/*****************************************************************************
+ * Perform any BL31 early platform setup common to Marvell standard platforms.
+ * Here is an opportunity to copy parameters passed by the calling EL (S-EL1
+ * in BL2 & S-EL3 in BL1) before they are lost (potentially). This needs to be
+ * done before the MMU is initialized so that the memory layout can be used
+ * while creating page tables. BL2 has flushed this information to memory, so
+ * we are guaranteed to pick up good data.
+ *****************************************************************************
+ */
+void marvell_bl31_early_platform_setup(bl31_params_t *from_bl2,
+ void *plat_params_from_bl2)
+{
+ /* Initialize the console to provide early debug support */
+ console_init(PLAT_MARVELL_BOOT_UART_BASE,
+ PLAT_MARVELL_BOOT_UART_CLK_IN_HZ,
+ MARVELL_CONSOLE_BAUDRATE);
+
+#if RESET_TO_BL31
+ /* There are no parameters from BL2 if BL31 is a reset vector */
+ assert(from_bl2 == NULL);
+ assert(plat_params_from_bl2 == NULL);
+
+#ifdef BL32_BASE
+ /* Populate entry point information for BL32 */
+ SET_PARAM_HEAD(&bl32_image_ep_info,
+ PARAM_EP,
+ VERSION_1,
+ 0);
+ SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
+ bl32_image_ep_info.pc = BL32_BASE;
+ bl32_image_ep_info.spsr = marvell_get_spsr_for_bl32_entry();
+#endif /* BL32_BASE */
+
+ /* Populate entry point information for BL33 */
+ SET_PARAM_HEAD(&bl33_image_ep_info,
+ PARAM_EP,
+ VERSION_1,
+ 0);
+ /*
+ * Tell BL31 where the non-trusted software image
+ * is located and the entry state information
+ */
+ bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
+ bl33_image_ep_info.spsr = marvell_get_spsr_for_bl33_entry();
+ SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+
+#else
+ /*
+	 * Check that the params passed from BL2 are not NULL.
+ */
+ assert(from_bl2 != NULL);
+ assert(from_bl2->h.type == PARAM_BL31);
+ assert(from_bl2->h.version >= VERSION_1);
+ /*
+ * In debug builds, we pass a special value in 'plat_params_from_bl2'
+ * to verify platform parameters from BL2 to BL31.
+ * In release builds, it's not used.
+ */
+ assert(((unsigned long long)plat_params_from_bl2) ==
+ MARVELL_BL31_PLAT_PARAM_VAL);
+
+ /*
+ * Copy BL32 (if populated by BL2) and BL33 entry point information.
+ * They are stored in Secure RAM, in BL2's address space.
+ */
+ if (from_bl2->bl32_ep_info)
+ bl32_image_ep_info = *from_bl2->bl32_ep_info;
+ bl33_image_ep_info = *from_bl2->bl33_ep_info;
+#endif
+}
+
+void bl31_early_platform_setup(bl31_params_t *from_bl2,
+ void *plat_params_from_bl2)
+{
+ marvell_bl31_early_platform_setup(from_bl2, plat_params_from_bl2);
+
+#ifdef USE_CCI
+ /*
+ * Initialize CCI for this cluster during cold boot.
+ * No need for locks as no other CPU is active.
+ */
+ plat_marvell_interconnect_init();
+
+ /*
+ * Enable CCI coherency for the primary CPU's cluster.
+ * Platform specific PSCI code will enable coherency for other
+ * clusters.
+ */
+ plat_marvell_interconnect_enter_coherency();
+#endif
+}
+
+/*****************************************************************************
+ * Perform any BL31 platform setup common to Marvell standard platforms
+ *****************************************************************************
+ */
+void marvell_bl31_platform_setup(void)
+{
+ /* Initialize the GIC driver, cpu and distributor interfaces */
+ plat_marvell_gic_driver_init();
+ plat_marvell_gic_init();
+
+ /* For Armada-8k-plus family, the SoC includes more than
+ * a single AP die, but the default die that boots is AP #0.
+ * For other families there is only one die (#0).
+ * Initialize psci arch from die 0
+ */
+ marvell_psci_arch_init(0);
+}
+
+/*****************************************************************************
+ * Perform any BL31 platform runtime setup prior to BL31 exit common to Marvell
+ * standard platforms
+ *****************************************************************************
+ */
+void marvell_bl31_plat_runtime_setup(void)
+{
+ /* Initialize the runtime console */
+ console_init(PLAT_MARVELL_BL31_RUN_UART_BASE,
+ PLAT_MARVELL_BL31_RUN_UART_CLK_IN_HZ,
+ MARVELL_CONSOLE_BAUDRATE);
+}
+
+void bl31_platform_setup(void)
+{
+ marvell_bl31_platform_setup();
+}
+
+void bl31_plat_runtime_setup(void)
+{
+ marvell_bl31_plat_runtime_setup();
+}
+
+/*****************************************************************************
+ * Perform the very early platform specific architectural setup shared between
+ * Marvell standard platforms. This only does basic initialization. Later
+ * architectural setup (bl31_arch_setup()) does not do anything platform
+ * specific.
+ *****************************************************************************
+ */
+void marvell_bl31_plat_arch_setup(void)
+{
+ marvell_setup_page_tables(BL31_BASE,
+ BL31_END - BL31_BASE,
+ BL_CODE_BASE,
+ BL_CODE_END,
+ BL_RO_DATA_BASE,
+ BL_RO_DATA_END
+#if USE_COHERENT_MEM
+ , BL_COHERENT_RAM_BASE,
+ BL_COHERENT_RAM_END
+#endif
+ );
+
+#if BL31_CACHE_DISABLE
+ enable_mmu_el3(DISABLE_DCACHE);
+	INFO("Cache is disabled in BL31\n");
+#else
+ enable_mmu_el3(0);
+#endif
+}
+
+void bl31_plat_arch_setup(void)
+{
+ marvell_bl31_plat_arch_setup();
+}
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+ return PLAT_REF_CLK_IN_HZ;
+}
diff --git a/plat/marvell/common/marvell_cci.c b/plat/marvell/common/marvell_cci.c
new file mode 100644
index 0000000..2df4802
--- /dev/null
+++ b/plat/marvell/common/marvell_cci.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <cci.h>
+#include <plat_marvell.h>
+
+static const int cci_map[] = {
+ PLAT_MARVELL_CCI_CLUSTER0_SL_IFACE_IX,
+ PLAT_MARVELL_CCI_CLUSTER1_SL_IFACE_IX
+};
+
+/****************************************************************************
+ * The following functions are defined as weak to allow a platform to override
+ * the way ARM CCI driver is initialised and used.
+ ****************************************************************************
+ */
+#pragma weak plat_marvell_interconnect_init
+#pragma weak plat_marvell_interconnect_enter_coherency
+#pragma weak plat_marvell_interconnect_exit_coherency
+
+
+/****************************************************************************
+ * Helper function to initialize ARM CCI driver.
+ ****************************************************************************
+ */
+void plat_marvell_interconnect_init(void)
+{
+ cci_init(PLAT_MARVELL_CCI_BASE, cci_map, ARRAY_SIZE(cci_map));
+}
+
+/****************************************************************************
+ * Helper function to place current master into coherency
+ ****************************************************************************
+ */
+void plat_marvell_interconnect_enter_coherency(void)
+{
+ cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
+
+/****************************************************************************
+ * Helper function to remove current master from coherency
+ ****************************************************************************
+ */
+void plat_marvell_interconnect_exit_coherency(void)
+{
+ cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
diff --git a/plat/marvell/common/marvell_common.mk b/plat/marvell/common/marvell_common.mk
new file mode 100644
index 0000000..3ee2f3d
--- /dev/null
+++ b/plat/marvell/common/marvell_common.mk
@@ -0,0 +1,67 @@
+# Copyright (C) 2018 Marvell International Ltd.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+# https://spdx.org/licenses
+
+MARVELL_PLAT_BASE := plat/marvell
+MARVELL_PLAT_INCLUDE_BASE := include/plat/marvell
+
+include $(MARVELL_PLAT_BASE)/version.mk
+include $(MARVELL_PLAT_BASE)/marvell.mk
+
+VERSION_STRING +=(Marvell-${SUBVERSION})
+
+SEPARATE_CODE_AND_RODATA := 1
+
+# flag to switch from PLL to ARO
+ARO_ENABLE := 0
+$(eval $(call add_define,ARO_ENABLE))
+# Enable/Disable LLC
+LLC_ENABLE := 1
+$(eval $(call add_define,LLC_ENABLE))
+
+PLAT_INCLUDES += -I. -Iinclude/common/tbbr \
+ -I$(MARVELL_PLAT_INCLUDE_BASE)/common \
+ -I$(MARVELL_PLAT_INCLUDE_BASE)/common/aarch64
+
+
+PLAT_BL_COMMON_SOURCES += lib/xlat_tables/xlat_tables_common.c \
+ lib/xlat_tables/aarch64/xlat_tables.c \
+ $(MARVELL_PLAT_BASE)/common/aarch64/marvell_common.c \
+ $(MARVELL_PLAT_BASE)/common/aarch64/marvell_helpers.S
+
+BL1_SOURCES += drivers/delay_timer/delay_timer.c \
+ drivers/io/io_fip.c \
+ drivers/io/io_memmap.c \
+ drivers/io/io_storage.c \
+ $(MARVELL_PLAT_BASE)/common/marvell_bl1_setup.c \
+ $(MARVELL_PLAT_BASE)/common/marvell_io_storage.c \
+ $(MARVELL_PLAT_BASE)/common/plat_delay_timer.c
+
+ifdef EL3_PAYLOAD_BASE
+# Need the marvell_program_trusted_mailbox() function to release secondary CPUs from
+# their holding pen
+endif
+
+BL2_SOURCES += drivers/io/io_fip.c \
+ drivers/io/io_memmap.c \
+ drivers/io/io_storage.c \
+ $(MARVELL_PLAT_BASE)/common/marvell_bl2_setup.c \
+ $(MARVELL_PLAT_BASE)/common/marvell_io_storage.c
+
+BL31_SOURCES += $(MARVELL_PLAT_BASE)/common/marvell_bl31_setup.c \
+ $(MARVELL_PLAT_BASE)/common/marvell_pm.c \
+ $(MARVELL_PLAT_BASE)/common/marvell_topology.c \
+ plat/common/plat_psci_common.c \
+ $(MARVELL_PLAT_BASE)/common/plat_delay_timer.c \
+ drivers/delay_timer/delay_timer.c
+
+# PSCI functionality
+$(eval $(call add_define,CONFIG_ARM64))
+
+# MSS (SCP) build
+ifeq (${MSS_SUPPORT}, 1)
+include $(MARVELL_PLAT_BASE)/common/mss/mss_common.mk
+endif
+
+fip: mrvl_flash
diff --git a/plat/marvell/common/marvell_ddr_info.c b/plat/marvell/common/marvell_ddr_info.c
new file mode 100644
index 0000000..68bff99
--- /dev/null
+++ b/plat/marvell/common/marvell_ddr_info.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <debug.h>
+#include <platform_def.h>
+#include <ddr_info.h>
+#include <mmio.h>
+
+#define DRAM_CH0_MMAP_LOW_REG(iface, cs, base) \
+ (base + DRAM_CH0_MMAP_LOW_OFFSET + (iface) * 0x10000 + (cs) * 0x8)
+#define DRAM_CH0_MMAP_HIGH_REG(iface, cs, base) \
+ (DRAM_CH0_MMAP_LOW_REG(iface, cs, base) + 4)
+#define DRAM_CS_VALID_ENABLED_MASK 0x1
+#define DRAM_AREA_LENGTH_OFFS 16
+#define DRAM_AREA_LENGTH_MASK (0x1f << DRAM_AREA_LENGTH_OFFS)
+#define DRAM_START_ADDRESS_L_OFFS 23
+#define DRAM_START_ADDRESS_L_MASK \
+ (0x1ff << DRAM_START_ADDRESS_L_OFFS)
+#define DRAM_START_ADDR_HTOL_OFFS 32
+
+#define DRAM_MAX_CS_NUM 2
+
+#define DRAM_CS_ENABLED(iface, cs, base) \
+ (mmio_read_32(DRAM_CH0_MMAP_LOW_REG(iface, cs, base)) & \
+ DRAM_CS_VALID_ENABLED_MASK)
+#define GET_DRAM_REGION_SIZE_CODE(iface, cs, base) \
+ (mmio_read_32(DRAM_CH0_MMAP_LOW_REG(iface, cs, base)) & \
+ DRAM_AREA_LENGTH_MASK) >> DRAM_AREA_LENGTH_OFFS
+
+/* The mapping between the DDR "area length" code and the real DDR size is
+ * specific and looks as follows:
+ * 0 => 384 MB
+ * 1 => 768 MB
+ * 2 => 1536 MB
+ * 3 => 3 GB
+ * 4 => 6 GB
+ *
+ * 7 => 8 MB
+ * 8 => 16 MB
+ * 9 => 32 MB
+ * 10 => 64 MB
+ * 11 => 128 MB
+ * 12 => 256 MB
+ * 13 => 512 MB
+ * 14 => 1 GB
+ * 15 => 2 GB
+ * 16 => 4 GB
+ * 17 => 8 GB
+ * 18 => 16 GB
+ * 19 => 32 GB
+ * 20 => 64 GB
+ * 21 => 128 GB
+ * 22 => 256 GB
+ * 23 => 512 GB
+ * 24 => 1 TB
+ * 25 => 2 TB
+ * 26 => 4 TB
+ *
+ * To calculate the real size, two different formulas are needed:
+ * -- GET_DRAM_REGION_SIZE_ODD for values 0-4 (DRAM_REGION_SIZE_ODD)
+ * -- GET_DRAM_REGION_SIZE_EVEN for values 7-26 (DRAM_REGION_SIZE_EVEN)
+ * Together these formulas cover the whole mapping between the "Area length"
+ * value and the real size (see the table above).
+ */
+#define DRAM_REGION_SIZE_EVEN(C) (((C) >= 7) && ((C) <= 26))
+#define GET_DRAM_REGION_SIZE_EVEN(C) ((uint64_t)1 << ((C) + 16))
+#define DRAM_REGION_SIZE_ODD(C) ((C) <= 4)
+#define GET_DRAM_REGION_SIZE_ODD(C) ((uint64_t)0x18000000 << (C))
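+
+/*
+ * Worked examples (illustrative only) of the two formulas above:
+ * - code 2 (odd range):   0x18000000 << 2 = 1536 MB
+ * - code 14 (even range): 1 << (14 + 16)  = 1 GB
+ * Both match the mapping table above.
+ */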
+
+
+uint64_t mvebu_get_dram_size(uint64_t ap_base_addr)
+{
+ uint64_t mem_size = 0;
+ uint8_t region_code;
+ uint8_t cs, iface;
+
+ for (iface = 0; iface < DRAM_MAX_IFACE; iface++) {
+ for (cs = 0; cs < DRAM_MAX_CS_NUM; cs++) {
+
+ /* Exit loop on first disabled DRAM CS */
+ if (!DRAM_CS_ENABLED(iface, cs, ap_base_addr))
+ break;
+
+ /* Decode area length for current CS
+ * from register value
+ */
+ region_code =
+ GET_DRAM_REGION_SIZE_CODE(iface, cs,
+ ap_base_addr);
+
+ if (DRAM_REGION_SIZE_EVEN(region_code)) {
+ mem_size +=
+ GET_DRAM_REGION_SIZE_EVEN(region_code);
+ } else if (DRAM_REGION_SIZE_ODD(region_code)) {
+ mem_size +=
+ GET_DRAM_REGION_SIZE_ODD(region_code);
+ } else {
+ WARN("%s: Invalid mem region (0x%x) CS#%d\n",
+ __func__, region_code, cs);
+ return 0;
+ }
+ }
+ }
+
+ return mem_size;
+}
diff --git a/plat/marvell/common/marvell_gicv2.c b/plat/marvell/common/marvell_gicv2.c
new file mode 100644
index 0000000..ba8e409
--- /dev/null
+++ b/plat/marvell/common/marvell_gicv2.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <gicv2.h>
+#include <plat_marvell.h>
+#include <platform.h>
+#include <platform_def.h>
+
+/*
+ * The following functions are defined as weak to allow a platform to override
+ * the way the GICv2 driver is initialised and used.
+ */
+#pragma weak plat_marvell_gic_driver_init
+#pragma weak plat_marvell_gic_init
+
+/*
+ * On a GICv2 system, the Group 1 secure interrupts are treated as Group 0
+ * interrupts.
+ */
+static const interrupt_prop_t marvell_interrupt_props[] = {
+ PLAT_MARVELL_G1S_IRQ_PROPS(GICV2_INTR_GROUP0),
+ PLAT_MARVELL_G0_IRQ_PROPS(GICV2_INTR_GROUP0)
+};
+
+static unsigned int target_mask_array[PLATFORM_CORE_COUNT];
+
+/*
+ * Ideally the `marvell_gic_data` structure definition should be `const`, but
+ * it is kept modifiable so that it can be overwritten with different GICD and
+ * GICC base addresses when running on FVP with the VE memory map.
+ */
+static gicv2_driver_data_t marvell_gic_data = {
+ .gicd_base = PLAT_MARVELL_GICD_BASE,
+ .gicc_base = PLAT_MARVELL_GICC_BASE,
+ .interrupt_props = marvell_interrupt_props,
+ .interrupt_props_num = ARRAY_SIZE(marvell_interrupt_props),
+ .target_masks = target_mask_array,
+ .target_masks_num = ARRAY_SIZE(target_mask_array),
+};
+
+/*
+ * Marvell common helper to initialize the GICv2-only driver.
+ */
+void plat_marvell_gic_driver_init(void)
+{
+ gicv2_driver_init(&marvell_gic_data);
+}
+
+void plat_marvell_gic_init(void)
+{
+ gicv2_distif_init();
+ gicv2_pcpu_distif_init();
+ gicv2_set_pe_target_mask(plat_my_core_pos());
+ gicv2_cpuif_enable();
+}
diff --git a/plat/marvell/common/marvell_io_storage.c b/plat/marvell/common/marvell_io_storage.c
new file mode 100644
index 0000000..cb9ece2
--- /dev/null
+++ b/plat/marvell/common/marvell_io_storage.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <assert.h>
+#include <bl_common.h> /* For ARRAY_SIZE */
+#include <debug.h>
+#include <firmware_image_package.h>
+#include <io_driver.h>
+#include <io_fip.h>
+#include <io_memmap.h>
+#include <io_storage.h>
+#include <platform_def.h>
+#include <string.h>
+
+/* IO devices */
+static const io_dev_connector_t *fip_dev_con;
+static uintptr_t fip_dev_handle;
+static const io_dev_connector_t *memmap_dev_con;
+static uintptr_t memmap_dev_handle;
+
+static const io_block_spec_t fip_block_spec = {
+ .offset = PLAT_MARVELL_FIP_BASE,
+ .length = PLAT_MARVELL_FIP_MAX_SIZE
+};
+
+static const io_uuid_spec_t bl2_uuid_spec = {
+ .uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2,
+};
+
+static const io_uuid_spec_t scp_bl2_uuid_spec = {
+ .uuid = UUID_SCP_FIRMWARE_SCP_BL2,
+};
+
+static const io_uuid_spec_t bl31_uuid_spec = {
+ .uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31,
+};
+static const io_uuid_spec_t bl32_uuid_spec = {
+ .uuid = UUID_SECURE_PAYLOAD_BL32,
+};
+static const io_uuid_spec_t bl33_uuid_spec = {
+ .uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
+};
+
+static int open_fip(const uintptr_t spec);
+static int open_memmap(const uintptr_t spec);
+
+struct plat_io_policy {
+ uintptr_t *dev_handle;
+ uintptr_t image_spec;
+ int (*check)(const uintptr_t spec);
+};
+
+/* By default, Marvell platforms load images from the FIP */
+static const struct plat_io_policy policies[] = {
+ [FIP_IMAGE_ID] = {
+ &memmap_dev_handle,
+ (uintptr_t)&fip_block_spec,
+ open_memmap
+ },
+ [BL2_IMAGE_ID] = {
+ &fip_dev_handle,
+ (uintptr_t)&bl2_uuid_spec,
+ open_fip
+ },
+ [SCP_BL2_IMAGE_ID] = {
+ &fip_dev_handle,
+ (uintptr_t)&scp_bl2_uuid_spec,
+ open_fip
+ },
+ [BL31_IMAGE_ID] = {
+ &fip_dev_handle,
+ (uintptr_t)&bl31_uuid_spec,
+ open_fip
+ },
+ [BL32_IMAGE_ID] = {
+ &fip_dev_handle,
+ (uintptr_t)&bl32_uuid_spec,
+ open_fip
+ },
+ [BL33_IMAGE_ID] = {
+ &fip_dev_handle,
+ (uintptr_t)&bl33_uuid_spec,
+ open_fip
+ },
+};
+
+
+/* Weak definitions may be overridden in specific Marvell standard platform */
+#pragma weak plat_marvell_io_setup
+#pragma weak plat_marvell_get_alt_image_source
+
+
+static int open_fip(const uintptr_t spec)
+{
+ int result;
+ uintptr_t local_image_handle;
+
+ /* See if a Firmware Image Package is available */
+ result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
+ if (result == 0) {
+ result = io_open(fip_dev_handle, spec, &local_image_handle);
+ if (result == 0) {
+ VERBOSE("Using FIP\n");
+ io_close(local_image_handle);
+ }
+ }
+ return result;
+}
+
+
+static int open_memmap(const uintptr_t spec)
+{
+ int result;
+ uintptr_t local_image_handle;
+
+ result = io_dev_init(memmap_dev_handle, (uintptr_t)NULL);
+ if (result == 0) {
+ result = io_open(memmap_dev_handle, spec, &local_image_handle);
+ if (result == 0) {
+ VERBOSE("Using Memmap\n");
+ io_close(local_image_handle);
+ }
+ }
+ return result;
+}
+
+
+void marvell_io_setup(void)
+{
+ int io_result;
+
+ io_result = register_io_dev_fip(&fip_dev_con);
+ assert(io_result == 0);
+
+ io_result = register_io_dev_memmap(&memmap_dev_con);
+ assert(io_result == 0);
+
+ /* Open connections to devices and cache the handles */
+ io_result = io_dev_open(fip_dev_con, (uintptr_t)NULL,
+ &fip_dev_handle);
+ assert(io_result == 0);
+
+ io_result = io_dev_open(memmap_dev_con, (uintptr_t)NULL,
+ &memmap_dev_handle);
+ assert(io_result == 0);
+
+ /* Ignore improbable errors in release builds */
+ (void)io_result;
+}
+
+void plat_marvell_io_setup(void)
+{
+ marvell_io_setup();
+}
+
+int plat_marvell_get_alt_image_source(
+ unsigned int image_id __attribute__((unused)),
+ uintptr_t *dev_handle __attribute__((unused)),
+ uintptr_t *image_spec __attribute__((unused)))
+{
+ /* By default do not try an alternative */
+ return -ENOENT;
+}
+
+/*
+ * Return an IO device handle and specification which can be used to access
+ * an image. Use this to enforce platform load policy
+ */
+int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
+ uintptr_t *image_spec)
+{
+ int result;
+ const struct plat_io_policy *policy;
+
+ assert(image_id < ARRAY_SIZE(policies));
+
+ policy = &policies[image_id];
+ result = policy->check(policy->image_spec);
+ if (result == 0) {
+ *image_spec = policy->image_spec;
+ *dev_handle = *(policy->dev_handle);
+ } else {
+ VERBOSE("Trying alternative IO\n");
+ result = plat_marvell_get_alt_image_source(image_id, dev_handle,
+ image_spec);
+ }
+
+ return result;
+}
+
+/*
+ * See if a Firmware Image Package is available,
+ * by checking if TOC is valid or not.
+ */
+int marvell_io_is_toc_valid(void)
+{
+ int result;
+
+ result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
+
+ return result == 0;
+}
diff --git a/plat/marvell/common/marvell_pm.c b/plat/marvell/common/marvell_pm.c
new file mode 100644
index 0000000..2a75790
--- /dev/null
+++ b/plat/marvell/common/marvell_pm.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <psci.h>
+#include <marvell_pm.h>
+
+/* Standard ARM platforms are expected to export plat_arm_psci_pm_ops */
+extern const plat_psci_ops_t plat_arm_psci_pm_ops;
+
+/*****************************************************************************
+ * Private function to program the mailbox for a CPU before it is released
+ * from reset. This function assumes that the mailbox base is within
+ * the MARVELL_SHARED_RAM region.
+ *****************************************************************************
+ */
+void marvell_program_mailbox(uintptr_t address)
+{
+ uintptr_t *mailbox = (void *)PLAT_MARVELL_MAILBOX_BASE;
+
+ /*
+ * Ensure that the PLAT_MARVELL_MAILBOX_BASE is within
+ * MARVELL_SHARED_RAM region.
+ */
+ assert((PLAT_MARVELL_MAILBOX_BASE >= MARVELL_SHARED_RAM_BASE) &&
+ ((PLAT_MARVELL_MAILBOX_BASE + sizeof(*mailbox)) <=
+ (MARVELL_SHARED_RAM_BASE + MARVELL_SHARED_RAM_SIZE)));
+
+ mailbox[MBOX_IDX_MAGIC] = MVEBU_MAILBOX_MAGIC_NUM;
+ mailbox[MBOX_IDX_SEC_ADDR] = address;
+
+ /* Flush data cache if the mail box shared RAM is cached */
+#if PLAT_MARVELL_SHARED_RAM_CACHED
+ flush_dcache_range((uintptr_t)PLAT_MARVELL_MAILBOX_BASE +
+ 8 * MBOX_IDX_MAGIC,
+ 2 * sizeof(uint64_t));
+#endif
+}
+
+/*****************************************************************************
+ * The ARM Standard platform definition of platform porting API
+ * `plat_setup_psci_ops`.
+ *****************************************************************************
+ */
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+ const plat_psci_ops_t **psci_ops)
+{
+ *psci_ops = &plat_arm_psci_pm_ops;
+
+ /* Setup mailbox with entry point. */
+ marvell_program_mailbox(sec_entrypoint);
+ return 0;
+}
diff --git a/plat/marvell/common/marvell_topology.c b/plat/marvell/common/marvell_topology.c
new file mode 100644
index 0000000..a40ff6f
--- /dev/null
+++ b/plat/marvell/common/marvell_topology.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <plat_marvell.h>
+
+/* The power domain tree descriptor */
+unsigned char marvell_power_domain_tree_desc[PLAT_MARVELL_CLUSTER_COUNT + 1];
+
+/*****************************************************************************
+ * This function dynamically constructs the topology according to
+ * PLAT_MARVELL_CLUSTER_COUNT and returns it.
+ *****************************************************************************
+ */
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+ int i;
+
+ /*
+ * The power domain tree does not have a single system level power
+ * domain i.e. a single root node. The first entry in the power domain
+ * descriptor specifies the number of power domains at the highest power
+ * level.
+ * For Marvell platforms this is the number of cluster power domains.
+ */
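+	/*
+	 * For example (values illustrative), with two clusters of two cores
+	 * each the descriptor becomes { 2, 2, 2 }.
+	 */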
+ marvell_power_domain_tree_desc[0] = PLAT_MARVELL_CLUSTER_COUNT;
+
+ for (i = 0; i < PLAT_MARVELL_CLUSTER_COUNT; i++)
+ marvell_power_domain_tree_desc[i + 1] =
+ PLAT_MARVELL_CLUSTER_CORE_COUNT;
+
+ return marvell_power_domain_tree_desc;
+}
+
+/*****************************************************************************
+ * This function validates an MPIDR by checking whether it falls within the
+ * acceptable bounds. An error code (-1) is returned if an incorrect mpidr
+ * is passed.
+ *****************************************************************************
+ */
+int marvell_check_mpidr(u_register_t mpidr)
+{
+ unsigned int nb_id, cluster_id, cpu_id;
+
+ mpidr &= MPIDR_AFFINITY_MASK;
+
+ if (mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK |
+ MPIDR_AFFLVL_MASK << MPIDR_AFF2_SHIFT))
+ return -1;
+
+ /* Get north bridge ID */
+ nb_id = MPIDR_AFFLVL3_VAL(mpidr);
+ cluster_id = MPIDR_AFFLVL1_VAL(mpidr);
+ cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
+
+ if (nb_id >= PLAT_MARVELL_CLUSTER_COUNT)
+ return -1;
+
+ if (cluster_id >= PLAT_MARVELL_CLUSTER_COUNT)
+ return -1;
+
+ if (cpu_id >= PLAT_MARVELL_CLUSTER_CORE_COUNT)
+ return -1;
+
+ return 0;
+}
+
+/*****************************************************************************
+ * This function implements a part of the critical interface between the PSCI
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is returned
+ * in case the MPIDR is invalid.
+ *****************************************************************************
+ */
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+ if (marvell_check_mpidr(mpidr) == -1)
+ return -1;
+
+ return plat_marvell_calc_core_pos(mpidr);
+}
diff --git a/plat/marvell/common/mrvl_sip_svc.c b/plat/marvell/common/mrvl_sip_svc.c
new file mode 100644
index 0000000..ec293af
--- /dev/null
+++ b/plat/marvell/common/mrvl_sip_svc.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <ap_setup.h>
+#include <cache_llc.h>
+#include <debug.h>
+#include <marvell_plat_priv.h>
+#include <runtime_svc.h>
+#include <smcc.h>
+#include "comphy/phy-comphy-cp110.h"
+
+/* #define DEBUG_COMPHY */
+#ifdef DEBUG_COMPHY
+#define debug(format...) NOTICE(format)
+#else
+#define debug(format, arg...)
+#endif
+
+/* COMPHY-related FIDs */
+#define MV_SIP_COMPHY_POWER_ON 0x82000001
+#define MV_SIP_COMPHY_POWER_OFF 0x82000002
+#define MV_SIP_COMPHY_PLL_LOCK 0x82000003
+#define MV_SIP_COMPHY_XFI_TRAIN 0x82000004
+#define MV_SIP_COMPHY_DIG_RESET 0x82000005
+
+/* Miscellaneous FIDs */
+#define MV_SIP_DRAM_SIZE 0x82000010
+#define MV_SIP_LLC_ENABLE 0x82000011
+
+#define MAX_LANE_NR 6
+#define MVEBU_COMPHY_OFFSET 0x441000
+#define MVEBU_SD_OFFSET 0x120000
+
+/* This macro is used to identify COMPHY related calls from SMC function ID */
+#define is_comphy_fid(fid) \
+ ((fid) >= MV_SIP_COMPHY_POWER_ON && (fid) <= MV_SIP_COMPHY_DIG_RESET)
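+/*
+ * Note: the COMPHY FIDs above form a contiguous range (0x82000001 to
+ * 0x82000005), which is what the is_comphy_fid() range check relies on.
+ */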
+
+
+uintptr_t mrvl_sip_smc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ u_register_t ret;
+ int i;
+
+ debug("%s: got SMC (0x%x) x1 0x%lx, x2 0x%lx, x3 0x%lx\n",
+ __func__, smc_fid, x1, x2, x3);
+ if (is_comphy_fid(smc_fid)) {
+
+		/* Some systems pass the SD physical address instead of the
+		 * COMPHY physical address - convert it
+ */
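+		/*
+		 * Illustrative example (base address hypothetical): an SD
+		 * register base of 0xF2120000 would be remapped here to the
+		 * COMPHY base 0xF2441000.
+		 */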
+ if (x1 & MVEBU_SD_OFFSET)
+ x1 = (x1 & ~0xffffff) + MVEBU_COMPHY_OFFSET;
+
+ if ((x1 & 0xffffff) != MVEBU_COMPHY_OFFSET) {
+ ERROR("%s: Wrong smc (0x%x) address: %lx\n",
+ __func__, smc_fid, x1);
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ if (x2 >= MAX_LANE_NR) {
+ ERROR("%s: Wrong smc (0x%x) lane nr: %lx\n",
+ __func__, smc_fid, x2);
+ SMC_RET1(handle, SMC_UNK);
+ }
+ }
+
+ switch (smc_fid) {
+
+	/* COMPHY-related FIDs */
+ case MV_SIP_COMPHY_POWER_ON:
+ /* x1: comphy_base, x2: comphy_index, x3: comphy_mode */
+ ret = mvebu_cp110_comphy_power_on(x1, x2, x3);
+ SMC_RET1(handle, ret);
+ case MV_SIP_COMPHY_POWER_OFF:
+ /* x1: comphy_base, x2: comphy_index */
+ ret = mvebu_cp110_comphy_power_off(x1, x2);
+ SMC_RET1(handle, ret);
+ case MV_SIP_COMPHY_PLL_LOCK:
+ /* x1: comphy_base, x2: comphy_index */
+ ret = mvebu_cp110_comphy_is_pll_locked(x1, x2);
+ SMC_RET1(handle, ret);
+ case MV_SIP_COMPHY_XFI_TRAIN:
+ /* x1: comphy_base, x2: comphy_index */
+ ret = mvebu_cp110_comphy_xfi_rx_training(x1, x2);
+ SMC_RET1(handle, ret);
+ case MV_SIP_COMPHY_DIG_RESET:
+ /* x1: comphy_base, x2: comphy_index, x3: mode, x4: command */
+ ret = mvebu_cp110_comphy_digital_reset(x1, x2, x3, x4);
+ SMC_RET1(handle, ret);
+
+	/* Miscellaneous FIDs */
+ case MV_SIP_DRAM_SIZE:
+ /* x1: ap_base_addr */
+ ret = mvebu_get_dram_size(x1);
+ SMC_RET1(handle, ret);
+ case MV_SIP_LLC_ENABLE:
+ for (i = 0; i < ap_get_count(); i++)
+ llc_runtime_enable(i);
+
+ SMC_RET1(handle, 0);
+
+ default:
+ ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+ }
+}
+
+/* Define a runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+ marvell_sip_svc,
+ OEN_SIP_START,
+ OEN_SIP_END,
+ SMC_TYPE_FAST,
+ NULL,
+ mrvl_sip_smc_handler
+);
diff --git a/plat/marvell/common/mss/mss_common.mk b/plat/marvell/common/mss/mss_common.mk
new file mode 100644
index 0000000..898b6dc
--- /dev/null
+++ b/plat/marvell/common/mss/mss_common.mk
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2018 Marvell International Ltd.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+# https://spdx.org/licenses
+#
+
+
+PLAT_MARVELL := plat/marvell
+MSS_SOURCE := $(PLAT_MARVELL)/common/mss
+
+BL2_SOURCES += $(MSS_SOURCE)/mss_scp_bootloader.c \
+ $(PLAT_MARVELL)/common/plat_delay_timer.c \
+ drivers/delay_timer/delay_timer.c \
+ $(MARVELL_DRV) \
+ $(PLAT_FAMILY_BASE)/$(PLAT)/board/marvell_plat_config.c
+
+BL31_SOURCES += $(MSS_SOURCE)/mss_ipc_drv.c
+
+PLAT_INCLUDES += -I$(MSS_SOURCE)
diff --git a/plat/marvell/common/mss/mss_ipc_drv.c b/plat/marvell/common/mss/mss_ipc_drv.c
new file mode 100644
index 0000000..731c315
--- /dev/null
+++ b/plat/marvell/common/mss/mss_ipc_drv.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <plat_marvell.h>
+#include <debug.h>
+#include <string.h>
+#include <mss_ipc_drv.h>
+#include <mmio.h>
+
+#define IPC_MSG_BASE_MASK MVEBU_REGS_BASE_MASK
+
+#define IPC_CH_NUM_OF_MSG (16)
+#define IPC_CH_MSG_IDX (-1)
+
+unsigned long mv_pm_ipc_msg_base;
+unsigned int mv_pm_ipc_queue_size;
+
+unsigned int msg_sync;
+int msg_index = IPC_CH_MSG_IDX;
+
+/******************************************************************************
+ * mss_pm_ipc_init
+ *
+ * DESCRIPTION: Initialize PM IPC infrastructure
+ ******************************************************************************
+ */
+int mv_pm_ipc_init(unsigned long ipc_control_addr)
+{
+ struct mss_pm_ipc_ctrl *ipc_control =
+ (struct mss_pm_ipc_ctrl *)ipc_control_addr;
+
+ /* Initialize PM IPC control block */
+ mv_pm_ipc_msg_base = ipc_control->msg_base_address |
+ IPC_MSG_BASE_MASK;
+ mv_pm_ipc_queue_size = ipc_control->queue_size;
+
+ return 0;
+}
+
+/******************************************************************************
+ * mv_pm_ipc_queue_addr_get
+ *
+ * DESCRIPTION: Returns the IPC queue address
+ ******************************************************************************
+ */
+unsigned int mv_pm_ipc_queue_addr_get(void)
+{
+ unsigned int addr;
+
+ inv_dcache_range((uint64_t)&msg_index, sizeof(msg_index));
+ msg_index = msg_index + 1;
+ if (msg_index >= IPC_CH_NUM_OF_MSG)
+ msg_index = 0;
+
+ addr = (unsigned int)(mv_pm_ipc_msg_base +
+ (msg_index * mv_pm_ipc_queue_size));
+
+ flush_dcache_range((uint64_t)&msg_index, sizeof(msg_index));
+
+ return addr;
+}
+
+/******************************************************************************
+ * mv_pm_ipc_msg_rx
+ *
+ * DESCRIPTION: Retrieve message from IPC channel
+ ******************************************************************************
+ */
+int mv_pm_ipc_msg_rx(unsigned int channel_id, struct mss_pm_ipc_msg *msg)
+{
+ unsigned int addr = mv_pm_ipc_queue_addr_get();
+
+ msg->msg_reply = mmio_read_32(addr + IPC_MSG_REPLY_LOC);
+
+ return 0;
+}
+
+/******************************************************************************
+ * mv_pm_ipc_msg_tx
+ *
+ * DESCRIPTION: Send message via IPC channel
+ ******************************************************************************
+ */
+int mv_pm_ipc_msg_tx(unsigned int channel_id, unsigned int msg_id,
+ unsigned int cluster_power_state)
+{
+ unsigned int addr = mv_pm_ipc_queue_addr_get();
+
+ /* Validate the entry for message placed by the host is free */
+ if (mmio_read_32(addr + IPC_MSG_STATE_LOC) == IPC_MSG_FREE) {
+ inv_dcache_range((uint64_t)&msg_sync, sizeof(msg_sync));
+ msg_sync = msg_sync + 1;
+ flush_dcache_range((uint64_t)&msg_sync, sizeof(msg_sync));
+
+ mmio_write_32(addr + IPC_MSG_SYNC_ID_LOC, msg_sync);
+ mmio_write_32(addr + IPC_MSG_ID_LOC, msg_id);
+ mmio_write_32(addr + IPC_MSG_CPU_ID_LOC, channel_id);
+ mmio_write_32(addr + IPC_MSG_POWER_STATE_LOC,
+ cluster_power_state);
+ mmio_write_32(addr + IPC_MSG_STATE_LOC, IPC_MSG_OCCUPY);
+
+ } else {
+ ERROR("%s: FAILED\n", __func__);
+ }
+
+ return 0;
+}
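+
+/*
+ * Usage sketch (illustrative; the IPC control block address and channel id
+ * are platform specific and assumed to be known by the caller):
+ *
+ *	struct mss_pm_ipc_msg msg;
+ *
+ *	mv_pm_ipc_init(ipc_control_addr);
+ *	mv_pm_ipc_msg_tx(channel_id, PM_IPC_MSG_CPU_OFF, power_state);
+ *	mv_pm_ipc_msg_rx(channel_id, &msg);   (msg.msg_reply holds the reply)
+ */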
diff --git a/plat/marvell/common/mss/mss_ipc_drv.h b/plat/marvell/common/mss/mss_ipc_drv.h
new file mode 100644
index 0000000..28eb907
--- /dev/null
+++ b/plat/marvell/common/mss/mss_ipc_drv.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __PM_IPC_DRV_H
+#define __PM_IPC_DRV_H
+
+#include <psci.h>
+
+#define MV_PM_FW_IPC_VERSION_MAGIC (0xCA530000) /* Do NOT change */
+/* Increment for each version */
+#define MV_PM_FW_IPC_VERSION_SEQ (0x00000001)
+#define MV_PM_FW_IPC_VERSION (MV_PM_FW_IPC_VERSION_MAGIC | \
+ MV_PM_FW_IPC_VERSION_SEQ)
+
+#define IPC_MSG_STATE_LOC (0x0)
+#define IPC_MSG_SYNC_ID_LOC (0x4)
+#define IPC_MSG_ID_LOC (0x8)
+#define IPC_MSG_RET_CH_ID_LOC (0xC)
+#define IPC_MSG_CPU_ID_LOC (0x10)
+#define IPC_MSG_CLUSTER_ID_LOC (0x14)
+#define IPC_MSG_SYSTEM_ID_LOC (0x18)
+#define IPC_MSG_POWER_STATE_LOC (0x1C)
+#define IPC_MSG_REPLY_LOC (0x20)
+#define IPC_MSG_RESERVED_LOC (0x24)
+
+/* IPC initialization state */
+enum mss_pm_ipc_init_state {
+ IPC_UN_INITIALIZED = 1,
+ IPC_INITIALIZED = 2
+};
+
+/* IPC queue direction */
+enum mss_pm_ipc_init_msg_dir {
+ IPC_MSG_TX = 0,
+ IPC_MSG_RX = 1
+};
+
+/* IPC message state */
+enum mss_pm_ipc_msg_state {
+ IPC_MSG_FREE = 1,
+ IPC_MSG_OCCUPY = 2
+
+};
+
+/* IPC control block */
+struct mss_pm_ipc_ctrl {
+ unsigned int ctrl_base_address;
+ unsigned int msg_base_address;
+ unsigned int num_of_channels;
+ unsigned int channel_size;
+ unsigned int queue_size;
+};
+
+/* IPC message types */
+enum mss_pm_msg_id {
+ PM_IPC_MSG_CPU_SUSPEND = 1,
+ PM_IPC_MSG_CPU_OFF = 2,
+ PM_IPC_MSG_CPU_ON = 3,
+ PM_IPC_MSG_SYSTEM_RESET = 4,
+ PM_IPC_MSG_SYSTEM_SUSPEND = 5,
+ PM_IPC_MAX_MSG
+};
+
+struct mss_pm_ipc_msg {
+ unsigned int msg_sync_id; /*
+ * Sync number, used to match a
+ * message reply with the
+ * corresponding request
+ */
+ unsigned int msg_id; /* Message Id */
+ unsigned int ret_channel_id; /* IPC channel reply */
+ unsigned int cpu_id; /* CPU Id */
+ unsigned int cluster_id; /* Cluster Id */
+ unsigned int system_id; /* System Id */
+ unsigned int power_state;
+ unsigned int msg_reply; /* Message reply */
+};
+
+/* IPC queue */
+struct mss_pm_ipc_queue {
+ unsigned int state;
+ struct mss_pm_ipc_msg msg;
+};
+
+/* IPC channel */
+struct mss_pm_ipc_ch {
+ struct mss_pm_ipc_queue *tx_queue;
+ struct mss_pm_ipc_queue *rx_queue;
+};
+
+/*****************************************************************************
+ * mv_pm_ipc_init
+ *
+ * DESCRIPTION: Initialize PM IPC infrastructure
+ *****************************************************************************
+ */
+int mv_pm_ipc_init(unsigned long ipc_control_addr);
+
+/*****************************************************************************
+ * mv_pm_ipc_msg_rx
+ *
+ * DESCRIPTION: Retrieve message from IPC channel
+ *****************************************************************************
+ */
+int mv_pm_ipc_msg_rx(unsigned int channel_id, struct mss_pm_ipc_msg *msg);
+
+/*****************************************************************************
+ * mv_pm_ipc_msg_tx
+ *
+ * DESCRIPTION: Send message via IPC channel
+ *****************************************************************************
+ */
+int mv_pm_ipc_msg_tx(unsigned int channel_id, unsigned int msg_id,
+ unsigned int cluster_power_state);
+
+#endif /* __PM_IPC_DRV_H */
diff --git a/plat/marvell/common/mss/mss_mem.h b/plat/marvell/common/mss/mss_mem.h
new file mode 100644
index 0000000..efff59e
--- /dev/null
+++ b/plat/marvell/common/mss/mss_mem.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __MSS_PM_MEM_H
+#define __MSS_PM_MEM_H
+
+/* MSS SRAM Memory base */
+#define MSS_SRAM_PM_CONTROL_BASE (MVEBU_REGS_BASE + 0x520000)
+
+enum mss_pm_ctrl_handshake {
+ MSS_UN_INITIALIZED = 0,
+ MSS_COMPATIBILITY_ERROR = 1,
+ MSS_ACKNOWLEDGMENT = 2,
+ HOST_ACKNOWLEDGMENT = 3
+};
+
+enum mss_pm_ctrl_rtos_env {
+ MSS_MULTI_PROCESS_ENV = 0,
+ MSS_SINGLE_PROCESS_ENV = 1,
+ MSS_MAX_PROCESS_ENV
+};
+
+struct mss_pm_ctrl_block {
+ /* This field is used to synchronize the Host
+ * and MSS initialization sequence.
+ * Valid values:
+ * 0 - Un-Initialized
+ * 1 - Compatibility Error
+ * 2 - MSS Acknowledgment
+ * 3 - Host Acknowledgment
+ */
+ unsigned int handshake;
+
+ /*
+ * This field holds the Host IPC version. Once received, the MSS
+ * compares it against its own IPC version and sets the MSS acknowledge
+ * field to "compatibility error" if they do not match.
+ */
+ unsigned int ipc_version;
+ unsigned int ipc_base_address;
+ unsigned int ipc_state;
+
+ /* The following fields define the firmware core architecture */
+ unsigned int num_of_cores;
+ unsigned int num_of_clusters;
+ unsigned int num_of_cores_per_cluster;
+
+ /* The following fields define the PM trace debug addresses and size */
+ unsigned int pm_trace_ctrl_base_address;
+ unsigned int pm_trace_info_base_address;
+ unsigned int pm_trace_info_core_size;
+
+ unsigned int ctrl_blk_size;
+};
+
+#endif /* __MSS_PM_MEM_H */
diff --git a/plat/marvell/common/mss/mss_scp_bl2_format.h b/plat/marvell/common/mss/mss_scp_bl2_format.h
new file mode 100644
index 0000000..c04df72
--- /dev/null
+++ b/plat/marvell/common/mss/mss_scp_bl2_format.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __MSS_SCP_BL2_FORMAT_H
+#define __MSS_SCP_BL2_FORMAT_H
+
+#define MAX_NR_OF_FILES 5
+#define FILE_MAGIC 0xddd01ff
+#define HEADER_VERSION 0x1
+
+#define MSS_IDRAM_SIZE 0x10000 /* 64KB */
+#define MG_SRAM_SIZE 0x20000 /* 128KB */
+
+/* Types definitions */
+typedef struct file_header {
+ /* Magic specific for concatenated file (used for validation) */
+ uint32_t magic;
+ uint32_t nr_of_imgs; /* Number of images concatenated */
+} file_header_t;
+
+/* Types definitions */
+enum cm3_t {
+ MSS_AP,
+ MSS_CP0,
+ MSS_CP1,
+ MSS_CP2,
+ MSS_CP3,
+ MG_CP0,
+ MG_CP1,
+};
+
+typedef struct img_header {
+ uint32_t type; /* CM3 type, can be one of cm3_t */
+ uint32_t length; /* Image length */
+ uint32_t version; /* For sanity checks and future
+ * extended functionality
+ */
+} img_header_t;
+
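+/*
+ * Layout of the concatenated SCP_BL2 image, as parsed by
+ * split_and_load_bl2_image() in mss_scp_bootloader.c:
+ *
+ *	+--------------------+
+ *	| file_header_t      |  magic, nr_of_imgs
+ *	+--------------------+
+ *	| img_header_t [0]   |
+ *	|        ...         |  nr_of_imgs entries
+ *	| img_header_t [N-1] |
+ *	+--------------------+
+ *	| image payload 0    |  img_header[0].length bytes
+ *	|        ...         |
+ *	| image payload N-1  |
+ *	+--------------------+
+ */
+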
+#endif /* __MSS_SCP_BL2_FORMAT_H */
diff --git a/plat/marvell/common/mss/mss_scp_bootloader.c b/plat/marvell/common/mss/mss_scp_bootloader.c
new file mode 100644
index 0000000..334fcfc
--- /dev/null
+++ b/plat/marvell/common/mss/mss_scp_bootloader.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <mmio.h>
+#include <arch_helpers.h> /* for cache maintenance operations */
+#include <platform_def.h>
+#include <delay_timer.h>
+
+#include <plat_pm_trace.h>
+#include <mss_scp_bootloader.h>
+#include <mss_ipc_drv.h>
+#include <mss_mem.h>
+#include <mss_scp_bl2_format.h>
+
+#define MSS_DMA_SRCBR(base) (base + 0xC0)
+#define MSS_DMA_DSTBR(base) (base + 0xC4)
+#define MSS_DMA_CTRLR(base) (base + 0xC8)
+#define MSS_M3_RSTCR(base) (base + 0xFC)
+
+#define MSS_DMA_CTRLR_SIZE_OFFSET (0)
+#define MSS_DMA_CTRLR_REQ_OFFSET (15)
+#define MSS_DMA_CTRLR_REQ_SET (1)
+#define MSS_DMA_CTRLR_ACK_OFFSET (12)
+#define MSS_DMA_CTRLR_ACK_MASK (0x1)
+#define MSS_DMA_CTRLR_ACK_READY (1)
+#define MSS_M3_RSTCR_RST_OFFSET (0)
+#define MSS_M3_RSTCR_RST_OFF (1)
+
+#define MSS_DMA_TIMEOUT 1000
+#define MSS_EXTERNAL_SPACE 0x50000000
+#define MSS_EXTERNAL_ADDR_MASK 0xfffffff
+
+#define DMA_SIZE 128
+
+#define MSS_HANDSHAKE_TIMEOUT 50
+
+static int mss_check_image_ready(volatile struct mss_pm_ctrl_block *mss_pm_crtl)
+{
+ int timeout = MSS_HANDSHAKE_TIMEOUT;
+
+ /* Wait for SCP to signal it's ready */
+ while ((mss_pm_crtl->handshake != MSS_ACKNOWLEDGMENT) &&
+ (timeout-- > 0))
+ mdelay(1);
+
+ if (mss_pm_crtl->handshake != MSS_ACKNOWLEDGMENT)
+ return -1;
+
+ mss_pm_crtl->handshake = HOST_ACKNOWLEDGMENT;
+
+ return 0;
+}
+
+static int mss_image_load(uint32_t src_addr, uint32_t size, uintptr_t mss_regs)
+{
+ uint32_t i, loop_num, timeout;
+
+ /* Check that the image size does not exceed the ID-RAM size of the MSS CM3 */
+ if (size > MSS_IDRAM_SIZE) {
+ ERROR("image is too big to fit into MSS CM3 memory\n");
+ return 1;
+ }
+
+ NOTICE("Loading MSS image from addr. 0x%x Size 0x%x to MSS at 0x%lx\n",
+ src_addr, size, mss_regs);
+ /* load image to MSS RAM using DMA */
+ loop_num = (size / DMA_SIZE) + (((size & (DMA_SIZE - 1)) == 0) ? 0 : 1);
+
+ for (i = 0; i < loop_num; i++) {
+ /* write destination and source addresses */
+ mmio_write_32(MSS_DMA_SRCBR(mss_regs),
+ MSS_EXTERNAL_SPACE |
+ ((src_addr & MSS_EXTERNAL_ADDR_MASK) +
+ (i * DMA_SIZE)));
+ mmio_write_32(MSS_DMA_DSTBR(mss_regs), (i * DMA_SIZE));
+
+ dsb(); /* make sure DMA data is ready before triggering it */
+
+ /* set the DMA control register */
+ mmio_write_32(MSS_DMA_CTRLR(mss_regs), ((MSS_DMA_CTRLR_REQ_SET
+ << MSS_DMA_CTRLR_REQ_OFFSET) |
+ (DMA_SIZE << MSS_DMA_CTRLR_SIZE_OFFSET)));
+
+ /* Poll the DMA ACK field in MSS_DMA_CTRLR until it reports ready */
+ timeout = MSS_DMA_TIMEOUT;
+ while (timeout) {
+ if ((mmio_read_32(MSS_DMA_CTRLR(mss_regs)) >>
+ MSS_DMA_CTRLR_ACK_OFFSET & MSS_DMA_CTRLR_ACK_MASK)
+ == MSS_DMA_CTRLR_ACK_READY) {
+ break;
+ }
+
+ udelay(50);
+ timeout--;
+ }
+
+ if (timeout == 0) {
+ ERROR("\nDMA failed to load MSS image\n");
+ return 1;
+ }
+ }
+
+ bl2_plat_configure_mss_windows(mss_regs);
+
+ /* Release M3 from reset */
+ mmio_write_32(MSS_M3_RSTCR(mss_regs), (MSS_M3_RSTCR_RST_OFF <<
+ MSS_M3_RSTCR_RST_OFFSET));
+
+ NOTICE("Done\n");
+
+ return 0;
+}
+
+/* Load image to the MSS AP and do PM-related initialization.
+ * Note that this routine differs from the other CM3 loading routines because
+ * the AP firmware is dedicated to PM and therefore some additional PM
+ * initialization is required.
+ */
+static int mss_ap_load_image(uintptr_t single_img,
+ uint32_t image_size, uint32_t ap_idx)
+{
+ volatile struct mss_pm_ctrl_block *mss_pm_crtl;
+ int ret;
+
+ /* TODO: add PM Control Info from platform */
+ mss_pm_crtl = (struct mss_pm_ctrl_block *)MSS_SRAM_PM_CONTROL_BASE;
+ mss_pm_crtl->ipc_version = MV_PM_FW_IPC_VERSION;
+ mss_pm_crtl->num_of_clusters = PLAT_MARVELL_CLUSTER_COUNT;
+ mss_pm_crtl->num_of_cores_per_cluster =
+ PLAT_MARVELL_CLUSTER_CORE_COUNT;
+ mss_pm_crtl->num_of_cores = PLAT_MARVELL_CLUSTER_COUNT *
+ PLAT_MARVELL_CLUSTER_CORE_COUNT;
+ mss_pm_crtl->pm_trace_ctrl_base_address = AP_MSS_ATF_CORE_CTRL_BASE;
+ mss_pm_crtl->pm_trace_info_base_address = AP_MSS_ATF_CORE_INFO_BASE;
+ mss_pm_crtl->pm_trace_info_core_size = AP_MSS_ATF_CORE_INFO_SIZE;
+ VERBOSE("MSS Control Block = 0x%x\n", MSS_SRAM_PM_CONTROL_BASE);
+ VERBOSE("mss_pm_crtl->ipc_version = 0x%x\n",
+ mss_pm_crtl->ipc_version);
+ VERBOSE("mss_pm_crtl->num_of_cores = 0x%x\n",
+ mss_pm_crtl->num_of_cores);
+ VERBOSE("mss_pm_crtl->num_of_clusters = 0x%x\n",
+ mss_pm_crtl->num_of_clusters);
+ VERBOSE("mss_pm_crtl->num_of_cores_per_cluster = 0x%x\n",
+ mss_pm_crtl->num_of_cores_per_cluster);
+ VERBOSE("mss_pm_crtl->pm_trace_ctrl_base_address = 0x%x\n",
+ mss_pm_crtl->pm_trace_ctrl_base_address);
+ VERBOSE("mss_pm_crtl->pm_trace_info_base_address = 0x%x\n",
+ mss_pm_crtl->pm_trace_info_base_address);
+ VERBOSE("mss_pm_crtl->pm_trace_info_core_size = 0x%x\n",
+ mss_pm_crtl->pm_trace_info_core_size);
+
+ /* TODO: add checksum to image */
+ VERBOSE("Send info about the SCP_BL2 image to be transferred to SCP\n");
+
+ ret = mss_image_load(single_img, image_size,
+ bl2_plat_get_ap_mss_regs(ap_idx));
+ if (ret != 0) {
+ ERROR("SCP Image load failed\n");
+ return -1;
+ }
+
+ /* check that the image was loaded successfully */
+ ret = mss_check_image_ready(mss_pm_crtl);
+ if (ret != 0)
+ NOTICE("SCP Image doesn't contain PM firmware\n");
+
+ return 0;
+}
+
+/* Load CM3 image (single_img) to CM3 pointed by cm3_type */
+static int load_img_to_cm3(enum cm3_t cm3_type,
+ uintptr_t single_img, uint32_t image_size)
+{
+ int ret, ap_idx, cp_index;
+ uint32_t ap_count = bl2_plat_get_ap_count();
+
+ switch (cm3_type) {
+ case MSS_AP:
+ for (ap_idx = 0; ap_idx < ap_count; ap_idx++) {
+ NOTICE("Load image to AP%d MSS\n", ap_idx);
+ ret = mss_ap_load_image(single_img, image_size, ap_idx);
+ if (ret != 0)
+ return ret;
+ }
+ break;
+ case MSS_CP0:
+ case MSS_CP1:
+ case MSS_CP2:
+ case MSS_CP3:
+ /* In enum cm3_t: MSS_AP = 0, MSS_CP0 = 1, ..., MSS_CP3 = 4,
+ * so the actual CP index is cm3_type - 1.
+ */
+ cp_index = cm3_type - 1;
+ for (ap_idx = 0; ap_idx < ap_count; ap_idx++) {
+ /* Check if we should load this image
+ * according to number of CPs
+ */
+ if (bl2_plat_get_cp_count(ap_idx) <= cp_index) {
+ NOTICE("Skipping MSS CP%d related image\n",
+ cp_index);
+ break;
+ }
+
+ NOTICE("Load image to CP%d MSS AP%d\n",
+ cp_index, ap_idx);
+ ret = mss_image_load(single_img, image_size,
+ bl2_plat_get_cp_mss_regs(
+ ap_idx, cp_index));
+ if (ret != 0) {
+ ERROR("SCP Image load failed\n");
+ return -1;
+ }
+ }
+ break;
+ case MG_CP0:
+ /* TODO: */
+ NOTICE("Load image to CP0 MG not supported\n");
+ break;
+ case MG_CP1:
+ /* TODO: */
+ NOTICE("Load image to CP1 MG not supported\n");
+ break;
+ default:
+ ERROR("SCP_BL2 wrong img format (cm3_type=%d)\n", cm3_type);
+ break;
+ }
+
+ return 0;
+}
+
+/* The Armada 8K has 5 service CPUs and the Armada 7K has 3, so a way to load
+ * firmware to all of the service CPUs is required. To achieve that, the
+ * scp_bl2 image is in fact a file containing up to 5 concatenated firmware
+ * images; this routine splits the concatenated image into the single images
+ * dedicated to the appropriate service CPUs and then loads them.
+ */
+static int split_and_load_bl2_image(void *image)
+{
+ file_header_t *file_hdr;
+ img_header_t *img_hdr;
+ uintptr_t single_img;
+ int i;
+
+ file_hdr = (file_header_t *)image;
+
+ if (file_hdr->magic != FILE_MAGIC) {
+ ERROR("SCP_BL2 wrong img format\n");
+ return -1;
+ }
+
+ if (file_hdr->nr_of_imgs > MAX_NR_OF_FILES) {
+ ERROR("SCP_BL2 concatenated image contains to many images\n");
+ return -1;
+ }
+
+ img_hdr = (img_header_t *)((uintptr_t)image + sizeof(file_header_t));
+ single_img = (uintptr_t)image + sizeof(file_header_t) +
+ sizeof(img_header_t) * file_hdr->nr_of_imgs;
+
+ NOTICE("SCP_BL2 contains %d concatenated images\n",
+ file_hdr->nr_of_imgs);
+ for (i = 0; i < file_hdr->nr_of_imgs; i++) {
+
+ /* Before loading make sanity check on header */
+ if (img_hdr->version != HEADER_VERSION) {
+ ERROR("Wrong header, img corrupted exiting\n");
+ return -1;
+ }
+
+ load_img_to_cm3(img_hdr->type, single_img, img_hdr->length);
+
+ /* Prepare offsets for next run */
+ single_img += img_hdr->length;
+ img_hdr++;
+ }
+
+ return 0;
+}
+
+int scp_bootloader_transfer(void *image, unsigned int image_size)
+{
+#ifdef SCP_BL2_BASE
+ assert((uintptr_t) image == SCP_BL2_BASE);
+#endif
+
+ VERBOSE("Concatenated img size %d\n", image_size);
+
+ if (image_size == 0) {
+ ERROR("SCP_BL2 image size can't be 0 (current size = 0x%x)\n",
+ image_size);
+ return -1;
+ }
+
+ if (split_and_load_bl2_image(image))
+ return -1;
+
+ return 0;
+}
diff --git a/plat/marvell/common/mss/mss_scp_bootloader.h b/plat/marvell/common/mss/mss_scp_bootloader.h
new file mode 100644
index 0000000..67c387a
--- /dev/null
+++ b/plat/marvell/common/mss/mss_scp_bootloader.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef __MSS_SCP_BOOTLOADER_H__
+#define __MSS_SCP_BOOTLOADER_H__
+
+int scp_bootloader_transfer(void *image, unsigned int image_size);
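+
+/*
+ * The hooks below are expected to be implemented by the platform port;
+ * they are consumed by mss_scp_bootloader.c during SCP_BL2 loading.
+ */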
+uintptr_t bl2_plat_get_cp_mss_regs(int ap_idx, int cp_idx);
+uintptr_t bl2_plat_get_ap_mss_regs(int ap_idx);
+uint32_t bl2_plat_get_cp_count(int ap_idx);
+uint32_t bl2_plat_get_ap_count(void);
+void bl2_plat_configure_mss_windows(uintptr_t mss_regs);
+int bl2_plat_mss_check_image_ready(void);
+
+#endif /* __MSS_SCP_BOOTLOADER_H__ */
diff --git a/plat/marvell/common/plat_delay_timer.c b/plat/marvell/common/plat_delay_timer.c
new file mode 100644
index 0000000..dfc77c7
--- /dev/null
+++ b/plat/marvell/common/plat_delay_timer.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <arch_helpers.h>
+#include <delay_timer.h>
+#include <mvebu_def.h>
+
+#define SYS_COUNTER_FREQ_IN_MHZ (COUNTER_FREQUENCY/1000000)
+
+static uint32_t plat_get_timer_value(void)
+{
+ /*
+ * Generic delay timer implementation expects the timer to be a down
+ * counter. We apply bitwise NOT operator to the tick values returned
+ * by read_cntpct_el0() to simulate the down counter.
+ */
+ return (uint32_t)(~read_cntpct_el0());
+}
+
+static const timer_ops_t plat_timer_ops = {
+ .get_timer_value = plat_get_timer_value,
+ .clk_mult = 1,
+ .clk_div = SYS_COUNTER_FREQ_IN_MHZ
+};
+
+void plat_delay_timer_init(void)
+{
+ timer_init(&plat_timer_ops);
+}
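+
+/*
+ * With clk_mult = 1 and clk_div = SYS_COUNTER_FREQ_IN_MHZ, the generic delay
+ * timer translates a requested delay of N microseconds into
+ * N * SYS_COUNTER_FREQ_IN_MHZ counter ticks; the actual COUNTER_FREQUENCY
+ * value is platform specific.
+ */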
diff --git a/plat/marvell/marvell.mk b/plat/marvell/marvell.mk
new file mode 100644
index 0000000..217ad46
--- /dev/null
+++ b/plat/marvell/marvell.mk
@@ -0,0 +1,53 @@
+# Copyright (C) 2018 Marvell International Ltd.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+# https://spdx.org/licenses
+
+# Marvell images
+BOOT_IMAGE := boot-image.bin
+BOOT_ENC_IMAGE := boot-image-enc.bin
+FLASH_IMAGE := flash-image.bin
+
+# Make non-trusted image by default
+MARVELL_SECURE_BOOT := 0
+$(eval $(call add_define,MARVELL_SECURE_BOOT))
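+# Example (illustrative): build a trusted image by overriding the flag on the
+# make command line, e.g. "make PLAT=<platform> MARVELL_SECURE_BOOT=1 all"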
+
+# Enable compilation for Palladium emulation platform
+PALLADIUM := 0
+$(eval $(call add_define,PALLADIUM))
+
+ifeq (${MARVELL_SECURE_BOOT},1)
+DOIMAGE_SEC_FLAGS := -c $(DOIMAGE_SEC)
+DOIMAGE_LIBS_CHECK = \
+ if ! [ -d "/usr/include/mbedtls" ]; then \
+ echo "****************************************" >&2; \
+ echo "Missing mbedTLS installation! " >&2; \
+ echo "Please download it from \"tls.mbed.org\"" >&2; \
+ echo "Alternatively on Debian/Ubuntu system install" >&2; \
+ echo "\"libmbedtls-dev\" package" >&2; \
+ echo "Make sure to use version 2.1.0 or later" >&2; \
+ echo "****************************************" >&2; \
+ exit 1; \
+ else if ! [ -f "/usr/include/libconfig.h" ]; then \
+ echo "********************************************************" >&2; \
+ echo "Missing Libconfig installation!" >&2; \
+ echo "Please download it from \"www.hyperrealm.com/libconfig/\"" >&2; \
+ echo "Alternatively on Debian/Ubuntu system install packages" >&2; \
+ echo "\"libconfig8\" and \"libconfig8-dev\"" >&2; \
+ echo "********************************************************" >&2; \
+ exit 1; \
+ fi \
+ fi
+else #MARVELL_SECURE_BOOT
+DOIMAGE_LIBS_CHECK =
+DOIMAGE_SEC_FLAGS =
+endif #MARVELL_SECURE_BOOT
+
+mrvl_clean:
+ @echo " Doimage CLEAN"
+ ${Q}${MAKE} PLAT=${PLAT} --no-print-directory -C ${DOIMAGEPATH} clean
+
+${DOIMAGETOOL}: mrvl_clean
+ ${Q}${MAKE} --no-print-directory -C ${DOIMAGEPATH} WTMI_IMG=$(WTMI_IMG)
+
+
diff --git a/plat/marvell/version.mk b/plat/marvell/version.mk
new file mode 100644
index 0000000..017e119
--- /dev/null
+++ b/plat/marvell/version.mk
@@ -0,0 +1 @@
+SUBVERSION = devel-18.08.0