marvell: drivers: Add MoChi drivers

Add ModularChip and MCI drivers for the A8K SoC family.
The ModularChip drivers support the internal building
blocks of Marvell ARMADA SoCs - APN806, AP807 and CP110.

Change-Id: I9559343788fa2e5eb47e6384a4a7d47408787c02
Signed-off-by: Hanna Hawa <hannah@marvell.com>
Signed-off-by: Konstantin Porotchkin <kostap@marvell.com>
diff --git a/drivers/marvell/mci.c b/drivers/marvell/mci.c
new file mode 100644
index 0000000..721504e
--- /dev/null
+++ b/drivers/marvell/mci.c
@@ -0,0 +1,832 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* MCI bus driver for Marvell ARMADA 8K and 8K+ SoCs */
+
+#include <debug.h>
+#include <delay_timer.h>
+#include <mmio.h>
+#include <mci.h>
+#include <mvebu.h>
+#include <mvebu_def.h>
+#include <plat_marvell.h>
+
+/* /HB /Units /Direct_regs /Direct regs
+ * /Configuration Register Write/Read Data Register
+ */
+#define MCI_WRITE_READ_DATA_REG(mci_index)	\
+					MVEBU_MCI_REG_BASE_REMAP(mci_index)
+/* /HB /Units /Direct_regs /Direct regs
+ * /Configuration Register Access Command Register
+ */
+#define MCI_ACCESS_CMD_REG(mci_index)		\
+				(MVEBU_MCI_REG_BASE_REMAP(mci_index) + 0x4)
+
+/* Access Command fields:
+ * bit[3:0]   - Sub command: 1 => Peripheral Config Register Read,
+ *			     0 => Peripheral Config Register Write,
+ *			     2 => Peripheral Assign ID request,
+ *			     3 => Circular Config Write
+ * bit[5]     - 1 => Local (same chip access) 0 => Remote
+ * bit[15:8]  - Destination hop ID. Put Global ID (GID) here (see scheme below).
+ * bit[23:22] - 0x3 IHB PHY REG address space, 0x0 IHB Controller space
+ * bit[21:16] - Low 6 bits of offset. High 2 bits are taken from bit[28:27]
+ *		of IHB_PHY_CTRL
+ *		(must be set before any PHY register access occurs):
+ *		/IHB_REG /IHB_REGInterchip Hopping Bus Registers
+ *		/IHB Version Control Register
+ *
+ *		ixi_ihb_top		IHB PHY
+ *  AXI -----------------------------   -------------
+ *   <--| axi_hb_top | ihb_pipe_top |-->|           |
+ *   -->|   GID=1    |     GID=0    |<--|           |
+ *      -----------------------------   -------------
+ */
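+/* For example, a local read of the MCi Status Register (IHB reg 0x5) is
+ * encoded as MCI_INDIRECT_REG_CTRL_ADDR(MCI_CTRL_STATUS_REG_NUM) |
+ * MCI_INDIRECT_CTRL_LOCAL_PKT | MCI_INDIRECT_CTRL_READ_CMD, built from the
+ * macros below - see mci_get_link_status().
+ */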
+#define MCI_INDIRECT_CTRL_READ_CMD		0x1
+#define MCI_INDIRECT_CTRL_ASSIGN_CMD		0x2
+#define MCI_INDIRECT_CTRL_CIRCULAR_CMD		0x3
+#define MCI_INDIRECT_CTRL_LOCAL_PKT		(1 << 5)
+#define MCI_INDIRECT_CTRL_CMD_DONE_OFFSET	6
+#define MCI_INDIRECT_CTRL_CMD_DONE		\
+				(1 << MCI_INDIRECT_CTRL_CMD_DONE_OFFSET)
+#define MCI_INDIRECT_CTRL_DATA_READY_OFFSET	7
+#define MCI_INDIRECT_CTRL_DATA_READY		\
+				(1 << MCI_INDIRECT_CTRL_DATA_READY_OFFSET)
+#define MCI_INDIRECT_CTRL_HOPID_OFFSET		8
+#define MCI_INDIRECT_CTRL_HOPID(id)		\
+			(((id) & 0xFF) << MCI_INDIRECT_CTRL_HOPID_OFFSET)
+#define MCI_INDIRECT_CTRL_REG_CHIPID_OFFSET	16
+#define MCI_INDIRECT_REG_CTRL_ADDR(reg_num)	\
+			(reg_num << MCI_INDIRECT_CTRL_REG_CHIPID_OFFSET)
+
+/* Hop ID values */
+#define GID_IHB_PIPE					0
+#define GID_AXI_HB					1
+#define GID_IHB_EXT					2
+
+#define MCI_DID_GLOBAL_ASSIGNMENT_REQUEST_REG		0x2
+/* Target MCi Local ID (LID, which is = self DID) */
+#define MCI_DID_GLOBAL_ASSIGN_REQ_MCI_LOCAL_ID(val)	(((val) & 0xFF) << 16)
+/* Bits [15:8]: Number of MCis on chip of target MCi */
+#define MCI_DID_GLOBAL_ASSIGN_REQ_MCI_COUNT(val)	(((val) & 0xFF) << 8)
+/* Bits [7:0]: Number of hops on chip of target MCi */
+#define MCI_DID_GLOBAL_ASSIGN_REQ_HOPS_NUM(val)		(((val) & 0xFF) << 0)
+
+/* IHB_REG domain registers */
+/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers/
+ * Rx Memory Configuration Register (RX_MEM_CFG)
+ */
+#define MCI_CTRL_RX_MEM_CFG_REG_NUM			0x0
+#define MCI_CTRL_RX_TX_MEM_CFG_RQ_THRESH(val)		(((val) & 0xFF) << 24)
+#define MCI_CTRL_RX_TX_MEM_CFG_PQ_THRESH(val)		(((val) & 0xFF) << 16)
+#define MCI_CTRL_RX_TX_MEM_CFG_NQ_THRESH(val)		(((val) & 0xFF) << 8)
+#define MCI_CTRL_RX_TX_MEM_CFG_DELTA_THRESH(val)	(((val) & 0xF) << 4)
+#define MCI_CTRL_RX_TX_MEM_CFG_RTC(val)			(((val) & 0x3) << 2)
+#define MCI_CTRL_RX_TX_MEM_CFG_WTC(val)			(((val) & 0x3) << 0)
+#define MCI_CTRL_RX_MEM_CFG_REG_DEF_CP_VAL		\
+				(MCI_CTRL_RX_TX_MEM_CFG_RQ_THRESH(0x07) | \
+				MCI_CTRL_RX_TX_MEM_CFG_PQ_THRESH(0x3f) | \
+				MCI_CTRL_RX_TX_MEM_CFG_NQ_THRESH(0x3f) | \
+				MCI_CTRL_RX_TX_MEM_CFG_DELTA_THRESH(0xf) | \
+				MCI_CTRL_RX_TX_MEM_CFG_RTC(1) | \
+				MCI_CTRL_RX_TX_MEM_CFG_WTC(1))
+
+#define MCI_CTRL_RX_MEM_CFG_REG_DEF_AP_VAL		\
+				(MCI_CTRL_RX_TX_MEM_CFG_RQ_THRESH(0x3f) | \
+				MCI_CTRL_RX_TX_MEM_CFG_PQ_THRESH(0x03) | \
+				MCI_CTRL_RX_TX_MEM_CFG_NQ_THRESH(0x3f) | \
+				MCI_CTRL_RX_TX_MEM_CFG_DELTA_THRESH(0xf) | \
+				MCI_CTRL_RX_TX_MEM_CFG_RTC(1) | \
+				MCI_CTRL_RX_TX_MEM_CFG_WTC(1))
+
+
+/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers/
+ * Tx Memory Configuration Register (TX_MEM_CFG)
+ */
+#define MCI_CTRL_TX_MEM_CFG_REG_NUM			0x1
+/* field mapping for TX mem config register
+ * are the same as for RX register - see register above
+ */
+#define MCI_CTRL_TX_MEM_CFG_REG_DEF_VAL			\
+				(MCI_CTRL_RX_TX_MEM_CFG_RQ_THRESH(0x20) | \
+				MCI_CTRL_RX_TX_MEM_CFG_PQ_THRESH(0x20) | \
+				MCI_CTRL_RX_TX_MEM_CFG_NQ_THRESH(0x20) | \
+				MCI_CTRL_RX_TX_MEM_CFG_DELTA_THRESH(2) | \
+				MCI_CTRL_RX_TX_MEM_CFG_RTC(1) | \
+				MCI_CTRL_RX_TX_MEM_CFG_WTC(1))
+
+/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers
+ * /IHB Link CRC Control
+ */
+/* MCi Link CRC Control Register (MCi_CRC_CTRL) */
+#define MCI_LINK_CRC_CTRL_REG_NUM			0x4
+
+/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers
+ * /IHB Status Register
+ */
+/* MCi Status Register (MCi_STS) */
+#define MCI_CTRL_STATUS_REG_NUM				0x5
+#define MCI_CTRL_STATUS_REG_PHY_READY			(1 << 12)
+#define MCI_CTRL_STATUS_REG_LINK_PRESENT		(1 << 15)
+#define MCI_CTRL_STATUS_REG_PHY_CID_VIO_OFFSET		24
+#define MCI_CTRL_STATUS_REG_PHY_CID_VIO_MASK		\
+				(0xF << MCI_CTRL_STATUS_REG_PHY_CID_VIO_OFFSET)
+/* Expected successful Link result, including reserved bit */
+#define MCI_CTRL_PHY_READY		(MCI_CTRL_STATUS_REG_PHY_READY | \
+					MCI_CTRL_STATUS_REG_LINK_PRESENT | \
+					MCI_CTRL_STATUS_REG_PHY_CID_VIO_MASK)
+
+/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers/
+ * MCi PHY Speed Settings Register (MCi_PHY_SETTING)
+ */
+#define MCI_CTRL_MCI_PHY_SETTINGS_REG_NUM		0x8
+#define MCI_CTRL_MCI_PHY_SET_DLO_FIFO_FULL_TRESH(val)	(((val) & 0xF) << 28)
+#define MCI_CTRL_MCI_PHY_SET_PHY_MAX_SPEED(val)		(((val) & 0xF) << 12)
+#define MCI_CTRL_MCI_PHY_SET_PHYCLK_SEL(val)		(((val) & 0xF) << 8)
+#define MCI_CTRL_MCI_PHY_SET_REFCLK_FREQ_SEL(val)	(((val) & 0xF) << 4)
+#define MCI_CTRL_MCI_PHY_SET_AUTO_LINK_EN(val)		(((val) & 0x1) << 1)
+#define MCI_CTRL_MCI_PHY_SET_REG_DEF_VAL		\
+			(MCI_CTRL_MCI_PHY_SET_DLO_FIFO_FULL_TRESH(0x3) | \
+			MCI_CTRL_MCI_PHY_SET_PHY_MAX_SPEED(0x3) | \
+			MCI_CTRL_MCI_PHY_SET_PHYCLK_SEL(0x2) | \
+			MCI_CTRL_MCI_PHY_SET_REFCLK_FREQ_SEL(0x1))
+#define MCI_CTRL_MCI_PHY_SET_REG_DEF_VAL2		\
+			(MCI_CTRL_MCI_PHY_SET_DLO_FIFO_FULL_TRESH(0x3) | \
+			MCI_CTRL_MCI_PHY_SET_PHY_MAX_SPEED(0x3) | \
+			MCI_CTRL_MCI_PHY_SET_PHYCLK_SEL(0x5) | \
+			MCI_CTRL_MCI_PHY_SET_REFCLK_FREQ_SEL(0x1))
+
+/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers
+ * /IHB Mode Config
+ */
+#define MCI_CTRL_IHB_MODE_CFG_REG_NUM			0x25
+#define MCI_CTRL_IHB_MODE_HBCLK_DIV(val)		((val) & 0xFF)
+#define MCI_CTRL_IHB_MODE_CHUNK_MOD_OFFSET		8
+#define MCI_CTRL_IHB_MODE_CHUNK_MOD			\
+				(1 << MCI_CTRL_IHB_MODE_CHUNK_MOD_OFFSET)
+#define MCI_CTRL_IHB_MODE_FWD_MOD_OFFSET		9
+#define MCI_CTRL_IHB_MODE_FWD_MOD			\
+				(1 << MCI_CTRL_IHB_MODE_FWD_MOD_OFFSET)
+#define MCI_CTRL_IHB_MODE_SEQFF_FINE_MOD(val)		(((val) & 0xF) << 12)
+#define MCI_CTRL_IHB_MODE_RX_COMB_THRESH(val)		(((val) & 0xFF) << 16)
+#define MCI_CTRL_IHB_MODE_TX_COMB_THRESH(val)		(((val) & 0xFF) << 24)
+
+#define MCI_CTRL_IHB_MODE_CFG_REG_DEF_VAL		\
+				(MCI_CTRL_IHB_MODE_HBCLK_DIV(6) | \
+				MCI_CTRL_IHB_MODE_FWD_MOD | \
+				MCI_CTRL_IHB_MODE_SEQFF_FINE_MOD(0xF) | \
+				MCI_CTRL_IHB_MODE_RX_COMB_THRESH(0x3f) | \
+				MCI_CTRL_IHB_MODE_TX_COMB_THRESH(0x40))
+/* AXI_HB registers */
+#define MCI_AXI_ACCESS_DATA_REG_NUM			0x0
+#define MCI_AXI_ACCESS_PCIE_MODE			1
+#define MCI_AXI_ACCESS_CACHE_CHECK_OFFSET		5
+#define MCI_AXI_ACCESS_CACHE_CHECK			\
+				(1 << MCI_AXI_ACCESS_CACHE_CHECK_OFFSET)
+#define MCI_AXI_ACCESS_FORCE_POST_WR_OFFSET		6
+#define MCI_AXI_ACCESS_FORCE_POST_WR			\
+				(1 << MCI_AXI_ACCESS_FORCE_POST_WR_OFFSET)
+#define MCI_AXI_ACCESS_DISABLE_CLK_GATING_OFFSET	9
+#define MCI_AXI_ACCESS_DISABLE_CLK_GATING		\
+				(1 << MCI_AXI_ACCESS_DISABLE_CLK_GATING_OFFSET)
+
+/* /HB /Units /HB_REG /HB_REGHopping Bus Registers
+ * /Window 0 Address Mask Register
+ */
+#define MCI_HB_CTRL_WIN0_ADDRESS_MASK_REG_NUM		0x2
+
+/* /HB /Units /HB_REG /HB_REGHopping Bus Registers
+ * /Window 0 Destination Register
+ */
+#define MCI_HB_CTRL_WIN0_DESTINATION_REG_NUM		0x3
+#define MCI_HB_CTRL_WIN0_DEST_VALID_FLAG(val)		(((val) & 0x1) << 16)
+#define MCI_HB_CTRL_WIN0_DEST_ID(val)			(((val) & 0xFF) << 0)
+
+/* /HB /Units /HB_REG /HB_REGHopping Bus Registers /Tx Control Register */
+#define MCI_HB_CTRL_TX_CTRL_REG_NUM			0xD
+#define MCI_HB_CTRL_TX_CTRL_PCIE_MODE_OFFSET		24
+#define MCI_HB_CTRL_TX_CTRL_PCIE_MODE			\
+				(1 << MCI_HB_CTRL_TX_CTRL_PCIE_MODE_OFFSET)
+#define MCI_HB_CTRL_TX_CTRL_PRI_TH_QOS(val)		(((val) & 0xF) << 12)
+#define MCI_HB_CTRL_TX_CTRL_MAX_RD_CNT(val)		(((val) & 0x1F) << 6)
+#define MCI_HB_CTRL_TX_CTRL_MAX_WR_CNT(val)		(((val) & 0x1F) << 0)
+
+/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers
+ * /IHB Version Control Register
+ */
+#define MCI_PHY_CTRL_REG_NUM				0x7
+#define MCI_PHY_CTRL_MCI_MINOR				0x8 /* BITS [3:0] */
+#define MCI_PHY_CTRL_MCI_MAJOR_OFFSET			4
+#define MCI_PHY_CTRL_MCI_MAJOR				\
+				(1 << MCI_PHY_CTRL_MCI_MAJOR_OFFSET)
+#define MCI_PHY_CTRL_MCI_SLEEP_REQ_OFFSET		11
+#define MCI_PHY_CTRL_MCI_SLEEP_REQ			\
+				(1 << MCI_PHY_CTRL_MCI_SLEEP_REQ_OFFSET)
+/* Host=1 / Device=0 PHY mode */
+#define MCI_PHY_CTRL_MCI_PHY_MODE_OFFSET		24
+#define MCI_PHY_CTRL_MCI_PHY_MODE_HOST			\
+				(1 << MCI_PHY_CTRL_MCI_PHY_MODE_OFFSET)
+/* Register=1 / PWM=0 interface */
+#define MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE_OFFSET		25
+#define MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE		\
+				(1 << MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE_OFFSET)
+ /* PHY code InReset=1 */
+#define MCI_PHY_CTRL_MCI_PHY_RESET_CORE_OFFSET		26
+#define MCI_PHY_CTRL_MCI_PHY_RESET_CORE			\
+				(1 << MCI_PHY_CTRL_MCI_PHY_RESET_CORE_OFFSET)
+#define MCI_PHY_CTRL_PHY_ADDR_MSB_OFFSET		27
+#define MCI_PHY_CTRL_PHY_ADDR_MSB(addr)			\
+				(((addr) & 0x3) << \
+				MCI_PHY_CTRL_PHY_ADDR_MSB_OFFSET)
+#define MCI_PHY_CTRL_PIDI_MODE_OFFSET			31
+#define MCI_PHY_CTRL_PIDI_MODE				\
+				(1 << MCI_PHY_CTRL_PIDI_MODE_OFFSET)
+
+/* Number of times to wait for the MCI link ready after MCI configurations.
+ * Normally it takes 34-35 successive reads.
+ */
+#define LINK_READY_TIMEOUT				100
+
+enum mci_register_type {
+	MCI_REG_TYPE_PHY = 0,
+	MCI_REG_TYPE_CTRL,
+};
+
+enum {
+	MCI_CMD_WRITE,
+	MCI_CMD_READ
+};
+
+/* Write wrapper callback for debug:
+ * will print written data in case LOG_LEVEL >= 40
+ */
+static void mci_mmio_write_32(uintptr_t addr, uint32_t value)
+{
+	VERBOSE("Write:\t0x%x = 0x%x\n", (uint32_t)addr, value);
+	mmio_write_32(addr, value);
+}
+/* Read wrapper callback for debug:
+ * will print read data in case LOG_LEVEL >= 40
+ */
+static uint32_t mci_mmio_read_32(uintptr_t addr)
+{
+	uint32_t value;
+
+	value = mmio_read_32(addr);
+	VERBOSE("Read:\t0x%x = 0x%x\n", (uint32_t)addr, value);
+	return value;
+}
+
+/* MCI indirect access command completion polling:
+ * Each write/read command done via MCI indirect registers must be polled
+ * for command completion status.
+ *
+ * Returns 1 in case of error.
+ * Returns 0 if the command completed successfully.
+ */
+static int mci_poll_command_completion(int mci_index, int command_type)
+{
+	uint32_t mci_cmd_value = 0, retry_count = 100, ret = 0;
+	uint32_t completion_flags = MCI_INDIRECT_CTRL_CMD_DONE;
+
+	debug_enter();
+	/* Read commands require validating that requested data is ready */
+	if (command_type == MCI_CMD_READ)
+		completion_flags |= MCI_INDIRECT_CTRL_DATA_READY;
+
+	do {
+		/* wait 1 ms before each polling */
+		mdelay(1);
+		mci_cmd_value = mci_mmio_read_32(MCI_ACCESS_CMD_REG(mci_index));
+	} while (((mci_cmd_value & completion_flags) != completion_flags) &&
+			 (--retry_count > 0));
+
+	if (retry_count == 0) {
+		ERROR("%s: MCI command timeout (command status = 0x%x)\n",
+		      __func__, mci_cmd_value);
+		ret = 1;
+	}
+
+	debug_exit();
+	return ret;
+}
+
+int mci_read(int mci_idx, uint32_t cmd, uint32_t *value)
+{
+	int rval;
+
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_idx), cmd);
+
+	rval = mci_poll_command_completion(mci_idx, MCI_CMD_READ);
+
+	*value = mci_mmio_read_32(MCI_WRITE_READ_DATA_REG(mci_idx));
+
+	return rval;
+}
+
+int mci_write(int mci_idx, uint32_t cmd, uint32_t data)
+{
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_idx), data);
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_idx), cmd);
+
+	return mci_poll_command_completion(mci_idx, MCI_CMD_WRITE);
+}
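+
+/* Example (see mci_turn_link_on() below): enable the auto-link feature by
+ * writing the MCi PHY Settings register of the local MCI unit:
+ *	cmd = MCI_INDIRECT_REG_CTRL_ADDR(MCI_CTRL_MCI_PHY_SETTINGS_REG_NUM) |
+ *	      MCI_INDIRECT_CTRL_LOCAL_PKT;
+ *	data = MCI_CTRL_MCI_PHY_SET_REG_DEF_VAL2 |
+ *	       MCI_CTRL_MCI_PHY_SET_AUTO_LINK_EN(1);
+ *	mci_write(0, cmd, data);
+ */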
+
+/* Perform 3 configurations in one command: PCIe mode,
+ * queue separation and the cache bit
+ */
+static int mci_axi_set_pcie_mode(int mci_index)
+{
+	uint32_t reg_data, ret = 1;
+
+	debug_enter();
+	/* This configuration makes the MCI IP behave consistently with the
+	 * AXI protocol.
+	 * It should be configured at one side only (for example locally at AP).
+	 * The IP takes care of performing the same configurations at MCI on
+	 * another side (for example remotely at CP).
+	 */
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+			  MCI_AXI_ACCESS_PCIE_MODE |
+			  MCI_AXI_ACCESS_CACHE_CHECK |
+			  MCI_AXI_ACCESS_FORCE_POST_WR |
+			  MCI_AXI_ACCESS_DISABLE_CLK_GATING);
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+			  MCI_INDIRECT_REG_CTRL_ADDR(
+				MCI_AXI_ACCESS_DATA_REG_NUM)  |
+			  MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB) |
+			  MCI_INDIRECT_CTRL_LOCAL_PKT |
+			  MCI_INDIRECT_CTRL_CIRCULAR_CMD);
+
+	/* if Write command was successful, verify PCIe mode */
+	if (mci_poll_command_completion(mci_index, MCI_CMD_WRITE) == 0) {
+		/* Verify the PCIe mode selected */
+		mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+				  MCI_INDIRECT_REG_CTRL_ADDR(
+					MCI_HB_CTRL_TX_CTRL_REG_NUM)  |
+				  MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB) |
+				  MCI_INDIRECT_CTRL_LOCAL_PKT |
+				  MCI_INDIRECT_CTRL_READ_CMD);
+		/* if read was completed, verify PCIe mode */
+		if (mci_poll_command_completion(mci_index, MCI_CMD_READ) == 0) {
+			reg_data = mci_mmio_read_32(
+					MCI_WRITE_READ_DATA_REG(mci_index));
+			if (reg_data & MCI_HB_CTRL_TX_CTRL_PCIE_MODE)
+				ret = 0;
+		}
+	}
+
+	debug_exit();
+	return ret;
+}
+
+/* Reduce sequence FIFO timer expiration threshold */
+static int mci_axi_set_fifo_thresh(int mci_index)
+{
+	uint32_t reg_data, ret = 0;
+
+	debug_enter();
+	/* This configuration reduces sequence FIFO timer expiration threshold
+	 * (to 0x7 instead of 0xA).
+	 * In MCI version 1.6 this configuration prevents possible functional
+	 * issues.
+	 * In version 1.82 it prevents performance degradation.
+	 */
+
+	/* Configure local AP side */
+	reg_data = MCI_PHY_CTRL_PIDI_MODE |
+		   MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE |
+		   MCI_PHY_CTRL_MCI_PHY_MODE_HOST |
+		   MCI_PHY_CTRL_MCI_MAJOR |
+		   MCI_PHY_CTRL_MCI_MINOR;
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), reg_data);
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+			  MCI_INDIRECT_REG_CTRL_ADDR(MCI_PHY_CTRL_REG_NUM) |
+			  MCI_INDIRECT_CTRL_LOCAL_PKT);
+	ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+	/* Reduce the threshold */
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+			  MCI_CTRL_IHB_MODE_CFG_REG_DEF_VAL);
+
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+			  MCI_INDIRECT_REG_CTRL_ADDR(
+				MCI_CTRL_IHB_MODE_CFG_REG_NUM) |
+			  MCI_INDIRECT_CTRL_LOCAL_PKT);
+	ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+	/* Exit PIDI mode */
+	reg_data = MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE |
+		   MCI_PHY_CTRL_MCI_PHY_MODE_HOST |
+		   MCI_PHY_CTRL_MCI_MAJOR |
+		   MCI_PHY_CTRL_MCI_MINOR;
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), reg_data);
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+			  MCI_INDIRECT_REG_CTRL_ADDR(MCI_PHY_CTRL_REG_NUM) |
+			  MCI_INDIRECT_CTRL_LOCAL_PKT);
+	ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+	/* Configure remote CP side */
+	reg_data = MCI_PHY_CTRL_PIDI_MODE |
+		   MCI_PHY_CTRL_MCI_MAJOR |
+		   MCI_PHY_CTRL_MCI_MINOR |
+		   MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE;
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), reg_data);
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+			  MCI_INDIRECT_REG_CTRL_ADDR(MCI_PHY_CTRL_REG_NUM) |
+			  MCI_CTRL_IHB_MODE_FWD_MOD);
+	ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+	/* Reduce the threshold */
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+			  MCI_CTRL_IHB_MODE_CFG_REG_DEF_VAL);
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+			  MCI_INDIRECT_REG_CTRL_ADDR(
+				MCI_CTRL_IHB_MODE_CFG_REG_NUM) |
+			  MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT));
+	ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+	/* Exit PIDI mode */
+	reg_data = MCI_PHY_CTRL_MCI_MAJOR |
+		   MCI_PHY_CTRL_MCI_MINOR |
+		   MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE;
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), reg_data);
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+			  MCI_INDIRECT_REG_CTRL_ADDR(MCI_PHY_CTRL_REG_NUM) |
+			  MCI_CTRL_IHB_MODE_FWD_MOD);
+
+	ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+	debug_exit();
+	return ret;
+}
+
+/* Configure:
+ * 1. AP & CP TX thresholds and delta configurations
+ * 2. DLO & DLI FIFO full threshold
+ * 3. RX thresholds and delta configurations
+ * 4. CP AR and AW outstanding
+ * 5. AP AR and AW outstanding
+ */
+static int mci_axi_set_fifo_rx_tx_thresh(int mci_index)
+{
+	uint32_t ret = 0;
+
+	debug_enter();
+	/* AP TX thresholds and delta configurations (IHB_reg 0x1) */
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+			  MCI_CTRL_TX_MEM_CFG_REG_DEF_VAL);
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+			  MCI_INDIRECT_REG_CTRL_ADDR(
+				MCI_CTRL_TX_MEM_CFG_REG_NUM) |
+			  MCI_INDIRECT_CTRL_LOCAL_PKT);
+	ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+	/* CP TX thresholds and delta configurations (IHB_reg 0x1) */
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+			  MCI_CTRL_TX_MEM_CFG_REG_DEF_VAL);
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+			  MCI_INDIRECT_REG_CTRL_ADDR(
+				MCI_CTRL_TX_MEM_CFG_REG_NUM) |
+			  MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT));
+	ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+	/* AP DLO & DLI FIFO full threshold & Auto-Link enable (IHB_reg 0x8) */
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+			  MCI_CTRL_MCI_PHY_SET_REG_DEF_VAL |
+			  MCI_CTRL_MCI_PHY_SET_AUTO_LINK_EN(1));
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+			  MCI_INDIRECT_REG_CTRL_ADDR(
+				MCI_CTRL_MCI_PHY_SETTINGS_REG_NUM) |
+			  MCI_INDIRECT_CTRL_LOCAL_PKT);
+	ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+	/* CP DLO & DLI FIFO full threshold (IHB_reg 0x8) */
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+			  MCI_CTRL_MCI_PHY_SET_REG_DEF_VAL);
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+			  MCI_INDIRECT_REG_CTRL_ADDR(
+				MCI_CTRL_MCI_PHY_SETTINGS_REG_NUM) |
+			  MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT));
+	ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+	/* AP RX thresholds and delta configurations (IHB_reg 0x0) */
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+			  MCI_CTRL_RX_MEM_CFG_REG_DEF_AP_VAL);
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+			  MCI_INDIRECT_REG_CTRL_ADDR(
+				MCI_CTRL_RX_MEM_CFG_REG_NUM) |
+			  MCI_INDIRECT_CTRL_LOCAL_PKT);
+	ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+	/* CP RX thresholds and delta configurations (IHB_reg 0x0) */
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+			  MCI_CTRL_RX_MEM_CFG_REG_DEF_CP_VAL);
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+			  MCI_INDIRECT_REG_CTRL_ADDR(
+				MCI_CTRL_RX_MEM_CFG_REG_NUM) |
+			  MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT));
+	ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+	/* AP AR & AW maximum AXI outstanding request cfg (HB_reg 0xd) */
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+			  MCI_HB_CTRL_TX_CTRL_PRI_TH_QOS(8) |
+			  MCI_HB_CTRL_TX_CTRL_MAX_RD_CNT(3) |
+			  MCI_HB_CTRL_TX_CTRL_MAX_WR_CNT(3));
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+			  MCI_INDIRECT_REG_CTRL_ADDR(
+				MCI_HB_CTRL_TX_CTRL_REG_NUM) |
+			  MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB) |
+			  MCI_INDIRECT_CTRL_LOCAL_PKT);
+	ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+	/* CP AR & AW maximum AXI outstanding request cfg (HB_reg 0xd) */
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index),
+			  MCI_HB_CTRL_TX_CTRL_PRI_TH_QOS(8) |
+			  MCI_HB_CTRL_TX_CTRL_MAX_RD_CNT(0xB) |
+			  MCI_HB_CTRL_TX_CTRL_MAX_WR_CNT(0x11));
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index),
+			  MCI_INDIRECT_REG_CTRL_ADDR(
+				MCI_HB_CTRL_TX_CTRL_REG_NUM) |
+			  MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT) |
+			  MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB));
+	ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+	debug_exit();
+	return ret;
+}
+
+/* Configure MCI to allow read & write transactions to arrive at the same time.
+ * Without the configuration below, MCI won't send a response to the CPU for
+ * transactions that arrive simultaneously, which leads to a CPU hang.
+ * The configuration below enables MCI to pass transactions from/to CP/AP.
+ */
+static int mci_enable_simultaneous_transactions(int mci_index)
+{
+	uint32_t ret = 0;
+
+	debug_enter();
+	/* ID assignment (assigning global ID offset to CP) */
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(0),
+			  MCI_DID_GLOBAL_ASSIGN_REQ_MCI_LOCAL_ID(2) |
+			  MCI_DID_GLOBAL_ASSIGN_REQ_MCI_COUNT(2) |
+			  MCI_DID_GLOBAL_ASSIGN_REQ_HOPS_NUM(2));
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(0),
+			  MCI_INDIRECT_REG_CTRL_ADDR(
+				MCI_DID_GLOBAL_ASSIGNMENT_REQUEST_REG) |
+			  MCI_INDIRECT_CTRL_ASSIGN_CMD);
+	ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+	/* Assigning dest. ID=3 to all transactions entering from AXI at AP */
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(0),
+			  MCI_HB_CTRL_WIN0_DEST_VALID_FLAG(1) |
+			  MCI_HB_CTRL_WIN0_DEST_ID(3));
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(0),
+			  MCI_INDIRECT_REG_CTRL_ADDR(
+				MCI_HB_CTRL_WIN0_DESTINATION_REG_NUM) |
+			  MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB) |
+			  MCI_INDIRECT_CTRL_LOCAL_PKT);
+	ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+	/* Assigning dest. ID=1 to all transactions entering from AXI at CP */
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(0),
+			  MCI_HB_CTRL_WIN0_DEST_VALID_FLAG(1) |
+			  MCI_HB_CTRL_WIN0_DEST_ID(1));
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(0),
+			  MCI_INDIRECT_REG_CTRL_ADDR(
+				MCI_HB_CTRL_WIN0_DESTINATION_REG_NUM) |
+			  MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT) |
+			  MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB));
+	ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+	/* End address to all transactions entering from AXI at AP.
+	 * This will lead to get match for any AXI address
+	 * and receive destination ID=3
+	 */
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(0), 0xffffffff);
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(0),
+			  MCI_INDIRECT_REG_CTRL_ADDR(
+				MCI_HB_CTRL_WIN0_ADDRESS_MASK_REG_NUM) |
+			  MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB) |
+			  MCI_INDIRECT_CTRL_LOCAL_PKT);
+	ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+	/* End address to all transactions entering from AXI at CP.
+	 * This will lead to get match for any AXI address
+	 * and receive destination ID=1
+	 */
+	mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(0), 0xffffffff);
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(0),
+			  MCI_INDIRECT_REG_CTRL_ADDR(
+				MCI_HB_CTRL_WIN0_ADDRESS_MASK_REG_NUM) |
+			  MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT) |
+			  MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB));
+	ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE);
+
+	debug_exit();
+	return ret;
+}
+
+/* Check if MCI simultaneous transactions were already enabled.
+ * Currently the bootrom performs this MCI configuration only when the boot
+ * source is SAR_MCIX4; in other cases it should be done at this stage.
+ * Note that when booting from UART the bootrom flow is different and this
+ * MCI initialization is skipped even if the boot source is SAR_MCIX4.
+ * Therefore the check is based on the relevant MCI register content: if the
+ * register contains 0x0, the bootrom did not perform the required MCI
+ * configuration.
+ *
+ * Returns:
+ * 0 - configuration already done
+ * 1 - configuration missing
+ */
+static _Bool mci_simultaneous_trans_missing(int mci_index)
+{
+	uint32_t reg, ret;
+
+	/* read 'Window 0 Destination ID assignment' from HB register 0x3
+	 * (TX_CFG_W0_DST_ID) to check whether ID assignment was already
+	 * performed by BootROM.
+	 */
+	debug_enter();
+	mci_mmio_write_32(MCI_ACCESS_CMD_REG(0),
+			  MCI_INDIRECT_REG_CTRL_ADDR(
+				MCI_HB_CTRL_WIN0_DESTINATION_REG_NUM) |
+			  MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB) |
+			  MCI_INDIRECT_CTRL_LOCAL_PKT |
+			  MCI_INDIRECT_CTRL_READ_CMD);
+	ret = mci_poll_command_completion(mci_index, MCI_CMD_READ);
+
+	reg = mci_mmio_read_32(MCI_WRITE_READ_DATA_REG(mci_index));
+
+	if (ret)
+		ERROR("Failed to verify MCI simultaneous read/write status\n");
+
+	debug_exit();
+	/* The default ID assignment is 0, so if the register is not zero,
+	 * the bootrom already performed the required configuration.
+	 */
+	if (reg != 0)
+		return 0;
+
+	return 1;
+}
+
+/* For A1 revision, configure the MCI link for performance improvement:
+ * - set MCI to support read/write transactions to arrive at the same time
+ * - Switch AXI to PCIe mode
+ * - Reduce sequence FIFO threshold
+ * - Configure RX/TX FIFO thresholds
+ *
+ *   Note:
+ *   We don't exit on an error code from any subroutine, in order to try
+ *   (best effort) to complete the MCI configuration.
+ *   (If we exit, the bootloader will surely fail to boot.)
+ */
+int mci_configure(int mci_index)
+{
+	int rval;
+
+	debug_enter();
+	/* According to design guidelines the MCI simultaneous transaction
+	 * shouldn't be enabled more than once - therefore make sure that it
+	 * wasn't already enabled by the bootrom.
+	 */
+	if (mci_simultaneous_trans_missing(mci_index)) {
+		VERBOSE("Enabling MCI simultaneous transaction\n");
+		/* set MCI to support read/write transactions
+		 * to arrive at the same time
+		 */
+		rval = mci_enable_simultaneous_transactions(mci_index);
+		if (rval)
+			ERROR("Failed to set MCI simultaneous read/write\n");
+	} else
+		VERBOSE("Skip MCI ID assignment - already done by bootrom\n");
+
+	/* Configure MCI for more consistent behavior with AXI protocol */
+	rval = mci_axi_set_pcie_mode(mci_index);
+	if (rval)
+		ERROR("Failed to set MCI to AXI PCIe mode\n");
+
+	/* reduce FIFO global threshold */
+	rval = mci_axi_set_fifo_thresh(mci_index);
+	if (rval)
+		ERROR("Failed to set MCI FIFO global threshold\n");
+
+	/* configure RX/TX FIFO thresholds */
+	rval = mci_axi_set_fifo_rx_tx_thresh(mci_index);
+	if (rval)
+		ERROR("Failed to set MCI RX/TX FIFO threshold\n");
+
+	debug_exit();
+	return 1;
+}
+
+int mci_get_link_status(void)
+{
+	uint32_t cmd, data;
+
+	cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_CTRL_STATUS_REG_NUM) |
+		MCI_INDIRECT_CTRL_LOCAL_PKT | MCI_INDIRECT_CTRL_READ_CMD);
+	if (mci_read(0, cmd, &data)) {
+		ERROR("Failed to read status register\n");
+		return -1;
+	}
+
+	/* Check if the link is ready */
+	if (data != MCI_CTRL_PHY_READY) {
+		ERROR("Bad link status %x\n", data);
+		return -1;
+	}
+
+	return 0;
+}
+
+void mci_turn_link_down(void)
+{
+	uint32_t cmd, data;
+	int rval = 0;
+
+	debug_enter();
+
+	/* Turn off auto-link */
+	cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_CTRL_MCI_PHY_SETTINGS_REG_NUM) |
+			MCI_INDIRECT_CTRL_LOCAL_PKT);
+	data = (MCI_CTRL_MCI_PHY_SET_REG_DEF_VAL2 |
+		MCI_CTRL_MCI_PHY_SET_AUTO_LINK_EN(0));
+	rval = mci_write(0, cmd, data);
+	if (rval)
+		ERROR("Failed to turn off auto-link\n");
+
+	/* Reset AP PHY */
+	cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_PHY_CTRL_REG_NUM) |
+		MCI_INDIRECT_CTRL_LOCAL_PKT);
+	data = (MCI_PHY_CTRL_MCI_MINOR |
+		MCI_PHY_CTRL_MCI_MAJOR |
+		MCI_PHY_CTRL_MCI_PHY_MODE_HOST |
+		MCI_PHY_CTRL_MCI_PHY_RESET_CORE);
+	rval = mci_write(0, cmd, data);
+	if (rval)
+		ERROR("Failed to reset AP PHY\n");
+
+	/* Clear all status & CRC values */
+	cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_LINK_CRC_CTRL_REG_NUM) |
+	       MCI_INDIRECT_CTRL_LOCAL_PKT);
+	data = 0x0;
+	mci_write(0, cmd, data);
+	cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_CTRL_STATUS_REG_NUM) |
+	       MCI_INDIRECT_CTRL_LOCAL_PKT);
+	data = 0x0;
+	rval = mci_write(0, cmd, data);
+	if (rval)
+		ERROR("Failed to clear the MCI status and CRC registers\n");
+
+	/* Wait 5ms before un-resetting the PHY */
+	mdelay(5);
+
+	/* Un-reset AP PHY */
+	cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_PHY_CTRL_REG_NUM) |
+	       MCI_INDIRECT_CTRL_LOCAL_PKT);
+	data = (MCI_PHY_CTRL_MCI_MINOR | MCI_PHY_CTRL_MCI_MAJOR |
+		MCI_PHY_CTRL_MCI_PHY_MODE_HOST);
+	rval = mci_write(0, cmd, data);
+	if (rval)
+		ERROR("Failed to un-reset AP PHY\n");
+
+	debug_exit();
+}
+
+void mci_turn_link_on(void)
+{
+	uint32_t cmd, data;
+	int rval = 0;
+
+	debug_enter();
+	/* Turn on auto-link */
+	cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_CTRL_MCI_PHY_SETTINGS_REG_NUM) |
+			MCI_INDIRECT_CTRL_LOCAL_PKT);
+	data = (MCI_CTRL_MCI_PHY_SET_REG_DEF_VAL2 |
+		MCI_CTRL_MCI_PHY_SET_AUTO_LINK_EN(1));
+	rval = mci_write(0, cmd, data);
+	if (rval)
+		ERROR("Failed to turn on auto-link\n");
+
+	debug_exit();
+}
+
+/* Initialize MCI for performance improvements */
+int mci_initialize(int mci_index)
+{
+	int ret;
+
+	debug_enter();
+	INFO("MCI%d initialization:\n", mci_index);
+
+	ret = mci_configure(mci_index);
+
+	debug_exit();
+	return ret;
+}
diff --git a/drivers/marvell/mochi/ap807_setup.c b/drivers/marvell/mochi/ap807_setup.c
new file mode 100644
index 0000000..075ca31
--- /dev/null
+++ b/drivers/marvell/mochi/ap807_setup.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:	BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* AP807 Marvell SoC driver */
+
+#include <ap_setup.h>
+#include <cache_llc.h>
+#include <ccu.h>
+#include <debug.h>
+#include <io_win.h>
+#include <mci.h>
+#include <mmio.h>
+#include <mvebu_def.h>
+
+#define SMMU_sACR				(MVEBU_SMMU_BASE + 0x10)
+#define SMMU_sACR_PG_64K			(1 << 16)
+
+#define CCU_GSPMU_CR				(MVEBU_CCU_BASE(MVEBU_AP0) \
+								+ 0x3F0)
+#define GSPMU_CPU_CONTROL			(0x1 << 0)
+
+#define CCU_HTC_CR				(MVEBU_CCU_BASE(MVEBU_AP0) \
+								+ 0x200)
+#define CCU_SET_POC_OFFSET			5
+
+#define DSS_CR0					(MVEBU_RFU_BASE + 0x100)
+#define DVM_48BIT_VA_ENABLE			(1 << 21)
+
+/* Secure MoChi incoming access */
+#define SEC_MOCHI_IN_ACC_REG			(MVEBU_RFU_BASE + 0x4738)
+#define SEC_MOCHI_IN_ACC_IHB0_EN		(1)
+#define SEC_MOCHI_IN_ACC_IHB1_EN		(1 << 3)
+#define SEC_MOCHI_IN_ACC_IHB2_EN		(1 << 6)
+#define SEC_MOCHI_IN_ACC_PIDI_EN		(1 << 9)
+#define SEC_IN_ACCESS_ENA_ALL_MASTERS		(SEC_MOCHI_IN_ACC_IHB0_EN | \
+						 SEC_MOCHI_IN_ACC_IHB1_EN | \
+						 SEC_MOCHI_IN_ACC_IHB2_EN | \
+						 SEC_MOCHI_IN_ACC_PIDI_EN)
+
+/* SYSRST_OUTn Config definitions */
+#define MVEBU_SYSRST_OUT_CONFIG_REG		(MVEBU_MISC_SOC_BASE + 0x4)
+#define WD_MASK_SYS_RST_OUT			(1 << 2)
+
+/* DSS PHY for DRAM */
+#define DSS_SCR_REG				(MVEBU_RFU_BASE + 0x208)
+#define DSS_PPROT_OFFS				4
+#define DSS_PPROT_MASK				0x7
+#define DSS_PPROT_PRIV_SECURE_DATA		0x1
+
+/* Used for Units of AP-807 (e.g. SDIO, etc.) */
+#define MVEBU_AXI_ATTR_BASE			(MVEBU_REGS_BASE + 0x6F4580)
+#define MVEBU_AXI_ATTR_REG(index)		(MVEBU_AXI_ATTR_BASE + \
+							0x4 * index)
+
+enum axi_attr {
+	AXI_SDIO_ATTR = 0,
+	AXI_DFX_ATTR,
+	AXI_MAX_ATTR,
+};
+
+static void ap_sec_masters_access_en(uint32_t enable)
+{
+	uint32_t reg;
+
+	/* Open/Close incoming access for all masters.
+	 * The access is disabled in trusted boot mode.
+	 * This can only be done in EL3.
+	 */
+	reg = mmio_read_32(SEC_MOCHI_IN_ACC_REG);
+	if (enable)
+		mmio_write_32(SEC_MOCHI_IN_ACC_REG, reg |
+			      SEC_IN_ACCESS_ENA_ALL_MASTERS);
+	else
+		mmio_write_32(SEC_MOCHI_IN_ACC_REG,
+			      reg & ~SEC_IN_ACCESS_ENA_ALL_MASTERS);
+}
+
+static void setup_smmu(void)
+{
+	uint32_t reg;
+
+	/* Set the SMMU page size to 64 KB */
+	reg = mmio_read_32(SMMU_sACR);
+	reg |= SMMU_sACR_PG_64K;
+	mmio_write_32(SMMU_sACR, reg);
+}
+
+static void init_aurora2(void)
+{
+	uint32_t reg;
+
+	/* Enable GSPMU control by CPU */
+	reg = mmio_read_32(CCU_GSPMU_CR);
+	reg |= GSPMU_CPU_CONTROL;
+	mmio_write_32(CCU_GSPMU_CR, reg);
+
+#if LLC_ENABLE
+	/* Enable LLC for AP807 in exclusive mode */
+	llc_enable(0, 1);
+
+	/* Set point of coherency to DDR.
+	 * This is required by units which have
+	 * SW cache coherency
+	 */
+	reg = mmio_read_32(CCU_HTC_CR);
+	reg |= (0x1 << CCU_SET_POC_OFFSET);
+	mmio_write_32(CCU_HTC_CR, reg);
+#endif /* LLC_ENABLE */
+}
+
+
+/* MCIx indirect access registers are located by default at
+ * 0xf4000000/0xf6000000. To avoid a conflict with internal registers of units
+ * connected via MCIx, which can be mapped to the same addresses (e.g. the CP1
+ * base is also 0xf4000000), the following routine remaps the MCIx indirect
+ * bases to another address range.
+ */
+static void mci_remap_indirect_access_base(void)
+{
+	uint32_t mci;
+
+	for (mci = 0; mci < MCI_MAX_UNIT_ID; mci++)
+		mmio_write_32(MCIX4_REG_START_ADDRESS_REG(mci),
+				  MVEBU_MCI_REG_BASE_REMAP(mci) >>
+				  MCI_REMAP_OFF_SHIFT);
+}
+
+static void ap807_axi_attr_init(void)
+{
+	uint32_t index, data;
+
+	/* Initialize AXI attributes for AP807 */
+	/* Go over the AXI attributes and set Ax-Cache and Ax-Domain */
+	for (index = 0; index < AXI_MAX_ATTR; index++) {
+		switch (index) {
+		/* DFX works in non-coherent mode only -
+		 * there's no option to configure the Ax-Cache and Ax-Domain
+		 */
+		case AXI_DFX_ATTR:
+			continue;
+		default:
+			/* Set Ax-Cache as cacheable, no allocate, modifiable,
+			 * bufferable.
+			 * The values are different because the Read & Write
+			 * definitions are different in Ax-Cache
+			 */
+			data = mmio_read_32(MVEBU_AXI_ATTR_REG(index));
+			data &= ~MVEBU_AXI_ATTR_ARCACHE_MASK;
+			data |= (CACHE_ATTR_WRITE_ALLOC |
+				 CACHE_ATTR_CACHEABLE   |
+				 CACHE_ATTR_BUFFERABLE) <<
+				 MVEBU_AXI_ATTR_ARCACHE_OFFSET;
+			data &= ~MVEBU_AXI_ATTR_AWCACHE_MASK;
+			data |= (CACHE_ATTR_READ_ALLOC |
+				 CACHE_ATTR_CACHEABLE  |
+				 CACHE_ATTR_BUFFERABLE) <<
+				 MVEBU_AXI_ATTR_AWCACHE_OFFSET;
+			/* Set Ax-Domain as Outer domain */
+			data &= ~MVEBU_AXI_ATTR_ARDOMAIN_MASK;
+			data |= DOMAIN_OUTER_SHAREABLE <<
+				MVEBU_AXI_ATTR_ARDOMAIN_OFFSET;
+			data &= ~MVEBU_AXI_ATTR_AWDOMAIN_MASK;
+			data |= DOMAIN_OUTER_SHAREABLE <<
+				MVEBU_AXI_ATTR_AWDOMAIN_OFFSET;
+			mmio_write_32(MVEBU_AXI_ATTR_REG(index), data);
+		}
+	}
+}
+
+static void misc_soc_configurations(void)
+{
+	uint32_t reg;
+
+	/* Enable 48-bit VA */
+	mmio_setbits_32(DSS_CR0, DVM_48BIT_VA_ENABLE);
+
+	/* Un-mask Watchdog reset from influencing the SYSRST_OUTn.
+	 * Otherwise, upon WD timeout, the WD reset signal won't trigger reset
+	 */
+	reg = mmio_read_32(MVEBU_SYSRST_OUT_CONFIG_REG);
+	reg &= ~(WD_MASK_SYS_RST_OUT);
+	mmio_write_32(MVEBU_SYSRST_OUT_CONFIG_REG, reg);
+}
+
+void ap_init(void)
+{
+	/* Setup Aurora2. */
+	init_aurora2();
+
+	/* configure MCI mapping */
+	mci_remap_indirect_access_base();
+
+	/* configure IO_WIN windows */
+	init_io_win(MVEBU_AP0);
+
+	/* configure CCU windows */
+	init_ccu(MVEBU_AP0);
+
+	/* configure the SMMU */
+	setup_smmu();
+
+	/* Open AP incoming access for all masters */
+	ap_sec_masters_access_en(1);
+
+	/* configure axi for AP */
+	ap807_axi_attr_init();
+
+	/* misc configuration of the SoC */
+	misc_soc_configurations();
+}
+
+static void ap807_dram_phy_access_config(void)
+{
+	uint32_t reg_val;
+	/* Update DSS port access permission to DSS_PHY */
+	reg_val = mmio_read_32(DSS_SCR_REG);
+	reg_val &= ~(DSS_PPROT_MASK << DSS_PPROT_OFFS);
+	reg_val |= ((DSS_PPROT_PRIV_SECURE_DATA & DSS_PPROT_MASK) <<
+		    DSS_PPROT_OFFS);
+	mmio_write_32(DSS_SCR_REG, reg_val);
+}
+
+void ap_ble_init(void)
+{
+	/* Enable DSS port */
+	ap807_dram_phy_access_config();
+}
+
+int ap_get_count(void)
+{
+	return 1;
+}
+
+
diff --git a/drivers/marvell/mochi/apn806_setup.c b/drivers/marvell/mochi/apn806_setup.c
new file mode 100644
index 0000000..1d33be9
--- /dev/null
+++ b/drivers/marvell/mochi/apn806_setup.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* AP806 Marvell SoC driver */
+
+#include <ap_setup.h>
+#include <ccu.h>
+#include <cache_llc.h>
+#include <debug.h>
+#include <io_win.h>
+#include <mci.h>
+#include <mmio.h>
+#include <mvebu_def.h>
+
+#define SMMU_sACR				(MVEBU_SMMU_BASE + 0x10)
+#define SMMU_sACR_PG_64K			(1 << 16)
+
+#define CCU_GSPMU_CR				(MVEBU_CCU_BASE(MVEBU_AP0) + \
+							0x3F0)
+#define GSPMU_CPU_CONTROL			(0x1 << 0)
+
+#define CCU_HTC_CR				(MVEBU_CCU_BASE(MVEBU_AP0) + \
+							0x200)
+#define CCU_SET_POC_OFFSET			5
+
+#define CCU_RGF(win)				(MVEBU_CCU_BASE(MVEBU_AP0) + \
+							0x90 + 4 * (win))
+
+#define DSS_CR0					(MVEBU_RFU_BASE + 0x100)
+#define DVM_48BIT_VA_ENABLE			(1 << 21)
+
+/* Secure MoChi incoming access */
+#define SEC_MOCHI_IN_ACC_REG			(MVEBU_RFU_BASE + 0x4738)
+#define SEC_MOCHI_IN_ACC_IHB0_EN		(1)
+#define SEC_MOCHI_IN_ACC_IHB1_EN		(1 << 3)
+#define SEC_MOCHI_IN_ACC_IHB2_EN		(1 << 6)
+#define SEC_MOCHI_IN_ACC_PIDI_EN		(1 << 9)
+#define SEC_IN_ACCESS_ENA_ALL_MASTERS		(SEC_MOCHI_IN_ACC_IHB0_EN | \
+						 SEC_MOCHI_IN_ACC_IHB1_EN | \
+						 SEC_MOCHI_IN_ACC_IHB2_EN | \
+						 SEC_MOCHI_IN_ACC_PIDI_EN)
+
+/* SYSRST_OUTn Config definitions */
+#define MVEBU_SYSRST_OUT_CONFIG_REG		(MVEBU_MISC_SOC_BASE + 0x4)
+#define WD_MASK_SYS_RST_OUT			(1 << 2)
+
+/* Generic Timer System Controller */
+#define MVEBU_MSS_GTCR_REG			(MVEBU_REGS_BASE + 0x581000)
+#define MVEBU_MSS_GTCR_ENABLE_BIT		0x1
+
+/*
+ * AXI Configuration.
+ */
+
+/* Used for Units of AP-806 (e.g. SDIO, etc.) */
+#define MVEBU_AXI_ATTR_BASE			(MVEBU_REGS_BASE + 0x6F4580)
+#define MVEBU_AXI_ATTR_REG(index)		(MVEBU_AXI_ATTR_BASE + \
+							0x4 * index)
+
+enum axi_attr {
+	AXI_SDIO_ATTR = 0,
+	AXI_DFX_ATTR,
+	AXI_MAX_ATTR,
+};
+
+static void apn_sec_masters_access_en(uint32_t enable)
+{
+	uint32_t reg;
+
+	/* Open/Close incoming access for all masters.
+	 * The access is disabled in trusted boot mode.
+	 * This can only be done in EL3.
+	 */
+	reg = mmio_read_32(SEC_MOCHI_IN_ACC_REG);
+	if (enable)
+		mmio_write_32(SEC_MOCHI_IN_ACC_REG, reg |
+			      SEC_IN_ACCESS_ENA_ALL_MASTERS);
+	else
+		mmio_write_32(SEC_MOCHI_IN_ACC_REG, reg &
+			      ~SEC_IN_ACCESS_ENA_ALL_MASTERS);
+}
+
+static void setup_smmu(void)
+{
+	uint32_t reg;
+
+	/* Set the SMMU page size to 64 KB */
+	reg = mmio_read_32(SMMU_sACR);
+	reg |= SMMU_sACR_PG_64K;
+	mmio_write_32(SMMU_sACR, reg);
+}
+
+static void apn806_errata_wa_init(void)
+{
+	/*
+	 * ERRATA ID: RES-3033912 - Internal Address Space Init state causes
+	 * a hang upon accesses to [0xf070_0000, 0xf07f_ffff]
+	 * Workaround: Boot Firmware (ATF) should configure CCU_RGF_WIN(4) to
+	 * split [0x6e_0000, 0xff_ffff] to values [0x6e_0000, 0x6f_ffff] and
+	 * [0x80_0000, 0xff_ffff] that cause accesses to the
+	 * segment of [0xf070_0000, 0xf07f_ffff] to act as RAZWI.
+	 */
+	mmio_write_32(CCU_RGF(4), 0x37f9b809);
+	mmio_write_32(CCU_RGF(5), 0x7ffa0009);
+}
+
+static void init_aurora2(void)
+{
+	uint32_t reg;
+
+	/* Enable GSPMU control by CPU */
+	reg = mmio_read_32(CCU_GSPMU_CR);
+	reg |= GSPMU_CPU_CONTROL;
+	mmio_write_32(CCU_GSPMU_CR, reg);
+
+#if LLC_ENABLE
+	/* Enable LLC for AP806 in exclusive mode */
+	llc_enable(0, 1);
+
+	/* Set point of coherency to DDR.
+	 * This is required by units which have
+	 * SW cache coherency
+	 */
+	reg = mmio_read_32(CCU_HTC_CR);
+	reg |= (0x1 << CCU_SET_POC_OFFSET);
+	mmio_write_32(CCU_HTC_CR, reg);
+#endif /* LLC_ENABLE */
+
+	apn806_errata_wa_init();
+}
+
+
+/* MCIx indirect access registers are located by default at
+ * 0xf4000000/0xf6000000. To avoid a conflict with internal registers of units
+ * connected via MCIx, which can be mapped to the same addresses (e.g. the CP1
+ * base is also 0xf4000000), the following routine remaps the MCIx indirect
+ * bases to another address range.
+ */
+static void mci_remap_indirect_access_base(void)
+{
+	uint32_t mci;
+
+	for (mci = 0; mci < MCI_MAX_UNIT_ID; mci++)
+		mmio_write_32(MCIX4_REG_START_ADDRESS_REG(mci),
+			      MVEBU_MCI_REG_BASE_REMAP(mci) >>
+			      MCI_REMAP_OFF_SHIFT);
+}
+
+static void apn806_axi_attr_init(void)
+{
+	uint32_t index, data;
+
+	/* Initialize AXI attributes for APN806 */
+
+	/* Go over the AXI attributes and set Ax-Cache and Ax-Domain */
+	for (index = 0; index < AXI_MAX_ATTR; index++) {
+		switch (index) {
+		/* DFX works in non-coherent mode only -
+		 * there's no option to configure the Ax-Cache and Ax-Domain
+		 */
+		case AXI_DFX_ATTR:
+			continue;
+		default:
+			/* Set Ax-Cache as cacheable, no allocate, modifiable,
+			 * bufferable
+			 * The values are different because the Read & Write
+			 * definitions are different in Ax-Cache
+			 */
+			data = mmio_read_32(MVEBU_AXI_ATTR_REG(index));
+			data &= ~MVEBU_AXI_ATTR_ARCACHE_MASK;
+			data |= (CACHE_ATTR_WRITE_ALLOC |
+				 CACHE_ATTR_CACHEABLE   |
+				 CACHE_ATTR_BUFFERABLE) <<
+				 MVEBU_AXI_ATTR_ARCACHE_OFFSET;
+			data &= ~MVEBU_AXI_ATTR_AWCACHE_MASK;
+			data |= (CACHE_ATTR_READ_ALLOC |
+				 CACHE_ATTR_CACHEABLE  |
+				 CACHE_ATTR_BUFFERABLE) <<
+				 MVEBU_AXI_ATTR_AWCACHE_OFFSET;
+			/* Set Ax-Domain as Outer domain */
+			data &= ~MVEBU_AXI_ATTR_ARDOMAIN_MASK;
+			data |= DOMAIN_OUTER_SHAREABLE <<
+				MVEBU_AXI_ATTR_ARDOMAIN_OFFSET;
+			data &= ~MVEBU_AXI_ATTR_AWDOMAIN_MASK;
+			data |= DOMAIN_OUTER_SHAREABLE <<
+				MVEBU_AXI_ATTR_AWDOMAIN_OFFSET;
+			mmio_write_32(MVEBU_AXI_ATTR_REG(index), data);
+		}
+	}
+}
+
+static void dss_setup(void)
+{
+	/* Enable 48-bit VA */
+	mmio_setbits_32(DSS_CR0, DVM_48BIT_VA_ENABLE);
+}
+
+static void misc_soc_configurations(void)
+{
+	uint32_t reg;
+
+	/* Un-mask Watchdog reset from influencing the SYSRST_OUTn.
+	 * Otherwise, upon WD timeout, the WD reset signal won't trigger reset
+	 */
+	reg = mmio_read_32(MVEBU_SYSRST_OUT_CONFIG_REG);
+	reg &= ~(WD_MASK_SYS_RST_OUT);
+	mmio_write_32(MVEBU_SYSRST_OUT_CONFIG_REG, reg);
+}
+
+void ap_init(void)
+{
+	/* Setup Aurora2. */
+	init_aurora2();
+
+	/* configure MCI mapping */
+	mci_remap_indirect_access_base();
+
+	/* configure IO_WIN windows */
+	init_io_win(MVEBU_AP0);
+
+	/* configure CCU windows */
+	init_ccu(MVEBU_AP0);
+
+	/* configure DSS */
+	dss_setup();
+
+	/* configure the SMMU */
+	setup_smmu();
+
+	/* Open APN incoming access for all masters */
+	apn_sec_masters_access_en(1);
+
+	/* configure axi for APN */
+	apn806_axi_attr_init();
+
+	/* misc configuration of the SoC */
+	misc_soc_configurations();
+}
+
+void ap_ble_init(void)
+{
+}
+
+int ap_get_count(void)
+{
+	return 1;
+}
+
diff --git a/drivers/marvell/mochi/cp110_setup.c b/drivers/marvell/mochi/cp110_setup.c
new file mode 100644
index 0000000..c4cb307
--- /dev/null
+++ b/drivers/marvell/mochi/cp110_setup.c
@@ -0,0 +1,429 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+/* CP110 Marvell SoC driver */
+
+#include <amb_adec.h>
+#include <cp110_setup.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <iob.h>
+#include <plat_marvell.h>
+
+/*
+ * AXI Configuration.
+ */
+
+/* Used for Units of CP-110 (e.g. USB device, USB Host, etc.) */
+#define MVEBU_AXI_ATTR_OFFSET			(0x441300)
+#define MVEBU_AXI_ATTR_REG(index)		(MVEBU_AXI_ATTR_OFFSET + \
+							0x4 * index)
+
+/* AXI Protection bits */
+#define MVEBU_AXI_PROT_OFFSET				(0x441200)
+
+/* AXI Protection regs */
+#define MVEBU_AXI_PROT_REG(index)		((index <= 4) ? \
+						(MVEBU_AXI_PROT_OFFSET + \
+							0x4 * index) : \
+						(MVEBU_AXI_PROT_OFFSET + 0x18))
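+/* Indexes 0-4 map to consecutive registers; index 5 maps to offset 0x18 */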
+#define MVEBU_AXI_PROT_REGS_NUM			(6)
+
+#define MVEBU_SOC_CFGS_OFFSET			(0x441900)
+#define MVEBU_SOC_CFG_REG(index)		(MVEBU_SOC_CFGS_OFFSET + \
+							0x4 * index)
+#define MVEBU_SOC_CFG_REG_NUM			(0)
+#define MVEBU_SOC_CFG_GLOG_SECURE_EN_MASK	(0xE)
+
+/* SATA3 MBUS to AXI regs */
+#define MVEBU_BRIDGE_WIN_DIS_REG		(MVEBU_SOC_CFGS_OFFSET + 0x10)
+#define MVEBU_BRIDGE_WIN_DIS_OFF		(0x0)
+
+/* SATA3 MBUS to AXI regs */
+#define MVEBU_SATA_M2A_AXI_PORT_CTRL_REG	(0x54ff04)
+
+/* AXI to MBUS bridge registers */
+#define MVEBU_AMB_IP_OFFSET			(0x13ff00)
+#define MVEBU_AMB_IP_BRIDGE_WIN_REG(win)	(MVEBU_AMB_IP_OFFSET + \
+							(win * 0x8))
+#define MVEBU_AMB_IP_BRIDGE_WIN_EN_OFFSET	0
+#define MVEBU_AMB_IP_BRIDGE_WIN_EN_MASK		\
+				(0x1 << MVEBU_AMB_IP_BRIDGE_WIN_EN_OFFSET)
+#define MVEBU_AMB_IP_BRIDGE_WIN_SIZE_OFFSET	16
+#define MVEBU_AMB_IP_BRIDGE_WIN_SIZE_MASK	\
+				(0xffff << MVEBU_AMB_IP_BRIDGE_WIN_SIZE_OFFSET)
+
+#define MVEBU_SAMPLE_AT_RESET_REG	(0x440600)
+#define SAR_PCIE1_CLK_CFG_OFFSET	31
+#define SAR_PCIE1_CLK_CFG_MASK		(0x1 << SAR_PCIE1_CLK_CFG_OFFSET)
+#define SAR_PCIE0_CLK_CFG_OFFSET	30
+#define SAR_PCIE0_CLK_CFG_MASK		(0x1 << SAR_PCIE0_CLK_CFG_OFFSET)
+#define SAR_I2C_INIT_EN_OFFSET		24
+#define SAR_I2C_INIT_EN_MASK		(1 << SAR_I2C_INIT_EN_OFFSET)
+
+/*******************************************************************************
+ * PCIE clock buffer control
+ ******************************************************************************/
+#define MVEBU_PCIE_REF_CLK_BUF_CTRL			(0x4404F0)
+#define PCIE1_REFCLK_BUFF_SOURCE			0x800
+#define PCIE0_REFCLK_BUFF_SOURCE			0x400
+
+/*******************************************************************************
+ * MSS Device Push Set Register
+ ******************************************************************************/
+#define MVEBU_CP_MSS_DPSHSR_REG				(0x280040)
+#define MSS_DPSHSR_REG_PCIE_CLK_SEL			0x8
+
+/*******************************************************************************
+ * RTC Configuration
+ ******************************************************************************/
+#define MVEBU_RTC_BASE					(0x284000)
+#define MVEBU_RTC_STATUS_REG				(MVEBU_RTC_BASE + 0x0)
+#define MVEBU_RTC_STATUS_ALARM1_MASK			0x1
+#define MVEBU_RTC_STATUS_ALARM2_MASK			0x2
+#define MVEBU_RTC_IRQ_1_CONFIG_REG			(MVEBU_RTC_BASE + 0x4)
+#define MVEBU_RTC_IRQ_2_CONFIG_REG			(MVEBU_RTC_BASE + 0x8)
+#define MVEBU_RTC_TIME_REG				(MVEBU_RTC_BASE + 0xC)
+#define MVEBU_RTC_ALARM_1_REG				(MVEBU_RTC_BASE + 0x10)
+#define MVEBU_RTC_ALARM_2_REG				(MVEBU_RTC_BASE + 0x14)
+#define MVEBU_RTC_CCR_REG				(MVEBU_RTC_BASE + 0x18)
+#define MVEBU_RTC_NOMINAL_TIMING			0x2000
+#define MVEBU_RTC_NOMINAL_TIMING_MASK			0x7FFF
+#define MVEBU_RTC_TEST_CONFIG_REG			(MVEBU_RTC_BASE + 0x1C)
+#define MVEBU_RTC_BRIDGE_TIMING_CTRL0_REG		(MVEBU_RTC_BASE + 0x80)
+#define MVEBU_RTC_WRCLK_PERIOD_MASK			0xFFFF
+#define MVEBU_RTC_WRCLK_PERIOD_DEFAULT			0x3FF
+#define MVEBU_RTC_WRCLK_SETUP_OFFS			16
+#define MVEBU_RTC_WRCLK_SETUP_MASK			0xFFFF0000
+#define MVEBU_RTC_WRCLK_SETUP_DEFAULT			0x29
+#define MVEBU_RTC_BRIDGE_TIMING_CTRL1_REG		(MVEBU_RTC_BASE + 0x84)
+#define MVEBU_RTC_READ_OUTPUT_DELAY_MASK		0xFFFF
+#define MVEBU_RTC_READ_OUTPUT_DELAY_DEFAULT		0x1F
+
+enum axi_attr {
+	AXI_ADUNIT_ATTR = 0,
+	AXI_COMUNIT_ATTR,
+	AXI_EIP197_ATTR,
+	AXI_USB3D_ATTR,
+	AXI_USB3H0_ATTR,
+	AXI_USB3H1_ATTR,
+	AXI_SATA0_ATTR,
+	AXI_SATA1_ATTR,
+	AXI_DAP_ATTR,
+	AXI_DFX_ATTR,
+	AXI_DBG_TRC_ATTR = 12,
+	AXI_SDIO_ATTR,
+	AXI_MSS_ATTR,
+	AXI_MAX_ATTR,
+};
+
+/* Most stream IDS are configured centrally in the CP-110 RFU
+ * but some are configured inside the unit registers
+ */
+#define RFU_STREAM_ID_BASE	(0x450000)
+#define USB3H_0_STREAM_ID_REG	(RFU_STREAM_ID_BASE + 0xC)
+#define USB3H_1_STREAM_ID_REG	(RFU_STREAM_ID_BASE + 0x10)
+#define SATA_0_STREAM_ID_REG	(RFU_STREAM_ID_BASE + 0x14)
+#define SATA_1_STREAM_ID_REG	(RFU_STREAM_ID_BASE + 0x18)
+
+#define CP_DMA_0_STREAM_ID_REG  (0x6B0010)
+#define CP_DMA_1_STREAM_ID_REG  (0x6D0010)
+
+/* We allocate IDs 128-255 for PCIe */
+#define MAX_STREAM_ID		(0x80)
+
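+/* Zero-terminated list of per-unit stream ID registers, programmed by
+ * cp110_stream_id_init() below
+ */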
+uintptr_t stream_id_reg[] = {
+	USB3H_0_STREAM_ID_REG,
+	USB3H_1_STREAM_ID_REG,
+	CP_DMA_0_STREAM_ID_REG,
+	CP_DMA_1_STREAM_ID_REG,
+	SATA_0_STREAM_ID_REG,
+	SATA_1_STREAM_ID_REG,
+	0
+};
+
+static void cp110_errata_wa_init(uintptr_t base)
+{
+	uint32_t data;
+
+	/* ERRATA GL-4076863:
+	 * Reset value for global_secure_enable inputs must be changed
+	 * from '1' to '0'.
+	 * When asserted, only "secured" transactions can enter IHB
+	 * configuration space.
+	 * However, blocking AXI transactions is performed by IOB.
+	 * Performing it also at IHB/HB complicates programming model.
+	 *
+	 * Enable non-secure access in SOC configuration register
+	 */
+	data = mmio_read_32(base + MVEBU_SOC_CFG_REG(MVEBU_SOC_CFG_REG_NUM));
+	data &= ~MVEBU_SOC_CFG_GLOG_SECURE_EN_MASK;
+	mmio_write_32(base + MVEBU_SOC_CFG_REG(MVEBU_SOC_CFG_REG_NUM), data);
+}
+
+static void cp110_pcie_clk_cfg(uintptr_t base)
+{
+	uint32_t pcie0_clk, pcie1_clk, reg;
+
+	/*
+	 * Determine the pcie0/1 clock direction (input/output) from the
+	 * sample at reset.
+	 */
+	reg = mmio_read_32(base + MVEBU_SAMPLE_AT_RESET_REG);
+	pcie0_clk = (reg & SAR_PCIE0_CLK_CFG_MASK) >> SAR_PCIE0_CLK_CFG_OFFSET;
+	pcie1_clk = (reg & SAR_PCIE1_CLK_CFG_MASK) >> SAR_PCIE1_CLK_CFG_OFFSET;
+
+	/* CP110 revision A2 */
+	if (cp110_rev_id_get(base) == MVEBU_CP110_REF_ID_A2) {
+		/*
+		 * PCIe Reference Clock Buffer Control register must be
+		 * set according to the clock direction (input/output)
+		 */
+		reg = mmio_read_32(base + MVEBU_PCIE_REF_CLK_BUF_CTRL);
+		reg &= ~(PCIE0_REFCLK_BUFF_SOURCE | PCIE1_REFCLK_BUFF_SOURCE);
+		if (!pcie0_clk)
+			reg |= PCIE0_REFCLK_BUFF_SOURCE;
+		if (!pcie1_clk)
+			reg |= PCIE1_REFCLK_BUFF_SOURCE;
+
+		mmio_write_32(base + MVEBU_PCIE_REF_CLK_BUF_CTRL, reg);
+	}
+
+	/* CP110 revision A1 */
+	if (cp110_rev_id_get(base) == MVEBU_CP110_REF_ID_A1) {
+		if (!pcie0_clk || !pcie1_clk) {
+			/*
+			 * if one of the pcie clocks is set to input,
+			 * we need to set mss_push[131] field, otherwise,
+			 * the pcie clock might not work.
+			 */
+			reg = mmio_read_32(base + MVEBU_CP_MSS_DPSHSR_REG);
+			reg |= MSS_DPSHSR_REG_PCIE_CLK_SEL;
+			mmio_write_32(base + MVEBU_CP_MSS_DPSHSR_REG, reg);
+		}
+	}
+}
+
+/* Set a unique stream id for all DMA capable devices */
+static void cp110_stream_id_init(uintptr_t base, uint32_t stream_id)
+{
+	int i = 0;
+
+	while (stream_id_reg[i]) {
+		if (i > MAX_STREAM_ID_PER_CP) {
+			NOTICE("Only first %d (maximum) Stream IDs allocated\n",
+			       MAX_STREAM_ID_PER_CP);
+			return;
+		}
+
+		if ((stream_id_reg[i] == CP_DMA_0_STREAM_ID_REG) ||
+		    (stream_id_reg[i] == CP_DMA_1_STREAM_ID_REG))
+			mmio_write_32(base + stream_id_reg[i],
+				      stream_id << 16 |  stream_id);
+		else
+			mmio_write_32(base + stream_id_reg[i], stream_id);
+
+		/* SATA port 0/1 are in the same SATA unit, and they should use
+		 * the same STREAM ID number
+		 */
+		if (stream_id_reg[i] != SATA_0_STREAM_ID_REG)
+			stream_id++;
+
+		i++;
+	}
+}
+
+static void cp110_axi_attr_init(uintptr_t base)
+{
+	uint32_t index, data;
+
+	/* Initialize AXI attributes for Armada-7K/8K SoC */
+
+	/* Go over the AXI attributes and set Ax-Cache and Ax-Domain */
+	for (index = 0; index < AXI_MAX_ATTR; index++) {
+		switch (index) {
+		/* The DFX and MSS units work in non-coherent mode only -
+		 * there's no option to configure the Ax-Cache and Ax-Domain
+		 */
+		case AXI_DFX_ATTR:
+		case AXI_MSS_ATTR:
+			continue;
+		default:
+			/* Set Ax-Cache as cacheable, no allocate, modifiable,
+			 * bufferable
+			 * The values are different because the Read & Write
+			 * definitions are different in Ax-Cache
+			 */
+			data = mmio_read_32(base + MVEBU_AXI_ATTR_REG(index));
+			data &= ~MVEBU_AXI_ATTR_ARCACHE_MASK;
+			data |= (CACHE_ATTR_WRITE_ALLOC |
+				 CACHE_ATTR_CACHEABLE   |
+				 CACHE_ATTR_BUFFERABLE) <<
+				 MVEBU_AXI_ATTR_ARCACHE_OFFSET;
+			data &= ~MVEBU_AXI_ATTR_AWCACHE_MASK;
+			data |= (CACHE_ATTR_READ_ALLOC |
+				 CACHE_ATTR_CACHEABLE  |
+				 CACHE_ATTR_BUFFERABLE) <<
+				 MVEBU_AXI_ATTR_AWCACHE_OFFSET;
+			/* Set Ax-Domain as Outer domain */
+			data &= ~MVEBU_AXI_ATTR_ARDOMAIN_MASK;
+			data |= DOMAIN_OUTER_SHAREABLE <<
+				MVEBU_AXI_ATTR_ARDOMAIN_OFFSET;
+			data &= ~MVEBU_AXI_ATTR_AWDOMAIN_MASK;
+			data |= DOMAIN_OUTER_SHAREABLE <<
+				MVEBU_AXI_ATTR_AWDOMAIN_OFFSET;
+			mmio_write_32(base + MVEBU_AXI_ATTR_REG(index), data);
+		}
+	}
+
+	/* SATA IOCC is supported; set cache attributes
+	 * for the SATA MBUS to AXI configuration.
+	 */
+	data = mmio_read_32(base + MVEBU_SATA_M2A_AXI_PORT_CTRL_REG);
+	data &= ~MVEBU_SATA_M2A_AXI_AWCACHE_MASK;
+	data |= (CACHE_ATTR_WRITE_ALLOC |
+		 CACHE_ATTR_CACHEABLE   |
+		 CACHE_ATTR_BUFFERABLE) <<
+		 MVEBU_SATA_M2A_AXI_AWCACHE_OFFSET;
+	data &= ~MVEBU_SATA_M2A_AXI_ARCACHE_MASK;
+	data |= (CACHE_ATTR_READ_ALLOC |
+		 CACHE_ATTR_CACHEABLE  |
+		 CACHE_ATTR_BUFFERABLE) <<
+		 MVEBU_SATA_M2A_AXI_ARCACHE_OFFSET;
+	mmio_write_32(base + MVEBU_SATA_M2A_AXI_PORT_CTRL_REG, data);
+
+	/* Set all IO's AXI attribute to non-secure access. */
+	for (index = 0; index < MVEBU_AXI_PROT_REGS_NUM; index++)
+		mmio_write_32(base + MVEBU_AXI_PROT_REG(index),
+			      DOMAIN_SYSTEM_SHAREABLE);
+}
+
+static void amb_bridge_init(uintptr_t base)
+{
+	uint32_t reg;
+
+	/* Open AMB bridge Window to Access COMPHY/MDIO registers */
+	reg = mmio_read_32(base + MVEBU_AMB_IP_BRIDGE_WIN_REG(0));
+	reg &= ~(MVEBU_AMB_IP_BRIDGE_WIN_SIZE_MASK |
+		 MVEBU_AMB_IP_BRIDGE_WIN_EN_MASK);
+	reg |= (0x7ff << MVEBU_AMB_IP_BRIDGE_WIN_SIZE_OFFSET) |
+	       (0x1 << MVEBU_AMB_IP_BRIDGE_WIN_EN_OFFSET);
+	mmio_write_32(base + MVEBU_AMB_IP_BRIDGE_WIN_REG(0), reg);
+}
+
+static void cp110_rtc_init(uintptr_t base)
+{
+	/* Update MBus timing parameters before accessing RTC registers */
+	mmio_clrsetbits_32(base + MVEBU_RTC_BRIDGE_TIMING_CTRL0_REG,
+			   MVEBU_RTC_WRCLK_PERIOD_MASK,
+			   MVEBU_RTC_WRCLK_PERIOD_DEFAULT);
+
+	mmio_clrsetbits_32(base + MVEBU_RTC_BRIDGE_TIMING_CTRL0_REG,
+			   MVEBU_RTC_WRCLK_SETUP_MASK,
+			   MVEBU_RTC_WRCLK_SETUP_DEFAULT <<
+			   MVEBU_RTC_WRCLK_SETUP_OFFS);
+
+	mmio_clrsetbits_32(base + MVEBU_RTC_BRIDGE_TIMING_CTRL1_REG,
+			   MVEBU_RTC_READ_OUTPUT_DELAY_MASK,
+			   MVEBU_RTC_READ_OUTPUT_DELAY_DEFAULT);
+
+	/*
+	 * Issue reset to the RTC if Clock Correction register
+	 * contents did not sustain the reboot/power-on.
+	 */
+	if ((mmio_read_32(base + MVEBU_RTC_CCR_REG) &
+	    MVEBU_RTC_NOMINAL_TIMING_MASK) != MVEBU_RTC_NOMINAL_TIMING) {
+		/* Reset Test register */
+		mmio_write_32(base + MVEBU_RTC_TEST_CONFIG_REG, 0);
+		mdelay(500);
+
+		/* Reset Time register */
+		mmio_write_32(base + MVEBU_RTC_TIME_REG, 0);
+		udelay(62);
+
+		/* Reset Status register */
+		mmio_write_32(base + MVEBU_RTC_STATUS_REG,
+			      (MVEBU_RTC_STATUS_ALARM1_MASK |
+			      MVEBU_RTC_STATUS_ALARM2_MASK));
+		udelay(62);
+
+		/* Turn off Int1 and Int2 sources & clear the Alarm count */
+		mmio_write_32(base + MVEBU_RTC_IRQ_1_CONFIG_REG, 0);
+		mmio_write_32(base + MVEBU_RTC_IRQ_2_CONFIG_REG, 0);
+		mmio_write_32(base + MVEBU_RTC_ALARM_1_REG, 0);
+		mmio_write_32(base + MVEBU_RTC_ALARM_2_REG, 0);
+
+		/* Setup nominal register access timing */
+		mmio_write_32(base + MVEBU_RTC_CCR_REG,
+			      MVEBU_RTC_NOMINAL_TIMING);
+
+		/* Reset Time register */
+		mmio_write_32(base + MVEBU_RTC_TIME_REG, 0);
+		udelay(10);
+
+		/* Reset Status register */
+		mmio_write_32(base + MVEBU_RTC_STATUS_REG,
+			      (MVEBU_RTC_STATUS_ALARM1_MASK |
+			      MVEBU_RTC_STATUS_ALARM2_MASK));
+		udelay(50);
+	}
+}
+
+static void cp110_amb_adec_init(uintptr_t base)
+{
+	/* enable AXI-MBUS by clearing "Bridge Windows Disable" */
+	mmio_clrbits_32(base + MVEBU_BRIDGE_WIN_DIS_REG,
+			(1 << MVEBU_BRIDGE_WIN_DIS_OFF));
+
+	/* configure AXI-MBUS windows for CP */
+	init_amb_adec(base);
+}
+
+void cp110_init(uintptr_t cp110_base, uint32_t stream_id)
+{
+	INFO("%s: Initialize CPx - base = %lx\n", __func__, cp110_base);
+
+	/* configure IOB windows for CP0 */
+	init_iob(cp110_base);
+
+	/* configure AXI-MBUS windows for CP0 */
+	cp110_amb_adec_init(cp110_base);
+
+	/* configure axi for CP0 */
+	cp110_axi_attr_init(cp110_base);
+
+	/* Execute SW WA for erratas */
+	cp110_errata_wa_init(cp110_base);
+
+	/* Configure PCIe clock according to clock direction */
+	cp110_pcie_clk_cfg(cp110_base);
+
+	/* configure stream id for CP0 */
+	cp110_stream_id_init(cp110_base, stream_id);
+
+	/* Open AMB bridge for comphy for CP0 & CP1 */
+	amb_bridge_init(cp110_base);
+
+	/* Reset RTC if needed */
+	cp110_rtc_init(cp110_base);
+}
+
+/* Do the minimal setup required to configure the CP in BLE */
+void cp110_ble_init(uintptr_t cp110_base)
+{
+#if PCI_EP_SUPPORT
+	INFO("%s: Initialize CPx - base = %lx\n", __func__, cp110_base);
+
+	amb_bridge_init(cp110_base);
+
+	/* Configure PCIe clock */
+	cp110_pcie_clk_cfg(cp110_base);
+
+	/* Configure PCIe endpoint */
+	ble_plat_pcie_ep_setup();
+#endif
+}