Merge "docs: add guidelines for abandoning patches" into integration
diff --git a/Makefile b/Makefile
index 8a0a2e0..f33a12e 100644
--- a/Makefile
+++ b/Makefile
@@ -220,14 +220,6 @@
 # Will set march-directive from platform configuration
 else
 	target32-directive	= 	-target armv8a-none-eabi
-
-# Set the compiler's target architecture profile based on
-# ARM_ARCH_MAJOR ARM_ARCH_MINOR options
-	ifeq (${ARM_ARCH_MINOR},0)
-		march-directive	= 	-march=armv${ARM_ARCH_MAJOR}-a
-	else
-		march-directive	= 	-march=armv${ARM_ARCH_MAJOR}.${ARM_ARCH_MINOR}-a
-	endif #(ARM_ARCH_MINOR)
 endif #(ARM_ARCH_MAJOR)
 
 ################################################################################
@@ -279,12 +271,12 @@
 
 ifneq ($(findstring clang,$(notdir $(CC))),)
 	ifneq ($(findstring armclang,$(notdir $(CC))),)
-		TF_CFLAGS_aarch32	:=	-target arm-arm-none-eabi $(march-directive)
-		TF_CFLAGS_aarch64	:=	-target aarch64-arm-none-eabi $(march-directive)
+		TF_CFLAGS_aarch32	:=	-target arm-arm-none-eabi
+		TF_CFLAGS_aarch64	:=	-target aarch64-arm-none-eabi
 		LD			:=	$(LINKER)
 	else
-		TF_CFLAGS_aarch32	=	$(target32-directive) $(march-directive)
-		TF_CFLAGS_aarch64	:=	-target aarch64-elf $(march-directive)
+		TF_CFLAGS_aarch32	=	$(target32-directive)
+		TF_CFLAGS_aarch64	:=	-target aarch64-elf
 		LD			:=	$(shell $(CC) --print-prog-name ld.lld)
 
 		AR			:=	$(shell $(CC) --print-prog-name llvm-ar)
@@ -296,8 +288,6 @@
 	PP		:=	$(CC) -E $(TF_CFLAGS_$(ARCH))
 	AS		:=	$(CC) -c -x assembler-with-cpp $(TF_CFLAGS_$(ARCH))
 else ifneq ($(findstring gcc,$(notdir $(CC))),)
-	TF_CFLAGS_aarch32	=	$(march-directive)
-	TF_CFLAGS_aarch64	=	$(march-directive)
 	ifeq ($(ENABLE_LTO),1)
 		# Enable LTO only for aarch64
 		ifeq (${ARCH},aarch64)
@@ -308,8 +298,6 @@
 	endif
 	LD			=	$(LINKER)
 else
-	TF_CFLAGS_aarch32	=	$(march-directive)
-	TF_CFLAGS_aarch64	=	$(march-directive)
 	LD			=	$(LINKER)
 endif #(clang)
 
@@ -671,6 +659,14 @@
 
 include ${PLAT_MAKEFILE_FULL}
 
+################################################################################
+# The platform-specific Makefile may provide ARM_ARCH_MAJOR/MINOR; use them to
+# derive the appropriate march value for the compiler.
+################################################################################
+include ${MAKE_HELPERS_DIRECTORY}march.mk
+
+TF_CFLAGS   +=	$(march-directive)
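+
+# Illustration only (the authoritative rules live in march.mk): the derivation
+# is expected to mirror the inline logic removed above, i.e. something like:
+#
+#   ifeq (${ARM_ARCH_MINOR},0)
+#     march-directive = -march=armv${ARM_ARCH_MAJOR}-a
+#   else
+#     march-directive = -march=armv${ARM_ARCH_MAJOR}.${ARM_ARCH_MINOR}-a
+#   endif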
+
 # This internal flag is common option which is set to 1 for scenarios
 # when the BL2 is running in EL3 level. This occurs in two scenarios -
 # 4 world system running BL2 at EL3 and two world system without BL1 running
diff --git a/changelog.yaml b/changelog.yaml
index f21aa16..47cfc1e 100644
--- a/changelog.yaml
+++ b/changelog.yaml
@@ -406,6 +406,13 @@
               - title: i.MX 8
                 scope: imx8
 
+              - title: i.MX 9
+                scope: imx9
+
+                subsections:
+                  - title: i.MX93
+                    scope: imx93
+
           - title: Layerscape
             scope: layerscape
 
@@ -1044,6 +1051,9 @@
           - title: TZC-380
             scope: nxp-tzc380
 
+          - title: TRDC
+            scope: imx-trdc
+
       - title: Renesas
         scope: renesas-drivers
 
diff --git a/docs/plat/imx9.rst b/docs/plat/imx9.rst
new file mode 100644
index 0000000..4adaae3
--- /dev/null
+++ b/docs/plat/imx9.rst
@@ -0,0 +1,60 @@
+NXP i.MX 9 Series
+==================
+
+Building on the market-proven i.MX 6 and i.MX 8 series, i.MX 9 series applications
+processors bring together higher performance applications cores, an independent
+MCU-like real-time domain, Energy Flex architecture, state-of-the-art security
+with EdgeLock® secure enclave and dedicated multi-sensory data processing engines
+(graphics, image, display, audio and voice). The i.MX 9 series, part of the EdgeVerse™
+edge computing platform, integrates hardware neural processing units across many
+members of the series to accelerate machine learning applications at the edge
+(see `i.MX9 Applications Processors`_).
+
+Boot Sequence
+-------------
+
+BootROM --> SPL --> BL31 --> BL33(u-boot) --> Linux kernel
+
+How to build
+------------
+
+Build Procedure
+~~~~~~~~~~~~~~~
+
+-  Prepare the AArch64 toolchain.
+
+-  Get the ELE FW image from the NXP Linux SDK package.
+
+-  Build SPL and U-Boot first to get the binary images: u-boot-spl.bin,
+   u-boot.bin and the dtb.
+
+-  Build TF-A
+
+   Build bl31:
+
+   .. code:: shell
+
+       CROSS_COMPILE=aarch64-linux-gnu- make PLAT=<Target_SoC> bl31
+
+   Target_SoC should be "imx93" for the i.MX93 SoC.
+
+Deploy TF-A Images
+~~~~~~~~~~~~~~~~~~
+
+The TF-A binary (bl31.bin), u-boot-spl.bin, u-boot.bin and the ELE FW image are
+combined by the imx-mkimage tool into a single binary called flash.bin, which
+then needs to be written to the SD card at the offset expected by the BootROM.
+A sketch of this flow is shown below.
+
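+The following is an illustrative sketch only; the imx-mkimage make target, the
+file names and the SD card offset are assumptions based on common i.MX usage,
+so refer to the i.MX Linux User's Guide for the exact steps for your board:
+
+.. code:: shell
+
+    # Copy bl31.bin, u-boot-spl.bin, u-boot.bin, the dtb and the ELE FW image
+    # into the imx-mkimage tree, then generate flash.bin (target name assumed)
+    make SOC=iMX9 flash_singleboot
+
+    # Write flash.bin to the SD card at the offset expected by the BootROM
+    # (the offset below is an assumption; check the i.MX Linux User's Guide)
+    dd if=flash.bin of=/dev/sdX bs=1k seek=32 conv=fsync
+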
+Reference Documentation
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Details on how to prepare, generate and deploy the boot image can be found in the following documents:
+
+- i.MX Linux User's Guide
+  `link <https://www.nxp.com/design/software/embedded-software/i-mx-software/embedded-linux-for-i-mx-applications-processors:IMXLINUX>`__
+- i.MX Linux Reference Manual
+  `link <https://www.nxp.com/design/software/embedded-software/i-mx-software/embedded-linux-for-i-mx-applications-processors:IMXLINUX>`__
+
+.. _i.MX9 Applications Processors: https://www.nxp.com/products/processors-and-microcontrollers/arm-processors/i-mx-applications-processors/i-mx-9-processors:IMX9-PROCESSORS
+
diff --git a/docs/plat/index.rst b/docs/plat/index.rst
index 188c986..fe2cc44 100644
--- a/docs/plat/index.rst
+++ b/docs/plat/index.rst
@@ -27,6 +27,7 @@
    warp7
    imx8
    imx8m
+   imx9
    nxp/index
    poplar
    qemu
diff --git a/drivers/cadence/combo_phy/cdns_combo_phy.c b/drivers/cadence/combo_phy/cdns_combo_phy.c
new file mode 100644
index 0000000..f00d0c1
--- /dev/null
+++ b/drivers/cadence/combo_phy/cdns_combo_phy.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/cadence/cdns_combo_phy.h>
+#include <drivers/cadence/cdns_sdmmc.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/utils.h>
+
+int cdns_sdmmc_write_phy_reg(uint32_t phy_reg_addr, uint32_t phy_reg_addr_value,
+			uint32_t phy_reg_data, uint32_t phy_reg_data_value)
+{
+	uint32_t data = 0U;
+	uint32_t value = 0U;
+
+	/* Set the PHY register address (write HRS04) */
+	value = mmio_read_32(phy_reg_addr);
+	value &= ~PHY_REG_ADDR_MASK;
+	value |= phy_reg_addr_value;
+	mmio_write_32(phy_reg_addr, value);
+	data = mmio_read_32(phy_reg_addr);
+	if ((data & PHY_REG_ADDR_MASK) != phy_reg_addr_value) {
+		ERROR("PHY_REG_ADDR is not set properly\n");
+		return -ENXIO;
+	}
+
+	/* Set the PHY register data (write HRS05) */
+	value &= ~PHY_REG_DATA_MASK;
+	value |= phy_reg_data_value;
+	mmio_write_32(phy_reg_data, value);
+	data = mmio_read_32(phy_reg_data);
+	if (data != phy_reg_data_value) {
+		ERROR("PHY_REG_DATA is not set properly\n");
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+int cdns_sd_card_detect(void)
+{
+	uint32_t value = 0;
+
+	/* Card detection */
+	do {
+		value = mmio_read_32(SDMMC_CDN(SRS09));
+	/* Wait for card insertion. SRS09.CI = 1 */
+	} while ((value & (1 << SDMMC_CDN_CI)) == 0);
+
+	if ((value & (1 << SDMMC_CDN_CI)) == 0) {
+		ERROR("Card not detected\n");
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+int cdns_emmc_card_reset(void)
+{
+	uint32_t _status = 0;
+
+	/* Reset embedded card */
+	mmio_write_32(SDMMC_CDN(SRS10), (7 << SDMMC_CDN_BVS) | (1 << SDMMC_CDN_BP) | _status);
+	mdelay(68680); /* ~68680us */
+	mmio_write_32(SDMMC_CDN(SRS10), (7 << SDMMC_CDN_BVS) | (0 << SDMMC_CDN_BP));
+	udelay(340); /* ~340us */
+
+	/* Turn on supply voltage */
+	/* BVS = 7, BP = 1, BP2 only in UHS2 mode */
+	mmio_write_32(SDMMC_CDN(SRS10), (7 << SDMMC_CDN_BVS) | (1 << SDMMC_CDN_BP) | _status);
+
+	return 0;
+}
diff --git a/drivers/cadence/emmc/cdns_sdmmc.c b/drivers/cadence/emmc/cdns_sdmmc.c
new file mode 100644
index 0000000..d2cd4d6
--- /dev/null
+++ b/drivers/cadence/emmc/cdns_sdmmc.c
@@ -0,0 +1,824 @@
+/*
+ * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/cadence/cdns_sdmmc.h>
+#include <drivers/delay_timer.h>
+#include <drivers/mmc.h>
+#include <lib/mmio.h>
+#include <lib/utils.h>
+
+/* Card busy and present */
+#define CARD_BUSY					1
+#define CARD_NOT_BUSY					0
+
+/* 500 ms delay to read the RINST register */
+#define DELAY_MS_SRS_READ				500
+#define DELAY_RES					10
+
+/* SRS12 error mask */
+#define SRS12_ERR_MASK					0xFFFF8000
+
+/* Check DV dfi_init val=0 */
+#define IO_MASK_END_DATA				0x0
+
+/* Check DV dfi_init val=2; DDR Mode */
+#define IO_MASK_END_DATA_DDR				0x2
+#define IO_MASK_START_DATA				0x0
+#define DATA_SELECT_OE_END_DATA				0x1
+
+#define TIMEOUT						100000
+
+/* General define */
+#define SDHC_REG_MASK					UINT_MAX
+#define SD_HOST_BLOCK_SIZE				0x200
+#define DTCVVAL_DEFAULT_VAL				0xE
+#define CDMMC_DMA_MAX_BUFFER_SIZE			(64 * 1024)
+#define CDNSMMC_ADDRESS_MASK				U(0x0f)
+#define CONFIG_CDNS_DESC_COUNT				8
+
+void cdns_init(void);
+int cdns_send_cmd(struct mmc_cmd *cmd);
+int cdns_set_ios(unsigned int clk, unsigned int width);
+int cdns_prepare(int lba, uintptr_t buf, size_t size);
+int cdns_read(int lba, uintptr_t buf, size_t size);
+int cdns_write(int lba, uintptr_t buf, size_t size);
+
+const struct mmc_ops cdns_sdmmc_ops = {
+	.init			= cdns_init,
+	.send_cmd		= cdns_send_cmd,
+	.set_ios		= cdns_set_ios,
+	.prepare		= cdns_prepare,
+	.read			= cdns_read,
+	.write			= cdns_write,
+};
+
+struct cdns_sdmmc_params cdns_params;
+struct cdns_sdmmc_combo_phy sdmmc_combo_phy_reg;
+struct cdns_sdmmc_sdhc sdmmc_sdhc_reg;
+#ifdef CONFIG_DMA_ADDR_T_64BIT
+struct cdns_idmac_desc cdns_desc[CONFIG_CDNS_DESC_COUNT];
+#else
+struct cdns_idmac_desc cdns_desc[CONFIG_CDNS_DESC_COUNT] __aligned(32);
+#endif
+
+bool data_cmd;
+
+int cdns_wait_ics(uint16_t timeout, uint32_t cdn_srs_res)
+{
+	/* Clock for sdmclk and sdclk */
+	uint32_t count = 0;
+	uint32_t data = 0;
+
+	/* Wait status command response ready */
+	do {
+		data = mmio_read_32(cdn_srs_res);
+		count++;
+		if (count >= timeout) {
+			return -ETIMEDOUT;
+		}
+	} while ((data & (1 << SDMMC_CDN_ICS)) == 0);
+
+	return 0;
+}
+
+int cdns_busy(void)
+{
+	unsigned int data;
+
+	data = mmio_read_32(MMC_REG_BASE + SDHC_CDNS_SRS09);
+	return (data & STATUS_DATA_BUSY) ? CARD_BUSY : CARD_NOT_BUSY;
+}
+
+int cdns_vol_reset(void)
+{
+	/* Reset embedded card */
+	mmio_write_32((MMC_REG_BASE + SDHC_CDNS_SRS10), (7 << SDMMC_CDN_BVS) | (1 << SDMMC_CDN_BP));
+	udelay(250);
+	mmio_write_32((MMC_REG_BASE + SDHC_CDNS_SRS10), (7 << SDMMC_CDN_BVS) | (0 << SDMMC_CDN_BP));
+	udelay(500);
+
+	/* Turn on supply voltage */
+	/* BVS = 7, BP = 1, BP2 only in UHS2 mode */
+	mmio_write_32((MMC_REG_BASE + SDHC_CDNS_SRS10), (7 << SDMMC_CDN_BVS) | (1 << SDMMC_CDN_BP));
+	udelay(250);
+	return 0;
+}
+
+void cdns_set_sdmmc_var(struct cdns_sdmmc_combo_phy *combo_phy_reg,
+	struct cdns_sdmmc_sdhc *sdhc_reg)
+{
+	/* Values are taken by the reference of cadence IP documents */
+	combo_phy_reg->cp_clk_wr_delay = 0;
+	combo_phy_reg->cp_clk_wrdqs_delay = 0;
+	combo_phy_reg->cp_data_select_oe_end = 0;
+	combo_phy_reg->cp_dll_bypass_mode = 1;
+	combo_phy_reg->cp_dll_locked_mode = 0;
+	combo_phy_reg->cp_dll_start_point = 0;
+	combo_phy_reg->cp_gate_cfg_always_on = 1;
+	combo_phy_reg->cp_io_mask_always_on = 0;
+	combo_phy_reg->cp_io_mask_end = 0;
+	combo_phy_reg->cp_io_mask_start = 0;
+	combo_phy_reg->cp_rd_del_sel = 52;
+	combo_phy_reg->cp_read_dqs_cmd_delay = 0;
+	combo_phy_reg->cp_read_dqs_delay = 0;
+	combo_phy_reg->cp_sw_half_cycle_shift = 0;
+	combo_phy_reg->cp_sync_method = 1;
+	combo_phy_reg->cp_underrun_suppress = 1;
+	combo_phy_reg->cp_use_ext_lpbk_dqs = 1;
+	combo_phy_reg->cp_use_lpbk_dqs = 1;
+	combo_phy_reg->cp_use_phony_dqs = 1;
+	combo_phy_reg->cp_use_phony_dqs_cmd = 1;
+
+	sdhc_reg->sdhc_extended_rd_mode = 1;
+	sdhc_reg->sdhc_extended_wr_mode = 1;
+	sdhc_reg->sdhc_hcsdclkadj = 0;
+	sdhc_reg->sdhc_idelay_val = 0;
+	sdhc_reg->sdhc_rdcmd_en = 1;
+	sdhc_reg->sdhc_rddata_en = 1;
+	sdhc_reg->sdhc_rw_compensate = 9;
+	sdhc_reg->sdhc_sdcfsh = 0;
+	sdhc_reg->sdhc_sdcfsl = 1;
+	sdhc_reg->sdhc_wrcmd0_dly = 1;
+	sdhc_reg->sdhc_wrcmd0_sdclk_dly = 0;
+	sdhc_reg->sdhc_wrcmd1_dly = 0;
+	sdhc_reg->sdhc_wrcmd1_sdclk_dly = 0;
+	sdhc_reg->sdhc_wrdata0_dly = 1;
+	sdhc_reg->sdhc_wrdata0_sdclk_dly = 0;
+	sdhc_reg->sdhc_wrdata1_dly = 0;
+	sdhc_reg->sdhc_wrdata1_sdclk_dly = 0;
+}
+
+static int cdns_program_phy_reg(struct cdns_sdmmc_combo_phy *combo_phy_reg,
+	struct cdns_sdmmc_sdhc *sdhc_reg)
+{
+	uint32_t value = 0;
+	int ret = 0;
+
+	/* program PHY_DQS_TIMING_REG */
+	value = (CP_USE_EXT_LPBK_DQS(combo_phy_reg->cp_use_ext_lpbk_dqs)) |
+		(CP_USE_LPBK_DQS(combo_phy_reg->cp_use_lpbk_dqs)) |
+		(CP_USE_PHONY_DQS(combo_phy_reg->cp_use_phony_dqs)) |
+		(CP_USE_PHONY_DQS_CMD(combo_phy_reg->cp_use_phony_dqs_cmd));
+	ret = cdns_sdmmc_write_phy_reg(MMC_REG_BASE + SDHC_CDNS_HRS04,
+			COMBO_PHY_REG + PHY_DQS_TIMING_REG, MMC_REG_BASE +
+			SDHC_CDNS_HRS05, value);
+	if (ret != 0) {
+		return ret;
+	}
+
+	/* program PHY_GATE_LPBK_CTRL_REG */
+	value = (CP_SYNC_METHOD(combo_phy_reg->cp_sync_method)) |
+		(CP_SW_HALF_CYCLE_SHIFT(combo_phy_reg->cp_sw_half_cycle_shift)) |
+		(CP_RD_DEL_SEL(combo_phy_reg->cp_rd_del_sel)) |
+		(CP_UNDERRUN_SUPPRESS(combo_phy_reg->cp_underrun_suppress)) |
+		(CP_GATE_CFG_ALWAYS_ON(combo_phy_reg->cp_gate_cfg_always_on));
+	ret = cdns_sdmmc_write_phy_reg(MMC_REG_BASE + SDHC_CDNS_HRS04,
+			COMBO_PHY_REG + PHY_GATE_LPBK_CTRL_REG, MMC_REG_BASE +
+			SDHC_CDNS_HRS05, value);
+	if (ret != 0) {
+		return ret;
+	}
+
+	/* program PHY_DLL_MASTER_CTRL_REG */
+	value = (CP_DLL_BYPASS_MODE(combo_phy_reg->cp_dll_bypass_mode))
+			| (CP_DLL_START_POINT(combo_phy_reg->cp_dll_start_point));
+	ret = cdns_sdmmc_write_phy_reg(MMC_REG_BASE + SDHC_CDNS_HRS04,
+			COMBO_PHY_REG + PHY_DLL_MASTER_CTRL_REG, MMC_REG_BASE
+			+ SDHC_CDNS_HRS05, value);
+	if (ret != 0) {
+		return ret;
+	}
+
+	/* program PHY_DLL_SLAVE_CTRL_REG */
+	value = (CP_READ_DQS_CMD_DELAY(combo_phy_reg->cp_read_dqs_cmd_delay))
+		| (CP_CLK_WRDQS_DELAY(combo_phy_reg->cp_clk_wrdqs_delay))
+		| (CP_CLK_WR_DELAY(combo_phy_reg->cp_clk_wr_delay))
+		| (CP_READ_DQS_DELAY(combo_phy_reg->cp_read_dqs_delay));
+	ret = cdns_sdmmc_write_phy_reg(MMC_REG_BASE + SDHC_CDNS_HRS04,
+			COMBO_PHY_REG + PHY_DLL_SLAVE_CTRL_REG, MMC_REG_BASE
+			+ SDHC_CDNS_HRS05, value);
+	if (ret != 0) {
+		return ret;
+	}
+
+	/* program PHY_CTRL_REG */
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_HRS04, COMBO_PHY_REG
+			+ PHY_CTRL_REG);
+	value = mmio_read_32(MMC_REG_BASE + SDHC_CDNS_HRS05);
+
+	/* phony_dqs_timing=0 */
+	value &= ~(CP_PHONY_DQS_TIMING_MASK << CP_PHONY_DQS_TIMING_SHIFT);
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_HRS05, value);
+
+	/* switch off DLL_RESET */
+	do {
+		value = mmio_read_32(MMC_REG_BASE + SDHC_CDNS_HRS09);
+		value |= SDHC_PHY_SW_RESET;
+		mmio_write_32(MMC_REG_BASE + SDHC_CDNS_HRS09, value);
+		value = mmio_read_32(MMC_REG_BASE + SDHC_CDNS_HRS09);
+	/* polling PHY_INIT_COMPLETE */
+	} while ((value & SDHC_PHY_INIT_COMPLETE) != SDHC_PHY_INIT_COMPLETE);
+
+	/* program PHY_DQ_TIMING_REG */
+	combo_phy_reg->cp_io_mask_end = 0U;
+	value = (CP_IO_MASK_ALWAYS_ON(combo_phy_reg->cp_io_mask_always_on))
+		| (CP_IO_MASK_END(combo_phy_reg->cp_io_mask_end))
+		| (CP_IO_MASK_START(combo_phy_reg->cp_io_mask_start))
+		| (CP_DATA_SELECT_OE_END(combo_phy_reg->cp_data_select_oe_end));
+
+	ret = cdns_sdmmc_write_phy_reg(MMC_REG_BASE + SDHC_CDNS_HRS04,
+			COMBO_PHY_REG + PHY_DQ_TIMING_REG, MMC_REG_BASE
+			+ SDHC_CDNS_HRS05, value);
+	if (ret != 0) {
+		return ret;
+	}
+	return 0;
+}
+
+int cdns_read(int lba, uintptr_t buf, size_t size)
+{
+	inv_dcache_range(buf, size);
+
+	return 0;
+}
+
+void cdns_init(void)
+{
+	/* Dummy implementation for the init hook; nothing to do here. */
+}
+
+int cdns_prepare(int dma_start_addr, uintptr_t dma_buff, size_t size)
+{
+	data_cmd = true;
+	struct cdns_idmac_desc *desc;
+	uint32_t desc_cnt, i;
+	uint64_t desc_base;
+
+	assert(((dma_buff & CDNSMMC_ADDRESS_MASK) == 0) &&
+			(cdns_params.desc_size > 0) &&
+			((MMC_REG_BASE & MMC_BLOCK_MASK) == 0) &&
+			((cdns_params.desc_base & MMC_BLOCK_MASK) == 0) &&
+			((cdns_params.desc_size & MMC_BLOCK_MASK) == 0));
+
+	flush_dcache_range(dma_buff, size);
+
+	desc_cnt = (size + (CDMMC_DMA_MAX_BUFFER_SIZE) - 1) / (CDMMC_DMA_MAX_BUFFER_SIZE);
+	assert(desc_cnt * sizeof(struct cdns_idmac_desc) < cdns_params.desc_size);
+
+	if (desc_cnt > CONFIG_CDNS_DESC_COUNT) {
+		ERROR("Requested data transfer length %zu is greater than configured length %d\n",
+				size, (CONFIG_CDNS_DESC_COUNT * CDMMC_DMA_MAX_BUFFER_SIZE));
+		return -EINVAL;
+	}
+
+	desc = (struct cdns_idmac_desc *)cdns_params.desc_base;
+	desc_base = (uint64_t)desc;
+	i = 0;
+
+	while ((i + 1) < desc_cnt) {
+		desc->attr = ADMA_DESC_ATTR_VALID | ADMA_DESC_TRANSFER_DATA;
+		desc->reserved = 0;
+		desc->len = MAX_64KB_PAGE;
+		desc->addr_lo = (dma_buff & UINT_MAX) + (CDMMC_DMA_MAX_BUFFER_SIZE * i);
+#if CONFIG_DMA_ADDR_T_64BIT == 1
+		desc->addr_hi = (dma_buff >> 32) & 0xffffffff;
+#endif
+		size -= CDMMC_DMA_MAX_BUFFER_SIZE;
+		desc++;
+		i++;
+	}
+
+	desc->attr = ADMA_DESC_ATTR_VALID | ADMA_DESC_TRANSFER_DATA |
+			ADMA_DESC_ATTR_END;
+	desc->reserved = 0;
+	desc->len = size;
+#if CONFIG_DMA_ADDR_T_64BIT == 1
+	desc->addr_lo = (dma_buff & UINT_MAX) + (CDMMC_DMA_MAX_BUFFER_SIZE * i);
+	desc->addr_hi = (dma_buff >> 32) & UINT_MAX;
+#else
+	desc->addr_lo = (dma_buff & UINT_MAX);
+#endif
+
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS22, (uint32_t)desc_base);
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS23, (uint32_t)(desc_base >> 32));
+	flush_dcache_range(cdns_params.desc_base,
+				desc_cnt * CDMMC_DMA_MAX_BUFFER_SIZE);
+
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS01,
+			((512 << BLOCK_SIZE) | ((size/512) << BLK_COUNT_CT) | SDMA_BUF));
+	return 0;
+}
+
+static void cdns_host_set_clk(int clk)
+{
+	uint32_t ret = 0;
+	uint32_t sdclkfsval = 0;
+	uint32_t dtcvval = DTCVVAL_DEFAULT_VAL;
+
+	sdclkfsval = (cdns_params.clk_rate / 2000) / clk;
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS11, 0);
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS11, (dtcvval << SDMMC_CDN_DTCV) |
+			(sdclkfsval << SDMMC_CDN_SDCLKFS) | (1 << SDMMC_CDN_ICE));
+
+	ret = cdns_wait_ics(5000, MMC_REG_BASE + SDHC_CDNS_SRS11);
+	if (ret != 0U) {
+		ERROR("Waiting for SDMMC_CDN_ICS timed out\n");
+	}
+
+	/* Enable DLL reset */
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_HRS09, mmio_read_32(MMC_REG_BASE + SDHC_CDNS_HRS09) &
+			~SDHC_DLL_RESET_MASK);
+	/* Set extended_wr_mode */
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_HRS09, (mmio_read_32(MMC_REG_BASE + SDHC_CDNS_HRS09)
+			& SDHC_EXTENDED_WR_MODE_MASK) | (1 << EXTENDED_WR_MODE));
+	/* Release DLL reset */
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_HRS09, mmio_read_32(MMC_REG_BASE
+			+ SDHC_CDNS_HRS09) | 1);
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_HRS09, mmio_read_32(MMC_REG_BASE
+			+ SDHC_CDNS_HRS09) | (3 << RDCMD_EN));
+
+	do {
+		mmio_read_32(MMC_REG_BASE + SDHC_CDNS_HRS09);
+	} while (~mmio_read_32(MMC_REG_BASE + SDHC_CDNS_HRS09) & (1 << 1));
+
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS11, (dtcvval << SDMMC_CDN_DTCV) |
+	(sdclkfsval << SDMMC_CDN_SDCLKFS) | (1 << SDMMC_CDN_ICE) | (1 << SDMMC_CDN_SDCE));
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS13, UINT_MAX);
+}
+
+int cdns_set_ios(unsigned int clk, unsigned int width)
+{
+
+	switch (width) {
+	case MMC_BUS_WIDTH_1:
+		mmio_write_32((MMC_REG_BASE + SDHC_CDNS_SRS10), LEDC_OFF);
+		break;
+	case MMC_BUS_WIDTH_4:
+		mmio_write_32((MMC_REG_BASE + SDHC_CDNS_SRS10), DTW_4BIT);
+		break;
+	case MMC_BUS_WIDTH_8:
+		mmio_write_32((MMC_REG_BASE + SDHC_CDNS_SRS10), EDTW_8BIT);
+		break;
+	default:
+		assert(0);
+		break;
+	}
+	cdns_host_set_clk(clk);
+
+	return 0;
+}
+
+int cdns_sdmmc_write_sd_host_reg(uint32_t addr, uint32_t data)
+{
+	uint32_t value = 0;
+
+	value = mmio_read_32(addr);
+	value &= ~SDHC_REG_MASK;
+	value |= data;
+	mmio_write_32(addr, value);
+	value = mmio_read_32(addr);
+	if (value != data) {
+		ERROR("SD host address is not set properly\n");
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+int cdns_write(int lba, uintptr_t buf, size_t size)
+{
+	return 0;
+}
+
+static int cdns_init_hrs_io(struct cdns_sdmmc_combo_phy *combo_phy_reg,
+	struct cdns_sdmmc_sdhc *sdhc_reg)
+{
+	uint32_t value = 0;
+	int ret = 0;
+
+	/* program HRS09, register 42 */
+	value = (SDHC_RDDATA_EN(sdhc_reg->sdhc_rddata_en))
+		| (SDHC_RDCMD_EN(sdhc_reg->sdhc_rdcmd_en))
+		| (SDHC_EXTENDED_WR_MODE(sdhc_reg->sdhc_extended_wr_mode))
+		| (SDHC_EXTENDED_RD_MODE(sdhc_reg->sdhc_extended_rd_mode));
+	ret = cdns_sdmmc_write_sd_host_reg(MMC_REG_BASE + SDHC_CDNS_HRS09, value);
+	if (ret != 0) {
+		ERROR("Program HRS09 failed");
+		return ret;
+	}
+
+	/* program HRS10, register 43 */
+	value = (SDHC_HCSDCLKADJ(sdhc_reg->sdhc_hcsdclkadj));
+	ret = cdns_sdmmc_write_sd_host_reg(MMC_REG_BASE + SDHC_CDNS_HRS10, value);
+	if (ret != 0) {
+		ERROR("Program HRS10 failed");
+		return ret;
+	}
+
+	/* program HRS16, register 48 */
+	value = (SDHC_WRDATA1_SDCLK_DLY(sdhc_reg->sdhc_wrdata1_sdclk_dly))
+		| (SDHC_WRDATA0_SDCLK_DLY(sdhc_reg->sdhc_wrdata0_sdclk_dly))
+		| (SDHC_WRCMD1_SDCLK_DLY(sdhc_reg->sdhc_wrcmd1_sdclk_dly))
+		| (SDHC_WRCMD0_SDCLK_DLY(sdhc_reg->sdhc_wrcmd0_sdclk_dly))
+		| (SDHC_WRDATA1_DLY(sdhc_reg->sdhc_wrdata1_dly))
+		| (SDHC_WRDATA0_DLY(sdhc_reg->sdhc_wrdata0_dly))
+		| (SDHC_WRCMD1_DLY(sdhc_reg->sdhc_wrcmd1_dly))
+		| (SDHC_WRCMD0_DLY(sdhc_reg->sdhc_wrcmd0_dly));
+	ret = cdns_sdmmc_write_sd_host_reg(MMC_REG_BASE + SDHC_CDNS_HRS16, value);
+	if (ret != 0) {
+		ERROR("Program HRS16 failed");
+		return ret;
+	}
+
+	/* program HRS07, register 40 */
+	value = (SDHC_RW_COMPENSATE(sdhc_reg->sdhc_rw_compensate))
+		| (SDHC_IDELAY_VAL(sdhc_reg->sdhc_idelay_val));
+	ret = cdns_sdmmc_write_sd_host_reg(MMC_REG_BASE + SDHC_CDNS_HRS07, value);
+	if (ret != 0) {
+		ERROR("Program HRS07 failed");
+		return ret;
+	}
+
+	return ret;
+}
+
+static int cdns_hc_set_clk(struct cdns_sdmmc_params *cdn_sdmmc_dev_mode_params)
+{
+	uint32_t ret = 0;
+	uint32_t dtcvval, sdclkfsval;
+
+	dtcvval = DTC_VAL;
+	sdclkfsval = 0;
+
+	if ((cdn_sdmmc_dev_mode_params->cdn_sdmmc_dev_mode == SD_DS) ||
+		(cdn_sdmmc_dev_mode_params->cdn_sdmmc_dev_mode == SD_UHS_SDR12) ||
+		(cdn_sdmmc_dev_mode_params->cdn_sdmmc_dev_mode == EMMC_SDR_BC)) {
+		sdclkfsval = 4;
+	} else if ((cdn_sdmmc_dev_mode_params->cdn_sdmmc_dev_mode == SD_HS) ||
+		(cdn_sdmmc_dev_mode_params->cdn_sdmmc_dev_mode == SD_UHS_SDR25) ||
+		(cdn_sdmmc_dev_mode_params->cdn_sdmmc_dev_mode == SD_UHS_DDR50) ||
+		(cdn_sdmmc_dev_mode_params->cdn_sdmmc_dev_mode == EMMC_SDR)) {
+		sdclkfsval = 2;
+	} else if ((cdn_sdmmc_dev_mode_params->cdn_sdmmc_dev_mode == SD_UHS_SDR50) ||
+		(cdn_sdmmc_dev_mode_params->cdn_sdmmc_dev_mode == EMMC_DDR) ||
+		(cdn_sdmmc_dev_mode_params->cdn_sdmmc_dev_mode == EMMC_HS400) ||
+		(cdn_sdmmc_dev_mode_params->cdn_sdmmc_dev_mode == EMMC_HS400es)) {
+		sdclkfsval = 1;
+	} else if ((cdn_sdmmc_dev_mode_params->cdn_sdmmc_dev_mode == SD_UHS_SDR104) ||
+		(cdn_sdmmc_dev_mode_params->cdn_sdmmc_dev_mode == EMMC_HS200)) {
+		sdclkfsval = 0;
+	}
+
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS11, 0);
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS11, (dtcvval << SDMMC_CDN_DTCV) |
+		(sdclkfsval << SDMMC_CDN_SDCLKFS) | (1 << SDMMC_CDN_ICE));
+	ret = cdns_wait_ics(5000, MMC_REG_BASE + SDHC_CDNS_SRS11);
+	if (ret != 0U) {
+		ERROR("Waiting for SDMMC_CDN_ICS timed out\n");
+		return ret;
+	}
+
+	/* Enable DLL reset */
+	mmio_write_32((MMC_REG_BASE + SDHC_CDNS_HRS09), mmio_read_32(MMC_REG_BASE
+			+ SDHC_CDNS_HRS09) & ~SDHC_DLL_RESET_MASK);
+	/* Set extended_wr_mode */
+	mmio_write_32((MMC_REG_BASE + SDHC_CDNS_HRS09),
+	(mmio_read_32(MMC_REG_BASE + SDHC_CDNS_HRS09) &	SDHC_EXTENDED_WR_MODE_MASK) |
+			(1 << EXTENDED_WR_MODE));
+	/* Release DLL reset */
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_HRS09, mmio_read_32(MMC_REG_BASE
+			+ SDHC_CDNS_HRS09) | 1);
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_HRS09, mmio_read_32(MMC_REG_BASE
+			+ SDHC_CDNS_HRS09) | (3 << RDCMD_EN));
+	do {
+		mmio_read_32(MMC_REG_BASE + SDHC_CDNS_HRS09);
+	} while (~mmio_read_32(MMC_REG_BASE + SDHC_CDNS_HRS09) & (1 << 1));
+
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS11, (dtcvval << SDMMC_CDN_DTCV) |
+		(sdclkfsval << SDMMC_CDN_SDCLKFS) | (1 << SDMMC_CDN_ICE) | (1 << SDMMC_CDN_SDCE));
+
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS13, UINT_MAX);
+	return 0;
+}
+
+int cdns_reset(void)
+{
+	uint32_t data = 0;
+	uint32_t count = 0;
+	uint32_t value = 0;
+
+	value = mmio_read_32(MMC_REG_BASE + SDHC_CDNS_SRS11);
+	value &= ~(0xFFFF);
+	value |= 0x0;
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS11, value);
+	udelay(500);
+
+	/* Software reset */
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_HRS00, 1);
+	/* Wait status command response ready */
+	do {
+		data = mmio_read_32(MMC_REG_BASE + SDHC_CDNS_HRS00);
+		count++;
+		if (count >= 5000) {
+			return -ETIMEDOUT;
+		}
+	/* Wait for HRS00.SWR */
+	} while ((data & 1) == 1);
+
+	/* Step 1, switch on DLL_RESET */
+	value = mmio_read_32(MMC_REG_BASE + SDHC_CDNS_HRS09);
+	value &= ~SDHC_PHY_SW_RESET;
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_HRS09, value);
+
+	return 0;
+}
+
+int cdns_sd_host_init(struct cdns_sdmmc_combo_phy *mmc_combo_phy_reg,
+struct cdns_sdmmc_sdhc *mmc_sdhc_reg)
+{
+	int ret = 0;
+
+	ret = cdns_reset();
+	if (ret != 0) {
+		ERROR("SD host controller reset failed");
+		return ret;
+	}
+
+	ret = cdns_program_phy_reg(&sdmmc_combo_phy_reg, &sdmmc_sdhc_reg);
+	if (ret != 0) {
+		ERROR("Program phy reg init failed");
+		return ret;
+	}
+
+	ret = cdns_init_hrs_io(&sdmmc_combo_phy_reg, &sdmmc_sdhc_reg);
+	if (ret != 0) {
+		ERROR("HRS registers init failed");
+		return ret;
+	}
+
+	ret = cdns_sd_card_detect();
+	if (ret != 0) {
+		ERROR("SD card not detected");
+		return ret;
+	}
+
+	ret = cdns_vol_reset();
+	if (ret != 0) {
+		ERROR("eMMC card reset failed");
+		return ret;
+	}
+
+	ret = cdns_hc_set_clk(&cdns_params);
+	if (ret != 0) {
+		ERROR("hc set clk failed");
+		return ret;
+	}
+
+	return 0;
+}
+
+void cdns_srs10_value_toggle(uint8_t write_val, uint8_t prev_val)
+{
+	uint32_t data_op = 0U;
+
+	data_op = mmio_read_32(MMC_REG_BASE + SDHC_CDNS_SRS10);
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS10, (data_op & (prev_val << 0)));
+	mmio_read_32(MMC_REG_BASE + SDHC_CDNS_SRS10);
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS10, data_op | (write_val << 0));
+}
+
+void cdns_srs11_srs15_config(uint32_t srs11_val, uint32_t srs15_val)
+{
+	uint32_t data = 0U;
+
+	data = mmio_read_32(MMC_REG_BASE + SDHC_CDNS_SRS11);
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS11, (data | srs11_val));
+	data = mmio_read_32(MMC_REG_BASE + SDHC_CDNS_SRS15);
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS15, (data | srs15_val));
+}
+
+int cdns_send_cmd(struct mmc_cmd *cmd)
+{
+	uint32_t op = 0, ret = 0;
+	uint8_t write_value = 0, prev_val = 0;
+	uint32_t value;
+	int32_t timeout;
+	uint32_t cmd_indx;
+	uint32_t srs15_val = 0, srs11_val = 0;
+	uint32_t status_check = 0;
+
+	assert(cmd);
+	cmd_indx = (cmd->cmd_idx) << COM_IDX;
+
+	if (data_cmd) {
+		switch (cmd->cmd_idx) {
+		case SD_SWITCH:
+			op = DATA_PRESENT;
+			write_value = ADMA2_32 | DT_WIDTH;
+			prev_val = ADMA2_32 | DT_WIDTH;
+			cdns_srs10_value_toggle(write_value, prev_val);
+			srs11_val = READ_CLK | SDMMC_CDN_ICE | SDMMC_CDN_ICS | SDMMC_CDN_SDCE;
+			srs15_val = BIT_AD_64 | HV4E | V18SE;
+			cdns_srs11_srs15_config(srs11_val, srs15_val);
+			break;
+
+		case SD_WRITE_SINGLE_BLOCK:
+		case SD_READ_SINGLE_BLOCK:
+			op = DATA_PRESENT;
+			write_value = ADMA2_32 | HS_EN | DT_WIDTH | LEDC;
+			prev_val = ADMA2_32 | HS_EN | DT_WIDTH;
+			cdns_srs10_value_toggle(write_value, prev_val);
+			srs15_val = PVE | BIT_AD_64 | HV4E | SDR104_MODE | V18SE;
+			srs11_val = READ_CLK | SDMMC_CDN_ICE | SDMMC_CDN_ICS | SDMMC_CDN_SDCE;
+			cdns_srs11_srs15_config(srs11_val, srs15_val);
+			mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS00, SAAR);
+			break;
+
+		case SD_WRITE_MULTIPLE_BLOCK:
+		case SD_READ_MULTIPLE_BLOCK:
+			op = DATA_PRESENT | AUTO_CMD_EN | MULTI_BLK_READ;
+			write_value = ADMA2_32 | HS_EN | DT_WIDTH | LEDC;
+			prev_val = ADMA2_32 | HS_EN | DT_WIDTH;
+			cdns_srs10_value_toggle(write_value, prev_val);
+			srs15_val = PVE | BIT_AD_64 | HV4E | SDR104_MODE | V18SE;
+			srs11_val = READ_CLK | SDMMC_CDN_ICE | SDMMC_CDN_ICS | SDMMC_CDN_SDCE;
+			cdns_srs11_srs15_config(srs11_val, srs15_val);
+			mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS00, SAAR);
+			break;
+
+		case SD_APP_SEND_SCR:
+			op = DATA_PRESENT;
+			write_value = ADMA2_32 | LEDC;
+			prev_val = LEDC;
+			cdns_srs10_value_toggle(write_value, prev_val);
+			srs15_val = BIT_AD_64 | HV4E | V18SE;
+			srs11_val = READ_CLK | SDMMC_CDN_ICE | SDMMC_CDN_ICS | SDMMC_CDN_SDCE;
+			cdns_srs11_srs15_config(srs11_val, srs15_val);
+			break;
+
+		case SD_SEND_IF_COND:
+			op = DATA_PRESENT | CMD_IDX_CHK_ENABLE;
+			write_value = LEDC;
+			prev_val = 0x0;
+			cdns_srs10_value_toggle(write_value, prev_val);
+			srs15_val = HV4E;
+			srs11_val = READ_CLK | SDMMC_CDN_ICE | SDMMC_CDN_ICS | SDMMC_CDN_SDCE;
+			cdns_srs11_srs15_config(srs11_val, srs15_val);
+			break;
+
+		default:
+			write_value = LEDC;
+			prev_val = 0x0;
+			cdns_srs10_value_toggle(write_value, prev_val);
+			op = 0;
+			break;
+		}
+	} else {
+		switch (cmd->cmd_idx) {
+		case SD_GO_IDLE_STATE:
+			write_value = LEDC;
+			prev_val = 0x0;
+			cdns_srs10_value_toggle(write_value, prev_val);
+			srs15_val = HV4E;
+			srs11_val = SDMMC_CDN_ICE | SDMMC_CDN_ICS | SDMMC_CDN_SDCE;
+			cdns_srs11_srs15_config(srs11_val, srs15_val);
+			break;
+
+		case SD_ALL_SEND_CID:
+			write_value = LEDC;
+			prev_val = 0x0;
+			cdns_srs10_value_toggle(write_value, prev_val);
+			srs15_val = HV4E | V18SE;
+			srs11_val = SDMMC_CDN_ICE | SDMMC_CDN_ICS | SDMMC_CDN_SDCE;
+			cdns_srs11_srs15_config(srs11_val, srs15_val);
+			break;
+
+		case SD_SEND_IF_COND:
+			op = CMD_IDX_CHK_ENABLE;
+			write_value = LEDC;
+			prev_val = 0x0;
+			cdns_srs10_value_toggle(write_value, prev_val);
+			srs15_val = HV4E;
+			srs11_val = READ_CLK | SDMMC_CDN_ICE | SDMMC_CDN_ICS | SDMMC_CDN_SDCE;
+			cdns_srs11_srs15_config(srs11_val, srs15_val);
+			break;
+
+		case SD_STOP_TRANSMISSION:
+			op = CMD_STOP_ABORT_CMD;
+			break;
+
+		case SD_SEND_STATUS:
+			break;
+
+		case 1:
+			cmd->cmd_arg = 0;
+			break;
+
+		case SD_SELECT_CARD:
+			op = MULTI_BLK_READ;
+			break;
+
+		case SD_APP_CMD:
+		default:
+			write_value = LEDC;
+			prev_val = 0x0;
+			cdns_srs10_value_toggle(write_value, prev_val);
+			op = 0;
+			break;
+		}
+	}
+
+	switch (cmd->resp_type) {
+	case MMC_RESPONSE_NONE:
+		op |= CMD_READ | MULTI_BLK_READ | DMA_ENABLED | BLK_CNT_EN;
+		break;
+
+	case MMC_RESPONSE_R2:
+		op |= CMD_READ | MULTI_BLK_READ | DMA_ENABLED | BLK_CNT_EN |
+			RES_TYPE_SEL_136 | CMD_CHECK_RESP_CRC;
+		break;
+
+	case MMC_RESPONSE_R3:
+		op |= CMD_READ | MULTI_BLK_READ | DMA_ENABLED | BLK_CNT_EN |
+			RES_TYPE_SEL_48;
+		break;
+
+	case MMC_RESPONSE_R1:
+		if ((cmd->cmd_idx == SD_WRITE_SINGLE_BLOCK) || (cmd->cmd_idx
+			== SD_WRITE_MULTIPLE_BLOCK)) {
+			op |= DMA_ENABLED | BLK_CNT_EN | RES_TYPE_SEL_48
+			| CMD_CHECK_RESP_CRC | CMD_IDX_CHK_ENABLE;
+		} else {
+			op |= DMA_ENABLED | BLK_CNT_EN | CMD_READ | RES_TYPE_SEL_48
+			| CMD_CHECK_RESP_CRC | CMD_IDX_CHK_ENABLE;
+		}
+		break;
+
+	default:
+		op |= DMA_ENABLED | BLK_CNT_EN | CMD_READ | MULTI_BLK_READ |
+			RES_TYPE_SEL_48 | CMD_CHECK_RESP_CRC | CMD_IDX_CHK_ENABLE;
+		break;
+	}
+
+	timeout = TIMEOUT;
+	do {
+		udelay(100);
+		ret = cdns_busy();
+		if (--timeout <= 0) {
+			udelay(50);
+			panic();
+		}
+	} while (ret);
+
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS12, UINT_MAX);
+
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS02, cmd->cmd_arg);
+	mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS14, 0x00000000);
+	if (cmd_indx == 1) {
+		mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS03, SDHC_CDNS_SRS03_VALUE);
+	} else {
+		mmio_write_32(MMC_REG_BASE + SDHC_CDNS_SRS03, op | cmd_indx);
+	}
+
+	timeout = TIMEOUT;
+	do {
+		udelay(500);
+		value = mmio_read_32(MMC_REG_BASE + SDHC_CDNS_SRS12);
+	} while (((value & (INT_CMD_DONE | ERROR_INT)) == 0) && (timeout-- > 0));
+
+	timeout = TIMEOUT;
+
+	if (data_cmd) {
+		data_cmd = false;
+		do {
+			udelay(250);
+		} while (((value & TRAN_COMP) == 0) && (timeout-- > 0));
+	}
+
+	status_check = value & SRS12_ERR_MASK;
+	if (status_check != 0U) {
+		ERROR("SD host controller send command failed, SRS12 = %x\n", status_check);
+		return -1;
+	}
+
+	if ((op & RES_TYPE_SEL_48) || (op & RES_TYPE_SEL_136)) {
+		cmd->resp_data[0] = mmio_read_32(MMC_REG_BASE + SDHC_CDNS_SRS04);
+		if (op & RES_TYPE_SEL_136) {
+			cmd->resp_data[1] = mmio_read_32(MMC_REG_BASE + SDHC_CDNS_SRS05);
+			cmd->resp_data[2] = mmio_read_32(MMC_REG_BASE + SDHC_CDNS_SRS06);
+			cmd->resp_data[3] = mmio_read_32(MMC_REG_BASE + SDHC_CDNS_SRS07);
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/cadence/nand/cdns_nand.c b/drivers/cadence/nand/cdns_nand.c
new file mode 100644
index 0000000..5a66262
--- /dev/null
+++ b/drivers/cadence/nand/cdns_nand.c
@@ -0,0 +1,435 @@
+/*
+ * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/cadence/cdns_nand.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/utils.h>
+#include <platform_def.h>
+
+/* NAND flash device information struct */
+static cnf_dev_info_t dev_info;
+
+/* Scratch buffers for read and write operations */
+static uint8_t scratch_buff[PLATFORM_MTD_MAX_PAGE_SIZE];
+
+/* Wait for controller to be in idle state */
+static inline void cdns_nand_wait_idle(void)
+{
+	uint32_t reg = 0U;
+
+	do {
+		udelay(CNF_DEF_DELAY_US);
+		reg = mmio_read_32(CNF_CMDREG(CTRL_STATUS));
+	} while (CNF_GET_CTRL_BUSY(reg) != 0U);
+}
+
+/* Wait for given thread to be in ready state */
+static inline void cdns_nand_wait_thread_ready(uint8_t thread_id)
+{
+	uint32_t reg = 0U;
+
+	do {
+		udelay(CNF_DEF_DELAY_US);
+		reg = mmio_read_32(CNF_CMDREG(TRD_STATUS));
+		reg &= (1U << (uint32_t)thread_id);
+	} while (reg != 0U);
+}
+
+/* Check if the last operation/command in selected thread is completed */
+static int cdns_nand_last_opr_status(uint8_t thread_id)
+{
+	uint8_t nthreads = 0U;
+	uint32_t reg = 0U;
+
+	/* Get number of threads */
+	reg = mmio_read_32(CNF_CTRLPARAM(FEATURE));
+	nthreads = CNF_GET_NTHREADS(reg);
+
+	if (thread_id > nthreads) {
+		ERROR("%s: Invalid thread ID\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Select thread */
+	mmio_write_32(CNF_CMDREG(CMD_STAT_PTR), (uint32_t)thread_id);
+
+	uint32_t err_mask = CNF_ECMD | CNF_EECC | CNF_EDEV | CNF_EDQS | CNF_EFAIL |
+				CNF_EBUS | CNF_EDI | CNF_EPAR | CNF_ECTX | CNF_EPRO;
+
+	do {
+		udelay(CNF_DEF_DELAY_US * 2);
+		reg = mmio_read_32(CNF_CMDREG(CMD_STAT));
+	} while ((reg & CNF_CMPLT) == 0U);
+
+	/* The last operation has completed; make sure no error bits are set */
+	if ((reg & err_mask) != 0U) {
+		ERROR("%s, CMD_STATUS:0x%x\n", __func__, reg);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/* Set feature command */
+int cdns_nand_set_feature(uint8_t feat_addr, uint8_t feat_val, uint8_t thread_id)
+{
+	/* Wait for thread to be ready */
+	cdns_nand_wait_thread_ready(thread_id);
+
+	/* Set feature address */
+	mmio_write_32(CNF_CMDREG(CMD_REG1), (uint32_t)feat_addr);
+	/* Set feature value */
+	mmio_write_32(CNF_CMDREG(CMD_REG2), (uint32_t)feat_val);
+
+	/* Set feature command */
+	uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);
+
+	reg |= (thread_id << CNF_CMDREG0_TRD);
+	reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
+	reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
+	reg |= (CNF_CT_SET_FEATURE << CNF_CMDREG0_CMD);
+	mmio_write_32(CNF_CMDREG(CMD_REG0), reg);
+
+	return cdns_nand_last_opr_status(thread_id);
+}
+
+/* Reset command to the selected device */
+int cdns_nand_reset(uint8_t thread_id)
+{
+	/* Operation is executed in selected thread */
+	cdns_nand_wait_thread_ready(thread_id);
+
+	/* Select memory */
+	mmio_write_32(CNF_CMDREG(CMD_REG4), (CNF_DEF_DEVICE << CNF_CMDREG4_MEM));
+
+	/* Issue reset command */
+	uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);
+
+	reg |= (thread_id << CNF_CMDREG0_TRD);
+	reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
+	reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
+	reg |= (CNF_CT_RESET_ASYNC << CNF_CMDREG0_CMD);
+	mmio_write_32(CNF_CMDREG(CMD_REG0), reg);
+
+	return cdns_nand_last_opr_status(thread_id);
+}
+
+/* Set operation work mode */
+static void cdns_nand_set_opr_mode(uint8_t opr_mode)
+{
+	/* Wait for controller to be in idle state */
+	cdns_nand_wait_idle();
+
+	/* Reset DLL PHY */
+	uint32_t reg = mmio_read_32(CNF_MINICTRL(DLL_PHY_CTRL));
+
+	reg &= ~(1 << CNF_DLL_PHY_RST_N);
+	mmio_write_32(CNF_MINICTRL(DLL_PHY_CTRL), reg);
+
+	if (opr_mode == CNF_OPR_WORK_MODE_SDR) {
+		/* Combo PHY Control Timing Block register settings */
+		mmio_write_32(CP_CTB(CTRL_REG), CP_CTRL_REG_SDR);
+		mmio_write_32(CP_CTB(TSEL_REG), CP_TSEL_REG_SDR);
+
+		/* Combo PHY DLL register settings */
+		mmio_write_32(CP_DLL(DQ_TIMING_REG), CP_DQ_TIMING_REG_SDR);
+		mmio_write_32(CP_DLL(DQS_TIMING_REG), CP_DQS_TIMING_REG_SDR);
+		mmio_write_32(CP_DLL(GATE_LPBK_CTRL_REG), CP_GATE_LPBK_CTRL_REG_SDR);
+		mmio_write_32(CP_DLL(MASTER_CTRL_REG), CP_DLL_MASTER_CTRL_REG_SDR);
+
+		/* Async mode timing settings */
+		mmio_write_32(CNF_MINICTRL(ASYNC_TOGGLE_TIMINGS),
+								(2 << CNF_ASYNC_TIMINGS_TRH) |
+								(4 << CNF_ASYNC_TIMINGS_TRP) |
+								(2 << CNF_ASYNC_TIMINGS_TWH) |
+								(4 << CNF_ASYNC_TIMINGS_TWP));
+
+		/* Set extended read and write mode */
+		reg |= (1 << CNF_DLL_PHY_EXT_RD_MODE);
+		reg |= (1 << CNF_DLL_PHY_EXT_WR_MODE);
+
+		/* Set operation work mode in common settings */
+		uint32_t data = mmio_read_32(CNF_MINICTRL(CMN_SETTINGS));
+
+		data |= (CNF_OPR_WORK_MODE_SDR << CNF_CMN_SETTINGS_OPR);
+		mmio_write_32(CNF_MINICTRL(CMN_SETTINGS), data);
+
+	} else if (opr_mode == CNF_OPR_WORK_MODE_NVDDR) {
+		; /* ToDo: add DDR mode settings also once available on SIMICS */
+	} else {
+		;
+	}
+
+	reg |= (1 << CNF_DLL_PHY_RST_N);
+	mmio_write_32(CNF_MINICTRL(DLL_PHY_CTRL), reg);
+}
+
+/* Data transfer configuration */
+static void cdns_nand_transfer_config(void)
+{
+	/* Wait for controller to be in idle state */
+	cdns_nand_wait_idle();
+
+	/* Configure data transfer parameters */
+	mmio_write_32(CNF_CTRLCFG(TRANS_CFG0), 1);
+
+	/* ECC is disabled */
+	mmio_write_32(CNF_CTRLCFG(ECC_CFG0), 0);
+
+	/* DMA burst select */
+	mmio_write_32(CNF_CTRLCFG(DMA_SETTINGS),
+					(CNF_DMA_BURST_SIZE_MAX << CNF_DMA_SETTINGS_BURST) |
+					(1 << CNF_DMA_SETTINGS_OTE));
+
+	/* Enable pre-fetching for 1K */
+	mmio_write_32(CNF_CTRLCFG(FIFO_TLEVEL),
+					(CNF_DMA_PREFETCH_SIZE << CNF_FIFO_TLEVEL_POS) |
+					(CNF_DMA_PREFETCH_SIZE << CNF_FIFO_TLEVEL_DMA_SIZE));
+
+	/* Select access type */
+	mmio_write_32(CNF_CTRLCFG(MULTIPLANE_CFG), 0);
+	mmio_write_32(CNF_CTRLCFG(CACHE_CFG), 0);
+}
+
+/* Update the nand flash device info */
+static int cdns_nand_update_dev_info(void)
+{
+	uint32_t reg = 0U;
+
+	/* Read the device type and number of LUNs */
+	reg = mmio_read_32(CNF_CTRLPARAM(DEV_PARAMS0));
+	dev_info.type = CNF_GET_DEV_TYPE(reg);
+	if (dev_info.type == CNF_DT_UNKNOWN) {
+		ERROR("%s: device type unknown\n", __func__);
+		return -ENXIO;
+	}
+	dev_info.nluns = CNF_GET_NLUNS(reg);
+
+	/* Pages per block */
+	reg = mmio_read_32(CNF_CTRLCFG(DEV_LAYOUT));
+	dev_info.npages_per_block = CNF_GET_NPAGES_PER_BLOCK(reg);
+
+	/* Sector size and last sector size */
+	reg = mmio_read_32(CNF_CTRLCFG(TRANS_CFG1));
+	dev_info.sector_size = CNF_GET_SCTR_SIZE(reg);
+	dev_info.last_sector_size = CNF_GET_LAST_SCTR_SIZE(reg);
+
+	/* Page size and spare size */
+	reg = mmio_read_32(CNF_CTRLPARAM(DEV_AREA));
+	dev_info.page_size = CNF_GET_PAGE_SIZE(reg);
+	dev_info.spare_size = CNF_GET_SPARE_SIZE(reg);
+
+	/* Device blocks per LUN */
+	dev_info.nblocks_per_lun = mmio_read_32(CNF_CTRLPARAM(DEV_BLOCKS_PLUN));
+
+	/* Calculate block size and total device size */
+	dev_info.block_size = (dev_info.npages_per_block * dev_info.page_size);
+	dev_info.total_size = (dev_info.block_size * dev_info.nblocks_per_lun *
+							dev_info.nluns);
+
+	VERBOSE("CNF params: page %d, spare %d, block %d, total %lld\n",
+				dev_info.page_size, dev_info.spare_size,
+				dev_info.block_size, dev_info.total_size);
+
+	return 0;
+}
+
+/* NAND Flash Controller/Host initialization */
+int cdns_nand_host_init(void)
+{
+	uint32_t reg = 0U;
+	int ret = 0;
+
+	do {
+		/* Read controller status register for init complete */
+		reg = mmio_read_32(CNF_CMDREG(CTRL_STATUS));
+	} while (CNF_GET_INIT_COMP(reg) == 0);
+
+	ret = cdns_nand_update_dev_info();
+	if (ret != 0) {
+		return ret;
+	}
+
+	INFO("CNF: device discovery process completed, device type %d\n",
+			dev_info.type);
+
+	/* Enable data integrity, enable CRC and parity */
+	reg = mmio_read_32(CNF_DI(CONTROL));
+	reg |= (1 << CNF_DI_PAR_EN);
+	reg |= (1 << CNF_DI_CRC_EN);
+	mmio_write_32(CNF_DI(CONTROL), reg);
+
+	/* Status polling mode, device control and status register */
+	cdns_nand_wait_idle();
+	reg = mmio_read_32(CNF_CTRLCFG(DEV_STAT));
+	reg = reg & ~1;
+	mmio_write_32(CNF_CTRLCFG(DEV_STAT), reg);
+
+	/* Set operation work mode */
+	cdns_nand_set_opr_mode(CNF_OPR_WORK_MODE_SDR);
+
+	/* Set data transfer configuration parameters */
+	cdns_nand_transfer_config();
+
+	return 0;
+}
+
+/* erase: Block erase command */
+int cdns_nand_erase(uint32_t offset, uint32_t size)
+{
+	/* Determine the starting block offset, i.e. the row address */
+	uint32_t row_address = dev_info.npages_per_block * offset;
+
+	/* Wait for thread to be in ready state */
+	cdns_nand_wait_thread_ready(CNF_DEF_TRD);
+
+	/* Set row address */
+	mmio_write_32(CNF_CMDREG(CMD_REG1), row_address);
+
+	/* Operation bank number */
+	mmio_write_32(CNF_CMDREG(CMD_REG4), (CNF_DEF_DEVICE << CNF_CMDREG4_MEM));
+
+	/* Block erase command */
+	uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);
+
+	reg |= (CNF_DEF_TRD << CNF_CMDREG0_TRD);
+	reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
+	reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
+	reg |= (CNF_CT_ERASE << CNF_CMDREG0_CMD);
+	reg |= (((size-1) & 0xFF) << CNF_CMDREG0_CMD);
+	mmio_write_32(CNF_CMDREG(CMD_REG0), reg);
+
+	/* Wait for erase operation to complete */
+	return cdns_nand_last_opr_status(CNF_DEF_TRD);
+}
+
+/* io mtd functions */
+int cdns_nand_init_mtd(unsigned long long *size, unsigned int *erase_size)
+{
+	*size = dev_info.total_size;
+	*erase_size = dev_info.block_size;
+
+	return 0;
+}
+
+/* NAND Flash page read */
+static int cdns_nand_read_page(uint32_t block, uint32_t page, uintptr_t buffer)
+{
+	/* Wait for thread to be ready */
+	cdns_nand_wait_thread_ready(CNF_DEF_TRD);
+
+	/* Select device */
+	mmio_write_32(CNF_CMDREG(CMD_REG4),
+					(CNF_DEF_DEVICE << CNF_CMDREG4_MEM));
+
+	/* Set host memory address for DMA transfers */
+	mmio_write_32(CNF_CMDREG(CMD_REG2), (buffer & 0xFFFF));
+	mmio_write_32(CNF_CMDREG(CMD_REG3), ((buffer >> 32) & 0xFFFF));
+
+	/* Set row address */
+	uint32_t row_address = 0U;
+
+	row_address |= ((page & 0x3F) | (block << 6));
+	mmio_write_32(CNF_CMDREG(CMD_REG1), row_address);
+
+	/* Page read command */
+	uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);
+
+	reg |= (CNF_DEF_TRD << CNF_CMDREG0_TRD);
+	reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
+	reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
+	reg |= (CNF_DMA_MASTER_SEL << CNF_CMDREG0_DMA);
+	reg |= (CNF_CT_PAGE_READ << CNF_CMDREG0_CMD);
+	reg |= (((CNF_READ_SINGLE_PAGE-1) & 0xFF) << CNF_CMDREG0_CMD);
+	mmio_write_32(CNF_CMDREG(CMD_REG0), reg);
+
+	/* Wait for read operation to complete */
+	if (cdns_nand_last_opr_status(CNF_DEF_TRD)) {
+		ERROR("%s: Page read failed\n", __func__);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+int cdns_nand_read(unsigned int offset, uintptr_t buffer, size_t length,
+					size_t *out_length)
+{
+	uint32_t block = offset / dev_info.block_size;
+	uint32_t end_block = (offset + length - 1U) / dev_info.block_size;
+	uint32_t page_start = (offset % dev_info.block_size) / dev_info.page_size;
+	uint32_t start_offset = offset % dev_info.page_size;
+	uint32_t nb_pages = dev_info.block_size / dev_info.page_size;
+	uint32_t bytes_read = 0U;
+	uint32_t page = 0U;
+	int result = 0;
+
+	VERBOSE("CNF: block %u-%u, page_start %u, len %zu, offset %u\n",
+				block, end_block, page_start, length, offset);
+
+	if ((offset >= dev_info.total_size) ||
+		(offset + length-1 >= dev_info.total_size) ||
+		(length == 0U)) {
+		ERROR("CNF: Invalid read parameters\n");
+		return -EINVAL;
+	}
+
+	*out_length = 0UL;
+
+	while (block <= end_block) {
+		for (page = page_start; page < nb_pages; page++) {
+			if ((start_offset != 0U) || (length < dev_info.page_size)) {
+				/* Partial page read */
+				result = cdns_nand_read_page(block, page,
+				(uintptr_t)scratch_buff);
+				if (result != 0) {
+					return result;
+				}
+
+				bytes_read = MIN((size_t)(dev_info.page_size - start_offset),
+								length);
+
+				memcpy((uint8_t *)buffer, scratch_buff + start_offset,
+						bytes_read);
+				start_offset = 0U;
+			} else {
+				/* Full page read */
+				result = cdns_nand_read_page(block, page,
+				(uintptr_t)scratch_buff);
+				if (result != 0) {
+					return result;
+				}
+
+				bytes_read = dev_info.page_size;
+				memcpy((uint8_t *)buffer, scratch_buff, bytes_read);
+			}
+
+			length -= bytes_read;
+			buffer += bytes_read;
+			*out_length += bytes_read;
+
+			/* All the bytes have been read */
+			if (length == 0U) {
+				break;
+			}
+
+			udelay(CNF_READ_INT_DELAY_US);
+		} /* for */
+
+		page_start = 0U;
+		block++;
+	} /* while */
+
+	return 0;
+}
diff --git a/drivers/nxp/trdc/imx_trdc.c b/drivers/nxp/trdc/imx_trdc.c
new file mode 100644
index 0000000..ab66585
--- /dev/null
+++ b/drivers/nxp/trdc/imx_trdc.c
@@ -0,0 +1,365 @@
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <drivers/nxp/trdc/imx_trdc.h>
+#include <lib/mmio.h>
+
+int trdc_mda_set_cpu(uintptr_t trdc_base, uint32_t mda_inst,
+		     uint32_t mda_reg, uint8_t sa, uint8_t dids,
+		     uint8_t did, uint8_t pe, uint8_t pidm, uint8_t pid)
+{
+	uint32_t val = mmio_read_32(trdc_base + MDAC_W_X(mda_inst, mda_reg));
+	/* invalid: config non-cpu master with cpu config format. */
+	if ((val & MDA_DFMT) != 0U) {
+		return -EINVAL;
+	}
+
+	val = MDA_VLD | MDA_DFMT0_DID(pid) | MDA_DFMT0_PIDM(pidm) | MDA_DFMT0_PE(pe) |
+	      MDA_DFMT0_SA(sa) | MDA_DFMT0_DIDS(dids) | MDA_DFMT0_DID(did);
+
+	mmio_write_32(trdc_base + MDAC_W_X(mda_inst, mda_reg), val);
+
+	return 0;
+}
+
+int trdc_mda_set_noncpu(uintptr_t trdc_base, uint32_t mda_inst,
+			bool did_bypass, uint8_t sa, uint8_t pa,
+			uint8_t did)
+{
+	uint32_t val = mmio_read_32(trdc_base + MDAC_W_X(mda_inst, 0));
+
+	/* invalid: config cpu master with non-cpu config format. */
+	if ((val & MDA_DFMT) == 0U) {
+		return -EINVAL;
+	}
+
+	val = MDA_VLD | MDA_DFMT1_SA(sa) | MDA_DFMT1_PA(pa) | MDA_DFMT1_DID(did) |
+	      MDA_DFMT1_DIDB(did_bypass ? 1U : 0U);
+
+	mmio_write_32(trdc_base + MDAC_W_X(mda_inst, 0), val);
+
+	return 0;
+}
+
+static uintptr_t trdc_get_mbc_base(uintptr_t trdc_reg, uint32_t mbc_x)
+{
+	struct trdc_mgr *trdc_base = (struct trdc_mgr *)trdc_reg;
+	uint32_t mbc_num = MBC_NUM(trdc_base->trdc_hwcfg0);
+
+	if (mbc_x >= mbc_num) {
+		return 0U;
+	}
+
+	return trdc_reg + 0x10000 + 0x2000 * mbc_x;
+}
+
+static uintptr_t trdc_get_mrc_base(uintptr_t trdc_reg, uint32_t mrc_x)
+{
+	struct trdc_mgr *trdc_base = (struct trdc_mgr *)trdc_reg;
+	uint32_t mbc_num = MBC_NUM(trdc_base->trdc_hwcfg0);
+	uint32_t mrc_num = MRC_NUM(trdc_base->trdc_hwcfg0);
+
+	if (mrc_x >= mrc_num) {
+		return 0U;
+	}
+
+	return trdc_reg + 0x10000 + 0x2000 * mbc_num + 0x1000 * mrc_x;
+}
+
+uint32_t trdc_mbc_blk_num(uintptr_t trdc_reg, uint32_t mbc_x, uint32_t mem_x)
+{
+	uint32_t glbcfg;
+	struct mbc_mem_dom *mbc_dom;
+	struct trdc_mbc *mbc_base = (struct trdc_mbc *)trdc_get_mbc_base(trdc_reg, mbc_x);
+
+	if (mbc_base == NULL) {
+		return 0;
+	}
+
+	/* only first dom has the glbcfg */
+	mbc_dom = &mbc_base->mem_dom[0];
+	glbcfg = mmio_read_32((uintptr_t)&mbc_dom->mem_glbcfg[mem_x]);
+
+	return MBC_BLK_NUM(glbcfg);
+}
+
+uint32_t trdc_mrc_rgn_num(uintptr_t trdc_reg, uint32_t mrc_x)
+{
+	uint32_t glbcfg;
+	struct mrc_rgn_dom *mrc_dom;
+	struct trdc_mrc *mrc_base = (struct trdc_mrc *)trdc_get_mrc_base(trdc_reg, mrc_x);
+
+	if (mrc_base == NULL) {
+		return 0;
+	}
+
+	/* only first dom has the glbcfg */
+	mrc_dom = &mrc_base->mrc_dom[0];
+	glbcfg = mmio_read_32((uintptr_t)&mrc_dom->mrc_glbcfg[0]);
+
+	return MBC_BLK_NUM(glbcfg);
+}
+
+int trdc_mbc_set_control(uintptr_t trdc_reg, uint32_t mbc_x,
+			 uint32_t glbac_id, uint32_t glbac_val)
+{
+	struct mbc_mem_dom *mbc_dom;
+	struct trdc_mbc *mbc_base = (struct trdc_mbc *)trdc_get_mbc_base(trdc_reg, mbc_x);
+
+	if (mbc_base == NULL || glbac_id >= GLBAC_NUM) {
+		return -EINVAL;
+	}
+
+	/* only first dom has the glbac */
+	mbc_dom = &mbc_base->mem_dom[0];
+
+	mmio_write_32((uintptr_t)&mbc_dom->memn_glbac[glbac_id], glbac_val);
+
+	return 0;
+}
+
+int trdc_mbc_blk_config(uintptr_t trdc_reg, uint32_t mbc_x,
+			uint32_t dom_x, uint32_t mem_x, uint32_t blk_x,
+			bool sec_access, uint32_t glbac_id)
+{
+	uint32_t *cfg_w;
+	uint32_t index, offset, val;
+	struct mbc_mem_dom *mbc_dom;
+	struct trdc_mbc *mbc_base = (struct trdc_mbc *)trdc_get_mbc_base(trdc_reg, mbc_x);
+
+	if (mbc_base == NULL || glbac_id >= GLBAC_NUM) {
+		return -EINVAL;
+	}
+
+	mbc_dom = &mbc_base->mem_dom[dom_x];
+
+	switch (mem_x) {
+	case 0:
+		cfg_w = &mbc_dom->mem0_blk_cfg_w[blk_x / 8];
+		break;
+	case 1:
+		cfg_w = &mbc_dom->mem1_blk_cfg_w[blk_x / 8];
+		break;
+	case 2:
+		cfg_w = &mbc_dom->mem2_blk_cfg_w[blk_x / 8];
+		break;
+	case 3:
+		cfg_w = &mbc_dom->mem3_blk_cfg_w[blk_x / 8];
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	index = blk_x % 8;
+	offset = index * 4;
+
+	val = mmio_read_32((uintptr_t)cfg_w);
+	val &= ~(0xF << offset);
+
+	/*
+	 * MBC0-3
+	 * Global 0, 0x7777 secure pri/user read/write/execute,
+	 * S400 has already set it. So select MBC0_MEMN_GLBAC0
+	 */
+	if (sec_access) {
+		val |= ((0x0 | (glbac_id & 0x7)) << offset);
+		mmio_write_32((uintptr_t)cfg_w, val);
+	} else {
+		/* nse bit set */
+		val |= ((0x8 | (glbac_id & 0x7)) << offset);
+		mmio_write_32((uintptr_t)cfg_w, val);
+	}
+
+	return 0;
+}
+
+int trdc_mrc_set_control(uintptr_t trdc_reg, uint32_t mrc_x,
+			 uint32_t glbac_id, uint32_t glbac_val)
+{
+	struct mrc_rgn_dom *mrc_dom;
+	struct trdc_mrc *mrc_base = (struct trdc_mrc *)trdc_get_mrc_base(trdc_reg, mrc_x);
+
+	if (mrc_base == NULL || glbac_id >= GLBAC_NUM) {
+		return -EINVAL;
+	}
+
+	/* only first dom has the glbac */
+	mrc_dom = &mrc_base->mrc_dom[0];
+
+	mmio_write_32((uintptr_t)&mrc_dom->memn_glbac[glbac_id], glbac_val);
+
+	return 0;
+}
+
+int trdc_mrc_rgn_config(uintptr_t trdc_reg, uint32_t mrc_x,
+			uint32_t dom_x, uint32_t rgn_id,
+			uint32_t addr_start, uint32_t addr_size,
+			bool sec_access, uint32_t glbac_id)
+{
+	uint32_t *desc_w;
+	uint32_t addr_end;
+	struct mrc_rgn_dom *mrc_dom;
+	struct trdc_mrc *mrc_base = (struct trdc_mrc *)trdc_get_mrc_base(trdc_reg, mrc_x);
+
+	if (mrc_base == NULL || glbac_id >= GLBAC_NUM || rgn_id >= MRC_REG_ALL) {
+		return -EINVAL;
+	}
+
+	mrc_dom = &mrc_base->mrc_dom[dom_x];
+
+	addr_end = addr_start + addr_size - 1;
+	addr_start &= ~0x3fff;
+	addr_end &= ~0x3fff;
+
+	desc_w = &mrc_dom->rgn_desc_words[rgn_id][0];
+
+	if (sec_access) {
+		mmio_write_32((uintptr_t)desc_w, addr_start | (glbac_id & 0x7));
+		mmio_write_32((uintptr_t)(desc_w + 1), addr_end | 0x1);
+	} else {
+		mmio_write_32((uintptr_t)desc_w, addr_start | (glbac_id & 0x7));
+		mmio_write_32((uintptr_t)(desc_w + 1), (addr_end | 0x1 | 0x10));
+	}
+
+	return 0;
+}
+
+bool trdc_mrc_enabled(uintptr_t mrc_base)
+{
+	return (mmio_read_32(mrc_base) & BIT(15));
+}
+
+bool trdc_mbc_enabled(uintptr_t mbc_base)
+{
+	return (mmio_read_32(mbc_base) & BIT(14));
+}
+
+static bool is_trdc_mgr_slot(uintptr_t trdc_base, uint8_t mbc_id,
+			     uint8_t mem_id, uint16_t blk_id)
+{
+	unsigned int i;
+
+	for (i = 0U; i < trdc_mgr_num; i++) {
+		if (trdc_mgr_blks[i].trdc_base == trdc_base) {
+			if (mbc_id == trdc_mgr_blks[i].mbc_id &&
+			    mem_id == trdc_mgr_blks[i].mbc_mem_id &&
+			   (blk_id == trdc_mgr_blks[i].blk_mgr ||
+			    blk_id == trdc_mgr_blks[i].blk_mc)) {
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+/*
+ * Configure the TRDC MGR & MC's access policy. Only secure privileged
+ * mode SW can access them.
+ */
+void trdc_mgr_mbc_setup(struct trdc_mgr_info *mgr)
+{
+	unsigned int i;
+
+	/*
+	 * If the MBC is globally enabled, the MBCs of the TRDC MGR & MC
+	 * need to be configured correctly.
+	 */
+	if (trdc_mbc_enabled(mgr->trdc_base)) {
+		/* ONLY secure privilege can access */
+		trdc_mbc_set_control(mgr->trdc_base, mgr->mbc_id, 7, 0x6000);
+		for (i = 0U; i < 16U; i++) {
+			trdc_mbc_blk_config(mgr->trdc_base, mgr->mbc_id, i,
+				mgr->mbc_mem_id, mgr->blk_mgr, true, 7);
+
+			trdc_mbc_blk_config(mgr->trdc_base, mgr->mbc_id, i,
+				mgr->mbc_mem_id, mgr->blk_mc, true, 7);
+		}
+	}
+}
+
+/*
+ * Set up the TRDC access policy for all the resources under
+ * the TRDC control.
+ */
+void trdc_setup(struct trdc_config_info *cfg)
+{
+	unsigned int i, j, num;
+	bool is_mgr;
+
+	/* config the MRCs */
+	if (trdc_mrc_enabled(cfg->trdc_base)) {
+		/* set global access policy */
+		for (i = 0U; i < cfg->num_mrc_glbac; i++) {
+			trdc_mrc_set_control(cfg->trdc_base,
+					     cfg->mrc_glbac[i].mbc_mrc_id,
+					     cfg->mrc_glbac[i].glbac_id,
+					     cfg->mrc_glbac[i].glbac_val);
+		}
+
+		/* set each MRC region access policy */
+		for (i = 0U; i < cfg->num_mrc_cfg; i++) {
+			trdc_mrc_rgn_config(cfg->trdc_base, cfg->mrc_cfg[i].mrc_id,
+					    cfg->mrc_cfg[i].dom_id,
+					    cfg->mrc_cfg[i].region_id,
+					    cfg->mrc_cfg[i].region_start,
+					    cfg->mrc_cfg[i].region_size,
+					    cfg->mrc_cfg[i].secure,
+					    cfg->mrc_cfg[i].glbac_id);
+		}
+	}
+
+	/* config the MBCs */
+	if (trdc_mbc_enabled(cfg->trdc_base)) {
+		/* set MBC global access policy */
+		for (i = 0U; i < cfg->num_mbc_glbac; i++) {
+			trdc_mbc_set_control(cfg->trdc_base,
+					     cfg->mbc_glbac[i].mbc_mrc_id,
+					     cfg->mbc_glbac[i].glbac_id,
+					     cfg->mbc_glbac[i].glbac_val);
+		}
+
+		for (i = 0U; i < cfg->num_mbc_cfg; i++) {
+			if (cfg->mbc_cfg[i].blk_id == MBC_BLK_ALL) {
+				num = trdc_mbc_blk_num(cfg->trdc_base,
+						       cfg->mbc_cfg[i].mbc_id,
+						       cfg->mbc_cfg[i].mem_id);
+
+				for (j = 0U; j < num; j++) {
+					/* Skip mgr and mc */
+					is_mgr = is_trdc_mgr_slot(cfg->trdc_base,
+								  cfg->mbc_cfg[i].mbc_id,
+								  cfg->mbc_cfg[i].mem_id, j);
+					if (is_mgr) {
+						continue;
+					}
+
+					trdc_mbc_blk_config(cfg->trdc_base,
+							    cfg->mbc_cfg[i].mbc_id,
+							    cfg->mbc_cfg[i].dom_id,
+							    cfg->mbc_cfg[i].mem_id, j,
+							    cfg->mbc_cfg[i].secure,
+							    cfg->mbc_cfg[i].glbac_id);
+				}
+			} else {
+				trdc_mbc_blk_config(cfg->trdc_base,
+						    cfg->mbc_cfg[i].mbc_id,
+						    cfg->mbc_cfg[i].dom_id,
+						    cfg->mbc_cfg[i].mem_id,
+						    cfg->mbc_cfg[i].blk_id,
+						    cfg->mbc_cfg[i].secure,
+						    cfg->mbc_cfg[i].glbac_id);
+			}
+		}
+	}
+}
diff --git a/include/drivers/cadence/cdns_combo_phy.h b/include/drivers/cadence/cdns_combo_phy.h
new file mode 100644
index 0000000..f5dabda
--- /dev/null
+++ b/include/drivers/cadence/cdns_combo_phy.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CDN_COMBOPHY_H
+#define CDN_COMBOPHY_H
+
+/* SRS */
+#define SDMMC_CDN_SRS02			0x8
+#define SDMMC_CDN_SRS03			0xC
+#define SDMMC_CDN_SRS04			0x10
+#define SDMMC_CDN_SRS05			0x14
+#define SDMMC_CDN_SRS06			0x18
+#define SDMMC_CDN_SRS07			0x1C
+#define SDMMC_CDN_SRS09			0x24
+#define SDMMC_CDN_SRS10			0x28
+#define SDMMC_CDN_SRS11			0x2C
+#define SDMMC_CDN_SRS12			0x30
+#define SDMMC_CDN_SRS13			0x34
+#define SDMMC_CDN_SRS14			0x38
+
+/* SRS03 */
+/* Response Type Select
+ * Defines the expected response length.
+ */
+#define SDMMC_CDN_RTS				16
+
+/* Command CRC Check Enable
+ * When set to 1, the host checks if the CRC field of the response is valid.
+ * When 0, the CRC check is disabled and the CRC field of the response is ignored.
+ */
+#define SDMMC_CDN_CRCCE				19
+
+/* Command Index
+ * This field contains a command number (index) of the command to be sent.
+ */
+#define SDMMC_CDN_CIDX				24
+
+/* SRS09 */
+/* Card Inserted
+ * Indicates if the card is inserted inside the slot.
+ */
+#define SDMMC_CDN_CI				16
+
+/* SRS10 */
+/* Data Transfer Width
+ * Bit used to configure DAT bus width to 1 or 4.
+ */
+#define SDMMC_CDN_DTW				1
+
+/* Extended Data Transfer Width
+ * This bit is to enable/disable 8-bit DAT bus width mode.
+ */
+#define SDMMC_CDN_EDTW				5
+
+/* SD Bus Power for VDD1
+ * When set to 1, the VDD1 voltage is supplied to card/device.
+ */
+#define SDMMC_CDN_BP				8
+
+/* SD Bus Voltage Select
+ * This field is used to configure VDD1 voltage level.
+ */
+#define SDMMC_CDN_BVS				9
+
+/* SRS11 */
+/* Internal Clock Enable
+ * This field controls (enables/disables) the external clock generator.
+ */
+#define SDMMC_CDN_ICE				0
+
+/* Internal Clock Stable
+ * When 1, indicates that the clock on sdmclk pin of the host is stable.
+ * When 0, indicates that the clock is not stable.
+ */
+#define SDMMC_CDN_ICS				1
+
+/* SD Clock Enable
+ * When set, SDCLK clock is enabled.
+ * When clear, SDCLK clock is stopped.
+ */
+#define SDMMC_CDN_SDCE				2
+
+/* USDCLK Frequency Select
+ * This is used to calculate frequency of USDCLK clock.
+ */
+#define SDMMC_CDN_USDCLKFS			6
+
+/* SDCLK Frequency Select
+ * This is used to calculate frequency of SDCLK clock.
+ */
+#define SDMMC_CDN_SDCLKFS				8
+
+/* Data Timeout Counter Value
+ * This value determines the interval by which DAT line timeouts are detected
+ */
+#define SDMMC_CDN_DTCV				16
+
+/* SRS12 */
+/* Command Complete
+ * Generated when the end bit of the response is received.
+ */
+#define SDMMC_CDN_CC				0
+
+/* Transfer Complete
+ * Generated when the transfer which uses the DAT line is complete.
+ */
+#define SDMMC_CDN_TC				1
+
+/* Error Interrupt
+ * The software can check for an error by reading this single bit first.
+ */
+#define SDMMC_CDN_EINT				15
+
+/* SRS14 */
+/* Command Complete Interrupt Enable */
+#define SDMMC_CDN_CC_IE				0
+
+/* Transfer Complete Interrupt Enable */
+#define SDMMC_CDN_TC_IE				1
+
+/* DMA Interrupt Enable */
+#define SDMMC_CDN_DMAINT_IE			3
+
+/* Combo PHY DLL registers */
+#define CP_DLL_REG_BASE			(0x10B92000)
+#define CP_DLL_DQ_TIMING_REG		(0x00)
+#define CP_DLL_DQS_TIMING_REG		(0x04)
+#define CP_DLL_GATE_LPBK_CTRL_REG	(0x08)
+#define CP_DLL_MASTER_CTRL_REG		(0x0C)
+#define CP_DLL_SLAVE_CTRL_REG		(0x10)
+#define CP_DLL_IE_TIMING_REG		(0x14)
+
+#define CP_DQ_TIMING_REG_SDR		(0x00000002)
+#define CP_DQS_TIMING_REG_SDR		(0x00100004)
+#define CP_GATE_LPBK_CTRL_REG_SDR	(0x00D80000)
+#define CP_DLL_MASTER_CTRL_REG_SDR	(0x00800000)
+#define CP_DLL_SLAVE_CTRL_REG_SDR	(0x00000000)
+
+#define CP_DLL(_reg)			(CP_DLL_REG_BASE \
+					+ (CP_DLL_##_reg))
+
+/* Control Timing Block registers */
+#define CP_CTB_REG_BASE			(0x10B92080)
+#define CP_CTB_CTRL_REG			(0x00)
+#define CP_CTB_TSEL_REG			(0x04)
+#define CP_CTB_GPIO_CTRL0		(0x08)
+#define CP_CTB_GPIO_CTRL1		(0x0C)
+#define CP_CTB_GPIO_STATUS0		(0x10)
+#define CP_CTB_GPIO_STATUS1		(0x14)
+
+#define CP_CTRL_REG_SDR			(0x00004040)
+#define CP_TSEL_REG_SDR			(0x00000000)
+
+#define CP_CTB(_reg)			(CP_CTB_REG_BASE \
+					+ (CP_CTB_##_reg))
+
+/* Combo PHY */
+#define SDMMC_CDN_REG_BASE		0x10808200
+#define PHY_DQ_TIMING_REG		0x2000
+#define PHY_DQS_TIMING_REG		0x2004
+#define PHY_GATE_LPBK_CTRL_REG		0x2008
+#define PHY_DLL_MASTER_CTRL_REG		0x200C
+#define PHY_DLL_SLAVE_CTRL_REG		0x2010
+#define PHY_CTRL_REG			0x2080
+#define PHY_REG_ADDR_MASK		0xFFFF
+#define PHY_REG_DATA_MASK		0xFFFFFFFF
+
+/* PHY_DQS_TIMING_REG */
+#define CP_USE_EXT_LPBK_DQS(x)		((x) << 22) //0x1
+#define CP_USE_LPBK_DQS(x)		((x) << 21) //0x1
+#define CP_USE_PHONY_DQS(x)		((x) << 20) //0x1
+#define CP_USE_PHONY_DQS_CMD(x)		((x) << 19) //0x1
+
+/* PHY_GATE_LPBK_CTRL_REG */
+#define CP_SYNC_METHOD(x)		((x) << 31) //0x1
+#define CP_SW_HALF_CYCLE_SHIFT(x)	((x) << 28) //0x1
+#define CP_RD_DEL_SEL(x)		((x) << 19) //0x3f
+#define CP_UNDERRUN_SUPPRESS(x)		((x) << 18) //0x1
+#define CP_GATE_CFG_ALWAYS_ON(x)	((x) << 6) //0x1
+
+/* PHY_DLL_MASTER_CTRL_REG */
+#define CP_DLL_BYPASS_MODE(x)		((x) << 23) //0x1
+#define CP_DLL_START_POINT(x)		((x) << 0) //0xff
+
+/* PHY_DLL_SLAVE_CTRL_REG */
+#define CP_READ_DQS_CMD_DELAY(x)	((x) << 24) //0xff
+#define CP_CLK_WRDQS_DELAY(x)		((x) << 16) //0xff
+#define CP_CLK_WR_DELAY(x)		((x) << 8) //0xff
+#define CP_READ_DQS_DELAY(x)		((x) << 0) //0xff
+
+/* PHY_DQ_TIMING_REG */
+#define CP_IO_MASK_ALWAYS_ON(x)		((x) << 31) //0x1
+#define CP_IO_MASK_END(x)		((x) << 27) //0x7
+#define CP_IO_MASK_START(x)		((x) << 24) //0x7
+#define CP_DATA_SELECT_OE_END(x)	((x) << 0) //0x7
+
+/* PHY_CTRL_REG */
+#define CP_PHONY_DQS_TIMING_MASK	0x3F
+#define CP_PHONY_DQS_TIMING_SHIFT	4
+
+/* Shared Macros */
+#define SDMMC_CDN(_reg)			(SDMMC_CDN_REG_BASE + \
+					(SDMMC_CDN_##_reg))
+
+struct cdns_sdmmc_combo_phy {
+	uint32_t	cp_clk_wr_delay;
+	uint32_t	cp_clk_wrdqs_delay;
+	uint32_t	cp_data_select_oe_end;
+	uint32_t	cp_dll_bypass_mode;
+	uint32_t	cp_dll_locked_mode;
+	uint32_t	cp_dll_start_point;
+	uint32_t	cp_gate_cfg_always_on;
+	uint32_t	cp_io_mask_always_on;
+	uint32_t	cp_io_mask_end;
+	uint32_t	cp_io_mask_start;
+	uint32_t	cp_rd_del_sel;
+	uint32_t	cp_read_dqs_cmd_delay;
+	uint32_t	cp_read_dqs_delay;
+	uint32_t	cp_sw_half_cycle_shift;
+	uint32_t	cp_sync_method;
+	uint32_t	cp_underrun_suppress;
+	uint32_t	cp_use_ext_lpbk_dqs;
+	uint32_t	cp_use_lpbk_dqs;
+	uint32_t	cp_use_phony_dqs;
+	uint32_t	cp_use_phony_dqs_cmd;
+};
+
+/* Function Prototype */
+
+int cdns_sdmmc_write_phy_reg(uint32_t phy_reg_addr, uint32_t phy_reg_addr_value,
+			uint32_t phy_reg_data, uint32_t phy_reg_data_value);
+int cdns_sd_card_detect(void);
+int cdns_emmc_card_reset(void);
+
+#endif
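Illustrative sketch (reviewer note, not part of the patch) of how the field helpers above compose a PHY register value. It assumes, without this header stating it, that cdns_sdmmc_write_phy_reg() takes an address-port/value pair followed by a data-port/value pair (HRS04/HRS05 from cdns_sdmmc.h); the field settings are placeholders rather than a validated timing configuration.

#include <stdint.h>

#include <drivers/cadence/cdns_combo_phy.h>
#include <drivers/cadence/cdns_sdmmc.h>
#include <lib/utils_def.h>

/* Compose a hypothetical PHY_DQS_TIMING_REG value from the field helpers. */
static int example_program_dqs_timing(void)
{
	uint32_t dqs = CP_USE_EXT_LPBK_DQS(1) | CP_USE_LPBK_DQS(1) |
		       CP_USE_PHONY_DQS(1) | CP_USE_PHONY_DQS_CMD(1);

	/*
	 * Assumption: the first pair selects the PHY register through the
	 * HRS04 address port, the second pair writes the value through the
	 * HRS05 data port.
	 */
	return cdns_sdmmc_write_phy_reg(MMC_REG_BASE + SDHC_CDNS_HRS04,
					PHY_DQS_TIMING_REG,
					MMC_REG_BASE + SDHC_CDNS_HRS05,
					dqs);
}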
diff --git a/include/drivers/cadence/cdns_nand.h b/include/drivers/cadence/cdns_nand.h
new file mode 100644
index 0000000..64ba267
--- /dev/null
+++ b/include/drivers/cadence/cdns_nand.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CDN_NAND_H
+#define CDN_NAND_H
+
+#include <drivers/cadence/cdns_combo_phy.h>
+
+/* NAND flash device information */
+typedef struct cnf_dev_info {
+	uint8_t type;
+	uint8_t nluns;
+	uint8_t sector_cnt;
+	uint16_t npages_per_block;
+	uint16_t sector_size;
+	uint16_t last_sector_size;
+	uint16_t page_size;
+	uint16_t spare_size;
+	uint32_t nblocks_per_lun;
+	uint32_t block_size;
+	unsigned long long total_size;
+} cnf_dev_info_t;
+
+/* Shared Macros */
+
+/* Default values */
+#define CNF_DEF_VOL_ID					0
+#define CNF_DEF_DEVICE					0
+#define CNF_DEF_TRD					0
+#define CNF_READ_SINGLE_PAGE				1
+#define CNF_DEF_DELAY_US				500
+#define CNF_READ_INT_DELAY_US				10
+
+/* Work modes */
+#define CNF_WORK_MODE_CDMA				0
+#define CNF_WORK_MODE_PIO				1
+
+/* Command types */
+#define CNF_CT_SET_FEATURE				0x0100
+#define CNF_CT_RESET_ASYNC				0x1100
+#define CNF_CT_RESET_SYNC				0x1101
+#define CNF_CT_RESET_LUN				0x1102
+#define CNF_CT_ERASE					0x1000
+#define CNF_CT_PAGE_PROGRAM				0x2100
+#define CNF_CT_PAGE_READ				0x2200
+
+/* Interrupts enable or disable */
+#define CNF_INT_EN					1
+#define CNF_INT_DIS					0
+
+/* Device types */
+#define CNF_DT_UNKNOWN					0x00
+#define CNF_DT_ONFI					0x01
+#define CNF_DT_JEDEC					0x02
+#define CNF_DT_LEGACY					0x03
+
+/* Command and status registers */
+#define CNF_CMDREG_REG_BASE				SOCFPGA_NAND_REG_BASE
+
+/* DMA maximum burst size 0-127*/
+#define CNF_DMA_BURST_SIZE_MAX				127
+
+/* DMA settings register field offsets */
+#define CNF_DMA_SETTINGS_BURST				0
+#define CNF_DMA_SETTINGS_OTE				16
+#define CNF_DMA_SETTINGS_SDMA_ERR			17
+
+#define CNF_DMA_MASTER_SEL				1
+#define CNF_DMA_SLAVE_SEL				0
+
+/* DMA FIFO trigger level register field offsets */
+#define CNF_FIFO_TLEVEL_POS				0
+#define CNF_FIFO_TLEVEL_DMA_SIZE			16
+#define CNF_DMA_PREFETCH_SIZE				(1024 / 8)
+
+#define CNF_GET_CTRL_BUSY(x)				(x & (1 << 8))
+#define CNF_GET_INIT_COMP(x)				(x & (1 << 9))
+
+/* Command register0 field offsets */
+#define CNF_CMDREG0_CT					30
+#define CNF_CMDREG0_TRD					24
+#define CNF_CMDREG0_INTR				20
+#define CNF_CMDREG0_DMA					21
+#define CNF_CMDREG0_VOL					16
+#define CNF_CMDREG0_CMD					0
+#define CNF_CMDREG4_MEM					24
+
+/* Command status register field offsets */
+#define CNF_ECMD					BIT(0)
+#define CNF_EECC					BIT(1)
+#define CNF_EMAX					BIT(2)
+#define CNF_EDEV					BIT(12)
+#define CNF_EDQS					BIT(13)
+#define CNF_EFAIL					BIT(14)
+#define CNF_CMPLT					BIT(15)
+#define CNF_EBUS					BIT(16)
+#define CNF_EDI						BIT(17)
+#define CNF_EPAR					BIT(18)
+#define CNF_ECTX					BIT(19)
+#define CNF_EPRO					BIT(20)
+#define CNF_EIDX					BIT(24)
+
+#define CNF_CMDREG_CMD_REG0				0x00
+#define CNF_CMDREG_CMD_REG1				0x04
+#define CNF_CMDREG_CMD_REG2				0x08
+#define CNF_CMDREG_CMD_REG3				0x0C
+#define CNF_CMDREG_CMD_STAT_PTR				0x10
+#define CNF_CMDREG_CMD_STAT				0x14
+#define CNF_CMDREG_CMD_REG4				0x20
+#define CNF_CMDREG_CTRL_STATUS				0x118
+#define CNF_CMDREG_TRD_STATUS				0x120
+
+#define CNF_CMDREG(_reg)				(CNF_CMDREG_REG_BASE \
+							+ (CNF_CMDREG_##_reg))
+
+/* Controller configuration registers */
+#define CNF_LSB16_MASK					0xFFFF
+#define CNF_GET_NPAGES_PER_BLOCK(x)			(x & CNF_LSB16_MASK)
+
+#define CNF_GET_SCTR_SIZE(x)				(x & CNF_LSB16_MASK)
+#define CNF_GET_LAST_SCTR_SIZE(x)			((x >> 16) & CNF_LSB16_MASK)
+
+#define CNF_GET_PAGE_SIZE(x)				(x & CNF_LSB16_MASK)
+#define CNF_GET_SPARE_SIZE(x)				((x >> 16) & CNF_LSB16_MASK)
+
+#define CNF_CTRLCFG_REG_BASE				0x10B80400
+#define CNF_CTRLCFG_TRANS_CFG0				0x00
+#define CNF_CTRLCFG_TRANS_CFG1				0x04
+#define CNF_CTRLCFG_LONG_POLL				0x08
+#define CNF_CTRLCFG_SHORT_POLL				0x0C
+#define CNF_CTRLCFG_DEV_STAT				0x10
+#define CNF_CTRLCFG_DEV_LAYOUT				0x24
+#define CNF_CTRLCFG_ECC_CFG0				0x28
+#define CNF_CTRLCFG_ECC_CFG1				0x2C
+#define CNF_CTRLCFG_MULTIPLANE_CFG			0x34
+#define CNF_CTRLCFG_CACHE_CFG				0x38
+#define CNF_CTRLCFG_DMA_SETTINGS			0x3C
+#define CNF_CTRLCFG_FIFO_TLEVEL				0x54
+
+#define CNF_CTRLCFG(_reg)				(CNF_CTRLCFG_REG_BASE \
+							+ (CNF_CTRLCFG_##_reg))
+
+/* Data integrity registers */
+#define CNF_DI_PAR_EN					0
+#define CNF_DI_CRC_EN					1
+
+#define CNF_DI_REG_BASE					0x10B80700
+#define CNF_DI_CONTROL					0x00
+#define CNF_DI_INJECT0					0x04
+#define CNF_DI_INJECT1					0x08
+#define CNF_DI_ERR_REG_ADDR				0x0C
+#define CNF_DI_INJECT2					0x10
+
+#define CNF_DI(_reg)					(CNF_DI_REG_BASE \
+							+ (CNF_DI_##_reg))
+
+/* Controller parameter registers */
+#define CNF_NTHREADS_MASK				0x07
+#define CNF_GET_NLUNS(x)				(x & 0xFF)
+#define CNF_GET_DEV_TYPE(x)				((x >> 30) & 0x03)
+#define CNF_GET_NTHREADS(x)				(1 << (x & CNF_NTHREADS_MASK))
+
+#define CNF_CTRLPARAM_REG_BASE				0x10B80800
+#define CNF_CTRLPARAM_VERSION				0x00
+#define CNF_CTRLPARAM_FEATURE				0x04
+#define CNF_CTRLPARAM_MFR_ID				0x08
+#define CNF_CTRLPARAM_DEV_AREA				0x0C
+#define CNF_CTRLPARAM_DEV_PARAMS0			0x10
+#define CNF_CTRLPARAM_DEV_PARAMS1			0x14
+#define CNF_CTRLPARAM_DEV_FEATUERS			0x18
+#define CNF_CTRLPARAM_DEV_BLOCKS_PLUN			0x1C
+
+#define CNF_CTRLPARAM(_reg)				(CNF_CTRLPARAM_REG_BASE \
+							+ (CNF_CTRLPARAM_##_reg))
+
+/* Protection mechanism registers */
+#define CNF_PROT_REG_BASE				0x10B80900
+#define CNF_PROT_CTRL0					0x00
+#define CNF_PROT_DOWN0					0x04
+#define CNF_PROT_UP0					0x08
+#define CNF_PROT_CTRL1					0x10
+#define CNF_PROT_DOWN1					0x14
+#define CNF_PROT_UP1					0x18
+
+#define CNF_PROT(_reg)					(CNF_PROT_REG_BASE \
+							+ (CNF_PROT_##_reg))
+
+/* Mini controller registers */
+#define CNF_MINICTRL_REG_BASE				0x10B81000
+
+/* Operation work modes */
+#define CNF_OPR_WORK_MODE_SDR				0
+#define CNF_OPR_WORK_MODE_NVDDR				1
+#define CNF_OPR_WORK_MODE_TOGGLE_NVDDR2_3		2
+#define CNF_OPR_WORK_MODE_RES				3
+
+/* Mini controller common settings register field offsets */
+#define CNF_CMN_SETTINGS_WR_WUP				20
+#define CNF_CMN_SETTINGS_RD_WUP				16
+#define CNF_CMN_SETTINGS_DEV16				8
+#define CNF_CMN_SETTINGS_OPR				0
+
+/* Async mode register field offsets */
+#define CNF_ASYNC_TIMINGS_TRH				24
+#define CNF_ASYNC_TIMINGS_TRP				16
+#define CNF_ASYNC_TIMINGS_TWH				8
+#define CNF_ASYNC_TIMINGS_TWP				0
+
+/* Mini controller DLL PHY controller register field offsets */
+#define CNF_DLL_PHY_RST_N				24
+#define CNF_DLL_PHY_EXT_WR_MODE				17
+#define CNF_DLL_PHY_EXT_RD_MODE				16
+
+#define CNF_MINICTRL_WP_SETTINGS			0x00
+#define CNF_MINICTRL_RBN_SETTINGS			0x04
+#define CNF_MINICTRL_CMN_SETTINGS			0x08
+#define CNF_MINICTRL_SKIP_BYTES_CFG			0x0C
+#define CNF_MINICTRL_SKIP_BYTES_OFFSET			0x10
+#define CNF_MINICTRL_TOGGLE_TIMINGS0			0x14
+#define CNF_MINICTRL_TOGGLE_TIMINGS1			0x18
+#define CNF_MINICTRL_ASYNC_TOGGLE_TIMINGS		0x1C
+#define CNF_MINICTRL_SYNC_TIMINGS			0x20
+#define CNF_MINICTRL_DLL_PHY_CTRL			0x34
+
+#define CNF_MINICTRL(_reg)				(CNF_MINICTRL_REG_BASE \
+							+ (CNF_MINICTRL_##_reg))
+
+/*
+ * @brief Nand IO MTD initialization routine
+ *
+ * @total_size: [out] Total size of the NAND flash device
+ * @erase_size: [out] Minimum erase size of the NAND flash device
+ * Return: 0 on success, a negative errno on failure
+ */
+int cdns_nand_init_mtd(unsigned long long *total_size,
+						unsigned int *erase_size);
+
+/*
+ * @brief Read bytes from the NAND flash device
+ *
+ * @offset: Byte offset to read from in device
+ * @buffer: [out] Bytes read from device
+ * @length: Number of bytes to read
+ * @out_length: [out] Number of bytes read from device
+ * Return: 0 on success, a negative errno on failure
+ */
+int cdns_nand_read(unsigned int offset, uintptr_t buffer,
+					size_t length, size_t *out_length);
+
+/* NAND Flash Controller/Host initialization */
+int cdns_nand_host_init(void);
+
+#endif
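Usage sketch (reviewer note, not part of the patch) of the NAND API declared above; the buffer size and read offset are arbitrary placeholders.

#include <stddef.h>
#include <stdint.h>

#include <drivers/cadence/cdns_nand.h>

/* Bring up the controller, query the flash geometry, then read 2 KB. */
static int example_nand_probe_and_read(void)
{
	unsigned long long total_size;
	unsigned int erase_size;
	static uint8_t buf[2048];
	size_t out_len;
	int ret;

	ret = cdns_nand_host_init();
	if (ret != 0) {
		return ret;
	}

	ret = cdns_nand_init_mtd(&total_size, &erase_size);
	if (ret != 0) {
		return ret;
	}

	/* Read the first 2 KB of the device into buf */
	return cdns_nand_read(0U, (uintptr_t)buf, sizeof(buf), &out_len);
}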
diff --git a/include/drivers/cadence/cdns_sdmmc.h b/include/drivers/cadence/cdns_sdmmc.h
new file mode 100644
index 0000000..6452725
--- /dev/null
+++ b/include/drivers/cadence/cdns_sdmmc.h
@@ -0,0 +1,474 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CDN_MMC_H
+#define CDN_MMC_H
+
+#include <drivers/cadence/cdns_combo_phy.h>
+#include <drivers/mmc.h>
+#include "socfpga_plat_def.h"
+
+#if MMC_DEVICE_TYPE == 0
+#define CONFIG_DMA_ADDR_T_64BIT		0
+#endif
+
+#define MMC_REG_BASE			SOCFPGA_MMC_REG_BASE
+#define COMBO_PHY_REG		0x0
+#define SDHC_EXTENDED_WR_MODE_MASK	0xFFFFFFF7
+#define SDHC_DLL_RESET_MASK	0x00000001
+/* HRS09 */
+#define SDHC_PHY_SW_RESET			BIT(0)
+#define SDHC_PHY_INIT_COMPLETE		BIT(1)
+#define SDHC_EXTENDED_RD_MODE(x)	((x) << 2)
+#define EXTENDED_WR_MODE			3
+#define SDHC_EXTENDED_WR_MODE(x)	((x) << 3)
+#define RDCMD_EN					15
+#define SDHC_RDCMD_EN(x)			((x) << 15)
+#define SDHC_RDDATA_EN(x)			((x) << 16)
+
+/* CMD_DATA_OUTPUT */
+#define SDHC_CDNS_HRS16				0x40
+
+/* This value determines the interval by which DAT line timeouts are detected */
+/* The interval can be computed as below: */
+/* • 1111b - Reserved */
+/* • 1110b - t_sdmclk*2(27+2) */
+/* • 1101b - t_sdmclk*2(26+2) */
+#define READ_CLK					0xa << 16
+#define WRITE_CLK					0xe << 16
+#define DTC_VAL						0xE
+
+/* SRS00 */
+/* System Address / Argument 2 / 32-bit block count
+ * This field is used as:
+ * • 32-bit Block Count register
+ * • SDMA system memory address
+ * • Auto CMD23 Argument
+ */
+#define SAAR						(1)
+
+/* SRS01 */
+/* Transfer Block Size
+ * This field defines block size for block data transfers
+ */
+#define BLOCK_SIZE					0
+
+/* SDMA Buffer Boundary
+ * System address boundary can be set for SDMA engine.
+ */
+#define SDMA_BUF					7 << 12
+
+/* Block Count For Current Transfer
+ * Sets the number of data blocks for the next transfer.
+ */
+#define BLK_COUNT_CT				16
+
+/* SRS03 */
+#define CMD_START					(U(1) << 31)
+#define CMD_USE_HOLD_REG			(1 << 29)
+#define CMD_UPDATE_CLK_ONLY			(1 << 21)
+#define CMD_SEND_INIT				(1 << 15)
+#define CMD_STOP_ABORT_CMD			(4 << 22)
+#define CMD_RESUME_CMD				(2 << 22)
+#define CMD_SUSPEND_CMD				(1 << 22)
+#define DATA_PRESENT				(1 << 21)
+#define CMD_IDX_CHK_ENABLE			(1 << 20)
+#define CMD_WRITE					(0 << 4)
+#define CMD_READ					(1 << 4)
+#define	MULTI_BLK_READ				(1 << 5)
+#define RESP_ERR					(1 << 7)
+#define CMD_CHECK_RESP_CRC			(1 << 19)
+#define RES_TYPE_SEL_48				(2 << 16)
+#define RES_TYPE_SEL_136			(1 << 16)
+#define RES_TYPE_SEL_48_B			(3 << 16)
+#define RES_TYPE_SEL_NO				(0 << 16)
+#define DMA_ENABLED					(1 << 0)
+#define BLK_CNT_EN					(1 << 1)
+#define AUTO_CMD_EN					(2 << 2)
+#define COM_IDX						24
+#define ERROR_INT					(1 << 15)
+#define INT_SBE						(1 << 13)
+#define INT_HLE						(1 << 12)
+#define INT_FRUN					(1 << 11)
+#define INT_DRT						(1 << 9)
+#define INT_RTO						(1 << 8)
+#define INT_DCRC					(1 << 7)
+#define INT_RCRC					(1 << 6)
+#define INT_RXDR					(1 << 5)
+#define INT_TXDR					(1 << 4)
+#define INT_DTO						(1 << 3)
+#define INT_CMD_DONE				(1 << 0)
+#define TRAN_COMP					(1 << 1)
+
+/* SRS09 */
+#define STATUS_DATA_BUSY			BIT(2)
+
+/* SRS10 */
+/* LED Control
+ * The state of this bit directly drives the led port of the host
+ * in order to control the external LED.
+ * Default value 0 << 1
+ */
+#define LEDC						BIT(0)
+#define LEDC_OFF					0 << 1
+
+/* Data Transfer Width
+ * Bit used to configure DAT bus width to 1 or 4
+ * Default value 1 << 1
+ */
+#define DT_WIDTH					BIT(1)
+#define DTW_4BIT					1 << 1
+
+/* Extended Data Transfer Width
+ * This bit is to enable/disable 8-bit DAT bus width mode
+ * Default value 1 << 5
+ */
+#define EDTW_8BIT					1 << 5
+
+/* High Speed Enable
+ * Selects operating mode to Default Speed (HSE=0) or High Speed (HSE=1)
+ */
+#define HS_EN						BIT(2)
+
+/* here 0 defines the 64 Kb size */
+#define MAX_64KB_PAGE				0
+#define EMMC_DESC_SIZE		(1<<20)
+
+/* SRS11 */
+/* Software Reset For All
+ * When set to 1, the entire slot is reset
+ * After completing the reset operation, the SRFA bit is automatically cleared
+ */
+#define SRFA						BIT(24)
+
+/* Software Reset For CMD Line
+ * When set to 1, resets the logic related to the command generation and response checking
+ */
+#define SRCMD						BIT(25)
+
+/* Software Reset For DAT Line
+ * When set to 1, resets the logic related to the data path,
+ * including data buffers and the DMA logic
+ */
+#define SRDAT						BIT(26)
+
+/* SRS15 */
+/* UHS Mode Select
+ * Used to select one of UHS-I modes.
+ * • 000b - SDR12
+ * • 001b - SDR25
+ * • 010b - SDR50
+ * • 011b - SDR104
+ * • 100b - DDR50
+ */
+#define SDR12_MODE					0 << 16
+#define SDR25_MODE					1 << 16
+#define SDR50_MODE					2 << 16
+#define SDR104_MODE					3 << 16
+#define DDR50_MODE					4 << 16
+/* 1.8V Signaling Enable
+ * • 0 - for Default Speed, High Speed mode
+ * • 1 - for UHS-I mode
+ */
+#define V18SE						BIT(19)
+
+/* CMD23 Enable
+ * As a result of the Card Identification process, the host
+ * driver sets this bit to 1 if the card supports CMD23
+ */
+#define CMD23_EN					BIT(27)
+
+/* Host Version 4.00 Enable
+ * • 0 - Version 3.00
+ * • 1 - Version 4.00
+ */
+#define HV4E						BIT(28)
+/* Conf depends on SRS15.HV4E */
+#define SDMA						0 << 3
+#define ADMA2_32					2 << 3
+#define ADMA2_64					3 << 3
+
+/* Preset Value Enable
+ * Setting this bit to 1 triggers an automatic update of SRS11
+ */
+#define PVE							BIT(31)
+
+#define BIT_AD_32					0 << 29
+#define BIT_AD_64					1 << 29
+
+/* SW RESET REG*/
+#define SDHC_CDNS_HRS00				(0x00)
+#define SDHC_CDNS_HRS00_SWR			BIT(0)
+
+/* PHY access port */
+#define SDHC_CDNS_HRS04				0x10
+#define SDHC_CDNS_HRS04_ADDR		GENMASK(5, 0)
+
+/* PHY data access port */
+#define SDHC_CDNS_HRS05				0x14
+
+/* eMMC control registers */
+#define SDHC_CDNS_HRS06				0x18
+
+/* SRS */
+#define SDHC_CDNS_SRS_BASE			0x200
+#define SDHC_CDNS_SRS00				0x200
+#define SDHC_CDNS_SRS01				0x204
+#define SDHC_CDNS_SRS02				0x208
+#define SDHC_CDNS_SRS03				0x20c
+#define SDHC_CDNS_SRS04				0x210
+#define SDHC_CDNS_SRS05				0x214
+#define SDHC_CDNS_SRS06				0x218
+#define SDHC_CDNS_SRS07				0x21C
+#define SDHC_CDNS_SRS08				0x220
+#define SDHC_CDNS_SRS09				0x224
+#define SDHC_CDNS_SRS09_CI			BIT(16)
+#define SDHC_CDNS_SRS10				0x228
+#define SDHC_CDNS_SRS11				0x22C
+#define SDHC_CDNS_SRS12				0x230
+#define SDHC_CDNS_SRS13				0x234
+#define SDHC_CDNS_SRS14				0x238
+#define SDHC_CDNS_SRS15				0x23c
+#define SDHC_CDNS_SRS21				0x254
+#define SDHC_CDNS_SRS22				0x258
+#define SDHC_CDNS_SRS23				0x25c
+
+/* HRS07 */
+#define SDHC_CDNS_HRS07				0x1c
+#define SDHC_IDELAY_VAL(x)			((x) << 0)
+#define SDHC_RW_COMPENSATE(x)		((x) << 16)
+
+/* PHY reset port */
+#define SDHC_CDNS_HRS09				0x24
+
+/* HRS10 */
+/* PHY reset port */
+#define SDHC_CDNS_HRS10				0x28
+
+/* HCSDCLKADJ DATA; DDR Mode */
+#define SDHC_HCSDCLKADJ(x)			((x) << 16)
+
+/* Pinmux headers will be removed after the ATF driver implementation */
+#define PINMUX_SDMMC_SEL			0x0
+#define PIN0SEL						0x00
+#define PIN1SEL						0x04
+#define PIN2SEL						0x08
+#define PIN3SEL						0x0C
+#define PIN4SEL						0x10
+#define PIN5SEL						0x14
+#define PIN6SEL						0x18
+#define PIN7SEL						0x1C
+#define PIN8SEL						0x20
+#define PIN9SEL						0x24
+#define PIN10SEL					0x28
+
+/* HRS16 */
+#define SDHC_WRCMD0_DLY(x)			((x) << 0)
+#define SDHC_WRCMD1_DLY(x)			((x) << 4)
+#define SDHC_WRDATA0_DLY(x)			((x) << 8)
+#define SDHC_WRDATA1_DLY(x)			((x) << 12)
+#define SDHC_WRCMD0_SDCLK_DLY(x)	((x) << 16)
+#define SDHC_WRCMD1_SDCLK_DLY(x)	((x) << 20)
+#define SDHC_WRDATA0_SDCLK_DLY(x)	((x) << 24)
+#define SDHC_WRDATA1_SDCLK_DLY(x)	((x) << 28)
+
+/* Shared Macros */
+#define SDMMC_CDN(_reg)				(SDMMC_CDN_REG_BASE + \
+								(SDMMC_CDN_##_reg))
+
+/* Refer to atf/tools/cert_create/include/debug.h */
+#define BIT_32(nr)					(U(1) << (nr))
+
+/* MMC Peripheral Definition */
+#define SOCFPGA_MMC_BLOCK_SIZE		U(8192)
+#define SOCFPGA_MMC_BLOCK_MASK		(SOCFPGA_MMC_BLOCK_SIZE - U(1))
+#define SOCFPGA_MMC_BOOT_CLK_RATE	(400 * 1000)
+#define MMC_RESPONSE_NONE			0
+#define SDHC_CDNS_SRS03_VALUE		0x01020013
+
+/* Value randomly chosen for eMMC RCA, it should be > 1 */
+#define MMC_FIX_RCA					6
+#define RCA_SHIFT_OFFSET			16
+
+#define CMD_EXTCSD_PARTITION_CONFIG	179
+#define CMD_EXTCSD_BUS_WIDTH		183
+#define CMD_EXTCSD_HS_TIMING		185
+#define CMD_EXTCSD_SEC_CNT			212
+
+#define PART_CFG_BOOT_PARTITION1_ENABLE	(U(1) << 3)
+#define PART_CFG_PARTITION1_ACCESS	(U(1) << 0)
+
+/* Values in EXT CSD register */
+#define MMC_BUS_WIDTH_1				U(0)
+#define MMC_BUS_WIDTH_4				U(1)
+#define MMC_BUS_WIDTH_8				U(2)
+#define MMC_BUS_WIDTH_DDR_4			U(5)
+#define MMC_BUS_WIDTH_DDR_8			U(6)
+#define MMC_BOOT_MODE_BACKWARD		(U(0) << 3)
+#define MMC_BOOT_MODE_HS_TIMING		(U(1) << 3)
+#define MMC_BOOT_MODE_DDR			(U(2) << 3)
+
+#define EXTCSD_SET_CMD				(U(0) << 24)
+#define EXTCSD_SET_BITS				(U(1) << 24)
+#define EXTCSD_CLR_BITS				(U(2) << 24)
+#define EXTCSD_WRITE_BYTES			(U(3) << 24)
+#define EXTCSD_CMD(x)				(((x) & 0xff) << 16)
+#define EXTCSD_VALUE(x)				(((x) & 0xff) << 8)
+#define EXTCSD_CMD_SET_NORMAL		U(1)
+
+#define CSD_TRAN_SPEED_UNIT_MASK	GENMASK(2, 0)
+#define CSD_TRAN_SPEED_MULT_MASK	GENMASK(6, 3)
+#define CSD_TRAN_SPEED_MULT_SHIFT	3
+
+#define STATUS_CURRENT_STATE(x)		(((x) & 0xf) << 9)
+#define STATUS_READY_FOR_DATA		BIT(8)
+#define STATUS_SWITCH_ERROR			BIT(7)
+#define MMC_GET_STATE(x)			(((x) >> 9) & 0xf)
+#define MMC_STATE_IDLE				0
+#define MMC_STATE_READY				1
+#define MMC_STATE_IDENT				2
+#define MMC_STATE_STBY				3
+#define MMC_STATE_TRAN				4
+#define MMC_STATE_DATA				5
+#define MMC_STATE_RCV				6
+#define MMC_STATE_PRG				7
+#define MMC_STATE_DIS				8
+#define MMC_STATE_BTST				9
+#define MMC_STATE_SLP				10
+
+#define MMC_FLAG_CMD23				(U(1) << 0)
+
+#define CMD8_CHECK_PATTERN			U(0xAA)
+#define VHS_2_7_3_6_V				BIT(8)
+
+/*ADMA table component*/
+#define ADMA_DESC_ATTR_VALID		BIT(0)
+#define ADMA_DESC_ATTR_END			BIT(1)
+#define ADMA_DESC_ATTR_INT			BIT(2)
+#define ADMA_DESC_ATTR_ACT1			BIT(4)
+#define ADMA_DESC_ATTR_ACT2			BIT(5)
+#define ADMA_DESC_TRANSFER_DATA		ADMA_DESC_ATTR_ACT2
+
+enum sd_opcode {
+	SD_GO_IDLE_STATE = 0,
+	SD_ALL_SEND_CID = 2,
+	SD_SEND_RELATIVE_ADDR = 3,
+	SDIO_SEND_OP_COND = 5, /* SDIO cards only */
+	SD_SWITCH = 6,
+	SD_SELECT_CARD = 7,
+	SD_SEND_IF_COND = 8,
+	SD_SEND_CSD = 9,
+	SD_SEND_CID = 10,
+	SD_VOL_SWITCH = 11,
+	SD_STOP_TRANSMISSION = 12,
+	SD_SEND_STATUS = 13,
+	SD_GO_INACTIVE_STATE = 15,
+	SD_SET_BLOCK_SIZE = 16,
+	SD_READ_SINGLE_BLOCK = 17,
+	SD_READ_MULTIPLE_BLOCK = 18,
+	SD_SEND_TUNING_BLOCK = 19,
+	SD_SET_BLOCK_COUNT = 23,
+	SD_WRITE_SINGLE_BLOCK = 24,
+	SD_WRITE_MULTIPLE_BLOCK = 25,
+	SD_ERASE_BLOCK_START = 32,
+	SD_ERASE_BLOCK_END = 33,
+	SD_ERASE_BLOCK_OPERATION = 38,
+	SD_APP_CMD = 55,
+	SD_SPI_READ_OCR = 58, /* SPI mode only */
+	SD_SPI_CRC_ON_OFF = 59, /* SPI mode only */
+};
+
+enum sd_app_cmd {
+	SD_APP_SET_BUS_WIDTH = 6,
+	SD_APP_SEND_STATUS = 13,
+	SD_APP_SEND_NUM_WRITTEN_BLK = 22,
+	SD_APP_SET_WRITE_BLK_ERASE_CNT = 23,
+	SD_APP_SEND_OP_COND = 41,
+	SD_APP_CLEAR_CARD_DETECT = 42,
+	SD_APP_SEND_SCR = 51,
+};
+
+struct cdns_sdmmc_sdhc {
+	uint32_t	sdhc_extended_rd_mode;
+	uint32_t	sdhc_extended_wr_mode;
+	uint32_t	sdhc_hcsdclkadj;
+	uint32_t	sdhc_idelay_val;
+	uint32_t	sdhc_rdcmd_en;
+	uint32_t	sdhc_rddata_en;
+	uint32_t	sdhc_rw_compensate;
+	uint32_t	sdhc_sdcfsh;
+	uint32_t	sdhc_sdcfsl;
+	uint32_t	sdhc_wrcmd0_dly;
+	uint32_t	sdhc_wrcmd0_sdclk_dly;
+	uint32_t	sdhc_wrcmd1_dly;
+	uint32_t	sdhc_wrcmd1_sdclk_dly;
+	uint32_t	sdhc_wrdata0_dly;
+	uint32_t	sdhc_wrdata0_sdclk_dly;
+	uint32_t	sdhc_wrdata1_dly;
+	uint32_t	sdhc_wrdata1_sdclk_dly;
+};
+
+enum sdmmc_device_mode {
+	SD_DS_ID, /* Identification */
+	SD_DS, /* Default speed */
+	SD_HS, /* High speed */
+	SD_UHS_SDR12, /* Ultra high speed SDR12 */
+	SD_UHS_SDR25, /* Ultra high speed SDR25 */
+	SD_UHS_SDR50, /* Ultra high speed SDR50 */
+	SD_UHS_SDR104, /* Ultra high speed SDR104 */
+	SD_UHS_DDR50, /* Ultra high speed DDR50 */
+	EMMC_SDR_BC, /* SDR backward compatible */
+	EMMC_SDR, /* SDR */
+	EMMC_DDR, /* DDR */
+	EMMC_HS200, /* High speed 200MHz in SDR */
+	EMMC_HS400, /* High speed 200MHz in DDR */
+	EMMC_HS400es, /* High speed 200MHz in SDR with enhanced strobe */
+};
+
+struct cdns_sdmmc_params {
+	uintptr_t	reg_base;
+	uintptr_t	reg_pinmux;
+	uintptr_t	reg_phy;
+	uintptr_t	desc_base;
+	size_t		desc_size;
+	int		clk_rate;
+	int		bus_width;
+	unsigned int	flags;
+	enum sdmmc_device_mode	cdn_sdmmc_dev_mode;
+	enum mmc_device_type	cdn_sdmmc_dev_type;
+	uint32_t	combophy;
+};
+
+/* read and write API */
+size_t sdmmc_read_blocks(int lba, uintptr_t buf, size_t size);
+size_t sdmmc_write_blocks(int lba, const uintptr_t buf, size_t size);
+
+struct cdns_idmac_desc {
+	/*8 bit attribute*/
+	uint8_t attr;
+	/*reserved bits in desc*/
+	uint8_t reserved;
+	/*page length for the descriptor*/
+	uint16_t len;
+	/*lower 32 bits for buffer (64 bit addressing)*/
+	uint32_t addr_lo;
+#if CONFIG_DMA_ADDR_T_64BIT == 1
+	/*higher 32 bits for buffer (64 bit addressing)*/
+	uint32_t addr_hi;
+} __aligned(8);
+#else
+} __packed;
+#endif
+
+
+/* Function Prototype */
+int cdns_sd_host_init(struct cdns_sdmmc_combo_phy *mmc_combo_phy_reg,
+		      struct cdns_sdmmc_sdhc *mmc_sdhc_reg);
+void cdns_set_sdmmc_var(struct cdns_sdmmc_combo_phy *combo_phy_reg,
+			struct cdns_sdmmc_sdhc *sdhc_reg);
+#endif
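Sketch (reviewer note, not part of the patch) showing how the ADMA2 descriptor attributes and the conditional cdns_idmac_desc layout above fit together; the helper name and its arguments are hypothetical and no descriptor chaining is shown.

#include <stdbool.h>
#include <stdint.h>

#include <drivers/cadence/cdns_sdmmc.h>
#include <lib/utils_def.h>

/* Fill a single ADMA2 transfer descriptor for a data buffer. */
static void example_fill_adma2_desc(struct cdns_idmac_desc *desc,
				    uintptr_t buf, uint16_t len, bool last)
{
	desc->attr = ADMA_DESC_ATTR_VALID | ADMA_DESC_TRANSFER_DATA;
	if (last) {
		desc->attr |= ADMA_DESC_ATTR_END;
	}

	desc->len = len;
	desc->addr_lo = (uint32_t)buf;
#if CONFIG_DMA_ADDR_T_64BIT == 1
	/* Upper address word only exists in the 64-bit descriptor layout */
	desc->addr_hi = (uint32_t)((unsigned long long)buf >> 32);
#endif
}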
diff --git a/include/drivers/nxp/trdc/imx_trdc.h b/include/drivers/nxp/trdc/imx_trdc.h
new file mode 100644
index 0000000..0b41fcf
--- /dev/null
+++ b/include/drivers/nxp/trdc/imx_trdc.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef IMX_TRDC_H
+#define IMX_TRDC_H
+
+#define MBC_BLK_ALL	U(255)
+#define MRC_REG_ALL	U(16)
+#define GLBAC_NUM	U(8)
+
+#define DID_NUM		U(16)
+#define MBC_MAX_NUM	U(4)
+#define MRC_MAX_NUM	U(2)
+#define MBC_NUM(HWCFG)	(((HWCFG) >> 16) & 0xF)
+#define MRC_NUM(HWCFG)	(((HWCFG) >> 24) & 0x1F)
+
+#define MBC_BLK_NUM(GLBCFG)	((GLBCFG) & 0x3FF)
+#define MRC_RGN_NUM(GLBCFG)	((GLBCFG) & 0x1F)
+
+#define MDAC_W_X(m, r)	(0x800 + (m) * 0x20 + (r) * 0x4)
+
+/* CPU/non-CPU domain common bits */
+#define MDA_VLD		BIT(31)
+#define MDA_LK1		BIT(30)
+#define MDA_DFMT	BIT(29)
+
+/* CPU domain bits */
+#define MDA_DFMT0_DID(x)	((x) & 0xF)
+#define MDA_DFMT0_DIDS(x)	(((x) & 0x3) << 4)
+#define MDA_DFMT0_PE(x)		(((x) & 0x3) << 6)
+#define MDA_DFMT0_PIDM(x)	(((x) & 0x3F) << 8)
+#define MDA_DFMT0_SA(x)		(((x) & 0x3) << 14)
+#define MDA_DFMT0_PID(x)	(((x) & 0x3F) << 16)
+
+/* non-CPU domain bits */
+#define MDA_DFMT1_DID(x)	((x) & 0xF)
+#define MDA_DFMT1_PA(x)		(((x) & 0x3) << 4)
+#define MDA_DFMT1_SA(x)		(((x) & 0x3) << 6)
+#define MDA_DFMT1_DIDB(x)	((x) << 8)
+
+#define SP(X)	((X) << 12)
+#define SU(X)	((X) << 8)
+#define NP(X)	((X) << 4)
+#define NU(X)	((X) << 0)
+
+#define RWX	U(7)
+#define RW	U(6)
+#define RX	U(5)
+#define R	U(4)
+#define X	U(1)
+
+struct mbc_mem_dom {
+	uint32_t mem_glbcfg[4];
+	uint32_t nse_blk_index;
+	uint32_t nse_blk_set;
+	uint32_t nse_blk_clr;
+	uint32_t nsr_blk_clr_all;
+	uint32_t memn_glbac[8];
+	/* The registers above exist only in the first domain instance of each MBC */
+	uint32_t mem0_blk_cfg_w[64];
+	uint32_t mem0_blk_nse_w[16];
+	uint32_t mem1_blk_cfg_w[8];
+	uint32_t mem1_blk_nse_w[2];
+	uint32_t mem2_blk_cfg_w[8];
+	uint32_t mem2_blk_nse_w[2];
+	uint32_t mem3_blk_cfg_w[8];
+	uint32_t mem3_blk_nse_w[2]; /*0x1F0, 0x1F4 */
+	uint32_t reserved[2];
+};
+
+struct mrc_rgn_dom {
+	uint32_t mrc_glbcfg[4];
+	uint32_t nse_rgn_indirect;
+	uint32_t nse_rgn_set;
+	uint32_t nse_rgn_clr;
+	uint32_t nse_rgn_clr_all;
+	uint32_t memn_glbac[8];
+	/* The registers above exist only in the first domain instance of each MRC */
+	uint32_t rgn_desc_words[16][2]; /* 16 regions at max, 2 words per region */
+	uint32_t rgn_nse;
+	uint32_t reserved2[15];
+};
+
+struct mda_inst {
+	uint32_t mda_w[8];
+};
+
+struct trdc_mgr {
+	uint32_t trdc_cr;
+	uint32_t res0[59];
+	uint32_t trdc_hwcfg0;
+	uint32_t trdc_hwcfg1;
+	uint32_t res1[450];
+	struct mda_inst mda[128];
+};
+
+struct trdc_mbc {
+	struct mbc_mem_dom mem_dom[DID_NUM];
+};
+
+struct trdc_mrc {
+	struct mrc_rgn_dom mrc_dom[DID_NUM];
+};
+
+/***************************************************************
+ * The structs below provide the TRDC configuration info that is
+ * used to initialize the TRDC according to the use case.
+ ***************************************************************/
+struct trdc_glbac_config {
+	uint8_t mbc_mrc_id;
+	uint8_t glbac_id;
+	uint32_t glbac_val;
+};
+
+struct trdc_mbc_config {
+	uint8_t mbc_id;
+	uint8_t dom_id;
+	uint8_t mem_id;
+	uint8_t blk_id;
+	uint8_t glbac_id;
+	bool secure;
+};
+
+struct trdc_mrc_config {
+	uint8_t mrc_id;
+	uint8_t dom_id;
+	uint8_t region_id;
+	uint32_t region_start;
+	uint32_t region_size;
+	uint8_t glbac_id;
+	bool secure;
+};
+
+struct trdc_mgr_info {
+	uintptr_t trdc_base;
+	uint8_t mbc_id;
+	uint8_t mbc_mem_id;
+	uint8_t blk_mgr;
+	uint8_t blk_mc;
+};
+
+struct trdc_config_info {
+	uintptr_t trdc_base;
+	struct trdc_glbac_config *mbc_glbac;
+	uint32_t num_mbc_glbac;
+	struct trdc_mbc_config *mbc_cfg;
+	uint32_t num_mbc_cfg;
+	struct trdc_glbac_config *mrc_glbac;
+	uint32_t num_mrc_glbac;
+	struct trdc_mrc_config *mrc_cfg;
+	uint32_t num_mrc_cfg;
+};
+
+extern struct trdc_mgr_info trdc_mgr_blks[];
+extern unsigned int trdc_mgr_num;
+/* APIs to apply and enable TRDC */
+int trdc_mda_set_cpu(uintptr_t trdc_base, uint32_t mda_inst,
+		     uint32_t mda_reg, uint8_t sa, uint8_t dids,
+		     uint8_t did, uint8_t pe, uint8_t pidm, uint8_t pid);
+
+int trdc_mda_set_noncpu(uintptr_t trdc_base, uint32_t mda_inst,
+			bool did_bypass, uint8_t sa, uint8_t pa,
+			uint8_t did);
+
+void trdc_mgr_mbc_setup(struct trdc_mgr_info *mgr);
+void trdc_setup(struct trdc_config_info *cfg);
+void trdc_config(void);
+
+#endif /* IMX_TRDC_H */
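Hypothetical call sketch (reviewer note, not part of the patch) for the master-domain assignment API above; the MDA instance, register index, domain ID and attribute arguments are placeholders chosen only to show the call shape.

#include <common/debug.h>
#include <drivers/nxp/trdc/imx_trdc.h>

#include <platform_def.h>

/* Assign a CPU master (placeholder instance 0, MDA register 0) to domain 3. */
static void example_assign_cpu_master(void)
{
	int ret;

	ret = trdc_mda_set_cpu(TRDC_A_BASE, 0U, 0U, 0U /* sa */,
			       0U /* dids */, 3U /* did */, 0U /* pe */,
			       0U /* pidm */, 0U /* pid */);
	if (ret != 0) {
		ERROR("TRDC MDA CPU config failed\n");
	}
}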
diff --git a/make_helpers/march.mk b/make_helpers/march.mk
new file mode 100644
index 0000000..2417709
--- /dev/null
+++ b/make_helpers/march.mk
@@ -0,0 +1,85 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# This build helper makefile is used to determine a suitable march build
+# option, based on the platform-provided ARM_ARCH_MAJOR/MINOR values and
+# the march versions supported by the compiler.
+
+# Set the compiler's target architecture profile based on
+# ARM_ARCH_MAJOR and ARM_ARCH_MINOR options.
+
+
+# This is used to collect the available march values from the compiler.
+# For example, on a gcc-12.2 arm64 toolchain it will look like:
+# [...]
+#./aarch64-none-elf-gcc --target-help -march=foo
+# cc1: error: unknown value 'foo' for '-march'
+# cc1: note: valid arguments are: armv8-a armv8.1-a armv8.2-a armv8.3-a armv8.4-a armv8.5-a
+# armv8.6-a armv8.7-a armv8.8-a armv8-r armv9-a
+# [...]
+#
+GCC_MARCH_OUTPUT := $(shell $(CC) -march=foo -Q --help=target -v 2>&1)
+
+# This function is used to find the best march value supported by the given compiler.
+# It uses `GCC_MARCH_OUTPUT`, whose verbose message lists the supported march values,
+# filters it for armvX.Y-a or armvX-a values, and then picks the best supported arch
+# based on ARM_ARCH_MAJOR.
+#
+# Example: on a gcc-12.2 arm64 toolchain this returns armv9-a if the platform requested
+# armv9.2-a. Similarly, on gcc 11.3 it returns armv8.6-a if the platform requested armv8.8-a.
+define major_best_avail_march
+$(1) := $(lastword $(filter armv$(ARM_ARCH_MAJOR)% ,$(filter armv%-a, $(GCC_MARCH_OUTPUT))))
+endef
+
+# This function simply returns the latest march value supported by the given compiler.
+#
+# Example: this would return armv8.6-a on gcc-11.3 when the platform requested armv9.0-a.
+#
+# It should therefore be used in conjunction with major_best_avail_march: when no best
+# match is found, it is acceptable to fall back to the latest march value known to be
+# supported by the compiler.
+define latest_match_march
+$(1) := $(lastword $(filter armv%-a, $(GCC_MARCH_OUTPUT)))
+endef
+
+ifdef MARCH_DIRECTIVE
+    march-directive		:= $(MARCH_DIRECTIVE)
+else
+
+ifeq (${ARM_ARCH_MINOR},0)
+    provided-march = armv${ARM_ARCH_MAJOR}-a
+else
+    provided-march = armv${ARM_ARCH_MAJOR}.${ARM_ARCH_MINOR}-a
+endif
+
+ifeq ($(findstring clang,$(notdir $(CC))),)
+
+# The platform is expected to provide a correct Major/Minor value, but if the compiler
+# does not support the requested march we should not fail without trying anything.
+# Instead, we try to find the best supported march value and use that for compilation.
+# If the gcc compiler does not support the requested march version, fall back to a lower
+# arch based on availability. TF-A has no hard requirement for a higher march version for
+# basic functionality, and refusing to build on that basis alone is not justified, so try
+# the best or latest march value from what the compiler makes available.
+ifeq (,$(findstring $(provided-march), $(GCC_MARCH_OUTPUT)))
+    $(eval $(call major_best_avail_march, available-march))
+
+ifeq (, $(available-march))
+    $(eval $(call latest_match_march, available-march))
+endif
+
+# If no available-march value has been found so far, don't update provided-march;
+# this lets the build fail with the provided-march derived from the arch major
+# and minor values.
+ifneq (,$(available-march))
+    provided-march := ${available-march}
+endif
+
+endif # provided-march supported
+endif # not clang
+
+march-directive := -march=${provided-march}
+
+endif # MARCH_DIRECTIVE
diff --git a/plat/imx/imx93/aarch64/plat_helpers.S b/plat/imx/imx93/aarch64/plat_helpers.S
new file mode 100644
index 0000000..9987a53
--- /dev/null
+++ b/plat/imx/imx93/aarch64/plat_helpers.S
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <cortex_a55.h>
+
+#include <platform_def.h>
+
+	.globl	plat_is_my_cpu_primary
+	.globl	plat_my_core_pos
+	.globl	plat_calc_core_pos
+	.globl	platform_mem_init
+
+	/* ------------------------------------------------------
+	 * Helper macro that reads the part number of the current
+	 * CPU and jumps to the given label if it matches the CPU
+	 * MIDR provided.
+	 *
+	 * Clobbers x0.
+	 * ------------------------------------------------------
+	 */
+	.macro  jump_if_cpu_midr _cpu_midr, _label
+
+	mrs	x0, midr_el1
+	ubfx	x0, x0, MIDR_PN_SHIFT, #12
+	cmp     w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
+	b.eq	\_label
+
+	.endm
+
+	/* ----------------------------------------------
+	 * unsigned int plat_is_my_cpu_primary(void);
+	 * This function checks if this is the primary CPU
+	 * ----------------------------------------------
+	 */
+func plat_is_my_cpu_primary
+	mrs	x0, mpidr_el1
+	mov_imm x1, MPIDR_AFFINITY_MASK
+	and	x0, x0, x1
+	cmp	x0, #PLAT_PRIMARY_CPU
+	cset	x0, eq
+	ret
+endfunc plat_is_my_cpu_primary
+
+	/* ----------------------------------------------
+	 * unsigned int plat_my_core_pos(void)
+	 * This function uses the plat_calc_core_pos()
+	 * to get the index of the calling CPU.
+	 * ----------------------------------------------
+	 */
+func plat_my_core_pos
+	mrs	x0, mpidr_el1
+	mov	x1, #MPIDR_AFFLVL_MASK
+	and	x0, x1, x0, lsr #MPIDR_AFF1_SHIFT
+	ret
+endfunc plat_my_core_pos
+
+	/*
+	 * unsigned int plat_calc_core_pos(uint64_t mpidr)
+	 * Helper function to calculate the core position
+	 * from the given MPIDR value.
+	 */
+func plat_calc_core_pos
+	mov	x1, #MPIDR_AFFLVL_MASK
+	and	x0, x1, x0, lsr #MPIDR_AFF1_SHIFT
+	ret
+endfunc plat_calc_core_pos
+
+func platform_mem_init
+	ret
+endfunc platform_mem_init
diff --git a/plat/imx/imx93/imx93_bl31_setup.c b/plat/imx/imx93/imx93_bl31_setup.c
new file mode 100644
index 0000000..8458f6c
--- /dev/null
+++ b/plat/imx/imx93/imx93_bl31_setup.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <context.h>
+#include <drivers/console.h>
+#include <drivers/generic_delay_timer.h>
+#include <drivers/nxp/trdc/imx_trdc.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/mmio.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/platform.h>
+
+#include <imx8_lpuart.h>
+#include <plat_imx8.h>
+#include <platform_def.h>
+
+#define MAP_BL31_TOTAL										   \
+	MAP_REGION_FLAT(BL31_BASE, BL31_LIMIT - BL31_BASE, MT_MEMORY | MT_RW | MT_SECURE)
+#define MAP_BL31_RO										   \
+	MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE, MT_MEMORY | MT_RO | MT_SECURE)
+
+static const mmap_region_t imx_mmap[] = {
+	AIPS1_MAP, AIPS2_MAP, AIPS4_MAP, GIC_MAP,
+	TRDC_A_MAP, TRDC_W_MAP, TRDC_M_MAP,
+	TRDC_N_MAP,
+	{0},
+};
+
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+/* get SPSR for BL33 entry */
+static uint32_t get_spsr_for_bl33_entry(void)
+{
+	unsigned long el_status;
+	unsigned long mode;
+	uint32_t spsr;
+
+	/* figure out in which mode we enter the non-secure world */
+	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+	el_status &= ID_AA64PFR0_ELX_MASK;
+
+	mode = (el_status) ? MODE_EL2 : MODE_EL1;
+
+	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+	return spsr;
+}
+
+void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
+		u_register_t arg2, u_register_t arg3)
+{
+	static console_t console;
+
+	console_lpuart_register(IMX_LPUART_BASE, IMX_BOOT_UART_CLK_IN_HZ,
+		     IMX_CONSOLE_BAUDRATE, &console);
+
+	/* This console is only used for boot stage */
+	console_set_scope(&console, CONSOLE_FLAG_BOOT);
+
+	/*
+	 * tell BL3-1 where the non-secure software image is located
+	 * and the entry state information.
+	 */
+	bl33_image_ep_info.pc = PLAT_NS_IMAGE_OFFSET;
+	bl33_image_ep_info.spsr = get_spsr_for_bl33_entry();
+	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+
+#if defined(SPD_opteed)
+	/* Populate entry point information for BL32 */
+	SET_PARAM_HEAD(&bl32_image_ep_info, PARAM_EP, VERSION_1, 0);
+	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
+	bl32_image_ep_info.pc = BL32_BASE;
+	bl32_image_ep_info.spsr = 0;
+
+	/* Pass TEE base and size to bl33 */
+	bl33_image_ep_info.args.arg1 = BL32_BASE;
+	bl33_image_ep_info.args.arg2 = BL32_SIZE;
+
+	/* Make sure memory is clean */
+	mmio_write_32(BL32_FDT_OVERLAY_ADDR, 0);
+	bl33_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR;
+	bl32_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR;
+#endif
+}
+
+void bl31_plat_arch_setup(void)
+{
+	/* no coherent memory support on i.MX9 */
+	const mmap_region_t bl_regions[] = {
+		MAP_BL31_TOTAL,
+		MAP_BL31_RO,
+	};
+
+	/* Assign all the GPIO pins to non-secure world by default */
+	mmio_write_32(GPIO2_BASE + 0x10, 0xffffffff);
+	mmio_write_32(GPIO2_BASE + 0x14, 0x3);
+	mmio_write_32(GPIO2_BASE + 0x18, 0xffffffff);
+	mmio_write_32(GPIO2_BASE + 0x1c, 0x3);
+
+	mmio_write_32(GPIO3_BASE + 0x10, 0xffffffff);
+	mmio_write_32(GPIO3_BASE + 0x14, 0x3);
+	mmio_write_32(GPIO3_BASE + 0x18, 0xffffffff);
+	mmio_write_32(GPIO3_BASE + 0x1c, 0x3);
+
+	mmio_write_32(GPIO4_BASE + 0x10, 0xffffffff);
+	mmio_write_32(GPIO4_BASE + 0x14, 0x3);
+	mmio_write_32(GPIO4_BASE + 0x18, 0xffffffff);
+	mmio_write_32(GPIO4_BASE + 0x1c, 0x3);
+
+	mmio_write_32(GPIO1_BASE + 0x10, 0xffffffff);
+	mmio_write_32(GPIO1_BASE + 0x14, 0x3);
+	mmio_write_32(GPIO1_BASE + 0x18, 0xffffffff);
+	mmio_write_32(GPIO1_BASE + 0x1c, 0x3);
+
+	setup_page_tables(bl_regions, imx_mmap);
+	enable_mmu_el3(0);
+
+	/* trdc must be initialized */
+	trdc_config();
+}
+
+void bl31_platform_setup(void)
+{
+	generic_delay_timer_init();
+
+	plat_gic_driver_init();
+	plat_gic_init();
+}
+
+void bl31_plat_runtime_setup(void)
+{
+	console_switch_state(CONSOLE_FLAG_RUNTIME);
+}
+
+entry_point_info_t *bl31_plat_get_next_image_ep_info(unsigned int type)
+{
+	if (type == NON_SECURE) {
+		return &bl33_image_ep_info;
+	}
+
+	if (type == SECURE) {
+		return &bl32_image_ep_info;
+	}
+
+	return NULL;
+}
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+	return COUNTER_FREQUENCY;
+}
diff --git a/plat/imx/imx93/imx93_psci.c b/plat/imx/imx93/imx93_psci.c
new file mode 100644
index 0000000..5e1fa95
--- /dev/null
+++ b/plat/imx/imx93/imx93_psci.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdbool.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/psci/psci.h>
+
+#include <plat_imx8.h>
+#include <pwr_ctrl.h>
+
+#define CORE_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL0])
+#define CLUSTER_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL1])
+#define SYSTEM_PWR_STATE(state) ((state)->pwr_domain_state[PLAT_MAX_PWR_LVL])
+
+/* platform secure warm boot entry */
+static uintptr_t secure_entrypoint;
+
+static bool boot_stage = true;
+
+int imx_validate_ns_entrypoint(uintptr_t ns_entrypoint)
+{
+	/* The non-secure entrypoint should be in RAM space */
+	if (ns_entrypoint < PLAT_NS_IMAGE_OFFSET) {
+		return PSCI_E_INVALID_PARAMS;
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+int imx_validate_power_state(unsigned int power_state,
+			 psci_power_state_t *req_state)
+{
+	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
+	int pwr_type = psci_get_pstate_type(power_state);
+	int state_id = psci_get_pstate_id(power_state);
+
+	if (pwr_lvl > PLAT_MAX_PWR_LVL) {
+		return PSCI_E_INVALID_PARAMS;
+	}
+
+	if (pwr_type == PSTATE_TYPE_STANDBY) {
+		CORE_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
+		CLUSTER_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
+	}
+
+	if (pwr_type == PSTATE_TYPE_POWERDOWN && state_id == 0x33) {
+		CORE_PWR_STATE(req_state) = PLAT_MAX_OFF_STATE;
+		CLUSTER_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+void imx_set_cpu_boot_entry(unsigned int core_id, uint64_t boot_entry)
+{
+	/* set the cpu core reset entry: BLK_CTRL_S */
+	mmio_write_32(BLK_CTRL_S_BASE + CA55_RVBADDR0_L + core_id * 8, boot_entry >> 2);
+}
+
+int imx_pwr_domain_on(u_register_t mpidr)
+{
+	unsigned int core_id;
+
+	core_id = MPIDR_AFFLVL1_VAL(mpidr);
+
+	imx_set_cpu_boot_entry(core_id, secure_entrypoint);
+
+	/*
+	 * When the core boots up for the first time, it is already ON after SoC POR,
+	 * so 'SW_WAKEUP' cannot work; instead, toggle the core's reset and then
+	 * release the core from cpu_wait.
+	 */
+	if (boot_stage) {
+		/* assert CPU core SW reset */
+		mmio_clrbits_32(SRC_SLICE(SRC_A55C0 + core_id) + 0x24, BIT(2) | BIT(0));
+		/* deassert CPU core SW reset */
+		mmio_setbits_32(SRC_SLICE(SRC_A55C0 + core_id) + 0x24, BIT(2) | BIT(0));
+		/* release the cpuwait to kick the cpu */
+		mmio_clrbits_32(BLK_CTRL_S_BASE + CA55_CPUWAIT, BIT(core_id));
+	} else {
+		/* assert the CMC MISC SW WAKEUP BIT to kick the offline core */
+		gpc_assert_sw_wakeup(CPU_A55C0 + core_id);
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+void imx_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	uint64_t mpidr = read_mpidr_el1();
+	unsigned int core_id = MPIDR_AFFLVL1_VAL(mpidr);
+
+	plat_gic_pcpu_init();
+	plat_gic_cpuif_enable();
+
+	/* the config below applies to both boot and hotplug */
+	/* clear the CPU power mode */
+	gpc_set_cpu_mode(CPU_A55C0 + core_id, CM_MODE_RUN);
+	/* clear the SW wakeup */
+	gpc_deassert_sw_wakeup(CPU_A55C0 + core_id);
+	/* switch to GIC wakeup source */
+	gpc_select_wakeup_gic(CPU_A55C0 + core_id);
+
+	if (boot_stage) {
+		/* SRC config */
+		/* config the MEM LPM */
+		src_mem_lpm_en(SRC_A55P0_MEM + core_id, MEM_OFF);
+		/* LPM config: only ON in RUN mode, for its own domain */
+		src_mix_set_lpm(SRC_A55C0 + core_id, core_id, CM_MODE_WAIT);
+		/* whitelist config, only enable its own domain */
+		src_authen_config(SRC_A55C0 + core_id, 1 << core_id, 0x1);
+
+		boot_stage = false;
+	}
+}
+
+void imx_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	uint64_t mpidr = read_mpidr_el1();
+	unsigned int core_id = MPIDR_AFFLVL1_VAL(mpidr);
+	unsigned int i;
+
+	plat_gic_cpuif_disable();
+	write_clusterpwrdn(DSU_CLUSTER_PWR_OFF);
+
+	/*
+	 * mask all the GPC IRQ wakeup sources to make sure no IRQ can wake up
+	 * this core, as we need to use SW_WAKEUP for hotplug purposes
+	 */
+	for (i = 0U; i < IMR_NUM; i++) {
+		gpc_set_irq_mask(CPU_A55C0 + core_id, i, 0xffffffff);
+	}
+	/* switch to GPC wakeup source */
+	gpc_select_wakeup_raw_irq(CPU_A55C0 + core_id);
+	/* config the target mode to suspend */
+	gpc_set_cpu_mode(CPU_A55C0 + core_id, CM_MODE_SUSPEND);
+}
+
+void imx_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+	uint64_t mpidr = read_mpidr_el1();
+	unsigned int core_id = MPIDR_AFFLVL1_VAL(mpidr);
+
+	/* do cpu level config */
+	if (is_local_state_off(CORE_PWR_STATE(target_state))) {
+		plat_gic_cpuif_disable();
+		imx_set_cpu_boot_entry(core_id, secure_entrypoint);
+		/* config the target mode to WAIT */
+		gpc_set_cpu_mode(CPU_A55C0 + core_id, CM_MODE_WAIT);
+	}
+
+	/* do cluster level config */
+	if (!is_local_state_run(CLUSTER_PWR_STATE(target_state))) {
+		/* config the A55 cluster target mode to WAIT */
+		gpc_set_cpu_mode(CPU_A55_PLAT, CM_MODE_WAIT);
+
+		/* config DSU for cluster power down with L3 MEM RET */
+		if (is_local_state_retn(CLUSTER_PWR_STATE(target_state))) {
+			write_clusterpwrdn(DSU_CLUSTER_PWR_OFF | BIT(1));
+		}
+	}
+}
+
+void imx_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+	uint64_t mpidr = read_mpidr_el1();
+	unsigned int core_id = MPIDR_AFFLVL1_VAL(mpidr);
+
+	/* cluster level */
+	if (!is_local_state_run(CLUSTER_PWR_STATE(target_state))) {
+		/* set the cluster's target mode to RUN */
+		gpc_set_cpu_mode(CPU_A55_PLAT, CM_MODE_RUN);
+	}
+
+	/* do core level */
+	if (is_local_state_off(CORE_PWR_STATE(target_state))) {
+		/* set A55 CORE's power mode to RUN */
+		gpc_set_cpu_mode(CPU_A55C0 + core_id, CM_MODE_RUN);
+		plat_gic_cpuif_enable();
+	}
+}
+
+void imx_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+	unsigned int i;
+
+	for (i = IMX_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++) {
+		req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
+	}
+
+	SYSTEM_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
+	CLUSTER_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
+}
+
+void __dead2 imx_system_reset(void)
+{
+	mmio_write_32(WDOG3_BASE + WDOG_CNT, 0xd928c520);
+	while ((mmio_read_32(WDOG3_BASE + WDOG_CS) & WDOG_CS_ULK) == 0U) {
+		;
+	}
+
+	mmio_write_32(WDOG3_BASE + WDOG_TOVAL, 0x10);
+	mmio_write_32(WDOG3_BASE + WDOG_CS, 0x21e3);
+
+	while (1) {
+		wfi();
+	}
+}
+
+void __dead2 imx_system_off(void)
+{
+	mmio_setbits_32(BBNSM_BASE + BBNSM_CTRL, BBNSM_DP_EN | BBNSM_TOSP);
+
+	while (1) {
+		wfi();
+	}
+}
+
+static const plat_psci_ops_t imx_plat_psci_ops = {
+	.validate_ns_entrypoint = imx_validate_ns_entrypoint,
+	.validate_power_state = imx_validate_power_state,
+	.pwr_domain_on = imx_pwr_domain_on,
+	.pwr_domain_off = imx_pwr_domain_off,
+	.pwr_domain_on_finish = imx_pwr_domain_on_finish,
+	.pwr_domain_suspend = imx_pwr_domain_suspend,
+	.pwr_domain_suspend_finish = imx_pwr_domain_suspend_finish,
+	.get_sys_suspend_power_state = imx_get_sys_suspend_power_state,
+	.system_reset = imx_system_reset,
+	.system_off = imx_system_off,
+};
+
+/* export the platform specific psci ops */
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const plat_psci_ops_t **psci_ops)
+{
+	/* sec_entrypoint is used for warm reset */
+	secure_entrypoint = sec_entrypoint;
+	imx_set_cpu_boot_entry(0, sec_entrypoint);
+
+	pwr_sys_init();
+
+	*psci_ops = &imx_plat_psci_ops;
+
+	return 0;
+}
diff --git a/plat/imx/imx93/include/platform_def.h b/plat/imx/imx93/include/platform_def.h
new file mode 100644
index 0000000..7efbf1c
--- /dev/null
+++ b/plat/imx/imx93/include/platform_def.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef PLATFORM_DEF_H
+#define PLATFORM_DEF_H
+
+#include <lib/utils_def.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#define PLATFORM_LINKER_FORMAT		"elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH		aarch64
+
+#define PLATFORM_STACK_SIZE		0xB00
+#define CACHE_WRITEBACK_GRANULE		64
+
+#define PLAT_PRIMARY_CPU		U(0x0)
+#define PLATFORM_MAX_CPU_PER_CLUSTER	U(2)
+#define PLATFORM_CLUSTER_COUNT		U(1)
+#define PLATFORM_CLUSTER0_CORE_COUNT	U(2)
+#define PLATFORM_CORE_COUNT		U(2)
+
+#define IMX_PWR_LVL0			MPIDR_AFFLVL0
+
+#define PWR_DOMAIN_AT_MAX_LVL		U(1)
+#define PLAT_MAX_PWR_LVL		U(2)
+#define PLAT_MAX_OFF_STATE		U(4)
+#define PLAT_MAX_RET_STATE		U(2)
+
+#define BL31_BASE			U(0x204E0000)
+#define BL31_LIMIT			U(0x20520000)
+
+/* non-secure uboot base */
+/* TODO */
+#define PLAT_NS_IMAGE_OFFSET		U(0x80200000)
+#define BL32_FDT_OVERLAY_ADDR           (PLAT_NS_IMAGE_OFFSET + 0x3000000)
+
+/* GICv4 base address */
+#define PLAT_GICD_BASE			U(0x48000000)
+#define PLAT_GICR_BASE			U(0x48040000)
+
+#define PLAT_VIRT_ADDR_SPACE_SIZE	(ULL(1) << 32)
+#define PLAT_PHY_ADDR_SPACE_SIZE	(ULL(1) << 32)
+
+#define MAX_XLAT_TABLES			8
+#define MAX_MMAP_REGIONS		16
+
+#define IMX_LPUART_BASE			U(0x44380000)
+#define IMX_BOOT_UART_CLK_IN_HZ		U(24000000) /* Select 24MHz oscillator */
+#define IMX_CONSOLE_BAUDRATE		115200
+
+#define AIPSx_SIZE			U(0x800000)
+#define AIPS1_BASE			U(0x44000000)
+#define AIPS2_BASE			U(0x42000000)
+#define AIPS3_BASE			U(0x42800000)
+#define AIPS4_BASE			U(0x49000000)
+#define GPIO1_BASE			U(0x47400000)
+#define GPIO2_BASE			U(0x43810000)
+#define GPIO3_BASE			U(0x43820000)
+#define GPIO4_BASE			U(0x43830000)
+
+#define TRDC_A_BASE			U(0x44270000)
+#define TRDC_W_BASE			U(0x42460000)
+#define TRDC_M_BASE			U(0x42810000)
+#define TRDC_N_BASE			U(0x49010000)
+#define TRDC_x_SISE			U(0x20000)
+
+#define WDOG3_BASE			U(0x42490000)
+#define WDOG_CS				U(0x0)
+#define WDOG_CS_ULK			BIT(11)
+#define WDOG_CNT			U(0x4)
+#define WDOG_TOVAL			U(0x8)
+
+#define BBNSM_BASE			U(0x44440000)
+#define BBNSM_CTRL			U(0x8)
+#define BBNSM_DP_EN			BIT(24)
+#define BBNSM_TOSP			BIT(25)
+
+#define SRC_BASE			U(0x44460000)
+#define GPC_BASE			U(0x44470000)
+#define BLK_CTRL_S_BASE			U(0x444F0000)
+#define S400_MU_BASE			U(0x47520000)
+
+/* system memory map define */
+#define AIPS2_MAP	MAP_REGION_FLAT(AIPS2_BASE, AIPSx_SIZE, MT_DEVICE | MT_RW | MT_NS)
+#define AIPS1_MAP	MAP_REGION_FLAT(AIPS1_BASE, AIPSx_SIZE, MT_DEVICE | MT_RW)
+#define AIPS4_MAP	MAP_REGION_FLAT(AIPS4_BASE, AIPSx_SIZE, MT_DEVICE | MT_RW | MT_NS)
+#define GIC_MAP		MAP_REGION_FLAT(PLAT_GICD_BASE, 0x200000, MT_DEVICE | MT_RW)
+#define TRDC_A_MAP	MAP_REGION_FLAT(TRDC_A_BASE, TRDC_x_SISE, MT_DEVICE | MT_RW)
+#define TRDC_W_MAP	MAP_REGION_FLAT(TRDC_W_BASE, TRDC_x_SISE, MT_DEVICE | MT_RW)
+#define TRDC_M_MAP	MAP_REGION_FLAT(TRDC_M_BASE, TRDC_x_SISE, MT_DEVICE | MT_RW)
+#define TRDC_N_MAP	MAP_REGION_FLAT(TRDC_N_BASE, TRDC_x_SISE, MT_DEVICE | MT_RW)
+
+#define COUNTER_FREQUENCY		24000000
+
+#endif /* PLATFORM_DEF_H */
diff --git a/plat/imx/imx93/include/pwr_ctrl.h b/plat/imx/imx93/include/pwr_ctrl.h
new file mode 100644
index 0000000..9bcf486
--- /dev/null
+++ b/plat/imx/imx93/include/pwr_ctrl.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PWR_CTRL_H
+#define PWR_CTRL_H
+
+#include <stdbool.h>
+
+#include <lib/mmio.h>
+
+#include <platform_def.h>
+
+/*******************************************************************************
+ * GPC definitions & declarations
+ ******************************************************************************/
+/* GPC GLOBAL */
+#define GPC_GLOBAL_BASE		U(GPC_BASE + 0x4000)
+#define GPC_AUTHEN_CTRL		U(0x4)
+#define GPC_DOMAIN		U(0x10)
+#define GPC_MASTER		U(0x1c)
+#define GPC_SYS_SLEEP		U(0x40)
+#define PMIC_CTRL		U(0x100)
+#define PMIC_PRE_DLY_CTRL	U(0x104)
+#define PMIC_STBY_ACK_CTRL	U(0x108)
+#define GPC_ROSC_CTRL		U(0x200)
+#define GPC_AON_MEM_CTRL	U(0x204)
+#define GPC_EFUSE_CTRL		U(0x208)
+
+#define FORCE_CPUx_DISABLE(x)	(1 << (16 + (x)))
+#define PMIC_STBY_EN		BIT(0)
+#define ROSC_OFF_EN		BIT(0)
+
+/* GPC CPU_CTRL */
+#define CM_SLICE(x)		(GPC_BASE + 0x800 * (x))
+#define CM_AUTHEN_CTRL		U(0x4)
+#define CM_MISC			U(0xc)
+#define CM_MODE_CTRL		U(0x10)
+#define CM_IRQ_WAKEUP_MASK0	U(0x100)
+#define CM_SYS_SLEEP_CTRL	U(0x380)
+#define IMR_NUM			U(8)
+
+/* CM_MISC */
+#define SLEEP_HOLD_EN		BIT(1)
+#define IRQ_MUX			BIT(5)
+#define SW_WAKEUP		BIT(6)
+
+/* CM_SYS_SLEEP_CTRL */
+#define SS_WAIT			BIT(0)
+#define SS_STOP			BIT(1)
+#define SS_SUSPEND		BIT(2)
+
+#define CM_MODE_RUN		U(0x0)
+#define CM_MODE_WAIT		U(0x1)
+#define CM_MODE_STOP		U(0x2)
+#define CM_MODE_SUSPEND		U(0x3)
+
+#define LPM_SETTING(d, m)	((m) << (((d) % 8) * 4))
+
+enum gpc_cmc_slice {
+	CPU_M33,
+	CPU_A55C0,
+	CPU_A55C1,
+	CPU_A55_PLAT,
+};
+
+/* set gpc domain assignment */
+static inline void gpc_assign_domains(unsigned int domains)
+{
+	mmio_write_32(GPC_GLOBAL_BASE + GPC_DOMAIN, domains);
+}
+
+/* force a cpu into sleep status */
+static inline void gpc_force_cpu_suspend(unsigned int cpu)
+{
+	mmio_setbits_32(GPC_GLOBAL_BASE + GPC_SYS_SLEEP, FORCE_CPUx_DISABLE(cpu));
+}
+
+static inline void gpc_pmic_stby_en(bool en)
+{
+	mmio_write_32(GPC_GLOBAL_BASE + PMIC_CTRL, en ? 1 : 0);
+}
+
+static inline void gpc_rosc_off(bool off)
+{
+	mmio_write_32(GPC_GLOBAL_BASE + GPC_ROSC_CTRL, off ? 1 : 0);
+}
+
+static inline void gpc_set_cpu_mode(unsigned int cpu, unsigned int mode)
+{
+	mmio_write_32(CM_SLICE(cpu) + CM_MODE_CTRL, mode);
+}
+
+static inline void gpc_select_wakeup_gic(unsigned int cpu)
+{
+	mmio_setbits_32(CM_SLICE(cpu) + CM_MISC, IRQ_MUX);
+}
+
+static inline void gpc_select_wakeup_raw_irq(unsigned int cpu)
+{
+	mmio_clrbits_32(CM_SLICE(cpu) + CM_MISC, IRQ_MUX);
+}
+
+static inline void gpc_assert_sw_wakeup(unsigned int cpu)
+{
+	mmio_setbits_32(CM_SLICE(cpu) + CM_MISC, SW_WAKEUP);
+}
+
+static inline void gpc_deassert_sw_wakeup(unsigned int cpu)
+{
+	mmio_clrbits_32(CM_SLICE(cpu) + CM_MISC, SW_WAKEUP);
+}
+
+static inline void gpc_clear_cpu_sleep_hold(unsigned int cpu)
+{
+	mmio_clrbits_32(CM_SLICE(cpu) + CM_MISC, SLEEP_HOLD_EN);
+}
+
+static inline void gpc_set_irq_mask(unsigned int cpu, unsigned int idx, uint32_t mask)
+{
+	mmio_write_32(CM_SLICE(cpu) + idx * 0x4 + CM_IRQ_WAKEUP_MASK0, mask);
+}
+
+/*******************************************************************************
+ * SRC definitions & declarations
+ ******************************************************************************/
+#define SRC_SLICE(x)		(SRC_BASE + 0x400 * (x))
+#define SRC_AUTHEN_CTRL		U(0x4)
+#define SRC_LPM_SETTING0	U(0x10)
+#define SRC_LPM_SETTING1	U(0x14)
+#define SRC_LPM_SETTING2	U(0x18)
+#define SRC_SLICE_SW_CTRL	U(0x20)
+
+#define SRC_MEM_CTRL		U(0x4)
+#define MEM_LP_EN		BIT(2)
+#define MEM_LP_RETN		BIT(1)
+
+enum mix_mem_mode {
+	MEM_OFF,
+	MEM_RETN,
+};
+
+enum src_mix_mem_slice {
+	SRC_GLOBAL,
+
+	/* MIX slice */
+	SRC_SENTINEL,
+	SRC_AON,
+	SRC_WKUP,
+	SRC_DDR,
+	SRC_DPHY,
+	SRC_ML,
+	SRC_NIC,
+	SRC_HSIO,
+	SRC_MEDIA,
+	SRC_M33P,
+	SRC_A55C0,
+	SRC_A55C1,
+	SRC_A55P,
+
+	/* MEM slice */
+	SRC_AON_MEM,
+	SRC_WKUP_MEM,
+	SRC_DDR_MEM,
+	SRC_DPHY_MEM,
+	SRC_ML_MEM,
+	SRC_NIC_MEM,
+	SRC_NIC_OCRAM,
+	SRC_HSIO_MEM,
+	SRC_MEDIA_MEM,
+	SRC_A55P0_MEM,
+	SRC_A55P1_MEM,
+	SRC_A55_SCU_MEM,
+	SRC_A55_L3_MEM,
+};
+
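+/*
+ * 'wlist' is a domain-ID bitmask written to the white-list field (bits
+ * [31:16]) and 'lpm_en' sets the LPM-control enable (bit 2) in the slice
+ * AUTHEN_CTRL register.
+ */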
+static inline void src_authen_config(unsigned int mix, unsigned int wlist,
+				unsigned int lpm_en)
+{
+	mmio_write_32(SRC_SLICE(mix) + SRC_AUTHEN_CTRL, (wlist << 16) | (lpm_en << 2));
+}
+
+static inline void src_mix_set_lpm(unsigned int mix, unsigned int did, unsigned int lpm_mode)
+{
+	mmio_clrsetbits_32(SRC_SLICE(mix) + SRC_LPM_SETTING1 + (did / 8) * 0x4,
+			   LPM_SETTING(did, 0x7), LPM_SETTING(did, lpm_mode));
+}
+
+static inline void src_mem_lpm_en(unsigned int mix, bool retn)
+{
+	mmio_setbits_32(SRC_SLICE(mix) + SRC_MEM_CTRL, MEM_LP_EN | (retn ? MEM_LP_RETN : 0));
+}
+
+static inline void src_mem_lpm_dis(unsigned int mix)
+{
+	mmio_clrbits_32(SRC_SLICE(mix) + SRC_MEM_CTRL, MEM_LP_EN | MEM_LP_RETN);
+}
+
+/*******************************************************************************
+ * BLK_CTRL_S definitions & declarations
+ ******************************************************************************/
+#define HW_LP_HANDHSK		U(0x110)
+#define HW_LP_HANDHSK2		U(0x114)
+#define CA55_CPUWAIT		U(0x118)
+#define CA55_RVBADDR0_L		U(0x11c)
+#define CA55_RVBADDR0_H		U(0x120)
+
+/*******************************************************************************
+ * Other definitions & declarations
+ ******************************************************************************/
+void pwr_sys_init(void);
+
+#endif /* PWR_CTRL_H */
+
diff --git a/plat/imx/imx93/plat_topology.c b/plat/imx/imx93/plat_topology.c
new file mode 100644
index 0000000..739e2b9
--- /dev/null
+++ b/plat/imx/imx93/plat_topology.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <plat/common/platform.h>
+
+const unsigned char imx_power_domain_tree_desc[] = {
+	PWR_DOMAIN_AT_MAX_LVL,
+	PLATFORM_CLUSTER_COUNT,
+	PLATFORM_CLUSTER0_CORE_COUNT,
+};
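+
+/*
+ * For i.MX93 (a single cluster of two Cortex-A55 cores) the descriptor is
+ * expected to resolve to { 1, 1, 2 }.
+ */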
+
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+	return imx_power_domain_tree_desc;
+}
+
+/*
+ * Only one cluster is planned for the i.MX9 family, so there is
+ * no need to consider the cluster ID.
+ */
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+	unsigned int cpu_id;
+
+	mpidr &= MPIDR_AFFINITY_MASK;
+
+	if (mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)) {
+		return -1;
+	}
+
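+	/* On Cortex-A55 MPIDR has MT set, so the core number is in Aff1 */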
+	cpu_id = MPIDR_AFFLVL1_VAL(mpidr);
+
+	return cpu_id;
+}
diff --git a/plat/imx/imx93/platform.mk b/plat/imx/imx93/platform.mk
new file mode 100644
index 0000000..ed7e81f
--- /dev/null
+++ b/plat/imx/imx93/platform.mk
@@ -0,0 +1,46 @@
+#
+# Copyright 2022-2023 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PLAT_INCLUDES		:=	-Iplat/imx/common/include		\
+				-Iplat/imx/imx93/include
+
+# Translation tables library
+include lib/xlat_tables_v2/xlat_tables.mk
+
+GICV3_SUPPORT_GIC600  :=      1
+
+# Include GICv3 driver files
+include drivers/arm/gic/v3/gicv3.mk
+
+IMX_GIC_SOURCES		:=	${GICV3_SOURCES}			\
+				plat/common/plat_gicv3.c		\
+				plat/common/plat_psci_common.c		\
+				plat/imx/common/plat_imx8_gic.c
+
+BL31_SOURCES		+=	plat/common/aarch64/crash_console_helpers.S   \
+				plat/imx/imx93/aarch64/plat_helpers.S		\
+				plat/imx/imx93/plat_topology.c			\
+				plat/imx/common/lpuart_console.S		\
+				plat/imx/imx93/trdc.c			\
+				plat/imx/imx93/pwr_ctrl.c			\
+				plat/imx/imx93/imx93_bl31_setup.c		\
+				plat/imx/imx93/imx93_psci.c			\
+				lib/cpus/aarch64/cortex_a55.S			\
+				drivers/delay_timer/delay_timer.c		\
+				drivers/delay_timer/generic_delay_timer.c	\
+				drivers/nxp/trdc/imx_trdc.c			\
+				${IMX_GIC_SOURCES}				\
+				${XLAT_TABLES_LIB_SRCS}
+
+RESET_TO_BL31		:=	1
+HW_ASSISTED_COHERENCY	:= 	1
+USE_COHERENT_MEM	:=	0
+PROGRAMMABLE_RESET_ADDRESS :=	1
+COLD_BOOT_SINGLE_CPU	:=	1
+
+BL32_BASE               ?=      0x96000000
+BL32_SIZE               ?=      0x02000000
+$(eval $(call add_define,BL32_BASE))
+$(eval $(call add_define,BL32_SIZE))
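+
+# Example invocation (cross toolchain assumed to be on PATH):
+#   make PLAT=imx93 CROSS_COMPILE=aarch64-none-elf- bl31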
diff --git a/plat/imx/imx93/pwr_ctrl.c b/plat/imx/imx93/pwr_ctrl.c
new file mode 100644
index 0000000..624c605
--- /dev/null
+++ b/plat/imx/imx93/pwr_ctrl.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <stdbool.h>
+
+#include <platform_def.h>
+#include <pwr_ctrl.h>
+
+/* Do the necessary GPC, SRC, BLK_CTRL_S init */
+void pwr_sys_init(void)
+{
+	unsigned int cpu;
+
+	/*
+	 * Assign the A55 cluster to domain 3, M33 to domain 2, and A55
+	 * CORE0/CORE1 to domains 0/1. Domains 0/1 are only used to trigger
+	 * their own LPM; the A55 cluster and M33 domain assignment must be
+	 * aligned with the TRDC DIDs.
+	 */
+	gpc_assign_domains(0x3102);
+
+	/* CA55 core0/1 config */
+	/* CA55 core0/1 and cluster (plat) slice config */
+		/* clear the cpu sleep hold */
+		gpc_clear_cpu_sleep_hold(cpu);
+		/* use gic wakeup source by default */
+		gpc_select_wakeup_gic(cpu);
+		/*
+		 * Ignore A55 core0/1's LPM trigger for system sleep.
+		 * Normally, on the A55 side, only the A55 cluster (plat)
+		 * domain is used to trigger the system-wide low power
+		 * mode transition.
+		 */
+		if (cpu != CPU_A55_PLAT) {
+			gpc_force_cpu_suspend(cpu);
+		}
+	}
+
+	/* boot core(A55C0) */
+	src_mem_lpm_en(SRC_A55P0_MEM, MEM_OFF);
+	/* For A55 core, only need to be on in RUN mode */
+	src_mix_set_lpm(SRC_A55C0, 0x0, CM_MODE_WAIT);
+	/* whitelist: 0x1 for domain 0 only */
+	src_authen_config(SRC_A55C0, 0x1, 0x1);
+
+	/* A55 cluster */
+	gpc_select_wakeup_gic(CPU_A55_PLAT);
+	gpc_clear_cpu_sleep_hold(CPU_A55_PLAT);
+
+	/* SCU MEM must be OFF when A55 PLAT OFF */
+	src_mem_lpm_en(SRC_A55_SCU_MEM, MEM_OFF);
+	/* L3 memory in retention by default */
+	src_mem_lpm_en(SRC_A55_L3_MEM, MEM_RETN);
+
+	src_mix_set_lpm(SRC_A55P, 0x3, 0x1);
+	/* whitelist: 0x8 for domain 3 only */
+	src_authen_config(SRC_A55P, 0x8, 0x1);
+
+	/* enable the HW LP handshake between S401 & A55 cluster */
+	mmio_setbits_32(BLK_CTRL_S_BASE + HW_LP_HANDHSK, BIT(5));
+}
+
diff --git a/plat/imx/imx93/trdc.c b/plat/imx/imx93/trdc.c
new file mode 100644
index 0000000..0d09aa6
--- /dev/null
+++ b/plat/imx/imx93/trdc.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <lib/mmio.h>
+
+#include "trdc_config.h"
+
+struct trdc_mgr_info trdc_mgr_blks[] = {
+	{ TRDC_A_BASE, 0, 0, 39, 40 },
+	{ TRDC_W_BASE, 0, 0, 70, 71 },
+	{ TRDC_M_BASE, 1, 0, 1, 2 },
+	{ TRDC_N_BASE, 0, 1, 1, 2 },
+};
+
+unsigned int trdc_mgr_num = ARRAY_SIZE(trdc_mgr_blks);
+
+struct trdc_config_info trdc_cfg_info[] = {
+	{	TRDC_A_BASE,
+		trdc_a_mbc_glbac, ARRAY_SIZE(trdc_a_mbc_glbac),
+		trdc_a_mbc, ARRAY_SIZE(trdc_a_mbc),
+		trdc_a_mrc_glbac, ARRAY_SIZE(trdc_a_mrc_glbac),
+		trdc_a_mrc, ARRAY_SIZE(trdc_a_mrc)
+	}, /* TRDC_A */
+	{	TRDC_W_BASE,
+		trdc_w_mbc_glbac, ARRAY_SIZE(trdc_w_mbc_glbac),
+		trdc_w_mbc, ARRAY_SIZE(trdc_w_mbc),
+		trdc_w_mrc_glbac, ARRAY_SIZE(trdc_w_mrc_glbac),
+		trdc_w_mrc, ARRAY_SIZE(trdc_w_mrc)
+	}, /* TRDC_W */
+	{	TRDC_N_BASE,
+		trdc_n_mbc_glbac, ARRAY_SIZE(trdc_n_mbc_glbac),
+		trdc_n_mbc, ARRAY_SIZE(trdc_n_mbc),
+		trdc_n_mrc_glbac, ARRAY_SIZE(trdc_n_mrc_glbac),
+		trdc_n_mrc, ARRAY_SIZE(trdc_n_mrc)
+	}, /* TRDC_N */
+};
+
+void trdc_config(void)
+{
+	unsigned int i;
+
+	/* Set MTR to DID1 */
+	trdc_mda_set_noncpu(TRDC_A_BASE, 4, false, 0x2, 0x2, 0x1);
+
+	/* Set M33 to DID2 */
+	trdc_mda_set_cpu(TRDC_A_BASE, 1, 0, 0x2, 0x0, 0x2, 0x0, 0x0, 0x0);
+
+	/* Configure the access permission for TRDC MGR and MC slots */
+	for (i = 0U; i < ARRAY_SIZE(trdc_mgr_blks); i++) {
+		trdc_mgr_mbc_setup(&trdc_mgr_blks[i]);
+	}
+
+	/* Configure TRDC user settings from config table */
+	for (i = 0U; i < ARRAY_SIZE(trdc_cfg_info); i++) {
+		trdc_setup(&trdc_cfg_info[i]);
+	}
+
+	NOTICE("TRDC init done\n");
+}
diff --git a/plat/imx/imx93/trdc_config.h b/plat/imx/imx93/trdc_config.h
new file mode 100644
index 0000000..c623a19
--- /dev/null
+++ b/plat/imx/imx93/trdc_config.h
@@ -0,0 +1,254 @@
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <drivers/nxp/trdc/imx_trdc.h>
+
+#define TRDC_A_BASE	U(0x44270000)
+#define TRDC_W_BASE	U(0x42460000)
+#define TRDC_M_BASE	U(0x42460000)
+#define TRDC_N_BASE	U(0x49010000)
+
+/* GLBAC7 is used for TRDC only, any setting to GLBAC7 will be ignored */
+
+/* aonmix */
+struct trdc_glbac_config trdc_a_mbc_glbac[] = {
+	/* MBC0 */
+	{ 0, 0, SP(RW)  | SU(RW)   | NP(RW)  | NU(RW) },
+	/* MBC1 */
+	{ 1, 0, SP(RW)  | SU(RW)   | NP(RW)  | NU(RW) },
+	{ 1, 1, SP(RW)  | SU(R)    | NP(RW)  | NU(R)  },
+	{ 1, 2, SP(RWX) | SU(RWX)  | NP(RWX) | NU(RWX)  },
+};
+
+struct trdc_mbc_config trdc_a_mbc[] = {
+	{ 0, 0, 0, MBC_BLK_ALL, 0, true }, /* MBC0 AIPS1 for S401 DID0 */
+	{ 0, 0, 1, MBC_BLK_ALL, 0, true }, /* MBC0 Sentinel_SOC_In for S401 DID0 */
+	{ 0, 0, 2, MBC_BLK_ALL, 0, true }, /* MBC0 GPIO1 for S401 DID0 */
+	{ 1, 0, 0, MBC_BLK_ALL, 0, true }, /* MBC1 CM33 code TCM for S401 DID0 */
+	{ 1, 0, 1, MBC_BLK_ALL, 0, true }, /* MBC1 CM33 system TCM for S401 DID0 */
+
+	{ 0, 1, 0, MBC_BLK_ALL, 0, true }, /* MBC0 AIPS1 for MTR DID1 */
+	{ 0, 1, 1, MBC_BLK_ALL, 0, true }, /* MBC0 Sentinel_SOC_In for MTR DID1 */
+
+	{ 0, 2, 0, MBC_BLK_ALL, 0, true }, /* MBC0 AIPS1 for M33 DID2 */
+	{ 0, 2, 1, MBC_BLK_ALL, 0, true }, /* MBC0 Sentinel_SOC_In for M33 DID2 */
+	{ 0, 2, 2, MBC_BLK_ALL, 0, true }, /* MBC0 GPIO1 for M33 DID2 */
+	{ 1, 2, 0, MBC_BLK_ALL, 2, true  }, /* MBC1 CM33 code TCM for M33 DID2 */
+	{ 1, 2, 1, MBC_BLK_ALL, 2, true  }, /* MBC1 CM33 system TCM for M33 DID2 */
+
+	{ 0, 3, 0, MBC_BLK_ALL, 0, false }, /* MBC0 AIPS1 for A55 DID3 */
+	{ 0, 3, 1, MBC_BLK_ALL, 0, false }, /* MBC0 Sentinel_SOC_In for A55 DID3 */
+	{ 0, 3, 2, MBC_BLK_ALL, 0, false }, /* MBC0 GPIO1 for A55 DID3 */
+	{ 1, 3, 0, MBC_BLK_ALL, 1, false }, /* MBC1 CM33 code TCM for A55 DID3 */
+	{ 1, 3, 1, MBC_BLK_ALL, 1, false }, /* MBC1 CM33 system TCM for A55 DID3 */
+	{ 1, 10, 1, MBC_BLK_ALL, 2, false }, /* MBC1 CM33 system TCM for SoC masters DID10 */
+
+	{ 0, 7, 0, MBC_BLK_ALL, 0, false }, /* MBC0 AIPS1 for eDMA DID7 */
+};
+
+struct trdc_glbac_config trdc_a_mrc_glbac[] = {
+	{ 0, 0, SP(RWX) | SU(RWX) | NP(RWX) | NU(RWX) },
+	{ 0, 1, SP(R)   | SU(0)   | NP(R)   | NU(0)   },
+};
+
+struct trdc_mrc_config trdc_a_mrc[] = {
+	{ 0, 2, 0, 0x00000000, 0x00040000, 0, true }, /* MRC0 M33 ROM for M33 DID2 */
+	{ 0, 3, 0, 0x00100000, 0x00040000, 1, true }, /* MRC0 M33 ROM for A55 DID3 */
+};
+
+/* wakeupmix */
+struct trdc_glbac_config trdc_w_mbc_glbac[] = {
+	/* MBC0 */
+	{ 0, 0, SP(RW) | SU(RW) | NP(RW) | NU(RW) },
+	/* MBC1 */
+	{ 1, 0, SP(RW) | SU(RW) | NP(RW) | NU(RW) },
+};
+
+struct trdc_mbc_config trdc_w_mbc[] = {
+	{ 0, 1, 0, MBC_BLK_ALL, 0, true }, /* MBC0 AIPS2 for MTR DID1 */
+	{ 1, 1, 0, MBC_BLK_ALL, 0, true }, /* MBC1 AIPS3 for MTR DID1 */
+
+	{ 0, 2, 0, MBC_BLK_ALL, 0, true }, /* MBC0 AIPS2 for M33 DID2 */
+	{ 0, 2, 1, MBC_BLK_ALL, 0, true }, /* MBC0 GPIO2_In for M33 DID2 */
+	{ 0, 2, 2, MBC_BLK_ALL, 0, true }, /* MBC0 GPIO3 for M33 DID2 */
+	{ 0, 2, 3, MBC_BLK_ALL, 0, true }, /* MBC0 DAP  for M33 DID2 */
+	{ 1, 2, 0, MBC_BLK_ALL, 0, true }, /* MBC1 AIPS3 for M33 DID2 */
+	{ 1, 2, 1, MBC_BLK_ALL, 0, true }, /* MBC1 AHB_ISPAP for M33 DID2 */
+	{ 1, 2, 2, MBC_BLK_ALL, 0, true },  /* MBC1 NIC_MAIN_GPV for M33 DID2 */
+	{ 1, 2, 3, MBC_BLK_ALL, 0, true }, /* MBC1 GPIO4 for M33 DID2 */
+
+	{ 0, 3, 0, MBC_BLK_ALL, 0, false }, /* MBC0 AIPS2 for A55 DID3 */
+	{ 0, 3, 1, MBC_BLK_ALL, 0, false }, /* MBC0 GPIO2_In for A55 DID3 */
+	{ 0, 3, 2, MBC_BLK_ALL, 0, false }, /* MBC0 GPIO3 for A55 DID3 */
+	{ 0, 3, 3, MBC_BLK_ALL, 0, false }, /* MBC0 DAP  for A55 DID3 */
+	{ 1, 3, 0, MBC_BLK_ALL, 0, false }, /* MBC1 AIPS3 for A55 DID3 */
+	{ 1, 3, 1, MBC_BLK_ALL, 0, false }, /* MBC1 AHB_ISPAP for A55 DID3 */
+	{ 1, 3, 2, MBC_BLK_ALL, 0, true },  /* MBC1 NIC_MAIN_GPV for A55 DID3 */
+	{ 1, 3, 3, MBC_BLK_ALL, 0, false }, /* MBC1 GPIO4 for A55 DID3 */
+
+	{ 0, 7, 0, MBC_BLK_ALL, 0, false }, /* MBC0 AIPS2 for eDMA DID7 */
+	{ 1, 7, 0, MBC_BLK_ALL, 0, false }, /* MBC1 AIPS3 for eDMA DID7  */
+};
+
+struct trdc_glbac_config trdc_w_mrc_glbac[] = {
+	/* MRC0 */
+	{ 0, 0, SP(RX)  | SU(RX)   | NP(RX)   | NU(RX)    },
+	/* MRC1 */
+	{ 1, 0, SP(RWX) | SU(RWX)  | NP(RWX)  | NU(RWX)   },
+};
+
+struct trdc_mrc_config trdc_w_mrc[] = {
+	{ 0, 3, 0, 0x00000000, 0x00040000, 0, false }, /* MRC0 A55 ROM for A55 DID3 */
+	{ 1, 2, 0, 0x28000000, 0x08000000, 0, true  }, /* MRC1 FLEXSPI1 for M33 DID2 */
+	{ 1, 3, 0, 0x28000000, 0x08000000, 0, false }, /* MRC1 FLEXSPI1 for A55 DID3 */
+};
+
+/* nicmix */
+struct trdc_glbac_config trdc_n_mbc_glbac[] = {
+	/* MBC0 */
+	{ 0, 0, SP(RW) | SU(RW) | NP(RW) | NU(RW) },
+	/* MBC1 */
+	{ 1, 0, SP(RW) | SU(RW) | NP(RW) | NU(RW) },
+	/* MBC2 */
+	{ 2, 0, SP(RW) | SU(RW) | NP(RW) | NU(RW) },
+	{ 2, 1, SP(R) | SU(R) | NP(R) | NU(R) },
+	/* MBC3 */
+	{ 3, 0, SP(RW) | SU(RW) | NP(RW) | NU(RW) },
+	{ 3, 1, SP(RWX) | SU(RWX) | NP(RWX) | NU(RWX) },
+};
+
+struct trdc_mbc_config trdc_n_mbc[] = {
+	{ 0, 0, 0, MBC_BLK_ALL, 0, true }, /* MBC0 DDRCFG for S401 DID0 */
+	{ 0, 0, 1, MBC_BLK_ALL, 0, true }, /* MBC0 AIPS4 for  S401 DID0 */
+	{ 0, 0, 2, MBC_BLK_ALL, 0, true }, /* MBC0 MEDIAMIX for  S401 DID0 */
+	{ 0, 0, 3, MBC_BLK_ALL, 0, true }, /* MBC0 HSIOMIX for  S401 DID0 */
+	{ 1, 0, 0, MBC_BLK_ALL, 0, true }, /* MBC1 MTR_DCA, TCU, TROUT for  S401 DID0 */
+	{ 1, 0, 1, MBC_BLK_ALL, 0, true }, /* MBC1 MTR_DCA, TCU, TROUT for  S401 DID0 */
+	{ 1, 0, 2, MBC_BLK_ALL, 0, true }, /* MBC1 MLMIX for  S401 DID0 */
+	{ 1, 0, 3, MBC_BLK_ALL, 0, true }, /* MBC1 MLMIX for  S401 DID0 */
+	{ 2, 0, 0, MBC_BLK_ALL, 0, true }, /* MBC2 GIC for  S401 DID0 */
+	{ 2, 0, 1, MBC_BLK_ALL, 0, true }, /* MBC2 GIC for  S401 DID0 */
+	{ 3, 0, 0, MBC_BLK_ALL, 0, true }, /* MBC3 OCRAM for  S401 DID0 */
+	{ 3, 0, 1, MBC_BLK_ALL, 0, true }, /* MBC3 OCRAM for  S401 DID0 */
+
+	{ 0, 1, 0, MBC_BLK_ALL, 0, true }, /* MBC0 DDRCFG for MTR DID1 */
+	{ 0, 1, 1, MBC_BLK_ALL, 0, true }, /* MBC0 AIPS4 for  MTR DID1 */
+	{ 0, 1, 2, MBC_BLK_ALL, 0, true }, /* MBC0 MEDIAMIX for MTR DID1 */
+	{ 0, 1, 3, MBC_BLK_ALL, 0, true }, /* MBC0 HSIOMIX for  MTR DID1 */
+	{ 1, 1, 0, MBC_BLK_ALL, 0, true }, /* MBC1 MTR_DCA, TCU, TROUT for  MTR DID1 */
+	{ 1, 1, 1, MBC_BLK_ALL, 0, true }, /* MBC1 MTR_DCA, TCU, TROUT for  MTR DID1 */
+	{ 1, 1, 2, MBC_BLK_ALL, 0, true }, /* MBC1 MLMIX for  MTR DID1 */
+	{ 1, 1, 3, MBC_BLK_ALL, 0, true }, /* MBC1 MLMIX for  MTR DID1 */
+
+	{ 0, 2, 0, MBC_BLK_ALL, 0, true }, /* MBC0 DDRCFG for M33 DID2 */
+	{ 0, 2, 1, MBC_BLK_ALL, 0, true }, /* MBC0 AIPS4 for M33 DID2 */
+	{ 0, 2, 2, MBC_BLK_ALL, 0, true }, /* MBC0 MEDIAMIX for M33 DID2 */
+	{ 0, 2, 3, MBC_BLK_ALL, 0, true }, /* MBC0 HSIOMIX for M33 DID2 */
+	{ 1, 2, 0, MBC_BLK_ALL, 0, true }, /* MBC1 MTR_DCA, TCU, TROUT for M33 DID2 */
+	{ 1, 2, 1, MBC_BLK_ALL, 0, true }, /* MBC1 MTR_DCA, TCU, TROUT for M33 DID2 */
+	{ 1, 2, 2, MBC_BLK_ALL, 0, true }, /* MBC1 MLMIX for M33 DID2 */
+	{ 1, 2, 3, MBC_BLK_ALL, 0, true }, /* MBC1 MLMIX for M33 DID2 */
+	{ 2, 2, 0, MBC_BLK_ALL, 1, true }, /* MBC2 GIC for M33 DID2 */
+	{ 2, 2, 1, MBC_BLK_ALL, 1, true }, /* MBC2 GIC for M33 DID2 */
+	{ 3, 2, 0, MBC_BLK_ALL, 0, true  }, /* MBC3 OCRAM for M33 DID2 */
+	{ 3, 2, 1, MBC_BLK_ALL, 0, true  }, /* MBC3 OCRAM for M33 DID2 */
+
+	{ 0, 3, 0, MBC_BLK_ALL, 0, false }, /* MBC0 DDRCFG for A55 DID3 */
+	{ 0, 3, 1, MBC_BLK_ALL, 0, false }, /* MBC0 AIPS4 for A55 DID3 */
+	{ 0, 3, 2, MBC_BLK_ALL, 0, false }, /* MBC0 MEDIAMIX for A55 DID3 */
+	{ 0, 3, 3, MBC_BLK_ALL, 0, false }, /* MBC0 HSIOMIX for A55 DID3 */
+	{ 1, 3, 0, MBC_BLK_ALL, 0, false }, /* MBC1 MTR_DCA, TCU, TROUT for A55 DID3 */
+	{ 1, 3, 1, MBC_BLK_ALL, 0, false }, /* MBC1 MTR_DCA, TCU, TROUT for A55 DID3 */
+	{ 1, 3, 2, MBC_BLK_ALL, 0, false }, /* MBC1 MLMIX for A55 DID3 */
+	{ 1, 3, 3, MBC_BLK_ALL, 0, false }, /* MBC1 MLMIX for A55 DID3 */
+	{ 2, 3, 0, MBC_BLK_ALL, 0, false }, /* MBC2 GIC for A55 DID3 */
+	{ 2, 3, 1, MBC_BLK_ALL, 0, false }, /* MBC2 GIC for A55 DID3 */
+	{ 3, 3, 0, MBC_BLK_ALL, 1, true  }, /* MBC3 OCRAM for A55 DID3 */
+	{ 3, 3, 1, MBC_BLK_ALL, 1, true  }, /* MBC3 OCRAM for A55 DID3 */
+
+	{ 3, 3, 0, 0, 0, false  }, /* MBC3 OCRAM for A55 DID3 */
+	{ 3, 3, 0, 1, 0, false  }, /* MBC3 OCRAM for A55 DID3 */
+	{ 3, 3, 0, 2, 0, false  }, /* MBC3 OCRAM for A55 DID3 */
+	{ 3, 3, 0, 3, 0, false  }, /* MBC3 OCRAM for A55 DID3 */
+	{ 3, 3, 0, 4, 0, false  }, /* MBC3 OCRAM for A55 DID3 */
+	{ 3, 3, 0, 5, 0, false  }, /* MBC3 OCRAM for A55 DID3 */
+	{ 3, 3, 1, 0, 0, false  }, /* MBC3 OCRAM for A55 DID3 */
+	{ 3, 3, 1, 1, 0, false  }, /* MBC3 OCRAM for A55 DID3 */
+	{ 3, 3, 1, 2, 0, false  }, /* MBC3 OCRAM for A55 DID3 */
+	{ 3, 3, 1, 3, 0, false  }, /* MBC3 OCRAM for A55 DID3 */
+	{ 3, 3, 1, 4, 0, false  }, /* MBC3 OCRAM for A55 DID3 */
+	{ 3, 3, 1, 5, 0, false  }, /* MBC3 OCRAM for A55 DID3 */
+
+	{ 0, 7, 1, MBC_BLK_ALL, 0, false }, /* MBC0 AIPS4 for eDMA DID7 */
+	{ 0, 7, 2, MBC_BLK_ALL, 0, false }, /* MBC0 MEDIAMIX for eDMA DID7 */
+	{ 0, 7, 3, MBC_BLK_ALL, 0, false }, /* MBC0 HSIOMIX for eDMA DID7 */
+
+	{ 3, 10, 0, MBC_BLK_ALL, 0, false }, /* MBC3 OCRAM for DID10 */
+	{ 3, 10, 1, MBC_BLK_ALL, 0, false }, /* MBC3 OCRAM for DID10 */
+};
+
+struct trdc_glbac_config trdc_n_mrc_glbac[] = {
+	{ 0, 0, SP(RW)  | SU(RW)  | NP(RW)  | NU(RW)  },
+	{ 0, 1, SP(RWX) | SU(RWX) | NP(RWX) | NU(RWX) },
+};
+
+#if defined(SPD_opteed)
+#define TEE_SHM_SIZE 0x200000
+
+#define DRAM_MEM_0_START (0x80000000)
+#define DRAM_MEM_0_SIZE (BL32_BASE - 0x80000000)
+
+#define DRAM_MEM_1_START (BL32_BASE)
+#define DRAM_MEM_1_SIZE (BL32_SIZE - TEE_SHM_SIZE)
+
+#define DRAM_MEM_2_START (DRAM_MEM_1_START + DRAM_MEM_1_SIZE)
+#define DRAM_MEM_2_SIZE (0x80000000 - DRAM_MEM_1_SIZE - DRAM_MEM_0_SIZE)
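+
+/*
+ * The 2GB DRAM window at 0x80000000 is split into three MRC regions:
+ * region 0 is non-secure DRAM below BL32_BASE, region 1 is the OP-TEE
+ * image (BL32 minus the shared-memory area) limited to secure access,
+ * and region 2 covers the TEE shared memory plus the remaining DRAM,
+ * which stays open to non-secure masters.
+ */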
+
+struct trdc_mrc_config trdc_n_mrc[] = {
+	{ 0, 0, 0, 0x80000000, 0x80000000, 0, false }, /* MRC0 DRAM for S400 DID0 */
+	{ 0, 1, 0, 0x80000000, 0x80000000, 0, false }, /* MRC0 DRAM for MTR DID1 */
+	{ 0, 2, 0, 0x80000000, 0x80000000, 0, true }, /* MRC0 DRAM for M33 DID2 */
+	{ 0, 8, 0, 0x80000000, 0x80000000, 1, false }, /* MRC0 DRAM for Coresight, Testport DID8 */
+	{ 0, 9, 0, 0x80000000, 0x80000000, 1, false }, /* MRC0 DRAM for DAP DID9 */
+
+	{ 0, 3, 0, DRAM_MEM_0_START, DRAM_MEM_0_SIZE, 1, false }, /* MRC0 DRAM for A55 DID3 */
+	{ 0, 5, 0, DRAM_MEM_0_START, DRAM_MEM_0_SIZE, 0, false }, /* MRC0 DRAM for USDHC1 DID5 */
+	{ 0, 6, 0, DRAM_MEM_0_START, DRAM_MEM_0_SIZE, 0, false }, /* MRC0 DRAM for USDHC2 DID6 */
+	{ 0, 7, 0, DRAM_MEM_0_START, DRAM_MEM_0_SIZE, 0, false }, /* MRC0 DRAM for eDMA DID7 */
+	{ 0, 10, 0, DRAM_MEM_0_START, DRAM_MEM_0_SIZE, 0, false }, /* MRC0 DRAM for SoC masters DID10 */
+	{ 0, 11, 0, DRAM_MEM_0_START, DRAM_MEM_0_SIZE, 0, false }, /* MRC0 DRAM for USB DID11 */
+
+	/* OPTEE memory for secure access only. */
+	{ 0, 3, 1, DRAM_MEM_1_START, DRAM_MEM_1_SIZE, 1, true }, /* MRC0 DRAM for A55 DID3 */
+	{ 0, 5, 1, DRAM_MEM_1_START, DRAM_MEM_1_SIZE, 0, true }, /* MRC0 DRAM for USDHC1 DID5 */
+	{ 0, 6, 1, DRAM_MEM_1_START, DRAM_MEM_1_SIZE, 0, true }, /* MRC0 DRAM for USDHC2 DID6 */
+	{ 0, 7, 1, DRAM_MEM_1_START, DRAM_MEM_1_SIZE, 0, true }, /* MRC0 DRAM for eDMA DID7 */
+	{ 0, 10, 1, DRAM_MEM_1_START, DRAM_MEM_1_SIZE, 0, true }, /* MRC0 DRAM for SoC masters DID10 */
+	{ 0, 11, 1, DRAM_MEM_1_START, DRAM_MEM_1_SIZE, 0, true }, /* MRC0 DRAM for USB DID11 */
+
+	{ 0, 3, 2, DRAM_MEM_2_START, DRAM_MEM_2_SIZE, 1, false }, /* MRC0 DRAM for A55 DID3 */
+	{ 0, 5, 2, DRAM_MEM_2_START, DRAM_MEM_2_SIZE, 0, false }, /* MRC0 DRAM for USDHC1 DID5 */
+	{ 0, 6, 2, DRAM_MEM_2_START, DRAM_MEM_2_SIZE, 0, false }, /* MRC0 DRAM for USDHC2 DID6 */
+	{ 0, 7, 2, DRAM_MEM_2_START, DRAM_MEM_2_SIZE, 0, false }, /* MRC0 DRAM for eDMA DID7 */
+	{ 0, 10, 2, DRAM_MEM_2_START, DRAM_MEM_2_SIZE, 0, false }, /* MRC0 DRAM for SoC masters DID10 */
+	{ 0, 11, 2, DRAM_MEM_2_START, DRAM_MEM_2_SIZE, 0, false }, /* MRC0 DRAM for USB DID11 */
+
+};
+#else
+struct trdc_mrc_config trdc_n_mrc[] = {
+	{ 0, 0, 0, 0x80000000, 0x80000000, 0, false }, /* MRC0 DRAM for S400 DID0 */
+	{ 0, 1, 0, 0x80000000, 0x80000000, 0, false }, /* MRC0 DRAM for MTR DID1 */
+	{ 0, 2, 0, 0x80000000, 0x80000000, 0, true }, /* MRC0 DRAM for M33 DID2 */
+	{ 0, 3, 0, 0x80000000, 0x80000000, 1, false }, /* MRC0 DRAM for A55 DID3 */
+	{ 0, 5, 0, 0x80000000, 0x80000000, 0, false }, /* MRC0 DRAM for USDHC1 DID5 */
+	{ 0, 6, 0, 0x80000000, 0x80000000, 0, false }, /* MRC0 DRAM for USDHC2 DID6 */
+	{ 0, 7, 0, 0x80000000, 0x80000000, 0, false }, /* MRC0 DRAM for eDMA DID7 */
+	{ 0, 8, 0, 0x80000000, 0x80000000, 1, false }, /* MRC0 DRAM for Coresight, Testport DID8 */
+	{ 0, 9, 0, 0x80000000, 0x80000000, 1, false }, /* MRC0 DRAM for DAP DID9 */
+	{ 0, 10, 0, 0x80000000, 0x80000000, 0, false }, /* MRC0 DRAM for SoC masters DID10 */
+	{ 0, 11, 0, 0x80000000, 0x80000000, 0, false }, /* MRC0 DRAM for USB DID11 */
+};
+#endif
diff --git a/plat/intel/soc/agilex/include/socfpga_plat_def.h b/plat/intel/soc/agilex/include/socfpga_plat_def.h
index 85dfeab..a744d09 100644
--- a/plat/intel/soc/agilex/include/socfpga_plat_def.h
+++ b/plat/intel/soc/agilex/include/socfpga_plat_def.h
@@ -87,6 +87,18 @@
 #define PLAT_SYS_COUNTER_FREQ_IN_TICKS	(400000000)
 #define PLAT_HZ_CONVERT_TO_MHZ	(1000000)
 
+/*******************************************************************************
+ * SDMMC related pointer function
+ ******************************************************************************/
+#define SDMMC_READ_BLOCKS	mmc_read_blocks
+#define SDMMC_WRITE_BLOCKS	mmc_write_blocks
+
+/*******************************************************************************
+ * sysmgr.boot_scratch_cold6 & 7 (64bit) are used to indicate L2 reset
+ * is done and HPS should trigger warm reset via RMR_EL3.
+ ******************************************************************************/
+#define L2_RESET_DONE_REG			0xFFD12218
+
 /* Platform specific system counter */
 #define PLAT_SYS_COUNTER_FREQ_IN_MHZ	get_cpu_clk()
 
diff --git a/plat/intel/soc/agilex5/bl2_plat_setup.c b/plat/intel/soc/agilex5/bl2_plat_setup.c
new file mode 100644
index 0000000..88f9880
--- /dev/null
+++ b/plat/intel/soc/agilex5/bl2_plat_setup.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2019-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/desc_image_load.h>
+#include <drivers/cadence/cdns_sdmmc.h>
+#include <drivers/generic_delay_timer.h>
+#include <drivers/synopsys/dw_mmc.h>
+#include <drivers/ti/uart/uart_16550.h>
+#include <lib/mmio.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#include "agilex5_clock_manager.h"
+#include "agilex5_memory_controller.h"
+#include "agilex5_mmc.h"
+#include "agilex5_pinmux.h"
+#include "agilex5_system_manager.h"
+#include "ccu/ncore_ccu.h"
+#include "combophy/combophy.h"
+#include "nand/nand.h"
+#include "qspi/cadence_qspi.h"
+#include "sdmmc/sdmmc.h"
+#include "socfpga_emac.h"
+#include "socfpga_f2sdram_manager.h"
+#include "socfpga_handoff.h"
+#include "socfpga_mailbox.h"
+#include "socfpga_private.h"
+#include "socfpga_reset_manager.h"
+#include "wdt/watchdog.h"
+
+
+/* Declare mmc_info */
+static struct mmc_device_info mmc_info;
+
+/* Declare cadence idmac descriptor */
+extern struct cdns_idmac_desc cdns_desc[8] __aligned(32);
+
+const mmap_region_t agilex_plat_mmap[] = {
+	MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE,
+		MT_MEMORY | MT_RW | MT_NS),
+	MAP_REGION_FLAT(PSS_BASE, PSS_SIZE,
+		MT_DEVICE | MT_RW | MT_NS),
+	MAP_REGION_FLAT(MPFE_BASE, MPFE_SIZE,
+		MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(OCRAM_BASE, OCRAM_SIZE,
+		MT_NON_CACHEABLE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(CCU_BASE, CCU_SIZE,
+		MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(MEM64_BASE, MEM64_SIZE,
+		MT_DEVICE | MT_RW | MT_NS),
+	MAP_REGION_FLAT(GIC_BASE, GIC_SIZE,
+		MT_DEVICE | MT_RW | MT_SECURE),
+	{0},
+};
+
+boot_source_type boot_source = BOOT_SOURCE;
+
+void bl2_el3_early_platform_setup(u_register_t x0, u_register_t x1,
+				u_register_t x2, u_register_t x3)
+{
+	static console_t console;
+
+	handoff reverse_handoff_ptr;
+
+	generic_delay_timer_init();
+	config_clkmgr_handoff(&reverse_handoff_ptr);
+	mailbox_init();
+	enable_nonsecure_access();
+
+	deassert_peripheral_reset();
+	if (combo_phy_init(&reverse_handoff_ptr) != 0) {
+		ERROR("Combo Phy initialization failed\n");
+	}
+
+	console_16550_register(PLAT_INTEL_UART_BASE, PLAT_UART_CLOCK,
+			       PLAT_BAUDRATE, &console);
+
+	/* Store magic number */
+	mmio_write_32(L2_RESET_DONE_REG, PLAT_L2_RESET_REQ);
+}
+
+void bl2_el3_plat_arch_setup(void)
+{
+	handoff reverse_handoff_ptr;
+
+	struct cdns_sdmmc_params params = EMMC_INIT_PARAMS((uintptr_t) &cdns_desc, get_mmc_clk());
+
+	mmc_info.mmc_dev_type = MMC_DEVICE_TYPE;
+	mmc_info.ocr_voltage = OCR_3_3_3_4 | OCR_3_2_3_3;
+
+	/* Request ownership and direct access to QSPI */
+	mailbox_hps_qspi_enable();
+
+	switch (boot_source) {
+	case BOOT_SOURCE_SDMMC:
+		NOTICE("SDMMC boot\n");
+		sdmmc_init(&reverse_handoff_ptr, &params, &mmc_info);
+		socfpga_io_setup(boot_source);
+		break;
+
+	case BOOT_SOURCE_QSPI:
+		NOTICE("QSPI boot\n");
+		cad_qspi_init(0, QSPI_CONFIG_CPHA, QSPI_CONFIG_CPOL,
+			QSPI_CONFIG_CSDA, QSPI_CONFIG_CSDADS,
+			QSPI_CONFIG_CSEOT, QSPI_CONFIG_CSSOT, 0);
+		socfpga_io_setup(boot_source);
+		break;
+
+	case BOOT_SOURCE_NAND:
+		NOTICE("NAND boot\n");
+		nand_init(&reverse_handoff_ptr);
+		socfpga_io_setup(boot_source);
+		break;
+
+	default:
+		ERROR("Unsupported boot source\n");
+		panic();
+		break;
+	}
+}
+
+uint32_t get_spsr_for_bl33_entry(void)
+{
+	unsigned long el_status;
+	unsigned int mode;
+	uint32_t spsr;
+
+	/* Figure out what mode we enter the non-secure world in */
+	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+	el_status &= ID_AA64PFR0_ELX_MASK;
+
+	mode = (el_status) ? MODE_EL2 : MODE_EL1;
+
+	/*
+	 * TODO: Consider the possibility of specifying the SPSR in
+	 * the FIP ToC and allowing the platform to have a say as
+	 * well.
+	 */
+	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+	return spsr;
+}
+
+int bl2_plat_handle_post_image_load(unsigned int image_id)
+{
+	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
+
+	assert(bl_mem_params);
+
+	switch (image_id) {
+	case BL33_IMAGE_ID:
+		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
+		bl_mem_params->ep_info.spsr = get_spsr_for_bl33_entry();
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/*******************************************************************************
+ * Perform any BL2 platform setup code
+ ******************************************************************************/
+void bl2_platform_setup(void)
+{
+}
diff --git a/plat/intel/soc/agilex5/bl31_plat_setup.c b/plat/intel/soc/agilex5/bl31_plat_setup.c
new file mode 100644
index 0000000..5ae4bf7
--- /dev/null
+++ b/plat/intel/soc/agilex5/bl31_plat_setup.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <drivers/arm/gic_common.h>
+#include <drivers/arm/gicv3.h>
+#include <drivers/ti/uart/uart_16550.h>
+#include <lib/mmio.h>
+#include <lib/xlat_tables/xlat_mmu_helpers.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/platform.h>
+
+#include "agilex5_power_manager.h"
+#include "ccu/ncore_ccu.h"
+#include "socfpga_mailbox.h"
+#include "socfpga_private.h"
+#include "socfpga_reset_manager.h"
+
+/* Get non-secure SPSR for BL33. Zephyr and Linux */
+uint32_t arm_get_spsr_for_bl33_entry(void);
+
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+/* The GICv3 driver only needs to be initialized in EL3 */
+static uintptr_t rdistif_base_addrs[PLATFORM_CORE_COUNT];
+
+#define SMMU_SDMMC
+
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+	entry_point_info_t *next_image_info;
+
+	next_image_info = (type == NON_SECURE) ?
+			  &bl33_image_ep_info : &bl32_image_ep_info;
+
+	/* None of the images on this platform can have 0x0 as the entrypoint */
+	if (next_image_info->pc) {
+		return next_image_info;
+	} else {
+		return NULL;
+	}
+}
+
+void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
+				u_register_t arg2, u_register_t arg3)
+{
+	static console_t console;
+
+	mmio_write_64(PLAT_SEC_ENTRY, PLAT_SEC_WARM_ENTRY);
+
+	console_16550_register(PLAT_INTEL_UART_BASE, PLAT_UART_CLOCK,
+			       PLAT_BAUDRATE, &console);
+
+	init_ncore_ccu();
+	setup_smmu_stream_id();
+
+	/*
+	 * Check that the parameters passed from BL2 are not NULL.
+	 */
+	void *from_bl2 = (void *) arg0;
+
+#if RESET_TO_BL31
+	/* There are no parameters from BL2 if BL31 is a reset vector */
+	assert(from_bl2 == NULL);
+	void *plat_params_from_bl2 = (void *) arg3;
+
+	assert(plat_params_from_bl2 == NULL);
+
+	/* Populate entry point information for BL33 */
+	SET_PARAM_HEAD(&bl33_image_ep_info,
+				PARAM_EP,
+				VERSION_1,
+				0);
+
+# if ARM_LINUX_KERNEL_AS_BL33
+	/*
+	 * According to the file ``Documentation/arm64/booting.txt`` of the
+	 * Linux kernel tree, Linux expects the physical address of the device
+	 * tree blob (DTB) in x0, while x1-x3 are reserved for future use and
+	 * must be 0.
+	 */
+	bl33_image_ep_info.args.arg0 = (u_register_t)ARM_PRELOADED_DTB_BASE;
+	bl33_image_ep_info.args.arg1 = 0U;
+	bl33_image_ep_info.args.arg2 = 0U;
+	bl33_image_ep_info.args.arg3 = 0U;
+# endif
+
+#else /* RESET_TO_BL31 */
+	bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
+
+	assert(params_from_bl2 != NULL);
+
+	/*
+	 * Copy BL32 (if populated by BL2) and BL33 entry point information.
+	 * They are stored in Secure RAM, in BL31's address space.
+	 */
+
+	if (params_from_bl2->h.type == PARAM_BL_PARAMS &&
+		params_from_bl2->h.version >= VERSION_2) {
+
+		bl_params_node_t *bl_params = params_from_bl2->head;
+
+		while (bl_params) {
+			if (bl_params->image_id == BL33_IMAGE_ID) {
+				bl33_image_ep_info = *bl_params->ep_info;
+			}
+
+			bl_params = bl_params->next_params_info;
+		}
+	} else {
+		struct socfpga_bl31_params *arg_from_bl2 =
+			(struct socfpga_bl31_params *) from_bl2;
+
+		assert(arg_from_bl2->h.type == PARAM_BL31);
+		assert(arg_from_bl2->h.version >= VERSION_1);
+
+		bl32_image_ep_info = *arg_from_bl2->bl32_ep_info;
+		bl33_image_ep_info = *arg_from_bl2->bl33_ep_info;
+	}
+
+	bl33_image_ep_info.args.arg0 = (u_register_t)ARM_PRELOADED_DTB_BASE;
+	bl33_image_ep_info.args.arg1 = 0U;
+	bl33_image_ep_info.args.arg2 = 0U;
+	bl33_image_ep_info.args.arg3 = 0U;
+#endif
+
+	/*
+	 * Tell BL31 where the non-trusted software image
+	 * is located and the entry state information
+	 */
+	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
+	bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry();
+
+	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+}
+
+static const interrupt_prop_t agx5_interrupt_props[] = {
+	PLAT_INTEL_SOCFPGA_G1S_IRQ_PROPS(INTR_GROUP1S),
+	PLAT_INTEL_SOCFPGA_G0_IRQ_PROPS(INTR_GROUP0)
+};
+
+static const gicv3_driver_data_t plat_gicv3_gic_data = {
+	.gicd_base = PLAT_INTEL_SOCFPGA_GICD_BASE,
+	.gicr_base = PLAT_INTEL_SOCFPGA_GICR_BASE,
+	.interrupt_props = agx5_interrupt_props,
+	.interrupt_props_num = ARRAY_SIZE(agx5_interrupt_props),
+	.rdistif_num = PLATFORM_CORE_COUNT,
+	.rdistif_base_addrs = rdistif_base_addrs,
+};
+
+/*******************************************************************************
+ * Perform any BL3-1 platform setup code
+ ******************************************************************************/
+void bl31_platform_setup(void)
+{
+	socfpga_delay_timer_init();
+
+	/* Initialize the gic cpu and distributor interfaces */
+	gicv3_driver_init(&plat_gicv3_gic_data);
+	gicv3_distif_init();
+	gicv3_rdistif_init(plat_my_core_pos());
+	gicv3_cpuif_enable(plat_my_core_pos());
+	mailbox_hps_stage_notify(HPS_EXECUTION_STATE_SSBL);
+#if !defined(SIMICS_RUN)
+	ncore_enable_ocram_firewall();
+#endif
+
+}
+
+const mmap_region_t plat_agilex_mmap[] = {
+	MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE, MT_MEMORY | MT_RW | MT_NS),
+	MAP_REGION_FLAT(PSS_BASE, PSS_SIZE, MT_DEVICE | MT_RW | MT_NS),
+	MAP_REGION_FLAT(MPFE_BASE, MPFE_SIZE, MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(OCRAM_BASE, OCRAM_SIZE, MT_NON_CACHEABLE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(CCU_BASE, CCU_SIZE, MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(MEM64_BASE, MEM64_SIZE, MT_DEVICE | MT_RW | MT_NS),
+	MAP_REGION_FLAT(GIC_BASE, GIC_SIZE, MT_DEVICE | MT_RW | MT_SECURE),
+	{0}
+};
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU in a quick and dirty way.
+ ******************************************************************************/
+void bl31_plat_arch_setup(void)
+{
+	uint32_t boot_core = 0x00;
+	uint32_t cpuid = 0x00;
+
+	cpuid = read_mpidr();
+	boot_core = (mmio_read_32(AGX5_PWRMGR(MPU_BOOTCONFIG)) & 0xC00);
+	NOTICE("BL31: Boot Core = %x\n", boot_core);
+	NOTICE("BL31: CPU ID = %x\n", cpuid);
+
+}
+
+/* Get non-secure image entrypoint for BL33. Zephyr and Linux */
+uintptr_t plat_get_ns_image_entrypoint(void)
+{
+#ifdef PRELOADED_BL33_BASE
+	return PRELOADED_BL33_BASE;
+#else
+	return PLAT_NS_IMAGE_OFFSET;
+#endif
+}
+
+/* Get non-secure SPSR for BL33. Zephyr and Linux */
+uint32_t arm_get_spsr_for_bl33_entry(void)
+{
+	unsigned int mode;
+	uint32_t spsr;
+
+	/* Figure out what mode we enter the non-secure world in */
+	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;
+
+	/*
+	 * TODO: Consider the possibility of specifying the SPSR in
+	 * the FIP ToC and allowing the platform to have a say as
+	 * well.
+	 */
+	spsr = SPSR_64((uint64_t)mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+	return spsr;
+}
+
+/* SMP: Secondary cores BL31 setup reset vector */
+void bl31_plat_set_secondary_cpu_entrypoint(unsigned int cpu_id)
+{
+	unsigned int pch_cpu = 0x00;
+	unsigned int pchctlr_old = 0x00;
+	unsigned int pchctlr_new = 0x00;
+	uint32_t boot_core = 0x00;
+
+	boot_core = (mmio_read_32(AGX5_PWRMGR(MPU_BOOTCONFIG)) & 0xC00);
+	/* Update the p-channel based on cpu id */
+	pch_cpu = 1 << cpu_id;
+
+	if (boot_core == 0x00) {
+		/* Point the CPU2 reset vector at the secondary entry point */
+		mmio_write_64(RSTMGR_CPUxRESETBASELOW_CPU2,
+			      (uint64_t) plat_secondary_cpus_bl31_entry >> 2);
+	} else {
+		/* Point the CPU0 reset vector at the secondary entry point */
+		mmio_write_64(RSTMGR_CPUxRESETBASELOW_CPU0,
+			      (uint64_t) plat_secondary_cpus_bl31_entry >> 2);
+	}
+
+	/* CPU1 and CPU3 always boot from the secondary entry point */
+	mmio_write_64(RSTMGR_CPUxRESETBASELOW_CPU1, (uint64_t) plat_secondary_cpus_bl31_entry >> 2);
+	mmio_write_64(RSTMGR_CPUxRESETBASELOW_CPU3, (uint64_t) plat_secondary_cpus_bl31_entry >> 2);
+
+	/* On all cores - temporary */
+	pchctlr_old = mmio_read_32(AGX5_PWRMGR(MPU_PCHCTLR));
+	pchctlr_new = pchctlr_old | (pch_cpu<<1);
+	mmio_write_32(AGX5_PWRMGR(MPU_PCHCTLR), pchctlr_new);
+
+	/* We will only release the target secondary CPUs */
+	/* Bit mask for each CPU BIT0-3 */
+	mmio_write_32(RSTMGR_CPUSTRELEASE_CPUx, pch_cpu);
+}
+
+void bl31_plat_set_secondary_cpu_off(void)
+{
+	unsigned int pch_cpu = 0x00;
+	unsigned int pch_cpu_off = 0x00;
+	unsigned int cpu_id = plat_my_core_pos();
+
+	pch_cpu_off = 1 << cpu_id;
+
+	pch_cpu = mmio_read_32(AGX5_PWRMGR(MPU_PCHCTLR));
+	pch_cpu = pch_cpu & ~(pch_cpu_off << 1);
+
+	mmio_write_32(AGX5_PWRMGR(MPU_PCHCTLR), pch_cpu);
+}
+
+void bl31_plat_enable_mmu(uint32_t flags)
+{
+	/* TODO: Enable mmu when needed */
+}
diff --git a/plat/intel/soc/agilex5/include/agilex5_clock_manager.h b/plat/intel/soc/agilex5/include/agilex5_clock_manager.h
new file mode 100644
index 0000000..566a80d
--- /dev/null
+++ b/plat/intel/soc/agilex5/include/agilex5_clock_manager.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CLOCKMANAGER_H
+#define CLOCKMANAGER_H
+
+#include "socfpga_handoff.h"
+
+/* Clock Manager Registers */
+#define CLKMGR_OFFSET					0x10d10000
+
+#define CLKMGR_CTRL					0x0
+#define CLKMGR_STAT					0x4
+#define CLKMGR_TESTIOCTROL				0x8
+#define CLKMGR_INTRGEN					0xc
+#define CLKMGR_INTRMSK					0x10
+#define CLKMGR_INTRCLR					0x14
+#define CLKMGR_INTRSTS					0x18
+#define CLKMGR_INTRSTK					0x1c
+#define CLKMGR_INTRRAW					0x20
+
+/* Main PLL Group */
+#define CLKMGR_MAINPLL					0x10d10024
+#define CLKMGR_MAINPLL_EN				0x0
+#define CLKMGR_MAINPLL_ENS				0x4
+#define CLKMGR_MAINPLL_BYPASS				0xc
+#define CLKMGR_MAINPLL_BYPASSS				0x10
+#define CLKMGR_MAINPLL_BYPASSR				0x14
+#define CLKMGR_MAINPLL_NOCCLK				0x1c
+#define CLKMGR_MAINPLL_NOCDIV				0x20
+#define CLKMGR_MAINPLL_PLLGLOB				0x24
+#define CLKMGR_MAINPLL_FDBCK				0x28
+#define CLKMGR_MAINPLL_MEM				0x2c
+#define CLKMGR_MAINPLL_MEMSTAT				0x30
+#define CLKMGR_MAINPLL_VCOCALIB				0x34
+#define CLKMGR_MAINPLL_PLLC0				0x38
+#define CLKMGR_MAINPLL_PLLC1				0x3c
+#define CLKMGR_MAINPLL_PLLC2				0x40
+#define CLKMGR_MAINPLL_PLLC3				0x44
+#define CLKMGR_MAINPLL_PLLM				0x48
+#define CLKMGR_MAINPLL_FHOP				0x4c
+#define CLKMGR_MAINPLL_SSC				0x50
+#define CLKMGR_MAINPLL_LOSTLOCK				0x54
+
+/* Peripheral PLL Group */
+#define CLKMGR_PERPLL					0x10d1007c
+#define CLKMGR_PERPLL_EN				0x0
+#define CLKMGR_PERPLL_ENS				0x4
+#define CLKMGR_PERPLL_BYPASS				0xc
+#define CLKMGR_PERPLL_EMACCTL				0x18
+#define CLKMGR_PERPLL_GPIODIV				0x1c
+#define CLKMGR_PERPLL_PLLGLOB				0x20
+#define CLKMGR_PERPLL_FDBCK				0x24
+#define CLKMGR_PERPLL_MEM				0x28
+#define CLKMGR_PERPLL_MEMSTAT				0x2c
+#define CLKMGR_PERPLL_PLLC0				0x30
+#define CLKMGR_PERPLL_PLLC1				0x34
+#define CLKMGR_PERPLL_VCOCALIB				0x38
+#define CLKMGR_PERPLL_PLLC2				0x3c
+#define CLKMGR_PERPLL_PLLC3				0x40
+#define CLKMGR_PERPLL_PLLM				0x44
+#define CLKMGR_PERPLL_LOSTLOCK				0x50
+
+/* Altera Group */
+#define CLKMGR_ALTERA					0x10d100d0
+#define CLKMGR_ALTERA_JTAG				0x0
+#define CLKMGR_ALTERA_EMACACTR				0x4
+#define CLKMGR_ALTERA_EMACBCTR				0x8
+#define CLKMGR_ALTERA_EMACPTPCTR			0xc
+#define CLKMGR_ALTERA_GPIODBCTR				0x10
+#define CLKMGR_ALTERA_S2FUSER0CTR			0x18
+#define CLKMGR_ALTERA_S2FUSER1CTR			0x1c
+#define CLKMGR_ALTERA_PSIREFCTR				0x20
+#define CLKMGR_ALTERA_EXTCNTRST				0x24
+#define CLKMGR_ALTERA_USB31CTR				0x28
+#define CLKMGR_ALTERA_DSUCTR				0x2c
+#define CLKMGR_ALTERA_CORE01CTR				0x30
+#define CLKMGR_ALTERA_CORE23CTR				0x34
+#define CLKMGR_ALTERA_CORE2CTR				0x38
+#define CLKMGR_ALTERA_CORE3CTR				0x3c
+
+/* Membus */
+#define CLKMGR_MEM_REQ					BIT(24)
+#define CLKMGR_MEM_WR					BIT(25)
+#define CLKMGR_MEM_ERR					BIT(26)
+#define CLKMGR_MEM_WDAT_OFFSET				16
+#define CLKMGR_MEM_ADDR					0x4027
+#define CLKMGR_MEM_WDAT					0x80
+
+/* Clock Manager Macros */
+#define CLKMGR_CTRL_BOOTMODE_SET_MSK			0x00000001
+#define CLKMGR_STAT_BUSY_E_BUSY				0x1
+#define CLKMGR_STAT_BUSY(x)				(((x) & 0x00000001) >> 0)
+#define CLKMGR_STAT_MAINPLLLOCKED(x)			(((x) & 0x00000100) >> 8)
+#define CLKMGR_STAT_PERPLLLOCKED(x)			(((x) & 0x00010000) >> 16)
+#define CLKMGR_INTRCLR_MAINLOCKLOST_SET_MSK		0x00000004
+#define CLKMGR_INTRCLR_PERLOCKLOST_SET_MSK		0x00000008
+#define CLKMGR_INTOSC_HZ				460000000
+
+/* Main PLL Macros */
+#define CLKMGR_MAINPLL_EN_RESET				0x0000005e
+#define CLKMGR_MAINPLL_ENS_RESET			0x0000005e
+
+/* Peripheral PLL Macros */
+#define CLKMGR_PERPLL_EN_RESET				0x040007FF
+#define CLKMGR_PERPLL_ENS_RESET			0x040007FF
+
+#define CLKMGR_PERPLL_EN_SDMMCCLK			BIT(5)
+#define CLKMGR_PERPLL_GPIODIV_GPIODBCLK_SET(x)		(((x) << 0) & 0x0000ffff)
+
+/* Altera Macros */
+#define CLKMGR_ALTERA_EXTCNTRST_RESET			0xff
+
+/* Shared Macros */
+#define CLKMGR_PSRC(x)					(((x) & 0x00030000) >> 16)
+#define CLKMGR_PSRC_MAIN				0
+#define CLKMGR_PSRC_PER					1
+
+#define CLKMGR_PLLGLOB_PSRC_EOSC1			0x0
+#define CLKMGR_PLLGLOB_PSRC_INTOSC			0x1
+#define CLKMGR_PLLGLOB_PSRC_F2S				0x2
+
+#define CLKMGR_PLLM_MDIV(x)				((x) & 0x000003ff)
+#define CLKMGR_PLLGLOB_PD_SET_MSK			0x00000001
+#define CLKMGR_PLLGLOB_RST_SET_MSK			0x00000002
+
+#define CLKMGR_PLLGLOB_REFCLKDIV(x)			(((x) & 0x00003f00) >> 8)
+#define CLKMGR_PLLGLOB_AREFCLKDIV(x)			(((x) & 0x00000f00) >> 8)
+#define CLKMGR_PLLGLOB_DREFCLKDIV(x)			(((x) & 0x00003000) >> 12)
+
+#define CLKMGR_VCOCALIB_HSCNT_SET(x)			(((x) << 0) & 0x000003ff)
+#define CLKMGR_VCOCALIB_MSCNT_SET(x)			(((x) << 16) & 0x00ff0000)
+
+#define CLKMGR_CLR_LOSTLOCK_BYPASS			0x20000000
+
+typedef struct {
+	uint32_t  clk_freq_of_eosc1;
+	uint32_t  clk_freq_of_f2h_free;
+	uint32_t  clk_freq_of_cb_intosc_ls;
+} CLOCK_SOURCE_CONFIG;
+
+void config_clkmgr_handoff(handoff *hoff_ptr);
+uint32_t get_wdt_clk(void);
+uint32_t get_uart_clk(void);
+uint32_t get_mmc_clk(void);
+
+#endif
diff --git a/plat/intel/soc/agilex5/include/agilex5_memory_controller.h b/plat/intel/soc/agilex5/include/agilex5_memory_controller.h
new file mode 100644
index 0000000..1708488
--- /dev/null
+++ b/plat/intel/soc/agilex5/include/agilex5_memory_controller.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AGX_MEMORYCONTROLLER_H
+#define AGX_MEMORYCONTROLLER_H
+
+#include "socfpga_plat_def.h"
+
+#define AGX_MPFE_IOHMC_REG_DRAMADDRW				0xf80100a8
+#define AGX_MPFE_IOHMC_CTRLCFG0					0xf8010028
+#define AGX_MPFE_IOHMC_CTRLCFG1					0xf801002c
+#define AGX_MPFE_IOHMC_CTRLCFG2					0xf8010030
+#define AGX_MPFE_IOHMC_CTRLCFG3					0xf8010034
+#define AGX_MPFE_IOHMC_DRAMADDRW				0xf80100a8
+#define AGX_MPFE_IOHMC_DRAMTIMING0				0xf8010050
+#define AGX_MPFE_IOHMC_CALTIMING0				0xf801007c
+#define AGX_MPFE_IOHMC_CALTIMING1				0xf8010080
+#define AGX_MPFE_IOHMC_CALTIMING2				0xf8010084
+#define AGX_MPFE_IOHMC_CALTIMING3				0xf8010088
+#define AGX_MPFE_IOHMC_CALTIMING4				0xf801008c
+#define AGX_MPFE_IOHMC_CALTIMING9				0xf80100a0
+#define AGX_MPFE_IOHMC_CALTIMING9_ACT_TO_ACT(x)			(((x) & 0x000000ff) >> 0)
+#define AGX_MPFE_IOHMC_CTRLCFG1_CFG_ADDR_ORDER(value)		(((value) & 0x00000060) >> 5)
+
+#define AGX_MPFE_HMC_ADP_ECCCTRL1				0xf8011100
+#define AGX_MPFE_HMC_ADP_ECCCTRL2				0xf8011104
+#define AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT			0xf8011218
+#define AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT_SEQ2CORE		0x000000ff
+#define AGX_MPFE_HMC_ADP_RSTHANDSHAKECTRL			0xf8011214
+
+
+#define AGX_MPFE_IOHMC_REG_CTRLCFG1				0xf801002c
+
+#define AGX_MPFE_IOHMC_REG_NIOSRESERVE0_OFST			0xf8010110
+
+#define IOHMC_DRAMADDRW_COL_ADDR_WIDTH(x)			(((x) & 0x0000001f) >> 0)
+#define IOHMC_DRAMADDRW_ROW_ADDR_WIDTH(x)			(((x) & 0x000003e0) >> 5)
+#define IOHMC_DRAMADDRW_CS_ADDR_WIDTH(x)			(((x) & 0x00070000) >> 16)
+#define IOHMC_DRAMADDRW_BANK_GRP_ADDR_WIDTH(x)			(((x) & 0x0000c000) >> 14)
+#define IOHMC_DRAMADDRW_BANK_ADDR_WIDTH(x)			(((x) & 0x00003c00) >> 10)
+
+#define AGX_MPFE_DDR(x)						(0xf8000000 + x)
+#define AGX_MPFE_HMC_ADP_DDRCALSTAT				0xf801100c
+#define AGX_MPFE_DDR_MAIN_SCHED					0xf8000400
+#define AGX_MPFE_DDR_MAIN_SCHED_DDRCONF				0xf8000408
+#define AGX_MPFE_DDR_MAIN_SCHED_DDRTIMING			0xf800040c
+#define AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET_MSK			0x0000001f
+#define AGX_MPFE_DDR_MAIN_SCHED_DDRMODE				0xf8000410
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV			0xf800043c
+#define AGX_MPFE_DDR_MAIN_SCHED_READLATENCY			0xf8000414
+#define AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE			0xf8000438
+#define AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAWBANK_OFST		10
+#define AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAW_OFST		4
+#define AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_RRD_OFST		0
+#define AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET(x)			(((x) << 0) & 0x0000001f)
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_OFST		0
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_MSK		(BIT(0) | BIT(1))
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_OFST		2
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_MSK		(BIT(2) | BIT(3))
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_OFST		4
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_MSK		(BIT(4) | BIT(5))
+
+#define AGX_MPFE_HMC_ADP(x)					(0xf8011000 + (x))
+#define AGX_MPFE_HMC_ADP_HPSINTFCSEL				0xf8011210
+#define AGX_MPFE_HMC_ADP_DDRIOCTRL				0xf8011008
+#define HMC_ADP_DDRIOCTRL					0x8
+#define HMC_ADP_DDRIOCTRL_IO_SIZE(x)				(((x) & 0x00000003) >> 0)
+#define HMC_ADP_DDRIOCTRL_CTRL_BURST_LENGTH(x)			(((x) & 0x00003e00) >> 9)
+#define ADP_DRAMADDRWIDTH					0xe0
+
+#define ACT_TO_ACT_DIFF_BANK(value)				(((value) & 0x00fc0000) >> 18)
+#define ACT_TO_ACT(value)					(((value) & 0x0003f000) >> 12)
+#define ACT_TO_RDWR(value)					(((value) & 0x0000003f) >> 0)
+
+/* timing 2 */
+#define RD_TO_RD_DIFF_CHIP(value)				(((value) & 0x00000fc0) >> 6)
+#define RD_TO_WR_DIFF_CHIP(value)				(((value) & 0x3f000000) >> 24)
+#define RD_TO_WR(value)						(((value) & 0x00fc0000) >> 18)
+#define RD_TO_PCH(value)					(((value) & 0x00000fc0) >> 6)
+
+/* timing 3 */
+#define CALTIMING3_WR_TO_RD_DIFF_CHIP(value)			(((value) & 0x0003f000) >> 12)
+#define CALTIMING3_WR_TO_RD(value)				(((value) & 0x00000fc0) >> 6)
+
+/* timing 4 */
+#define PCH_TO_VALID(value)					(((value) & 0x00000fc0) >> 6)
+
+#define DDRTIMING_BWRATIO_OFST					31
+#define DDRTIMING_WRTORD_OFST					26
+#define DDRTIMING_RDTOWR_OFST					21
+#define DDRTIMING_BURSTLEN_OFST					18
+#define DDRTIMING_WRTOMISS_OFST					12
+#define DDRTIMING_RDTOMISS_OFST					6
+#define DDRTIMING_ACTTOACT_OFST					0
+
+#define ADP_DDRIOCTRL_IO_SIZE(x)				(((x) & 0x3) >> 0)
+
+#define DDRMODE_AUTOPRECHARGE_OFST				1
+#define DDRMODE_BWRATIOEXTENDED_OFST				0
+
+
+#define AGX_MPFE_IOHMC_REG_DRAMTIMING0_CFG_TCL(x)		(((x) & 0x7f) >> 0)
+#define AGX_MPFE_IOHMC_REG_CTRLCFG0_CFG_MEM_TYPE(x)		(((x) & 0x0f) >> 0)
+
+#define AGX_CCU_CPU0_MPRT_DDR					0xf7004400
+#define AGX_CCU_CPU0_MPRT_MEM0					0xf70045c0
+#define AGX_CCU_CPU0_MPRT_MEM1A					0xf70045e0
+#define AGX_CCU_CPU0_MPRT_MEM1B					0xf7004600
+#define AGX_CCU_CPU0_MPRT_MEM1C					0xf7004620
+#define AGX_CCU_CPU0_MPRT_MEM1D					0xf7004640
+#define AGX_CCU_CPU0_MPRT_MEM1E					0xf7004660
+#define AGX_CCU_IOM_MPRT_MEM0					0xf7018560
+#define AGX_CCU_IOM_MPRT_MEM1A					0xf7018580
+#define	AGX_CCU_IOM_MPRT_MEM1B					0xf70185a0
+#define	AGX_CCU_IOM_MPRT_MEM1C					0xf70185c0
+#define	AGX_CCU_IOM_MPRT_MEM1D					0xf70185e0
+#define	AGX_CCU_IOM_MPRT_MEM1E					0xf7018600
+
+#define AGX_NOC_FW_DDR_SCR					0xf8020200
+#define AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMITEXT		0xf802021c
+#define AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMIT			0xf8020218
+#define AGX_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMITEXT		0xf802029c
+#define AGX_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMIT		0xf8020298
+
+#define AGX_SOC_NOC_FW_DDR_SCR_ENABLE				0xf8020200
+#define AGX_SOC_NOC_FW_DDR_SCR_ENABLESET			0xf8020204
+#define AGX_CCU_NOC_DI_SET_MSK					0x10
+
+#define AGX_SYSMGR_CORE_HMC_CLK					0xffd120b4
+#define AGX_SYSMGR_CORE_HMC_CLK_STATUS				0x00000001
+
+#define AGX_MPFE_IOHMC_NIOSRESERVE0_NIOS_RESERVE0(x)		(((x) & 0xffff) >> 0)
+#define AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_MSK			0x00000003
+#define AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_OFST			0
+#define AGX_MPFE_HMC_ADP_HPSINTFCSEL_ENABLE			0x001f1f1f
+#define AGX_IOHMC_CTRLCFG1_ENABLE_ECC_OFST			7
+
+#define AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK	0x00010000
+#define AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK		0x00000100
+#define AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK		0x00000001
+
+#define AGX_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK		0x00000001
+#define AGX_MPFE_HMC_ADP_ECCCTRL2_OVRW_RB_ECC_EN_SET_MSK	0x00010000
+#define AGX_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK		0x00000100
+#define AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(value)			(((value) & 0x1) >> 0)
+
+
+#define AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE(x)			(((x) & 0x00003) >> 0)
+#define IOHMC_DRAMADDRW_CFG_BANK_ADDR_WIDTH(x)			(((x) & 0x03c00) >> 10)
+#define IOHMC_DRAMADDRW_CFG_BANK_GROUP_ADDR_WIDTH(x)		(((x) & 0x0c000) >> 14)
+#define IOHMC_DRAMADDRW_CFG_COL_ADDR_WIDTH(x)			(((x) & 0x0001f) >> 0)
+#define IOHMC_DRAMADDRW_CFG_CS_ADDR_WIDTH(x)			(((x) & 0x70000) >> 16)
+#define IOHMC_DRAMADDRW_CFG_ROW_ADDR_WIDTH(x)			(((x) & 0x003e0) >> 5)
+
+#define AGX_SDRAM_0_LB_ADDR					0x0
+#define AGX_DDR_SIZE						0x40000000
+
+/* Macros */
+#define SOCFPGA_MEMCTRL_ECCCTRL1				0x008
+#define SOCFPGA_MEMCTRL_ERRINTEN				0x010
+#define SOCFPGA_MEMCTRL_ERRINTENS				0x014
+#define SOCFPGA_MEMCTRL_ERRINTENR				0x018
+#define SOCFPGA_MEMCTRL_INTMODE					0x01C
+#define SOCFPGA_MEMCTRL_INTSTAT					0x020
+#define SOCFPGA_MEMCTRL_DIAGINTTEST				0x024
+#define SOCFPGA_MEMCTRL_DERRADDRA				0x02C
+
+#define SOCFPGA_MEMCTRL(_reg)					(SOCFPGA_MEMCTRL_REG_BASE \
+								+ (SOCFPGA_MEMCTRL_##_reg))
+
+#endif
diff --git a/plat/intel/soc/agilex5/include/agilex5_mmc.h b/plat/intel/soc/agilex5/include/agilex5_mmc.h
new file mode 100644
index 0000000..c8a5fba
--- /dev/null
+++ b/plat/intel/soc/agilex5/include/agilex5_mmc.h
@@ -0,0 +1,7 @@
+/*
+ * Copyright (c) 2020-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+void agx5_mmc_init(void);
diff --git a/plat/intel/soc/agilex5/include/agilex5_pinmux.h b/plat/intel/soc/agilex5/include/agilex5_pinmux.h
new file mode 100644
index 0000000..8a8e8c7
--- /dev/null
+++ b/plat/intel/soc/agilex5/include/agilex5_pinmux.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AGX5_PINMUX_H
+#define AGX5_PINMUX_H
+
+/* PINMUX REGISTER ADDRESS */
+#define AGX5_PINMUX_PIN0SEL					0x10d13000
+#define AGX5_PINMUX_IO0CTRL					0x10d13130
+#define AGX5_PINMUX_EMAC0_USEFPGA				0x10d13300
+#define AGX5_PINMUX_IO0_DELAY					0x10d13400
+#define AGX5_PERIPHERAL						0x10d14044
+
+#include "socfpga_handoff.h"
+
+/* PINMUX DEFINE */
+#define PINMUX_HANDOFF_ARRAY_SIZE(x)				(sizeof(x) / sizeof((x)[0]))
+#define PINMUX_HANDOFF_CONFIG_ADDR				0xbeec
+#define PINMUX_HANDOFF_CONFIG_VAL				0x7e000
+
+/* Macros */
+#define SOCFPGA_PINMUX_SEL_NAND					(0x03)
+#define SOCFPGA_PINMUX_PIN0SEL					(0x00)
+#define SOCFPGA_PINMUX_PIN1SEL					(0x04)
+#define SOCFPGA_PINMUX_PIN2SEL					(0x08)
+#define SOCFPGA_PINMUX_PIN3SEL					(0x0C)
+#define SOCFPGA_PINMUX_PIN4SEL					(0x10)
+#define SOCFPGA_PINMUX_PIN5SEL					(0x14)
+#define SOCFPGA_PINMUX_PIN6SEL					(0x18)
+#define SOCFPGA_PINMUX_PIN7SEL					(0x1C)
+#define SOCFPGA_PINMUX_PIN8SEL					(0x20)
+#define SOCFPGA_PINMUX_PIN9SEL					(0x24)
+#define SOCFPGA_PINMUX_PIN10SEL					(0x28)
+#define SOCFPGA_PINMUX_PIN11SEL					(0x2C)
+#define SOCFPGA_PINMUX_PIN12SEL					(0x30)
+#define SOCFPGA_PINMUX_PIN13SEL					(0x34)
+#define SOCFPGA_PINMUX_PIN14SEL					(0x38)
+#define SOCFPGA_PINMUX_PIN15SEL					(0x3C)
+#define SOCFPGA_PINMUX_PIN16SEL					(0x40)
+#define SOCFPGA_PINMUX_PIN17SEL					(0x44)
+#define SOCFPGA_PINMUX_PIN18SEL					(0x48)
+#define SOCFPGA_PINMUX_PIN19SEL					(0x4C)
+#define SOCFPGA_PINMUX_PIN20SEL					(0x50)
+#define SOCFPGA_PINMUX_PIN21SEL					(0x54)
+#define SOCFPGA_PINMUX_PIN22SEL					(0x58)
+#define SOCFPGA_PINMUX_PIN23SEL					(0x5C)
+#define SOCFPGA_PINMUX_PIN24SEL					(0x60)
+#define SOCFPGA_PINMUX_PIN25SEL					(0x64)
+#define SOCFPGA_PINMUX_PIN26SEL					(0x68)
+#define SOCFPGA_PINMUX_PIN27SEL					(0x6C)
+#define SOCFPGA_PINMUX_PIN28SEL					(0x70)
+#define SOCFPGA_PINMUX_PIN29SEL					(0x74)
+#define SOCFPGA_PINMUX_PIN30SEL					(0x78)
+#define SOCFPGA_PINMUX_PIN31SEL					(0x7C)
+#define SOCFPGA_PINMUX_PIN32SEL					(0x80)
+#define SOCFPGA_PINMUX_PIN33SEL					(0x84)
+#define SOCFPGA_PINMUX_PIN34SEL					(0x88)
+#define SOCFPGA_PINMUX_PIN35SEL					(0x8C)
+#define SOCFPGA_PINMUX_PIN36SEL					(0x90)
+#define SOCFPGA_PINMUX_PIN37SEL					(0x94)
+#define SOCFPGA_PINMUX_PIN38SEL					(0x98)
+#define SOCFPGA_PINMUX_PIN39SEL					(0x9C)
+#define SOCFPGA_PINMUX_PIN40SEL					(0x100)
+#define SOCFPGA_PINMUX_PIN41SEL					(0x104)
+#define SOCFPGA_PINMUX_PIN42SEL					(0x108)
+#define SOCFPGA_PINMUX_PIN43SEL					(0x10C)
+#define SOCFPGA_PINMUX_PIN44SEL					(0x110)
+#define SOCFPGA_PINMUX_PIN45SEL					(0x114)
+#define SOCFPGA_PINMUX_PIN46SEL					(0x118)
+#define SOCFPGA_PINMUX_PIN47SEL					(0x11C)
+
+#define SOCFPGA_PINMUX_IO0CTRL					(0x00)
+#define SOCFPGA_PINMUX_IO1CTRL					(0x04)
+#define SOCFPGA_PINMUX_IO2CTRL					(0x08)
+#define SOCFPGA_PINMUX_IO3CTRL					(0x0C)
+#define SOCFPGA_PINMUX_IO4CTRL					(0x10)
+#define SOCFPGA_PINMUX_IO5CTRL					(0x14)
+#define SOCFPGA_PINMUX_IO6CTRL					(0x18)
+#define SOCFPGA_PINMUX_IO7CTRL					(0x1C)
+#define SOCFPGA_PINMUX_IO8CTRL					(0x20)
+#define SOCFPGA_PINMUX_IO9CTRL					(0x24)
+#define SOCFPGA_PINMUX_IO10CTRL					(0x28)
+#define SOCFPGA_PINMUX_IO11CTRL					(0x2C)
+#define SOCFPGA_PINMUX_IO12CTRL					(0x30)
+#define SOCFPGA_PINMUX_IO13CTRL					(0x34)
+#define SOCFPGA_PINMUX_IO14CTRL					(0x38)
+#define SOCFPGA_PINMUX_IO15CTRL					(0x3C)
+#define SOCFPGA_PINMUX_IO16CTRL					(0x40)
+#define SOCFPGA_PINMUX_IO17CTRL					(0x44)
+#define SOCFPGA_PINMUX_IO18CTRL					(0x48)
+#define SOCFPGA_PINMUX_IO19CTRL					(0x4C)
+#define SOCFPGA_PINMUX_IO20CTRL					(0x50)
+#define SOCFPGA_PINMUX_IO21CTRL					(0x54)
+#define SOCFPGA_PINMUX_IO22CTRL					(0x58)
+#define SOCFPGA_PINMUX_IO23CTRL					(0x5C)
+#define SOCFPGA_PINMUX_IO24CTRL					(0x60)
+#define SOCFPGA_PINMUX_IO25CTRL					(0x64)
+#define SOCFPGA_PINMUX_IO26CTRL					(0x68)
+#define SOCFPGA_PINMUX_IO27CTRL					(0x6C)
+#define SOCFPGA_PINMUX_IO28CTRL					(0xD0)
+#define SOCFPGA_PINMUX_IO29CTRL					(0xD4)
+#define SOCFPGA_PINMUX_IO30CTRL					(0xD8)
+#define SOCFPGA_PINMUX_IO31CTRL					(0xDC)
+#define SOCFPGA_PINMUX_IO32CTRL					(0xE0)
+#define SOCFPGA_PINMUX_IO33CTRL					(0xE4)
+#define SOCFPGA_PINMUX_IO34CTRL					(0xE8)
+#define SOCFPGA_PINMUX_IO35CTRL					(0xEC)
+#define SOCFPGA_PINMUX_IO36CTRL					(0xF0)
+#define SOCFPGA_PINMUX_IO37CTRL					(0xF4)
+#define SOCFPGA_PINMUX_IO38CTRL					(0xF8)
+#define SOCFPGA_PINMUX_IO39CTRL					(0xFC)
+#define SOCFPGA_PINMUX_IO40CTRL					(0x100)
+#define SOCFPGA_PINMUX_IO41CTRL					(0x104)
+#define SOCFPGA_PINMUX_IO42CTRL					(0x108)
+#define SOCFPGA_PINMUX_IO43CTRL					(0x10C)
+#define SOCFPGA_PINMUX_IO44CTRL					(0x110)
+#define SOCFPGA_PINMUX_IO45CTRL					(0x114)
+#define SOCFPGA_PINMUX_IO46CTRL					(0x118)
+#define SOCFPGA_PINMUX_IO47CTRL					(0x11C)
+
+#define SOCFPGA_PINMUX_EMAC0_USEFPGA				(0x00)
+#define SOCFPGA_PINMUX_EMAC1_USEFPGA				(0x04)
+#define SOCFPGA_PINMUX_EMAC2_USEFPGA				(0x08)
+#define SOCFPGA_PINMUX_I2C0_USEFPGA				(0x0C)
+#define SOCFPGA_PINMUX_I2C1_USEFPGA				(0x10)
+#define SOCFPGA_PINMUX_I2C_EMAC0_USEFPGA			(0x14)
+#define SOCFPGA_PINMUX_I2C_EMAC1_USEFPGA			(0x18)
+#define SOCFPGA_PINMUX_I2C_EMAC2_USEFPGA			(0x1C)
+#define SOCFPGA_PINMUX_NAND_USEFPGA				(0x20)
+#define SOCFPGA_PINMUX_SPIM0_USEFPGA				(0x28)
+#define SOCFPGA_PINMUX_SPIM1_USEFPGA				(0x2C)
+#define SOCFPGA_PINMUX_SPIS0_USEFPGA				(0x30)
+#define SOCFPGA_PINMUX_SPIS1_USEFPGA				(0x34)
+#define SOCFPGA_PINMUX_UART0_USEFPGA				(0x38)
+#define SOCFPGA_PINMUX_UART1_USEFPGA				(0x3C)
+#define SOCFPGA_PINMUX_MDIO0_USEFPGA				(0x40)
+#define SOCFPGA_PINMUX_MDIO1_USEFPGA				(0x44)
+#define SOCFPGA_PINMUX_MDIO2_USEFPGA				(0x48)
+#define SOCFPGA_PINMUX_JTAG_USEFPGA				(0x50)
+#define SOCFPGA_PINMUX_SDMMC_USEFPGA				(0x54)
+
+#define SOCFPGA_PINMUX_IO0DELAY					(0x00)
+#define SOCFPGA_PINMUX_IO1DELAY					(0x04)
+#define SOCFPGA_PINMUX_IO2DELAY					(0x08)
+#define SOCFPGA_PINMUX_IO3DELAY					(0x0C)
+#define SOCFPGA_PINMUX_IO4DELAY					(0x10)
+#define SOCFPGA_PINMUX_IO5DELAY					(0x14)
+#define SOCFPGA_PINMUX_IO6DELAY					(0x18)
+#define SOCFPGA_PINMUX_IO7DELAY					(0x1C)
+#define SOCFPGA_PINMUX_IO8DELAY					(0x20)
+#define SOCFPGA_PINMUX_IO9DELAY					(0x24)
+#define SOCFPGA_PINMUX_IO10DELAY				(0x28)
+#define SOCFPGA_PINMUX_IO11DELAY				(0x2C)
+#define SOCFPGA_PINMUX_IO12DELAY				(0x30)
+#define SOCFPGA_PINMUX_IO13DELAY				(0x34)
+#define SOCFPGA_PINMUX_IO14DELAY				(0x38)
+#define SOCFPGA_PINMUX_IO15DELAY				(0x3C)
+#define SOCFPGA_PINMUX_IO16DELAY				(0x40)
+#define SOCFPGA_PINMUX_IO17DELAY				(0x44)
+#define SOCFPGA_PINMUX_IO18DELAY				(0x48)
+#define SOCFPGA_PINMUX_IO19DELAY				(0x4C)
+#define SOCFPGA_PINMUX_IO20DELAY				(0x50)
+#define SOCFPGA_PINMUX_IO21DELAY				(0x54)
+#define SOCFPGA_PINMUX_IO22DELAY				(0x58)
+#define SOCFPGA_PINMUX_IO23DELAY				(0x5C)
+#define SOCFPGA_PINMUX_IO24DELAY				(0x60)
+#define SOCFPGA_PINMUX_IO25DELAY				(0x64)
+#define SOCFPGA_PINMUX_IO26DELAY				(0x68)
+#define SOCFPGA_PINMUX_IO27DELAY				(0x6C)
+#define SOCFPGA_PINMUX_IO28DELAY				(0x70)
+#define SOCFPGA_PINMUX_IO29DELAY				(0x74)
+#define SOCFPGA_PINMUX_IO30DELAY				(0x78)
+#define SOCFPGA_PINMUX_IO31DELAY				(0x7C)
+#define SOCFPGA_PINMUX_IO32DELAY				(0x80)
+#define SOCFPGA_PINMUX_IO33DELAY				(0x84)
+#define SOCFPGA_PINMUX_IO34DELAY				(0x88)
+#define SOCFPGA_PINMUX_IO35DELAY				(0x8C)
+#define SOCFPGA_PINMUX_IO36DELAY				(0x90)
+#define SOCFPGA_PINMUX_IO37DELAY				(0x94)
+#define SOCFPGA_PINMUX_IO38DELAY				(0x98)
+#define SOCFPGA_PINMUX_IO39DELAY				(0x9C)
+#define SOCFPGA_PINMUX_IO40DELAY				(0xA0)
+#define SOCFPGA_PINMUX_IO41DELAY				(0xA4)
+#define SOCFPGA_PINMUX_IO42DELAY				(0xA8)
+#define SOCFPGA_PINMUX_IO43DELAY				(0xAC)
+#define SOCFPGA_PINMUX_IO44DELAY				(0xB0)
+#define SOCFPGA_PINMUX_IO45DELAY				(0xB4)
+#define SOCFPGA_PINMUX_IO46DELAY				(0xB8)
+#define SOCFPGA_PINMUX_IO47DELAY				(0xBC)
+
+#define SOCFPGA_PINMUX_I3C0_USEFPGA				(0xC0)
+#define SOCFPGA_PINMUX_I3C1_USEFPGA				(0xC4)
+
+#define SOCFPGA_PINMUX(_reg)					(SOCFPGA_PINMUX_REG_BASE \
+								+ (SOCFPGA_PINMUX_##_reg))
+
+void config_pinmux(handoff *handoff);
+void config_peripheral(handoff *handoff);
+#endif
diff --git a/plat/intel/soc/agilex5/include/agilex5_power_manager.h b/plat/intel/soc/agilex5/include/agilex5_power_manager.h
new file mode 100644
index 0000000..1bba74b
--- /dev/null
+++ b/plat/intel/soc/agilex5/include/agilex5_power_manager.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef POWERMANAGER_H
+#define POWERMANAGER_H
+
+#include "socfpga_handoff.h"
+
+#define AGX5_PWRMGR_BASE					0x10d14000
+
+/* DSU */
+#define AGX5_PWRMGR_DSU_FWENCTL					0x0
+#define AGX5_PWRMGR_DSU_PGENCTL					0x4
+#define AGX5_PWRMGR_DSU_PGSTAT					0x8
+#define AGX5_PWRMGR_DSU_PWRCTLR					0xc
+#define AGX5_PWRMGR_DSU_PWRSTAT0				0x10
+#define AGX5_PWRMGR_DSU_PWRSTAT1				0x14
+
+/* DSU Macros */
+#define AGX5_PWRMGR_DSU_FWEN(x)					((x) & 0xf)
+#define AGX5_PWRMGR_DSU_PGEN(x)					((x) & 0xf)
+#define AGX5_PWRMGR_DSU_PGEN_OUT(x)				((x) & 0xf)
+#define AGX5_PWRMGR_DSU_SINGLE_PACCEPT(x)			((x) & 0x1)
+#define AGX5_PWRMGR_DSU_SINGLE_PDENY(x)				(((x) & 0x1) << 1)
+#define AGX5_PWRMGR_DSU_SINGLE_FSM_STATE(x)			(((x) & 0xff) << 8)
+#define AGX5_PWRMGR_DSU_SINGLE_PCH_DONE(x)			(((x) & 0x1) << 31)
+#define AGX5_PWRMGR_DSU_MULTI_PACTIVE_IN(x)			((x) & 0xff)
+#define AGX5_PWRMGR_DSU_MULTI_PACCEPT(x)			(((x) & 0xff) << 8)
+#define AGX5_PWRMGR_DSU_MULTI_PDENY(x)				(((x) & 0xff) << 16)
+#define AGX5_PWRMGR_DSU_MULTI_PCH_DONE(x)			(((x) & 0x1) << 31)
+
+/* CPU */
+#define AGX5_PWRMGR_CPU_PWRCTLR0				0x18
+#define AGX5_PWRMGR_CPU_PWRCTLR1				0x20
+#define AGX5_PWRMGR_CPU_PWRCTLR2				0x28
+#define AGX5_PWRMGR_CPU_PWRCTLR3				0x30
+#define AGX5_PWRMGR_CPU_PWRSTAT0				0x1c
+#define AGX5_PWRMGR_CPU_PWRSTAT1				0x24
+#define AGX5_PWRMGR_CPU_PWRSTAT2				0x2c
+#define AGX5_PWRMGR_CPU_PWRSTAT3				0x34
+
+/* APS */
+#define AGX5_PWRMGR_APS_FWENCTL					0x38
+#define AGX5_PWRMGR_APS_PGENCTL					0x3C
+#define AGX5_PWRMGR_APS_PGSTAT					0x40
+
+/* PSS */
+#define AGX5_PWRMGR_PSS_FWENCTL					0x44
+#define AGX5_PWRMGR_PSS_PGENCTL					0x48
+#define AGX5_PWRMGR_PSS_PGSTAT					0x4c
+
+/* PSS Macros */
+#define AGX5_PWRMGR_PSS_FWEN(x)					((x) & 0xff)
+#define AGX5_PWRMGR_PSS_PGEN(x)					((x) & 0xff)
+#define AGX5_PWRMGR_PSS_PGEN_OUT(x)				((x) & 0xff)
+
+/* MPU */
+#define AGX5_PWRMGR_MPU_PCHCTLR					0x50
+#define AGX5_PWRMGR_MPU_PCHSTAT					0x54
+#define AGX5_PWRMGR_MPU_BOOTCONFIG				0x58
+#define AGX5_PWRMGR_CPU_POWER_STATE_MASK			0x1E
+
+/* MPU Macros */
+#define AGX5_PWRMGR_MPU_TRIGGER_PCH_DSU(x)			((x) & 0x1)
+#define AGX5_PWRMGR_MPU_TRIGGER_PCH_CPU(x)			(((x) & 0xf) << 1)
+#define AGX5_PWRMGR_MPU_STATUS_PCH_CPU(x)			(((x) & 0xf) << 1)
+
+/* Shared Macros */
+#define AGX5_PWRMGR(_reg)					(AGX5_PWRMGR_BASE + \
+								(AGX5_PWRMGR_##_reg))
+
+/* POWER MANAGER ERROR CODE */
+#define AGX5_PWRMGR_HANDOFF_PERIPHERAL				-1
+#define AGX5_PWRMGR_PSS_STAT_BUSY_E_BUSY			0x0
+#define AGX5_PWRMGR_PSS_STAT_BUSY(x)				(((x) & 0x000000FF) >> 0)
+
+int pss_sram_power_off(handoff *hoff_ptr);
+int wait_verify_fsm(uint16_t timeout, uint32_t peripheral_handoff);
+
+#endif
diff --git a/plat/intel/soc/agilex5/include/agilex5_system_manager.h b/plat/intel/soc/agilex5/include/agilex5_system_manager.h
new file mode 100644
index 0000000..9a58cdb
--- /dev/null
+++ b/plat/intel/soc/agilex5/include/agilex5_system_manager.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef AGX5_SOCFPGA_SYSTEMMANAGER_H
+#define AGX5_SOCFPGA_SYSTEMMANAGER_H
+
+#include "socfpga_plat_def.h"
+
+/* System Manager Register Map */
+#define SOCFPGA_SYSMGR_SILICONID_1					0x00
+#define SOCFPGA_SYSMGR_SILICONID_2					0x04
+#define SOCFPGA_SYSMGR_WDDBG						0x08
+#define SOCFPGA_SYSMGR_MPU_STATUS					0x10
+#define SOCFPGA_SYSMGR_SDMMC_L3_MASTER					0x2C
+#define SOCFPGA_SYSMGR_NAND_L3_MASTER					0x34
+#define SOCFPGA_SYSMGR_USB0_L3_MASTER					0x38
+#define SOCFPGA_SYSMGR_USB1_L3_MASTER					0x3C
+#define SOCFPGA_SYSMGR_TSN_GLOBAL					0x40
+#define SOCFPGA_SYSMGR_EMAC_0						0x44 /* TSN_0 */
+#define SOCFPGA_SYSMGR_EMAC_1						0x48 /* TSN_1 */
+#define SOCFPGA_SYSMGR_EMAC_2						0x4C /* TSN_2 */
+#define SOCFPGA_SYSMGR_TSN_0_ACE					0x50
+#define SOCFPGA_SYSMGR_TSN_1_ACE					0x54
+#define SOCFPGA_SYSMGR_TSN_2_ACE					0x58
+#define SOCFPGA_SYSMGR_FPGAINTF_EN_1					0x68
+#define SOCFPGA_SYSMGR_FPGAINTF_EN_2					0x6C
+#define SOCFPGA_SYSMGR_FPGAINTF_EN_3					0x70
+#define SOCFPGA_SYSMGR_DMAC0_L3_MASTER					0x74
+#define SOCFPGA_SYSMGR_ETR_L3_MASTER					0x78
+#define SOCFPGA_SYSMGR_DMAC1_L3_MASTER					0x7C
+#define SOCFPGA_SYSMGR_SEC_CTRL_SLT					0x80
+#define SOCFPGA_SYSMGR_OSC_TRIM						0x84
+#define SOCFPGA_SYSMGR_DMAC0_CTRL_STATUS_REG				0x88
+#define SOCFPGA_SYSMGR_DMAC1_CTRL_STATUS_REG				0x8C
+#define SOCFPGA_SYSMGR_ECC_INTMASK_VALUE				0x90
+#define SOCFPGA_SYSMGR_ECC_INTMASK_SET					0x94
+#define SOCFPGA_SYSMGR_ECC_INTMASK_CLR					0x98
+#define SOCFPGA_SYSMGR_ECC_INTMASK_SERR					0x9C
+#define SOCFPGA_SYSMGR_ECC_INTMASK_DERR					0xA0
+/* NOC configuration value */
+#define SOCFPGA_SYSMGR_NOC_TIMEOUT					0xC0
+#define SOCFPGA_SYSMGR_NOC_IDLEREQ_SET					0xC4
+#define SOCFPGA_SYSMGR_NOC_IDLEREQ_CLR					0xC8
+#define SOCFPGA_SYSMGR_NOC_IDLEREQ_VAL					0xCC
+#define SOCFPGA_SYSMGR_NOC_IDLEACK					0xD0
+#define SOCFPGA_SYSMGR_NOC_IDLESTATUS					0xD4
+#define SOCFPGA_SYSMGR_FPGA2SOC_CTRL					0xD8
+#define SOCFPGA_SYSMGR_FPGA_CFG						0xDC
+#define SOCFPGA_SYSMGR_GPO						0xE4
+#define SOCFPGA_SYSMGR_GPI						0xE8
+#define SOCFPGA_SYSMGR_MPU						0xF0
+#define SOCFPGA_SYSMGR_SDM_HPS_SPARE					0xF4
+#define SOCFPGA_SYSMGR_HPS_SDM_SPARE					0xF8
+#define SOCFPGA_SYSMGR_DFI_INTF						0xFC
+#define SOCFPGA_SYSMGR_NAND_DD_CTRL					0x100
+#define SOCFPGA_SYSMGR_NAND_PHY_CTRL_REG				0x104
+#define SOCFPGA_SYSMGR_NAND_PHY_TSEL_REG				0x108
+#define SOCFPGA_SYSMGR_NAND_DQ_TIMING_REG				0x10C
+#define SOCFPGA_SYSMGR_PHY_DQS_TIMING_REG				0x110
+#define SOCFPGA_SYSMGR_NAND_PHY_GATE_LPBK_CTRL_REG			0x114
+#define SOCFPGA_SYSMGR_NAND_PHY_DLL_MASTER_CTRL_REG			0x118
+#define SOCFPGA_SYSMGR_NAND_PHY_DLL_SLAVE_CTRL_REG			0x11C
+#define SOCFPGA_SYSMGR_NAND_DD_DEFAULT_SETTING_REG0			0x120
+#define SOCFPGA_SYSMGR_NAND_DD_DEFAULT_SETTING_REG1			0x124
+#define SOCFPGA_SYSMGR_NAND_DD_STATUS_REG				0x128
+#define SOCFPGA_SYSMGR_NAND_DD_ID_LOW_REG				0x12C
+#define SOCFPGA_SYSMGR_NAND_DD_ID_HIGH_REG				0x130
+#define SOCFPGA_SYSMGR_NAND_WRITE_PROT_EN_REG				0x134
+#define SOCFPGA_SYSMGR_SDMMC_CMD_QUEUE_SETTING_REG			0x138
+#define SOCFPGA_SYSMGR_I3C_SLV_PID_LOW					0x13C
+#define SOCFPGA_SYSMGR_I3C_SLV_PID_HIGH					0x140
+#define SOCFPGA_SYSMGR_I3C_SLV_CTRL_0					0x144
+#define SOCFPGA_SYSMGR_I3C_SLV_CTRL_1					0x148
+#define SOCFPGA_SYSMGR_F2S_BRIDGE_CTRL					0x14C
+#define SOCFPGA_SYSMGR_DMA_TBU_STASH_CTRL_REG_0_DMA0			0x150
+#define SOCFPGA_SYSMGR_DMA_TBU_STASH_CTRL_REG_0_DMA1			0x154
+#define SOCFPGA_SYSMGR_SDM_TBU_STASH_CTRL_REG_1_SDM			0x158
+#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_USB2			0x15C
+#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_USB3			0x160
+#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_SDMMC			0x164
+#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_NAND			0x168
+#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_ETR			0x16C
+#define SOCFPGA_SYSMGR_TSN_TBU_STASH_CTRL_REG_3_TSN0			0x170
+#define SOCFPGA_SYSMGR_TSN_TBU_STASH_CTRL_REG_3_TSN1			0x174
+#define SOCFPGA_SYSMGR_TSN_TBU_STASH_CTRL_REG_3_TSN2			0x178
+#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_CTRL_REG_0_DMA0			0x17C
+#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_CTRL_REG_0_DMA1			0x180
+#define SOCFPGA_SYSMGR_SDM_TBU_STREAM_CTRL_REG_1_SDM			0x184
+#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_USB2			0x188
+#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_USB3			0x18C
+#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_SDMMC			0x190
+#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_NAND			0x194
+#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_ETR			0x198
+#define SOCFPGA_SYSMGR_TSN_TBU_STREAM_CTRL_REG_3_TSN0			0x19C
+#define SOCFPGA_SYSMGR_TSN_TBU_STREAM_CTRL_REG_3_TSN1			0x1A0
+#define SOCFPGA_SYSMGR_TSN_TBU_STREAM_CTRL_REG_3_TSN2			0x1A4
+#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_ID_AX_REG_0_DMA0			0x1A8
+#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_ID_AX_REG_0_DMA1			0x1AC
+#define SOCFPGA_SYSMGR_SDM_TBU_STREAM_ID_AX_REG_1_SDM			0x1B0
+#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_USB2			0x1B4
+#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_USB3			0x1B8
+#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_SDMMC			0x1BC
+#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_NAND			0x1C0
+#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_ETR			0x1C4
+#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_TSN0			0x1C8
+#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_TSN1			0x1CC
+#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_TSN2			0x1D0
+#define SOCFPGA_SYSMGR_USB3_MISC_CTRL_REG0				0x1F0
+#define SOCFPGA_SYSMGR_USB3_MISC_CTRL_REG1				0x1F4
+
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_0				0x200
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_1				0x204
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_2				0x208
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_3				0x20C
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_4				0x210
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_5				0x214
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_6				0x218
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_7				0x21C
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_8				0x220
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_9				0x224
+#define SOCFPGA_SYSMGR_MPFE_CONFIG					0x228
+#define SOCFPGA_SYSMGR_MPFE_status					0x22C
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_0				0x230
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_1				0x234
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_2				0x238
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_3				0x23C
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_4				0x240
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_5				0x244
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_6				0x248
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_7				0x24C
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_8				0x250
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_9				0x254
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_0				0x258
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_1				0x25C
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_2				0x260
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_3				0x264
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_4				0x268
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_5				0x26C
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_6				0x270
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_7				0x274
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_8				0x278
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_9				0x27C
+
+#define DMA0_STREAM_CTRL_REG						0x10D1217C
+#define DMA1_STREAM_CTRL_REG						0x10D12180
+#define SDM_STREAM_CTRL_REG						0x10D12184
+#define USB2_STREAM_CTRL_REG						0x10D12188
+#define USB3_STREAM_CTRL_REG						0x10D1218C
+#define SDMMC_STREAM_CTRL_REG						0x10D12190
+#define NAND_STREAM_CTRL_REG						0x10D12194
+#define ETR_STREAM_CTRL_REG						0x10D12198
+#define TSN0_STREAM_CTRL_REG						0x10D1219C
+#define TSN1_STREAM_CTRL_REG						0x10D121A0
+#define TSN2_STREAM_CTRL_REG						0x10D121A4
+
+/* Stream ID configuration value for Agilex5 */
+#define TSN0								0x00010001
+#define TSN1								0x00020002
+#define TSN2								0x00030003
+#define NAND								0x00040004
+#define SDMMC								0x00050005
+#define USB0								0x00060006
+#define USB1								0x00070007
+#define DMA0								0x00080008
+#define DMA1								0x00090009
+#define SDM								0x000A000A
+#define CORE_SIGHT_DEBUG						0x000B000B
+
+/* Field Masking */
+#define SYSMGR_SDMMC_DRVSEL(x)						(((x) & 0x7) << 0)
+#define SYSMGR_SDMMC_SMPLSEL(x)						(((x) & 0x7) << 4)
+
+#define SYSMGR_F2S_BRIDGE_CTRL_EN					BIT(0)
+#define IDLE_DATA_LWSOC2FPGA						BIT(4)
+#define IDLE_DATA_SOC2FPGA						BIT(0)
+#define IDLE_DATA_MASK							(IDLE_DATA_LWSOC2FPGA \
+									| IDLE_DATA_SOC2FPGA)
+#define SYSMGR_ECC_OCRAM_MASK						BIT(1)
+#define SYSMGR_ECC_DDR0_MASK						BIT(16)
+#define SYSMGR_ECC_DDR1_MASK						BIT(17)
+
+#define WSTREAMIDEN_REG_CTRL						BIT(0)
+#define RSTREAMIDEN_REG_CTRL						BIT(1)
+#define WMMUSECSID_REG_VAL						BIT(4)
+#define RMMUSECSID_REG_VAL						BIT(5)
+
+/* Macros */
+#define SOCFPGA_SYSMGR(_reg)						(SOCFPGA_SYSMGR_REG_BASE \
+									+ (SOCFPGA_SYSMGR_##_reg))
+
+#define ENABLE_STREAMID							WSTREAMIDEN_REG_CTRL \
+									| RSTREAMIDEN_REG_CTRL
+#define ENABLE_STREAMID_SECURE_TX					WSTREAMIDEN_REG_CTRL \
+									| RSTREAMIDEN_REG_CTRL \
+									| WMMUSECSID_REG_VAL \
+									| RMMUSECSID_REG_VAL
+
+#endif /* AGX5_SOCFPGA_SYSTEMMANAGER_H */
diff --git a/plat/intel/soc/agilex5/include/socfpga_plat_def.h b/plat/intel/soc/agilex5/include/socfpga_plat_def.h
new file mode 100644
index 0000000..8a49d61
--- /dev/null
+++ b/plat/intel/soc/agilex5/include/socfpga_plat_def.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLAT_SOCFPGA_DEF_H
+#define PLAT_SOCFPGA_DEF_H
+
+#include "agilex5_memory_controller.h"
+#include "agilex5_system_manager.h"
+#include <platform_def.h>
+
+/* Platform Setting */
+#define PLATFORM_MODEL						PLAT_SOCFPGA_AGILEX5
+#define BOOT_SOURCE						BOOT_SOURCE_SDMMC
+#define MMC_DEVICE_TYPE						1  /* MMC = 0, SD = 1 */
+#define XLAT_TABLES_V2						U(1)
+#define PLAT_PRIMARY_CPU_A55					0x000
+#define PLAT_PRIMARY_CPU_A76					0x200
+#define PLAT_CLUSTER_ID_MPIDR_AFF_SHIFT				MPIDR_AFF2_SHIFT
+#define PLAT_CPU_ID_MPIDR_AFF_SHIFT				MPIDR_AFF1_SHIFT
+#define PLAT_L2_RESET_REQ			0xB007C0DE
+
+/* System Counter */ /* TODO: Update back to 400MHz */
+#define PLAT_SYS_COUNTER_FREQ_IN_TICKS				(80000000)
+#define PLAT_SYS_COUNTER_FREQ_IN_MHZ				(80)
+
+/* FPGA config helpers */
+#define INTEL_SIP_SMC_FPGA_CONFIG_ADDR				0x400000
+#define INTEL_SIP_SMC_FPGA_CONFIG_SIZE				0x2000000
+
+/* QSPI Setting */
+#define CAD_QSPIDATA_OFST					0x10900000
+#define CAD_QSPI_OFFSET						0x108d2000
+
+/* Register Mapping */
+#define SOCFPGA_CCU_NOC_REG_BASE				0x1c000000
+#define SOCFPGA_F2SDRAMMGR_REG_BASE				0x18001000
+
+#define SOCFPGA_MMC_REG_BASE					0x10808000
+#define SOCFPGA_MEMCTRL_REG_BASE				0x108CC000
+#define SOCFPGA_RSTMGR_REG_BASE					0x10d11000
+#define SOCFPGA_SYSMGR_REG_BASE					0x10d12000
+#define SOCFPGA_PINMUX_REG_BASE					0x10d13000
+#define SOCFPGA_NAND_REG_BASE					0x10B80000
+
+#define SOCFPGA_L4_PER_SCR_REG_BASE				0x10d21000
+#define SOCFPGA_L4_SYS_SCR_REG_BASE				0x10d21100
+#define SOCFPGA_SOC2FPGA_SCR_REG_BASE				0x10d21200
+#define SOCFPGA_LWSOC2FPGA_SCR_REG_BASE				0x10d21300
+
+/* Define maximum page size for NAND flash devices */
+#define PLATFORM_MTD_MAX_PAGE_SIZE				U(0x1000)
+
+/*******************************************************************************
+ * Platform memory map related constants
+ ******************************************************************************/
+#define DRAM_BASE						(0x80000000)
+#define DRAM_SIZE						(0x80000000)
+
+#define OCRAM_BASE						(0x00000000)
+#define OCRAM_SIZE						(0x00080000)
+
+#define MEM64_BASE						(0x0080000000)
+#define MEM64_SIZE						(0x0080000000)
+
+/* 128MB PSS */
+#define PSS_BASE						(0x10000000)
+#define PSS_SIZE						(0x08000000)
+
+/* 64MB MPFE */
+#define MPFE_BASE						(0x18000000)
+#define MPFE_SIZE						(0x04000000)
+
+/* 16MB CCU */
+#define CCU_BASE						(0x1C000000)
+#define CCU_SIZE						(0x01000000)
+
+/* 1MB GIC */
+#define GIC_BASE						(0x1D000000)
+#define GIC_SIZE						(0x00100000)
+
+#define BL2_BASE						(0x00000000)
+#define BL2_LIMIT						(0x0001b000)
+
+#define BL31_BASE						(0x80000000)
+#define BL31_LIMIT						(0x82000000)
+
+/*******************************************************************************
+ * UART related constants
+ ******************************************************************************/
+#define PLAT_UART0_BASE						(0x10C02000)
+#define PLAT_UART1_BASE						(0x10C02100)
+
+/*******************************************************************************
+ * GIC related constants
+ ******************************************************************************/
+#define PLAT_GIC_BASE						(0x1D000000)
+#define PLAT_GICC_BASE						(PLAT_GIC_BASE + 0x20000)
+#define PLAT_GICD_BASE						(PLAT_GIC_BASE + 0x00000)
+#define PLAT_GICR_BASE						(PLAT_GIC_BASE + 0x60000)
+
+#define PLAT_INTEL_SOCFPGA_GICR_BASE				PLAT_GICR_BASE
+
+/*******************************************************************************
+ * SDMMC related pointer function
+ ******************************************************************************/
+#define SDMMC_READ_BLOCKS	sdmmc_read_blocks
+#define SDMMC_WRITE_BLOCKS	sdmmc_write_blocks
+
+/*******************************************************************************
+ * sysmgr.boot_scratch_cold6 & 7 (64bit) are used to indicate L2 reset
+ * is done and HPS should trigger warm reset via RMR_EL3.
+ ******************************************************************************/
+#define L2_RESET_DONE_REG			0x10D12218
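+
+/*
+ * Note: elsewhere in this patch, plat_get_my_entrypoint() compares the value
+ * held in L2_RESET_DONE_REG against PLAT_L2_RESET_REQ (0xB007C0DE) to detect
+ * a pending L2 reset request, and warm_reset_req triggers the warm reset
+ * through RMR_EL3.
+ */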
+
+#endif /* PLAT_SOCFPGA_DEF_H */
diff --git a/plat/intel/soc/agilex5/platform.mk b/plat/intel/soc/agilex5/platform.mk
new file mode 100644
index 0000000..779c629
--- /dev/null
+++ b/plat/intel/soc/agilex5/platform.mk
@@ -0,0 +1,107 @@
+#
+# Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+include lib/xlat_tables_v2/xlat_tables.mk
+PLAT_INCLUDES		:=	\
+			-Iplat/intel/soc/agilex5/include/		\
+			-Iplat/intel/soc/common/drivers/		\
+			-Iplat/intel/soc/common/include/
+
+# GIC-600 configuration
+GICV3_SUPPORT_GIC600	:=	1
+# Include GICv3 driver files
+include drivers/arm/gic/v3/gicv3.mk
+AGX5_GICv3_SOURCES	:=	\
+			${GICV3_SOURCES}				\
+			plat/common/plat_gicv3.c
+
+PLAT_BL_COMMON_SOURCES	:=	\
+			${AGX5_GICv3_SOURCES}				\
+			drivers/cadence/combo_phy/cdns_combo_phy.c	\
+			drivers/cadence/emmc/cdns_sdmmc.c	\
+			drivers/cadence/nand/cdns_nand.c	\
+			drivers/delay_timer/delay_timer.c		\
+			drivers/delay_timer/generic_delay_timer.c	\
+			drivers/ti/uart/aarch64/16550_console.S		\
+			plat/intel/soc/common/aarch64/platform_common.c	\
+			plat/intel/soc/common/aarch64/plat_helpers.S	\
+			plat/intel/soc/common/drivers/ccu/ncore_ccu.c	\
+			plat/intel/soc/common/drivers/combophy/combophy.c			\
+			plat/intel/soc/common/drivers/sdmmc/sdmmc.c			\
+			plat/intel/soc/common/drivers/ddr/ddr.c			\
+			plat/intel/soc/common/drivers/nand/nand.c			\
+			plat/intel/soc/common/socfpga_delay_timer.c
+
+BL2_SOURCES		+=	\
+		common/desc_image_load.c				\
+		lib/xlat_tables_v2/aarch64/enable_mmu.S	\
+		lib/xlat_tables_v2/xlat_tables_context.c \
+		lib/xlat_tables_v2/xlat_tables_core.c \
+		lib/xlat_tables_v2/aarch64/xlat_tables_arch.c \
+		lib/xlat_tables_v2/xlat_tables_utils.c \
+		drivers/mmc/mmc.c					\
+		drivers/intel/soc/stratix10/io/s10_memmap_qspi.c	\
+		drivers/io/io_storage.c					\
+		drivers/io/io_block.c					\
+		drivers/io/io_fip.c					\
+		drivers/io/io_mtd.c					\
+		drivers/partition/partition.c				\
+		drivers/partition/gpt.c					\
+		drivers/synopsys/emmc/dw_mmc.c				\
+		lib/cpus/aarch64/cortex_a55.S				\
+		lib/cpus/aarch64/cortex_a76.S				\
+		plat/intel/soc/agilex5/soc/agilex5_clock_manager.c	\
+		plat/intel/soc/agilex5/soc/agilex5_memory_controller.c	\
+		plat/intel/soc/agilex5/soc/agilex5_mmc.c			\
+		plat/intel/soc/agilex5/soc/agilex5_pinmux.c		\
+		plat/intel/soc/agilex5/soc/agilex5_power_manager.c	\
+		plat/intel/soc/common/bl2_plat_mem_params_desc.c	\
+		plat/intel/soc/common/socfpga_image_load.c		\
+		plat/intel/soc/common/socfpga_storage.c			\
+		plat/intel/soc/common/socfpga_vab.c				\
+		plat/intel/soc/common/soc/socfpga_emac.c		\
+		plat/intel/soc/common/soc/socfpga_firewall.c		\
+		plat/intel/soc/common/soc/socfpga_handoff.c		\
+		plat/intel/soc/common/soc/socfpga_mailbox.c		\
+		plat/intel/soc/common/soc/socfpga_reset_manager.c	\
+		plat/intel/soc/common/drivers/qspi/cadence_qspi.c	\
+		plat/intel/soc/agilex5/bl2_plat_setup.c			\
+		plat/intel/soc/common/drivers/wdt/watchdog.c
+
+include lib/zlib/zlib.mk
+PLAT_INCLUDES	+=	-Ilib/zlib
+BL2_SOURCES	+=	$(ZLIB_SOURCES)
+
+BL31_SOURCES	+=	\
+		drivers/arm/cci/cci.c					\
+		${XLAT_TABLES_LIB_SRCS}						\
+		lib/cpus/aarch64/aem_generic.S				\
+		lib/cpus/aarch64/cortex_a55.S				\
+		lib/cpus/aarch64/cortex_a76.S				\
+		plat/common/plat_psci_common.c				\
+		plat/intel/soc/agilex5/bl31_plat_setup.c		\
+		plat/intel/soc/agilex5/soc/agilex5_power_manager.c	\
+		plat/intel/soc/common/socfpga_psci.c			\
+		plat/intel/soc/common/socfpga_sip_svc.c			\
+		plat/intel/soc/common/socfpga_sip_svc_v2.c			\
+		plat/intel/soc/common/socfpga_topology.c		\
+		plat/intel/soc/common/sip/socfpga_sip_ecc.c		\
+		plat/intel/soc/common/sip/socfpga_sip_fcs.c		\
+		plat/intel/soc/common/soc/socfpga_mailbox.c		\
+		plat/intel/soc/common/soc/socfpga_reset_manager.c
+
+# Configs for A76 and A55
+HW_ASSISTED_COHERENCY := 1
+USE_COHERENT_MEM := 0
+CTX_INCLUDE_AARCH32_REGS := 0
+ERRATA_A55_1530923 := 1
+
+$(eval $(call add_define,ARM_PRELOADED_DTB_BASE))
+
+PROGRAMMABLE_RESET_ADDRESS	:= 0
+RESET_TO_BL2			:= 1
+BL2_INV_DCACHE			:= 0
+MULTI_CONSOLE_API		:= 1
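+
+# Illustrative build invocation (an assumption, not taken from official docs;
+# adjust the cross-compiler prefix to your toolchain):
+#   make CROSS_COMPILE=aarch64-none-elf- PLAT=agilex5 all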
diff --git a/plat/intel/soc/agilex5/soc/agilex5_clock_manager.c b/plat/intel/soc/agilex5/soc/agilex5_clock_manager.c
new file mode 100644
index 0000000..522bf5d
--- /dev/null
+++ b/plat/intel/soc/agilex5/soc/agilex5_clock_manager.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+
+#include "agilex5_clock_manager.h"
+#include "agilex5_system_manager.h"
+#include "socfpga_handoff.h"
+
+uint32_t wait_pll_lock(void)
+{
+	uint32_t data;
+	uint32_t count = 0;
+
+	do {
+		data = mmio_read_32(CLKMGR_OFFSET + CLKMGR_STAT);
+		count++;
+		if (count >= 1000)
+			return -ETIMEDOUT;
+
+	} while ((CLKMGR_STAT_MAINPLLLOCKED(data) == 0) ||
+			(CLKMGR_STAT_PERPLLLOCKED(data) == 0));
+	return 0;
+}
+
+uint32_t wait_fsm(void)
+{
+	uint32_t data;
+	uint32_t count = 0;
+
+	do {
+		data = mmio_read_32(CLKMGR_OFFSET + CLKMGR_STAT);
+		count++;
+		if (count >= 1000)
+			return -ETIMEDOUT;
+
+	} while (CLKMGR_STAT_BUSY(data) == CLKMGR_STAT_BUSY_E_BUSY);
+
+	return 0;
+}
+
+uint32_t pll_source_sync_config(uint32_t pll_mem_offset, uint32_t data)
+{
+	uint32_t val = 0;
+	uint32_t count = 0;
+	uint32_t req_status = 0;
+
+	val = (CLKMGR_MEM_WR | CLKMGR_MEM_REQ |
+		(data << CLKMGR_MEM_WDAT_OFFSET) | CLKMGR_MEM_ADDR);
+	mmio_write_32(pll_mem_offset, val);
+
+	do {
+		req_status = mmio_read_32(pll_mem_offset);
+		count++;
+	} while ((req_status & CLKMGR_MEM_REQ) && (count < 10));
+
+	if (count >= 10)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+uint32_t pll_source_sync_read(uint32_t pll_mem_offset)
+{
+	uint32_t val = 0;
+	uint32_t rdata = 0;
+	uint32_t count = 0;
+	uint32_t req_status = 0;
+
+	val = (CLKMGR_MEM_REQ | CLKMGR_MEM_ADDR);
+	mmio_write_32(pll_mem_offset, val);
+
+	do {
+		req_status = mmio_read_32(pll_mem_offset);
+		count++;
+	} while ((req_status & CLKMGR_MEM_REQ) && (count < 10));
+
+	if (count >= 10)
+		return -ETIMEDOUT;
+
+	rdata = mmio_read_32(pll_mem_offset + 0x4);
+	INFO("rdata (%x) = %x\n", pll_mem_offset + 0x4, rdata);
+
+	return rdata;
+}
+
+void config_clkmgr_handoff(handoff *hoff_ptr)
+{
+	/* Take both PLL out of reset and power up */
+
+	mmio_setbits_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB,
+			CLKMGR_PLLGLOB_PD_SET_MSK |
+			CLKMGR_PLLGLOB_RST_SET_MSK);
+	mmio_setbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB,
+			CLKMGR_PLLGLOB_PD_SET_MSK |
+			CLKMGR_PLLGLOB_RST_SET_MSK);
+
+	/* PLL lock */
+	wait_pll_lock();
+
+	/* Bypass all mainpllgrp's clocks to input clock ref */
+	mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_BYPASSS, 0xff);
+	/* Bypass all perpllgrp's clocks to input clock ref */
+	mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_BYPASS, 0xff);
+
+	/* Pass clock source frequency into scratch register */
+	mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_1),
+		hoff_ptr->hps_osc_clk_hz);
+	mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_2),
+		hoff_ptr->fpga_clk_hz);
+
+	/* Take all PLLs out of bypass */
+	mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_BYPASS, 0);
+	wait_fsm();
+	mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_BYPASS, 0);
+	wait_fsm();
+
+	/* Enable mainpllgrp's software-managed clock */
+	mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_EN,
+			CLKMGR_MAINPLL_EN_RESET);
+	mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_EN,
+			CLKMGR_PERPLL_EN_RESET);
+}
+
+/* Extract reference clock from platform clock source */
+uint32_t get_ref_clk(uint32_t pllglob)
+{
+	uint32_t arefclkdiv, ref_clk;
+	uint32_t scr_reg;
+
+	switch (CLKMGR_PSRC(pllglob)) {
+	case CLKMGR_PLLGLOB_PSRC_EOSC1:
+		scr_reg = SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_1);
+		ref_clk = mmio_read_32(scr_reg);
+		break;
+	case CLKMGR_PLLGLOB_PSRC_INTOSC:
+		ref_clk = CLKMGR_INTOSC_HZ;
+		break;
+	case CLKMGR_PLLGLOB_PSRC_F2S:
+		scr_reg = SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_2);
+		ref_clk = mmio_read_32(scr_reg);
+		break;
+	default:
+		ref_clk = 0;
+		assert(0);
+		break;
+	}
+
+	arefclkdiv = CLKMGR_PLLGLOB_AREFCLKDIV(pllglob);
+	ref_clk /= arefclkdiv;
+
+	return ref_clk;
+}
+
+/* Calculate clock frequency based on parameter */
+uint32_t get_clk_freq(uint32_t psrc_reg, uint32_t main_pllc, uint32_t per_pllc)
+{
+	uint32_t ref_clk = 0;
+
+	uint32_t clk_psrc, mdiv;
+	uint32_t pllm_reg, pllc_reg, pllc_div, pllglob_reg;
+
+
+	clk_psrc = mmio_read_32(CLKMGR_MAINPLL + psrc_reg);
+	/* TODO: clk_psrc is temporarily forced to zero (main PLL path); see the other TODOs in this file. */
+	clk_psrc = 0;
+
+	switch (clk_psrc) {
+	case 0:
+		pllm_reg = CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLM;
+		pllc_reg = CLKMGR_MAINPLL + main_pllc;
+		pllglob_reg = CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB;
+		break;
+	case CLKMGR_PSRC_PER:
+		pllm_reg = CLKMGR_PERPLL + CLKMGR_PERPLL_PLLM;
+		pllc_reg = CLKMGR_PERPLL + per_pllc;
+		pllglob_reg = CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB;
+		break;
+	default:
+		return 0;
+	}
+	pllm_reg = CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLM;
+	pllc_reg = CLKMGR_MAINPLL + main_pllc;
+	pllglob_reg = CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB;
+
+	ref_clk = get_ref_clk(mmio_read_32(pllglob_reg));
+	mdiv = CLKMGR_PLLM_MDIV(mmio_read_32(pllm_reg));
+	ref_clk *= mdiv;
+
+	pllc_div = mmio_read_32(pllc_reg) & 0x7ff;
+	NOTICE("return = %d Hz\n", (ref_clk / pllc_div));
+
+	/* TODO: Temporarily hard-coded return value; see the other TODOs in this file. */
+	ref_clk = 200000000;
+	return (uint32_t) ref_clk;
+
+}
+
+/* Return L3 interconnect clock */
+uint32_t get_l3_clk(void)
+{
+	uint32_t l3_clk;
+
+	l3_clk = get_clk_freq(CLKMGR_MAINPLL_NOCCLK, CLKMGR_MAINPLL_PLLC1,
+				CLKMGR_PERPLL_PLLC1);
+	return l3_clk;
+}
+
+/* Calculate clock frequency to be used for watchdog timer */
+uint32_t get_wdt_clk(void)
+{
+	uint32_t l3_clk, l4_sys_clk;
+
+	l3_clk = get_l3_clk();
+	l4_sys_clk = l3_clk / 4;
+
+	return l4_sys_clk;
+}
+
+/* Calculate clock frequency to be used for UART driver */
+uint32_t get_uart_clk(void)
+{
+	uint32_t data32, l3_clk, l4_sp_clk;
+
+	l3_clk = get_l3_clk();
+
+	data32 = mmio_read_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_NOCDIV);
+	data32 = (data32 >> 16) & 0x3;
+
+	l4_sp_clk = l3_clk >> data32;
+
+	return l4_sp_clk;
+}
+
+/* Calculate clock frequency to be used for SDMMC driver */
+uint32_t get_mmc_clk(void)
+{
+	uint32_t mmc_clk;
+
+	// TODO: To update when handoff data is ready
+	//uint32_t data32;
+
+	//mmc_clk = get_clk_freq(CLKMGR_ALTERA_SDMMCCTR, CLKMGR_MAINPLL_PLLC3, CLKMGR_PERPLL_PLLC3);
+
+	//data32 = mmio_read_32(CLKMGR_ALTERA + CLKMGR_ALTERA_SDMMCCTR);
+	//data32 = (data32 & 0x7ff) + 1;
+	//mmc_clk = (mmc_clk / data32) / 4;
+
+
+	mmc_clk = get_clk_freq(CLKMGR_MAINPLL_NOCCLK, CLKMGR_MAINPLL_PLLC3,
+				CLKMGR_PERPLL_PLLC3);
+
+	// TODO: To update when handoff data is ready
+	NOTICE("mmc_clk = %d Hz\n", mmc_clk);
+
+	return mmc_clk;
+}
diff --git a/plat/intel/soc/agilex5/soc/agilex5_memory_controller.c b/plat/intel/soc/agilex5/soc/agilex5_memory_controller.c
new file mode 100644
index 0000000..0ddff4a
--- /dev/null
+++ b/plat/intel/soc/agilex5/soc/agilex5_memory_controller.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <errno.h>
+
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/utils.h>
+
+#include "agilex5_memory_controller.h"
+#include <platform_def.h>
+
+#define ALT_CCU_NOC_DI_SET_MSK		0x10
+
+#define DDR_READ_LATENCY_DELAY		40
+#define MAX_MEM_CAL_RETRY		3
+#define PRE_CALIBRATION_DELAY		1
+#define POST_CALIBRATION_DELAY		1
+#define TIMEOUT_EMIF_CALIBRATION	1000
+#define CLEAR_EMIF_DELAY		1000
+#define CLEAR_EMIF_TIMEOUT		1000
+
+#define DDR_CONFIG(A, B, C, R)	(((A) << 24) | ((B) << 16) | ((C) << 8) | (R))
+#define DDR_CONFIG_ELEMENTS	(ARRAY_SIZE(ddr_config))
+
+/* tWR = Min. 15ns constant, see the JEDEC standard, e.g. JESD79-4 for DDR4 */
+#define tWR_IN_NS 15
+
+void configure_hmc_adaptor_regs(void);
+void configure_ddr_sched_ctrl_regs(void);
+
+/* The following are the supported configurations */
+uint32_t ddr_config[] = {
+	/* DDR_CONFIG(Address order, Bank, Column, Row) */
+	/* List for DDR3 or LPDDR3 (pinout order > chip, row, bank, column) */
+	DDR_CONFIG(0, 3, 10, 12),
+	DDR_CONFIG(0, 3,  9, 13),
+	DDR_CONFIG(0, 3, 10, 13),
+	DDR_CONFIG(0, 3,  9, 14),
+	DDR_CONFIG(0, 3, 10, 14),
+	DDR_CONFIG(0, 3, 10, 15),
+	DDR_CONFIG(0, 3, 11, 14),
+	DDR_CONFIG(0, 3, 11, 15),
+	DDR_CONFIG(0, 3, 10, 16),
+	DDR_CONFIG(0, 3, 11, 16),
+	DDR_CONFIG(0, 3, 12, 15),	/* 0xa */
+	/* List for DDR4 only (pinout order > chip, bank, row, column) */
+	DDR_CONFIG(1, 3, 10, 14),
+	DDR_CONFIG(1, 4, 10, 14),
+	DDR_CONFIG(1, 3, 10, 15),
+	DDR_CONFIG(1, 4, 10, 15),
+	DDR_CONFIG(1, 3, 10, 16),
+	DDR_CONFIG(1, 4, 10, 16),
+	DDR_CONFIG(1, 3, 10, 17),
+	DDR_CONFIG(1, 4, 10, 17),
+};
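+
+/*
+ * Illustrative packing example, following the DDR_CONFIG() macro above:
+ * DDR_CONFIG(1, 4, 10, 17) == (1 << 24) | (4 << 16) | (10 << 8) | 17
+ *                          == 0x01040A11
+ */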
+
+static int match_ddr_conf(uint32_t ddr_conf)
+{
+	int i;
+
+	for (i = 0; i < DDR_CONFIG_ELEMENTS; i++) {
+		if (ddr_conf == ddr_config[i])
+			return i;
+	}
+	return 0;
+}
+
+static int check_hmc_clk(void)
+{
+	unsigned long timeout = 0;
+	uint32_t hmc_clk;
+
+	do {
+		hmc_clk = mmio_read_32(AGX_SYSMGR_CORE_HMC_CLK);
+		if (hmc_clk & AGX_SYSMGR_CORE_HMC_CLK_STATUS)
+			break;
+		udelay(1);
+	} while (++timeout < 1000);
+	if (timeout >= 1000)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int clear_emif(void)
+{
+	uint32_t data;
+	unsigned long timeout;
+
+	mmio_write_32(AGX_MPFE_HMC_ADP_RSTHANDSHAKECTRL, 0);
+
+	timeout = 0;
+	do {
+		data = mmio_read_32(AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT);
+		if ((data & AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT_SEQ2CORE) == 0)
+			break;
+		udelay(CLEAR_EMIF_DELAY);
+	} while (++timeout < CLEAR_EMIF_TIMEOUT);
+	if (timeout >= CLEAR_EMIF_TIMEOUT)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int mem_calibration(void)
+{
+	int status;
+	uint32_t data;
+	unsigned long timeout;
+	unsigned long retry = 0;
+
+	udelay(PRE_CALIBRATION_DELAY);
+
+	do {
+		if (retry != 0)
+			INFO("DDR: Retrying DRAM calibration\n");
+
+		timeout = 0;
+		do {
+			data = mmio_read_32(AGX_MPFE_HMC_ADP_DDRCALSTAT);
+			if (AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 1)
+				break;
+			udelay(500);
+		} while (++timeout < TIMEOUT_EMIF_CALIBRATION);
+
+		if (AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 0) {
+			status = clear_emif();
+			if (status)
+				ERROR("Failed to clear Emif\n");
+		} else {
+			break;
+		}
+	} while (++retry < MAX_MEM_CAL_RETRY);
+
+	if (AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 0) {
+		ERROR("DDR: DRAM calibration failed.\n");
+		status = -EIO;
+	} else {
+		INFO("DDR: DRAM calibration success.\n");
+		status = 0;
+	}
+
+	udelay(POST_CALIBRATION_DELAY);
+
+	return status;
+}
+
+int init_hard_memory_controller(void)
+{
+	int status;
+
+	status = check_hmc_clk();
+	if (status) {
+		ERROR("DDR: Error, HMC clock not running\n");
+		return status;
+	}
+
+	status = mem_calibration();
+	if (status) {
+		ERROR("DDR: Memory Calibration Failed\n");
+		return status;
+	}
+
+	configure_hmc_adaptor_regs();
+
+	return 0;
+}
+
+void configure_ddr_sched_ctrl_regs(void)
+{
+	uint32_t data, dram_addr_order, ddr_conf, bank, row, col,
+		rd_to_miss, wr_to_miss, burst_len, burst_len_ddr_clk,
+		burst_len_sched_clk, act_to_act, rd_to_wr, wr_to_rd, bw_ratio,
+		t_rtp, t_rp, t_rcd, rd_latency, tw_rin_clk_cycles,
+		bw_ratio_extended, auto_precharge = 0, act_to_act_bank, faw,
+		faw_bank, bus_rd_to_rd, bus_rd_to_wr, bus_wr_to_rd;
+
+	INFO("Init HPS NOC's DDR Scheduler.\n");
+
+	data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG1);
+	dram_addr_order = AGX_MPFE_IOHMC_CTRLCFG1_CFG_ADDR_ORDER(data);
+
+	data = mmio_read_32(AGX_MPFE_IOHMC_DRAMADDRW);
+
+	col  = IOHMC_DRAMADDRW_COL_ADDR_WIDTH(data);
+	row  = IOHMC_DRAMADDRW_ROW_ADDR_WIDTH(data);
+	bank = IOHMC_DRAMADDRW_BANK_ADDR_WIDTH(data) +
+		IOHMC_DRAMADDRW_BANK_GRP_ADDR_WIDTH(data);
+
+	ddr_conf = match_ddr_conf(DDR_CONFIG(dram_addr_order, bank, col, row));
+
+	if (ddr_conf) {
+		mmio_clrsetbits_32(
+			AGX_MPFE_DDR_MAIN_SCHED_DDRCONF,
+			AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET_MSK,
+			AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET(ddr_conf));
+	} else {
+		ERROR("DDR: Cannot find predefined ddrConf configuration.\n");
+	}
+
+	mmio_write_32(AGX_MPFE_HMC_ADP(ADP_DRAMADDRWIDTH), data);
+
+	data = mmio_read_32(AGX_MPFE_IOHMC_DRAMTIMING0);
+	rd_latency = AGX_MPFE_IOHMC_REG_DRAMTIMING0_CFG_TCL(data);
+
+	data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING0);
+	act_to_act = ACT_TO_ACT(data);
+	t_rcd = ACT_TO_RDWR(data);
+	act_to_act_bank = ACT_TO_ACT_DIFF_BANK(data);
+
+	data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING1);
+	rd_to_wr = RD_TO_WR(data);
+	bus_rd_to_rd = RD_TO_RD_DIFF_CHIP(data);
+	bus_rd_to_wr = RD_TO_WR_DIFF_CHIP(data);
+
+	data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING2);
+	t_rtp = RD_TO_PCH(data);
+
+	data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING3);
+	wr_to_rd = CALTIMING3_WR_TO_RD(data);
+	bus_wr_to_rd = CALTIMING3_WR_TO_RD_DIFF_CHIP(data);
+
+	data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING4);
+	t_rp = PCH_TO_VALID(data);
+
+	data = mmio_read_32(AGX_MPFE_HMC_ADP(HMC_ADP_DDRIOCTRL));
+	bw_ratio = ((HMC_ADP_DDRIOCTRL_IO_SIZE(data) == 0) ? 0 : 1);
+
+	data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG0);
+	burst_len = HMC_ADP_DDRIOCTRL_CTRL_BURST_LENGTH(data);
+	burst_len_ddr_clk = burst_len / 2;
+	burst_len_sched_clk = ((burst_len/2) / 2);
+
+	data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG0);
+	switch (AGX_MPFE_IOHMC_REG_CTRLCFG0_CFG_MEM_TYPE(data)) {
+	case 1:
+		/* DDR4 - 1333MHz */
+		/* 20 (19.995) clock cycles = 15ns */
+		/* Calculate with rounding */
+		tw_rin_clk_cycles = (((tWR_IN_NS * 1333) % 1000) >= 500) ?
+			((tWR_IN_NS * 1333) / 1000) + 1 :
+			((tWR_IN_NS * 1333) / 1000);
+		break;
+	default:
+		/* Others - 1066MHz or slower */
+		/* 16 (15.990) clock cycles = 15ns */
+		/* Calculate with rounding */
+		tw_rin_clk_cycles = (((tWR_IN_NS * 1066) % 1000) >= 500) ?
+			((tWR_IN_NS * 1066) / 1000) + 1 :
+			((tWR_IN_NS * 1066) / 1000);
+		break;
+	}
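+
+	/*
+	 * Worked example of the rounding above: at 1333MHz, tWR_IN_NS * 1333
+	 * is 19995; the remainder 995 >= 500, so the result rounds up to 20
+	 * clock cycles. At 1066MHz the same 15ns gives 15990 -> 16 cycles.
+	 */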
+
+	rd_to_miss = t_rtp + t_rp + t_rcd - burst_len_sched_clk;
+	wr_to_miss = ((rd_latency + burst_len_ddr_clk + 2 + tw_rin_clk_cycles)
+			/ 2) - rd_to_wr + t_rp + t_rcd;
+
+	mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_DDRTIMING,
+		bw_ratio << DDRTIMING_BWRATIO_OFST |
+		wr_to_rd << DDRTIMING_WRTORD_OFST|
+		rd_to_wr << DDRTIMING_RDTOWR_OFST |
+		burst_len_sched_clk << DDRTIMING_BURSTLEN_OFST |
+		wr_to_miss << DDRTIMING_WRTOMISS_OFST |
+		rd_to_miss << DDRTIMING_RDTOMISS_OFST |
+		act_to_act << DDRTIMING_ACTTOACT_OFST);
+
+	data = mmio_read_32(AGX_MPFE_HMC_ADP(HMC_ADP_DDRIOCTRL));
+	bw_ratio_extended = ((ADP_DDRIOCTRL_IO_SIZE(data) == 0) ? 1 : 0);
+
+	mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_DDRMODE,
+		bw_ratio_extended << DDRMODE_BWRATIOEXTENDED_OFST |
+		auto_precharge << DDRMODE_AUTOPRECHARGE_OFST);
+
+	mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_READLATENCY,
+		(rd_latency / 2) + DDR_READ_LATENCY_DELAY);
+
+	data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING9);
+	faw = AGX_MPFE_IOHMC_CALTIMING9_ACT_TO_ACT(data);
+
+	faw_bank = 1; /* Always 1 because we always have 4-bank DDR. */
+
+	mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE,
+		faw_bank << AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAWBANK_OFST |
+		faw << AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAW_OFST |
+		act_to_act_bank << AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_RRD_OFST);
+
+	mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV,
+		((bus_rd_to_rd
+			<< AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_OFST)
+			& AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_MSK) |
+		((bus_rd_to_wr
+			<< AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_OFST)
+			& AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_MSK) |
+		((bus_wr_to_rd
+			<< AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_OFST)
+			& AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_MSK));
+
+}
+
+unsigned long get_physical_dram_size(void)
+{
+	uint32_t data;
+	unsigned long ram_addr_width, ram_ext_if_io_width;
+
+	data = mmio_read_32(AGX_MPFE_HMC_ADP_DDRIOCTRL);
+	switch (AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE(data)) {
+	case 0:
+		ram_ext_if_io_width = 16;
+		break;
+	case 1:
+		ram_ext_if_io_width = 32;
+		break;
+	case 2:
+		ram_ext_if_io_width = 64;
+		break;
+	default:
+		ram_ext_if_io_width = 0;
+		break;
+	}
+
+	data = mmio_read_32(AGX_MPFE_IOHMC_REG_DRAMADDRW);
+	ram_addr_width = IOHMC_DRAMADDRW_CFG_COL_ADDR_WIDTH(data) +
+		IOHMC_DRAMADDRW_CFG_ROW_ADDR_WIDTH(data) +
+		IOHMC_DRAMADDRW_CFG_BANK_ADDR_WIDTH(data) +
+		IOHMC_DRAMADDRW_CFG_BANK_GROUP_ADDR_WIDTH(data) +
+		IOHMC_DRAMADDRW_CFG_CS_ADDR_WIDTH(data);
+
+	return (1 << ram_addr_width) * (ram_ext_if_io_width / 8);
+}
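+
+/*
+ * Illustrative sizing example (hypothetical widths, not a specific board):
+ * with a 10-bit column, 15-bit row, 2-bit bank, 2-bit bank group and 1-bit CS
+ * address width and a 32-bit external interface, ram_addr_width is 30 and the
+ * function above returns (1 << 30) * (32 / 8) = 4GiB.
+ */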
+
+void configure_hmc_adaptor_regs(void)
+{
+	uint32_t data;
+	uint32_t dram_io_width;
+
+	/* Configure DDR data rate */
+	dram_io_width = AGX_MPFE_IOHMC_NIOSRESERVE0_NIOS_RESERVE0(
+		mmio_read_32(AGX_MPFE_IOHMC_REG_NIOSRESERVE0_OFST));
+	dram_io_width = (dram_io_width & 0xFF) >> 5;
+
+	data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG3);
+
+	dram_io_width |= (data & 0x4);
+
+	mmio_write_32(AGX_MPFE_HMC_ADP_DDRIOCTRL, dram_io_width);
+
+	/* Copy dram addr width from IOHMC to HMC ADP */
+	data = mmio_read_32(AGX_MPFE_IOHMC_DRAMADDRW);
+	mmio_write_32(AGX_MPFE_HMC_ADP(ADP_DRAMADDRWIDTH), data);
+
+	/* Enable nonsecure access to DDR */
+	data = get_physical_dram_size();
+
+	if (data < AGX_DDR_SIZE)
+		data = AGX_DDR_SIZE;
+
+	mmio_write_32(AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMIT, data - 1);
+	mmio_write_32(AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMITEXT, 0x1f);
+
+	mmio_write_32(AGX_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMIT, data - 1);
+
+	mmio_write_32(AGX_SOC_NOC_FW_DDR_SCR_ENABLESET, BIT(0) | BIT(8));
+
+	/* ECC enablement */
+	data = mmio_read_32(AGX_MPFE_IOHMC_REG_CTRLCFG1);
+	if (data & (1 << AGX_IOHMC_CTRLCFG1_ENABLE_ECC_OFST)) {
+		mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_ECCCTRL1,
+			AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK |
+			AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK |
+			AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK,
+			AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK |
+			AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK);
+
+		mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_ECCCTRL2,
+			AGX_MPFE_HMC_ADP_ECCCTRL2_OVRW_RB_ECC_EN_SET_MSK |
+			AGX_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK |
+			AGX_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK,
+			AGX_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK |
+			AGX_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK);
+
+		mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_ECCCTRL1,
+			AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK |
+			AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK |
+			AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK,
+			AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK);
+		INFO("Scrubbing ECC\n");
+
+		/* ECC Scrubbing */
+		zeromem((void *)DRAM_BASE, DRAM_SIZE);
+	} else {
+		INFO("ECC is disabled.\n");
+	}
+}
diff --git a/plat/intel/soc/agilex5/soc/agilex5_mmc.c b/plat/intel/soc/agilex5/soc/agilex5_mmc.c
new file mode 100644
index 0000000..48f7341
--- /dev/null
+++ b/plat/intel/soc/agilex5/soc/agilex5_mmc.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2020-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <lib/mmio.h>
+
+#include "agilex5_clock_manager.h"
+#include "agilex5_system_manager.h"
+
+void agx5_mmc_init(void)
+{
+	// TODO: To update when handoff data is ready
+
+	//mmio_clrbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_EN,
+	//	CLKMGR_PERPLL_EN_SDMMCCLK);
+	//mmio_write_32(SOCFPGA_SYSMGR(SDMMC),
+	//	SYSMGR_SDMMC_SMPLSEL(0) | SYSMGR_SDMMC_DRVSEL(3));
+	//mmio_setbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_EN,
+	//	CLKMGR_PERPLL_EN_SDMMCCLK);
+
+}
diff --git a/plat/intel/soc/agilex5/soc/agilex5_pinmux.c b/plat/intel/soc/agilex5/soc/agilex5_pinmux.c
new file mode 100644
index 0000000..50d9e36
--- /dev/null
+++ b/plat/intel/soc/agilex5/soc/agilex5_pinmux.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/mmio.h>
+
+#include "agilex5_pinmux.h"
+#include "agilex5_system_manager.h"
+
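+/*
+ * The tables below are (register offset, value) pairs, mirroring the layout
+ * that config_pinmux() consumes from the handoff data two entries at a time.
+ */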
+const uint32_t sysmgr_pinmux_array_sel[] = {
+	0x00000000, 0x00000001, /* usb */
+	0x00000004, 0x00000001,
+	0x00000008, 0x00000001,
+	0x0000000c, 0x00000001,
+	0x00000010, 0x00000001,
+	0x00000014, 0x00000001,
+	0x00000018, 0x00000001,
+	0x0000001c, 0x00000001,
+	0x00000020, 0x00000001,
+	0x00000024, 0x00000001,
+	0x00000028, 0x00000001,
+	0x0000002c, 0x00000001,
+	0x00000030, 0x00000000, /* emac0 */
+	0x00000034, 0x00000000,
+	0x00000038, 0x00000000,
+	0x0000003c, 0x00000000,
+	0x00000040, 0x00000000,
+	0x00000044, 0x00000000,
+	0x00000048, 0x00000000,
+	0x0000004c, 0x00000000,
+	0x00000050, 0x00000000,
+	0x00000054, 0x00000000,
+	0x00000058, 0x00000000,
+	0x0000005c, 0x00000000,
+	0x00000060, 0x00000008, /* gpio1 */
+	0x00000064, 0x00000008,
+	0x00000068, 0x00000005, /* uart0 tx */
+	0x0000006c, 0x00000005, /* uart0 rx */
+	0x00000070, 0x00000008, /* gpio */
+	0x00000074, 0x00000008,
+	0x00000078, 0x00000004, /* i2c1 */
+	0x0000007c, 0x00000004,
+	0x00000080, 0x00000007, /* jtag */
+	0x00000084, 0x00000007,
+	0x00000088, 0x00000007,
+	0x0000008c, 0x00000007,
+	0x00000090, 0x00000001, /* sdmmc data0 */
+	0x00000094, 0x00000001,
+	0x00000098, 0x00000001,
+	0x0000009c, 0x00000001,
+	0x00000100, 0x00000001,
+	0x00000104, 0x00000001, /* sdmmc.data3 */
+	0x00000108, 0x00000008, /* loan */
+	0x0000010c, 0x00000008, /* gpio */
+	0x00000110, 0x00000008,
+	0x00000114, 0x00000008, /* gpio1.io21 */
+	0x00000118, 0x00000005, /* mdio0.mdio */
+	0x0000011c, 0x00000005  /* mdio0.mdc */
+};
+
+const uint32_t sysmgr_pinmux_array_ctrl[] = {
+	0x00000000, 0x00502c38, /* Q1_1 */
+	0x00000004, 0x00102c38,
+	0x00000008, 0x00502c38,
+	0x0000000c, 0x00502c38,
+	0x00000010, 0x00502c38,
+	0x00000014, 0x00502c38,
+	0x00000018, 0x00502c38,
+	0x0000001c, 0x00502c38,
+	0x00000020, 0x00502c38,
+	0x00000024, 0x00502c38,
+	0x00000028, 0x00502c38,
+	0x0000002c, 0x00502c38,
+	0x00000030, 0x00102c38, /* Q2_1 */
+	0x00000034, 0x00102c38,
+	0x00000038, 0x00502c38,
+	0x0000003c, 0x00502c38,
+	0x00000040, 0x00102c38,
+	0x00000044, 0x00102c38,
+	0x00000048, 0x00502c38,
+	0x0000004c, 0x00502c38,
+	0x00000050, 0x00102c38,
+	0x00000054, 0x00102c38,
+	0x00000058, 0x00502c38,
+	0x0000005c, 0x00502c38,
+	0x00000060, 0x00502c38, /* Q3_1 */
+	0x00000064, 0x00502c38,
+	0x00000068, 0x00102c38,
+	0x0000006c, 0x00502c38,
+	0x000000d0, 0x00502c38,
+	0x000000d4, 0x00502c38,
+	0x000000d8, 0x00542c38,
+	0x000000dc, 0x00542c38,
+	0x000000e0, 0x00502c38,
+	0x000000e4, 0x00502c38,
+	0x000000e8, 0x00102c38,
+	0x000000ec, 0x00502c38,
+	0x000000f0, 0x00502c38, /* Q4_1 */
+	0x000000f4, 0x00502c38,
+	0x000000f8, 0x00102c38,
+	0x000000fc, 0x00502c38,
+	0x00000100, 0x00502c38,
+	0x00000104, 0x00502c38,
+	0x00000108, 0x00102c38,
+	0x0000010c, 0x00502c38,
+	0x00000110, 0x00502c38,
+	0x00000114, 0x00502c38,
+	0x00000118, 0x00542c38,
+	0x0000011c, 0x00102c38
+};
+
+const uint32_t sysmgr_pinmux_array_fpga[] = {
+	0x00000000, 0x00000000,
+	0x00000004, 0x00000000,
+	0x00000008, 0x00000000,
+	0x0000000c, 0x00000000,
+	0x00000010, 0x00000000,
+	0x00000014, 0x00000000,
+	0x00000018, 0x00000000,
+	0x0000001c, 0x00000000,
+	0x00000020, 0x00000000,
+	0x00000028, 0x00000000,
+	0x0000002c, 0x00000000,
+	0x00000030, 0x00000000,
+	0x00000034, 0x00000000,
+	0x00000038, 0x00000000,
+	0x0000003c, 0x00000000,
+	0x00000040, 0x00000000,
+	0x00000044, 0x00000000,
+	0x00000048, 0x00000000,
+	0x00000050, 0x00000000,
+	0x00000054, 0x00000000,
+	0x00000058, 0x0000002a
+};
+
+const uint32_t sysmgr_pinmux_array_iodelay[] = {
+	0x00000000, 0x00000000,
+	0x00000004, 0x00000000,
+	0x00000008, 0x00000000,
+	0x0000000c, 0x00000000,
+	0x00000010, 0x00000000,
+	0x00000014, 0x00000000,
+	0x00000018, 0x00000000,
+	0x0000001c, 0x00000000,
+	0x00000020, 0x00000000,
+	0x00000024, 0x00000000,
+	0x00000028, 0x00000000,
+	0x0000002c, 0x00000000,
+	0x00000030, 0x00000000,
+	0x00000034, 0x00000000,
+	0x00000038, 0x00000000,
+	0x0000003c, 0x00000000,
+	0x00000040, 0x00000000,
+	0x00000044, 0x00000000,
+	0x00000048, 0x00000000,
+	0x0000004c, 0x00000000,
+	0x00000050, 0x00000000,
+	0x00000054, 0x00000000,
+	0x00000058, 0x00000000,
+	0x0000005c, 0x00000000,
+	0x00000060, 0x00000000,
+	0x00000064, 0x00000000,
+	0x00000068, 0x00000000,
+	0x0000006c, 0x00000000,
+	0x00000070, 0x00000000,
+	0x00000074, 0x00000000,
+	0x00000078, 0x00000000,
+	0x0000007c, 0x00000000,
+	0x00000080, 0x00000000,
+	0x00000084, 0x00000000,
+	0x00000088, 0x00000000,
+	0x0000008c, 0x00000000,
+	0x00000090, 0x00000000,
+	0x00000094, 0x00000000,
+	0x00000098, 0x00000000,
+	0x0000009c, 0x00000000,
+	0x00000100, 0x00000000,
+	0x00000104, 0x00000000,
+	0x00000108, 0x00000000,
+	0x0000010c, 0x00000000,
+	0x00000110, 0x00000000,
+	0x00000114, 0x00000000,
+	0x00000118, 0x00000000,
+	0x0000011c, 0x00000000
+};
+
+void config_fpgaintf_mod(void)
+{
+	mmio_write_32(SOCFPGA_SYSMGR(FPGAINTF_EN_2), 1<<8);
+}
+
+void config_pinmux(handoff *hoff_ptr)
+{
+	unsigned int i;
+
+	mmio_write_32(PINMUX_HANDOFF_CONFIG_ADDR, PINMUX_HANDOFF_CONFIG_VAL);
+	for (i = 0; i < PINMUX_HANDOFF_ARRAY_SIZE(hoff_ptr->pinmux_sel_array); i += 2) {
+		mmio_write_32(AGX5_PINMUX_PIN0SEL +
+			hoff_ptr->pinmux_sel_array[i],
+			hoff_ptr->pinmux_sel_array[i + 1]);
+	}
+
+	config_fpgaintf_mod();
+}
+
+void config_peripheral(handoff *hoff_ptr)
+{
+
+	// TODO: This needs to be updated to follow the peripheral_pwr_gate_array handoff change.
+	// Pending SDM to pass over the handoff data.
+	// unsigned int i;
+
+	// for (i = 0; i < 4; i += 2) {
+	//	mmio_write_32(AGX_EDGE_PERIPHERAL +
+	//	hoff_ptr->peripheral_pwr_gate_array[i],
+	//	hoff_ptr->peripheral_pwr_gate_array[i+1]);
+	// }
+
+
+	// TODO: This needs to be updated to follow the peripheral_pwr_gate_array handoff change.
+	mmio_write_32(AGX5_PERIPHERAL,
+			hoff_ptr->peripheral_pwr_gate_array);
+}
diff --git a/plat/intel/soc/agilex5/soc/agilex5_power_manager.c b/plat/intel/soc/agilex5/soc/agilex5_power_manager.c
new file mode 100644
index 0000000..0d81970
--- /dev/null
+++ b/plat/intel/soc/agilex5/soc/agilex5_power_manager.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+
+#include "agilex5_power_manager.h"
+#include "socfpga_reset_manager.h"
+
+int wait_verify_fsm(uint16_t timeout, uint32_t peripheral_handoff)
+{
+	uint32_t data = 0;
+	uint32_t count = 0;
+	uint32_t pgstat = 0;
+
+	/* Wait FSM ready */
+	do {
+		data = mmio_read_32(AGX5_PWRMGR(PSS_PGSTAT));
+		count++;
+		if (count >= 1000) {
+			return -ETIMEDOUT;
+		}
+
+	} while (AGX5_PWRMGR_PSS_STAT_BUSY(data) == AGX5_PWRMGR_PSS_STAT_BUSY_E_BUSY);
+
+	/* Verify PSS SRAM power gated */
+	pgstat = mmio_read_32(AGX5_PWRMGR(PSS_PGSTAT));
+	if (pgstat != (AGX5_PWRMGR_PSS_PGEN_OUT(peripheral_handoff))) {
+		return AGX5_PWRMGR_HANDOFF_PERIPHERAL;
+	}
+
+	return 0;
+}
+
+int pss_sram_power_off(handoff *hoff_ptr)
+{
+	int ret = 0;
+	uint32_t peripheral_handoff = 0;
+
+	/* Get PSS SRAM handoff data */
+	peripheral_handoff = hoff_ptr->peripheral_pwr_gate_array;
+
+	/* Enable firewall for PSS SRAM */
+	mmio_write_32(AGX5_PWRMGR(PSS_FWENCTL),
+			AGX5_PWRMGR_PSS_FWEN(peripheral_handoff));
+
+	/* Wait */
+	udelay(1);
+
+	/* Power gating PSS SRAM */
+	mmio_write_32(AGX5_PWRMGR(PSS_PGENCTL),
+			AGX5_PWRMGR_PSS_PGEN(peripheral_handoff));
+
+	ret = wait_verify_fsm(1000, peripheral_handoff);
+
+	return ret;
+}
+
+void config_pwrmgr_handoff(handoff *hoff_ptr)
+{
+	int ret = 0;
+
+	switch (hoff_ptr->header_magic) {
+	case HANDOFF_MAGIC_PERIPHERAL:
+		ret = pss_sram_power_off(hoff_ptr);
+		break;
+	default:
+		break;
+	}
+
+	if (ret != 0) {
+		ERROR("Config PwrMgr handoff failed. error %d\n", ret);
+		assert(false);
+	}
+}
diff --git a/plat/intel/soc/common/aarch64/plat_helpers.S b/plat/intel/soc/common/aarch64/plat_helpers.S
index 6bf2d82..cbd0121 100644
--- a/plat/intel/soc/common/aarch64/plat_helpers.S
+++ b/plat/intel/soc/common/aarch64/plat_helpers.S
@@ -34,6 +34,11 @@
 func plat_secondary_cold_boot_setup
 	/* Wait until the it gets reset signal from rstmgr gets populated */
 poll_mailbox:
+#if	PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+	mov_imm x0, PLAT_SEC_ENTRY
+	cbz	x0, poll_mailbox
+	br      x0
+#else
 	wfi
 	mov_imm	x0, PLAT_SEC_ENTRY
 	ldr	x1, [x0]
@@ -44,8 +49,13 @@
 	cmp	x3, x4
 	b.ne	poll_mailbox
 	br	x1
+#endif
 endfunc plat_secondary_cold_boot_setup
 
+#if	((PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10) || \
+	(PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX) || \
+	(PLATFORM_MODEL == PLAT_SOCFPGA_N5X))
+
 func platform_is_primary_cpu
 	and	x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
 	cmp	x0, #PLAT_PRIMARY_CPU
@@ -53,6 +63,21 @@
 	ret
 endfunc platform_is_primary_cpu
 
+#else
+
+func platform_is_primary_cpu
+	and	x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
+	cmp x0, #(PLAT_PRIMARY_CPU_A76)
+	b.eq primary_cpu
+	cmp x0, #(PLAT_PRIMARY_CPU_A55)
+	b.eq primary_cpu
+primary_cpu:
+	cset	x0, eq
+	ret
+endfunc platform_is_primary_cpu
+
+#endif
+
 func plat_is_my_cpu_primary
 	mrs	x0, mpidr_el1
 	b   platform_is_primary_cpu
@@ -62,11 +87,27 @@
 	mrs	x0, mpidr_el1
 	and	x1, x0, #MPIDR_CPU_MASK
 	and	x0, x0, #MPIDR_CLUSTER_MASK
+#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+	add	x0, x1, x0, LSR #8
+#else
 	add	x0, x1, x0, LSR #6
+#endif
 	ret
 endfunc plat_my_core_pos
 
 func warm_reset_req
+#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+	bl	plat_is_my_cpu_primary
+	cbnz	x0, warm_reset
+warm_reset:
+	mov_imm x1, PLAT_SEC_ENTRY
+	str	xzr, [x1]
+	mrs	x1, rmr_el3
+	orr	x1, x1, #0x02
+	msr	rmr_el3, x1
+	isb
+	dsb	sy
+#else
 	str	xzr, [x4]
 	bl	plat_is_my_cpu_primary
 	cbz	x0, cpu_in_wfi
@@ -80,11 +121,28 @@
 cpu_in_wfi:
 	wfi
 	b	cpu_in_wfi
+#endif
 endfunc warm_reset_req
 
+/* TODO: Zephyr warm reset test */
+#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
 func plat_get_my_entrypoint
 	ldr	x4, =L2_RESET_DONE_REG
 	ldr	x5, [x4]
+	ldr	x1, =PLAT_L2_RESET_REQ
+	cmp	x1, x5
+	b.eq	zephyr_reset_req
+	mov_imm	x1, PLAT_SEC_ENTRY
+	ldr	x0, [x1]
+	ret
+zephyr_reset_req:
+	ldr	x0, =0x00
+	ret
+endfunc plat_get_my_entrypoint
+#else
+func plat_get_my_entrypoint
+	ldr	x4, =L2_RESET_DONE_REG
+	ldr	x5, [x4]
 	ldr	x1, =L2_RESET_DONE_STATUS
 	cmp	x1, x5
 	b.eq	warm_reset_req
@@ -92,7 +150,7 @@
 	ldr	x0, [x1]
 	ret
 endfunc plat_get_my_entrypoint
-
+#endif
 
 	/* ---------------------------------------------
 	 * int plat_crash_console_init(void)
@@ -138,6 +196,13 @@
 	ret
 endfunc platform_mem_init
 
+	/* --------------------------------------------------------
+	 * func plat_secondary_cpus_bl31_entry
+	 *
+	 * el3_entrypoint_common init param configuration.
+	 * Called very early in the secondary cores' boot process.
+	 * --------------------------------------------------------
+	 */
 func plat_secondary_cpus_bl31_entry
 	el3_entrypoint_common                                   \
 		_init_sctlr=0                                   \
diff --git a/plat/intel/soc/common/drivers/ccu/ncore_ccu.c b/plat/intel/soc/common/drivers/ccu/ncore_ccu.c
index 0148b75..684a625 100644
--- a/plat/intel/soc/common/drivers/ccu/ncore_ccu.c
+++ b/plat/intel/soc/common/drivers/ccu/ncore_ccu.c
@@ -3,7 +3,6 @@
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
-
 #include <assert.h>
 #include <common/debug.h>
 #include <errno.h>
@@ -16,64 +15,52 @@
 
 uint32_t poll_active_bit(uint32_t dir);
 
-static coh_ss_id_t subsystem_id;
+#define SMMU_DMI			1
 
 
+static coh_ss_id_t subsystem_id;
 void get_subsystem_id(void)
 {
 	uint32_t snoop_filter, directory, coh_agent;
-
 	snoop_filter = CSIDR_NUM_SF(mmio_read_32(NCORE_CCU_CSR(NCORE_CSIDR)));
 	directory = CSUIDR_NUM_DIR(mmio_read_32(NCORE_CCU_CSR(NCORE_CSUIDR)));
 	coh_agent = CSUIDR_NUM_CAI(mmio_read_32(NCORE_CCU_CSR(NCORE_CSUIDR)));
-
 	subsystem_id.num_snoop_filter = snoop_filter + 1;
 	subsystem_id.num_directory = directory;
 	subsystem_id.num_coh_agent = coh_agent;
 }
-
 uint32_t directory_init(void)
 {
 	uint32_t dir_sf_mtn, dir_sf_en;
 	uint32_t dir, sf, ret;
-
 	for (dir = 0; dir < subsystem_id.num_directory; dir++) {
 		for (sf = 0; sf < subsystem_id.num_snoop_filter; sf++) {
 			dir_sf_mtn = DIRECTORY_UNIT(dir, NCORE_DIRUSFMCR);
 			dir_sf_en = DIRECTORY_UNIT(dir, NCORE_DIRUSFER);
-
 			/* Initialize All Entries */
 			mmio_write_32(dir_sf_mtn, SNOOP_FILTER_ID(sf));
-
 			/* Poll Active Bit */
 			ret = poll_active_bit(dir);
 			if (ret != 0) {
 				ERROR("Timeout during active bit polling");
 				return -ETIMEDOUT;
 			}
-
 			/* Disable snoop filter, a bit per snoop filter */
 			mmio_clrbits_32(dir_sf_en, BIT(sf));
-
 		}
 	}
-
 	return 0;
 }
-
 uint32_t coherent_agent_intfc_init(void)
 {
 	uint32_t dir, ca, ca_id, ca_type, ca_snoop_en;
-
 	for (dir = 0; dir < subsystem_id.num_directory; dir++) {
 		for (ca = 0; ca < subsystem_id.num_coh_agent; ca++) {
 			ca_snoop_en = DIRECTORY_UNIT(ca, NCORE_DIRUCASER0);
 			ca_id = mmio_read_32(COH_AGENT_UNIT(ca, NCORE_CAIUIDR));
-
 			/* Coh Agent Snoop Enable */
 			if (CACHING_AGENT_BIT(ca_id))
 				mmio_setbits_32(ca_snoop_en, BIT(ca));
-
 			/* Coh Agent Snoop DVM Enable */
 			ca_type = CACHING_AGENT_TYPE(ca_id);
 			if (ca_type == ACE_W_DVM || ca_type == ACE_L_W_DVM)
@@ -81,24 +68,19 @@
 				BIT(ca));
 		}
 	}
-
 	return 0;
 }
-
 uint32_t poll_active_bit(uint32_t dir)
 {
 	uint32_t timeout = 80000;
 	uint32_t poll_dir =  DIRECTORY_UNIT(dir, NCORE_DIRUSFMAR);
-
 	while (timeout > 0) {
 		if (mmio_read_32(poll_dir) == 0)
 			return 0;
 		timeout--;
 	}
-
 	return -1;
 }
-
 void bypass_ocram_firewall(void)
 {
 	mmio_clrbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF1),
@@ -110,7 +92,6 @@
 	mmio_clrbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF4),
 			OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK);
 }
-
 void ncore_enable_ocram_firewall(void)
 {
 	mmio_setbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF1),
@@ -122,15 +103,44 @@
 	mmio_setbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF4),
 			OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK);
 }
-
 uint32_t init_ncore_ccu(void)
 {
 	uint32_t status;
-
 	get_subsystem_id();
 	status = directory_init();
 	status = coherent_agent_intfc_init();
 	bypass_ocram_firewall();
-
 	return status;
 }
+
+void setup_smmu_stream_id(void)
+{
+	/* Configure Stream ID for Agilex5 */
+	mmio_write_32(SOCFPGA_SYSMGR(DMA_TBU_STREAM_ID_AX_REG_0_DMA0), DMA0);
+	mmio_write_32(SOCFPGA_SYSMGR(DMA_TBU_STREAM_ID_AX_REG_0_DMA1), DMA1);
+	mmio_write_32(SOCFPGA_SYSMGR(SDM_TBU_STREAM_ID_AX_REG_1_SDM), SDM);
+	/* Reg map shows USB2 but Linux uses USB0? */
+	mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_ID_AX_REG_2_USB2), USB0);
+	/* Reg map shows USB3 but Linux uses USB1? */
+	mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_ID_AX_REG_2_USB3), USB1);
+	mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_ID_AX_REG_2_SDMMC), SDMMC);
+	mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_ID_AX_REG_2_NAND), NAND);
+	/* To be confirmed: ETR (CoreSight debug) */
+	mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_ID_AX_REG_2_ETR), CORE_SIGHT_DEBUG);
+	mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_ID_AX_REG_2_TSN0), TSN0);
+	mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_ID_AX_REG_2_TSN1), TSN1);
+	mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_ID_AX_REG_2_TSN2), TSN2);
+
+	/* Enabled Stream ctrl register for Agilex5 */
+	mmio_write_32(SOCFPGA_SYSMGR(DMA_TBU_STREAM_CTRL_REG_0_DMA0), ENABLE_STREAMID);
+	mmio_write_32(SOCFPGA_SYSMGR(DMA_TBU_STREAM_CTRL_REG_0_DMA1), ENABLE_STREAMID);
+	mmio_write_32(SOCFPGA_SYSMGR(SDM_TBU_STREAM_CTRL_REG_1_SDM), ENABLE_STREAMID_SECURE_TX);
+	mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_CTRL_REG_2_USB2), ENABLE_STREAMID);
+	mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_CTRL_REG_2_USB3), ENABLE_STREAMID);
+	mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_CTRL_REG_2_SDMMC), ENABLE_STREAMID);
+	mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_CTRL_REG_2_NAND), ENABLE_STREAMID);
+	mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_CTRL_REG_2_ETR), ENABLE_STREAMID);
+	mmio_write_32(SOCFPGA_SYSMGR(TSN_TBU_STREAM_CTRL_REG_3_TSN0), ENABLE_STREAMID);
+	mmio_write_32(SOCFPGA_SYSMGR(TSN_TBU_STREAM_CTRL_REG_3_TSN1), ENABLE_STREAMID);
+	mmio_write_32(SOCFPGA_SYSMGR(TSN_TBU_STREAM_CTRL_REG_3_TSN2), ENABLE_STREAMID);
+}
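
For orientation, here is a minimal sketch of how a platform boot stage might drive the functions added above; the wrapper function, its placement and the error-handling policy are assumptions, only init_ncore_ccu(), ncore_enable_ocram_firewall() and setup_smmu_stream_id() come from this patch.

/* Illustrative caller only (not part of the patch). */
static void plat_ccu_and_smmu_setup(void)
{
	uint32_t status;

	/* Subsystem discovery, directory/coherent-agent init, OCRAM firewall bypass */
	status = init_ncore_ccu();
	if (status != 0U) {
		ERROR("Ncore CCU initialization failed\n");
	}

	/* Re-enable the OCRAM firewall once early OCRAM use is done (assumed policy) */
	ncore_enable_ocram_firewall();

	/* Program the Agilex5 SMMU TBU stream IDs */
	setup_smmu_stream_id();
}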
diff --git a/plat/intel/soc/common/drivers/ccu/ncore_ccu.h b/plat/intel/soc/common/drivers/ccu/ncore_ccu.h
index 3f662ff..6cdbeb8 100644
--- a/plat/intel/soc/common/drivers/ccu/ncore_ccu.h
+++ b/plat/intel/soc/common/drivers/ccu/ncore_ccu.h
@@ -1,99 +1,420 @@
 /*
- * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
-
 #ifndef NCORE_CCU_H
 #define NCORE_CCU_H
 
+#include <stdbool.h>
+#include <stdint.h>
 
-#define NCORE_CCU_OFFSET		0xf7000000
+#ifndef CCU_ACTIVATE_COH_FPGA
+#define CCU_ACTIVATE_COH_FPGA 0
+#endif
+// Address map for ccu init
+#define addr_CAIUIDR1				(0x1C000000)
+#define addr_GRBUNRRUCR				(0x1c0ffff8)
+#define base_addr_NRS_CAIU0			(0x1c000000)
+#define base_addr_NRS_NCAIU0			(0x1c001000)
+#define base_addr_NRS_NCAIU1			(0x1c002000)
+#define base_addr_NRS_NCAIU2			(0x1c003000)
+#define base_addr_NRS_NCAIU3			(0x1c004000)
+#define base_addr_NRS_DCE0			(0x1c005000)
+#define base_addr_NRS_DCE1			(0x1c006000)
+//#define base_addr_NRS_DMI0			(0x1c007000)
+//#define base_addr_NRS_DMI1			(0x1c008000)
+//DMI
+#define ALT_CCU_CCU_DMI0_DMIUSMCTCR_ADDR	0x1C007300
+#define ALT_CCU_CCU_DMI1_DMIUSMCTCR_ADDR	0x1C008300
+//DSU
+#define ALT_CCU_DSU_CAIUAMIGR_ADDR		0x1C0003C0
+#define ALT_CCU_DSU_CAIUMIFSR_ADDR		0x1C0003C4
+#define ALT_CCU_DSU_CAIUGPRBLR1_ADDR		0x1C000414
+#define ALT_CCU_DSU_CAIUGPRBHR1_ADDR		0x1C000418
+#define ALT_CCU_DSU_CAIUGPRAR1_ADDR		0x1C000410
+#define ALT_CCU_DSU_CAIUGPRBLR2_ADDR		0x1C000424
+#define ALT_CCU_DSU_CAIUGPRBHR2_ADDR		0x1C000428
+#define ALT_CCU_DSU_CAIUGPRAR2_ADDR		0x1C000420
+#define ALT_CCU_DSU_CAIUGPRBLR4_ADDR		0x1C000444
+#define ALT_CCU_DSU_CAIUGPRBHR4_ADDR		0x1C000448
+#define ALT_CCU_DSU_CAIUGPRAR4_ADDR		0x1C000440
+#define ALT_CCU_DSU_CAIUGPRBLR5_ADDR		0x1C000454
+#define ALT_CCU_DSU_CAIUGPRBHR5_ADDR		0x1C000458
+#define ALT_CCU_DSU_CAIUGPRAR5_ADDR		0x1C000450
+#define ALT_CCU_DSU_CAIUGPRBLR6_ADDR		0x1C000464
+#define ALT_CCU_DSU_CAIUGPRBHR6_ADDR		0x1C000468
+#define ALT_CCU_DSU_CAIUGPRAR6_ADDR		0x1C000460
+#define ALT_CCU_DSU_CAIUGPRBLR7_ADDR		0x1C000474
+#define ALT_CCU_DSU_CAIUGPRBHR7_ADDR		0x1C000478
+#define ALT_CCU_DSU_CAIUGPRAR7_ADDR		0x1C000470
+#define ALT_CCU_DSU_CAIUGPRBLR8_ADDR		0x1C000484
+#define ALT_CCU_DSU_CAIUGPRBHR8_ADDR		0x1C000488
+#define ALT_CCU_DSU_CAIUGPRAR8_ADDR		0x1C000480
+#define ALT_CCU_DSU_CAIUGPRBLR9_ADDR		0x1C000494
+#define ALT_CCU_DSU_CAIUGPRBHR9_ADDR		0x1C000498
+#define ALT_CCU_DSU_CAIUGPRAR9_ADDR		0x1C000490
+#define ALT_CCU_DSU_CAIUGPRBLR10_ADDR		0x1C0004A4
+#define ALT_CCU_DSU_CAIUGPRBHR10_ADDR		0x1C0004A8
+#define ALT_CCU_DSU_CAIUGPRAR10_ADDR		0x1C0004A0
+//GIC
+#define ALT_CCU_GIC_M_XAIUAMIGR_ADDR		0x1C0023C0
+#define ALT_CCU_GIC_M_XAIUMIFSR_ADDR		0x1C0023C4
+#define ALT_CCU_GIC_M_XAIUGPRBLR1_ADDR		0x1C002414
+#define ALT_CCU_GIC_M_XAIUGPRBHR1_ADDR		0x1C002418
+#define ALT_CCU_GIC_M_XAIUGPRAR1_ADDR		0x1C002410
+#define ALT_CCU_GIC_M_XAIUGPRBLR6_ADDR		0x1C002464
+#define ALT_CCU_GIC_M_XAIUGPRBHR6_ADDR		0x1C002468
+#define ALT_CCU_GIC_M_XAIUGPRAR6_ADDR		0x1C002460
+#define ALT_CCU_GIC_M_XAIUGPRBLR8_ADDR		0x1C002484
+#define ALT_CCU_GIC_M_XAIUGPRBHR8_ADDR		0x1C002488
+#define ALT_CCU_GIC_M_XAIUGPRAR8_ADDR		0x1C002480
+#define ALT_CCU_GIC_M_XAIUGPRBLR10_ADDR		0x1C0024A4
+#define ALT_CCU_GIC_M_XAIUGPRBHR10_ADDR		0x1C0024A8
+#define ALT_CCU_GIC_M_XAIUGPRAR10_ADDR		0x1C0024A0
+//FPGA2SOC
+#define ALT_CCU_FPGA2SOC_XAIUAMIGR_ADDR		0x1C0013C0
+#define ALT_CCU_FPGA2SOC_XAIUMIFSR_ADDR		0x1C0013C4
+#define ALT_CCU_FPGA2SOC_XAIUGPRBLR1_ADDR	0x1C001414
+#define ALT_CCU_FPGA2SOC_XAIUGPRBHR1_ADDR	0x1C001418
+#define ALT_CCU_FPGA2SOC_XAIUGPRAR1_ADDR	0x1C001410
+#define ALT_CCU_FPGA2SOC_XAIUGPRBLR6_ADDR	0x1C001464
+#define ALT_CCU_FPGA2SOC_XAIUGPRBHR6_ADDR	0x1C001468
+#define ALT_CCU_FPGA2SOC_XAIUGPRAR6_ADDR	0x1C001460
+#define ALT_CCU_FPGA2SOC_XAIUGPRBLR8_ADDR	0x1C001484
+#define ALT_CCU_FPGA2SOC_XAIUGPRBHR8_ADDR	0x1C001488
+#define ALT_CCU_FPGA2SOC_XAIUGPRAR8_ADDR	0x1C001480
+#define ALT_CCU_FPGA2SOC_XAIUGPRBLR10_ADDR	0x1C0014A4
+#define ALT_CCU_FPGA2SOC_XAIUGPRBHR10_ADDR	0x1C0014A8
+#define ALT_CCU_FPGA2SOC_XAIUGPRAR10_ADDR	0x1C0014A0
+//TCU
+#define ALT_CCU_TCU_BASE 0x1C003000
+#define ALT_CCU_TCU_XAIUAMIGR_ADDR		(ALT_CCU_TCU_BASE + 0x03C0)
+#define ALT_CCU_TCU_XAIUMIFSR_ADDR		(ALT_CCU_TCU_BASE + 0x03C4)
+#define ALT_CCU_TCU_XAIUGPRBLR0_ADDR		(ALT_CCU_TCU_BASE + 0x0404)
+#define ALT_CCU_TCU_XAIUGPRBHR0_ADDR		(ALT_CCU_TCU_BASE + 0x0408)
+#define ALT_CCU_TCU_XAIUGPRAR0_ADDR		(ALT_CCU_TCU_BASE + 0x0400)
+#define ALT_CCU_TCU_XAIUGPRBLR1_ADDR		(ALT_CCU_TCU_BASE + 0x0414)
+#define ALT_CCU_TCU_XAIUGPRBHR1_ADDR		(ALT_CCU_TCU_BASE + 0x0418)
+#define ALT_CCU_TCU_XAIUGPRAR1_ADDR		(ALT_CCU_TCU_BASE + 0x0410)
+#define ALT_CCU_TCU_XAIUGPRBLR2_ADDR		(ALT_CCU_TCU_BASE + 0x0424)
+#define ALT_CCU_TCU_XAIUGPRBHR2_ADDR		(ALT_CCU_TCU_BASE + 0x0428)
+#define ALT_CCU_TCU_XAIUGPRAR2_ADDR		(ALT_CCU_TCU_BASE + 0x0420)
+#define ALT_CCU_TCU_XAIUGPRBLR6_ADDR		0x1C003464
+#define ALT_CCU_TCU_XAIUGPRBHR6_ADDR		0x1C003468
+#define ALT_CCU_TCU_XAIUGPRAR6_ADDR		0x1C003460
+#define ALT_CCU_TCU_XAIUGPRBLR8_ADDR		0x1C003484
+#define ALT_CCU_TCU_XAIUGPRBHR8_ADDR		0x1C003488
+#define ALT_CCU_TCU_XAIUGPRAR8_ADDR		0x1C003480
+#define ALT_CCU_TCU_XAIUGPRBLR10_ADDR		0x1C0034A4
+#define ALT_CCU_TCU_XAIUGPRBHR10_ADDR		0x1C0034A8
+#define ALT_CCU_TCU_XAIUGPRAR10_ADDR		0x1C0034A0
+//IOM
+#define ALT_CCU_CCU_IOM_XAIUAMIGR_ADDR		0x1C0043C0
+#define ALT_CCU_CCU_IOM_XAIUMIFSR_ADDR		0x1C0013C4
+#define ALT_CCU_IOM_XAIUGPRBLR1_ADDR		0x1C001414
+#define ALT_CCU_IOM_XAIUGPRBHR1_ADDR		0x1C001418
+#define ALT_CCU_IOM_XAIUGPRAR1_ADDR		0x1C001410
+#define ALT_CCU_CCU_IOM_XAIUGPRBLR6_ADDR	0x1C001464
+#define ALT_CCU_CCU_IOM_XAIUGPRBHR6_ADDR	0x1C001468
+#define ALT_CCU_CCU_IOM_XAIUGPRAR6_ADDR		0x1C001460
+#define ALT_CCU_CCU_IOM_XAIUGPRBLR8_ADDR	0x1C001484
+#define ALT_CCU_CCU_IOM_XAIUGPRBHR8_ADDR	0x1C001488
+#define ALT_CCU_CCU_IOM_XAIUGPRAR8_ADDR		0x1C001480
+#define ALT_CCU_CCU_IOM_XAIUGPRBLR10_ADDR	0x1C0014A4
+#define ALT_CCU_CCU_IOM_XAIUGPRBHR10_ADDR	0x1C0014A8
+#define ALT_CCU_CCU_IOM_XAIUGPRAR10_ADDR	0x1C0014A0
+//DCE
+#define ALT_CCU_DCE0_DCEUAMIGR_ADDR		0x1C0053C0
+#define ALT_CCU_DCE0_DCEUMIFSR_ADDR		0x1C0053C4
+#define ALT_CCU_DCE0_DCEUGPRBLR6_ADDR		0x1C005464
+#define ALT_CCU_DCE0_DCEUGPRBHR6_ADDR		0x1C005468
+#define ALT_CCU_DCE0_DCEUGPRAR6_ADDR		0x1C005460
+#define ALT_CCU_DCE0_DCEUGPRBLR8_ADDR		0x1C005484
+#define ALT_CCU_DCE0_DCEUGPRBHR8_ADDR		0x1C005488
+#define ALT_CCU_DCE0_DCEUGPRAR8_ADDR		0x1C005480
+#define ALT_CCU_DCE0_DCEUGPRBLR10_ADDR		0x1C0054A4
+#define ALT_CCU_DCE0_DCEUGPRBHR10_ADDR		0x1C0054A8
+#define ALT_CCU_DCE0_DCEUGPRAR10_ADDR		0x1C0054A0
+#define ALT_CCU_DCE1_DCEUAMIGR_ADDR		0x1C0063C0
+#define ALT_CCU_DCE1_DCEUMIFSR_ADDR		0x1C0063C4
+#define ALT_CCU_DCE1_DCEUGPRBLR6_ADDR		0x1C006464
+#define ALT_CCU_DCE1_DCEUGPRBHR6_ADDR		0x1C006468
+#define ALT_CCU_DCE1_DCEUGPRAR6_ADDR		0x1C006460
+#define ALT_CCU_DCE1_DCEUGPRBLR8_ADDR		0x1C006484
+#define ALT_CCU_DCE1_DCEUGPRBHR8_ADDR		0x1C006488
+#define ALT_CCU_DCE1_DCEUGPRAR8_ADDR		0x1C006480
+#define ALT_CCU_DCE1_DCEUGPRBLR10_ADDR		0x1C0064A4
+#define ALT_CCU_DCE1_DCEUGPRBHR10_ADDR		0x1C0064A8
+#define ALT_CCU_DCE1_DCEUGPRAR10_ADDR		0x1C0064A0
+#define offset_NRS_GPRAR0			(0x400)
+#define offset_NRS_GPRBLR0			(0x404)
+#define offset_NRS_GPRBHR0			(0x408)
+#define offset_NRS_GPRAR1			(0x410)
+#define offset_NRS_GPRBLR1			(0x414)
+#define offset_NRS_GPRBHR1			(0x418)
+#define offset_NRS_GPRAR2			(0x420)
+#define offset_NRS_GPRBLR2			(0x424)
+#define offset_NRS_GPRBHR2			(0x428)
+#define offset_NRS_GPRAR3			(0x430)
+#define offset_NRS_GPRBLR3			(0x434)
+#define offset_NRS_GPRBHR3			(0x438)
+#define offset_NRS_GPRAR4			(0x440)
+#define offset_NRS_GPRBLR4			(0x444)
+#define offset_NRS_GPRBHR4			(0x448)
+#define offset_NRS_GPRAR5			(0x450)
+#define offset_NRS_GPRBLR5			(0x454)
+#define offset_NRS_GPRBHR5			(0x458)
+#define offset_NRS_GPRAR6			(0x460)
+#define offset_NRS_GPRBLR6			(0x464)
+#define offset_NRS_GPRBHR6			(0x468)
+#define offset_NRS_GPRAR7			(0x470)
+#define offset_NRS_GPRBLR7			(0x474)
+#define offset_NRS_GPRBHR7			(0x478)
+#define offset_NRS_GPRAR8			(0x480)
+#define offset_NRS_GPRBLR8			(0x484)
+#define offset_NRS_GPRBHR8			(0x488)
+#define offset_NRS_GPRAR9			(0x490)
+#define offset_NRS_GPRBLR9			(0x494)
+#define offset_NRS_GPRBHR9			(0x498)
+#define offset_NRS_GPRAR10			(0x4a0)
+#define offset_NRS_GPRBLR10			(0x4a4)
+#define offset_NRS_GPRBHR10			(0x4a8)
+#define offset_NRS_AMIGR			(0x3c0)
+#define offset_NRS_MIFSR			(0x3c4)
+#define offset_NRS_DMIUSMCTCR			(0x300)
+#define base_addr_DII0_PSSPERIPHS		(0x10000)
+#define base_addr_DII0_LWHPS2FPGA		(0x20000)
+#define base_addr_DII0_HPS2FPGA_1G		(0x40000)
+#define base_addr_DII0_HPS2FPGA_15G		(0x400000)
+#define base_addr_DII0_HPS2FPGA_240G		(0x4000000)
+#define base_addr_DII1_MPFEREGS			(0x18000)
+#define base_addr_DII2_GICREGS			(0x1D000)
+#define base_addr_DII3_OCRAM			(0x0)
+#define base_addr_BHR				(0x0)
+#define base_addr_DMI_SDRAM_2G			(0x80000)
+#define base_addr_DMI_SDRAM_30G			(0x800000)
+#define base_addr_DMI_SDRAM_480G		(0x8000000)
+// ((0x0<<9) | (0xf<<20) | (0x1<<30) | (0x1<<31))
+#define wr_DII0_PSSPERIPHS			0xC0F00000
+// ((0x0<<9) | (0x11<<20) | (0x1<<30) | (0x1<<31))
+#define wr_DII0_LWHPS2FPGA			0xC1100000
+// ((0x0<<9) | (0x12<<20) | (0x1<<30) | (0x1<<31))
+#define wr_DII0_HPS2FPGA_1G			0xC1200000
+// ((0x0<<9) | (0x16<<20) | (0x1<<30) | (0x1<<31))
+#define wr_DII0_HPS2FPGA_15G			0xC1600000
+// ((0x0<<9) | (0x1a<<20) | (0x1<<30) | (0x1<<31))
+#define wr_DII0_HPS2FPGA_240G			0xC1A00000
+// ((0x1<<9) | (0xe<<20) | (0x1<<30) | (0x1<<31))
+#define wr_DII1_MPFEREGS			0xC0E00200
+// ((0x2<<9) | (0x8<<20) | (0x1<<30) | (0x1<<31))
+#define wr_DII2_GICREGS				0xC0800400
+// ((0x3<<9) | (0x9<<20) | (0x1<<30) | (0x1<<31))
+#define wr_DII3_OCRAM				0xC0900600
+// ((0x0<<9) | (0x12<<20) | (0x0<<30) | (0x1<<31))
+#define wr_DMI_SDRAM_1G_ORDERED			0x81200000
+// ((0x1<<1) | (0x1<<2) | (0x0<<9) | (0x12<<20) | (0x0<<30) | (0x1<<31))
+#define wr_DMI_SDRAM_1G				0x81200006
+// ((0x0<<9) | (0x13<<20) | (0x0<<30) | (0x1<<31))
+#define wr_DMI_SDRAM_2G_ORDERED			0x81300000
+// ((0x1<<1) | (0x1<<2) | (0x0<<9) | (0x13<<20) | (0x0<<30) | (0x1<<31))
+#define wr_DMI_SDRAM_2G				0x81300006
+// ((0x0<<9) | (0x16<<20) | (0x0<<30) | (0x1<<31))
+#define wr_DMI_SDRAM_15G_ORDERED		0x81600000
+// ((0x1<<1) | (0x1<<2) | (0x0<<9) | (0x16<<20) | (0x0<<30) | (0x1<<31))
+#define wr_DMI_SDRAM_15G			0x81600006
+// ((0x0<<9) | (0x17<<20) | (0x0<<30) | (0x1<<31))
+#define wr_DMI_SDRAM_30G_ORDERED		0x81700000
+// ((0x1<<1) | (0x1<<2) | (0x0<<9) | (0x17<<20) | (0x0<<30) | (0x1<<31))
+#define wr_DMI_SDRAM_30G			0x81700006
+// ((0x0<<9) | (0x1a<<20) | (0x0<<30) | (0x1<<31))
+#define wr_DMI_SDRAM_240G_ORDERED		0x81a00000
+// ((0x1<<1) | (0x1<<2) | (0x0<<9) | (0x1a<<20) | (0x0<<30) | (0x1<<31))
+#define wr_DMI_SDRAM_240G			0x81a00006
+// ((0x0<<9) | (0x1b<<20) | (0x0<<30) | (0x1<<31))
+#define wr_DMI_SDRAM_480G_ORDERED		0x81b00000
+// ((0x1<<1) | (0x1<<2) | (0x0<<9) | (0x1b<<20) | (0x0<<30) | (0x1<<31))
+#define wr_DMI_SDRAM_480G			0x81b00006
 
+typedef enum CCU_REGION_SECURITY_e {
+	/* Allow secure accesses only. */
+	CCU_REGION_SECURITY_SECURE_ONLY,
+	/* Allow non-secure accesses only. */
+	CCU_REGION_SECURITY_NON_SECURE_ONLY,
+	/* Allow accesses of any security state. */
+	CCU_REGION_SECURITY_DONT_CARE
+} CCU_REGION_SECURITY_t;
+
+typedef enum CCU_REGION_PRIVILEGE_e {
+	/* Allow privileged accesses only. */
+	CCU_REGION_PRIVILEGE_PRIVILEGED_ONLY,
+	/* Allow unprivileged accesses only. */
+	CCU_REGION_PRIVILEGE_NON_PRIVILEGED_ONLY,
+	/* Allow accesses of any privilege. */
+	CCU_REGION_PRIVILEGE_DONT_CARE
+} CCU_REGION_PRIVILEGE_t;
+//
+// Initializes the CCU by enabling all regions except RAM 1 - 5.
+// This is needed because of an RTL change around 2016.02.24.
+//
+// Runtime measurement:
+//  - arm     : 14,830,000 ps (2016.05.31; sanity/printf_aarch32)
+//  - aarch64 : 14,837,500 ps (2016.05.31; sanity/printf)
+//
+// Runtime history:
+//  - arm     : 20,916,668 ps (2016.05.30; sanity/printf_aarch32)
+//  - aarch64 : 20,924,168 ps (2016.05.30; sanity/printf)
+//
+int ccu_hps_init(void);
 
-/* Coherent Sub-System Address Map */
-#define NCORE_CAIU_OFFSET		0x00000
-#define NCORE_CAIU_SIZE			0x01000
+typedef enum ccu_hps_ram_region_e {
+	ccu_hps_ram_region_ramspace0 = 0,
+	ccu_hps_ram_region_ramspace1 = 1,
+	ccu_hps_ram_region_ramspace2 = 2,
+	ccu_hps_ram_region_ramspace3 = 3,
+	ccu_hps_ram_region_ramspace4 = 4,
+	ccu_hps_ram_region_ramspace5 = 5,
+} ccu_hps_ram_region_t;
 
-#define NCORE_NCBU_OFFSET		0x60000
-#define NCORE_NCBU_SIZE			0x01000
+// Disables a RAM (OCRAM) region with the given ID.
+int ccu_hps_ram_region_disable(int id);
 
-#define NCORE_DIRU_OFFSET		0x80000
-#define NCORE_DIRU_SIZE			0x01000
+// Enables a RAM (OCRAM) region with the given ID.
+int ccu_hps_ram_region_enable(int id);
 
-#define NCORE_CMIU_OFFSET		0xc0000
-#define NCORE_CMIU_SIZE			0x01000
+// Attempts to remap a RAM (OCRAM) region with the given ID to span the given
+// start and end address. It also assigns the security and privilege policy.
+// Regions must be a power-of-two size with a minimum size of 64B.
+int ccu_hps_ram_region_remap(int id, uintptr_t start, uintptr_t end,
+			     CCU_REGION_SECURITY_t security,
+			     CCU_REGION_PRIVILEGE_t privilege);
 
-#define NCORE_CSR_OFFSET		0xff000
-#define NCORE_CSADSERO			0x00040
-#define NCORE_CSUIDR			0x00ff8
-#define NCORE_CSIDR			0x00ffc
+// Verifies that the enabled RAM (OCRAM) regions do not overlap.
+int ccu_hps_ram_validate(void);
 
-/* Directory Unit Register Map */
-#define NCORE_DIRUSFER			0x00010
-#define NCORE_DIRUMRHER			0x00070
-#define NCORE_DIRUSFMCR			0x00080
-#define NCORE_DIRUSFMAR			0x00084
+typedef enum ccu_hps_mem_region_e {
+	ccu_hps_mem_region_ddrspace0  = 0,
+	ccu_hps_mem_region_memspace0  = 1,
+	ccu_hps_mem_region_memspace1a = 2,
+	ccu_hps_mem_region_memspace1b = 3,
+	ccu_hps_mem_region_memspace1c = 4,
+	ccu_hps_mem_region_memspace1d = 5,
+	ccu_hps_mem_region_memspace1e = 6,
+} ccu_hps_mem_region_t;
 
-/* Coherent Agent Interface Unit Register Map */
-#define NCORE_CAIUIDR			0x00ffc
+// Disables mem0 (DDR) region with the given ID.
+int ccu_hps_mem0_region_disable(int id);
 
-/* Snoop Enable Register */
-#define NCORE_DIRUCASER0		0x00040
-#define NCORE_DIRUCASER1		0x00044
-#define NCORE_DIRUCASER2		0x00048
-#define NCORE_DIRUCASER3		0x0004c
+// Enables mem0 (DDR) region with the given ID.
+int ccu_hps_mem0_region_enable(int id);
 
-#define NCORE_CSADSER0			0x00040
-#define NCORE_CSADSER1			0x00044
-#define NCORE_CSADSER2			0x00048
-#define NCORE_CSADSER3			0x0004c
+// Attempts to remap mem0 (DDR) region with the given ID to span the given
+// start and end address. It also assigns the security and privilege policy.
+// Regions must be a power-of-two in size with a minimum size of 64B.
+int ccu_hps_mem0_region_remap(int id, uintptr_t start, uintptr_t end,
+			      CCU_REGION_SECURITY_t security,
+			      CCU_REGION_PRIVILEGE_t privilege);
 
-/* Protocols Definition */
-#define ACE_W_DVM			0
-#define ACE_L_W_DVM			1
-#define ACE_WO_DVM			2
-#define ACE_L_WO_DVM			3
+// Verifies that the enabled mem0 (DDR) regions do not overlap.
+int ccu_hps_mem0_validate(void);
 
-/* Bypass OC Ram Firewall */
-#define NCORE_FW_OCRAM_BLK_BASE		0x100200
-#define NCORE_FW_OCRAM_BLK_CGF1		0x04
-#define NCORE_FW_OCRAM_BLK_CGF2		0x08
-#define NCORE_FW_OCRAM_BLK_CGF3		0x0c
-#define NCORE_FW_OCRAM_BLK_CGF4		0x10
+typedef enum ccu_hps_ios_region_e {
+	ccu_hps_ios_region_iospace0a = 0,
+	ccu_hps_ios_region_iospace0b = 1,
+	ccu_hps_ios_region_iospace1a = 2,
+	ccu_hps_ios_region_iospace1b = 3,
+	ccu_hps_ios_region_iospace1c = 4,
+	ccu_hps_ios_region_iospace1d = 5,
+	ccu_hps_ios_region_iospace1e = 6,
+	ccu_hps_ios_region_iospace1f = 7,
+	ccu_hps_ios_region_iospace1g = 8,
+	ccu_hps_ios_region_iospace2a = 9,
+	ccu_hps_ios_region_iospace2b = 10,
+	ccu_hps_ios_region_iospace2c = 11,
+} ccu_hps_ios_region_t;
+
+// Disables the IOS (IO Slave) region with the given ID.
+int ccu_hps_ios_region_disable(int id);
+
+// Enables the IOS (IO Slave) region with the given ID.
+int ccu_hps_ios_region_enable(int id);
 
-#define OCRAM_PRIVILEGED_MASK		BIT(29)
-#define OCRAM_SECURE_MASK		BIT(30)
 
+#define NCORE_CCU_OFFSET			0xf7000000
+
+/* Coherent Sub-System Address Map */
+#define NCORE_CAIU_OFFSET			0x00000
+#define NCORE_CAIU_SIZE				0x01000
+#define NCORE_NCBU_OFFSET			0x60000
+#define NCORE_NCBU_SIZE				0x01000
+#define NCORE_DIRU_OFFSET			0x80000
+#define NCORE_DIRU_SIZE				0x01000
+#define NCORE_CMIU_OFFSET			0xc0000
+#define NCORE_CMIU_SIZE				0x01000
+#define NCORE_CSR_OFFSET			0xff000
+#define NCORE_CSADSERO				0x00040
+#define NCORE_CSUIDR				0x00ff8
+#define NCORE_CSIDR				0x00ffc
+/* Directory Unit Register Map */
+#define NCORE_DIRUSFER				0x00010
+#define NCORE_DIRUMRHER				0x00070
+#define NCORE_DIRUSFMCR				0x00080
+#define NCORE_DIRUSFMAR				0x00084
+/* Coherent Agent Interface Unit Register Map */
+#define NCORE_CAIUIDR				0x00ffc
+/* Snoop Enable Register */
+#define NCORE_DIRUCASER0			0x00040
+#define NCORE_DIRUCASER1			0x00044
+#define NCORE_DIRUCASER2			0x00048
+#define NCORE_DIRUCASER3			0x0004c
+#define NCORE_CSADSER0				0x00040
+#define NCORE_CSADSER1				0x00044
+#define NCORE_CSADSER2				0x00048
+#define NCORE_CSADSER3				0x0004c
+/* Protocols Definition */
+#define ACE_W_DVM				0
+#define ACE_L_W_DVM				1
+#define ACE_WO_DVM				2
+#define ACE_L_WO_DVM				3
+/* Bypass OC Ram Firewall */
+#define NCORE_FW_OCRAM_BLK_BASE			0x100200
+#define NCORE_FW_OCRAM_BLK_CGF1			0x04
+#define NCORE_FW_OCRAM_BLK_CGF2			0x08
+#define NCORE_FW_OCRAM_BLK_CGF3			0x0c
+#define NCORE_FW_OCRAM_BLK_CGF4			0x10
+#define OCRAM_PRIVILEGED_MASK			BIT(29)
+#define OCRAM_SECURE_MASK			BIT(30)
 /* Macros */
-#define NCORE_CCU_REG(base)		(NCORE_CCU_OFFSET + (base))
-#define NCORE_CCU_CSR(reg)		(NCORE_CCU_REG(NCORE_CSR_OFFSET)\
+#define NCORE_CCU_REG(base)			(NCORE_CCU_OFFSET + (base))
+#define NCORE_CCU_CSR(reg)			(NCORE_CCU_REG(NCORE_CSR_OFFSET)\
 						+ (reg))
-#define NCORE_CCU_DIR(reg)		(NCORE_CCU_REG(NCORE_DIRU_OFFSET)\
+#define NCORE_CCU_DIR(reg)			(NCORE_CCU_REG(NCORE_DIRU_OFFSET)\
 						+ (reg))
-#define NCORE_CCU_CAI(reg)		(NCORE_CCU_REG(NCORE_CAIU_OFFSET)\
+#define NCORE_CCU_CAI(reg)			(NCORE_CCU_REG(NCORE_CAIU_OFFSET)\
 						+ (reg))
-
-#define DIRECTORY_UNIT(x, reg)		(NCORE_CCU_DIR(reg)\
+#define DIRECTORY_UNIT(x, reg)			(NCORE_CCU_DIR(reg)\
 						+ NCORE_DIRU_SIZE * (x))
-#define COH_AGENT_UNIT(x, reg)		(NCORE_CCU_CAI(reg)\
+#define COH_AGENT_UNIT(x, reg)			(NCORE_CCU_CAI(reg)\
 						+ NCORE_CAIU_SIZE * (x))
-
-#define COH_CPU0_BYPASS_REG(reg)	(NCORE_CCU_REG(NCORE_FW_OCRAM_BLK_BASE)\
+#define COH_CPU0_BYPASS_REG(reg)		(NCORE_CCU_REG(NCORE_FW_OCRAM_BLK_BASE)\
 						+ (reg))
-
-#define CSUIDR_NUM_CMI(x)		(((x) & 0x3f000000) >> 24)
-#define CSUIDR_NUM_DIR(x)		(((x) & 0x003f0000) >> 16)
-#define CSUIDR_NUM_NCB(x)		(((x) & 0x00003f00) >> 8)
-#define CSUIDR_NUM_CAI(x)		(((x) & 0x0000007f) >> 0)
-
-#define CSIDR_NUM_SF(x)			(((x) & 0x007c0000) >> 18)
-
-#define SNOOP_FILTER_ID(x)		(((x) << 16))
-
-#define CACHING_AGENT_BIT(x)		(((x) & 0x08000) >> 15)
-#define CACHING_AGENT_TYPE(x)		(((x) & 0xf0000) >> 16)
-
+#define CSUIDR_NUM_CMI(x)			(((x) & 0x3f000000) >> 24)
+#define CSUIDR_NUM_DIR(x)			(((x) & 0x003f0000) >> 16)
+#define CSUIDR_NUM_NCB(x)			(((x) & 0x00003f00) >> 8)
+#define CSUIDR_NUM_CAI(x)			(((x) & 0x0000007f) >> 0)
+#define CSIDR_NUM_SF(x)				(((x) & 0x007c0000) >> 18)
+#define SNOOP_FILTER_ID(x)			(((x) << 16))
+#define CACHING_AGENT_BIT(x)			(((x) & 0x08000) >> 15)
+#define CACHING_AGENT_TYPE(x)			(((x) & 0xf0000) >> 16)
 
 typedef struct coh_ss_id {
 	uint8_t num_coh_mem;
@@ -105,5 +426,6 @@
 
 uint32_t init_ncore_ccu(void);
 void ncore_enable_ocram_firewall(void);
+void setup_smmu_stream_id(void);
 
 #endif
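
The wr_* window constants above are documented only by the shift expressions in the comments preceding each define. The following standalone sketch re-derives two of them from those shifts; the helper macro name is hypothetical and the field meanings are not documented in this patch, so only the shift positions and resulting values are taken from it.

#include <stdint.h>

/* Hypothetical helper mirroring the bit composition shown in the comments. */
#define NRS_WINDOW(f9, f20, f30, f31) \
	(((uint32_t)(f9) << 9) | ((uint32_t)(f20) << 20) | \
	 ((uint32_t)(f30) << 30) | ((uint32_t)(f31) << 31))

/* ((0x1<<9) | (0xe<<20) | (0x1<<30) | (0x1<<31)) == wr_DII1_MPFEREGS */
_Static_assert(NRS_WINDOW(0x1, 0xe, 0x1, 0x1) == 0xC0E00200, "wr_DII1_MPFEREGS");
/* ((0x2<<9) | (0x8<<20) | (0x1<<30) | (0x1<<31)) == wr_DII2_GICREGS */
_Static_assert(NRS_WINDOW(0x2, 0x8, 0x1, 0x1) == 0xC0800400, "wr_DII2_GICREGS");

The wr_DMI_SDRAM_* variants additionally set bits 1 and 2 (the trailing 0x6), as their own comments show.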
diff --git a/plat/intel/soc/common/drivers/combophy/combophy.c b/plat/intel/soc/common/drivers/combophy/combophy.c
new file mode 100644
index 0000000..6c53bc1
--- /dev/null
+++ b/plat/intel/soc/common/drivers/combophy/combophy.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/cadence/cdns_sdmmc.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/utils.h>
+
+#include "combophy.h"
+#include "sdmmc/sdmmc.h"
+
+/* Temporarily assigned handoff data; remove once the SDM is up and running. */
+void config_nand(handoff *hoff_ptr)
+{
+	/* This is a hardcoded input value for the Combo PHY and SD host controller. */
+	hoff_ptr->peripheral_pwr_gate_array = 0x40;
+
+}
+
+/* DFI configuration */
+int dfi_select(handoff *hoff_ptr)
+{
+	uint32_t data = 0;
+
+	/* Temporarily assigned handoff data; remove once the SDM is up and running. */
+	handoff reverse_handoff_ptr;
+
+	/* Temporary until the SDM is up: populate the handoff data locally. */
+	config_nand(&reverse_handoff_ptr);
+
+	if (((reverse_handoff_ptr.peripheral_pwr_gate_array) & PERIPHERAL_SDMMC_MASK) == 0U) {
+		ERROR("SDMMC/NAND is not set properly\n");
+		return -ENXIO;
+	}
+
+	mmio_setbits_32(SOCFPGA_SYSMGR(DFI_INTF),
+		(((reverse_handoff_ptr.peripheral_pwr_gate_array) &
+		PERIPHERAL_SDMMC_MASK) >> PERIPHERAL_SDMMC_OFFSET));
+	data = mmio_read_32(SOCFPGA_SYSMGR(DFI_INTF));
+	if ((data & DFI_INTF_MASK) != (((reverse_handoff_ptr.peripheral_pwr_gate_array) &
+		PERIPHERAL_SDMMC_MASK) >> PERIPHERAL_SDMMC_OFFSET)) {
+		ERROR("DFI is not set properly\n");
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+int combo_phy_init(handoff *hoff_ptr)
+{
+	/* SDMMC/NAND DFI selection based on system manager DFI register */
+	int ret = dfi_select(hoff_ptr);
+
+	if (ret != 0U) {
+		ERROR("DFI configuration failed\n");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/plat/intel/soc/common/drivers/combophy/combophy.h b/plat/intel/soc/common/drivers/combophy/combophy.h
new file mode 100644
index 0000000..ca571f7
--- /dev/null
+++ b/plat/intel/soc/common/drivers/combophy/combophy.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef COMBOPHY_H
+#define COMBOPHY_H
+
+#include <lib/mmio.h>
+
+#include "socfpga_handoff.h"
+
+#define PERIPHERAL_SDMMC_MASK		0x60
+#define PERIPHERAL_SDMMC_OFFSET		6
+#define DFI_INTF_MASK			0x1
+
+/* FUNCTION DEFINITION */
+/*
+ * @brief Combo PHY initialization function
+ *
+ * @hoff_ptr: Pointer to the hand-off data
+ * Return: 0 on success, a negative errno on failure
+ */
+int combo_phy_init(handoff *hoff_ptr);
+int dfi_select(handoff *hoff_ptr);
+
+#endif
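
Since dfi_select() derives the system manager DFI_INTF value from the handoff field that config_nand() hardcodes, a short worked example of that mask arithmetic may help; it assumes only the two macros defined in this header.

#include <stdint.h>
#include "combophy.h"	/* PERIPHERAL_SDMMC_MASK, PERIPHERAL_SDMMC_OFFSET */

/*
 * With peripheral_pwr_gate_array == 0x40 (the value hardcoded in
 * config_nand()): (0x40 & 0x60) >> 6 == 1, which is the value that
 * dfi_select() ORs into the DFI_INTF system manager register.
 */
static inline uint32_t dfi_sel_value(uint32_t pwr_gate_array)
{
	return (pwr_gate_array & PERIPHERAL_SDMMC_MASK) >>
		PERIPHERAL_SDMMC_OFFSET;
}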
diff --git a/plat/intel/soc/common/drivers/ddr/ddr.c b/plat/intel/soc/common/drivers/ddr/ddr.c
new file mode 100644
index 0000000..188302f
--- /dev/null
+++ b/plat/intel/soc/common/drivers/ddr/ddr.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <common/debug.h>
+#include "ddr.h"
+#include <lib/mmio.h>
+#include "socfpga_handoff.h"
+
+int ddr_calibration_check(void)
+{
+	// DDR calibration check
+	int status = 0;
+	uint32_t u32data_read = 0;
+
+	NOTICE("DDR: Access address 0x%x:...\n", IO96B_0_REG_BASE);
+	u32data_read = mmio_read_32(IO96B_0_REG_BASE);
+	NOTICE("DDR: Access address 0x%x: read 0x%04x\n", IO96B_0_REG_BASE, u32data_read);
+
+	if (u32data_read == -EPERM) {
+		status = -EPERM;
+		assert(u32data_read);
+	}
+
+	u32data_read = 0x0;
+	NOTICE("DDR: Access address 0x%x: ...\n", IO96B_1_REG_BASE);
+	u32data_read = mmio_read_32(IO96B_1_REG_BASE);
+	NOTICE("DDR: Access address 0x%x: read 0x%04x\n", IO96B_1_REG_BASE, u32data_read);
+
+	if (u32data_read == -EPERM) {
+		status = -EPERM;
+		assert(u32data_read);
+	}
+
+	return status;
+}
+
+int iossm_mb_init(void)
+{
+	// int status;
+
+	// Update according to IOSSM mailbox spec
+
+	// if (status) {
+		// return status;
+	// }
+
+	return 0;
+}
+
+int wait_respond(uint16_t timeout)
+{
+	uint32_t status = 0;
+	uint32_t count = 0;
+	uint32_t data = 0;
+
+	/* Wait status command response ready */
+	do {
+		data = mmio_read_32(IO96B_CSR_REG(CMD_RESPONSE_STATUS));
+		count++;
+		if (count >= timeout) {
+			return -ETIMEDOUT;
+		}
+
+	} while (STATUS_COMMAND_RESPONSE(data) != STATUS_COMMAND_RESPONSE_READY);
+
+	status = (data & STATUS_GENERAL_ERROR_MASK) >> STATUS_GENERAL_ERROR_OFFSET;
+	if (status != 0) {
+		return status;
+	}
+
+	status = (data & STATUS_CMD_RESPONSE_ERROR_MASK) >> STATUS_CMD_RESPONSE_ERROR_OFFSET;
+	if (status != 0) {
+		return status;
+	}
+
+	return status;
+}
+
+int iossm_mb_read_response(void)
+{
+	uint32_t status = 0;
+	unsigned int i;
+	uint32_t resp_data[IOSSM_RESP_MAX_WORD_SIZE];
+	uint32_t resp_param_reg;
+
+	// Check STATUS_CMD_RESPONSE_DATA_PTR_VALID in STATUS_COMMAND_RESPONSE
+	// to determine whether the response is returned via a data pointer.
+
+	/* Read CMD_RESPONSE_STATUS and CMD_RESPONSE_DATA_* */
+	resp_data[0] = mmio_read_32(IO96B_CSR_REG(CMD_RESPONSE_STATUS));
+	resp_data[0] = (resp_data[0] & CMD_RESPONSE_DATA_SHORT_MASK) >>
+			CMD_RESPONSE_DATA_SHORT_OFFSET;
+	resp_param_reg = CMD_RESPONSE_STATUS;
+	for (i = 1; i < IOSSM_RESP_MAX_WORD_SIZE; i++) {
+		resp_param_reg = resp_param_reg - CMD_RESPONSE_OFFSET;
+		resp_data[i] = mmio_read_32(IO96B_CSR_REG(resp_param_reg));
+	}
+
+	/* Wait for STATUS_COMMAND_RESPONSE_READY */
+	status = wait_respond(1000);
+
+	/* Clear the command response ready flag in CMD_RESPONSE_STATUS */
+	mmio_setbits_32(STATUS_COMMAND_RESPONSE(IO96B_CSR_REG(
+			CMD_RESPONSE_STATUS)),
+			STATUS_COMMAND_RESPONSE_READY_CLEAR);
+
+	return status;
+}
+
+int iossm_mb_send(uint32_t cmd_target_ip_type, uint32_t cmd_target_ip_instance_id,
+			uint32_t cmd_type, uint32_t cmd_opcode, uint32_t *args,
+			unsigned int len)
+{
+	unsigned int i;
+	uint32_t status = 0;
+	uint32_t cmd_req;
+	uint32_t cmd_param_reg;
+
+	cmd_target_ip_type = (cmd_target_ip_type & CMD_TARGET_IP_TYPE_MASK) <<
+				CMD_TARGET_IP_TYPE_OFFSET;
+	cmd_target_ip_instance_id = (cmd_target_ip_instance_id &
+				CMD_TARGET_IP_INSTANCE_ID_MASK) <<
+				CMD_TARGET_IP_INSTANCE_ID_OFFSET;
+	cmd_type = (cmd_type & CMD_TYPE_MASK) << CMD_TYPE_OFFSET;
+	cmd_opcode = (cmd_opcode & CMD_OPCODE_MASK) << CMD_OPCODE_OFFSET;
+	cmd_req = cmd_target_ip_type | cmd_target_ip_instance_id | cmd_type |
+			cmd_opcode;
+
+	/* send mailbox request */
+	IOSSM_MB_WRITE(IO96B_CSR_REG(CMD_REQ), cmd_req);
+	if (len != 0) {
+		cmd_param_reg = CMD_REQ;
+		for (i = 0; i < len; i++) {
+			cmd_param_reg = cmd_param_reg - CMD_PARAM_OFFSET;
+			IOSSM_MB_WRITE(IO96B_CSR_REG(cmd_param_reg), args[i]);
+		}
+	}
+
+	status = iossm_mb_read_response();
+	if (status != 0) {
+		return status;
+	}
+
+	return status;
+}
+
+int ddr_iossm_mailbox_cmd(uint32_t cmd_opcode)
+{
+	// IOSSM
+	uint32_t status = 0;
+	unsigned int i = 0;
+	uint32_t payload[IOSSM_CMD_MAX_WORD_SIZE] = {0U};
+
+	switch (cmd_opcode) {
+	case CMD_INIT:
+		status = iossm_mb_init();
+		break;
+
+	case OPCODE_GET_MEM_INTF_INFO:
+		status = iossm_mb_send(0, 0, MBOX_CMD_GET_SYS_INFO,
+		OPCODE_GET_MEM_INTF_INFO, payload, i);
+		break;
+
+	case OPCODE_GET_MEM_TECHNOLOGY:
+		status = iossm_mb_send(0, 0, MBOX_CMD_GET_MEM_INFO,
+		OPCODE_GET_MEM_TECHNOLOGY, payload, i);
+		break;
+
+	case OPCODE_GET_MEM_WIDTH_INFO:
+		status = iossm_mb_send(0, 0, MBOX_CMD_GET_MEM_INFO,
+		OPCODE_GET_MEM_WIDTH_INFO, payload, i);
+		break;
+
+	case OPCODE_ECC_ENABLE_STATUS:
+		status = iossm_mb_send(0, 0,
+		MBOX_CMD_TRIG_CONTROLLER_OP, OPCODE_ECC_ENABLE_STATUS,
+		payload, i);
+		break;
+
+	case OPCODE_ECC_INTERRUPT_MASK:
+		// payload[i] = CMD_PARAM_0 [16:0]: ECC_INTERRUPT_MASK
+		status = iossm_mb_send(0, 0,
+		MBOX_CMD_TRIG_CONTROLLER_OP, OPCODE_ECC_INTERRUPT_MASK,
+		payload, i);
+		break;
+
+	case OPCODE_ECC_SCRUB_MODE_0_START:
+		// payload[i] = CMD_PARAM_0 [15:0]: ECC_SCRUB_INTERVAL
+		//i++;
+		// payload[i] = CMD_PARAM_1 [11:0]: ECC_SCRUB_LEN
+		//i++;
+		// payload[i] = CMD_PARAM_2 [0:0]: ECC_SCRUB_FULL_MEM
+		//i++;
+		// payload[i]= CMD_PARAM_3 [31:0]: ECC_SCRUB_START_ADDR [31:0]
+		//i++;
+		// payload[i] = CMD_PARAM_4 [5:0]: ECC_SCRUB_START_ADDR [36:32]
+		//i++;
+		// payload[i] = CMD_PARAM_5 [31:0]: ECC_SCRUB_END_ADDR [31:0]
+		//i++;
+		// payload[i] = CMD_PARAM_6 [5:0]: ECC_SCRUB_END_ADDR [36:32]
+		//i++;
+		status = iossm_mb_send(0, 0,
+		MBOX_CMD_TRIG_CONTROLLER_OP, OPCODE_ECC_SCRUB_MODE_0_START,
+		payload, i);
+		break;
+
+	case OPCODE_ECC_SCRUB_MODE_1_START:
+		// payload[i] = CMD_PARAM_0 [15:0]: ECC_SCRUB_IDLE_CNT
+		//i++;
+		// payload[i] = CMD_PARAM_1 [11:0]: ECC_SCRUB_LEN
+		//i++;
+		// payload[i] = CMD_PARAM_2 [0:0]: ECC_SCRUB_FULL_MEM
+		//i++;
+		// payload[i] = CMD_PARAM_3 [31:0]: ECC_SCRUB_START_ADDR [31:0]
+		//i++;
+		// payload[i] = CMD_PARAM_4 [5:0]: ECC_SCRUB_START_ADDR [36:32]
+		//i++;
+		// payload[i] = CMD_PARAM_5 [31:0]: ECC_SCRUB_END_ADDR [31:0]
+		//i++;
+		// payload[i] = CMD_PARAM_6 [5:0]: ECC_SCRUB_END_ADDR [36:32]
+		//i++;
+		status = iossm_mb_send(0, 0,
+		MBOX_CMD_TRIG_CONTROLLER_OP, OPCODE_ECC_SCRUB_MODE_1_START,
+		payload, i);
+		break;
+
+	case OPCODE_BIST_RESULTS_STATUS:
+		status = iossm_mb_send(0, 0,
+		MBOX_CMD_TRIG_CONTROLLER_OP, OPCODE_BIST_RESULTS_STATUS,
+		payload, i);
+		break;
+
+	case OPCODE_BIST_MEM_INIT_START:
+		status = iossm_mb_send(0, 0,
+		MBOX_CMD_TRIG_CONTROLLER_OP, OPCODE_BIST_MEM_INIT_START,
+		payload, i);
+		break;
+
+	case OPCODE_TRIG_MEM_CAL:
+		status = iossm_mb_send(0, 0, MBOX_CMD_TRIG_MEM_CAL_OP,
+		OPCODE_TRIG_MEM_CAL, payload, i);
+		break;
+
+	default:
+		break;
+	}
+
+	if (status == -EPERM) {
+		assert(status);
+	}
+
+	return status;
+}
+
+int ddr_config_handoff(handoff *hoff_ptr)
+{
+	/* Populate DDR handoff data */
+	/* TODO: To add in DDR handoff configuration once available */
+	return 0;
+}
+
+// DDR firewall and non secure access
+void ddr_enable_ns_access(void)
+{
+	/* Set the DDR non-secure access registers accordingly. */
+
+	mmio_setbits_32(CCU_REG(DMI0_DMIUSMCTCR),
+			CCU_DMI_ALLOCEN | CCU_DMI_LOOKUPEN);
+	mmio_setbits_32(CCU_REG(DMI1_DMIUSMCTCR),
+			CCU_DMI_ALLOCEN | CCU_DMI_LOOKUPEN);
+
+	/* TODO: To add in CCU NCORE OCRAM bypass mask for non secure registers */
+	NOTICE("DDR non-secure access configured\n");
+}
+
+void ddr_enable_firewall(void)
+{
+	/* Set the DDR firewall registers accordingly. */
+	/* TODO: To add in CCU NCORE OCRAM bypass mask for firewall registers */
+	NOTICE("DDR firewall enabled\n");
+}
+
+bool is_ddr_init_in_progress(void)
+{
+	uint32_t reg = mmio_read_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_POR_0));
+
+	if (reg & SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_0_MASK) {
+		return true;
+	}
+	return false;
+}
+
+int ddr_init(void)
+{
+	// DDR driver initialization
+	int status = -EPERM;
+	uint32_t cmd_opcode = 0;
+
+	// Check and set Boot Scratch Register
+	if (is_ddr_init_in_progress()) {
+		return status;
+	}
+	mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_POR_0), 0x01);
+
+	// Populate DDR handoff data
+	handoff reverse_handoff_ptr;
+
+	if (!socfpga_get_handoff(&reverse_handoff_ptr)) {
+		assert(status);
+	}
+	status = ddr_config_handoff(&reverse_handoff_ptr);
+	if (status == -EPERM) {
+		assert(status);
+	}
+
+	// CCU and firewall setup
+	ddr_enable_ns_access();
+	ddr_enable_firewall();
+
+	// DDR calibration check
+	status = ddr_calibration_check();
+	if (status == -EPERM) {
+		assert(status);
+	}
+
+	// DDR mailbox command
+	status = ddr_iossm_mailbox_cmd(cmd_opcode);
+	if (status != 0) {
+		assert(status);
+	}
+
+	// Check and set Boot Scratch Register
+	mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_POR_0), 0x00);
+
+	NOTICE("DDR initialized successfully\n");
+	return status;
+}
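
A minimal sketch of how a platform boot stage might consume ddr_init(); the wrapper and the panic-on-failure policy are assumptions, only ddr_init() itself comes from this driver.

/* Illustrative caller only (not part of the patch). */
static void plat_ddr_setup(void)
{
	int ret = ddr_init();

	if (ret != 0) {
		ERROR("DDR initialization failed (%d)\n", ret);
		panic();
	}
}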
diff --git a/plat/intel/soc/common/drivers/ddr/ddr.h b/plat/intel/soc/common/drivers/ddr/ddr.h
new file mode 100644
index 0000000..416b64e
--- /dev/null
+++ b/plat/intel/soc/common/drivers/ddr/ddr.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef DDR_H
+#define DDR_H
+
+#include <lib/mmio.h>
+#include "socfpga_handoff.h"
+
+/* MACRO DEFINITION */
+#define IO96B_0_REG_BASE				0x18400000
+#define IO96B_1_REG_BASE				0x18800000
+#define IO96B_CSR_BASE					0x05000000
+#define IO96B_CSR_REG(reg)				(IO96B_CSR_BASE + reg)
+
+#define IOSSM_CMD_MAX_WORD_SIZE				7U
+#define IOSSM_RESP_MAX_WORD_SIZE			4U
+
+#define CCU_REG_BASE					0x1C000000
+#define DMI0_DMIUSMCTCR					0x7300
+#define DMI1_DMIUSMCTCR					0x8300
+#define CCU_DMI_ALLOCEN					BIT(1)
+#define CCU_DMI_LOOKUPEN				BIT(2)
+#define CCU_REG(reg)					(CCU_REG_BASE + reg)
+
+// CMD_RESPONSE_STATUS Register
+#define CMD_RESPONSE_STATUS				0x45C
+#define CMD_RESPONSE_OFFSET				0x4
+#define CMD_RESPONSE_DATA_SHORT_MASK			GENMASK(31, 16)
+#define CMD_RESPONSE_DATA_SHORT_OFFSET			16
+#define STATUS_CMD_RESPONSE_ERROR_MASK			GENMASK(7, 5)
+#define STATUS_CMD_RESPONSE_ERROR_OFFSET		5
+#define STATUS_GENERAL_ERROR_MASK			GENMASK(4, 1)
+#define STATUS_GENERAL_ERROR_OFFSET			1
+#define STATUS_COMMAND_RESPONSE_READY			0x1
+#define STATUS_COMMAND_RESPONSE_READY_CLEAR		0x0
+#define STATUS_COMMAND_RESPONSE_READY_MASK		0x1
+#define STATUS_COMMAND_RESPONSE_READY_OFFSET		0
+#define STATUS_COMMAND_RESPONSE(x)			(((x) & \
+							STATUS_COMMAND_RESPONSE_READY_MASK) >> \
+							STATUS_COMMAND_RESPONSE_READY_OFFSET)
+
+// CMD_REQ Register
+#define CMD_STATUS					0x400
+#define CMD_PARAM					0x438
+#define CMD_REQ						0x43C
+#define CMD_PARAM_OFFSET				0x4
+#define CMD_TARGET_IP_TYPE_MASK				GENMASK(31, 29)
+#define CMD_TARGET_IP_TYPE_OFFSET			29
+#define CMD_TARGET_IP_INSTANCE_ID_MASK			GENMASK(28, 24)
+#define CMD_TARGET_IP_INSTANCE_ID_OFFSET		24
+#define CMD_TYPE_MASK					GENMASK(23, 16)
+#define CMD_TYPE_OFFSET					16
+#define CMD_OPCODE_MASK					GENMASK(15, 0)
+#define CMD_OPCODE_OFFSET				0
+
+#define CMD_INIT					0
+
+#define OPCODE_GET_MEM_INTF_INFO			0x0001
+#define OPCODE_GET_MEM_TECHNOLOGY			0x0002
+#define OPCODE_GET_MEM_WIDTH_INFO			0x0004
+#define OPCODE_TRIG_MEM_CAL				0x000A
+#define OPCODE_ECC_ENABLE_STATUS			0x0102
+#define OPCODE_ECC_INTERRUPT_MASK			0x0105
+#define OPCODE_ECC_SCRUB_MODE_0_START			0x0202
+#define OPCODE_ECC_SCRUB_MODE_1_START			0x0203
+#define OPCODE_BIST_RESULTS_STATUS			0x0302
+#define OPCODE_BIST_MEM_INIT_START			0x0303
+// Please update according to IOSSM mailbox spec
+#define MBOX_ID_IOSSM					0x00
+#define MBOX_CMD_GET_SYS_INFO				0x01
+// Please update according to IOSSM mailbox spec
+#define MBOX_CMD_GET_MEM_INFO				0x02
+#define MBOX_CMD_TRIG_CONTROLLER_OP			0x04
+#define MBOX_CMD_TRIG_MEM_CAL_OP			0x05
+#define MBOX_CMD_POKE_REG				0xFD
+#define MBOX_CMD_PEEK_REG				0xFE
+#define MBOX_CMD_GET_DEBUG_LOG				0xFF
+// Please update according to IOSSM mailbox spec
+#define MBOX_CMD_DIRECT					0x00
+
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_0_MASK		0x01
+
+#define IOSSM_MB_WRITE(addr, data)			mmio_write_32(addr, data)
+
+/* FUNCTION DEFINITION */
+int ddr_calibration_check(void);
+
+int iossm_mb_init(void);
+
+int iossm_mb_read_response(void);
+
+int iossm_mb_send(uint32_t cmd_target_ip_type, uint32_t cmd_target_ip_instance_id,
+			uint32_t cmd_type, uint32_t cmd_opcode, uint32_t *args,
+			unsigned int len);
+
+int ddr_iossm_mailbox_cmd(uint32_t cmd);
+
+int ddr_init(void);
+
+int ddr_config_handoff(handoff *hoff_ptr);
+
+void ddr_enable_ns_access(void);
+
+void ddr_enable_firewall(void);
+
+bool is_ddr_init_in_progress(void);
+
+#endif
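
To make the CMD_RESPONSE_STATUS field layout concrete, here is a worked decode using the masks above; the register value 0x12340001 is made up purely for illustration, and the function wrapper is hypothetical.

/*
 * data == 0x12340001 decodes as:
 *   bit  [0]     response ready          -> 1
 *   bits [4:1]   general error           -> 0
 *   bits [7:5]   command response error  -> 0
 *   bits [31:16] short response data     -> 0x1234
 */
static inline void cmd_response_status_decode_example(void)
{
	uint32_t data = 0x12340001U;
	uint32_t ready = STATUS_COMMAND_RESPONSE(data);			/* 1      */
	uint32_t gen_err = (data & STATUS_GENERAL_ERROR_MASK) >>
				STATUS_GENERAL_ERROR_OFFSET;		/* 0      */
	uint32_t short_data = (data & CMD_RESPONSE_DATA_SHORT_MASK) >>
				CMD_RESPONSE_DATA_SHORT_OFFSET;		/* 0x1234 */

	(void)ready;
	(void)gen_err;
	(void)short_data;
}

This mirrors what wait_respond() and iossm_mb_read_response() do on the live register.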
diff --git a/plat/intel/soc/common/drivers/nand/nand.c b/plat/intel/soc/common/drivers/nand/nand.c
new file mode 100644
index 0000000..c6acbe3
--- /dev/null
+++ b/plat/intel/soc/common/drivers/nand/nand.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/cadence/cdns_nand.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/utils.h>
+#include "nand.h"
+
+#include "agilex5_pinmux.h"
+#include "combophy/combophy.h"
+
+/* Pinmux configuration */
+static void nand_pinmux_config(void)
+{
+	mmio_write_32(SOCFPGA_PINMUX(PIN0SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN1SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN2SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN3SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN4SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN5SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN6SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN7SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN8SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN9SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN10SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN11SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN12SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN13SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN14SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN16SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN17SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN18SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN19SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN20SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN21SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN22SEL), SOCFPGA_PINMUX_SEL_NAND);
+	mmio_write_32(SOCFPGA_PINMUX(PIN23SEL), SOCFPGA_PINMUX_SEL_NAND);
+}
+
+int nand_init(handoff *hoff_ptr)
+{
+	/* NAND pin mux configuration */
+	nand_pinmux_config();
+
+	return cdns_nand_host_init();
+}
diff --git a/plat/intel/soc/common/drivers/nand/nand.h b/plat/intel/soc/common/drivers/nand/nand.h
new file mode 100644
index 0000000..b060a72
--- /dev/null
+++ b/plat/intel/soc/common/drivers/nand/nand.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef NAND_H
+#define NAND_H
+
+#include <lib/mmio.h>
+#include "socfpga_handoff.h"
+
+/* FUNCTION DEFINITION */
+/*
+ * @brief Nand controller initialization function
+ *
+ * @hoff_ptr: Pointer to the hand-off data
+ * Return: 0 on success, a negative errno on failure
+ */
+int nand_init(handoff *hoff_ptr);
+
+#endif
diff --git a/plat/intel/soc/common/drivers/qspi/cadence_qspi.c b/plat/intel/soc/common/drivers/qspi/cadence_qspi.c
index cecf560..da8a8bd 100644
--- a/plat/intel/soc/common/drivers/qspi/cadence_qspi.c
+++ b/plat/intel/soc/common/drivers/qspi/cadence_qspi.c
@@ -13,6 +13,7 @@
 #include <drivers/console.h>
 
 #include "cadence_qspi.h"
+#include "socfpga_plat_def.h"
 
 #define LESS(a, b)   (((a) < (b)) ? (a) : (b))
 #define MORE(a, b)   (((a) > (b)) ? (a) : (b))
diff --git a/plat/intel/soc/common/drivers/qspi/cadence_qspi.h b/plat/intel/soc/common/drivers/qspi/cadence_qspi.h
index cfef585..104ab5f 100644
--- a/plat/intel/soc/common/drivers/qspi/cadence_qspi.h
+++ b/plat/intel/soc/common/drivers/qspi/cadence_qspi.h
@@ -10,8 +10,6 @@
 
 #define CAD_QSPI_MICRON_N25Q_SUPPORT		1
 
-#define CAD_QSPI_OFFSET				0xff8d2000
-
 #define CAD_INVALID				-1
 #define CAD_QSPI_ERROR				-2
 
@@ -38,8 +36,6 @@
 #define CAD_QSPI_CFG_SELCLKPHASE_CLR_MSK	0xfffffffb
 #define CAD_QSPI_CFG_SELCLKPOL_CLR_MSK		0xfffffffd
 
-#define CAD_QSPIDATA_OFST			0xff900000
-
 #define CAD_QSPI_DELAY				0xc
 #define CAD_QSPI_DELAY_CSSOT(x)			(((x) & 0xff) << 0)
 #define CAD_QSPI_DELAY_CSEOT(x)			(((x) & 0xff) << 8)
diff --git a/plat/intel/soc/common/drivers/sdmmc/sdmmc.c b/plat/intel/soc/common/drivers/sdmmc/sdmmc.c
new file mode 100644
index 0000000..8666f54
--- /dev/null
+++ b/plat/intel/soc/common/drivers/sdmmc/sdmmc.c
@@ -0,0 +1,769 @@
+/*
+ * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/cadence/cdns_combo_phy.h>
+#include <drivers/cadence/cdns_sdmmc.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/utils.h>
+
+#include "agilex5_pinmux.h"
+#include "sdmmc.h"
+
+static const struct mmc_ops *ops;
+static unsigned int mmc_ocr_value;
+static struct mmc_csd_emmc mmc_csd;
+static struct sd_switch_status sd_switch_func_status;
+static unsigned char mmc_ext_csd[512] __aligned(16);
+static unsigned int mmc_flags;
+static struct mmc_device_info *mmc_dev_info;
+static unsigned int rca;
+static unsigned int scr[2]__aligned(16) = { 0 };
+
+extern const struct mmc_ops cdns_sdmmc_ops;
+extern struct cdns_sdmmc_params cdns_params;
+extern struct cdns_sdmmc_combo_phy sdmmc_combo_phy_reg;
+extern struct cdns_sdmmc_sdhc sdmmc_sdhc_reg;
+
+static bool is_cmd23_enabled(void)
+{
+	return ((mmc_flags & MMC_FLAG_CMD23) != 0U);
+}
+
+static bool is_sd_cmd6_enabled(void)
+{
+	return ((mmc_flags & MMC_FLAG_SD_CMD6) != 0U);
+}
+
+/* TODO: Remove once the ATF driver is developed. */
+void sdmmc_pin_config(void)
+{
+	/* Temporary: write base + offset directly; switch to the common method in the official driver. */
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x00, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x04, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x08, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x0C, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x10, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x14, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x18, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x1C, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x20, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x24, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x28, 0x0);
+}
+
+static int sdmmc_send_cmd(unsigned int idx, unsigned int arg,
+			unsigned int r_type, unsigned int *r_data)
+{
+	struct mmc_cmd cmd;
+	int ret;
+
+	zeromem(&cmd, sizeof(struct mmc_cmd));
+
+	cmd.cmd_idx = idx;
+	cmd.cmd_arg = arg;
+	cmd.resp_type = r_type;
+
+	ret = ops->send_cmd(&cmd);
+
+	if ((ret == 0) && (r_data != NULL)) {
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			*r_data = cmd.resp_data[i];
+			r_data++;
+		}
+	}
+
+	if (ret != 0) {
+		VERBOSE("Send command %u error: %d\n", idx, ret);
+	}
+
+	return ret;
+}
+
+static int sdmmc_device_state(void)
+{
+	int retries = DEFAULT_SDMMC_MAX_RETRIES;
+	unsigned int resp_data[4];
+
+	do {
+		int ret;
+
+		if (retries == 0) {
+			ERROR("CMD13 failed after %d retries\n",
+			      DEFAULT_SDMMC_MAX_RETRIES);
+			return -EIO;
+		}
+
+		ret = sdmmc_send_cmd(MMC_CMD(13), rca << RCA_SHIFT_OFFSET,
+				   MMC_RESPONSE_R1, &resp_data[0]);
+		if (ret != 0) {
+			retries--;
+			continue;
+		}
+
+		if ((resp_data[0] & STATUS_SWITCH_ERROR) != 0U) {
+			return -EIO;
+		}
+
+		retries--;
+	} while ((resp_data[0] & STATUS_READY_FOR_DATA) == 0U);
+
+	return MMC_GET_STATE(resp_data[0]);
+}
+
+static int sdmmc_set_ext_csd(unsigned int ext_cmd, unsigned int value)
+{
+	int ret;
+
+	ret = sdmmc_send_cmd(MMC_CMD(6),
+			   EXTCSD_WRITE_BYTES | EXTCSD_CMD(ext_cmd) |
+			   EXTCSD_VALUE(value) | EXTCSD_CMD_SET_NORMAL,
+			   MMC_RESPONSE_R1B, NULL);
+	if (ret != 0) {
+		return ret;
+	}
+
+	do {
+		ret = sdmmc_device_state();
+		if (ret < 0) {
+			return ret;
+		}
+	} while (ret == MMC_STATE_PRG);
+
+	return 0;
+}
+
+static int sdmmc_mmc_sd_switch(unsigned int bus_width)
+{
+	int ret;
+	int retries = DEFAULT_SDMMC_MAX_RETRIES;
+	unsigned int bus_width_arg = 0;
+
+	/* CMD55: Application Specific Command */
+	ret = sdmmc_send_cmd(MMC_CMD(55), rca << RCA_SHIFT_OFFSET,
+			   MMC_RESPONSE_R5, NULL);
+	if (ret != 0) {
+		return ret;
+	}
+
+	ret = ops->prepare(0, (uintptr_t)&scr, sizeof(scr));
+	if (ret != 0) {
+		return ret;
+	}
+
+	/* ACMD51: SEND_SCR */
+	do {
+		ret = sdmmc_send_cmd(MMC_ACMD(51), 0, MMC_RESPONSE_R1, NULL);
+		if ((ret != 0) && (retries == 0)) {
+			ERROR("ACMD51 failed after %d retries (ret=%d)\n",
+			      DEFAULT_SDMMC_MAX_RETRIES, ret);
+			return ret;
+		}
+
+		retries--;
+	} while (ret != 0);
+
+	ret = ops->read(0, (uintptr_t)&scr, sizeof(scr));
+	if (ret != 0) {
+		return ret;
+	}
+
+	if (((scr[0] & SD_SCR_BUS_WIDTH_4) != 0U) &&
+	    (bus_width == MMC_BUS_WIDTH_4)) {
+		bus_width_arg = 2;
+	}
+
+	/* CMD55: Application Specific Command */
+	ret = sdmmc_send_cmd(MMC_CMD(55), rca << RCA_SHIFT_OFFSET,
+			   MMC_RESPONSE_R5, NULL);
+	if (ret != 0) {
+		return ret;
+	}
+
+	/* ACMD6: SET_BUS_WIDTH */
+	ret = sdmmc_send_cmd(MMC_ACMD(6), bus_width_arg, MMC_RESPONSE_R1, NULL);
+	if (ret != 0) {
+		return ret;
+	}
+
+	do {
+		ret = sdmmc_device_state();
+		if (ret < 0) {
+			return ret;
+		}
+	} while (ret == MMC_STATE_PRG);
+
+	return 0;
+}
+
+static int sdmmc_set_ios(unsigned int clk, unsigned int bus_width)
+{
+	int ret;
+	unsigned int width = bus_width;
+
+	if (mmc_dev_info->mmc_dev_type != MMC_IS_EMMC) {
+		if (width == MMC_BUS_WIDTH_8) {
+			WARN("Wrong bus config for SD-card, force to 4\n");
+			width = MMC_BUS_WIDTH_4;
+		}
+		ret = sdmmc_mmc_sd_switch(width);
+		if (ret != 0) {
+			return ret;
+		}
+	} else if (mmc_csd.spec_vers == 4U) {
+		ret = sdmmc_set_ext_csd(CMD_EXTCSD_BUS_WIDTH,
+				      (unsigned int)width);
+		if (ret != 0) {
+			return ret;
+		}
+	} else {
+		VERBOSE("Wrong MMC type or spec version\n");
+	}
+
+	return ops->set_ios(clk, width);
+}
+
+static int sdmmc_fill_device_info(void)
+{
+	unsigned long long c_size;
+	unsigned int speed_idx;
+	unsigned int nb_blocks;
+	unsigned int freq_unit;
+	int ret = 0;
+	struct mmc_csd_sd_v2 *csd_sd_v2;
+
+	switch (mmc_dev_info->mmc_dev_type) {
+	case MMC_IS_EMMC:
+		mmc_dev_info->block_size = MMC_BLOCK_SIZE;
+
+		ret = ops->prepare(0, (uintptr_t)&mmc_ext_csd,
+				   sizeof(mmc_ext_csd));
+		if (ret != 0) {
+			return ret;
+		}
+
+		/* MMC CMD8: SEND_EXT_CSD */
+		ret = sdmmc_send_cmd(MMC_CMD(8), 0, MMC_RESPONSE_R1, NULL);
+		if (ret != 0) {
+			return ret;
+		}
+
+		ret = ops->read(0, (uintptr_t)&mmc_ext_csd,
+				sizeof(mmc_ext_csd));
+		if (ret != 0) {
+			return ret;
+		}
+
+		do {
+			ret = sdmmc_device_state();
+			if (ret < 0) {
+				return ret;
+			}
+		} while (ret != MMC_STATE_TRAN);
+
+		nb_blocks = (mmc_ext_csd[CMD_EXTCSD_SEC_CNT] << 0) |
+			    (mmc_ext_csd[CMD_EXTCSD_SEC_CNT + 1] << 8) |
+			    (mmc_ext_csd[CMD_EXTCSD_SEC_CNT + 2] << 16) |
+			    (mmc_ext_csd[CMD_EXTCSD_SEC_CNT + 3] << 24);
+
+		mmc_dev_info->device_size = (unsigned long long)nb_blocks *
+			mmc_dev_info->block_size;
+
+		break;
+
+	case MMC_IS_SD:
+		/*
+		 * Use the same mmc_csd struct, as required fields here
+		 * (READ_BL_LEN, C_SIZE, CSIZE_MULT) are common with eMMC.
+		 */
+		mmc_dev_info->block_size = BIT_32(mmc_csd.read_bl_len);
+
+		c_size = ((unsigned long long)mmc_csd.c_size_high << 2U) |
+			 (unsigned long long)mmc_csd.c_size_low;
+		assert(c_size != 0xFFFU);
+
+		mmc_dev_info->device_size = (c_size + 1U) *
+					    BIT_64(mmc_csd.c_size_mult + 2U) *
+					    mmc_dev_info->block_size;
+
+		break;
+
+	case MMC_IS_SD_HC:
+		assert(mmc_csd.csd_structure == 1U);
+
+		mmc_dev_info->block_size = MMC_BLOCK_SIZE;
+
+		/* Need to use mmc_csd_sd_v2 struct */
+		csd_sd_v2 = (struct mmc_csd_sd_v2 *)&mmc_csd;
+		c_size = ((unsigned long long)csd_sd_v2->c_size_high << 16) |
+			 (unsigned long long)csd_sd_v2->c_size_low;
+
+		mmc_dev_info->device_size = (c_size + 1U) << SDMMC_MULT_BY_512K_SHIFT;
+
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret < 0) {
+		return ret;
+	}
+
+	speed_idx = (mmc_csd.tran_speed & CSD_TRAN_SPEED_MULT_MASK) >>
+			 CSD_TRAN_SPEED_MULT_SHIFT;
+
+	assert(speed_idx > 0U);
+
+	if (mmc_dev_info->mmc_dev_type == MMC_IS_EMMC) {
+		mmc_dev_info->max_bus_freq = tran_speed_base[speed_idx];
+	} else {
+		mmc_dev_info->max_bus_freq = sd_tran_speed_base[speed_idx];
+	}
+
+	freq_unit = mmc_csd.tran_speed & CSD_TRAN_SPEED_UNIT_MASK;
+	while (freq_unit != 0U) {
+		mmc_dev_info->max_bus_freq *= 10U;
+		--freq_unit;
+	}
+
+	mmc_dev_info->max_bus_freq *= 10000U;
+
+	return 0;
+}
+
+static int sdmmc_sd_switch(unsigned int mode, unsigned char group,
+		     unsigned char func)
+{
+	unsigned int group_shift = (group - 1U) * 4U;
+	unsigned int group_mask = GENMASK(group_shift + 3U,  group_shift);
+	unsigned int arg;
+	int ret;
+
+	ret = ops->prepare(0, (uintptr_t)&sd_switch_func_status,
+			   sizeof(sd_switch_func_status));
+	if (ret != 0) {
+		return ret;
+	}
+
+	/* MMC CMD6: SWITCH_FUNC */
+	arg = mode | SD_SWITCH_ALL_GROUPS_MASK;
+	arg &= ~group_mask;
+	arg |= func << group_shift;
+	ret = sdmmc_send_cmd(MMC_CMD(6), arg, MMC_RESPONSE_R1, NULL);
+	if (ret != 0) {
+		return ret;
+	}
+
+	return ops->read(0, (uintptr_t)&sd_switch_func_status,
+			 sizeof(sd_switch_func_status));
+}
+
+static int sdmmc_sd_send_op_cond(void)
+{
+	int n;
+	unsigned int resp_data[4];
+
+	for (n = 0; n < SEND_SDMMC_OP_COND_MAX_RETRIES; n++) {
+		int ret;
+
+		/* CMD55: Application Specific Command */
+		ret = sdmmc_send_cmd(MMC_CMD(55), 0, MMC_RESPONSE_R1, NULL);
+		if (ret != 0) {
+			return ret;
+		}
+
+		/* ACMD41: SD_SEND_OP_COND */
+		ret = sdmmc_send_cmd(MMC_ACMD(41), OCR_HCS |
+			mmc_dev_info->ocr_voltage, MMC_RESPONSE_R3,
+			&resp_data[0]);
+		if (ret != 0) {
+			return ret;
+		}
+
+		if ((resp_data[0] & OCR_POWERUP) != 0U) {
+			mmc_ocr_value = resp_data[0];
+
+			if ((mmc_ocr_value & OCR_HCS) != 0U) {
+				mmc_dev_info->mmc_dev_type = MMC_IS_SD_HC;
+			} else {
+				mmc_dev_info->mmc_dev_type = MMC_IS_SD;
+			}
+
+			return 0;
+		}
+
+		mdelay(10);
+	}
+
+	ERROR("ACMD41 failed after %d retries\n", SEND_SDMMC_OP_COND_MAX_RETRIES);
+
+	return -EIO;
+}
+
+static int sdmmc_reset_to_idle(void)
+{
+	int ret;
+
+	/* CMD0: reset to IDLE */
+	ret = sdmmc_send_cmd(MMC_CMD(0), 0, 0, NULL);
+	if (ret != 0) {
+		return ret;
+	}
+
+	mdelay(2);
+
+	return 0;
+}
+
+static int sdmmc_send_op_cond(void)
+{
+	int ret, n;
+	unsigned int resp_data[4];
+
+	ret = sdmmc_reset_to_idle();
+	if (ret != 0) {
+		return ret;
+	}
+
+	for (n = 0; n < SEND_SDMMC_OP_COND_MAX_RETRIES; n++) {
+		ret = sdmmc_send_cmd(MMC_CMD(1), OCR_SECTOR_MODE |
+				   OCR_VDD_MIN_2V7 | OCR_VDD_MIN_1V7,
+				   MMC_RESPONSE_R3, &resp_data[0]);
+		if (ret != 0) {
+			return ret;
+		}
+
+		if ((resp_data[0] & OCR_POWERUP) != 0U) {
+			mmc_ocr_value = resp_data[0];
+			return 0;
+		}
+
+		mdelay(10);
+	}
+
+	ERROR("CMD1 failed after %d retries\n", SEND_SDMMC_OP_COND_MAX_RETRIES);
+
+	return -EIO;
+}
+
+static int sdmmc_enumerate(unsigned int clk, unsigned int bus_width)
+{
+	int ret;
+	unsigned int resp_data[4];
+
+	ops->init();
+
+	ret = sdmmc_reset_to_idle();
+	if (ret != 0) {
+		return ret;
+	}
+
+	if (mmc_dev_info->mmc_dev_type == MMC_IS_EMMC) {
+		ret = sdmmc_send_op_cond();
+	} else {
+		/* CMD8: Send Interface Condition Command */
+		ret = sdmmc_send_cmd(MMC_CMD(8), VHS_2_7_3_6_V | CMD8_CHECK_PATTERN,
+				   MMC_RESPONSE_R5, &resp_data[0]);
+
+		if ((ret == 0) && ((resp_data[0] & 0xffU) == CMD8_CHECK_PATTERN)) {
+			ret = sdmmc_sd_send_op_cond();
+		}
+	}
+	if (ret != 0) {
+		return ret;
+	}
+
+	/* CMD2: Card Identification */
+	ret = sdmmc_send_cmd(MMC_CMD(2), 0, MMC_RESPONSE_R2, NULL);
+	if (ret != 0) {
+		return ret;
+	}
+
+	/* CMD3: Set Relative Address */
+	if (mmc_dev_info->mmc_dev_type == MMC_IS_EMMC) {
+		rca = MMC_FIX_RCA;
+		ret = sdmmc_send_cmd(MMC_CMD(3), rca << RCA_SHIFT_OFFSET,
+				   MMC_RESPONSE_R1, NULL);
+		if (ret != 0) {
+			return ret;
+		}
+	} else {
+		ret = sdmmc_send_cmd(MMC_CMD(3), 0,
+				   MMC_RESPONSE_R6, &resp_data[0]);
+		if (ret != 0) {
+			return ret;
+		}
+
+		rca = (resp_data[0] & 0xFFFF0000U) >> 16;
+	}
+
+	/* CMD9: CSD Register */
+	ret = sdmmc_send_cmd(MMC_CMD(9), rca << RCA_SHIFT_OFFSET,
+			   MMC_RESPONSE_R2, &resp_data[0]);
+	if (ret != 0) {
+		return ret;
+	}
+
+	memcpy(&mmc_csd, &resp_data, sizeof(resp_data));
+
+	/* CMD7: Select Card */
+	ret = sdmmc_send_cmd(MMC_CMD(7), rca << RCA_SHIFT_OFFSET,
+			   MMC_RESPONSE_R1, NULL);
+	if (ret != 0) {
+		return ret;
+	}
+
+	do {
+		ret = sdmmc_device_state();
+		if (ret < 0) {
+			return ret;
+		}
+	} while (ret != MMC_STATE_TRAN);
+
+	ret = sdmmc_set_ios(clk, bus_width);
+	if (ret != 0) {
+		return ret;
+	}
+
+	ret = sdmmc_fill_device_info();
+	if (ret != 0) {
+		return ret;
+	}
+
+	if (is_sd_cmd6_enabled() &&
+	    (mmc_dev_info->mmc_dev_type == MMC_IS_SD_HC)) {
+		/* Try to switch to High Speed Mode */
+		ret = sdmmc_sd_switch(SD_SWITCH_FUNC_CHECK, 1U, 1U);
+		if (ret != 0) {
+			return ret;
+		}
+
+		if ((sd_switch_func_status.support_g1 & BIT(9)) == 0U) {
+			/* High speed not supported, keep default speed */
+			return 0;
+		}
+
+		ret = sdmmc_sd_switch(SD_SWITCH_FUNC_SWITCH, 1U, 1U);
+		if (ret != 0) {
+			return ret;
+		}
+
+		if ((sd_switch_func_status.sel_g2_g1 & 0x1U) == 0U) {
+			/* Cannot switch to high speed, keep default speed */
+			return 0;
+		}
+
+		mmc_dev_info->max_bus_freq = 50000000U;
+		ret = ops->set_ios(clk, bus_width);
+	}
+
+	return ret;
+}
+
+size_t sdmmc_read_blocks(int lba, uintptr_t buf, size_t size)
+{
+	int ret;
+	unsigned int cmd_idx, cmd_arg;
+
+	assert((ops != NULL) &&
+	       (ops->read != NULL) &&
+	       (size != 0U) &&
+	       ((size & MMC_BLOCK_MASK) == 0U));
+
+	ret = ops->prepare(lba, buf, size);
+	if (ret != 0) {
+		return 0;
+	}
+
+	if (is_cmd23_enabled()) {
+		/* Set block count */
+		ret = sdmmc_send_cmd(MMC_CMD(23), size / MMC_BLOCK_SIZE,
+				   MMC_RESPONSE_R1, NULL);
+		if (ret != 0) {
+			return 0;
+		}
+
+		cmd_idx = MMC_CMD(18);
+	} else {
+		if (size > MMC_BLOCK_SIZE) {
+			cmd_idx = MMC_CMD(18);
+		} else {
+			cmd_idx = MMC_CMD(17);
+		}
+	}
+
+	if (((mmc_ocr_value & OCR_ACCESS_MODE_MASK) == OCR_BYTE_MODE) &&
+	    (mmc_dev_info->mmc_dev_type != MMC_IS_SD_HC)) {
+		cmd_arg = lba * MMC_BLOCK_SIZE;
+	} else {
+		cmd_arg = lba;
+	}
+
+	ret = sdmmc_send_cmd(cmd_idx, cmd_arg, MMC_RESPONSE_R1, NULL);
+	if (ret != 0) {
+		return 0;
+	}
+
+	ret = ops->read(lba, buf, size);
+	if (ret != 0) {
+		return 0;
+	}
+
+	/* Wait buffer empty */
+	do {
+		ret = sdmmc_device_state();
+		if (ret < 0) {
+			return 0;
+		}
+	} while ((ret != MMC_STATE_TRAN) && (ret != MMC_STATE_DATA));
+
+	if (!is_cmd23_enabled() && (size > MMC_BLOCK_SIZE)) {
+		ret = sdmmc_send_cmd(MMC_CMD(12), 0, MMC_RESPONSE_R1B, NULL);
+		if (ret != 0) {
+			return 0;
+		}
+	}
+
+	return size;
+}
+
+size_t sdmmc_write_blocks(int lba, const uintptr_t buf, size_t size)
+{
+	int ret;
+	unsigned int cmd_idx, cmd_arg;
+
+	assert((ops != NULL) &&
+	       (ops->write != NULL) &&
+	       (size != 0U) &&
+	       ((buf & MMC_BLOCK_MASK) == 0U) &&
+	       ((size & MMC_BLOCK_MASK) == 0U));
+
+	ret = ops->prepare(lba, buf, size);
+	if (ret != 0) {
+		return 0;
+	}
+
+	if (is_cmd23_enabled()) {
+		/* Set block count */
+		ret = sdmmc_send_cmd(MMC_CMD(23), size / MMC_BLOCK_SIZE,
+				   MMC_RESPONSE_R1, NULL);
+		if (ret != 0) {
+			return 0;
+		}
+
+		cmd_idx = MMC_CMD(25);
+	} else {
+		if (size > MMC_BLOCK_SIZE) {
+			cmd_idx = MMC_CMD(25);
+		} else {
+			cmd_idx = MMC_CMD(24);
+		}
+	}
+
+	if ((mmc_ocr_value & OCR_ACCESS_MODE_MASK) == OCR_BYTE_MODE) {
+		cmd_arg = lba * MMC_BLOCK_SIZE;
+	} else {
+		cmd_arg = lba;
+	}
+
+	ret = sdmmc_send_cmd(cmd_idx, cmd_arg, MMC_RESPONSE_R1, NULL);
+	if (ret != 0) {
+		return 0;
+	}
+
+	ret = ops->write(lba, buf, size);
+	if (ret != 0) {
+		return 0;
+	}
+
+	/* Wait buffer empty */
+	do {
+		ret = sdmmc_device_state();
+		if (ret < 0) {
+			return 0;
+		}
+	} while ((ret != MMC_STATE_TRAN) && (ret != MMC_STATE_RCV));
+
+	if (!is_cmd23_enabled() && (size > MMC_BLOCK_SIZE)) {
+		ret = sdmmc_send_cmd(MMC_CMD(12), 0, MMC_RESPONSE_R1B, NULL);
+		if (ret != 0) {
+			return 0;
+		}
+	}
+
+	return size;
+}
+
+int sd_or_mmc_init(const struct mmc_ops *ops_ptr, unsigned int clk,
+	     unsigned int width, unsigned int flags,
+	     struct mmc_device_info *device_info)
+{
+	assert((ops_ptr != NULL) &&
+	       (ops_ptr->init != NULL) &&
+	       (ops_ptr->send_cmd != NULL) &&
+	       (ops_ptr->set_ios != NULL) &&
+	       (ops_ptr->prepare != NULL) &&
+	       (ops_ptr->read != NULL) &&
+	       (ops_ptr->write != NULL) &&
+	       (device_info != NULL) &&
+	       (clk != 0) &&
+	       ((width == MMC_BUS_WIDTH_1) ||
+		(width == MMC_BUS_WIDTH_4) ||
+		(width == MMC_BUS_WIDTH_8) ||
+		(width == MMC_BUS_WIDTH_DDR_4) ||
+		(width == MMC_BUS_WIDTH_DDR_8)));
+
+	ops = ops_ptr;
+	mmc_flags = flags;
+	mmc_dev_info = device_info;
+
+	return sdmmc_enumerate(clk, width);
+}
+
+int sdmmc_init(handoff *hoff_ptr, struct cdns_sdmmc_params *params, struct mmc_device_info *info)
+{
+	int result = 0;
+
+	/* SDMMC pin mux configuration */
+	sdmmc_pin_config();
+	cdns_set_sdmmc_var(&sdmmc_combo_phy_reg, &sdmmc_sdhc_reg);
+	result = cdns_sd_host_init(&sdmmc_combo_phy_reg, &sdmmc_sdhc_reg);
+	if (result < 0) {
+		return result;
+	}
+
+	assert((params != NULL) &&
+	       ((params->reg_base & MMC_BLOCK_MASK) == 0) &&
+	       ((params->desc_base & MMC_BLOCK_MASK) == 0) &&
+	       ((params->desc_size & MMC_BLOCK_MASK) == 0) &&
+	       ((params->reg_pinmux & MMC_BLOCK_MASK) == 0) &&
+	       ((params->reg_phy & MMC_BLOCK_MASK) == 0) &&
+	       (params->desc_size > 0) &&
+	       (params->clk_rate > 0) &&
+	       ((params->bus_width == MMC_BUS_WIDTH_1) ||
+		(params->bus_width == MMC_BUS_WIDTH_4) ||
+		(params->bus_width == MMC_BUS_WIDTH_8)));
+
+	memcpy(&cdns_params, params, sizeof(struct cdns_sdmmc_params));
+	cdns_params.cdn_sdmmc_dev_type = info->mmc_dev_type;
+	cdns_params.cdn_sdmmc_dev_mode = SD_DS;
+
+	result = sd_or_mmc_init(&cdns_sdmmc_ops, params->clk_rate, params->bus_width,
+		params->flags, info);
+
+	return result;
+}
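
For reference, sdmmc_read_blocks() above reduces to: prepare the transfer, optionally pre-program the block count with CMD23, issue CMD17 (single block) or CMD18 (multi-block) with a byte- or block-based address depending on the OCR access mode, poll the card state, and send CMD12 to stop an open-ended multi-block transfer when CMD23 is not used. A minimal, hypothetical call site is sketched below; the buffer, its alignment attribute, the function name and the include paths are illustrative assumptions, not part of this driver::

    #include <stdint.h>

    #include <drivers/mmc.h>

    #include "sdmmc.h"

    /* Placeholder DMA buffer; a real caller sizes and places this per platform. */
    static uint8_t io_buf[MMC_BLOCK_SIZE] __attribute__((aligned(MMC_BLOCK_SIZE)));

    static int read_one_block(int lba)
    {
    	/* The driver returns the byte count on success and 0 on any error. */
    	if (sdmmc_read_blocks(lba, (uintptr_t)io_buf, sizeof(io_buf)) !=
    	    sizeof(io_buf)) {
    		return -1;
    	}

    	return 0;
    }
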
diff --git a/plat/intel/soc/common/drivers/sdmmc/sdmmc.h b/plat/intel/soc/common/drivers/sdmmc/sdmmc.h
new file mode 100644
index 0000000..16c6b04
--- /dev/null
+++ b/plat/intel/soc/common/drivers/sdmmc/sdmmc.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SDMMC_H
+#define SDMMC_H
+
+#include <lib/mmio.h>
+#include "socfpga_handoff.h"
+
+#define PERIPHERAL_SDMMC_MASK			0x60
+#define PERIPHERAL_SDMMC_OFFSET			6
+
+#define DEFAULT_SDMMC_MAX_RETRIES		5
+#define SEND_SDMMC_OP_COND_MAX_RETRIES		100
+#define SDMMC_MULT_BY_512K_SHIFT		19
+
+static const unsigned char tran_speed_base[16] = {
+	0, 10, 12, 13, 15, 20, 26, 30, 35, 40, 45, 52, 55, 60, 70, 80
+};
+
+static const unsigned char sd_tran_speed_base[16] = {
+	0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80
+};
+
+
+/* FUNCTION DECLARATIONS */
+/*
+ * @brief SDMMC controller initialization function
+ *
+ * @hoff_ptr: hand-off data; @params: controller settings; @info: device info
+ * Return: 0 on success, a negative errno on failure
+ */
+int sdmmc_init(handoff *hoff_ptr, struct cdns_sdmmc_params *params,
+	     struct mmc_device_info *info);
+int sd_or_mmc_init(const struct mmc_ops *ops_ptr, unsigned int clk,
+	     unsigned int width, unsigned int flags,
+	     struct mmc_device_info *device_info);
+void sdmmc_pin_config(void);
+#endif
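
A hypothetical platform call site for the two entry points declared here; every field value below is made up for illustration (real values come from the platform memory map and the handoff data), and the wrapper name is an assumption::

    #include <drivers/cadence/cdns_sdmmc.h>
    #include <drivers/mmc.h>

    #include "sdmmc.h"

    static struct cdns_sdmmc_params params = {
    	.reg_base	= 0x10808000,	/* assumed controller base */
    	.desc_base	= 0x00100000,	/* assumed descriptor area */
    	.desc_size	= 512,
    	.clk_rate	= 50000000,
    	.bus_width	= MMC_BUS_WIDTH_4,
    };

    static struct mmc_device_info dev_info = {
    	.mmc_dev_type	= MMC_IS_SD_HC,
    };

    /* Hypothetical wrapper: pin mux + Cadence host init + SD/MMC enumeration. */
    int plat_boot_device_init(handoff *hoff_ptr)
    {
    	return sdmmc_init(hoff_ptr, &params, &dev_info);
    }
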
diff --git a/plat/intel/soc/common/drivers/wdt/watchdog.h b/plat/intel/soc/common/drivers/wdt/watchdog.h
index 2c72463..4ee4cff 100644
--- a/plat/intel/soc/common/drivers/wdt/watchdog.h
+++ b/plat/intel/soc/common/drivers/wdt/watchdog.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -7,7 +7,11 @@
 #ifndef CAD_WATCHDOG_H
 #define CAD_WATCHDOG_H
 
+#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+#define WDT_BASE			(0x10D00200)
+#else
 #define WDT_BASE			(0xFFD00200)
+#endif
 #define WDT_REG_SIZE_OFFSET		(0x4)
 #define WDT_MIN_CYCLES			(65536)
 #define WDT_PERIOD			(20)
diff --git a/plat/intel/soc/common/include/platform_def.h b/plat/intel/soc/common/include/platform_def.h
index 78deebc..49fc567 100644
--- a/plat/intel/soc/common/include/platform_def.h
+++ b/plat/intel/soc/common/include/platform_def.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
- * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -25,12 +25,6 @@
 /* sysmgr.boot_scratch_cold4 & 5 used for CPU release address for SPL */
 #define PLAT_CPU_RELEASE_ADDR			0xffd12210
 
-/*
- * sysmgr.boot_scratch_cold6 & 7 (64bit) are used to indicate L2 reset
- * is done and HPS should trigger warm reset via RMR_EL3.
- */
-#define L2_RESET_DONE_REG			0xFFD12218
-
 /* Magic word to indicate L2 reset is completed */
 #define L2_RESET_DONE_STATUS			0x1228E5E7
 
diff --git a/plat/intel/soc/common/include/socfpga_f2sdram_manager.h b/plat/intel/soc/common/include/socfpga_f2sdram_manager.h
index b30a11e..1bebfc9 100644
--- a/plat/intel/soc/common/include/socfpga_f2sdram_manager.h
+++ b/plat/intel/soc/common/include/socfpga_f2sdram_manager.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -14,6 +14,7 @@
 #define SOCFPGA_F2SDRAMMGR_SIDEBANDMGR_FLAGOUTCLR0	0x54
 #define SOCFPGA_F2SDRAMMGR_SIDEBANDMGR_FLAGOUTSET0	0x50
 
+#define FLAGOUTCLR0_F2SDRAM0_ENABLE		(BIT(8))
 #define FLAGOUTSETCLR_F2SDRAM0_ENABLE		(BIT(1))
 #define FLAGOUTSETCLR_F2SDRAM1_ENABLE		(BIT(4))
 #define FLAGOUTSETCLR_F2SDRAM2_ENABLE		(BIT(7))
diff --git a/plat/intel/soc/common/include/socfpga_handoff.h b/plat/intel/soc/common/include/socfpga_handoff.h
index ba0f7f3..b2913c7 100644
--- a/plat/intel/soc/common/include/socfpga_handoff.h
+++ b/plat/intel/soc/common/include/socfpga_handoff.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -7,13 +7,15 @@
 #ifndef	HANDOFF_H
 #define	HANDOFF_H
 
-#define HANDOFF_MAGIC_HEADER		0x424f4f54	/* BOOT */
-#define HANDOFF_MAGIC_PINMUX_SEL	0x504d5558	/* PMUX */
-#define HANDOFF_MAGIC_IOCTLR		0x494f4354	/* IOCT */
-#define HANDOFF_MAGIC_FPGA		0x46504741	/* FPGA */
-#define HANDOFF_MAGIC_IODELAY		0x444c4159	/* DLAY */
-#define HANDOFF_MAGIC_CLOCK		0x434c4b53	/* CLKS */
-#define HANDOFF_MAGIC_MISC		0x4d495343	/* MISC */
+#define HANDOFF_MAGIC_HEADER			0x424f4f54	/* BOOT */
+#define HANDOFF_MAGIC_PINMUX_SEL		0x504d5558	/* PMUX */
+#define HANDOFF_MAGIC_IOCTLR			0x494f4354	/* IOCT */
+#define HANDOFF_MAGIC_FPGA				0x46504741	/* FPGA */
+#define HANDOFF_MAGIC_IODELAY			0x444c4159	/* DLAY */
+#define HANDOFF_MAGIC_CLOCK				0x434c4b53	/* CLKS */
+#define HANDOFF_MAGIC_MISC				0x4d495343	/* MISC */
+#define HANDOFF_MAGIC_PERIPHERAL		0x50455249	/* PERIPHERAL */
+#define HANDOFF_MAGIC_DDR				0x5344524d	/* DDR */
 
 #include <socfpga_plat_def.h>
 
@@ -39,8 +41,9 @@
 	uint32_t	pinmux_fpga_magic;
 	uint32_t	pinmux_fpga_length;
 	uint32_t	_pad_0x338_0x340[2];
-	uint32_t	pinmux_fpga_array[42];	/* offset, value */
-	uint32_t	_pad_0x3e8_0x3f0[2];
+	uint32_t	pinmux_fpga_array[44];	/* offset, value */
+	/* TODO: Padding temporarily removed to accommodate the extra handoff data */
+	// uint32_t	_pad_0x3e8_0x3f0[2];
 
 	/* pinmux configuration - io delay */
 	uint32_t	pinmux_delay_magic;
@@ -49,7 +52,6 @@
 	uint32_t	pinmux_iodelay_array[96];	/* offset, value */
 
 	/* clock configuration */
-
 #if PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10
 	uint32_t	clock_magic;
 	uint32_t	clock_length;
@@ -120,16 +122,68 @@
 	uint32_t	hps_osc_clk_h;
 	uint32_t	fpga_clk_hz;
 	uint32_t	_pad_0x604_0x610[3];
+#elif PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+	uint32_t	clock_magic;
+	uint32_t	clock_length;
+	uint32_t	_pad_0x588_0x590[2];
+	uint32_t	main_pll_nocclk;
+	uint32_t	main_pll_nocdiv;
+	uint32_t	main_pll_pllglob;
+	uint32_t	main_pll_fdbck;
+	uint32_t	main_pll_pllc0;
+	uint32_t	main_pll_pllc1;
+	uint32_t	main_pll_pllc2;
+	uint32_t	main_pll_pllc3;
+	uint32_t	main_pll_pllm;
+	uint32_t	per_pll_emacctl;
+	uint32_t	per_pll_gpiodiv;
+	uint32_t	per_pll_pllglob;
+	uint32_t	per_pll_fdbck;
+	uint32_t	per_pll_pllc0;
+	uint32_t	per_pll_pllc1;
+	uint32_t	per_pll_pllc2;
+	uint32_t	per_pll_pllc3;
+	uint32_t	per_pll_pllm;
+	uint32_t	alt_emacactr;
+	uint32_t	alt_emacbctr;
+	uint32_t	alt_emacptpctr;
+	uint32_t	alt_gpiodbctr;
+	uint32_t	alt_sdmmcctr;
+	uint32_t	alt_s2fuser0ctr;
+	uint32_t	alt_s2fuser1ctr;
+	uint32_t	alt_psirefctr;
+	/* TODO: Temporarily added for the clock manager. */
+	uint32_t	qspi_clk_khz;
+	uint32_t	hps_osc_clk_hz;
+	uint32_t	fpga_clk_hz;
+	/* TODO: Temporarily added for the clock manager. */
+	uint32_t	ddr_reset_type;
+	/* TODO: Temporarily added for the clock manager. */
+	uint32_t	hps_status_coldreset;
+	/* TODO: Padding temporarily removed to accommodate the extra handoff data */
+	//uint32_t	_pad_0x604_0x610[3];
 #endif
 	/* misc configuration */
 	uint32_t	misc_magic;
 	uint32_t	misc_length;
 	uint32_t	_pad_0x618_0x620[2];
+
+#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+	/* peripheral configuration - select */
+	uint32_t	peripheral_pwr_gate_magic;
+	uint32_t	peripheral_pwr_gate_length;
+	uint32_t	_pad_0x08_0x0C[2];
+	uint32_t	peripheral_pwr_gate_array;	/* offset, value */
+
+	/* ddr configuration - select */
+	uint32_t	ddr_magic;
+	uint32_t	ddr_length;
+	uint32_t	_pad_0x1C_0x20[2];
+	uint32_t	ddr_array[4];	/* offset, value */
+#endif
 } handoff;
 
 int verify_handoff_image(handoff *hoff_ptr, handoff *reverse_hoff_ptr);
 int socfpga_get_handoff(handoff *hoff_ptr);
 
 #endif
-
-
diff --git a/plat/intel/soc/common/include/socfpga_mailbox.h b/plat/intel/soc/common/include/socfpga_mailbox.h
index 564b4ee..77d3af9 100644
--- a/plat/intel/soc/common/include/socfpga_mailbox.h
+++ b/plat/intel/soc/common/include/socfpga_mailbox.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,8 +9,11 @@
 
 #include <lib/utils_def.h>
 
-
+#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+#define MBOX_OFFSET					0x10a30000
+#else
 #define MBOX_OFFSET					0xffa30000
+#endif
 
 #define MBOX_ATF_CLIENT_ID				0x1U
 #define MBOX_MAX_JOB_ID					0xFU
@@ -193,9 +196,9 @@
 #define RSU_VERSION_ACMF_MASK				0xff00
 
 /* Config Status Macros */
-#define CONFIG_STATUS_WORD_SIZE		16U
-#define CONFIG_STATUS_FW_VER_OFFSET	1
-#define CONFIG_STATUS_FW_VER_MASK	0x00FFFFFF
+#define CONFIG_STATUS_WORD_SIZE			16U
+#define CONFIG_STATUS_FW_VER_OFFSET		1
+#define CONFIG_STATUS_FW_VER_MASK		0x00FFFFFF
 
 /* Data structure */
 
@@ -233,6 +236,7 @@
 			unsigned int *resp_len);
 
 void mailbox_reset_cold(void);
+void mailbox_reset_warm(uint32_t reset_type);
 void mailbox_clear_response(void);
 
 int intel_mailbox_get_config_status(uint32_t cmd, bool init_done);
diff --git a/plat/intel/soc/common/include/socfpga_noc.h b/plat/intel/soc/common/include/socfpga_noc.h
index e3c0f73..3fc3f81 100644
--- a/plat/intel/soc/common/include/socfpga_noc.h
+++ b/plat/intel/soc/common/include/socfpga_noc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, Intel Corporation. All rights reserved.
+ * Copyright (c) 2020-2023, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -74,6 +74,10 @@
 #define SOCFPGA_NOC_FW_L4_SYS_SCR_WATCHDOG2			0x0070
 #define SOCFPGA_NOC_FW_L4_SYS_SCR_WATCHDOG3			0x0074
 #define SOCFPGA_NOC_FW_L4_SYS_SCR_DAP				0x0078
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_WATCHDOG4			0x007c
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_PWRMGR			0x0080
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_USB1_RXECC			0x0084
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_USB1_TXECC			0x0088
 #define SOCFPGA_NOC_FW_L4_SYS_SCR_L4_NOC_PROBES			0x0090
 #define SOCFPGA_NOC_FW_L4_SYS_SCR_L4_NOC_QOS			0x0094
 
diff --git a/plat/intel/soc/common/include/socfpga_reset_manager.h b/plat/intel/soc/common/include/socfpga_reset_manager.h
index cce16ab..9d06a3d 100644
--- a/plat/intel/soc/common/include/socfpga_reset_manager.h
+++ b/plat/intel/soc/common/include/socfpga_reset_manager.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,10 @@
 
 #include "socfpga_plat_def.h"
 
+/* Status Response */
+#define RSTMGR_RET_OK				0
+#define RSTMGR_RET_ERROR			-1
+
 #define SOCFPGA_BRIDGE_ENABLE			BIT(0)
 #define SOCFPGA_BRIDGE_HAS_MASK			BIT(1)
 
@@ -22,28 +26,51 @@
 /* Register Mapping */
 
 #define SOCFPGA_RSTMGR_STAT			0x000
+#define SOCFPGA_RSTMGR_MISCSTAT			0x008
 #define SOCFPGA_RSTMGR_HDSKEN			0x010
 #define SOCFPGA_RSTMGR_HDSKREQ			0x014
 #define SOCFPGA_RSTMGR_HDSKACK			0x018
+#define SOCFPGA_RSTMGR_HDSKSTALL		0x01C
+#if PLATFORM_MODEL != PLAT_SOCFPGA_AGILEX5
 #define SOCFPGA_RSTMGR_MPUMODRST		0x020
+#endif
 #define SOCFPGA_RSTMGR_PER0MODRST		0x024
 #define SOCFPGA_RSTMGR_PER1MODRST		0x028
-#define SOCFPGA_RSTMGR_BRGMODRST		0x02c
+#define SOCFPGA_RSTMGR_BRGMODRST		0x02C
+#if PLATFORM_MODEL != PLAT_SOCFPGA_AGILEX5
 #define SOCFPGA_RSTMGR_COLDMODRST		0x034
+#endif
+#define SOCFPGA_RSTMGR_DBGMODRST		0x03C
+#define SOCFPGA_RSTMGR_BRGWARMMASK		0x04C
+#define SOCFPGA_RSTMGR_TSTSTA			0x05C
 #define SOCFPGA_RSTMGR_HDSKTIMEOUT		0x064
+#define SOCFPGA_RSTMGR_DBGHDSKTIMEOUT		0x06C
+#define SOCFPGA_RSTMGR_DBGRSTCMPLT		0x070
+#define SOCFPGA_RSTMGR_HPSRSTCMPLT		0x080
+#define SOCFPGA_RSTMGR_CPUINREST		0x090
+#define SOCFPGA_RSTMGR_CPURSTRELEASE		0x094
+#define SOCFPGA_RSTMGR_CPUBASELOW_0		0x098
+#define SOCFPGA_RSTMGR_CPUBASEHIGH_0		0x09C
+#define SOCFPGA_RSTMGR_CPUBASELOW_1		0x0A0
+#define SOCFPGA_RSTMGR_CPUBASEHIGH_1		0x0A4
+#define SOCFPGA_RSTMGR_CPUBASELOW_2		0x0A8
+#define SOCFPGA_RSTMGR_CPUBASEHIGH_2		0x0AC
+#define SOCFPGA_RSTMGR_CPUBASELOW_3		0x0B0
+#define SOCFPGA_RSTMGR_CPUBASEHIGH_3		0x0B4
 
 /* Field Mapping */
-
-#define RSTMGR_PER0MODRST_EMAC0			0x00000001
-#define RSTMGR_PER0MODRST_EMAC1			0x00000002
-#define RSTMGR_PER0MODRST_EMAC2			0x00000004
+/* PER0MODRST */
+#define RSTMGR_PER0MODRST_EMAC0			0x00000001	/* TSN0 */
+#define RSTMGR_PER0MODRST_EMAC1			0x00000002	/* TSN1 */
+#define RSTMGR_PER0MODRST_EMAC2			0x00000004	/* TSN2 */
 #define RSTMGR_PER0MODRST_USB0			0x00000008
 #define RSTMGR_PER0MODRST_USB1			0x00000010
 #define RSTMGR_PER0MODRST_NAND			0x00000020
+#define RSTMGR_PER0MODRST_SOFTPHY		0x00000040
 #define RSTMGR_PER0MODRST_SDMMC			0x00000080
-#define RSTMGR_PER0MODRST_EMAC0OCP		0x00000100
-#define RSTMGR_PER0MODRST_EMAC1OCP		0x00000200
-#define RSTMGR_PER0MODRST_EMAC2OCP		0x00000400
+#define RSTMGR_PER0MODRST_EMAC0OCP		0x00000100	/* TSN0ECC */
+#define RSTMGR_PER0MODRST_EMAC1OCP		0x00000200	/* TSN1ECC */
+#define RSTMGR_PER0MODRST_EMAC2OCP		0x00000400	/* TSN2ECC */
 #define RSTMGR_PER0MODRST_USB0OCP		0x00000800
 #define RSTMGR_PER0MODRST_USB1OCP		0x00001000
 #define RSTMGR_PER0MODRST_NANDOCP		0x00002000
@@ -64,6 +91,7 @@
 #define RSTMGR_PER0MODRST_DMAIF6		0x40000000
 #define RSTMGR_PER0MODRST_DMAIF7		0x80000000
 
+/* PER1MODRST */
 #define RSTMGR_PER1MODRST_WATCHDOG0		0x00000001
 #define RSTMGR_PER1MODRST_WATCHDOG1		0x00000002
 #define RSTMGR_PER1MODRST_WATCHDOG2		0x00000004
@@ -77,31 +105,121 @@
 #define RSTMGR_PER1MODRST_I2C2			0x00000400
 #define RSTMGR_PER1MODRST_I2C3			0x00000800
 #define RSTMGR_PER1MODRST_I2C4			0x00001000
+#define RSTMGR_PER1MODRST_I3C0			0x00002000
+#define RSTMGR_PER1MODRST_I3C1			0x00004000
 #define RSTMGR_PER1MODRST_UART0			0x00010000
 #define RSTMGR_PER1MODRST_UART1			0x00020000
 #define RSTMGR_PER1MODRST_GPIO0			0x01000000
 #define RSTMGR_PER1MODRST_GPIO1			0x02000000
+#define RSTMGR_PER1MODRST_WATCHDOG4		0x04000000
 
+/* HDSKEN */
+#define RSTMGR_HDSKEN_EMIF_FLUSH		0x00000001
 #define RSTMGR_HDSKEN_FPGAHSEN			0x00000004
 #define RSTMGR_HDSKEN_ETRSTALLEN		0x00000008
-#define RSTMGR_HDSKEN_L2FLUSHEN			0x00000100
+#define RSTMGR_HDSKEN_LWS2F_FLUSH		0x00000200
+#define RSTMGR_HDSKEN_S2F_FLUSH			0x00000400
+#define RSTMGR_HDSKEN_F2SDRAM_FLUSH		0x00000800
+#define RSTMGR_HDSKEN_F2S_FLUSH			0x00001000
 #define RSTMGR_HDSKEN_L3NOC_DBG			0x00010000
 #define RSTMGR_HDSKEN_DEBUG_L3NOC		0x00020000
-#define RSTMGR_HDSKEN_SDRSELFREFEN		0x00000001
 
-#define RSTMGR_HDSKEQ_FPGAHSREQ			0x4
+/* HDSKREQ */
+#define RSTMGR_HDSKREQ_EMIFFLUSHREQ		0x00000001
+#define RSTMGR_HDSKREQ_ETRSTALLREQ		0x00000008
+#define RSTMGR_HDSKREQ_LWS2F_FLUSH		0x00000200
+#define RSTMGR_HDSKREQ_S2F_FLUSH		0x00000400
+#define RSTMGR_HDSKREQ_F2SDRAM_FLUSH		0x00000800
+#define RSTMGR_HDSKREQ_F2S_FLUSH		0x00001000
+#define RSTMGR_HDSKREQ_L3NOC_DBG		0x00010000
+#define RSTMGR_HDSKREQ_DEBUG_L3NOC		0x00020000
+#define RSTMGR_HDSKREQ_FPGAHSREQ		0x00000004
+#define RSTMGR_HDSKREQ_LWSOC2FPGAREQ		0x00000200
+#define RSTMGR_HDSKREQ_SOC2FPGAREQ		0x00000400
+#define RSTMGR_HDSKREQ_F2SDRAM0REQ		0x00000800
+#define RSTMGR_HDSKREQ_FPGA2SOCREQ		0x00001000
 
-#define RSTMGR_BRGMODRST_SOC2FPGA		0x1
-#define RSTMGR_BRGMODRST_LWHPS2FPGA		0x2
-#define RSTMGR_BRGMODRST_FPGA2SOC		0x4
-#define RSTMGR_BRGMODRST_F2SSDRAM0		0x8
+/* HDSKACK */
+#define RSTMGR_HDSKACK_EMIFFLUSHREQ		0x00000001
+#define RSTMGR_HDSKACK_FPGAHSREQ		0x00000004
+#define RSTMGR_HDSKACK_ETRSTALLREQ		0x00000008
+#define RSTMGR_HDSKACK_LWS2F_FLUSH		0x00000200
+#define RSTMGR_HDSKACK_S2F_FLUSH		0x00000400
+#define RSTMGR_HDSKACK_F2SDRAM_FLUSH		0x00000800
+#define RSTMGR_HDSKACK_F2S_FLUSH		0x00001000
+#define RSTMGR_HDSKACK_L3NOC_DBG		0x00010000
+#define RSTMGR_HDSKACK_DEBUG_L3NOC		0x00020000
+#define RSTMGR_HDSKACK_FPGAHSACK		0x00000004
+#define RSTMGR_HDSKACK_LWSOC2FPGAACK		0x00000200
+#define RSTMGR_HDSKACK_SOC2FPGAACK		0x00000400
+#define RSTMGR_HDSKACK_F2SDRAM0ACK		0x00000800
+#define RSTMGR_HDSKACK_FPGA2SOCACK		0x00001000
+#define RSTMGR_HDSKACK_FPGAHSACK_DASRT		0x00000000
+#define RSTMGR_HDSKACK_F2SDRAM0ACK_DASRT	0x00000000
+#define RSTMGR_HDSKACK_FPGA2SOCACK_DASRT	0x00000000
+
+/* HDSKSTALL */
+#define RSTMGR_HDSKACK_ETRSTALLWARMRST		0x00000001
+
+/* BRGMODRST */
+#define RSTMGR_BRGMODRST_SOC2FPGA		0x00000001
+#define RSTMGR_BRGMODRST_LWHPS2FPGA		0x00000002
+#define RSTMGR_BRGMODRST_FPGA2SOC		0x00000004
+#define RSTMGR_BRGMODRST_F2SSDRAM0		0x00000008
+#if PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10
 #define RSTMGR_BRGMODRST_F2SSDRAM1		0x10
 #define RSTMGR_BRGMODRST_F2SSDRAM2		0x20
-#define RSTMGR_BRGMODRST_MPFE			0x40
 #define RSTMGR_BRGMODRST_DDRSCH			0x40
+#elif PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+#define RSTMGR_BRGMODRST_F2SSDRAM1		0x10
+#define RSTMGR_BRGMODRST_F2SSDRAM2		0x20
+#endif
+
+#define RSTMGR_BRGMODRST_MPFE			0x40
+
+/* DBGMODRST */
+#define RSTMGR_DBGMODRST_DBG_RST		0x00000001
 
-#define RSTMGR_HDSKREQ_FPGAHSREQ		(BIT(2))
-#define RSTMGR_HDSKACK_FPGAHSACK_MASK		(BIT(2))
+/* BRGMODRSTMASK */
+#define RSTMGR_BRGMODRSTMASK_SOC2FPGA		0x00000001
+#define RSTMGR_BRGMODRSTMASK_LWHPS2FPGA		0x00000002
+#define RSTMGR_BRGMODRSTMASK_FPGA2SOC		0x00000004
+#define RSTMGR_BRGMODRSTMASK_F2SDRAM0		0x00000008
+#define RSTMGR_BRGMODRSTMASK_MPFE		0x00000040
+
+/* TSTSTA */
+#define RSTMGR_TSTSTA_RSTST			0x0000001F
+
+/* HDSKTIMEOUT */
+#define RSTMGR_HDSKTIMEOUT_VAL			0xFFFFFFFF
+
+/* DBGHDSKTIMEOUT */
+#define RSTMGR_DBGHDSKTIMEOUT_VAL		0xFFFFFFFF
+
+/* DBGRSTCMPLT */
+#define RSTMGR_DBGRSTCMPLT_VAL			0xFFFFFFFF
+
+/* HPSRSTCMPLT */
+#define RSTMGR_HPSRSTCMPLT_VAL		0xFFFFFFFF
+
+/* CPUINRESET */
+#define RSTMGR_CPUINRESET_CPU0			0x00000001
+#define RSTMGR_CPUINRESET_CPU1			0x00000002
+#define RSTMGR_CPUINRESET_CPU2			0x00000004
+#define RSTMGR_CPUINRESET_CPU3			0x00000008
+
+/* CPURSTRELEASE */
+#define RSTMGR_CPUSTRELEASE_CPUx		0x10D11094
+
+/* CPUxRESETBASE */
+#define RSTMGR_CPUxRESETBASELOW_CPU0		0x10D11098
+#define RSTMGR_CPUxRESETBASEHIGH_CPU0		0x10D1109C
+#define RSTMGR_CPUxRESETBASELOW_CPU1		0x10D110A0
+#define RSTMGR_CPUxRESETBASEHIGH_CPU1		0x10D110A4
+#define RSTMGR_CPUxRESETBASELOW_CPU2		0x10D110A8
+#define RSTMGR_CPUxRESETBASEHIGH_CPU2		0x10D110AC
+#define RSTMGR_CPUxRESETBASELOW_CPU3		0x10D110B0
+#define RSTMGR_CPUxRESETBASEHIGH_CPU3		0x10D110B4
 
 /* Definitions */
 
@@ -109,17 +227,28 @@
 #define RSTMGR_HDSKEN_SET			0x010D
 
 /* Macros */
+#define SOCFPGA_RSTMGR(_reg)			(SOCFPGA_RSTMGR_REG_BASE + (SOCFPGA_RSTMGR_##_reg))
+#define RSTMGR_FIELD(_reg, _field)		(RSTMGR_##_reg##MODRST_##_field)
 
-#define SOCFPGA_RSTMGR(_reg)		(SOCFPGA_RSTMGR_REG_BASE \
-					+ (SOCFPGA_RSTMGR_##_reg))
-#define RSTMGR_FIELD(_reg, _field)	(RSTMGR_##_reg##MODRST_##_field)
+/* Reset type to SDM from PSCI */
+/* TODO: Temporary location for the reset type macros */
+#define SOCFPGA_RESET_TYPE_COLD			0
+#define SOCFPGA_RESET_TYPE_WARM			1
 
 /* Function Declarations */
 
 void deassert_peripheral_reset(void);
 void config_hps_hs_before_warm_reset(void);
 
+int socfpga_bridges_reset(uint32_t mask);
 int socfpga_bridges_enable(uint32_t mask);
 int socfpga_bridges_disable(uint32_t mask);
 
+int socfpga_cpurstrelease(unsigned int cpu_id);
+int socfpga_cpu_reset_base(unsigned int cpu_id);
+
+/* SMP: Func proto */
+void bl31_plat_set_secondary_cpu_entrypoint(unsigned int cpu_id);
+void bl31_plat_set_secondary_cpu_off(void);
+
 #endif /* SOCFPGA_RESETMANAGER_H */
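
The two accessor macros near the end are how the reset-manager driver addresses these registers; using values defined above (SOCFPGA_RSTMGR_REG_BASE comes from socfpga_plat_def.h), they expand as follows::

    #include <lib/mmio.h>

    #include "socfpga_reset_manager.h"

    /*
     * SOCFPGA_RSTMGR(PER1MODRST)
     *   -> (SOCFPGA_RSTMGR_REG_BASE + SOCFPGA_RSTMGR_PER1MODRST)
     *   -> (SOCFPGA_RSTMGR_REG_BASE + 0x028)
     *
     * RSTMGR_FIELD(PER1, WATCHDOG4)
     *   -> RSTMGR_PER1MODRST_WATCHDOG4 == 0x04000000
     */
    static void example_deassert_watchdog4(void)
    {
    	/* De-asserts the watchdog4 reset, as deassert_peripheral_reset() does. */
    	mmio_clrbits_32(SOCFPGA_RSTMGR(PER1MODRST), RSTMGR_FIELD(PER1, WATCHDOG4));
    }
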
diff --git a/plat/intel/soc/common/include/socfpga_system_manager.h b/plat/intel/soc/common/include/socfpga_system_manager.h
index 8d9ba70..f860f57 100644
--- a/plat/intel/soc/common/include/socfpga_system_manager.h
+++ b/plat/intel/soc/common/include/socfpga_system_manager.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
diff --git a/plat/intel/soc/common/include/socfpga_vab.h b/plat/intel/soc/common/include/socfpga_vab.h
new file mode 100644
index 0000000..f6081df
--- /dev/null
+++ b/plat/intel/soc/common/include/socfpga_vab.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SOCFPGA_VAB_H
+#define SOCFPGA_VAB_H
+
+
+#include <stdlib.h>
+#include "socfpga_fcs.h"
+
+struct fcs_hps_vab_certificate_data {
+	uint32_t vab_cert_magic_num;			/* offset 0x10 */
+	uint32_t flags;
+	uint8_t rsvd0_1[8];
+	uint8_t fcs_sha384[FCS_SHA384_WORD_SIZE];	/* offset 0x20 */
+};
+
+struct fcs_hps_vab_certificate_header {
+	uint32_t cert_magic_num;			/* offset 0 */
+	uint32_t cert_data_sz;
+	uint32_t cert_ver;
+	uint32_t cert_type;
+	struct fcs_hps_vab_certificate_data d;		/* offset 0x10 */
+	/* keychain starts at offset 0x50 */
+};
+
+/* Macros */
+#define IS_BYTE_ALIGNED(x, a)		(((x) & ((typeof(x))(a) - 1)) == 0)
+#define BYTE_ALIGN(x, a)		__ALIGN_MASK((x), (typeof(x))(a)-1)
+#define __ALIGN_MASK(x, mask)		(((x)+(mask))&~(mask))
+#define VAB_CERT_HEADER_SIZE		sizeof(struct fcs_hps_vab_certificate_header)
+#define VAB_CERT_MAGIC_OFFSET		offsetof(struct fcs_hps_vab_certificate_header, d)
+#define VAB_CERT_FIT_SHA384_OFFSET	offsetof(struct fcs_hps_vab_certificate_data, fcs_sha384[0])
+#define SDM_CERT_MAGIC_NUM		0x25D04E7F
+#define CHUNKSZ_PER_WD_RESET		(256 * 1024)
+
+/* SHA-related return macros */
+#define ENOVABIMG		1 /* VAB certificate not available */
+#define EIMGERR			2 /* Image format/size not valid */
+#define ETIMEOUT		3 /* Execution timeout */
+#define EPROCESS		4 /* Process error */
+#define EKEYREJECTED		5 /* Key was rejected by the service */
+
+/* Function Declarations */
+static size_t get_img_size(uint8_t *img_buf, size_t img_buf_sz);
+int socfpga_vendor_authentication(void **p_image, size_t *p_size);
+static uint32_t get_unaligned_le32(const void *p);
+void sha384_csum_wd(const unsigned char *input, unsigned int ilen,
+		    unsigned char *output, unsigned int chunk_sz);
+
+#endif
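
The alignment helpers are plain mask arithmetic; a worked example with made-up values, together with the certificate offsets implied by the structs above::

    /*
     * BYTE_ALIGN(0x1003, 8)
     *   -> __ALIGN_MASK(0x1003, 7)
     *   -> (0x1003 + 7) & ~7
     *   -> 0x1008
     *
     * IS_BYTE_ALIGNED(0x1008, 8) -> true
     *
     * Layout: fcs_hps_vab_certificate_data starts 0x10 bytes into the
     * header and fcs_sha384[] a further 0x10 bytes in, i.e. 0x20 from the
     * start of the certificate, matching the offset comments in the structs.
     */
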
diff --git a/plat/intel/soc/common/soc/socfpga_emac.c b/plat/intel/soc/common/soc/socfpga_emac.c
index 02ff89e..49c92c3 100644
--- a/plat/intel/soc/common/soc/socfpga_emac.c
+++ b/plat/intel/soc/common/soc/socfpga_emac.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2023, Intel Corporation. All rights reserved.
+ * Copyright (c) 2020-2022, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
diff --git a/plat/intel/soc/common/soc/socfpga_firewall.c b/plat/intel/soc/common/soc/socfpga_firewall.c
index fc3889c..6247df3 100644
--- a/plat/intel/soc/common/soc/socfpga_firewall.c
+++ b/plat/intel/soc/common/soc/socfpga_firewall.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -20,7 +20,11 @@
 void enable_ns_peripheral_access(void)
 {
 	mmio_write_32(SOCFPGA_L4_PER_SCR(NAND_REGISTER), DISABLE_L4_FIREWALL);
+#if	((PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10) || \
+	(PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX) || \
+	(PLATFORM_MODEL == PLAT_SOCFPGA_N5X))
 	mmio_write_32(SOCFPGA_L4_PER_SCR(NAND_DATA), DISABLE_L4_FIREWALL);
+#endif
 
 	mmio_write_32(SOCFPGA_L4_SYS_SCR(NAND_ECC), DISABLE_L4_FIREWALL);
 	mmio_write_32(SOCFPGA_L4_SYS_SCR(NAND_READ_ECC), DISABLE_L4_FIREWALL);
@@ -87,9 +91,19 @@
 	mmio_write_32(SOCFPGA_L4_SYS_SCR(WATCHDOG1), DISABLE_L4_FIREWALL);
 	mmio_write_32(SOCFPGA_L4_SYS_SCR(WATCHDOG2), DISABLE_L4_FIREWALL);
 	mmio_write_32(SOCFPGA_L4_SYS_SCR(WATCHDOG3), DISABLE_L4_FIREWALL);
+#if	PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+	mmio_write_32(SOCFPGA_L4_SYS_SCR(WATCHDOG4), DISABLE_L4_FIREWALL);
+#endif
 
 	mmio_write_32(SOCFPGA_L4_SYS_SCR(DAP), DISABLE_L4_FIREWALL);
 
+#if	PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+	mmio_write_32(SOCFPGA_L4_SYS_SCR(PWRMGR), DISABLE_L4_FIREWALL);
+
+	mmio_write_32(SOCFPGA_L4_SYS_SCR(USB1_RXECC), DISABLE_L4_FIREWALL);
+	mmio_write_32(SOCFPGA_L4_SYS_SCR(USB1_TXECC), DISABLE_L4_FIREWALL);
+#endif
+
 	mmio_write_32(SOCFPGA_L4_SYS_SCR(L4_NOC_PROBES), DISABLE_L4_FIREWALL);
 
 	mmio_write_32(SOCFPGA_L4_SYS_SCR(L4_NOC_QOS), DISABLE_L4_FIREWALL);
diff --git a/plat/intel/soc/common/soc/socfpga_handoff.c b/plat/intel/soc/common/soc/socfpga_handoff.c
index a3146b4..526c6e1 100644
--- a/plat/intel/soc/common/soc/socfpga_handoff.c
+++ b/plat/intel/soc/common/soc/socfpga_handoff.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -29,16 +29,34 @@
 	for (i = 0; i < sizeof(handoff) / 4; i++)
 		buffer[i] = SWAP_UINT32(buffer[i]);
 
-	if (reverse_hoff_ptr->header_magic != HANDOFF_MAGIC_HEADER)
+	if (reverse_hoff_ptr->header_magic != HANDOFF_MAGIC_HEADER) {
 		return -1;
-	if (reverse_hoff_ptr->pinmux_sel_magic != HANDOFF_MAGIC_PINMUX_SEL)
+	}
+	if (reverse_hoff_ptr->pinmux_sel_magic != HANDOFF_MAGIC_PINMUX_SEL) {
 		return -1;
-	if (reverse_hoff_ptr->pinmux_io_magic != HANDOFF_MAGIC_IOCTLR)
+	}
+	if (reverse_hoff_ptr->pinmux_io_magic != HANDOFF_MAGIC_IOCTLR) {
 		return -1;
-	if (reverse_hoff_ptr->pinmux_fpga_magic != HANDOFF_MAGIC_FPGA)
+	}
+	if (reverse_hoff_ptr->pinmux_fpga_magic != HANDOFF_MAGIC_FPGA) {
 		return -1;
-	if (reverse_hoff_ptr->pinmux_delay_magic != HANDOFF_MAGIC_IODELAY)
+	}
+	if (reverse_hoff_ptr->pinmux_delay_magic != HANDOFF_MAGIC_IODELAY) {
 		return -1;
+	}
+	if (reverse_hoff_ptr->clock_magic != HANDOFF_MAGIC_CLOCK) {
+		return -1;
+	}
+
+#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+	if (reverse_hoff_ptr->peripheral_pwr_gate_magic != HANDOFF_MAGIC_PERIPHERAL) {
+		return -1;
+	}
+
+	if (reverse_hoff_ptr->ddr_magic != HANDOFF_MAGIC_DDR) {
+		return -1;
+	}
+#endif
 
 	return 0;
 }
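
A hypothetical caller of the handoff helpers, just to show the contract; the wrapper and variable names are placeholders::

    #include "socfpga_handoff.h"

    static handoff hoff;	/* placeholder destination */

    int plat_fetch_handoff(void)
    {
    	/* socfpga_get_handoff() fills the structure with the platform
    	 * handoff data; verify_handoff_image() above byte-swaps the image
    	 * word by word and returns -1 on any magic-word mismatch (BOOT,
    	 * PMUX, IOCT, FPGA, DLAY, CLKS, plus PERIPHERAL and DDR on
    	 * Agilex5), 0 otherwise.
    	 */
    	if (socfpga_get_handoff(&hoff) != 0) {
    		return -1;
    	}

    	return 0;
    }
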
diff --git a/plat/intel/soc/common/soc/socfpga_mailbox.c b/plat/intel/soc/common/soc/socfpga_mailbox.c
index 525ac2b..d93fc8a 100644
--- a/plat/intel/soc/common/soc/socfpga_mailbox.c
+++ b/plat/intel/soc/common/soc/socfpga_mailbox.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2023, Intel Corporation. All rights reserved.
+ * Copyright (c) 2020-2022, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -523,10 +523,20 @@
 void mailbox_reset_cold(void)
 {
 	mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE);
-	mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_REBOOT_HPS, NULL, 0U,
-				CMD_CASUAL, NULL, NULL);
+
+	mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_REBOOT_HPS, 0U, 0U,
+				 CMD_CASUAL, NULL, NULL);
 }
 
+void mailbox_reset_warm(uint32_t reset_type)
+{
+	mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE);
+
+	reset_type = 0x01; /* Warm reset header data must be 1 */
+	mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_REBOOT_HPS, &reset_type, 1U,
+				 CMD_CASUAL, NULL, NULL);
+}
+
 int mailbox_rsu_get_spt_offset(uint32_t *resp_buf, unsigned int resp_buf_len)
 {
 	return mailbox_send_cmd(MBOX_JOB_ID, MBOX_GET_SUBPARTITION_TABLE,
@@ -679,9 +689,10 @@
 				&resp_len);
 }
 
-int mailbox_seu_err_status(uint32_t *resp_buf, uint32_t resp_buf_len)
+int mailbox_seu_err_status(uint32_t *resp_buf, unsigned int resp_buf_len)
 {
+
 	return mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_SEU_ERR_READ, NULL, 0U,
 				CMD_CASUAL, resp_buf,
-				&resp_buf_len);;
+				&resp_buf_len);
 }
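
Both reboot paths send MBOX_CMD_REBOOT_HPS to SDM; the warm-reset variant additionally carries a one-word payload that is forced to 1 inside mailbox_reset_warm(). A hypothetical helper showing the two calls side by side (the wrapper name is an assumption)::

    #include <stdbool.h>

    #include "socfpga_mailbox.h"
    #include "socfpga_reset_manager.h"

    static void plat_request_reboot(bool warm)
    {
    	if (warm) {
    		/* Payload is overridden to 1 inside mailbox_reset_warm(). */
    		mailbox_reset_warm(SOCFPGA_RESET_TYPE_WARM);
    	} else {
    		/* A cold reboot carries no payload. */
    		mailbox_reset_cold();
    	}
    }
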
diff --git a/plat/intel/soc/common/soc/socfpga_reset_manager.c b/plat/intel/soc/common/soc/socfpga_reset_manager.c
index a546638..7db86c7 100644
--- a/plat/intel/soc/common/soc/socfpga_reset_manager.c
+++ b/plat/intel/soc/common/soc/socfpga_reset_manager.c
@@ -4,6 +4,7 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
+#include <assert.h>
 #include <errno.h>
 #include <common/debug.h>
 #include <drivers/delay_timer.h>
@@ -23,6 +24,7 @@
 			RSTMGR_FIELD(PER1, WATCHDOG1) |
 			RSTMGR_FIELD(PER1, WATCHDOG2) |
 			RSTMGR_FIELD(PER1, WATCHDOG3) |
+			RSTMGR_FIELD(PER1, WATCHDOG4) |
 			RSTMGR_FIELD(PER1, L4SYSTIMER0) |
 			RSTMGR_FIELD(PER1, L4SYSTIMER1) |
 			RSTMGR_FIELD(PER1, SPTIMER0) |
@@ -32,12 +34,15 @@
 			RSTMGR_FIELD(PER1, I2C2) |
 			RSTMGR_FIELD(PER1, I2C3) |
 			RSTMGR_FIELD(PER1, I2C4) |
+			RSTMGR_FIELD(PER1, I3C0) |
+			RSTMGR_FIELD(PER1, I3C1) |
 			RSTMGR_FIELD(PER1, UART0) |
 			RSTMGR_FIELD(PER1, UART1) |
 			RSTMGR_FIELD(PER1, GPIO0) |
 			RSTMGR_FIELD(PER1, GPIO1));
 
 	mmio_clrbits_32(SOCFPGA_RSTMGR(PER0MODRST),
+			RSTMGR_FIELD(PER0, SOFTPHY) |
 			RSTMGR_FIELD(PER0, EMAC0OCP) |
 			RSTMGR_FIELD(PER0, EMAC1OCP) |
 			RSTMGR_FIELD(PER0, EMAC2OCP) |
@@ -80,10 +85,10 @@
 {
 	uint32_t or_mask = 0;
 
-	or_mask |= RSTMGR_HDSKEN_SDRSELFREFEN;
+	or_mask |= RSTMGR_HDSKEN_EMIF_FLUSH;
 	or_mask |= RSTMGR_HDSKEN_FPGAHSEN;
 	or_mask |= RSTMGR_HDSKEN_ETRSTALLEN;
-	or_mask |= RSTMGR_HDSKEN_L2FLUSHEN;
+	or_mask |= RSTMGR_HDSKEN_LWS2F_FLUSH;
 	or_mask |= RSTMGR_HDSKEN_L3NOC_DBG;
 	or_mask |= RSTMGR_HDSKEN_DEBUG_L3NOC;
 
@@ -104,6 +109,27 @@
 	return -ETIMEDOUT;
 }
 
+#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+static int poll_idle_status_by_counter(uint32_t addr, uint32_t mask,
+					uint32_t match, uint32_t delay_ms)
+{
+	int time_out = delay_ms;
+
+	while (time_out-- > 0) {
+
+		if ((mmio_read_32(addr) & mask) == match) {
+			return 0;
+		}
+
+		/* TODO: Use udelay() for the production release */
+		for (int i = 0; i < 2000; i++) {
+			/* dummy delay */
+		}
+	}
+	return -ETIMEDOUT;
+}
+#endif
+
 static int poll_idle_status_by_clkcycles(uint32_t addr, uint32_t mask,
 					 uint32_t match, uint32_t delay_clk_cycles)
 {
@@ -185,6 +211,39 @@
 		*f2s_respempty |= FLAGINSTATUS_F2SDRAM2_RESPEMPTY;
 		*f2s_cmdidle |= FLAGINSTATUS_F2SDRAM2_CMDIDLE;
 	}
+#elif PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+	if (mask & FPGA2SOC_MASK) {
+		*brg_mask |= RSTMGR_FIELD(BRG, FPGA2SOC);
+		*f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM0_IDLEREQ;
+		*f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM0_FORCE_DRAIN;
+		*f2s_en |= FLAGOUTSETCLR_F2SDRAM0_ENABLE;
+		*f2s_idleack |= FLAGINSTATUS_F2SDRAM0_IDLEACK;
+		*f2s_respempty |= FLAGINSTATUS_F2SDRAM0_RESPEMPTY;
+	}
+	if (mask & F2SDRAM0_MASK) {
+		*brg_mask |= RSTMGR_FIELD(BRG, F2SSDRAM0);
+		*f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM0_IDLEREQ;
+		*f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM0_FORCE_DRAIN;
+		*f2s_en |= FLAGOUTSETCLR_F2SDRAM0_ENABLE;
+		*f2s_idleack |= FLAGINSTATUS_F2SDRAM0_IDLEACK;
+		*f2s_respempty |= FLAGINSTATUS_F2SDRAM0_RESPEMPTY;
+	}
+	if (mask & F2SDRAM1_MASK) {
+		*brg_mask |= RSTMGR_FIELD(BRG, F2SSDRAM1);
+		*f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM1_IDLEREQ;
+		*f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM1_FORCE_DRAIN;
+		*f2s_en |= FLAGOUTSETCLR_F2SDRAM1_ENABLE;
+		*f2s_idleack |= FLAGINSTATUS_F2SDRAM1_IDLEACK;
+		*f2s_respempty |= FLAGINSTATUS_F2SDRAM1_RESPEMPTY;
+	}
+	if (mask & F2SDRAM2_MASK) {
+		*brg_mask |= RSTMGR_FIELD(BRG, F2SSDRAM2);
+		*f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM2_IDLEREQ;
+		*f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM2_FORCE_DRAIN;
+		*f2s_en |= FLAGOUTSETCLR_F2SDRAM2_ENABLE;
+		*f2s_idleack |= FLAGINSTATUS_F2SDRAM2_IDLEACK;
+		*f2s_respempty |= FLAGINSTATUS_F2SDRAM2_RESPEMPTY;
+	}
 #else
 	if ((mask & FPGA2SOC_MASK) != 0U) {
 		*brg_mask |= RSTMGR_FIELD(BRG, FPGA2SOC);
@@ -198,6 +257,153 @@
 #endif
 }
 
+int socfpga_bridges_reset(uint32_t mask)
+{
+	int ret = 0;
+	int timeout = 300;
+	uint32_t brg_mask = 0;
+	uint32_t noc_mask = 0;
+	uint32_t f2s_idlereq = 0;
+	uint32_t f2s_force_drain = 0;
+	uint32_t f2s_en = 0;
+	uint32_t f2s_idleack = 0;
+	uint32_t f2s_respempty = 0;
+	uint32_t f2s_cmdidle = 0;
+
+	/* Reset s2f bridge */
+	socfpga_s2f_bridge_mask(mask, &brg_mask, &noc_mask);
+	if (brg_mask) {
+		if (mask & SOC2FPGA_MASK) {
+			/* Request handshake with SOC2FPGA bridge to clear traffic */
+			mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ),
+					RSTMGR_HDSKREQ_S2F_FLUSH);
+
+			/* Wait for bridge to idle status */
+			ret = poll_idle_status(SOCFPGA_RSTMGR(HDSKACK),
+					RSTMGR_HDSKACK_S2F_FLUSH,
+					RSTMGR_HDSKACK_S2F_FLUSH, 300);
+		}
+
+		if (mask & LWHPS2FPGA_MASK) {
+			/* Request handshake with LWSOC2FPGA bridge to clear traffic */
+			mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ),
+					RSTMGR_HDSKREQ_LWS2F_FLUSH);
+
+			/* Wait for bridge to idle status */
+			ret = poll_idle_status(SOCFPGA_RSTMGR(HDSKACK),
+					RSTMGR_HDSKACK_LWS2F_FLUSH,
+					RSTMGR_HDSKACK_LWS2F_FLUSH, 300);
+		}
+
+		if (ret < 0) {
+			ERROR("S2F Bridge reset: Timeout waiting for idle ack\n");
+			assert(false);
+		}
+
+		/* Assert reset to bridge */
+		mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST),
+				brg_mask);
+
+		/* Clear idle requests to bridge */
+		if (mask & SOC2FPGA_MASK) {
+			mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ),
+					RSTMGR_HDSKREQ_S2F_FLUSH);
+		}
+
+		if (mask & LWHPS2FPGA_MASK) {
+			mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ),
+					RSTMGR_HDSKREQ_LWS2F_FLUSH);
+		}
+
+		/* When FPGA reconfig is complete */
+		mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), brg_mask);
+	}
+
+	/* Reset f2s bridge */
+	socfpga_f2s_bridge_mask(mask, &brg_mask, &f2s_idlereq,
+					&f2s_force_drain, &f2s_en,
+					&f2s_idleack, &f2s_respempty,
+					&f2s_cmdidle);
+
+	if (brg_mask) {
+		mmio_setbits_32(SOCFPGA_RSTMGR(HDSKEN),
+				RSTMGR_HDSKEN_FPGAHSEN);
+
+		mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ),
+				RSTMGR_HDSKREQ_FPGAHSREQ);
+
+		ret = poll_idle_status(SOCFPGA_RSTMGR(HDSKACK),
+				RSTMGR_HDSKACK_FPGAHSREQ,
+				RSTMGR_HDSKACK_FPGAHSREQ, 300);
+
+		if (ret < 0) {
+			ERROR("F2S Bridge disable: Timeout waiting for idle req\n");
+			assert(false);
+		}
+
+		/* Disable f2s bridge */
+		mmio_clrbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+				f2s_en);
+		udelay(5);
+
+		mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+				f2s_force_drain);
+		udelay(5);
+
+		do {
+			/* Read response queue status to ensure it is empty */
+			uint32_t idle_status;
+
+			idle_status = mmio_read_32(SOCFPGA_F2SDRAMMGR(
+				SIDEBANDMGR_FLAGINSTATUS0));
+			if (idle_status & f2s_respempty) {
+				idle_status = mmio_read_32(SOCFPGA_F2SDRAMMGR(
+					SIDEBANDMGR_FLAGINSTATUS0));
+				if (idle_status & f2s_respempty) {
+					break;
+				}
+			}
+			udelay(1000);
+		} while (timeout-- > 0);
+
+		/* Assert reset to f2s bridge */
+		mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST),
+				brg_mask);
+
+		/* Clear idle request to FPGA */
+		mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ),
+				RSTMGR_HDSKREQ_FPGAHSREQ);
+
+		/* Clear idle request to MPFE */
+		mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTCLR0),
+				f2s_idlereq);
+
+		/* When FPGA reconfig is complete */
+		mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), brg_mask);
+
+		/* Enable f2s bridge */
+		mmio_clrbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+			f2s_idlereq);
+
+		ret = poll_idle_status(SOCFPGA_F2SDRAMMGR(
+			SIDEBANDMGR_FLAGINSTATUS0), f2s_idleack, 0, 300);
+		if (ret < 0) {
+			ERROR("F2S bridge enable: Timeout waiting for idle ack\n");
+			assert(false);
+		}
+
+		mmio_clrbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+			f2s_force_drain);
+		udelay(5);
+
+		mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+			f2s_en);
+		udelay(5);
+	}
+
+	return ret;
+}
+
 int socfpga_bridges_enable(uint32_t mask)
 {
 	int ret = 0;
@@ -209,9 +415,87 @@
 	uint32_t f2s_idleack = 0;
 	uint32_t f2s_respempty = 0;
 	uint32_t f2s_cmdidle = 0;
+#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+	uint32_t delay = 0;
+#endif
 
 	/* Enable s2f bridge */
 	socfpga_s2f_bridge_mask(mask, &brg_mask, &noc_mask);
+#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+	/* Enable SOC2FPGA bridge */
+	if (brg_mask & RSTMGR_BRGMODRSTMASK_SOC2FPGA) {
+		/* Write Reset Manager hdskreq[soc2fpga_flush_req] = 1 */
+		NOTICE("Set S2F hdskreq ...\n");
+		mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ),
+			RSTMGR_HDSKREQ_SOC2FPGAREQ);
+
+		/* Read Reset Manager hdskack[soc2fpga] = 1 */
+		ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK),
+			RSTMGR_HDSKACK_SOC2FPGAACK, RSTMGR_HDSKACK_SOC2FPGAACK,
+			300);
+
+		if (ret < 0) {
+			ERROR("S2F bridge enable: Timeout hdskack\n");
+		}
+
+		/* Write Reset Manager hdskreq[soc2fpga_flush_req] = 0 */
+		NOTICE("Clear S2F hdskreq ...\n");
+		mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ),
+			RSTMGR_HDSKREQ_SOC2FPGAREQ);
+
+		/* Write Reset Manager brgmodrst[soc2fpga] = 1 */
+		NOTICE("Assert S2F ...\n");
+		mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST),
+			RSTMGR_BRGMODRST_SOC2FPGA);
+
+		/* TODO: Use udelay() for the production release */
+		for (delay = 0; delay < 1000; delay++) {
+			/* dummy delay */
+		}
+
+		/* Write Reset Manager brgmodrst[soc2fpga] = 0 */
+		NOTICE("Deassert S2F ...\n");
+		mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST),
+			RSTMGR_BRGMODRST_SOC2FPGA);
+	}
+
+	/* Enable LWSOC2FPGA bridge */
+	if (brg_mask & RSTMGR_BRGMODRSTMASK_LWHPS2FPGA) {
+		/* Write Reset Manager hdskreq[lwsoc2fpga_flush_req] = 1 */
+		NOTICE("Set LWS2F hdskreq ...\n");
+		mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ),
+			RSTMGR_HDSKREQ_LWSOC2FPGAREQ);
+
+		/* Read Reset Manager hdskack[lwsoc2fpga] = 1 */
+		ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK),
+			RSTMGR_HDSKACK_LWSOC2FPGAACK, RSTMGR_HDSKACK_LWSOC2FPGAACK,
+			300);
+
+		if (ret < 0) {
+			ERROR("LWS2F bridge enable: Timeout hdskack\n");
+		}
+
+		/* Write Reset Manager hdskreq[lwsoc2fpga_flush_req] = 0 */
+		NOTICE("Clear LWS2F hdskreq ...\n");
+		mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ),
+			RSTMGR_HDSKREQ_LWSOC2FPGAREQ);
+
+		/* Write Reset Manager brgmodrst[lwsoc2fpga] = 1 */
+		NOTICE("Assert LWS2F ...\n");
+		mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST),
+			RSTMGR_BRGMODRST_LWHPS2FPGA);
+
+		/* TODO: Use udelay() for the production release */
+		for (delay = 0; delay < 1000; delay++) {
+			/* dummy delay */
+		}
+
+		/* Write Reset Manager brgmodrst[lwsoc2fpga] = 0 */
+		NOTICE("Deassert LWS2F ...\n");
+		mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST),
+			RSTMGR_BRGMODRST_LWHPS2FPGA);
+	}
+#else
 	if (brg_mask != 0U) {
 		/* Clear idle request */
 		mmio_setbits_32(SOCFPGA_SYSMGR(NOC_IDLEREQ_CLR),
@@ -224,15 +508,186 @@
 		ret = poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLEACK),
 				       noc_mask, 0, 300);
 		if (ret < 0) {
-			ERROR("S2F bridge enable: "
-					"Timeout waiting for idle ack\n");
+			ERROR("S2F bridge enable: Timeout idle ack\n");
 		}
 	}
+#endif
 
 	/* Enable f2s bridge */
 	socfpga_f2s_bridge_mask(mask, &brg_mask, &f2s_idlereq,
 				&f2s_force_drain, &f2s_en,
 				&f2s_idleack, &f2s_respempty, &f2s_cmdidle);
+#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+	/* Enable FPGA2SOC bridge */
+	if (brg_mask & RSTMGR_BRGMODRSTMASK_FPGA2SOC) {
+		/* Write Reset Manager hdsken[fpgahsen] = 1 */
+		NOTICE("Set FPGA hdsken(fpgahsen) ...\n");
+		mmio_setbits_32(SOCFPGA_RSTMGR(HDSKEN), RSTMGR_HDSKEN_FPGAHSEN);
+
+		/* Write Reset Manager hdskreq[fpgahsreq] = 1 */
+		NOTICE("Set FPGA hdskreq(fpgahsreq) ...\n");
+		mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ), RSTMGR_HDSKREQ_FPGAHSREQ);
+
+		/* Read Reset Manager hdskack[fpgahsack] = 1 */
+		NOTICE("Get FPGA hdskack(fpgahsack) ...\n");
+		ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK),
+			RSTMGR_HDSKACK_FPGAHSACK, RSTMGR_HDSKACK_FPGAHSACK,
+			300);
+
+		if (ret < 0) {
+			ERROR("FPGA bridge fpga handshake fpgahsreq: Timeout\n");
+		}
+
+		/* Write Reset Manager hdskreq[f2s_flush_req] = 1 */
+		NOTICE("Set F2S hdskreq(f2s_flush_req) ...\n");
+		mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ),
+			RSTMGR_HDSKREQ_FPGA2SOCREQ);
+
+		/* Read Reset Manager hdskack[f2s_flush_ack] = 1 */
+		NOTICE("Get F2S hdskack(f2s_flush_ack) ...\n");
+		ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK),
+			RSTMGR_HDSKACK_FPGA2SOCACK, RSTMGR_HDSKACK_FPGA2SOCACK,
+			300);
+
+		if (ret < 0) {
+			ERROR("F2S bridge fpga handshake f2sdram_flush_req: Timeout\n");
+		}
+
+		/* Write Reset Manager hdskreq[fpgahsreq] = 0 */
+		NOTICE("Clear FPGA hdskreq(fpgahsreq) ...\n");
+		mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ), RSTMGR_HDSKREQ_FPGAHSREQ);
+
+		/* Write Reset Manager hdskreq[f2s_flush_req] = 0 */
+		NOTICE("Clear F2S hdskreq(f2s_flush_req) ...\n");
+		mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ),
+			RSTMGR_HDSKREQ_FPGA2SOCREQ);
+
+		/* Read Reset Manager hdskack[f2s_flush_ack] = 0 */
+		NOTICE("Get F2SDRAM hdskack(f2s_flush_ack) ...\n");
+		ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK),
+			RSTMGR_HDSKACK_FPGA2SOCACK, RSTMGR_HDSKACK_FPGA2SOCACK_DASRT,
+			300);
+
+		if (ret < 0) {
+			ERROR("F2S bridge fpga handshake f2s_flush_ack: Timeout\n");
+		}
+
+		/* Read Reset Manager hdskack[fpgahsack] = 0 */
+		NOTICE("Get FPGA hdskack(fpgahsack) ...\n");
+		ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK),
+			RSTMGR_HDSKACK_FPGAHSACK, RSTMGR_HDSKACK_FPGAHSACK_DASRT,
+			300);
+
+		if (ret < 0) {
+			ERROR("F2S bridge fpga handshake fpgahsack: Timeout\n");
+		}
+
+		/* Write Reset Manager brgmodrst[fpga2soc] = 1 */
+		NOTICE("Assert F2S ...\n");
+		mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST), RSTMGR_BRGMODRST_FPGA2SOC);
+
+		/* TODO: Use udelay() for the production release */
+		for (delay = 0; delay < 1000; delay++) {
+			/* dummy delay */
+		}
+
+		/* Write Reset Manager brgmodrst[fpga2soc] = 0 */
+		NOTICE("Deassert F2S ...\n");
+		mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), RSTMGR_BRGMODRST_FPGA2SOC);
+
+		/* Write System Manager f2s bridge control register[f2soc_enable] = 1 */
+		NOTICE("Set F2S f2soc_enable ...\n");
+		mmio_setbits_32(SOCFPGA_SYSMGR(F2S_BRIDGE_CTRL),
+			SYSMGR_F2S_BRIDGE_CTRL_EN);
+	}
+
+	/* Enable FPGA2SDRAM bridge */
+	if (brg_mask & RSTMGR_BRGMODRSTMASK_F2SDRAM0) {
+		/* Write Reset Manager hdsken[fpgahsen] = 1 */
+		NOTICE("Set F2SDRAM hdsken(fpgahsen) ...\n");
+		mmio_setbits_32(SOCFPGA_RSTMGR(HDSKEN), RSTMGR_HDSKEN_FPGAHSEN);
+
+		/* Write Reset Manager hdskreq[fpgahsreq] = 1 */
+		NOTICE("Set F2SDRAM hdskreq(fpgahsreq) ...\n");
+		mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ), RSTMGR_HDSKREQ_FPGAHSREQ);
+
+		/* Read Reset Manager hdskack[fpgahsack] = 1 */
+		NOTICE("Get F2SDRAM hdskack(fpgahsack) ...\n");
+		ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK),
+			RSTMGR_HDSKACK_FPGAHSACK, RSTMGR_HDSKACK_FPGAHSACK,
+			300);
+
+		if (ret < 0) {
+			ERROR("F2SDRAM bridge fpga handshake fpgahsreq: Timeout\n");
+		}
+
+		/* Write Reset Manager hdskreq[f2sdram_flush_req] = 1 */
+		NOTICE("Set F2SDRAM hdskreq(f2sdram_flush_req) ...\n");
+		mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ),
+			RSTMGR_HDSKREQ_F2SDRAM0REQ);
+
+		/* Read Reset Manager hdskack[f2sdram_flush_ack] = 1 */
+		NOTICE("Get F2SDRAM hdskack(f2sdram_flush_ack) ...\n");
+		ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK),
+			RSTMGR_HDSKACK_F2SDRAM0ACK, RSTMGR_HDSKACK_F2SDRAM0ACK,
+			300);
+
+		if (ret < 0) {
+			ERROR("F2SDRAM bridge fpga handshake f2sdram_flush_req: Timeout\n");
+		}
+
+		/* Write Reset Manager hdskreq[fpgahsreq] = 0 */
+		NOTICE("Clear F2SDRAM hdskreq(fpgahsreq) ...\n");
+		mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ), RSTMGR_HDSKREQ_FPGAHSREQ);
+
+		/* Write Reset Manager hdskreq[f2sdram_flush_req] = 0 */
+		NOTICE("Clear F2SDRAM hdskreq(f2sdram_flush_req) ...\n");
+		mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ), RSTMGR_HDSKREQ_F2SDRAM0REQ);
+
+		/* Read Reset Manager hdskack[f2sdram_flush_ack] = 0 */
+		NOTICE("Get F2SDRAM hdskack(f2sdram_flush_ack) ...\n");
+		ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK),
+			RSTMGR_HDSKACK_F2SDRAM0ACK, RSTMGR_HDSKACK_F2SDRAM0ACK_DASRT,
+			300);
+
+		if (ret < 0) {
+			ERROR("F2SDRAM bridge fpga handshake f2sdram_flush_ack: Timeout\n");
+		}
+
+		/* Read Reset Manager hdskack[fpgahsack] = 0 */
+		NOTICE("Get F2SDRAM hdskack(fpgahsack) ...\n");
+		ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK),
+			RSTMGR_HDSKACK_FPGAHSACK, RSTMGR_HDSKACK_FPGAHSACK_DASRT,
+			300);
+
+		if (ret < 0) {
+			ERROR("F2SDRAM bridge fpga handshake fpgahsack: Timeout\n");
+		}
+
+		/* Write Reset Manager brgmodrst[fpga2sdram] = 1 */
+		NOTICE("Assert F2SDRAM ...\n");
+		mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST),
+			RSTMGR_BRGMODRST_F2SSDRAM0);
+
+		/* TODO: Use udelay() for the production release */
+		for (delay = 0; delay < 1000; delay++) {
+			/* dummy delay */
+		}
+
+		/* Write Reset Manager brgmodrst[fpga2sdram] = 0 */
+		NOTICE("Deassert F2SDRAM ...\n");
+		mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST),
+			RSTMGR_BRGMODRST_F2SSDRAM0);
+
+		/*
+		 * Clear fpga2sdram_manager_main_SidebandManager_FlagOutClr0
+		 * f2s_ready_latency_enable
+		 */
+		NOTICE("Clear F2SDRAM f2s_ready_latency_enable ...\n");
+		mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTCLR0),
+			FLAGOUTCLR0_F2SDRAM0_ENABLE);
+	}
+#else
 	if (brg_mask != 0U) {
 		mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), brg_mask);
 
@@ -243,8 +698,7 @@
 				       f2s_idleack, 0, 300);
 
 		if (ret < 0) {
-			ERROR("F2S bridge enable: "
-			      "Timeout waiting for idle ack");
+			ERROR("F2S bridge enable: Timeout idle ack");
 		}
 
 		/* Clear the force drain */
@@ -256,7 +710,7 @@
 				f2s_en);
 		udelay(5);
 	}
-
+#endif
 	return ret;
 }
 
@@ -329,15 +783,13 @@
 		ret = poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLEACK),
 				       noc_mask, noc_mask, 300);
 		if (ret < 0) {
-			ERROR("S2F Bridge disable: "
-			      "Timeout waiting for idle ack\n");
+			ERROR("S2F Bridge disable: Timeout idle ack\n");
 		}
 
 		ret = poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLESTATUS),
 				       noc_mask, noc_mask, 300);
 		if (ret < 0) {
-			ERROR("S2F Bridge disable: "
-			      "Timeout waiting for idle status\n");
+			ERROR("S2F Bridge disable: Timeout idle status\n");
 		}
 
 		mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST), brg_mask);
@@ -365,8 +817,8 @@
 				RSTMGR_HDSKREQ_FPGAHSREQ);
 
 		ret = poll_idle_status_by_clkcycles(SOCFPGA_RSTMGR(HDSKACK),
-						    RSTMGR_HDSKACK_FPGAHSACK_MASK,
-						    RSTMGR_HDSKACK_FPGAHSACK_MASK, 1000);
+						    RSTMGR_HDSKACK_FPGAHSREQ,
+						    RSTMGR_HDSKACK_FPGAHSREQ, 1000);
 
 		/* DISABLE F2S Bridge */
 		mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTCLR0),
@@ -394,3 +846,68 @@
 
 	return ret;
 }
+
+/* CPUxRESETBASELOW */
+int socfpga_cpu_reset_base(unsigned int cpu_id)
+{
+	int ret = 0;
+	uint32_t entrypoint = 0;
+
+	ret = socfpga_cpurstrelease(cpu_id);
+
+	if (ret < 0) {
+		return RSTMGR_RET_ERROR;
+	}
+
+	if (ret == RSTMGR_RET_OK) {
+
+		switch (cpu_id) {
+		case 0:
+			entrypoint = mmio_read_32(SOCFPGA_RSTMGR_CPUBASELOW_0);
+			entrypoint |= mmio_read_32(SOCFPGA_RSTMGR_CPUBASEHIGH_0) << 24;
+			break;
+
+		case 1:
+			entrypoint = mmio_read_32(SOCFPGA_RSTMGR_CPUBASELOW_1);
+			entrypoint |= mmio_read_32(SOCFPGA_RSTMGR_CPUBASEHIGH_1) << 24;
+			break;
+
+		case 2:
+			entrypoint = mmio_read_32(SOCFPGA_RSTMGR_CPUBASELOW_2);
+			entrypoint |= mmio_read_32(SOCFPGA_RSTMGR_CPUBASEHIGH_2) << 24;
+			break;
+
+		case 3:
+			entrypoint = mmio_read_32(SOCFPGA_RSTMGR_CPUBASELOW_3);
+			entrypoint |= mmio_read_32(SOCFPGA_RSTMGR_CPUBASEHIGH_3) << 24;
+			break;
+
+		default:
+			break;
+		}
+
+		mmio_write_64(PLAT_SEC_ENTRY, entrypoint);
+	}
+
+	return RSTMGR_RET_OK;
+}
+
+/* CPURSTRELEASE */
+int socfpga_cpurstrelease(unsigned int cpu_id)
+{
+	unsigned int timeout = 0;
+
+	do {
+		/* Poll the CPU reset release status for the requested CPU */
+		uint32_t cpurstrelease_status;
+
+		cpurstrelease_status = mmio_read_32(SOCFPGA_RSTMGR(CPURSTRELEASE));
+
+		if ((cpurstrelease_status & RSTMGR_CPUSTRELEASE_CPUx) == cpu_id) {
+			return RSTMGR_RET_OK;
+		}
+		udelay(1000);
+	} while (timeout-- > 0);
+
+	return RSTMGR_RET_ERROR;
+}
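
The bridge functions above repeat one handshake pattern: raise the flush request, poll the matching acknowledge bit with a bounded wait, drop the request, then pulse the bridge bit in BRGMODRST. A condensed sketch of that pattern, written as if it lived next to the code above so it can reuse the file-local poll_idle_status(); the helper name and the fixed 300 ms bound are illustrative assumptions::

    static int bridge_flush_and_reset(uint32_t req_bit, uint32_t ack_bit,
    				  uint32_t modrst_bit)
    {
    	int ret;

    	/* 1. Ask the bridge to flush outstanding traffic. */
    	mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ), req_bit);

    	/* 2. Wait (bounded) for the matching acknowledge bit. */
    	ret = poll_idle_status(SOCFPGA_RSTMGR(HDSKACK), ack_bit, ack_bit, 300);

    	/* 3. Drop the request, then pulse the module reset. */
    	mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ), req_bit);
    	mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST), modrst_bit);
    	udelay(5);
    	mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), modrst_bit);

    	return ret;
    }
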
diff --git a/plat/intel/soc/common/socfpga_psci.c b/plat/intel/soc/common/socfpga_psci.c
index 3b96dfc..5ffd512 100644
--- a/plat/intel/soc/common/socfpga_psci.c
+++ b/plat/intel/soc/common/socfpga_psci.c
@@ -1,12 +1,14 @@
 /*
  * Copyright (c) 2019-2023, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch_helpers.h>
 #include <common/debug.h>
-#ifndef SOCFPGA_GIC_V3
+
+#ifndef GICV3_SUPPORT_GIC600
 #include <drivers/arm/gicv2.h>
 #else
 #include <drivers/arm/gicv3.h>
@@ -14,13 +16,16 @@
 #include <lib/mmio.h>
 #include <lib/psci/psci.h>
 #include <plat/common/platform.h>
-
 #include "socfpga_mailbox.h"
 #include "socfpga_plat_def.h"
 #include "socfpga_reset_manager.h"
 #include "socfpga_sip_svc.h"
 #include "socfpga_system_manager.h"
 
+#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+void socfpga_wakeup_secondary_cpu(unsigned int cpu_id);
+extern void plat_secondary_cold_boot_setup(void);
+#endif
 
 /*******************************************************************************
  * plat handler called when a CPU is about to enter standby.
@@ -43,13 +48,18 @@
 int socfpga_pwr_domain_on(u_register_t mpidr)
 {
 	unsigned int cpu_id = plat_core_pos_by_mpidr(mpidr);
+#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+	/* TODO: Add in CPU FUSE from SDM */
+#else
 	uint32_t psci_boot = 0x00;
 
 	VERBOSE("%s: mpidr: 0x%lx\n", __func__, mpidr);
+#endif
 
 	if (cpu_id == -1)
 		return PSCI_E_INTERN_FAIL;
 
+#if PLATFORM_MODEL != PLAT_SOCFPGA_AGILEX5
 	if (cpu_id == 0x00) {
 		psci_boot = mmio_read_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_8));
 		psci_boot |= 0x20000; /* bit 17 */
@@ -57,9 +67,16 @@
 	}
 
 	mmio_write_64(PLAT_CPUID_RELEASE, cpu_id);
+#endif
 
 	/* release core reset */
+#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+	bl31_plat_set_secondary_cpu_entrypoint(cpu_id);
+#else
 	mmio_setbits_32(SOCFPGA_RSTMGR(MPUMODRST), 1 << cpu_id);
+	mmio_write_64(PLAT_CPUID_RELEASE, cpu_id);
+#endif
+
 	return PSCI_E_SUCCESS;
 }
 
@@ -74,7 +91,12 @@
 			__func__, i, target_state->pwr_domain_state[i]);
 
 	/* Prevent interrupts from spuriously waking up this cpu */
+#ifdef GICV3_SUPPORT_GIC600
+	gicv3_cpuif_disable(plat_my_core_pos());
+#else
 	gicv2_cpuif_disable();
+#endif
+
 }
 
 /*******************************************************************************
@@ -83,15 +105,18 @@
  ******************************************************************************/
 void socfpga_pwr_domain_suspend(const psci_power_state_t *target_state)
 {
+#if PLATFORM_MODEL != PLAT_SOCFPGA_AGILEX5
 	unsigned int cpu_id = plat_my_core_pos();
+#endif
 
 	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
 		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
 			__func__, i, target_state->pwr_domain_state[i]);
 
+#if PLATFORM_MODEL != PLAT_SOCFPGA_AGILEX5
 	/* assert core reset */
 	mmio_setbits_32(SOCFPGA_RSTMGR(MPUMODRST), 1 << cpu_id);
-
+#endif
 }
 
 /*******************************************************************************
@@ -105,12 +130,18 @@
 		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
 			__func__, i, target_state->pwr_domain_state[i]);
 
+	/* Enable the gic cpu interface */
+#ifdef GICV3_SUPPORT_GIC600
+	gicv3_rdistif_init(plat_my_core_pos());
+	gicv3_cpuif_enable(plat_my_core_pos());
+#else
 	/* Program the gic per-cpu distributor or re-distributor interface */
 	gicv2_pcpu_distif_init();
 	gicv2_set_pe_target_mask(plat_my_core_pos());
 
 	/* Enable the gic cpu interface */
 	gicv2_cpuif_enable();
+#endif
 }
 
 /*******************************************************************************
@@ -122,14 +153,18 @@
  ******************************************************************************/
 void socfpga_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
 {
+#if PLATFORM_MODEL != PLAT_SOCFPGA_AGILEX5
 	unsigned int cpu_id = plat_my_core_pos();
+#endif
 
 	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
 		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
 			__func__, i, target_state->pwr_domain_state[i]);
 
+#if PLATFORM_MODEL != PLAT_SOCFPGA_AGILEX5
 	/* release core reset */
 	mmio_clrbits_32(SOCFPGA_RSTMGR(MPUMODRST), 1 << cpu_id);
+#endif
 }
 
 /*******************************************************************************
@@ -163,11 +198,20 @@
 static int socfpga_system_reset2(int is_vendor, int reset_type,
 					u_register_t cookie)
 {
+#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+	mailbox_reset_warm(reset_type);
+#else
 	if (cold_reset_for_ecc_dbe()) {
 		mailbox_reset_cold();
 	}
+#endif
+
 	/* disable cpuif */
+#ifdef GICV3_SUPPORT_GIC600
+	gicv3_cpuif_disable(plat_my_core_pos());
+#else
 	gicv2_cpuif_disable();
+#endif
 
 	/* Store magic number */
 	mmio_write_32(L2_RESET_DONE_REG, L2_RESET_DONE_STATUS);
@@ -178,8 +222,10 @@
 	/* Enable handshakes */
 	mmio_setbits_32(SOCFPGA_RSTMGR(HDSKEN), RSTMGR_HDSKEN_SET);
 
+#if PLATFORM_MODEL != PLAT_SOCFPGA_AGILEX5
 	/* Reset L2 module */
 	mmio_setbits_32(SOCFPGA_RSTMGR(COLDMODRST), 0x100);
+#endif
 
 	while (1)
 		wfi();
diff --git a/plat/intel/soc/common/socfpga_sip_svc.c b/plat/intel/soc/common/socfpga_sip_svc.c
index 1a18ee1..c6530cf 100644
--- a/plat/intel/soc/common/socfpga_sip_svc.c
+++ b/plat/intel/soc/common/socfpga_sip_svc.c
@@ -335,6 +335,7 @@
 	return 0;
 #endif
 
+#if PLATFORM_MODEL != PLAT_SOCFPGA_AGILEX5
 	switch (reg_addr) {
 	case(0xF8011100):	/* ECCCTRL1 */
 	case(0xF8011104):	/* ECCCTRL2 */
@@ -386,7 +387,41 @@
 	case(0xFFD12220):	/* BOOT_SCRATCH_COLD8 */
 	case(0xFFD12224):	/* BOOT_SCRATCH_COLD9 */
 		return 0;
+#else
+	switch (reg_addr) {
+
+	case(0xF8011104):	/* ECCCTRL2 */
+	case(0xFFD12028):	/* SDMMCGRP_CTRL */
+	case(0xFFD120C4):	/* NOC_IDLEREQ_SET */
+	case(0xFFD120C8):	/* NOC_IDLEREQ_CLR */
+	case(0xFFD120D0):	/* NOC_IDLEACK */
+
 
+	case(SOCFPGA_MEMCTRL(ECCCTRL1)):	/* ECCCTRL1 */
+	case(SOCFPGA_MEMCTRL(ERRINTEN)):	/* ERRINTEN */
+	case(SOCFPGA_MEMCTRL(ERRINTENS)):	/* ERRINTENS */
+	case(SOCFPGA_MEMCTRL(ERRINTENR)):	/* ERRINTENR */
+	case(SOCFPGA_MEMCTRL(INTMODE)):	/* INTMODE */
+	case(SOCFPGA_MEMCTRL(INTSTAT)):	/* INTSTAT */
+	case(SOCFPGA_MEMCTRL(DIAGINTTEST)):	/* DIAGINTTEST */
+	case(SOCFPGA_MEMCTRL(DERRADDRA)):	/* DERRADDRA */
+
+	case(SOCFPGA_SYSMGR(EMAC_0)):	/* EMAC0 */
+	case(SOCFPGA_SYSMGR(EMAC_1)):	/* EMAC1 */
+	case(SOCFPGA_SYSMGR(EMAC_2)):	/* EMAC2 */
+	case(SOCFPGA_SYSMGR(ECC_INTMASK_VALUE)):	/* ECC_INT_MASK_VALUE */
+	case(SOCFPGA_SYSMGR(ECC_INTMASK_SET)):	/* ECC_INT_MASK_SET */
+	case(SOCFPGA_SYSMGR(ECC_INTMASK_CLR)):	/* ECC_INT_MASK_CLEAR */
+	case(SOCFPGA_SYSMGR(ECC_INTMASK_SERR)):	/* ECC_INTSTATUS_SERR */
+	case(SOCFPGA_SYSMGR(ECC_INTMASK_DERR)):	/* ECC_INTSTATUS_DERR */
+	case(SOCFPGA_SYSMGR(NOC_TIMEOUT)):	/* NOC_TIMEOUT */
+	case(SOCFPGA_SYSMGR(NOC_IDLESTATUS)):	/* NOC_IDLESTATUS */
+	case(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_0)):	/* BOOT_SCRATCH_COLD0 */
+	case(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_1)):	/* BOOT_SCRATCH_COLD1 */
+	case(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_8)):	/* BOOT_SCRATCH_COLD8 */
+	case(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_9)):	/* BOOT_SCRATCH_COLD9 */
+		return 0;
+#endif
 	default:
 		break;
 	}
diff --git a/plat/intel/soc/common/socfpga_storage.c b/plat/intel/soc/common/socfpga_storage.c
index 79e15d7..e80f074 100644
--- a/plat/intel/soc/common/socfpga_storage.c
+++ b/plat/intel/soc/common/socfpga_storage.c
@@ -9,29 +9,37 @@
 #include <assert.h>
 #include <common/debug.h>
 #include <common/tbbr/tbbr_img_def.h>
+#include <drivers/cadence/cdns_nand.h>
+#include <drivers/cadence/cdns_sdmmc.h>
 #include <drivers/io/io_block.h>
 #include <drivers/io/io_driver.h>
 #include <drivers/io/io_fip.h>
 #include <drivers/io/io_memmap.h>
+#include <drivers/io/io_mtd.h>
 #include <drivers/io/io_storage.h>
 #include <drivers/mmc.h>
 #include <drivers/partition/partition.h>
 #include <lib/mmio.h>
 #include <tools_share/firmware_image_package.h>
 
+#include "drivers/sdmmc/sdmmc.h"
 #include "socfpga_private.h"
 
 #define PLAT_FIP_BASE		(0)
 #define PLAT_FIP_MAX_SIZE	(0x1000000)
 #define PLAT_MMC_DATA_BASE	(0xffe3c000)
 #define PLAT_MMC_DATA_SIZE	(0x2000)
 #define PLAT_QSPI_DATA_BASE	(0x3C00000)
 #define PLAT_QSPI_DATA_SIZE	(0x1000000)
-
+#define PLAT_NAND_DATA_BASE	(0x0200000)
+#define PLAT_NAND_DATA_SIZE	(0x1000000)
 
 static const io_dev_connector_t *fip_dev_con;
 static const io_dev_connector_t *boot_dev_con;
 
+static io_mtd_dev_spec_t nand_dev_spec;
+
 static uintptr_t fip_dev_handle;
 static uintptr_t boot_dev_handle;
 
@@ -136,17 +144,27 @@
 	case BOOT_SOURCE_SDMMC:
 		register_io_dev = &register_io_dev_block;
 		boot_dev_spec.buffer.offset	= PLAT_MMC_DATA_BASE;
-		boot_dev_spec.buffer.length	= MMC_BLOCK_SIZE;
-		boot_dev_spec.ops.read		= mmc_read_blocks;
-		boot_dev_spec.ops.write		= mmc_write_blocks;
+		boot_dev_spec.buffer.length	= SOCFPGA_MMC_BLOCK_SIZE;
+		boot_dev_spec.ops.read		= SDMMC_READ_BLOCKS;
+		boot_dev_spec.ops.write		= SDMMC_WRITE_BLOCKS;
 		boot_dev_spec.block_size	= MMC_BLOCK_SIZE;
 		break;
 
 	case BOOT_SOURCE_QSPI:
 		register_io_dev = &register_io_dev_memmap;
-		fip_spec.offset = fip_spec.offset + PLAT_QSPI_DATA_BASE;
+		fip_spec.offset = PLAT_QSPI_DATA_BASE;
 		break;
 
+#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+	case BOOT_SOURCE_NAND:
+		register_io_dev = &register_io_dev_mtd;
+		nand_dev_spec.ops.init = cdns_nand_init_mtd;
+		nand_dev_spec.ops.read = cdns_nand_read;
+		nand_dev_spec.ops.write = NULL;
+		fip_spec.offset = PLAT_NAND_DATA_BASE;
+		break;
+#endif
+
 	default:
 		ERROR("Unsupported boot source\n");
 		panic();
@@ -159,8 +177,13 @@
 	result = register_io_dev_fip(&fip_dev_con);
 	assert(result == 0);
 
-	result = io_dev_open(boot_dev_con, (uintptr_t)&boot_dev_spec,
-			&boot_dev_handle);
+	if (boot_source == BOOT_SOURCE_NAND) {
+		result = io_dev_open(boot_dev_con, (uintptr_t)&nand_dev_spec,
+								&boot_dev_handle);
+	} else {
+		result = io_dev_open(boot_dev_con, (uintptr_t)&boot_dev_spec,
+								&boot_dev_handle);
+	}
 	assert(result == 0);
 
 	result = io_dev_open(fip_dev_con, (uintptr_t)NULL, &fip_dev_handle);
diff --git a/plat/intel/soc/common/socfpga_topology.c b/plat/intel/soc/common/socfpga_topology.c
index ca1a91e..28c9557 100644
--- a/plat/intel/soc/common/socfpga_topology.c
+++ b/plat/intel/soc/common/socfpga_topology.c
@@ -33,8 +33,8 @@
 	if (mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK))
 		return -1;
 
-	cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
-	cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+	cluster_id = (mpidr >> PLAT_CLUSTER_ID_MPIDR_AFF_SHIFT) & MPIDR_AFFLVL_MASK;
+	cpu_id = (mpidr >> PLAT_CPU_ID_MPIDR_AFF_SHIFT) & MPIDR_AFFLVL_MASK;
 
 	if (cluster_id >= PLATFORM_CLUSTER_COUNT)
 		return -1;
diff --git a/plat/intel/soc/common/socfpga_vab.c b/plat/intel/soc/common/socfpga_vab.c
new file mode 100644
index 0000000..e16610c
--- /dev/null
+++ b/plat/intel/soc/common/socfpga_vab.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <common/tbbr/tbbr_img_def.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/utils.h>
+#include <tools_share/firmware_image_package.h>
+
+#include "socfpga_mailbox.h"
+#include "socfpga_vab.h"
+
+static size_t get_img_size(uint8_t *img_buf, size_t img_buf_sz)
+{
+	uint8_t *img_buf_end = img_buf + img_buf_sz;
+	uint32_t cert_sz = get_unaligned_le32(img_buf_end - sizeof(uint32_t));
+	uint8_t *p = img_buf_end - cert_sz - sizeof(uint32_t);
+
+	/* Ensure p is pointing within the img_buf */
+	if (p < img_buf || p > (img_buf_end - VAB_CERT_HEADER_SIZE))
+		return 0;
+
+	if (get_unaligned_le32(p) == SDM_CERT_MAGIC_NUM)
+		return (size_t)(p - img_buf);
+
+	return 0;
+}
+
+
+int socfpga_vendor_authentication(void **p_image, size_t *p_size)
+{
+	int retry_count = 20;
+	uint8_t hash384[FCS_SHA384_WORD_SIZE];
+	uint64_t img_addr, mbox_data_addr;
+	uint32_t img_sz, mbox_data_sz;
+	uint8_t *cert_hash_ptr, *mbox_relocate_data_addr;
+	uint32_t resp = 0, resp_len = 1;
+	int ret = 0;
+
+	img_addr = (uintptr_t)*p_image;
+	img_sz = get_img_size((uint8_t *)img_addr, *p_size);
+
+	if (!img_sz) {
+		NOTICE("VAB certificate not found in image!\n");
+		return -ENOVABIMG;
+	}
+
+	if (!IS_BYTE_ALIGNED(img_sz, sizeof(uint32_t))) {
+		NOTICE("Image size (%d bytes) not aligned to 4 bytes!\n", img_sz);
+		return -EIMGERR;
+	}
+
+	/* Generate HASH384 from the image */
+	/* TODO: This part needs to be cross-checked. */
+	sha384_csum_wd((uint8_t *)img_addr, img_sz, hash384, CHUNKSZ_PER_WD_RESET);
+	cert_hash_ptr = (uint8_t *)(img_addr + img_sz + VAB_CERT_MAGIC_OFFSET +
+				    VAB_CERT_FIT_SHA384_OFFSET);
+
+	/*
+	 * Compare the SHA384 found in certificate against the SHA384
+	 * calculated from image
+	 */
+	if (memcmp(hash384, cert_hash_ptr, FCS_SHA384_WORD_SIZE)) {
+		NOTICE("SHA384 does not match!\n");
+		return -EKEYREJECTED;
+	}
+
+	mbox_data_addr = img_addr + img_sz - sizeof(uint32_t);
+	/* Size in word (32bits) */
+	mbox_data_sz = (BYTE_ALIGN(*p_size - img_sz, sizeof(uint32_t))) >> 2;
+
+	NOTICE("mbox_data_addr = %lx    mbox_data_sz = %d\n", mbox_data_addr, mbox_data_sz);
+
+	/*
+	 * TODO: Cross-check this block. mbox_relocate_data_addr must point at
+	 * a writable, word-aligned scratch buffer before the memcpy() below,
+	 * but the allocation is still disabled here.
+	 */
+	// mbox_relocate_data_addr = (uint8_t *)malloc(mbox_data_sz * sizeof(uint32_t));
+	// if (!mbox_relocate_data_addr) {
+	//	NOTICE("Cannot allocate memory for VAB certificate relocation!\n");
+	//	return -ENOMEM;
+	// }
+
+	memcpy(mbox_relocate_data_addr, (uint8_t *)mbox_data_addr, mbox_data_sz * sizeof(uint32_t));
+	*(uint32_t *)mbox_relocate_data_addr = 0;
+
+	do {
+		/* Send the VAB certificate to SDM via the mailbox */
+		ret = mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_VAB_SRC_CERT,
+				       (uint32_t *)mbox_relocate_data_addr,
+				       mbox_data_sz, 0, &resp, &resp_len);
+
+		/* If SDM is not available, just delay 50ms and retry again */
+		/* 0x1FF = The device is busy */
+		if (ret == MBOX_RESP_ERR(0x1FF)) {
+			mdelay(50);
+		} else {
+			break;
+		}
+	} while (--retry_count);
+
+	/* Free the relocated certificate memory space */
+	zeromem((void *)&mbox_relocate_data_addr, sizeof(uint32_t));
+
+	/* Exclude the size of the VAB certificate from image size */
+	*p_size = img_sz;
+
+	if (ret) {
+		/*
+		 * Unsupported mailbox command or device not in the
+		 * owned/secure state
+		 */
+		 /* 0x85 = Not allowed under current security setting */
+		if (ret == MBOX_RESP_ERR(0x85)) {
+			/* SDM bypass authentication */
+			NOTICE("Image Authentication bypassed\n");
+			return 0;
+		}
+		NOTICE("VAB certificate authentication failed in SDM\n");
+		/* 0x1FF = The device is busy */
+		if (ret == MBOX_RESP_ERR(0x1FF)) {
+			NOTICE("Operation timed out\n");
+			return -ETIMEOUT;
+		} else if (ret == MBOX_WRONG_ID) {
+			NOTICE("No such process\n");
+			return -EPROCESS;
+		}
+	} else {
+		/* If Certificate Process Status has error */
+		if (resp) {
+			NOTICE("VAB certificate execution format error\n");
+			return -EIMGERR;
+		}
+	}
+
+	NOTICE("Image Authentication bypassed\n");
+	return ret;
+}
+
+static uint32_t get_unaligned_le32(const void *p)
+{
+	const uint8_t *b = (const uint8_t *)p;
+
+	/* Assemble a little-endian 32-bit value from possibly unaligned memory */
+	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
+	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
+}
+
+void sha384_csum_wd(const unsigned char *input, unsigned int ilen,
+		unsigned char *output, unsigned int chunk_sz)
+{
+	/* TODO: Implement using SHA-384 start/update/finish primitives. */
+}
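The sha384_csum_wd() stub above is left as a TODO in this patch. A minimal sketch of one way it could be completed, assuming the mbedTLS 3.x SHA-512/384 streaming API (mbedtls_sha512_starts/update/finish with is384 = 1) is available to this boot stage; the chunked loop mirrors the chunk_sz parameter so a platform watchdog kick could be inserted between updates. Illustrative only, not part of the change:

#include <mbedtls/sha512.h>

void sha384_csum_wd(const unsigned char *input, unsigned int ilen,
		    unsigned char *output, unsigned int chunk_sz)
{
	mbedtls_sha512_context ctx;

	mbedtls_sha512_init(&ctx);
	mbedtls_sha512_starts(&ctx, 1);		/* is384 = 1 selects SHA-384 */

	/* Hash the image in chunks; a watchdog kick could go between chunks */
	while (ilen > 0U) {
		unsigned int n = (ilen < chunk_sz) ? ilen : chunk_sz;

		mbedtls_sha512_update(&ctx, input, n);
		input += n;
		ilen -= n;
	}

	mbedtls_sha512_finish(&ctx, output);	/* writes 48 bytes for SHA-384 */
	mbedtls_sha512_free(&ctx);
}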
diff --git a/plat/intel/soc/n5x/include/socfpga_plat_def.h b/plat/intel/soc/n5x/include/socfpga_plat_def.h
index 197bbca..a06bbc4 100644
--- a/plat/intel/soc/n5x/include/socfpga_plat_def.h
+++ b/plat/intel/soc/n5x/include/socfpga_plat_def.h
@@ -88,6 +88,18 @@
 #define PLAT_SYS_COUNTER_FREQ_IN_TICKS	(400000000)
 #define PLAT_HZ_CONVERT_TO_MHZ	(1000000)
 
+/*******************************************************************************
+ * SDMMC block I/O function pointers
+ ******************************************************************************/
+#define SDMMC_READ_BLOCKS	mmc_read_blocks
+#define SDMMC_WRITE_BLOCKS	mmc_write_blocks
+
+/*******************************************************************************
+ * sysmgr.boot_scratch_cold6 & 7 (64bit) are used to indicate that the L2 reset
+ * is done and that HPS should trigger a warm reset via RMR_EL3.
+ ******************************************************************************/
+#define L2_RESET_DONE_REG			0xFFD12218
+
 /* Platform specific system counter */
 #define PLAT_SYS_COUNTER_FREQ_IN_MHZ	get_cpu_clk()
 
diff --git a/plat/intel/soc/stratix10/include/socfpga_plat_def.h b/plat/intel/soc/stratix10/include/socfpga_plat_def.h
index 8a5d4a4..7c9f15a 100644
--- a/plat/intel/soc/stratix10/include/socfpga_plat_def.h
+++ b/plat/intel/soc/stratix10/include/socfpga_plat_def.h
@@ -86,6 +86,18 @@
 #define PLAT_SYS_COUNTER_FREQ_IN_TICKS	(400000000)
 #define PLAT_HZ_CONVERT_TO_MHZ	(1000000)
 
+/*******************************************************************************
+ * SDMMC block I/O function pointers
+ ******************************************************************************/
+#define SDMMC_READ_BLOCKS	mmc_read_blocks
+#define SDMMC_WRITE_BLOCKS	mmc_write_blocks
+
+/*******************************************************************************
+ * sysmgr.boot_scratch_cold6 & 7 (64bit) are used to indicate that the L2 reset
+ * is done and that HPS should trigger a warm reset via RMR_EL3.
+ ******************************************************************************/
+#define L2_RESET_DONE_REG			0xFFD12218
+
 /* Platform specific system counter */
 #define PLAT_SYS_COUNTER_FREQ_IN_MHZ	get_cpu_clk()
 
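The SDMMC_READ_BLOCKS/SDMMC_WRITE_BLOCKS macros added above (and in the N5X header) are consumed by socfpga_storage.c when it fills in boot_dev_spec.ops.read/write, so each SoC's socfpga_plat_def.h selects the SD/MMC driver entry points at compile time without extra #ifdefs. A sketch of how another SoC header could redirect the hooks, using hypothetical driver symbols that are not taken from this patch:

/* Hypothetical socfpga_plat_def.h fragment for an SoC using a different SD/MMC driver */
#define SDMMC_READ_BLOCKS	plat_sdmmc_read_blocks	/* hypothetical symbol */
#define SDMMC_WRITE_BLOCKS	plat_sdmmc_write_blocks	/* hypothetical symbol */

socfpga_storage.c then resolves boot_dev_spec.ops.read = SDMMC_READ_BLOCKS; against whichever function the platform header named.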
diff --git a/plat/qemu/qemu/platform.mk b/plat/qemu/qemu/platform.mk
index 56c96a1..a10ab65 100644
--- a/plat/qemu/qemu/platform.mk
+++ b/plat/qemu/qemu/platform.mk
@@ -65,6 +65,7 @@
 				lib/cpus/aarch64/cortex_a76.S		\
 				lib/cpus/aarch64/neoverse_n_common.S	\
 				lib/cpus/aarch64/neoverse_n1.S		\
+				lib/cpus/aarch64/neoverse_v1.S		\
 				lib/cpus/aarch64/qemu_max.S
 else
 QEMU_CPU_LIBS		:=	lib/cpus/${ARCH}/cortex_a15.S