Merge "doc: update maintainer list for Arm platforms" into integration
diff --git a/common/fdt_fixup.c b/common/fdt_fixup.c
index e88a550..46606fb 100644
--- a/common/fdt_fixup.c
+++ b/common/fdt_fixup.c
@@ -188,6 +188,8 @@
  *
  * See reserved-memory/reserved-memory.txt in the (Linux kernel) DT binding
  * documentation for details.
+ * According to this binding, the #address-cells and #size-cells properties
+ * must match those of the root node.
  *
  * Return: 0 on success, a negative error value otherwise.
  ******************************************************************************/
@@ -195,23 +197,37 @@
 			    uintptr_t base, size_t size)
 {
 	int offs = fdt_path_offset(dtb, "/reserved-memory");
-	uint32_t addresses[3];
+	uint32_t addresses[4];
+	int ac, sc;
+	unsigned int idx = 0;
 
+	ac = fdt_address_cells(dtb, 0);
+	sc = fdt_size_cells(dtb, 0);
 	if (offs < 0) {			/* create if not existing yet */
 		offs = fdt_add_subnode(dtb, 0, "reserved-memory");
-		if (offs < 0)
+		if (offs < 0) {
 			return offs;
-		fdt_setprop_u32(dtb, offs, "#address-cells", 2);
-		fdt_setprop_u32(dtb, offs, "#size-cells", 1);
+		}
+		fdt_setprop_u32(dtb, offs, "#address-cells", ac);
+		fdt_setprop_u32(dtb, offs, "#size-cells", sc);
 		fdt_setprop(dtb, offs, "ranges", NULL, 0);
 	}
 
-	addresses[0] = cpu_to_fdt32(HIGH_BITS(base));
-	addresses[1] = cpu_to_fdt32(base & 0xffffffff);
-	addresses[2] = cpu_to_fdt32(size & 0xffffffff);
+	if (ac > 1) {
+		addresses[idx] = cpu_to_fdt32(HIGH_BITS(base));
+		idx++;
+	}
+	addresses[idx] = cpu_to_fdt32(base & 0xffffffff);
+	idx++;
+	if (sc > 1) {
+		addresses[idx] = cpu_to_fdt32(HIGH_BITS(size));
+		idx++;
+	}
+	addresses[idx] = cpu_to_fdt32(size & 0xffffffff);
+	idx++;
 	offs = fdt_add_subnode(dtb, offs, node_name);
 	fdt_setprop(dtb, offs, "no-map", NULL, 0);
-	fdt_setprop(dtb, offs, "reg", addresses, 12);
+	fdt_setprop(dtb, offs, "reg", addresses, idx * sizeof(uint32_t));
 
 	return 0;
 }
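
The hunk above sizes the "reg" property from the root node's #address-cells
and #size-cells instead of hard-coding a 2/1 cell layout. A minimal
stand-alone sketch of the same cell-packing logic is shown below; the helper
name pack_reg_cells is hypothetical and only illustrates the layout, while
cpu_to_fdt32() is libfdt's byte-order helper already used by the patch.

#include <stdint.h>

#include <libfdt.h>

/*
 * Pack a (base, size) pair into big-endian FDT cells, honouring the
 * #address-cells (ac) and #size-cells (sc) values of the root node.
 * Returns the number of 32-bit cells written; the "reg" property length
 * passed to fdt_setprop() is then that count times sizeof(uint32_t).
 */
static unsigned int pack_reg_cells(uint32_t *cells, uint64_t base,
				   uint64_t size, int ac, int sc)
{
	unsigned int idx = 0U;

	if (ac > 1) {
		cells[idx++] = cpu_to_fdt32((uint32_t)(base >> 32));
	}
	cells[idx++] = cpu_to_fdt32((uint32_t)base);
	if (sc > 1) {
		cells[idx++] = cpu_to_fdt32((uint32_t)(size >> 32));
	}
	cells[idx++] = cpu_to_fdt32((uint32_t)size);

	return idx;
}

For ac = 2 and sc = 2 this produces four cells, which is why the addresses[]
array in the hunk grows from three to four entries.
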
diff --git a/drivers/allwinner/axp/common.c b/drivers/allwinner/axp/common.c
index e98b16f..143fb0f 100644
--- a/drivers/allwinner/axp/common.c
+++ b/drivers/allwinner/axp/common.c
@@ -96,12 +96,27 @@
 	return 0;
 }
 
+static bool is_node_disabled(const void *fdt, int node)
+{
+	const char *cell = fdt_getprop(fdt, node, "status", NULL);
+
+	if (cell == NULL) {
+		return false;
+	}
+	return strcmp(cell, "okay") != 0;
+}
+
 static bool should_enable_regulator(const void *fdt, int node)
 {
-	if (fdt_getprop(fdt, node, "phandle", NULL) != NULL)
+	if (is_node_disabled(fdt, node)) {
+		return false;
+	}
+	if (fdt_getprop(fdt, node, "phandle", NULL) != NULL) {
 		return true;
-	if (fdt_getprop(fdt, node, "regulator-always-on", NULL) != NULL)
+	}
+	if (fdt_getprop(fdt, node, "regulator-always-on", NULL) != NULL) {
 		return true;
+	}
 	return false;
 }
 
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index b5f6a10..3ea17fb 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -27,7 +27,7 @@
 static struct mmc_csd_emmc mmc_csd;
 static unsigned char mmc_ext_csd[512] __aligned(16);
 static unsigned int mmc_flags;
-static struct mmc_device_info *mmc_dev_info;
+static struct mmc_device_info mmc_dev_info;
 static unsigned int rca;
 static unsigned int scr[2]__aligned(16) = { 0 };
 
@@ -195,7 +195,7 @@
 	int ret;
 	unsigned int width = bus_width;
 
-	if (mmc_dev_info->mmc_dev_type != MMC_IS_EMMC) {
+	if (mmc_dev_info.mmc_dev_type != MMC_IS_EMMC) {
 		if (width == MMC_BUS_WIDTH_8) {
 			WARN("Wrong bus config for SD-card, force to 4\n");
 			width = MMC_BUS_WIDTH_4;
@@ -226,9 +226,9 @@
 	int ret = 0;
 	struct mmc_csd_sd_v2 *csd_sd_v2;
 
-	switch (mmc_dev_info->mmc_dev_type) {
+	switch (mmc_dev_info.mmc_dev_type) {
 	case MMC_IS_EMMC:
-		mmc_dev_info->block_size = MMC_BLOCK_SIZE;
+		mmc_dev_info.block_size = MMC_BLOCK_SIZE;
 
 		ret = ops->prepare(0, (uintptr_t)&mmc_ext_csd,
 				   sizeof(mmc_ext_csd));
@@ -260,8 +260,8 @@
 			    (mmc_ext_csd[CMD_EXTCSD_SEC_CNT + 2] << 16) |
 			    (mmc_ext_csd[CMD_EXTCSD_SEC_CNT + 3] << 24);
 
-		mmc_dev_info->device_size = (unsigned long long)nb_blocks *
-			mmc_dev_info->block_size;
+		mmc_dev_info.device_size = (unsigned long long)nb_blocks *
+			mmc_dev_info.block_size;
 
 		break;
 
@@ -270,29 +270,29 @@
 		 * Use the same mmc_csd struct, as required fields here
 		 * (READ_BL_LEN, C_SIZE, CSIZE_MULT) are common with eMMC.
 		 */
-		mmc_dev_info->block_size = BIT_32(mmc_csd.read_bl_len);
+		mmc_dev_info.block_size = BIT_32(mmc_csd.read_bl_len);
 
 		c_size = ((unsigned long long)mmc_csd.c_size_high << 2U) |
 			 (unsigned long long)mmc_csd.c_size_low;
 		assert(c_size != 0xFFFU);
 
-		mmc_dev_info->device_size = (c_size + 1U) *
+		mmc_dev_info.device_size = (c_size + 1U) *
 					    BIT_64(mmc_csd.c_size_mult + 2U) *
-					    mmc_dev_info->block_size;
+					    mmc_dev_info.block_size;
 
 		break;
 
 	case MMC_IS_SD_HC:
 		assert(mmc_csd.csd_structure == 1U);
 
-		mmc_dev_info->block_size = MMC_BLOCK_SIZE;
+		mmc_dev_info.block_size = MMC_BLOCK_SIZE;
 
 		/* Need to use mmc_csd_sd_v2 struct */
 		csd_sd_v2 = (struct mmc_csd_sd_v2 *)&mmc_csd;
 		c_size = ((unsigned long long)csd_sd_v2->c_size_high << 16) |
 			 (unsigned long long)csd_sd_v2->c_size_low;
 
-		mmc_dev_info->device_size = (c_size + 1U) << MULT_BY_512K_SHIFT;
+		mmc_dev_info.device_size = (c_size + 1U) << MULT_BY_512K_SHIFT;
 
 		break;
 
@@ -310,19 +310,19 @@
 
 	assert(speed_idx > 0U);
 
-	if (mmc_dev_info->mmc_dev_type == MMC_IS_EMMC) {
-		mmc_dev_info->max_bus_freq = tran_speed_base[speed_idx];
+	if (mmc_dev_info.mmc_dev_type == MMC_IS_EMMC) {
+		mmc_dev_info.max_bus_freq = tran_speed_base[speed_idx];
 	} else {
-		mmc_dev_info->max_bus_freq = sd_tran_speed_base[speed_idx];
+		mmc_dev_info.max_bus_freq = sd_tran_speed_base[speed_idx];
 	}
 
 	freq_unit = mmc_csd.tran_speed & CSD_TRAN_SPEED_UNIT_MASK;
 	while (freq_unit != 0U) {
-		mmc_dev_info->max_bus_freq *= 10U;
+		mmc_dev_info.max_bus_freq *= 10U;
 		--freq_unit;
 	}
 
-	mmc_dev_info->max_bus_freq *= 10000U;
+	mmc_dev_info.max_bus_freq *= 10000U;
 
 	return 0;
 }
@@ -343,7 +343,7 @@
 
 		/* ACMD41: SD_SEND_OP_COND */
 		ret = mmc_send_cmd(MMC_ACMD(41), OCR_HCS |
-			mmc_dev_info->ocr_voltage, MMC_RESPONSE_R3,
+			mmc_dev_info.ocr_voltage, MMC_RESPONSE_R3,
 			&resp_data[0]);
 		if (ret != 0) {
 			return ret;
@@ -353,9 +353,9 @@
 			mmc_ocr_value = resp_data[0];
 
 			if ((mmc_ocr_value & OCR_HCS) != 0U) {
-				mmc_dev_info->mmc_dev_type = MMC_IS_SD_HC;
+				mmc_dev_info.mmc_dev_type = MMC_IS_SD_HC;
 			} else {
-				mmc_dev_info->mmc_dev_type = MMC_IS_SD;
+				mmc_dev_info.mmc_dev_type = MMC_IS_SD;
 			}
 
 			return 0;
@@ -425,9 +425,9 @@
 	ret = mmc_reset_to_idle();
 	if (ret != 0) {
 		return ret;
-	};
+	}
 
-	if (mmc_dev_info->mmc_dev_type == MMC_IS_EMMC) {
+	if (mmc_dev_info.mmc_dev_type == MMC_IS_EMMC) {
 		ret = mmc_send_op_cond();
 	} else {
 		/* CMD8: Send Interface Condition Command */
@@ -449,7 +449,7 @@
 	}
 
 	/* CMD3: Set Relative Address */
-	if (mmc_dev_info->mmc_dev_type == MMC_IS_EMMC) {
+	if (mmc_dev_info.mmc_dev_type == MMC_IS_EMMC) {
 		rca = MMC_FIX_RCA;
 		ret = mmc_send_cmd(MMC_CMD(3), rca << RCA_SHIFT_OFFSET,
 				   MMC_RESPONSE_R1, NULL);
@@ -530,7 +530,7 @@
 	}
 
 	if (((mmc_ocr_value & OCR_ACCESS_MODE_MASK) == OCR_BYTE_MODE) &&
-	    (mmc_dev_info->mmc_dev_type != MMC_IS_SD_HC)) {
+	    (mmc_dev_info.mmc_dev_type != MMC_IS_SD_HC)) {
 		cmd_arg = lba * MMC_BLOCK_SIZE;
 	} else {
 		cmd_arg = lba;
@@ -731,7 +731,7 @@
 
 	ops = ops_ptr;
 	mmc_flags = flags;
-	mmc_dev_info = device_info;
+	memcpy(&mmc_dev_info, device_info, sizeof(struct mmc_device_info));
 
 	return mmc_enumerate(clk, width);
 }
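
With the hunk above, the MMC driver keeps its own copy of the device
descriptor (mmc_dev_info is now a struct filled via memcpy()) rather than a
pointer into the caller's buffer, so the descriptor passed to mmc_init() no
longer has to outlive the call. A hypothetical platform caller is sketched
below; the function name plat_mmc_setup and the OCR voltage choice are
illustrative only, and the mmc_init() prototype is assumed from the hunk.

#include <drivers/mmc.h>

int plat_mmc_setup(const struct mmc_ops *ops, unsigned int clk,
		   unsigned int width, unsigned int flags)
{
	/*
	 * Stack-allocated descriptor: safe because mmc_init() now copies
	 * it into the driver's static mmc_dev_info before returning.
	 */
	struct mmc_device_info info = {
		.mmc_dev_type = MMC_IS_EMMC,
		.ocr_voltage = OCR_3_2_3_3 | OCR_3_3_3_4,
	};

	return mmc_init(ops, clk, width, flags, &info);
}
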
diff --git a/drivers/nxp/auth/csf_hdr_parser/cot.c b/drivers/nxp/auth/csf_hdr_parser/cot.c
new file mode 100644
index 0000000..4502ed6
--- /dev/null
+++ b/drivers/nxp/auth/csf_hdr_parser/cot.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * Copyright 2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+
+#include <drivers/auth/auth_mod.h>
+
+#if USE_TBBR_DEFS
+#include <tools_share/tbbr_oid.h>
+#else
+#include <platform_oid.h>
+#endif
+
+
+static auth_param_type_desc_t sig = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_SIG, 0);
+static auth_param_type_desc_t sig_alg = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_SIG_ALG, 0);
+static auth_param_type_desc_t sig_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, 0);
+
+static auth_param_type_desc_t non_trusted_world_pk = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_PUB_KEY, NON_TRUSTED_WORLD_PK_OID);
+
+/*
+ * TBBR Chain of trust definition
+ */
+static const auth_img_desc_t bl31_image = {
+	.img_id = BL31_IMAGE_ID,
+	.img_type = IMG_PLAT,
+	.parent = NULL,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &non_trusted_world_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &sig_hash
+			}
+		}
+	}
+};
+static const auth_img_desc_t scp_bl2_image = {
+	.img_id = SCP_BL2_IMAGE_ID,
+	.img_type = IMG_PLAT,
+	.parent = NULL,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &non_trusted_world_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &sig_hash
+			}
+		}
+	}
+};
+static const auth_img_desc_t bl32_image = {
+	.img_id = BL32_IMAGE_ID,
+	.img_type = IMG_PLAT,
+	.parent = NULL,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &non_trusted_world_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &sig_hash
+			}
+		}
+	}
+};
+static const auth_img_desc_t bl33_image = {
+	.img_id = BL33_IMAGE_ID,
+	.img_type = IMG_PLAT,
+	.parent = NULL,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &non_trusted_world_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &sig_hash
+			}
+		}
+	}
+};
+#ifdef POLICY_FUSE_PROVISION
+static const auth_img_desc_t fuse_prov_img = {
+	.img_id = FUSE_PROV_IMAGE_ID,
+	.img_type = IMG_PLAT,
+	.parent = NULL,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &non_trusted_world_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &sig_hash
+			}
+		}
+	}
+};
+static const auth_img_desc_t fuse_upgrade_img = {
+	.img_id = FUSE_UP_IMAGE_ID,
+	.img_type = IMG_PLAT,
+	.parent = NULL,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &non_trusted_world_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &sig_hash
+			}
+		}
+	}
+};
+#endif
+#ifdef CONFIG_DDR_FIP_IMAGE
+static const auth_img_desc_t ddr_imem_udimm_1d_img = {
+	.img_id = DDR_IMEM_UDIMM_1D_IMAGE_ID,
+	.img_type = IMG_PLAT,
+	.parent = NULL,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &non_trusted_world_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &sig_hash
+			}
+		}
+	}
+};
+static const auth_img_desc_t ddr_imem_udimm_2d_img = {
+	.img_id = DDR_IMEM_UDIMM_2D_IMAGE_ID,
+	.img_type = IMG_PLAT,
+	.parent = NULL,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &non_trusted_world_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &sig_hash
+			}
+		}
+	}
+};
+static const auth_img_desc_t ddr_dmem_udimm_1d_img = {
+	.img_id = DDR_DMEM_UDIMM_1D_IMAGE_ID,
+	.img_type = IMG_PLAT,
+	.parent = NULL,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &non_trusted_world_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &sig_hash
+			}
+		}
+	}
+};
+static const auth_img_desc_t ddr_dmem_udimm_2d_img = {
+	.img_id = DDR_DMEM_UDIMM_2D_IMAGE_ID,
+	.img_type = IMG_PLAT,
+	.parent = NULL,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &non_trusted_world_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &sig_hash
+			}
+		}
+	}
+};
+static const auth_img_desc_t ddr_imem_rdimm_1d_img = {
+	.img_id = DDR_IMEM_RDIMM_1D_IMAGE_ID,
+	.img_type = IMG_PLAT,
+	.parent = NULL,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &non_trusted_world_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &sig_hash
+			}
+		}
+	}
+};
+static const auth_img_desc_t ddr_imem_rdimm_2d_img = {
+	.img_id = DDR_IMEM_RDIMM_2D_IMAGE_ID,
+	.img_type = IMG_PLAT,
+	.parent = NULL,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &non_trusted_world_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &sig_hash
+			}
+		}
+	}
+};
+static const auth_img_desc_t ddr_dmem_rdimm_1d_img = {
+	.img_id = DDR_DMEM_RDIMM_1D_IMAGE_ID,
+	.img_type = IMG_PLAT,
+	.parent = NULL,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &non_trusted_world_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &sig_hash
+			}
+		}
+	}
+};
+static const auth_img_desc_t ddr_dmem_rdimm_2d_img = {
+	.img_id = DDR_DMEM_RDIMM_2D_IMAGE_ID,
+	.img_type = IMG_PLAT,
+	.parent = NULL,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &non_trusted_world_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &sig_hash
+			}
+		}
+	}
+};
+#endif
+
+static const auth_img_desc_t * const cot_desc[] = {
+	[BL31_IMAGE_ID]			=	&bl31_image,
+	[SCP_BL2_IMAGE_ID]		=	&scp_bl2_image,
+	[BL32_IMAGE_ID]			=	&bl32_image,
+	[BL33_IMAGE_ID]			=	&bl33_image,
+#ifdef POLICY_FUSE_PROVISION
+	[FUSE_PROV_IMAGE_ID]		=	&fuse_prov_img,
+	[FUSE_UP_IMAGE_ID]		=	&fuse_upgrade_img,
+#endif
+#ifdef CONFIG_DDR_FIP_IMAGE
+	[DDR_IMEM_UDIMM_1D_IMAGE_ID]	=	&ddr_imem_udimm_1d_img,
+	[DDR_IMEM_UDIMM_2D_IMAGE_ID]	=	&ddr_imem_udimm_2d_img,
+	[DDR_DMEM_UDIMM_1D_IMAGE_ID]	=	&ddr_dmem_udimm_1d_img,
+	[DDR_DMEM_UDIMM_2D_IMAGE_ID]	=	&ddr_dmem_udimm_2d_img,
+	[DDR_IMEM_RDIMM_1D_IMAGE_ID]	=	&ddr_imem_rdimm_1d_img,
+	[DDR_IMEM_RDIMM_2D_IMAGE_ID]	=	&ddr_imem_rdimm_2d_img,
+	[DDR_DMEM_RDIMM_1D_IMAGE_ID]	=	&ddr_dmem_rdimm_1d_img,
+	[DDR_DMEM_RDIMM_2D_IMAGE_ID]	=	&ddr_dmem_rdimm_2d_img,
+#endif
+};
+
+/* Register the CoT in the authentication module */
+REGISTER_COT(cot_desc);
diff --git a/drivers/nxp/auth/csf_hdr_parser/csf_hdr.h b/drivers/nxp/auth/csf_hdr_parser/csf_hdr.h
new file mode 100644
index 0000000..eaead76
--- /dev/null
+++ b/drivers/nxp/auth/csf_hdr_parser/csf_hdr.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2017-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef CSF_HDR_H
+#define CSF_HDR_H
+
+#include "caam.h"
+#include "hash.h"
+#include "rsa.h"
+
+/* Barker code size in bytes */
+#define CSF_BARKER_LEN	4	/* Barker code length in ESBC u-boot */
+				/* client header */
+
+#ifdef CSF_HDR_CH3
+struct csf_hdr {
+	uint8_t barker[CSF_BARKER_LEN];	/* 0x00 Barker code */
+	uint32_t srk_tbl_off;		/* 0x04 SRK Table Offset */
+
+	struct {
+		uint8_t num_srk;	/* 0x08 No. of keys */
+		uint8_t srk_sel;	/*  Key no. to be used */
+		uint8_t reserve;	/* 0x0a reserved */
+	} len_kr;
+	uint8_t ie_flag;
+
+	uint32_t uid_flag;
+
+	uint32_t psign;			/* 0x10 signature offset */
+	uint32_t sign_len;			/* 0x14 length of signature */
+
+	union {
+		struct {
+			uint32_t sg_table_offset; /* 0x18 SG Table Offset */
+			uint32_t sg_entries;	  /* 0x1c no of entries in SG */
+		} sg_isbc;
+		uint64_t img_addr;	/* 64 bit pointer to ESBC Image */
+	};
+
+	union {
+		struct {
+			uint32_t img_size;   /* ESBC client img size in bytes */
+			uint32_t ie_key_sel;
+		} img;
+		uint64_t entry_point;	  /* 0x20-0x24 ESBC entry point */
+	};
+
+	uint32_t fsl_uid_0;			/* 0x28 Freescale unique id 0 */
+	uint32_t fsl_uid_1;			/* 0x2c Freescale unique id 1 */
+	uint32_t oem_uid_0;			/* 0x30 OEM unique id 0 */
+	uint32_t oem_uid_1;			/* 0x34 OEM unique id 1 */
+	uint32_t oem_uid_2;			/* 0x38 OEM unique id 2 */
+	uint32_t oem_uid_3;			/* 0x3c OEM unique id 3 */
+	uint32_t oem_uid_4;			/* 0x40 OEM unique id 4 */
+
+	uint32_t reserved[3];		/* 0x44 - 0x4f */
+};
+
+/* SRK table and key revocation check */
+#define UNREVOCABLE_KEY	8
+#define REVOC_KEY_ALIGN 7
+#define MAX_KEY_ENTRIES 8
+
+#else
+
+/* CSF header for Chassis 2 */
+struct csf_hdr {
+	uint8_t barker[CSF_BARKER_LEN];	/* barker code */
+	union {
+		uint32_t pkey;		/* public key offset */
+		uint32_t srk_tbl_off;
+	};
+
+	union {
+		uint32_t key_len;		/* pub key length in bytes */
+		struct {
+			uint32_t srk_table_flag:8;
+			uint32_t srk_sel:8;
+			uint32_t num_srk:16;
+		} len_kr;
+	};
+
+	uint32_t psign;		/* signature offset */
+	uint32_t sign_len;		/* length of the signature in bytes */
+
+	/* SG Table used by ISBC header */
+	union {
+		struct {
+			uint32_t sg_table_offset; /* 0x14 SG Table Offset */
+			uint32_t sg_entries;	/* no of entries in SG table */
+		} sg_isbc;
+		struct {
+			uint32_t reserved1;	/* Reserved field */
+			uint32_t img_size;	/* ESBC img size in bytes */
+		} img;
+	};
+
+	uint32_t entry_point;		/* ESBC client entry point */
+	uint32_t reserved2;		/* Scatter gather flag */
+	uint32_t uid_flag;
+	uint32_t fsl_uid_0;
+	uint32_t oem_uid_0;
+	uint32_t reserved3[2];
+	uint32_t fsl_uid_1;
+	uint32_t oem_uid_1;
+
+	/* The entries below aren't present in ISBC header */
+	uint64_t img_addr;	/* 64 bit pointer to ESBC Image */
+	uint32_t ie_flag;
+	uint32_t ie_key_sel;
+};
+
+/* SRK table and key revocation check */
+#define UNREVOCABLE_KEY	4
+#define REVOC_KEY_ALIGN 3
+#define MAX_KEY_ENTRIES 4
+
+#endif
+
+struct srk_table {
+	uint32_t key_len;
+	uint8_t pkey[2 * RSA_4K_KEY_SZ_BYTES];
+};
+
+/*
+ * This struct contains the following fields:
+ * - length of the segment
+ * - destination target ID
+ * - source address
+ * - destination address
+ */
+struct sg_table {
+	uint32_t len;			/* Length of Image */
+	uint32_t res1;
+	union {
+		uint64_t src_addr;	/* SRC Address of Image */
+		struct {
+			uint32_t src_addr;
+			uint32_t dst_addr;
+		} img;
+	};
+};
+
+int validate_esbc_header(void *img_hdr, void **img_key, uint32_t *key_len,
+			 void **img_sign, uint32_t *sign_len,
+			 enum sig_alg *algo);
+
+int calc_img_hash(struct csf_hdr *hdr, void *img_addr, uint32_t img_size,
+		  uint8_t *img_hash, uint32_t *hash_len);
+
+#endif
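
The offset annotations in the Chassis 3 layout above are self-consistent
under natural alignment: the two 64-bit unions land at 0x18 and 0x20, the
UID words run from 0x28 to 0x43, and reserved[3] occupies 0x44-0x4f, giving
a header size of 0x50 bytes. A hypothetical compile-time check (not part of
the patch) using the CASSERT macro that csf_hdr_parser.c already includes:

#include <cassert.h>

#include <csf_hdr.h>

#ifdef CSF_HDR_CH3
/* reserved[3] is documented as 0x44-0x4f, so the header spans 0x50 bytes */
CASSERT(sizeof(struct csf_hdr) == 0x50U, assert_csf_hdr_ch3_size_mismatch);
#endif
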
diff --git a/drivers/nxp/auth/csf_hdr_parser/csf_hdr.mk b/drivers/nxp/auth/csf_hdr_parser/csf_hdr.mk
new file mode 100644
index 0000000..d518dbb
--- /dev/null
+++ b/drivers/nxp/auth/csf_hdr_parser/csf_hdr.mk
@@ -0,0 +1,64 @@
+#
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+
+CSF_HDR_SOURCES	:=  $(PLAT_DRIVERS_PATH)/auth/csf_hdr_parser/csf_hdr_parser.c
+
+CSF_HDR_SOURCES	+=  $(PLAT_DRIVERS_PATH)/auth/csf_hdr_parser/plat_img_parser.c
+
+PLAT_INCLUDES	+= -I$(PLAT_DRIVERS_PATH)/auth/csf_hdr_parser/
+
+$(eval $(call add_define, CSF_HEADER_PREPENDED))
+
+
+# Path to CST directory is required to generate the CSF header
+# and prepend it to image before fip image gets generated
+ifeq (${CST_DIR},)
+  $(error Error: CST_DIR not set)
+endif
+
+# Rules are created for generating and appending CSF header to images before
+# FIT image generation
+
+# CST_BL31
+define CST_BL31_RULE
+$(1): $(2)
+	@echo " Generating CSF Header for $$@ $$<"
+	$(Q)$(CST_DIR)/create_hdr_esbc --in $(2) --out $(1) --app_off ${CSF_HDR_SZ} \
+					--app $(2) ${BL31_INPUT_FILE}
+endef
+
+CST_BL31_SUFFIX := .cst
+
+# CST_BL32
+define CST_BL32_RULE
+$(1): $(2)
+	@echo " Generating CSF Header for $$@ $$<"
+	$(Q)$(CST_DIR)/create_hdr_esbc --in $(2) --out $(1) --app_off ${CSF_HDR_SZ} \
+					--app $(2) ${BL32_INPUT_FILE}
+endef
+
+CST_BL32_SUFFIX := .cst
+
+# CST_BL33
+define CST_BL33_RULE
+$(1): $(2)
+	@echo " Generating CSF Header for $$@ $$<"
+	$(Q)$(CST_DIR)/create_hdr_esbc --in $(2) --out $(1) --app_off ${CSF_HDR_SZ} \
+					--app $(2) ${BL33_INPUT_FILE}
+endef
+
+CST_BL33_SUFFIX := .cst
+
+# CST_SCP_BL2
+define CST_SCP_BL2_RULE
+$(1): $(2)
+	@echo " Generating CSF Header for $$@ $$<"
+	$(Q)$(CST_DIR)/create_hdr_esbc --in $(2) --out $(1) --app_off ${CSF_HDR_SZ} \
+					--app $(2) ${FUSE_INPUT_FILE}
+endef
+
+CST_SCP_BL2_SUFFIX := .cst
diff --git a/drivers/nxp/auth/csf_hdr_parser/csf_hdr_parser.c b/drivers/nxp/auth/csf_hdr_parser/csf_hdr_parser.c
new file mode 100644
index 0000000..b878082
--- /dev/null
+++ b/drivers/nxp/auth/csf_hdr_parser/csf_hdr_parser.c
@@ -0,0 +1,365 @@
+/*
+ * Copyright (c) 2014-2016, Freescale Semiconductor, Inc.
+ * Copyright 2017-2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <cassert.h>
+#include <common/debug.h>
+#include <csf_hdr.h>
+#include <dcfg.h>
+#include <drivers/auth/crypto_mod.h>
+#include <lib/utils.h>
+#include <sfp.h>
+
+/* Maximum OID string length ("a.b.c.d.e.f ...") */
+#define MAX_OID_STR_LEN			64
+
+#define LIB_NAME	"NXP CSFv2"
+
+#ifdef CSF_HDR_CH3
+/* Barker Code for LS Ch3 ESBC Header */
+static const uint8_t barker_code[CSF_BARKER_LEN] = { 0x12, 0x19, 0x20, 0x01 };
+#else
+static const uint8_t barker_code[CSF_BARKER_LEN] = { 0x68, 0x39, 0x27, 0x81 };
+#endif
+
+#define CHECK_KEY_LEN(key_len)	(((key_len) == 2 * RSA_1K_KEY_SZ_BYTES) || \
+				 ((key_len) == 2 * RSA_2K_KEY_SZ_BYTES) || \
+				 ((key_len) == 2 * RSA_4K_KEY_SZ_BYTES))
+
+/* Flag to indicate if values are there in rotpk_hash_table */
+bool rotpk_not_dpld = true;
+uint8_t rotpk_hash_table[MAX_KEY_ENTRIES][SHA256_BYTES];
+uint32_t num_rotpk_hash_entries;
+
+/*
+ * This function deploys the hashes of the various platform keys in
+ * rotpk_hash_table. In case of secure boot, this is done after comparing
+ * the table's hash with the hash in the SFP fuses. The installation is
+ * done only while parsing the first header.
+ */
+static int deploy_rotpk_hash_table(void *srk_buffer, uint16_t num_srk)
+{
+	void *ctx;
+	int ret = 0;
+	int i, j = 0;
+	unsigned int digest_size = SHA256_BYTES;
+	enum hash_algo algo = SHA256;
+	uint8_t hash[SHA256_BYTES];
+	uint32_t srk_hash[SHA256_BYTES/4] __aligned(CACHE_WRITEBACK_GRANULE);
+	struct srk_table *srktbl = (void *)srk_buffer;
+	struct sfp_ccsr_regs_t *sfp_ccsr_regs = (void *)(get_sfp_addr()
+							+ SFP_FUSE_REGS_OFFSET);
+
+
+	if (num_srk > MAX_KEY_ENTRIES) {
+		return -1;
+	}
+
+	ret = hash_init(algo, &ctx);
+	if (ret != 0) {
+		return -1;
+	}
+
+	/* Update hash with that of SRK table */
+	ret = hash_update(algo, ctx, (uint8_t *)((uint8_t *)srk_buffer),
+			  num_srk * sizeof(struct srk_table));
+	if (ret != 0) {
+		return -1;
+	}
+
+	/* Copy the hash to the destination buffer */
+	ret = hash_final(algo, ctx, hash, digest_size);
+	if (ret != 0) {
+		return -1;
+	}
+
+	/* Compare the computed hash with the SRK hash in the SFP fuses */
+	for (i = 0; i < SHA256_BYTES/4; i++) {
+		srk_hash[i] =
+			mmio_read_32((uintptr_t)&sfp_ccsr_regs->srk_hash[i]);
+	}
+
+	VERBOSE("SRK table HASH\n");
+	for (i = 0; i < 8; i++) {
+		VERBOSE("%x\n", *((uint32_t *)hash + i));
+	}
+
+	if (memcmp(hash, srk_hash, SHA256_BYTES) != 0) {
+		ERROR("Error in installing ROTPK table\n");
+		ERROR("SRK hash doesn't match the fuse hash\n");
+		return -1;
+	}
+
+	/* Hash table already deployed */
+	if (rotpk_not_dpld == false) {
+		return 0;
+	}
+
+	for (i = 0; i < num_srk; i++) {
+		ret = hash_init(algo, &ctx);
+		if (ret != 0) {
+			return -1;
+		}
+
+		/* Update hash with the public key from the SRK table */
+		ret = hash_update(algo, ctx, srktbl[i].pkey, srktbl[i].key_len);
+		if (ret != 0) {
+			return -1;
+		}
+
+		/* Copy the hash to the destination buffer */
+		ret = hash_final(algo, ctx, rotpk_hash_table[i], digest_size);
+		if (ret != 0) {
+			return -1;
+		}
+		VERBOSE("Table key %d HASH\n", i);
+		for (j = 0; j < 8; j++) {
+			VERBOSE("%x\n", *((uint32_t *)rotpk_hash_table[i] + j));
+		}
+	}
+	rotpk_not_dpld = false;
+	num_rotpk_hash_entries = num_srk;
+
+	return 0;
+}
+
+/*
+ * Calculate the hash of the ESBC header and ESBC image. This function
+ * computes a single hash over the CSF header, SRK table and ESBC image.
+ */
+int calc_img_hash(struct csf_hdr *hdr,
+		  void *img_addr, uint32_t img_size,
+		  uint8_t *img_hash, uint32_t *hash_len)
+{
+	void *ctx;
+	int ret = 0;
+	unsigned int digest_size = SHA256_BYTES;
+	enum hash_algo algo = SHA256;
+
+	/* Initialise the hash context */
+	ret = hash_init(algo, &ctx);
+	if (ret != 0) {
+		return -1;
+	}
+
+	/* Update hash for CSF Header */
+	ret = hash_update(algo, ctx, (uint8_t *)hdr, sizeof(struct csf_hdr));
+	if (ret != 0) {
+		return -1;
+	}
+
+	/* Update hash with that of SRK table */
+	ret = hash_update(algo, ctx,
+			  (uint8_t *)((uint8_t *)hdr + hdr->srk_tbl_off),
+			  hdr->len_kr.num_srk * sizeof(struct srk_table));
+	if (ret != 0) {
+		return -1;
+	}
+
+	/* Update hash for actual Image */
+	ret = hash_update(algo, ctx, (uint8_t *)(img_addr), img_size);
+	if (ret != 0) {
+		return -1;
+	}
+
+	/* Copy the hash to the destination buffer */
+	ret = hash_final(algo, ctx, img_hash, digest_size);
+	if (ret != 0) {
+		return -1;
+	}
+
+	*hash_len = digest_size;
+
+	VERBOSE("IMG encoded HASH\n");
+	for (int i = 0; i < 8; i++) {
+		VERBOSE("%x\n", *((uint32_t *)img_hash + i));
+	}
+
+	return 0;
+}
+
+/* This function checks whether the selected key has been revoked. */
+static uint32_t is_key_revoked(uint32_t keynum, uint32_t rev_flag)
+{
+	if (keynum == UNREVOCABLE_KEY) {
+		return 0;
+	}
+
+	if (((uint32_t)(1 << (REVOC_KEY_ALIGN - keynum)) & rev_flag) != 0) {
+		return 1;
+	}
+
+	return 0;
+}
+
+/* Parse the header to extract the type of key,
+ * check that the selected key has not been revoked,
+ * and return the key, key length and key type.
+ */
+static int32_t get_key(struct csf_hdr *hdr, uint8_t **key, uint32_t *len,
+			enum sig_alg *key_type)
+{
+	int i = 0;
+	uint32_t ret = 0U;
+	uint32_t key_num, key_revoc_flag;
+	void *esbc = hdr;
+	struct srk_table *srktbl = (void *)((uint8_t *)esbc + hdr->srk_tbl_off);
+	bool sb;
+	uint32_t mode;
+
+	/* We currently support only RSA keys and signature */
+	*key_type = RSA;
+
+	/* Check for number of SRK entries */
+	if ((hdr->len_kr.num_srk == 0) ||
+	    (hdr->len_kr.num_srk > MAX_KEY_ENTRIES)) {
+		ERROR("Error in NUM entries in SRK Table\n");
+		return -1;
+	}
+
+	/*
+	 * Check the key number field. It should be not greater than
+	 * number of entries in SRK table.
+	 */
+	key_num = hdr->len_kr.srk_sel;
+	if ((key_num == 0) || (key_num > hdr->len_kr.num_srk)) {
+		ERROR("Invalid Key number\n");
+		return -1;
+	}
+
+	/* Get revoc key from sfp */
+	key_revoc_flag = get_key_revoc();
+
+	/* Check if selected key has been revoked */
+	ret = is_key_revoked(key_num, key_revoc_flag);
+	if (ret != 0) {
+		ERROR("Selected key has been revoked\n");
+		return -1;
+	}
+
+	/* Check for valid key length - allowed key sizes are 1K, 2K and 4K */
+	for (i = 0; i < hdr->len_kr.num_srk; i++) {
+		if (CHECK_KEY_LEN(srktbl[i].key_len) == 0) {
+			ERROR("Invalid key length\n");
+			return -1;
+		}
+	}
+
+	/* We don't return an error from here. While parsing, we only try to
+	 * install the SRK table. In case of secure boot, a failure here is
+	 * handled later, at the time of the ROTPK comparison in the
+	 * plat_get_rotpk_info function.
+	 */
+	sb = check_boot_mode_secure(&mode);
+	if (sb) {
+		ret = deploy_rotpk_hash_table(srktbl, hdr->len_kr.num_srk);
+		if (ret != 0) {
+			ERROR("ROTPK FAILURE\n");
+			/* For ITS = 1, return failure */
+			if (mode != 0) {
+				return -1;
+			}
+			ERROR("SECURE BOOT DEV-ENV MODE:\n");
+			ERROR("\tCHECK ROTPK !\n");
+			ERROR("\tCONTINUING ON FAILURE...\n");
+		}
+	}
+
+	/* Return the length of the selected key */
+	*len = srktbl[key_num - 1].key_len;
+
+	/* Point key to the selected key */
+	*key =  (uint8_t *)&(srktbl[key_num - 1].pkey);
+
+	return 0;
+}
+
+/*
+ * This function would parse the CSF header and do the following:
+ * 1. Basic integrity checks
+ * 2. Key checks and extract the key from SRK/IE Table
+ * 3. Key hash comparison with SRKH in fuses in case of SRK Table
+ * 4. OEM/UID checks - To be added
+ * 5. Hash calculation for various components used in signature
+ * 6. Signature integrity checks
+ * return -> 0 on success, -1 on failure
+ */
+int validate_esbc_header(void *img_hdr, void **img_key, uint32_t *key_len,
+			 void **img_sign, uint32_t *sign_len,
+			 enum sig_alg *algo)
+{
+	struct csf_hdr *hdr = img_hdr;
+	uint8_t *s;
+	int32_t ret = 0;
+	void *esbc = (uint8_t *)img_hdr;
+	uint8_t *key;
+	uint32_t klen;
+
+	/* check barker code */
+	if (memcmp(hdr->barker, barker_code, CSF_BARKER_LEN) != 0) {
+		ERROR("Wrong barker code in header\n");
+		return -1;
+	}
+
+	ret = get_key(hdr, &key, &klen, algo);
+	if (ret != 0) {
+		return -1;
+	}
+
+	/* check signature */
+	if (klen == (2 * hdr->sign_len)) {
+		/* check signature length */
+		if (((hdr->sign_len == RSA_1K_KEY_SZ_BYTES) ||
+		    (hdr->sign_len == RSA_2K_KEY_SZ_BYTES) ||
+		    (hdr->sign_len == RSA_4K_KEY_SZ_BYTES)) == 0) {
+			ERROR("Wrong Signature length in header\n");
+			return -1;
+		}
+	} else {
+		ERROR("RSA key length not twice the signature length\n");
+		return -1;
+	}
+
+	/* modulus most significant bit should be set */
+
+	if ((key[0] & 0x80) == 0U) {
+		ERROR("RSA Public key MSB not set\n");
+		return -1;
+	}
+
+	/* modulus value should be odd */
+	if ((key[klen / 2 - 1] & 0x1) == 0U) {
+		ERROR("Public key Modulus in header not odd\n");
+		return -1;
+	}
+
+	/* Check signature value < modulus value */
+	s =  (uint8_t *)(esbc + hdr->psign);
+
+	if (!(memcmp(s, key, hdr->sign_len) < 0)) {
+		ERROR("Signature not less than modulus");
+		return -1;
+	}
+
+	/* Populate the return addresses */
+	*img_sign = (void *)(s);
+
+	/* Save the length of signature */
+	*sign_len = hdr->sign_len;
+
+	*img_key = (uint8_t *)key;
+
+	*key_len = klen;
+
+	return ret;
+}
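
The revocation check in is_key_revoked() above maps the 1-based key number
onto bit (REVOC_KEY_ALIGN - keynum) of the SFP revocation flags, and key
number UNREVOCABLE_KEY can never be revoked. A small stand-alone sketch of
that bit arithmetic with the Chassis 2 constants follows; main() and the
sample flag values are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Chassis 2 values from csf_hdr.h */
#define UNREVOCABLE_KEY	4
#define REVOC_KEY_ALIGN	3

/*
 * Same arithmetic as is_key_revoked(): key 'keynum' (1-based) is revoked
 * when bit (REVOC_KEY_ALIGN - keynum) is set in the revocation flags.
 */
static int key_revoked(uint32_t keynum, uint32_t rev_flag)
{
	if (keynum == UNREVOCABLE_KEY) {
		return 0;
	}
	return ((1U << (REVOC_KEY_ALIGN - keynum)) & rev_flag) != 0U;
}

int main(void)
{
	/* rev_flag 0x4 has bit 2 set, so key 1 (3 - 1 = 2) is revoked */
	printf("%d %d %d\n",
	       key_revoked(1U, 0x4U),	/* 1: revoked */
	       key_revoked(2U, 0x4U),	/* 0: bit 1 is clear */
	       key_revoked(4U, 0xfU));	/* 0: key 4 cannot be revoked */
	return 0;
}
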
diff --git a/drivers/nxp/auth/csf_hdr_parser/input_bl2_ch2 b/drivers/nxp/auth/csf_hdr_parser/input_bl2_ch2
new file mode 100644
index 0000000..bf8934b
--- /dev/null
+++ b/drivers/nxp/auth/csf_hdr_parser/input_bl2_ch2
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2014-2016, Freescale Semiconductor, Inc.
+ * Copyright 2017-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+---------------------------------------------------
+# Specify the platform. [Mandatory]
+# Choose Platform - 1010/1040/2041/3041/4080/5020/5040/9131/9132/9164/4240/C290/LS1
+PLATFORM=LS1043
+# ESBC Flag. Specify ESBC=0 to sign u-boot and ESBC=1 to sign ESBC images. (default is 0)
+ESBC=0
+---------------------------------------------------
+# Entry Point/Image start address field in the header.[Mandatory]
+# (default=ADDRESS of first file specified in images)
+ENTRY_POINT=10000000
+---------------------------------------------------
+# Specify the file name of the keys separated by comma.
+# The number of files and key select should lie between 1 and 4 for 1040 and C290.
+# For rest of the platforms only one key is required and key select should not be provided.
+
+# USAGE (for 4080/5020/5040/3041/2041/1010/913x): PRI_KEY = <key1.pri>
+# USAGE (for 1040/C290/9164/4240/LS1): PRI_KEY = <key1.pri>, <key2.pri>, <key3.pri>, <key4.pri>
+
+# PRI_KEY (Default private key :srk.pri) - [Optional]
+PRI_KEY=srk.pri
+# PUB_KEY (Default public key :srk.pub) - [Optional]
+PUB_KEY=srk.pub
+# Please provide KEY_SELECT(between 1 to 4) (Required for 1040/C290/9164/4240/LS1 only) - [Optional]
+KEY_SELECT=
+---------------------------------------------------
+# Specify SG table address, only for (2041/3041/4080/5020/5040) with ESBC=0 - [Optional]
+SG_TABLE_ADDR=
+---------------------------------------------------
+# Specify the target where image will be loaded. (Default is NOR_16B) - [Optional]
+# Only required for Non-PBL Devices (1010/1040/9131/9132i/C290)
+# Select from - NOR_8B/NOR_16B/NAND_8B_512/NAND_8B_2K/NAND_8B_4K/NAND_16B_512/NAND_16B_2K/NAND_16B_4K/SD/MMC/SPI
+IMAGE_TARGET=
+---------------------------------------------------
+# Specify IMAGE, Max 8 images are possible. DST_ADDR is required only for Non-PBL Platform. [Mandatory]
+# USAGE : IMAGE_NO = {IMAGE_NAME, SRC_ADDR, DST_ADDR}
+IMAGE_1={bl2.bin,10000000,ffffffff}
+IMAGE_2={,,}
+IMAGE_3={,,}
+IMAGE_4={,,}
+IMAGE_5={,,}
+IMAGE_6={,,}
+IMAGE_7={,,}
+IMAGE_8={,,}
+---------------------------------------------------
+# Specify OEM AND FSL ID to be populated in header. [Optional]
+# e.g FSL_UID=11111111
+FSL_UID_0=
+FSL_UID_1=
+OEM_UID_0=
+OEM_UID_1=
+---------------------------------------------------
+# Specify the file names of csf header and sg table. (Default :hdr.out) [Optional]
+OUTPUT_HDR_FILENAME=hdr_bl2.out
+
+# Specify the file names of hash file and sign file.
+HASH_FILENAME=img_hash.out
+INPUT_SIGN_FILENAME=sign.out
+
+# Specify the signature size. It is mandatory when neither public key nor private key is specified.
+# Signature size would be [0x80 for 1k key, 0x100 for 2k key, and 0x200 for 4k key].
+SIGN_SIZE=
+---------------------------------------------------
+# Specify the output file name of sg table. (Default :sg_table.out). [Optional]
+# Please note that OUTPUT SG BIN is only required for 2041/3041/4080/5020/5040 when ESBC flag is not set.
+OUTPUT_SG_BIN=
+---------------------------------------------------
+# Following fields are Required for 4240/9164/1040/C290 only
+
+# Specify House keeping Area
+# Required for 4240/9164/1040/C290 only when ESBC flag is not set. [Mandatory]
+HK_AREA_POINTER=
+HK_AREA_SIZE=
+---------------------------------------------------
+# Following field Required for 4240/9164/1040/C290 only
+# Specify Secondary Image Flag. (0 or 1) - [Optional]
+# (Default is 0)
+SEC_IMAGE=0
+# Specify Manufacturing Protection Flag. (0 or 1) - [Optional]
+# Required only for LS1(Default is 0)
+MP_FLAG=1
+---------------------------------------------------
diff --git a/drivers/nxp/auth/csf_hdr_parser/input_bl2_ch3 b/drivers/nxp/auth/csf_hdr_parser/input_bl2_ch3
new file mode 100644
index 0000000..5fdad9c
--- /dev/null
+++ b/drivers/nxp/auth/csf_hdr_parser/input_bl2_ch3
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+---------------------------------------------------
+# Specify the platform. [Mandatory]
+# Choose Platform -
+# TRUST 3.2: LX2160
+PLATFORM=LS2088
+---------------------------------------------------
+# Entry Point/Image start address field in the header.[Mandatory]
+# (default=ADDRESS of first file specified in images)
+# Address can be 64 bit
+ENTRY_POINT=1800A000
+---------------------------------------------------
+# Specify the Key Information.
+# PUB_KEY [Mandatory] Comma Separated List
+# Usage: <srk1.pub> <srk2.pub> .....
+PUB_KEY=srk.pub
+# KEY_SELECT [Mandatory]
+# USAGE (for TRUST 3.x): (between 1 to 8)
+KEY_SELECT=1
+# PRI_KEY [Mandatory] Single Key Used for Signing
+# USAGE: <srk.pri>
+PRI_KEY=srk.pri
+---------------------------------------------------
+# Specify IMAGE, Max 8 images are possible.
+# DST_ADDR is required only for Non-PBL Platform. [Mandatory]
+# USAGE : IMAGE_NO = {IMAGE_NAME, SRC_ADDR, DST_ADDR}
+# Address can be 64 bit
+IMAGE_1={bl2.bin,1800A000,ffffffff}
+IMAGE_2={,,}
+IMAGE_3={,,}
+IMAGE_4={,,}
+IMAGE_5={,,}
+IMAGE_6={,,}
+IMAGE_7={,,}
+IMAGE_8={,,}
+---------------------------------------------------
+# Specify OEM AND FSL ID to be populated in header. [Optional]
+# e.g FSL_UID_0=11111111
+FSL_UID_0=
+FSL_UID_1=
+OEM_UID_0=
+OEM_UID_1=
+OEM_UID_2=
+OEM_UID_3=
+OEM_UID_4=
+---------------------------------------------------
+# Specify the output file names [Optional].
+# Default Values chosen in Tool
+OUTPUT_HDR_FILENAME=hdr_bl2.out
+IMAGE_HASH_FILENAME=
+RSA_SIGN_FILENAME=
+---------------------------------------------------
+# Specify The Flags. (0 or 1) - [Optional]
+MP_FLAG=0
+ISS_FLAG=1
+LW_FLAG=0
+---------------------------------------------------
+# Specify VERBOSE as 1, if you want to Display Header Information [Optional]
+VERBOSE=1
diff --git a/drivers/nxp/auth/csf_hdr_parser/input_bl2_ch3_2 b/drivers/nxp/auth/csf_hdr_parser/input_bl2_ch3_2
new file mode 100644
index 0000000..cc7c07c
--- /dev/null
+++ b/drivers/nxp/auth/csf_hdr_parser/input_bl2_ch3_2
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+---------------------------------------------------
+# Specify the platform. [Mandatory]
+# Choose Platform -
+# TRUST 3.2: LX2160
+PLATFORM=LX2160
+---------------------------------------------------
+# Entry Point/Image start address field in the header.[Mandatory]
+# (default=ADDRESS of first file specified in images)
+# Address can be 64 bit
+ENTRY_POINT=1800D000
+---------------------------------------------------
+# Specify the Key Information.
+# PUB_KEY [Mandatory] Comma Separated List
+# Usage: <srk1.pub> <srk2.pub> .....
+PUB_KEY=srk.pub
+# KEY_SELECT [Mandatory]
+# USAGE (for TRUST 3.x): (between 1 to 8)
+KEY_SELECT=1
+# PRI_KEY [Mandatory] Single Key Used for Signing
+# USAGE: <srk.pri>
+PRI_KEY=srk.pri
+---------------------------------------------------
+# Specify IMAGE, Max 8 images are possible.
+# DST_ADDR is required only for Non-PBL Platform. [Mandatory]
+# USAGE : IMAGE_NO = {IMAGE_NAME, SRC_ADDR, DST_ADDR}
+# Address can be 64 bit
+IMAGE_1={bl2.bin,1800D000,ffffffff}
+IMAGE_2={,,}
+IMAGE_3={,,}
+IMAGE_4={,,}
+IMAGE_5={,,}
+IMAGE_6={,,}
+IMAGE_7={,,}
+IMAGE_8={,,}
+---------------------------------------------------
+# Specify OEM AND FSL ID to be populated in header. [Optional]
+# e.g FSL_UID_0=11111111
+FSL_UID_0=
+FSL_UID_1=
+OEM_UID_0=
+OEM_UID_1=
+OEM_UID_2=
+OEM_UID_3=
+OEM_UID_4=
+---------------------------------------------------
+# Specify the output file names [Optional].
+# Default Values chosen in Tool
+OUTPUT_HDR_FILENAME=hdr_bl2.out
+IMAGE_HASH_FILENAME=
+RSA_SIGN_FILENAME=
+---------------------------------------------------
+# Specify The Flags. (0 or 1) - [Optional]
+MP_FLAG=0
+ISS_FLAG=1
+LW_FLAG=0
+---------------------------------------------------
+# Specify VERBOSE as 1, if you want to Display Header Information [Optional]
+VERBOSE=1
diff --git a/drivers/nxp/auth/csf_hdr_parser/input_blx_ch2 b/drivers/nxp/auth/csf_hdr_parser/input_blx_ch2
new file mode 100644
index 0000000..93b020b
--- /dev/null
+++ b/drivers/nxp/auth/csf_hdr_parser/input_blx_ch2
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2017-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+---------------------------------------------------
+# Specify the platform. [Mandatory]
+# Choose Platform - 1010/1040/2041/3041/4080/5020/5040/9131/9132/9164/4240/C290/LS1
+PLATFORM=LS1043
+# ESBC Flag. Specify ESBC=0 to sign u-boot and ESBC=1 to sign ESBC images. (default is 0)
+ESBC=1
+---------------------------------------------------
+# Specify the file name of the keys separated by comma.
+
+# PRI_KEY (Default private key :srk.pri) - [Optional]
+PRI_KEY=srk.pri
+# PUB_KEY (Default public key :srk.pub) - [Optional]
+PUB_KEY=srk.pub
+# Please provide KEY_SELECT(between 1 to 4) (Required for 1040/C290/9164/4240 only) - [Optional]
+KEY_SELECT=1
+---------------------------------------------------
+# Specify OEM AND FSL ID to be populated in header. [Optional]
+# e.g FSL_UID=11111111
+FSL_UID_0=
+FSL_UID_1=
+OEM_UID_0=
+OEM_UID_1=
+---------------------------------------------------
diff --git a/drivers/nxp/auth/csf_hdr_parser/input_blx_ch3 b/drivers/nxp/auth/csf_hdr_parser/input_blx_ch3
new file mode 100644
index 0000000..18e8e3b
--- /dev/null
+++ b/drivers/nxp/auth/csf_hdr_parser/input_blx_ch3
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+ESBC=1
+---------------------------------------------------
+# Specify the platform. [Mandatory]
+# Choose Platform -
+# TRUST 3.0: LS2085
+# TRUST 3.1: LS2088, LS1088
+PLATFORM=LS2088
+---------------------------------------------------
+# Specify the Key Information.
+# PUB_KEY [Mandatory] Comma Separated List
+# Usage: <srk1.pub> <srk2.pub> .....
+PUB_KEY=srk.pub
+# KEY_SELECT [Mandatory]
+# USAGE (for TRUST 3.x): (between 1 to 8)
+KEY_SELECT=1
+# PRI_KEY [Mandatory] Single Key Used for Signing
+# USAGE: <srk.pri>
+PRI_KEY=srk.pri
+
+---------------------------------------------------
+# Specify OEM AND FSL ID to be populated in header. [Optional]
+# e.g FSL_UID_0=11111111
+FSL_UID_0=
+FSL_UID_1=
+OEM_UID_0=
+OEM_UID_1=
+OEM_UID_2=
+OEM_UID_3=
+OEM_UID_4=
+---------------------------------------------------
diff --git a/drivers/nxp/auth/csf_hdr_parser/input_pbi_ch3 b/drivers/nxp/auth/csf_hdr_parser/input_pbi_ch3
new file mode 100644
index 0000000..9111a2a
--- /dev/null
+++ b/drivers/nxp/auth/csf_hdr_parser/input_pbi_ch3
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+---------------------------------------------------
+# Specify the platform. [Mandatory]
+# Choose Platform -
+# TRUST 3.0: LS2085
+# TRUST 3.1: LS2088, LS1088
+PLATFORM=LS2088
+---------------------------------------------------
+# Specify the Key Information.
+# PUB_KEY [Mandatory] Comma Separated List
+# Usage: <srk1.pub> <srk2.pub> .....
+PUB_KEY=srk.pub
+# KEY_SELECT [Mandatory]
+# USAGE (for TRUST 3.x): (between 1 to 8)
+KEY_SELECT=1
+# PRI_KEY [Mandatory] Single Key Used for Signing
+# USAGE: <srk.pri>
+PRI_KEY=srk.pri
+---------------------------------------------------
+# Specify OEM AND FSL ID to be populated in header. [Optional]
+# e.g FSL_UID_0=11111111
+FSL_UID_0=
+FSL_UID_1=
+OEM_UID_0=
+OEM_UID_1=
+OEM_UID_2=
+OEM_UID_3=
+OEM_UID_4=
+---------------------------------------------------
+# Specify The Flags. (0 or 1) - [Optional]
+MP_FLAG=0
+ISS_FLAG=1
+LW_FLAG=0
+---------------------------------------------------
+# Specify VERBOSE as 1, if you want to Display Header Information [Optional]
+VERBOSE=1
+---------------------------------------------------
diff --git a/drivers/nxp/auth/csf_hdr_parser/input_pbi_ch3_2 b/drivers/nxp/auth/csf_hdr_parser/input_pbi_ch3_2
new file mode 100644
index 0000000..c2d7ce4
--- /dev/null
+++ b/drivers/nxp/auth/csf_hdr_parser/input_pbi_ch3_2
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2017-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+---------------------------------------------------
+# Specify the platform. [Mandatory]
+# Choose Platform -
+# TRUST 3.0: LS2085
+# TRUST 3.1: LS2088, LS1088
+PLATFORM=LX2160
+---------------------------------------------------
+# Specify the Key Information.
+# PUB_KEY [Mandatory] Comma Separated List
+# Usage: <srk1.pub> <srk2.pub> .....
+PUB_KEY=srk.pub
+# KEY_SELECT [Mandatory]
+# USAGE (for TRUST 3.x): (between 1 to 8)
+KEY_SELECT=1
+# PRI_KEY [Mandatory] Single Key Used for Signing
+# USAGE: <srk.pri>
+PRI_KEY=srk.pri
+---------------------------------------------------
+# Specify OEM AND FSL ID to be populated in header. [Optional]
+# e.g FSL_UID_0=11111111
+FSL_UID_0=
+FSL_UID_1=
+OEM_UID_0=
+OEM_UID_1=
+OEM_UID_2=
+OEM_UID_3=
+OEM_UID_4=
+---------------------------------------------------
+# Specify The Flags. (0 or 1) - [Optional]
+MP_FLAG=0
+ISS_FLAG=1
+LW_FLAG=0
+---------------------------------------------------
+# Specify VERBOSE as 1, if you want to Display Header Information [Optional]
+VERBOSE=1
+---------------------------------------------------
diff --git a/drivers/nxp/auth/csf_hdr_parser/plat_img_parser.c b/drivers/nxp/auth/csf_hdr_parser/plat_img_parser.c
new file mode 100644
index 0000000..43b78e5
--- /dev/null
+++ b/drivers/nxp/auth/csf_hdr_parser/plat_img_parser.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2014-2016, Freescale Semiconductor, Inc.
+ * Copyright 2017-2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <csf_hdr.h>
+#include <drivers/auth/crypto_mod.h>
+#include <drivers/auth/img_parser_mod.h>
+#include <lib/utils.h>
+#include <sfp.h>
+
+/* Temporary variables to speed up the authentication parameters search. These
+ * variables are assigned once during the integrity check and used any time an
+ * authentication parameter is requested, so we do not have to parse the image
+ * again.
+ */
+
+/* Hash of Image + CSF Header + SRK table */
+uint8_t img_hash[SHA256_BYTES] __aligned(CACHE_WRITEBACK_GRANULE);
+uint32_t hash_len;
+
+/* Key being used for authentication
+ * Points to the key in CSF header copied in DDR
+ * ESBC client key
+ */
+void *img_key;
+uint32_t key_len;
+
+/* ESBC client signature */
+void *img_sign;
+uint32_t sign_len;
+enum sig_alg alg;
+
+/* Maximum OID string length ("a.b.c.d.e.f ...") */
+#define MAX_OID_STR_LEN			64
+
+#define LIB_NAME	"NXP CSFv2"
+
+/*
+ * Clear all static temporary variables.
+ */
+static void clear_temp_vars(void)
+{
+#define ZERO_AND_CLEAN(x)					\
+	do {							\
+		zeromem(&x, sizeof(x));				\
+		clean_dcache_range((uintptr_t)&x, sizeof(x));	\
+	} while (0)
+
+	ZERO_AND_CLEAN(img_key);
+	ZERO_AND_CLEAN(img_sign);
+	ZERO_AND_CLEAN(img_hash);
+	ZERO_AND_CLEAN(key_len);
+	ZERO_AND_CLEAN(hash_len);
+	ZERO_AND_CLEAN(sign_len);
+
+#undef ZERO_AND_CLEAN
+}
+
+/* Exported functions */
+
+static void init(void)
+{
+	clear_temp_vars();
+}
+
+/*
+ * This function checks the integrity of the CSF header.
+ */
+static int check_integrity(void *img, unsigned int img_len)
+{
+	int ret;
+
+	/*
+	 * The image file has been successfully loaded till here.
+	 *
+	 * Flush the image to main memory so that it can be authenticated
+	 * by CAAM, a HW accelerator regardless of cache and MMU state.
+	 */
+	flush_dcache_range((uintptr_t) img, img_len);
+
+	/*
+	 * The image is appended at an offset (CSF_HDR_SZ) from the header,
+	 * so the image size used for hashing is img_len - CSF_HDR_SZ.
+	 */
+	VERBOSE("Barker code is %x\n", *(unsigned int *)img);
+	ret = validate_esbc_header(img, &img_key, &key_len, &img_sign,
+				   &sign_len, &alg);
+	if (ret < 0) {
+		ERROR("Header authentication failed\n");
+		clear_temp_vars();
+		return IMG_PARSER_ERR;
+	}
+	/* Calculate the hash of various components from the image */
+	ret = calc_img_hash(img, (uint8_t *)img + CSF_HDR_SZ,
+			    img_len - CSF_HDR_SZ, img_hash, &hash_len);
+	if (ret != 0) {
+		ERROR("Issue in hash calculation %d\n", ret);
+		clear_temp_vars();
+		return IMG_PARSER_ERR;
+	}
+
+	return IMG_PARSER_OK;
+}
+
+/*
+ * Extract an authentication parameter from CSF header
+ *
+ * The CSF header has already been parsed and the required information
+ * (hash of the data, signature, lengths) has been extracted into global
+ * variables by the check_integrity function. This data is returned back
+ * to the caller.
+ */
+static int get_auth_param(const auth_param_type_desc_t *type_desc,
+		void *img, unsigned int img_len,
+		void **param, unsigned int *param_len)
+{
+	int rc = IMG_PARSER_OK;
+
+	/* We do not use img because the check_integrity function has already
+	 * extracted the relevant data (pk, sig_alg, etc.)
+	 */
+
+	switch (type_desc->type) {
+
+	/* Hash will be returned for comparison with signature */
+	case AUTH_PARAM_HASH:
+		*param = (void *)img_hash;
+		*param_len = (unsigned int)SHA256_BYTES;
+		break;
+
+	/* Return the public key used for the signature, extracted from the
+	 * SRK table after the key revocation checks
+	 */
+	case AUTH_PARAM_PUB_KEY:
+		/* Get the subject public key */
+		/* For a 1K key the length would be 2k/8 = 0x100 bytes,
+		 * for a 2K RSA key 0x200 and for a 4K RSA key 0x400
+		 */
+		*param = img_key;
+		*param_len = (unsigned int)key_len;
+		break;
+
+	/* Tell whether the signature is RSA or ECDSA. ECDSA is to be
+	 * supported on later platforms such as LX2.
+	 */
+	case AUTH_PARAM_SIG_ALG:
+		/* The algorithm is an RSA or ECDSA signature on the hash */
+		*param = (void *)&alg;
+		*param_len = 4U;
+		break;
+
+	/* Return the signature */
+	case AUTH_PARAM_SIG:
+		*param = img_sign;
+		*param_len = (unsigned int)sign_len;
+		break;
+
+	case AUTH_PARAM_NV_CTR:
+
+	default:
+		rc = IMG_PARSER_ERR_NOT_FOUND;
+		break;
+	}
+
+	return rc;
+}
+
+REGISTER_IMG_PARSER_LIB(IMG_PLAT, LIB_NAME, init,
+			check_integrity, get_auth_param);
diff --git a/drivers/nxp/auth/tbbr/tbbr_cot.c b/drivers/nxp/auth/tbbr/tbbr_cot.c
new file mode 100644
index 0000000..bb21fa0
--- /dev/null
+++ b/drivers/nxp/auth/tbbr/tbbr_cot.c
@@ -0,0 +1,820 @@
+/*
+ * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * Copyright 2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+
+#include <drivers/auth/auth_mod.h>
+
+#if USE_TBBR_DEFS
+#include <tools_share/tbbr_oid.h>
+#else
+#include <platform_oid.h>
+#endif
+
+
+#if TF_MBEDTLS_HASH_ALG_ID == TF_MBEDTLS_SHA256
+#define HASH_DER_LEN			51
+#elif TF_MBEDTLS_HASH_ALG_ID == TF_MBEDTLS_SHA384
+#define HASH_DER_LEN			67
+#elif TF_MBEDTLS_HASH_ALG_ID == TF_MBEDTLS_SHA512
+#define HASH_DER_LEN			83
+#else
+#error "Invalid value for TF_MBEDTLS_HASH_ALG_ID"
+#endif
+
+/*
+ * The platform must allocate buffers to store the authentication parameters
+ * extracted from the certificates. In this case, because of the way the CoT is
+ * established, we can reuse some of the buffers at different stages.
+ */
+
+static unsigned char nt_world_bl_hash_buf[HASH_DER_LEN];
+
+static unsigned char soc_fw_hash_buf[HASH_DER_LEN];
+static unsigned char tos_fw_hash_buf[HASH_DER_LEN];
+static unsigned char tos_fw_extra1_hash_buf[HASH_DER_LEN];
+static unsigned char tos_fw_extra2_hash_buf[HASH_DER_LEN];
+static unsigned char trusted_world_pk_buf[PK_DER_LEN];
+static unsigned char non_trusted_world_pk_buf[PK_DER_LEN];
+static unsigned char content_pk_buf[PK_DER_LEN];
+static unsigned char soc_fw_config_hash_buf[HASH_DER_LEN];
+static unsigned char tos_fw_config_hash_buf[HASH_DER_LEN];
+static unsigned char nt_fw_config_hash_buf[HASH_DER_LEN];
+
+#ifdef CONFIG_DDR_FIP_IMAGE
+static unsigned char ddr_fw_content_pk_buf[PK_DER_LEN];
+static unsigned char ddr_imem_udimm_1d_hash_buf[HASH_DER_LEN];
+static unsigned char ddr_imem_udimm_2d_hash_buf[HASH_DER_LEN];
+static unsigned char ddr_dmem_udimm_1d_hash_buf[HASH_DER_LEN];
+static unsigned char ddr_dmem_udimm_2d_hash_buf[HASH_DER_LEN];
+
+static unsigned char ddr_imem_rdimm_1d_hash_buf[HASH_DER_LEN];
+static unsigned char ddr_imem_rdimm_2d_hash_buf[HASH_DER_LEN];
+static unsigned char ddr_dmem_rdimm_1d_hash_buf[HASH_DER_LEN];
+static unsigned char ddr_dmem_rdimm_2d_hash_buf[HASH_DER_LEN];
+#endif
+
+/*
+ * Parameter type descriptors
+ */
+static auth_param_type_desc_t trusted_nv_ctr = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_NV_CTR, TRUSTED_FW_NVCOUNTER_OID);
+
+static auth_param_type_desc_t subject_pk = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_PUB_KEY, 0);
+static auth_param_type_desc_t sig = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_SIG, 0);
+static auth_param_type_desc_t sig_alg = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_SIG_ALG, 0);
+static auth_param_type_desc_t raw_data = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_RAW_DATA, 0);
+
+
+static auth_param_type_desc_t non_trusted_nv_ctr = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_NV_CTR, NON_TRUSTED_FW_NVCOUNTER_OID);
+static auth_param_type_desc_t trusted_world_pk = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_PUB_KEY, TRUSTED_WORLD_PK_OID);
+static auth_param_type_desc_t non_trusted_world_pk = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_PUB_KEY, NON_TRUSTED_WORLD_PK_OID);
+static auth_param_type_desc_t soc_fw_content_pk = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_PUB_KEY, SOC_FW_CONTENT_CERT_PK_OID);
+static auth_param_type_desc_t tos_fw_content_pk = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_PUB_KEY, TRUSTED_OS_FW_CONTENT_CERT_PK_OID);
+static auth_param_type_desc_t nt_fw_content_pk = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_PUB_KEY, NON_TRUSTED_FW_CONTENT_CERT_PK_OID);
+static auth_param_type_desc_t soc_fw_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, SOC_AP_FW_HASH_OID);
+static auth_param_type_desc_t soc_fw_config_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, SOC_FW_CONFIG_HASH_OID);
+static auth_param_type_desc_t tos_fw_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, TRUSTED_OS_FW_HASH_OID);
+static auth_param_type_desc_t tos_fw_config_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, TRUSTED_OS_FW_CONFIG_HASH_OID);
+static auth_param_type_desc_t tos_fw_extra1_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, TRUSTED_OS_FW_EXTRA1_HASH_OID);
+static auth_param_type_desc_t tos_fw_extra2_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, TRUSTED_OS_FW_EXTRA2_HASH_OID);
+static auth_param_type_desc_t nt_world_bl_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, NON_TRUSTED_WORLD_BOOTLOADER_HASH_OID);
+static auth_param_type_desc_t nt_fw_config_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, NON_TRUSTED_FW_CONFIG_HASH_OID);
+
+#ifdef CONFIG_DDR_FIP_IMAGE
+static auth_param_type_desc_t ddr_fw_content_pk = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_PUB_KEY, DDR_FW_CONTENT_CERT_PK_OID);
+
+static auth_param_type_desc_t ddr_imem_udimm_1d_fw_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, DDR_IMEM_UDIMM_1D_HASH_OID);
+static auth_param_type_desc_t ddr_imem_udimm_2d_fw_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, DDR_IMEM_UDIMM_2D_HASH_OID);
+static auth_param_type_desc_t ddr_dmem_udimm_1d_fw_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, DDR_DMEM_UDIMM_1D_HASH_OID);
+static auth_param_type_desc_t ddr_dmem_udimm_2d_fw_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, DDR_DMEM_UDIMM_2D_HASH_OID);
+
+static auth_param_type_desc_t ddr_imem_rdimm_1d_fw_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, DDR_IMEM_RDIMM_1D_HASH_OID);
+static auth_param_type_desc_t ddr_imem_rdimm_2d_fw_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, DDR_IMEM_RDIMM_2D_HASH_OID);
+static auth_param_type_desc_t ddr_dmem_rdimm_1d_fw_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, DDR_DMEM_RDIMM_1D_HASH_OID);
+static auth_param_type_desc_t ddr_dmem_rdimm_2d_fw_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, DDR_DMEM_RDIMM_2D_HASH_OID);
+#endif
+
+
+/*
+ * Trusted key certificate
+ */
+static const auth_img_desc_t trusted_key_cert = {
+	.img_id = TRUSTED_KEY_CERT_ID,
+	.img_type = IMG_CERT,
+	.parent = NULL,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &subject_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &raw_data
+			}
+		},
+		[1] = {
+			.type = AUTH_METHOD_NV_CTR,
+			.param.nv_ctr = {
+				.cert_nv_ctr = &trusted_nv_ctr,
+				.plat_nv_ctr = &trusted_nv_ctr
+			}
+		}
+	},
+	.authenticated_data = (const auth_param_desc_t[COT_MAX_VERIFIED_PARAMS]) {
+		[0] = {
+			.type_desc = &trusted_world_pk,
+			.data = {
+				.ptr = (void *)trusted_world_pk_buf,
+				.len = (unsigned int)PK_DER_LEN
+			}
+		},
+		[1] = {
+			.type_desc = &non_trusted_world_pk,
+			.data = {
+				.ptr = (void *)non_trusted_world_pk_buf,
+				.len = (unsigned int)PK_DER_LEN
+			}
+		}
+	}
+};
+
+/*
+ * SoC Firmware
+ */
+static const auth_img_desc_t soc_fw_key_cert = {
+	.img_id = SOC_FW_KEY_CERT_ID,
+	.img_type = IMG_CERT,
+	.parent = &trusted_key_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &trusted_world_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &raw_data
+			}
+		},
+		[1] = {
+			.type = AUTH_METHOD_NV_CTR,
+			.param.nv_ctr = {
+				.cert_nv_ctr = &trusted_nv_ctr,
+				.plat_nv_ctr = &trusted_nv_ctr
+			}
+		}
+	},
+	.authenticated_data = (const auth_param_desc_t[COT_MAX_VERIFIED_PARAMS]) {
+		[0] = {
+			.type_desc = &soc_fw_content_pk,
+			.data = {
+				.ptr = (void *)content_pk_buf,
+				.len = (unsigned int)PK_DER_LEN
+			}
+		}
+	}
+};
+static const auth_img_desc_t soc_fw_content_cert = {
+	.img_id = SOC_FW_CONTENT_CERT_ID,
+	.img_type = IMG_CERT,
+	.parent = &soc_fw_key_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &soc_fw_content_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &raw_data
+			}
+		},
+		[1] = {
+			.type = AUTH_METHOD_NV_CTR,
+			.param.nv_ctr = {
+				.cert_nv_ctr = &trusted_nv_ctr,
+				.plat_nv_ctr = &trusted_nv_ctr
+			}
+		}
+	},
+	.authenticated_data = (const auth_param_desc_t[COT_MAX_VERIFIED_PARAMS]) {
+		[0] = {
+			.type_desc = &soc_fw_hash,
+			.data = {
+				.ptr = (void *)soc_fw_hash_buf,
+				.len = (unsigned int)HASH_DER_LEN
+			}
+		},
+		[1] = {
+			.type_desc = &soc_fw_config_hash,
+			.data = {
+				.ptr = (void *)soc_fw_config_hash_buf,
+				.len = (unsigned int)HASH_DER_LEN
+			}
+		}
+	}
+};
+static const auth_img_desc_t bl31_image = {
+	.img_id = BL31_IMAGE_ID,
+	.img_type = IMG_RAW,
+	.parent = &soc_fw_content_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_HASH,
+			.param.hash = {
+				.data = &raw_data,
+				.hash = &soc_fw_hash
+			}
+		}
+	}
+};
+/* SOC FW Config */
+static const auth_img_desc_t soc_fw_config = {
+	.img_id = SOC_FW_CONFIG_ID,
+	.img_type = IMG_RAW,
+	.parent = &soc_fw_content_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_HASH,
+			.param.hash = {
+				.data = &raw_data,
+				.hash = &soc_fw_config_hash
+			}
+		}
+	}
+};
+/*
+ * Trusted OS Firmware
+ */
+static const auth_img_desc_t trusted_os_fw_key_cert = {
+	.img_id = TRUSTED_OS_FW_KEY_CERT_ID,
+	.img_type = IMG_CERT,
+	.parent = &trusted_key_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &trusted_world_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &raw_data
+			}
+		},
+		[1] = {
+			.type = AUTH_METHOD_NV_CTR,
+			.param.nv_ctr = {
+				.cert_nv_ctr = &trusted_nv_ctr,
+				.plat_nv_ctr = &trusted_nv_ctr
+			}
+		}
+	},
+	.authenticated_data = (const auth_param_desc_t[COT_MAX_VERIFIED_PARAMS]) {
+		[0] = {
+			.type_desc = &tos_fw_content_pk,
+			.data = {
+				.ptr = (void *)content_pk_buf,
+				.len = (unsigned int)PK_DER_LEN
+			}
+		}
+	}
+};
+static const auth_img_desc_t trusted_os_fw_content_cert = {
+	.img_id = TRUSTED_OS_FW_CONTENT_CERT_ID,
+	.img_type = IMG_CERT,
+	.parent = &trusted_os_fw_key_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &tos_fw_content_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &raw_data
+			}
+		},
+		[1] = {
+			.type = AUTH_METHOD_NV_CTR,
+			.param.nv_ctr = {
+				.cert_nv_ctr = &trusted_nv_ctr,
+				.plat_nv_ctr = &trusted_nv_ctr
+			}
+		}
+	},
+	.authenticated_data = (const auth_param_desc_t[COT_MAX_VERIFIED_PARAMS]) {
+		[0] = {
+			.type_desc = &tos_fw_hash,
+			.data = {
+				.ptr = (void *)tos_fw_hash_buf,
+				.len = (unsigned int)HASH_DER_LEN
+			}
+		},
+		[1] = {
+			.type_desc = &tos_fw_extra1_hash,
+			.data = {
+				.ptr = (void *)tos_fw_extra1_hash_buf,
+				.len = (unsigned int)HASH_DER_LEN
+			}
+		},
+		[2] = {
+			.type_desc = &tos_fw_extra2_hash,
+			.data = {
+				.ptr = (void *)tos_fw_extra2_hash_buf,
+				.len = (unsigned int)HASH_DER_LEN
+			}
+		},
+		[3] = {
+			.type_desc = &tos_fw_config_hash,
+			.data = {
+				.ptr = (void *)tos_fw_config_hash_buf,
+				.len = (unsigned int)HASH_DER_LEN
+			}
+		}
+	}
+};
+static const auth_img_desc_t bl32_image = {
+	.img_id = BL32_IMAGE_ID,
+	.img_type = IMG_RAW,
+	.parent = &trusted_os_fw_content_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_HASH,
+			.param.hash = {
+				.data = &raw_data,
+				.hash = &tos_fw_hash
+			}
+		}
+	}
+};
+static const auth_img_desc_t bl32_extra1_image = {
+	.img_id = BL32_EXTRA1_IMAGE_ID,
+	.img_type = IMG_RAW,
+	.parent = &trusted_os_fw_content_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_HASH,
+			.param.hash = {
+				.data = &raw_data,
+				.hash = &tos_fw_extra1_hash
+			}
+		}
+	}
+};
+static const auth_img_desc_t bl32_extra2_image = {
+	.img_id = BL32_EXTRA2_IMAGE_ID,
+	.img_type = IMG_RAW,
+	.parent = &trusted_os_fw_content_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_HASH,
+			.param.hash = {
+				.data = &raw_data,
+				.hash = &tos_fw_extra2_hash
+			}
+		}
+	}
+};
+/* TOS FW Config */
+static const auth_img_desc_t tos_fw_config = {
+	.img_id = TOS_FW_CONFIG_ID,
+	.img_type = IMG_RAW,
+	.parent = &trusted_os_fw_content_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_HASH,
+			.param.hash = {
+				.data = &raw_data,
+				.hash = &tos_fw_config_hash
+			}
+		}
+	}
+};
+/*
+ * Non-Trusted Firmware
+ */
+static const auth_img_desc_t non_trusted_fw_key_cert = {
+	.img_id = NON_TRUSTED_FW_KEY_CERT_ID,
+	.img_type = IMG_CERT,
+	.parent = &trusted_key_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &non_trusted_world_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &raw_data
+			}
+		},
+		[1] = {
+			.type = AUTH_METHOD_NV_CTR,
+			.param.nv_ctr = {
+				.cert_nv_ctr = &non_trusted_nv_ctr,
+				.plat_nv_ctr = &non_trusted_nv_ctr
+			}
+		}
+	},
+	.authenticated_data = (const auth_param_desc_t[COT_MAX_VERIFIED_PARAMS]) {
+		[0] = {
+			.type_desc = &nt_fw_content_pk,
+			.data = {
+				.ptr = (void *)content_pk_buf,
+				.len = (unsigned int)PK_DER_LEN
+			}
+		}
+	}
+};
+static const auth_img_desc_t non_trusted_fw_content_cert = {
+	.img_id = NON_TRUSTED_FW_CONTENT_CERT_ID,
+	.img_type = IMG_CERT,
+	.parent = &non_trusted_fw_key_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &nt_fw_content_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &raw_data
+			}
+		},
+		[1] = {
+			.type = AUTH_METHOD_NV_CTR,
+			.param.nv_ctr = {
+				.cert_nv_ctr = &non_trusted_nv_ctr,
+				.plat_nv_ctr = &non_trusted_nv_ctr
+			}
+		}
+	},
+	.authenticated_data = (const auth_param_desc_t[COT_MAX_VERIFIED_PARAMS]) {
+		[0] = {
+			.type_desc = &nt_world_bl_hash,
+			.data = {
+				.ptr = (void *)nt_world_bl_hash_buf,
+				.len = (unsigned int)HASH_DER_LEN
+			}
+		},
+		[1] = {
+			.type_desc = &nt_fw_config_hash,
+			.data = {
+				.ptr = (void *)nt_fw_config_hash_buf,
+				.len = (unsigned int)HASH_DER_LEN
+			}
+		}
+	}
+};
+static const auth_img_desc_t bl33_image = {
+	.img_id = BL33_IMAGE_ID,
+	.img_type = IMG_RAW,
+	.parent = &non_trusted_fw_content_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_HASH,
+			.param.hash = {
+				.data = &raw_data,
+				.hash = &nt_world_bl_hash
+			}
+		}
+	}
+};
+/* NT FW Config */
+static const auth_img_desc_t nt_fw_config = {
+	.img_id = NT_FW_CONFIG_ID,
+	.img_type = IMG_RAW,
+	.parent = &non_trusted_fw_content_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_HASH,
+			.param.hash = {
+				.data = &raw_data,
+				.hash = &nt_fw_config_hash
+			}
+		}
+	}
+};
+#ifdef CONFIG_DDR_FIP_IMAGE
+/*
+ * DDR Firmware
+ */
+static const auth_img_desc_t ddr_fw_key_cert = {
+	.img_id = DDR_FW_KEY_CERT_ID,
+	.img_type = IMG_CERT,
+	.parent = &trusted_key_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &trusted_world_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &raw_data
+			}
+		},
+		[1] = {
+			.type = AUTH_METHOD_NV_CTR,
+			.param.nv_ctr = {
+				.cert_nv_ctr = &trusted_nv_ctr,
+				.plat_nv_ctr = &trusted_nv_ctr
+			}
+		}
+	},
+	.authenticated_data = (const auth_param_desc_t[COT_MAX_VERIFIED_PARAMS]) {
+		[0] = {
+			.type_desc = &ddr_fw_content_pk,
+			.data = {
+				.ptr = (void *)ddr_fw_content_pk_buf,
+				.len = (unsigned int)PK_DER_LEN
+			}
+		}
+	}
+};
+static const auth_img_desc_t ddr_udimm_fw_content_cert = {
+	.img_id = DDR_UDIMM_FW_CONTENT_CERT_ID,
+	.img_type = IMG_CERT,
+	.parent = &ddr_fw_key_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &ddr_fw_content_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &raw_data
+			}
+		},
+		[1] = {
+			.type = AUTH_METHOD_NV_CTR,
+			.param.nv_ctr = {
+				.cert_nv_ctr = &trusted_nv_ctr,
+				.plat_nv_ctr = &trusted_nv_ctr
+			}
+		}
+	},
+	.authenticated_data = (const auth_param_desc_t[COT_MAX_VERIFIED_PARAMS]) {
+		[0] = {
+			.type_desc = &ddr_imem_udimm_1d_fw_hash,
+			.data = {
+				.ptr = (void *)ddr_imem_udimm_1d_hash_buf,
+				.len = (unsigned int)HASH_DER_LEN
+			}
+		},
+		[1] = {
+			.type_desc = &ddr_imem_udimm_2d_fw_hash,
+			.data = {
+				.ptr = (void *)ddr_imem_udimm_2d_hash_buf,
+				.len = (unsigned int)HASH_DER_LEN
+			}
+		},
+		[2] = {
+			.type_desc = &ddr_dmem_udimm_1d_fw_hash,
+			.data = {
+				.ptr = (void *)ddr_dmem_udimm_1d_hash_buf,
+				.len = (unsigned int)HASH_DER_LEN
+			}
+		},
+		[3] = {
+			.type_desc = &ddr_dmem_udimm_2d_fw_hash,
+			.data = {
+				.ptr = (void *)ddr_dmem_udimm_2d_hash_buf,
+				.len = (unsigned int)HASH_DER_LEN
+			}
+		},
+	}
+};
+
+static const auth_img_desc_t ddr_imem_udimm_1d_img = {
+	.img_id = DDR_IMEM_UDIMM_1D_IMAGE_ID,
+	.img_type = IMG_RAW,
+	.parent = &ddr_udimm_fw_content_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_HASH,
+			.param.hash = {
+				.data = &raw_data,
+				.hash = &ddr_imem_udimm_1d_fw_hash
+			}
+		}
+	}
+};
+static const auth_img_desc_t ddr_imem_udimm_2d_img = {
+	.img_id = DDR_IMEM_UDIMM_2D_IMAGE_ID,
+	.img_type = IMG_RAW,
+	.parent = &ddr_udimm_fw_content_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_HASH,
+			.param.hash = {
+				.data = &raw_data,
+				.hash = &ddr_imem_udimm_2d_fw_hash
+			}
+		}
+	}
+};
+static const auth_img_desc_t ddr_dmem_udimm_1d_img = {
+	.img_id = DDR_DMEM_UDIMM_1D_IMAGE_ID,
+	.img_type = IMG_RAW,
+	.parent = &ddr_udimm_fw_content_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_HASH,
+			.param.hash = {
+				.data = &raw_data,
+				.hash = &ddr_dmem_udimm_1d_fw_hash
+			}
+		}
+	}
+};
+static const auth_img_desc_t ddr_dmem_udimm_2d_img = {
+	.img_id = DDR_DMEM_UDIMM_2D_IMAGE_ID,
+	.img_type = IMG_RAW,
+	.parent = &ddr_udimm_fw_content_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_HASH,
+			.param.hash = {
+				.data = &raw_data,
+				.hash = &ddr_dmem_udimm_2d_fw_hash
+			}
+		}
+	}
+};
+
+static const auth_img_desc_t ddr_rdimm_fw_content_cert = {
+	.img_id = DDR_RDIMM_FW_CONTENT_CERT_ID,
+	.img_type = IMG_CERT,
+	.parent = &ddr_fw_key_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &ddr_fw_content_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &raw_data
+			}
+		},
+		[1] = {
+			.type = AUTH_METHOD_NV_CTR,
+			.param.nv_ctr = {
+				.cert_nv_ctr = &trusted_nv_ctr,
+				.plat_nv_ctr = &trusted_nv_ctr
+			}
+		}
+	},
+	.authenticated_data = (const auth_param_desc_t[COT_MAX_VERIFIED_PARAMS]) {
+		[0] = {
+			.type_desc = &ddr_imem_rdimm_1d_fw_hash,
+			.data = {
+				.ptr = (void *)ddr_imem_rdimm_1d_hash_buf,
+				.len = (unsigned int)HASH_DER_LEN
+			}
+		},
+		[1] = {
+			.type_desc = &ddr_imem_rdimm_2d_fw_hash,
+			.data = {
+				.ptr = (void *)ddr_imem_rdimm_2d_hash_buf,
+				.len = (unsigned int)HASH_DER_LEN
+			}
+		},
+		[2] = {
+			.type_desc = &ddr_dmem_rdimm_1d_fw_hash,
+			.data = {
+				.ptr = (void *)ddr_dmem_rdimm_1d_hash_buf,
+				.len = (unsigned int)HASH_DER_LEN
+			}
+		},
+		[3] = {
+			.type_desc = &ddr_dmem_rdimm_2d_fw_hash,
+			.data = {
+				.ptr = (void *)ddr_dmem_rdimm_2d_hash_buf,
+				.len = (unsigned int)HASH_DER_LEN
+			}
+		},
+	}
+};
+
+static const auth_img_desc_t ddr_imem_rdimm_1d_img = {
+	.img_id = DDR_IMEM_RDIMM_1D_IMAGE_ID,
+	.img_type = IMG_RAW,
+	.parent = &ddr_rdimm_fw_content_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_HASH,
+			.param.hash = {
+				.data = &raw_data,
+				.hash = &ddr_imem_rdimm_1d_fw_hash
+			}
+		}
+	}
+};
+static const auth_img_desc_t ddr_imem_rdimm_2d_img = {
+	.img_id = DDR_IMEM_RDIMM_2D_IMAGE_ID,
+	.img_type = IMG_RAW,
+	.parent = &ddr_rdimm_fw_content_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_HASH,
+			.param.hash = {
+				.data = &raw_data,
+				.hash = &ddr_imem_rdimm_2d_fw_hash
+			}
+		}
+	}
+};
+static const auth_img_desc_t ddr_dmem_rdimm_1d_img = {
+	.img_id = DDR_DMEM_RDIMM_1D_IMAGE_ID,
+	.img_type = IMG_RAW,
+	.parent = &ddr_rdimm_fw_content_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_HASH,
+			.param.hash = {
+				.data = &raw_data,
+				.hash = &ddr_dmem_rdimm_1d_fw_hash
+			}
+		}
+	}
+};
+static const auth_img_desc_t ddr_dmem_rdimm_2d_img = {
+	.img_id = DDR_DMEM_RDIMM_2D_IMAGE_ID,
+	.img_type = IMG_RAW,
+	.parent = &ddr_rdimm_fw_content_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_HASH,
+			.param.hash = {
+				.data = &raw_data,
+				.hash = &ddr_dmem_rdimm_2d_fw_hash
+			}
+		}
+	}
+};
+#endif
+
+/*
+ * TBBR Chain of trust definition
+ */
+
+static const auth_img_desc_t * const cot_desc[] = {
+	[TRUSTED_KEY_CERT_ID]			=	&trusted_key_cert,
+	[SOC_FW_KEY_CERT_ID]			=	&soc_fw_key_cert,
+	[SOC_FW_CONTENT_CERT_ID]		=	&soc_fw_content_cert,
+	[BL31_IMAGE_ID]				=	&bl31_image,
+	[SOC_FW_CONFIG_ID]			=	&soc_fw_config,
+	[TRUSTED_OS_FW_KEY_CERT_ID]		=	&trusted_os_fw_key_cert,
+	[TRUSTED_OS_FW_CONTENT_CERT_ID]		=	&trusted_os_fw_content_cert,
+	[BL32_IMAGE_ID]				=	&bl32_image,
+	[BL32_EXTRA1_IMAGE_ID]			=	&bl32_extra1_image,
+	[BL32_EXTRA2_IMAGE_ID]			=	&bl32_extra2_image,
+	[TOS_FW_CONFIG_ID]			=	&tos_fw_config,
+	[NON_TRUSTED_FW_KEY_CERT_ID]		=	&non_trusted_fw_key_cert,
+	[NON_TRUSTED_FW_CONTENT_CERT_ID]	=	&non_trusted_fw_content_cert,
+	[BL33_IMAGE_ID]				=	&bl33_image,
+	[NT_FW_CONFIG_ID]			=	&nt_fw_config,
+#ifdef CONFIG_DDR_FIP_IMAGE
+	[DDR_FW_KEY_CERT_ID]			=	&ddr_fw_key_cert,
+	[DDR_UDIMM_FW_CONTENT_CERT_ID]		=	&ddr_udimm_fw_content_cert,
+	[DDR_RDIMM_FW_CONTENT_CERT_ID]		=	&ddr_rdimm_fw_content_cert,
+	[DDR_IMEM_UDIMM_1D_IMAGE_ID]		=	&ddr_imem_udimm_1d_img,
+	[DDR_IMEM_UDIMM_2D_IMAGE_ID]		=	&ddr_imem_udimm_2d_img,
+	[DDR_DMEM_UDIMM_1D_IMAGE_ID]		=	&ddr_dmem_udimm_1d_img,
+	[DDR_DMEM_UDIMM_2D_IMAGE_ID]		=	&ddr_dmem_udimm_2d_img,
+	[DDR_IMEM_RDIMM_1D_IMAGE_ID]		=	&ddr_imem_rdimm_1d_img,
+	[DDR_IMEM_RDIMM_2D_IMAGE_ID]		=	&ddr_imem_rdimm_2d_img,
+	[DDR_DMEM_RDIMM_1D_IMAGE_ID]		=	&ddr_dmem_rdimm_1d_img,
+	[DDR_DMEM_RDIMM_2D_IMAGE_ID]		=	&ddr_dmem_rdimm_2d_img,
+#endif
+};
+
+/* Register the CoT in the authentication module */
+REGISTER_COT(cot_desc);
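For reference, these descriptors are consumed by the generic authentication module: when BL2 loads an image through load_auth_image(), it follows the parent pointers and verifies every certificate on the path before checking the image itself. A minimal sketch of that ordering for BL31, assuming the standard auth_mod_verify_img() API from drivers/auth/auth_mod.h and hypothetical pre-loaded buffers (platform code does not normally need to do this by hand):

#include <drivers/auth/auth_mod.h>
#include <lib/utils_def.h>

static int verify_bl31_chain(void *img_base[], unsigned int img_len[])
{
	/* Parent-first order, mirroring the descriptors above. */
	static const unsigned int chain[] = {
		TRUSTED_KEY_CERT_ID,	/* signed with the ROTPK */
		SOC_FW_KEY_CERT_ID,	/* signed with trusted_world_pk */
		SOC_FW_CONTENT_CERT_ID,	/* carries soc_fw_hash */
		BL31_IMAGE_ID		/* raw image, matched against soc_fw_hash */
	};

	for (unsigned int i = 0U; i < ARRAY_SIZE(chain); i++) {
		int rc = auth_mod_verify_img(chain[i], img_base[i], img_len[i]);

		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}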
diff --git a/drivers/nxp/console/16550_console.S b/drivers/nxp/console/16550_console.S
new file mode 100644
index 0000000..044d3d0
--- /dev/null
+++ b/drivers/nxp/console/16550_console.S
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <console_macros.S>
+
+/* UART16550 Registers */
+#define UARTTX			0x0
+#define UARTRX			0x0
+#define UARTDLL			0x0
+#define UARTIER			0x1
+#define UARTDLLM		0x1
+#define UARTFCR			0x2
+#define UARTLCR			0x3
+#define UARTLSR			0x5
+#define UARTMCR			0x4
+
+/* FIFO Control Register bits */
+#define UARTFCR_FIFOMD_16450	(0 << 6)
+#define UARTFCR_FIFOMD_16550	(1 << 6)
+#define UARTFCR_RXTRIG_1	(0 << 6)
+#define UARTFCR_RXTRIG_4	(1 << 6)
+#define UARTFCR_RXTRIG_8	(2 << 6)
+#define UARTFCR_RXTRIG_16	(3 << 6)
+#define UARTFCR_TXTRIG_1	(0 << 4)
+#define UARTFCR_TXTRIG_4	(1 << 4)
+#define UARTFCR_TXTRIG_8	(2 << 4)
+#define UARTFCR_TXTRIG_16	(3 << 4)
+#define UARTFCR_DMAEN		(1 << 3)	/* Enable DMA mode */
+#define UARTFCR_TXCLR		(1 << 2)	/* Clear contents of Tx FIFO */
+#define UARTFCR_RXCLR		(1 << 1)	/* Clear contents of Rx FIFO */
+#define UARTFCR_FIFOEN		(1 << 0)	/* Enable the Tx/Rx FIFO */
+#define UARTFCR_64FIFO		(1 << 5)
+
+/* Line Control Register bits */
+#define UARTLCR_DLAB		(1 << 7)	/* Divisor Latch Access */
+#define UARTLCR_SETB		(1 << 6)	/* Set BREAK Condition */
+#define UARTLCR_SETP		(1 << 5)	/* Set Parity to LCR[4] */
+#define UARTLCR_EVEN		(1 << 4)	/* Even Parity Format */
+#define UARTLCR_PAR		(1 << 3)	/* Parity */
+#define UARTLCR_STOP		(1 << 2)	/* Stop Bit */
+#define UARTLCR_WORDSZ_5	0		/* Word Length of 5 */
+#define UARTLCR_WORDSZ_6	1		/* Word Length of 6 */
+#define UARTLCR_WORDSZ_7	2		/* Word Length of 7 */
+#define UARTLCR_WORDSZ_8	3		/* Word Length of 8 */
+
+/* Line Status Register bits */
+#define UARTLSR_RXFIFOEMT	(1 << 9)	/* Rx Fifo Empty */
+#define UARTLSR_TXFIFOFULL	(1 << 8)	/* Tx Fifo Full */
+#define UARTLSR_RXFIFOERR	(1 << 7)	/* Rx Fifo Error */
+#define UARTLSR_TEMT		(1 << 6)	/* Tx Shift Register Empty */
+#define UARTLSR_THRE		(1 << 5)	/* Tx Holding Register Empty */
+#define UARTLSR_BRK		(1 << 4)	/* Break Condition Detected */
+#define UARTLSR_FERR		(1 << 3)	/* Framing Error */
+#define UARTLSR_PERR		(1 << 2)	/* Parity Error */
+#define UARTLSR_OVRF		(1 << 1)	/* Rx Overrun Error */
+#define UARTLSR_RDR_BIT		0		/* Rx Data Ready bit number */
+#define UARTLSR_RDR		(1 << UARTLSR_RDR_BIT)	/* Rx Data Ready */
+
+#define CONSOLE_T_16550_BASE	CONSOLE_T_BASE
+
+	/*
+	 * "core" functions are low-level implementations that don't require
+	 * writable memory and are thus safe to call in BL1 crash context.
+	 */
+	.globl nxp_console_16550_core_init
+	.globl nxp_console_16550_core_putc
+	.globl nxp_console_16550_core_getc
+	.globl nxp_console_16550_core_flush
+
+	.globl console_16550_putc
+	.globl console_16550_getc
+	.globl console_16550_flush
+
+	/* -----------------------------------------------
+	 * int nxp_console_16550_core_init(uintptr_t base_addr,
+	 * unsigned int uart_clk, unsigned int baud_rate)
+	 * Function to initialize the console without a
+	 * C runtime, so it can be used to print debug
+	 * information. It is called from the console
+	 * registration code and from crash reporting.
+	 * In: x0 - console base address
+	 *     w1 - Uart clock in Hz
+	 *     w2 - Baud rate
+	 * Out: return 1 on success, 0 on error
+	 * Clobber list : x1, x2, x3
+	 * -----------------------------------------------
+	 */
+func nxp_console_16550_core_init
+	/* Check the input base address */
+	cbz	x0, init_fail
+	/* Check baud rate and uart clock for sanity */
+	cbz	w1, init_fail
+	cbz	w2, init_fail
+
+	/* Program the baudrate */
+	/* Divisor =  Uart clock / (16 * baudrate) */
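+	/*
+	 * Worked example (illustrative values only): a 24 MHz UART clock
+	 * at 115200 baud gives 24000000 / (16 * 115200) = 13, so the code
+	 * below ends up programming DLL = 13 and DLLM = 0.
+	 */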
+	lsl	w2, w2, #4
+	udiv	w2, w1, w2
+	and	w1, w2, #0xff		/* w1 = DLL */
+	lsr	w2, w2, #8
+	and	w2, w2, #0xff		/* w2 = DLLM */
+	ldrb	w3, [x0, #UARTLCR]
+	orr	w3, w3, #UARTLCR_DLAB
+	strb	w3, [x0, #UARTLCR]	/* enable DLL, DLLM programming */
+	strb	w1, [x0, #UARTDLL]	/* program DLL */
+	strb	w2, [x0, #UARTDLLM]	/* program DLLM */
+	mov	w2, #~UARTLCR_DLAB
+	and	w3, w3, w2
+	strb	w3, [x0, #UARTLCR]	/* disable DLL, DLLM programming */
+
+	/* 8n1 */
+	mov	w3, #3
+	strb	w3, [x0, #UARTLCR]
+	/* no interrupt */
+	mov	w3, #0
+	strb	w3, [x0, #UARTIER]
+	/* enable FIFO, clear Tx and Rx FIFOs */
+	mov	w3, #(UARTFCR_FIFOEN | UARTFCR_TXCLR | UARTFCR_RXCLR)
+	strb	w3, [x0, #UARTFCR]
+	/* DTR + RTS */
+	mov	w3, #3
+	strb	w3, [x0, #UARTMCR]
+	mov	w0, #1
+	ret
+init_fail:
+	mov	w0, #0
+	ret
+endfunc nxp_console_16550_core_init
+
+	.globl nxp_console_16550_register
+
+	/* -----------------------------------------------
+	 * int nxp_console_16550_register(uintptr_t baseaddr,
+	 *     uint32_t clock, uint32_t baud,
+	 *     console_t *console);
+	 * Function to initialize and register a new 16550
+	 * console. Storage passed in for the console struct
+	 * *must* be persistent (i.e. not from the stack).
+	 * If w1 (UART clock) is 0, initialisation will be
+	 * skipped, relying on previous code to have done
+	 * this already. w2 is ignored then as well.
+	 * In: x0 - UART register base address
+	 *     w1 - UART clock in Hz
+	 *     w2 - Baud rate (ignored if w1 is 0)
+	 *     x3 - pointer to empty console_t struct
+	 * Out: return 1 on success, 0 on error
+	 * Clobber list : x0, x1, x2, x6, x7, x14
+	 * -----------------------------------------------
+	 */
+func nxp_console_16550_register
+	mov	x7, x30
+	mov	x6, x3
+	cbz	x6, register_fail
+	str	x0, [x6, #CONSOLE_T_16550_BASE]
+
+	/* A clock rate of zero means to skip the initialisation. */
+	cbz	w1, register_16550
+
+	bl	nxp_console_16550_core_init
+	cbz	x0, register_fail
+
+register_16550:
+	mov	x0, x6
+	mov	x30, x7
+	finish_console_register 16550 putc=1, getc=1, flush=1
+
+register_fail:
+	ret	x7
+endfunc nxp_console_16550_register
+
+	/* --------------------------------------------------------
+	 * int nxp_console_16550_core_putc(int c, uintptr_t base_addr)
+	 * Function to output a character over the console. It
+	 * returns the character printed on success or -1 on error.
+	 * In : w0 - character to be printed
+	 *      x1 - console base address
+	 * Out : return -1 on error else return character.
+	 * Clobber list : x2
+	 * --------------------------------------------------------
+	 */
+func nxp_console_16550_core_putc
+#if ENABLE_ASSERTIONS
+	cmp	x1, #0
+	ASM_ASSERT(ne)
+#endif /* ENABLE_ASSERTIONS */
+
+	/* Prepend '\r' to '\n' */
+	cmp	w0, #'\n'
+	b.ne	2f
+	/* Wait for the transmit holding register to be empty */
+1:	ldrb	w2, [x1, #UARTLSR]
+	and	w2, w2, #UARTLSR_THRE
+	cmp	w2, #(UARTLSR_THRE)
+	b.ne	1b
+	mov	w2, #'\r'
+	strb	w2, [x1, #UARTTX]
+
+	/* Wait for the transmit holding register to be empty */
+2:	ldrb	w2, [x1, #UARTLSR]
+	and	w2, w2, #(UARTLSR_THRE)
+	cmp	w2, #(UARTLSR_THRE)
+	b.ne	2b
+	strb	w0, [x1, #UARTTX]
+	ret
+endfunc nxp_console_16550_core_putc
+
+	/* --------------------------------------------------------
+	 * int console_16550_putc(int c, console_t *console)
+	 * Function to output a character over the console. It
+	 * returns the character printed on success or -1 on error.
+	 * In : w0 - character to be printed
+	 *      x1 - pointer to console_t structure
+	 * Out : return -1 on error else return character.
+	 * Clobber list : x2
+	 * --------------------------------------------------------
+	 */
+func console_16550_putc
+#if ENABLE_ASSERTIONS
+	cmp	x1, #0
+	ASM_ASSERT(ne)
+#endif /* ENABLE_ASSERTIONS */
+	ldr	x1, [x1, #CONSOLE_T_16550_BASE]
+	b	nxp_console_16550_core_putc
+endfunc console_16550_putc
+
+	/* ---------------------------------------------
+	 * int nxp_console_16550_core_getc(uintptr_t base_addr)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 if no character is available.
+	 * In :  x0 - console base address
+	 * Out : w0 - character if available, else -1
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func nxp_console_16550_core_getc
+#if ENABLE_ASSERTIONS
+	cmp	x0, #0
+	ASM_ASSERT(ne)
+#endif /* ENABLE_ASSERTIONS */
+
+	/* Check if the receive FIFO is empty */
+	ldrb	w1, [x0, #UARTLSR]
+	tbz	w1, #UARTLSR_RDR_BIT, no_char
+	ldrb	w0, [x0, #UARTRX]
+	ret
+no_char:
+	mov	w0, #ERROR_NO_PENDING_CHAR
+	ret
+endfunc nxp_console_16550_core_getc
+
+	/* ---------------------------------------------
+	 * int console_16550_getc(console_t *console)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 if no character is available.
+	 * In :  x0 - pointer to console_t structure
+	 * Out : w0 - character if available, else -1
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func console_16550_getc
+#if ENABLE_ASSERTIONS
+	cmp	x0, #0
+	ASM_ASSERT(ne)
+#endif /* ENABLE_ASSERTIONS */
+	ldr	x0, [x0, #CONSOLE_T_16550_BASE]
+	b	nxp_console_16550_core_getc
+endfunc console_16550_getc
+
+	/* ---------------------------------------------
+	 * int nxp_console_16550_core_flush(uintptr_t base_addr)
+	 * Function to force a write of all buffered
+	 * data that hasn't been output.
+	 * In : x0 - console base address
+	 * Out : return -1 on error else return 0.
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func nxp_console_16550_core_flush
+#if ENABLE_ASSERTIONS
+	cmp	x0, #0
+	ASM_ASSERT(ne)
+#endif /* ENABLE_ASSERTIONS */
+
+	/* Loop until the transmit FIFO is empty */
+1:	ldrb	w1, [x0, #UARTLSR]
+	and	w1, w1, #(UARTLSR_THRE)
+	cmp	w1, #(UARTLSR_THRE)
+	b.ne	1b
+
+	mov	w0, #0
+	ret
+endfunc nxp_console_16550_core_flush
+
+	/* ---------------------------------------------
+	 * int console_16550_flush(console_t *console)
+	 * Function to force a write of all buffered
+	 * data that hasn't been output.
+	 * In : x0 - pointer to console_t structure
+	 * Out : return -1 on error else return 0.
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func console_16550_flush
+#if ENABLE_ASSERTIONS
+	cmp	x0, #0
+	ASM_ASSERT(ne)
+#endif /* ENABLE_ASSERTIONS */
+	ldr	x0, [x0, #CONSOLE_T_16550_BASE]
+	b	nxp_console_16550_core_flush
+endfunc console_16550_flush
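The driver above is plain polled I/O. As a reading aid (not part of the patch), the C sketch below shows the equivalent logic using the standard 16550 LSR bits; mmio_read_8()/mmio_write_8() are the usual TF-A accessors from lib/mmio.h.

#include <stdint.h>
#include <lib/mmio.h>

#define UART_TX		0x0U
#define UART_RX		0x0U
#define UART_LSR	0x5U
#define LSR_THRE	(1U << 5)	/* Tx holding register empty */
#define LSR_DR		(1U << 0)	/* Rx data ready */

/* Blocking transmit: wait for the holding register to drain, then write. */
static void uart16550_putc(uintptr_t base, uint8_t c)
{
	while ((mmio_read_8(base + UART_LSR) & LSR_THRE) == 0U)
		;
	mmio_write_8(base + UART_TX, c);
}

/* Non-blocking receive: return -1 when no character is pending. */
static int uart16550_getc(uintptr_t base)
{
	if ((mmio_read_8(base + UART_LSR) & LSR_DR) == 0U) {
		return -1;
	}
	return (int)mmio_read_8(base + UART_RX);
}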
diff --git a/drivers/nxp/console/console.mk b/drivers/nxp/console/console.mk
new file mode 100644
index 0000000..22d1336
--- /dev/null
+++ b/drivers/nxp/console/console.mk
@@ -0,0 +1,46 @@
+#
+# Copyright 2021 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+#------------------------------------------------------------------------------
+#
+# Select the CONSOLE files
+#
+# -----------------------------------------------------------------------------
+
+ifeq (${ADD_CONSOLE},)
+
+ADD_CONSOLE		:= 1
+
+PLAT_INCLUDES		+=	-I$(PLAT_DRIVERS_PATH)/console
+
+ifeq ($(CONSOLE), NS16550)
+NXP_CONSOLE		:=	NS16550
+
+$(eval $(call add_define_val,NXP_CONSOLE,${NXP_CONSOLE}))
+
+CONSOLE_SOURCES		:=	$(PLAT_DRIVERS_PATH)/console/16550_console.S	\
+				$(PLAT_DRIVERS_PATH)/console/console_16550.c
+else
+ifeq ($(CONSOLE), PL011)
+CONSOLE_SOURCES		:=	drivers/arm/pl011/aarch64/pl011_console.S	\
+				${PLAT_DRIVERS_PATH}/console/console_pl011.c
+else
+	$(error -> CONSOLE not set!)
+endif
+endif
+
+ifeq (${BL_COMM_CONSOLE_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${CONSOLE_SOURCES}
+else
+ifeq (${BL2_CONSOLE_NEEDED},yes)
+BL2_SOURCES		+= ${CONSOLE_SOURCES}
+endif
+ifeq (${BL31_CONSOLE_NEEDED},yes)
+BL31_SOURCES		+= ${CONSOLE_SOURCES}
+endif
+endif
+endif
+# -----------------------------------------------------------------------------
diff --git a/drivers/nxp/console/console_16550.c b/drivers/nxp/console/console_16550.c
new file mode 100644
index 0000000..fa5c5bb
--- /dev/null
+++ b/drivers/nxp/console/console_16550.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+
+#include <common/debug.h>
+#include <dcfg.h>
+#include <lib/utils.h>
+#include <plat_console.h>
+
+/*
+ * Initialize the NXP boot console and register it with the console
+ * framework, using the platform clock divided by uart_clk_div.
+ */
+void plat_console_init(uintptr_t nxp_console_addr, uint32_t uart_clk_div,
+			uint32_t baud)
+{
+	struct sysinfo sys;
+	static console_t nxp_console;
+
+	zeromem(&sys, sizeof(sys));
+	if (get_clocks(&sys)) {
+		ERROR("System clocks are not set\n");
+		panic();
+	}
+	nxp_console_16550_register(nxp_console_addr,
+			      (sys.freq_platform/uart_clk_div),
+			       baud, &nxp_console);
+}
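A typical caller is a platform early-setup hook. The snippet below is only illustrative; the base address, clock divider and baud rate macros are placeholders for whatever the platform's own headers actually define.

#include <plat_console.h>

/* Placeholder values -- real platforms take these from their SoC headers. */
#define PLAT_UART_BASE		0x021c0500UL
#define PLAT_UART_CLK_DIV	1U
#define PLAT_CONSOLE_BAUD	115200U

void example_early_platform_setup(void)
{
	/*
	 * Registers the boot console; the UART input clock is derived from
	 * sys.freq_platform / PLAT_UART_CLK_DIV inside plat_console_init().
	 */
	plat_console_init(PLAT_UART_BASE, PLAT_UART_CLK_DIV, PLAT_CONSOLE_BAUD);
}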
diff --git a/drivers/nxp/console/console_pl011.c b/drivers/nxp/console/console_pl011.c
new file mode 100644
index 0000000..93f2fc2
--- /dev/null
+++ b/drivers/nxp/console/console_pl011.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+
+#include <common/debug.h>
+#include <dcfg.h>
+#include <drivers/arm/pl011.h>
+#include <drivers/console.h>
+#include <lib/utils.h>
+
+/*
+ * Initialize the NXP PL011 boot console and register it with the console
+ * framework, using the platform clock divided by uart_clk_div.
+ */
+void plat_console_init(uintptr_t nxp_console_addr, uint32_t uart_clk_div,
+			uint32_t baud)
+{
+	struct sysinfo sys;
+	static console_t nxp_console;
+
+	zeromem(&sys, sizeof(sys));
+	if (get_clocks(&sys)) {
+		ERROR("System clocks are not set\n");
+		panic();
+	}
+
+	console_pl011_register(nxp_console_addr,
+			      (sys.freq_platform/uart_clk_div),
+			       baud, &nxp_console);
+}
diff --git a/drivers/nxp/console/plat_console.h b/drivers/nxp/console/plat_console.h
new file mode 100644
index 0000000..8b1b23a
--- /dev/null
+++ b/drivers/nxp/console/plat_console.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PLAT_CONSOLE_H
+#define PLAT_CONSOLE_H
+
+#include <stdint.h>
+#include <drivers/console.h>
+
+#if (NXP_CONSOLE == NS16550)
+/*
+ * NXP specific UART - 16550 configuration
+ *
+ * Initialize an NXP 16550 console instance and register it with the console
+ * framework. The |console| pointer must point to storage that will be valid
+ * for the lifetime of the console, such as a global or static local variable.
+ * Its contents will be reinitialized from scratch.
+ * When |clock| has a value of 0, the UART will *not* be initialised. This
+ * means the UART should already be enabled and the baudrate and clock setup
+ * should have been done already, either by platform specific code or by
+ * previous firmware stages. The |baud| parameter will be ignored in this
+ * case as well.
+ */
+int nxp_console_16550_register(uintptr_t baseaddr, uint32_t clock,
+			       uint32_t baud, console_t *console);
+#endif
+/*
+ * Function to initialize platform's console
+ * and register with console framework
+ */
+void plat_console_init(uintptr_t nxp_console_addr, uint32_t uart_clk_div,
+			uint32_t baud);
+
+#endif
diff --git a/drivers/nxp/crypto/caam/caam.mk b/drivers/nxp/crypto/caam/caam.mk
new file mode 100644
index 0000000..548c7b1
--- /dev/null
+++ b/drivers/nxp/crypto/caam/caam.mk
@@ -0,0 +1,28 @@
+#
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+
+ifeq (${ADD_CAAM},)
+
+ADD_CAAM		:= 1
+CAAM_DRIVER_PATH	:= drivers/nxp/crypto/caam
+
+CAAM_DRIVER_SOURCES	+=  $(wildcard $(CAAM_DRIVER_PATH)/src/*.c)
+
+PLAT_INCLUDES		+= -I$(CAAM_DRIVER_PATH)/include
+
+ifeq (${BL_COMM_CRYPTO_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${CAAM_DRIVER_SOURCES}
+else
+ifeq (${BL2_CRYPTO_NEEDED},yes)
+BL2_SOURCES		+= ${CAAM_DRIVER_SOURCES}
+endif
+ifeq (${BL31_CRYPTO_NEEDED},yes)
+BL31_SOURCES		+= ${CAAM_DRIVER_SOURCES}
+endif
+endif
+
+endif
diff --git a/drivers/nxp/crypto/caam/include/caam.h b/drivers/nxp/crypto/caam/include/caam.h
new file mode 100644
index 0000000..580e133
--- /dev/null
+++ b/drivers/nxp/crypto/caam/include/caam.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2017-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef CAAM_H
+#define CAAM_H
+
+#include "caam_io.h"
+#include "sec_jr_driver.h"
+
+
+/* Job ring 3 is reserved for usage by sec firmware */
+#define DEFAULT_JR	3
+
+#if defined(CONFIG_CHASSIS_3_2) || defined(CONFIG_CHASSIS_2)
+#define CAAM_JR0_OFFSET			0x10000
+#define CAAM_JR1_OFFSET			0x20000
+#define CAAM_JR2_OFFSET			0x30000
+#define CAAM_JR3_OFFSET			0x40000
+#endif
+
+enum sig_alg {
+	RSA,
+	ECC
+};
+
+/* This function does basic SEC Initialization */
+int sec_init(uintptr_t nxp_caam_addr);
+int config_sec_block(void);
+uintptr_t get_caam_addr(void);
+
+/* This function is used to submit jobs to JR */
+int run_descriptor_jr(struct job_descriptor *desc);
+
+/* This function is used to instantiate the HW RNG if it is not already instantiated */
+int hw_rng_instantiate(void);
+
+/* This function is used to return random bytes of byte_len from HW RNG */
+int get_rand_bytes_hw(uint8_t *bytes, int byte_len);
+
+/* This function is used to fetch a blob derived from the HW unique key from CAAM */
+int get_hw_unq_key_blob_hw(uint8_t *hw_key, int size);
+
+/*
+ * This function is used to fetch a random number from CAAM,
+ * either 4 or 8 bytes long depending on the rngWidth value.
+ */
+unsigned long long get_random(int rngWidth);
+
+#endif /* CAAM_H */
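The usual sequence behind these prototypes is: initialise the SEC block, make sure the RNG state handle is instantiated, then pull random bytes. A hedged sketch (the CAAM base address is a placeholder supplied by the platform):

#include <caam.h>
#include <common/debug.h>

static int fill_random(uintptr_t caam_base, uint8_t *buf, int len)
{
	if (sec_init(caam_base) != 0) {
		ERROR("CAAM initialisation failed\n");
		return -1;
	}

	if (hw_rng_instantiate() != 0) {
		ERROR("RNG instantiation failed\n");
		return -1;
	}

	return get_rand_bytes_hw(buf, len);
}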
diff --git a/drivers/nxp/crypto/caam/include/caam_io.h b/drivers/nxp/crypto/caam/include/caam_io.h
new file mode 100644
index 0000000..4fdb04d
--- /dev/null
+++ b/drivers/nxp/crypto/caam/include/caam_io.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef CAAM_IO_H
+#define CAAM_IO_H
+
+#include <endian.h>
+#include <lib/mmio.h>
+
+typedef unsigned long long phys_addr_t;
+typedef unsigned long long phys_size_t;
+
+/* Return higher 32 bits of physical address */
+#define PHYS_ADDR_HI(phys_addr) \
+	    (uint32_t)(((uint64_t)phys_addr) >> 32)
+
+/* Return lower 32 bits of physical address */
+#define PHYS_ADDR_LO(phys_addr) \
+	    (uint32_t)(((uint64_t)phys_addr) & 0xFFFFFFFF)
+
+#ifdef NXP_SEC_BE
+#define sec_in32(a)	bswap32(mmio_read_32((uintptr_t)(a)))
+#define sec_out32(a, v)	mmio_write_32((uintptr_t)(a), bswap32(v))
+#define sec_in64(addr)  (					\
+	((uint64_t)sec_in32((uintptr_t)(addr)) << 32) |	\
+	(sec_in32(((uintptr_t)(addr)) + 4)))
+#define sec_out64(addr, val) ({					\
+	sec_out32(((uintptr_t)(addr)), (uint32_t)((val) >> 32));	\
+	sec_out32(((uintptr_t)(addr)) + 4, (uint32_t)(val)); })
+#elif defined(NXP_SEC_LE)
+#define sec_in32(a)	mmio_read_32((uintptr_t)(a))
+#define sec_out32(a, v)	mmio_write_32((uintptr_t)(a), (v))
+#define sec_in64(addr)	(					\
+	((uint64_t)sec_in32((uintptr_t)(addr) + 4) << 32) |	\
+	(sec_in32((uintptr_t)(addr))))
+#define sec_out64(addr, val) ({						\
+	sec_out32(((uintptr_t)(addr)) + 4, (uint32_t)((val) >> 32));	\
+	sec_out32(((uintptr_t)(addr)), (uint32_t)(val)); })
+#else
+#error Please define CCSR SEC register endianness
+#endif
+
+static inline void *ptov(phys_addr_t *ptr)
+{
+	return (void *)ptr;
+}
+
+static inline phys_addr_t *vtop(void *ptr)
+{
+	return (phys_addr_t *)ptr;
+}
+#endif /* CAAM_IO_H */
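As an example of how these helpers are meant to be combined, CAAM descriptors and ring registers take DMA pointers as two 32-bit halves; the sketch below (the struct layout is a stand-in, not a real driver type) splits a flat-mapped buffer address with the macros above.

#include <caam_io.h>

struct dma_ptr {
	uint32_t hi;
	uint32_t lo;
};

static void set_dma_ptr(struct dma_ptr *p, void *buf)
{
	/* Flat mapping: vtop() is an identity cast in this driver. */
	phys_addr_t pa = (phys_addr_t)(uintptr_t)vtop(buf);

	p->hi = PHYS_ADDR_HI(pa);
	p->lo = PHYS_ADDR_LO(pa);
}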
diff --git a/drivers/nxp/crypto/caam/include/hash.h b/drivers/nxp/crypto/caam/include/hash.h
new file mode 100644
index 0000000..946087d
--- /dev/null
+++ b/drivers/nxp/crypto/caam/include/hash.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2017-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef __HASH_H__
+#define __HASH_H__
+
+#include <stdbool.h>
+
+/* List of hash algorithms */
+enum hash_algo {
+	SHA1 = 0,
+	SHA256
+};
+
+/* number of bytes in the SHA-256 digest */
+#define SHA256_DIGEST_SIZE 32
+
+/*
+ * number of words in the digest - Digest is kept internally
+ * as 8 32-bit words
+ */
+#define _SHA256_DIGEST_LENGTH 8
+
+/*
+ * block length - A block, treated as a sequence of
+ * 32-bit words
+ */
+#define SHA256_BLOCK_LENGTH 16
+
+/* number of bytes in the block */
+#define SHA256_DATA_SIZE 64
+
+#define MAX_SG		12
+
+struct sg_entry {
+#if defined(NXP_SEC_LE)
+	uint32_t addr_lo;	/* Memory Address - lo */
+	uint32_t addr_hi;	/* Memory Address of start of buffer - hi */
+#else
+	uint32_t addr_hi;	/* Memory Address of start of buffer - hi */
+	uint32_t addr_lo;	/* Memory Address - lo */
+#endif
+
+	uint32_t len_flag;	/* Length of the data in the frame */
+#define SG_ENTRY_LENGTH_MASK	0x3FFFFFFF
+#define SG_ENTRY_EXTENSION_BIT	0x80000000
+#define SG_ENTRY_FINAL_BIT	0x40000000
+	uint32_t bpid_offset;
+#define SG_ENTRY_BPID_MASK	0x00FF0000
+#define SG_ENTRY_BPID_SHIFT	16
+#define SG_ENTRY_OFFSET_MASK	0x00001FFF
+#define SG_ENTRY_OFFSET_SHIFT	0
+};
+
+/*
+ * SHA-256 context
+ * containing the following fields:
+ * State
+ * count low
+ * count high
+ * block data buffer
+ * index to the buffer
+ */
+struct hash_ctx {
+	struct sg_entry sg_tbl[MAX_SG];
+	uint32_t hash_desc[64];
+	uint8_t hash[SHA256_DIGEST_SIZE];
+	uint32_t sg_num;
+	uint32_t len;
+	uint8_t *data;
+	enum hash_algo algo;
+	bool active;
+};
+
+int hash_init(enum hash_algo algo, void **ctx);
+int hash_update(enum hash_algo algo, void *context, void *data_ptr,
+		unsigned int data_len);
+int hash_final(enum hash_algo algo, void *context, void *hash_ptr,
+	       unsigned int hash_len);
+
+#endif
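The intended use of this context API is the usual init/update/final sequence; a short sketch with error handling abbreviated:

#include <stdint.h>
#include <hash.h>

static int sha256_oneshot(void *data, unsigned int len,
			  uint8_t digest[SHA256_DIGEST_SIZE])
{
	void *ctx = NULL;

	if (hash_init(SHA256, &ctx) != 0) {
		return -1;
	}

	/* May be called repeatedly for scattered input buffers. */
	if (hash_update(SHA256, ctx, data, len) != 0) {
		return -1;
	}

	return hash_final(SHA256, ctx, digest, SHA256_DIGEST_SIZE);
}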
diff --git a/drivers/nxp/crypto/caam/include/jobdesc.h b/drivers/nxp/crypto/caam/include/jobdesc.h
new file mode 100644
index 0000000..5921f7b
--- /dev/null
+++ b/drivers/nxp/crypto/caam/include/jobdesc.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2017-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef __JOBDESC_H
+#define __JOBDESC_H
+
+#include <rsa.h>
+
+#define DESC_LEN_MASK		0x7f
+#define DESC_START_SHIFT	16
+
+#define KEY_BLOB_SIZE 32
+#define MAC_SIZE 16
+
+#define KEY_IDNFR_SZ_BYTES 16
+#define CLASS_SHIFT 25
+#define CLASS_2	(0x02 << CLASS_SHIFT)
+
+#define CMD_SHIFT		27
+#define CMD_OPERATION		(U(0x10) << CMD_SHIFT)
+
+#define OP_TYPE_SHIFT		24
+#define OP_TYPE_ENCAP_PROTOCOL	(0x07 << OP_TYPE_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */
+#define OP_PCLID_SHIFT		16
+#define OP_PCLID_BLOB		(0x0d << OP_PCLID_SHIFT)
+
+#define BLOB_PROTO_INFO		 0x00000002
+
+uint32_t desc_length(uint32_t *desc);
+
+int cnstr_rng_jobdesc(uint32_t *desc, uint32_t state_handle,
+		      uint32_t *add_inp, uint32_t add_ip_len,
+		      uint8_t *out_data, uint32_t len);
+
+int cnstr_rng_instantiate_jobdesc(uint32_t *desc);
+
+/* Construct descriptor to generate hw key blob */
+int cnstr_hw_encap_blob_jobdesc(uint32_t *desc,
+				uint8_t *key_idnfr, uint32_t key_sz,
+				uint32_t key_class, uint8_t *plain_txt,
+				uint32_t in_sz, uint8_t *enc_blob,
+				uint32_t out_sz, uint32_t operation);
+
+void cnstr_hash_jobdesc(uint32_t *desc, uint8_t *msg, uint32_t msgsz,
+			uint8_t *digest);
+
+void cnstr_jobdesc_pkha_rsaexp(uint32_t *desc,
+			       struct pk_in_params *pkin, uint8_t *out,
+			       uint32_t out_siz);
+#endif
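As an example of how these constructors are used, a hash job descriptor is built into a word array and then submitted to a job ring through run_descriptor_jr() (declared in caam.h). The struct job_descriptor wrapper lives in sec_jr_driver.h, which is not part of this hunk, so this sketch stops at descriptor construction.

#include <stdint.h>
#include <jobdesc.h>

/*
 * 64 words (256 bytes) is the maximum CAAM descriptor size; see
 * SEC_CRYPTO_DESCRIPTOR_SIZE in jr_driver_config.h.
 */
#define MAX_DESC_WORDS	64U

static void build_hash_desc(uint8_t *msg, uint32_t msg_len, uint8_t *digest)
{
	uint32_t desc[MAX_DESC_WORDS];

	/* Fills desc[] with a one-shot hashing job over msg. */
	cnstr_hash_jobdesc(desc, msg, msg_len, digest);
}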
diff --git a/drivers/nxp/crypto/caam/include/jr_driver_config.h b/drivers/nxp/crypto/caam/include/jr_driver_config.h
new file mode 100644
index 0000000..f25c42e
--- /dev/null
+++ b/drivers/nxp/crypto/caam/include/jr_driver_config.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2017-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef _JR_DRIVER_CONFIG_H_
+#define _JR_DRIVER_CONFIG_H_
+
+/* Helper defines  */
+
+ /* Define used for setting a flag on  */
+#define  ON  1
+ /* Define used for setting a flag off  */
+#define  OFF 0
+
+ /* SEC is configured to start work in polling mode,  */
+#define SEC_STARTUP_POLLING_MODE     0
+/*
+ * SEC is configured to start work in interrupt mode,
+ *  when configured for NAPI notification style.
+ */
+#define SEC_STARTUP_INTERRUPT_MODE   1
+
+/*
+ * SEC driver will use ONLY interrupts to receive notifications
+ * for processed packets from SEC engine hardware.
+ */
+#define SEC_NOTIFICATION_TYPE_IRQ   1
+/*
+ * SEC driver will use ONLY polling to receive notifications
+ * for processed packets from SEC engine hardware.
+ */
+#define SEC_NOTIFICATION_TYPE_POLL  2
+
+/*
+ * Determines how SEC user space driver will receive notifications
+ * for processed packets from SEC engine.
+ * Valid values are: #SEC_NOTIFICATION_TYPE_POLL, #SEC_NOTIFICATION_TYPE_IRQ
+ */
+#define SEC_NOTIFICATION_TYPE   SEC_NOTIFICATION_TYPE_POLL
+
+ /* Maximum number of job rings supported by SEC hardware  */
+#define MAX_SEC_JOB_RINGS         1
+
+/*
+ * Size of cryptographic context that is used directly in communicating
+ *  with SEC device.
+ *  SEC device works only with physical addresses. This is the maximum size
+ *  for a SEC descriptor ( = 64 words).
+ */
+
+#define SEC_CRYPTO_DESCRIPTOR_SIZE  256
+
+/*
+ * Size of job descriptor submitted to SEC device for each packet to be
+ *  processed.
+ *  Job descriptor contains 3 DMA address pointers:
+ *      - to shared descriptor, to input buffer and to output buffer.
+ *  The job descriptor contains other SEC specific commands as well:
+ *      - HEADER command, SEQ IN PTR command SEQ OUT PTR command and opaque
+ *        data, each measuring 4 bytes.
+ *  Job descriptor size, depending on physical address representation:
+ *      - 32 bit - size is 28 bytes - cacheline-aligned size is 64 bytes
+ *      - 36 bit - size is 40 bytes - cacheline-aligned size is 64 bytes
+ *  @note: Job descriptor must be cacheline-aligned to ensure efficient memory
+ *  access.
+ *  @note: If other format is used for job descriptor, then the size must be
+ *  revised.
+ */
+
+#define SEC_JOB_DESCRIPTOR_SIZE		64
+
+/*
+ * Size of one entry in the input ring of a job ring.
+ *  Input ring contains pointers to job descriptors.
+ *  The memory used for an input ring and output ring must be physically
+ *  contiguous.
+ */
+
+#define SEC_JOB_INPUT_RING_ENTRY_SIZE	sizeof(phys_addr_t)
+
+/*
+ * Size of one entry in the output ring of a job ring.
+ *  Output ring entry is a pointer to a job descriptor followed by a 4 byte
+ *  status word.
+ *  The memory used for an input ring and output ring must be physically
+ *  contiguous.
+ *  @note If desired to use also the optional SEQ OUT indication in output
+ *  ring entries, then 4 more bytes must be added to the size.
+ */
+
+#define SEC_JOB_OUTPUT_RING_ENTRY_SIZE	(SEC_JOB_INPUT_RING_ENTRY_SIZE + 4)
+
+ /* DMA memory required for an input ring of a job ring.  */
+#define SEC_DMA_MEM_INPUT_RING_SIZE	\
+		((SEC_JOB_INPUT_RING_ENTRY_SIZE) * (SEC_JOB_RING_SIZE))
+
+/*
+ * DMA memory required for an output ring of a job ring.
+ *  Required extra 4 byte for status word per each entry.
+ */
+#define SEC_DMA_MEM_OUTPUT_RING_SIZE	\
+		((SEC_JOB_OUTPUT_RING_ENTRY_SIZE) * (SEC_JOB_RING_SIZE))
+
+ /* DMA memory required for descriptors of a job ring.  */
+#define SEC_DMA_MEM_DESCRIPTORS		\
+		((SEC_CRYPTO_DESCRIPTOR_SIZE)*(SEC_JOB_RING_SIZE))
+
+ /* DMA memory required for a job ring, including both input output rings.  */
+#define SEC_DMA_MEM_JOB_RING_SIZE	\
+		((SEC_DMA_MEM_INPUT_RING_SIZE) +	\
+		(SEC_DMA_MEM_OUTPUT_RING_SIZE))
+
+/*
+ * When calling sec_init() UA will provide an area of virtual memory
+ *  of size #SEC_DMA_MEMORY_SIZE to be  used internally by the driver
+ *  to allocate data (like SEC descriptors) that needs to be passed to
+ *  SEC device in physical addressing and later on retrieved from SEC device.
+ *  At initialization the UA provides specialized ptov/vtop functions/macros to
+ *  translate addresses allocated from this memory area.
+ */
+#define SEC_DMA_MEMORY_SIZE		\
+		((SEC_DMA_MEM_JOB_RING_SIZE) * (MAX_SEC_JOB_RINGS))
+
+/*
+ * SEC DEVICE related configuration.
+
+ * Enable/Disable logging support at compile time.
+ * Valid values:
+ * ON - enable logging
+ * OFF - disable logging
+ * The messages are logged at stdout.
+ */
+
+#define SEC_DRIVER_LOGGING OFF
+
+/*
+ * Configure logging level at compile time.
+ * Valid values:
+ * SEC_DRIVER_LOG_ERROR - log only errors
+ * SEC_DRIVER_LOG_INFO  - log errors and info messages
+ * SEC_DRIVER_LOG_DEBUG - log errors, info and debug messages
+ */
+
+#define SEC_DRIVER_LOGGING_LEVEL SEC_DRIVER_LOG_DEBUG
+
+/*
+ * SEC JOB RING related configuration.
+
+ * Configure the size of the JOB RING.
+ * The maximum size of the ring is hardware limited to 1024.
+ * However the number of packets in flight in a time interval of
+ * 1ms can be calculated
+ * from the traffic rate (Mbps) and packet size.
+ * Here it was considered a packet size of 40 bytes.
+ * @note Round up to nearest power of 2 for optimized update
+ * of producer/consumer indexes of each job ring
+ * \todo Should be set to 750 according to the calculation above, but
+ * the JR size must be a power of 2, so the next closest value would
+ * have to be chosen (i.e. 512 rather than 1024).
+ * For this firmware the ring size is set to 16.
+ */
+
+#define SEC_JOB_RING_SIZE    16
+
+/*
+ * Interrupt coalescing related configuration.
+ * NOTE: SEC hardware enabled interrupt
+ * coalescing is not supported on SEC version 3.1!
+ * SEC version 4.4 has support for interrupt
+ * coalescing.
+ */
+
+#if SEC_NOTIFICATION_TYPE != SEC_NOTIFICATION_TYPE_POLL
+
+#define SEC_INT_COALESCING_ENABLE   ON
+/*
+ * Interrupt Coalescing Descriptor Count Threshold.
+ * While interrupt coalescing is enabled (ICEN=1), this value determines
+ * how many Descriptors are completed before raising an interrupt.
+ * Valid values for this field are from 0 to 255.
+ * Note that a value of 1 functionally defeats the advantages of interrupt
+ * coalescing since the threshold value is reached each time that a
+ * Job Descriptor is completed. A value of 0 is treated in the same
+ * manner as a value of 1.
+ *
+ */
+#define SEC_INTERRUPT_COALESCING_DESCRIPTOR_COUNT_THRESH  10
+
+/*
+ * Interrupt Coalescing Timer Threshold.
+ * While interrupt coalescing is enabled (ICEN=1), this value determines the
+ * maximum amount of time after processing a Descriptor before raising an
+ * interrupt.
+ * The threshold value is represented in units equal to 64 CAAM interface
+ * clocks. Valid values for this field are from 1 to 65535.
+ * A value of 0 results in behavior identical to that when interrupt
+ * coalescing is disabled.
+ */
+#define SEC_INTERRUPT_COALESCING_TIMER_THRESH  100
+#endif /* SEC_NOTIFICATION_TYPE_POLL  */
+
+#endif /* _JR_DRIVER_CONFIG_H_  */
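With the defaults above (64-bit phys_addr_t, ring size 16, one job ring) the ring memory works out to 128 bytes of input ring, 192 bytes of output ring and 320 bytes of total ring DMA memory. The asserts below just spell that arithmetic out, assuming the driver's include paths:

#include <caam_io.h>		/* phys_addr_t, used by the entry-size macros */
#include <jr_driver_config.h>

_Static_assert(SEC_JOB_INPUT_RING_ENTRY_SIZE == 8, "8-byte input entries");
_Static_assert(SEC_DMA_MEM_INPUT_RING_SIZE == 128, "16 entries x 8 bytes");
_Static_assert(SEC_DMA_MEM_OUTPUT_RING_SIZE == 192, "16 entries x 12 bytes");
_Static_assert(SEC_DMA_MEMORY_SIZE == 320, "one job ring, 320 bytes total");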
diff --git a/drivers/nxp/crypto/caam/include/rsa.h b/drivers/nxp/crypto/caam/include/rsa.h
new file mode 100644
index 0000000..bd5dc71
--- /dev/null
+++ b/drivers/nxp/crypto/caam/include/rsa.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2017-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef _RSA_H__
+#define _RSA_H__
+
+/* RSA key size defines */
+#define RSA_4K_KEY_SZ       4096
+#define RSA_4K_KEY_SZ_BYTES (RSA_4K_KEY_SZ/8)
+#define RSA_2K_KEY_SZ       2048
+#define RSA_2K_KEY_SZ_BYTES (RSA_2K_KEY_SZ/8)
+#define RSA_1K_KEY_SZ       1024
+#define RSA_1K_KEY_SZ_BYTES (RSA_1K_KEY_SZ/8)
+
+#define SHA256_BYTES        (256/8)
+
+struct pk_in_params {
+	uint8_t *e;
+	uint32_t e_siz;
+	uint8_t *n;
+	uint32_t n_siz;
+	uint8_t *a;
+	uint32_t a_siz;
+	uint8_t *b;
+	uint32_t b_siz;
+};
+
+struct rsa_context {
+	struct pk_in_params pkin;
+};
+
+int rsa_verify_signature(void *hash_ptr, unsigned int hash_len,
+			 void *sig_ptr, unsigned int sig_len,
+			 void *pk_ptr, unsigned int pk_len);
+
+#endif
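rsa_verify_signature() is the primitive the crypto glue calls when checking certificate signatures; a hedged usage sketch, with the digest, signature and DER-encoded public key assumed to come from the certificate being authenticated:

#include <stdint.h>
#include <rsa.h>

static int check_rsa_sig(uint8_t digest[SHA256_BYTES],
			 void *sig, unsigned int sig_len,
			 void *pk_der, unsigned int pk_len)
{
	/* Expected to return 0 when the signature over the digest verifies. */
	return rsa_verify_signature(digest, SHA256_BYTES, sig, sig_len,
				    pk_der, pk_len);
}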
diff --git a/drivers/nxp/crypto/caam/include/sec_hw_specific.h b/drivers/nxp/crypto/caam/include/sec_hw_specific.h
new file mode 100644
index 0000000..a82a1a0
--- /dev/null
+++ b/drivers/nxp/crypto/caam/include/sec_hw_specific.h
@@ -0,0 +1,506 @@
+/*
+ * Copyright 2017-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef _SEC_HW_SPECIFIC_H_
+#define _SEC_HW_SPECIFIC_H_
+
+#include "caam.h"
+#include "sec_jr_driver.h"
+
+ /* DEFINES AND MACROS */
+
+/* Used to retry resetting a job ring in SEC hardware. */
+#define SEC_TIMEOUT 100000
+
+/*
+ * Offset to the registers of a job ring.
+ * It is different for each job ring.
+ */
+#define CHAN_BASE(jr)   ((phys_addr_t)(jr)->register_base_addr)
+
+#define unlikely(x)	 __builtin_expect(!!(x), 0)
+
+#define SEC_JOB_RING_IS_FULL(pi, ci, ring_max_size, ring_threshold)    \
+	((((pi) + 1 + ((ring_max_size) - (ring_threshold))) &	\
+	  (ring_max_size - 1))  == ((ci)))
+
+#define SEC_CIRCULAR_COUNTER(x, max)   (((x) + 1) & (max - 1))
+
+ /* Struct representing various job ring registers */
+struct jobring_regs {
+#ifdef NXP_SEC_BE
+	unsigned int irba_h;
+	unsigned int irba_l;
+#else
+	unsigned int irba_l;
+	unsigned int irba_h;
+#endif
+	unsigned int rsvd1;
+	unsigned int irs;
+	unsigned int rsvd2;
+	unsigned int irsa;
+	unsigned int rsvd3;
+	unsigned int irja;
+#ifdef NXP_SEC_BE
+	unsigned int orba_h;
+	unsigned int orba_l;
+#else
+	unsigned int orba_l;
+	unsigned int orba_h;
+#endif
+	unsigned int rsvd4;
+	unsigned int ors;
+	unsigned int rsvd5;
+	unsigned int orjr;
+	unsigned int rsvd6;
+	unsigned int orsf;
+	unsigned int rsvd7;
+	unsigned int jrsta;
+	unsigned int rsvd8;
+	unsigned int jrint;
+	unsigned int jrcfg0;
+	unsigned int jrcfg1;
+	unsigned int rsvd9;
+	unsigned int irri;
+	unsigned int rsvd10;
+	unsigned int orwi;
+	unsigned int rsvd11;
+	unsigned int jrcr;
+};
+
+ /* Offsets representing common SEC Registers */
+#define SEC_REG_MCFGR_OFFSET		0x0004
+#define SEC_REG_SCFGR_OFFSET		0x000C
+#define SEC_REG_JR0ICIDR_MS_OFFSET	0x0010
+#define SEC_REG_JR0ICIDR_LS_OFFSET	0x0014
+#define SEC_REG_JR1ICIDR_MS_OFFSET	0x0018
+#define SEC_REG_JR1ICIDR_LS_OFFSET	0x001C
+#define SEC_REG_JR2ICIDR_MS_OFFSET	0x0020
+#define SEC_REG_JR2ICIDR_LS_OFFSET	0x0024
+#define SEC_REG_JR3ICIDR_MS_OFFSET	0x0028
+#define SEC_REG_JR3ICIDR_LS_OFFSET	0x002C
+#define SEC_REG_JRSTARTR_OFFSET		0x005C
+#define SEC_REG_CTPR_MS_OFFSET		0x0FA8
+
+ /* Offsets  representing various RNG registers */
+#define RNG_REG_RTMCTL_OFFSET		0x0600
+#define RNG_REG_RTSDCTL_OFFSET		0x0610
+#define RNG_REG_RTFRQMIN_OFFSET		0x0618
+#define RNG_REG_RTFRQMAX_OFFSET		0x061C
+#define RNG_REG_RDSTA_OFFSET		0x06C0
+#define ALG_AAI_SH_SHIFT		4
+
+ /* SEC Registers Bitmasks */
+#define	MCFGR_PS_SHIFT			16
+#define	MCFGR_AWCACHE_SHIFT			 8
+#define	MCFGR_AWCACHE_MASK	(0xF << MCFGR_AWCACHE_SHIFT)
+#define	MCFGR_ARCACHE_SHIFT			12
+#define	MCFGR_ARCACHE_MASK	(0xF << MCFGR_ARCACHE_SHIFT)
+
+#define SCFGR_RNGSH0		0x00000200
+#define	SCFGR_VIRT_EN		0x00008000
+
+#define JRICID_MS_LICID		0x80000000
+#define JRICID_MS_LAMTD		0x00020000
+#define JRICID_MS_AMTDT		0x00010000
+#define JRICID_MS_TZ		0x00008000
+#define JRICID_LS_SDID_MASK	0x00000FFF
+#define JRICID_LS_NSEQID_MASK	0x0FFF0000
+#define JRICID_LS_NSEQID_SHIFT		16
+#define JRICID_LS_SEQID_MASK	0x00000FFF
+
+#define JRSTARTR_STARTJR0	0x00000001
+#define JRSTARTR_STARTJR1	0x00000002
+#define JRSTARTR_STARTJR2	0x00000004
+#define JRSTARTR_STARTJR3	0x00000008
+
+#define CTPR_VIRT_EN_POR	0x00000002
+#define CTPR_VIRT_EN_INC	0x00000001
+
+ /* RNG RDSTA bitmask */
+#define RNG_STATE0_HANDLE_INSTANTIATED	0x00000001
+#define RTMCTL_PRGM 0x00010000	/* 1 -> program mode, 0 -> run mode */
+ /* use von Neumann data in both entropy shifter and statistical checker */
+#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_SC	 0
+ /* use raw data in both entropy shifter and statistical checker */
+#define RTMCTL_SAMP_MODE_RAW_ES_SC			 1
+ /* use von Neumann data in entropy shifter, raw data in statistical checker */
+#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_RAW_SC 2
+ /* invalid combination */
+#define RTMCTL_SAMP_MODE_INVALID			   3
+#define RTSDCTL_ENT_DLY_MIN	3200
+#define RTSDCTL_ENT_DLY_MAX	12800
+#define RTSDCTL_ENT_DLY_SHIFT	16
+#define RTSDCTL_ENT_DLY_MASK	(U(0xffff) << RTSDCTL_ENT_DLY_SHIFT)
+#define RTFRQMAX_DISABLE	   (1 << 20)
+
+ /* Constants for error handling on job ring */
+#define JR_REG_JRINT_ERR_TYPE_SHIFT	8
+#define JR_REG_JRINT_ERR_ORWI_SHIFT	16
+#define JR_REG_JRINIT_JRE_SHIFT			1
+
+#define JRINT_JRE			(1 << JR_REG_JRINIT_JRE_SHIFT)
+#define JRINT_ERR_WRITE_STATUS		(1 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_BAD_INPUT_BASE	(3 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_BAD_OUTPUT_BASE	(4 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_WRITE_2_IRBA		(5 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_WRITE_2_ORBA		(6 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_RES_B4_HALT		(7 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_REM_TOO_MANY		(8 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_ADD_TOO_MANY		(9 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_HALT_MASK		0x0C
+#define JRINT_ERR_HALT_INPROGRESS	0x04
+#define JRINT_ERR_HALT_COMPLETE		0x08
+
+#define JR_REG_JRCR_VAL_RESET		0x00000001
+
+#define JR_REG_JRCFG_LO_ICTT_SHIFT	0x10
+#define JR_REG_JRCFG_LO_ICDCT_SHIFT	0x08
+#define JR_REG_JRCFG_LO_ICEN_EN		0x02
+#define JR_REG_JRCFG_LO_IMSK_EN		0x01
+
+ /* Constants for Descriptor Processing errors */
+#define SEC_HW_ERR_SSRC_NO_SRC			0x00
+#define SEC_HW_ERR_SSRC_CCB_ERR			0x02
+#define SEC_HW_ERR_SSRC_JMP_HALT_U	0x03
+#define SEC_HW_ERR_SSRC_DECO		0x04
+#define SEC_HW_ERR_SSRC_JR		0x06
+#define SEC_HW_ERR_SSRC_JMP_HALT_COND   0x07
+
+#define SEC_HW_ERR_DECO_HFN_THRESHOLD   0xF1
+#define SEC_HW_ERR_CCB_ICV_CHECK_FAIL   0x0A
+
+ /* Macros for extracting error codes for the job ring */
+
+#define JR_REG_JRINT_ERR_TYPE_EXTRACT(value)			\
+				((value) & 0x00000F00)
+
+#define JR_REG_JRINT_ERR_ORWI_EXTRACT(value)			\
+				(((value) & 0x3FFF0000) >>	\
+				 JR_REG_JRINT_ERR_ORWI_SHIFT)
+
+#define JR_REG_JRINT_JRE_EXTRACT(value)				\
+				((value) & JRINT_JRE)
+
+ /* Macros for manipulating JR registers */
+typedef union {
+	uint64_t m_whole;
+	struct {
+#ifdef NXP_SEC_BE
+		uint32_t high;
+		uint32_t low;
+#else
+		uint32_t low;
+		uint32_t high;
+#endif
+	} m_halves;
+} ptr_addr_t;
+
+#if defined(CONFIG_PHYS_64BIT)
+#define sec_read_addr(a)		sec_in64((a))
+#define sec_write_addr(a, v)	sec_out64((a), (v))
+#else
+#define sec_read_addr(a)		sec_in32((a))
+#define sec_write_addr(a, v)		sec_out32((a), (v))
+#endif
+
+#define JR_REG(name, jr)	(CHAN_BASE(jr) + JR_REG_##name##_OFFSET)
+#define JR_REG_LO(name, jr)	(CHAN_BASE(jr) + JR_REG_##name##_OFFSET_LO)
+
+#define GET_JR_REG(name, jr)	(sec_in32(JR_REG(name, (jr))))
+#define GET_JR_REG_LO(name, jr)	(sec_in32(JR_REG_LO(name, (jr))))
+
+#define SET_JR_REG(name, jr, val)		\
+		(sec_out32(JR_REG(name, (jr)), (val)))
+
+#define SET_JR_REG_LO(name, jr, val)	\
+		(sec_out32(JR_REG_LO(name, (jr)), (val)))
+
+ /* STRUCTURES AND OTHER TYPEDEFS */
+ /*  Lists the possible states for a job ring. */
+typedef enum sec_job_ring_state_e {
+	SEC_JOB_RING_STATE_STARTED,	/* Job ring is initialized */
+	SEC_JOB_RING_STATE_RESET,	/* Job ring reset is in progress */
+} sec_job_ring_state_t;
+
+struct sec_job_ring_t {
+	/*
+	 * Consumer index for job ring (jobs array).
+	 * @note: cidx and pidx are accessed from
+	 * different threads.
+	 * Place the cidx and pidx inside the structure
+	 *  so that they lay on different cachelines, to
+	 * avoid false sharing between threads when the
+	 * threads run on different cores!
+	 */
+	uint32_t cidx;
+
+	/* Producer index for job ring (jobs array) */
+	uint32_t pidx;
+
+	/*  Ring of input descriptors. Size of array is power of 2 to allow
+	 * fast update of producer/consumer indexes with  bitwise operations.
+	 */
+	phys_addr_t *input_ring;
+
+	/*  Ring of output descriptors. */
+	struct sec_outring_entry *output_ring;
+
+	/* The file descriptor used for polling for interrupts notifications */
+	uint32_t irq_fd;
+
+	/* Model used by the SEC driver to receive notifications from SEC.
+	 * Can be one of:
+	 * #SEC_NOTIFICATION_TYPE_IRQ or
+	 * #SEC_NOTIFICATION_TYPE_POLL
+	 */
+	uint32_t jr_mode;
+	/* Base address for SEC's register memory for this job ring. */
+	void *register_base_addr;
+	/* indicates whether interrupt coalescing is enabled for the job ring */
+	uint8_t coalescing_en;
+	/* The state of this job ring */
+	sec_job_ring_state_t jr_state;
+};
+
+ /* Forward structure declaration */
+typedef struct sec_job_ring_t sec_job_ring_t;
+
+struct sec_outring_entry {
+	phys_addr_t desc;	/* Pointer to completed descriptor */
+	uint32_t status;	/* Status for completed descriptor */
+} __packed;
+
+ /* Lists the states possible for the SEC user space driver. */
+typedef enum sec_driver_state_e {
+	SEC_DRIVER_STATE_IDLE,	/*< Driver not initialized */
+	SEC_DRIVER_STATE_STARTED,	/*< Driver initialized and started */
+	SEC_DRIVER_STATE_RELEASE,	/*< Driver release is in progress */
+} sec_driver_state_t;
+
+ /* Union describing the possible error codes that */
+ /* can be set in the descriptor status word */
+
+union hw_error_code {
+	uint32_t error;
+	union {
+		struct {
+			uint32_t ssrc:4;
+			uint32_t ssed_val:28;
+		} __packed value;
+		struct {
+			uint32_t ssrc:4;
+			uint32_t res:28;
+		} __packed no_status_src;
+		struct {
+			uint32_t ssrc:4;
+			uint32_t jmp:1;
+			uint32_t res:11;
+			uint32_t desc_idx:8;
+			uint32_t cha_id:4;
+			uint32_t err_id:4;
+		} __packed ccb_status_src;
+		struct {
+			uint32_t ssrc:4;
+			uint32_t jmp:1;
+			uint32_t res:11;
+			uint32_t desc_idx:8;
+			uint32_t offset:8;
+		} __packed jmp_halt_user_src;
+		struct {
+			uint32_t ssrc:4;
+			uint32_t jmp:1;
+			uint32_t res:11;
+			uint32_t desc_idx:8;
+			uint32_t desc_err:8;
+		} __packed deco_src;
+		struct {
+			uint32_t ssrc:4;
+			uint32_t res:17;
+			uint32_t naddr:3;
+			uint32_t desc_err:8;
+		} __packed jr_src;
+		struct {
+			uint32_t ssrc:4;
+			uint32_t jmp:1;
+			uint32_t res:11;
+			uint32_t desc_idx:8;
+			uint32_t cond:8;
+		} __packed jmp_halt_cond_src;
+	} __packed error_desc;
+} __packed;
+
+ /* FUNCTION PROTOTYPES */
+
+/*
+ * @brief Initialize a job ring/channel in SEC device.
+ * Write configuration register/s to properly initialize a job ring.
+ *
+ * @param [in] job_ring     The job ring
+ *
+ * @retval 0 for success
+ * @retval other for error
+ */
+int hw_reset_job_ring(sec_job_ring_t *job_ring);
+
+/*
+ * @brief Reset a job ring/channel in SEC device.
+ * Write configuration register/s to reset a job ring.
+ *
+ * @param [in] job_ring     The job ring
+ *
+ * @retval 0 for success
+ * @retval -1 in case job ring reset failed
+ */
+int hw_shutdown_job_ring(sec_job_ring_t *job_ring);
+
+/*
+ * @brief Handle a job ring/channel error in SEC device.
+ * Identify the error type and clear error bits if required.
+ *
+ * @param [in]  job_ring    The job ring
+ * @param [in]  sec_error_code  error code as first read from SEC engine
+ */
+
+void hw_handle_job_ring_error(sec_job_ring_t *job_ring,
+			      uint32_t sec_error_code);
+/*
+ * @brief Handle a job ring error in the device.
+ * Identify the error type and print out an explanatory
+ * message.
+ *
+ * @param [in]  job_ring    The job ring
+ *
+ */
+
+int hw_job_ring_error(sec_job_ring_t *job_ring);
+
+/* @brief Set interrupt coalescing parameters on the Job Ring.
+ * @param [in]  job_ring       The job ring
+ * @param [in]  irq_coalescing_timer
+ *                             Interrupt coalescing timer threshold.
+ *                     This value determines the maximum
+ *                     amount of time after processing a descriptor
+ *                     before raising an interrupt.
+ * @param [in]  irq_coalescing_count
+ *                             Interrupt coalescing count threshold.
+ *                     This value determines how many descriptors
+ *                     are completed before raising an interrupt.
+ */
+
+int hw_job_ring_set_coalescing_param(sec_job_ring_t *job_ring,
+				     uint16_t irq_coalescing_timer,
+				     uint8_t irq_coalescing_count);
+
+/* @brief Enable interrupt coalescing on a job ring
+ * @param [in]  job_ring       The job ring
+ */
+
+int hw_job_ring_enable_coalescing(sec_job_ring_t *job_ring);
+
+/*
+ * @brief Disable interrupt coalescing on a job ring
+ * @param [in]  job_ring       The job ring
+ */
+
+int hw_job_ring_disable_coalescing(sec_job_ring_t *job_ring);
+
+/*
+ * @brief Poll the HW for already processed jobs in the JR
+ * and notify the available jobs to UA.
+ *
+ * @param [in]  job_ring            The job ring to poll.
+ * @param [in]  limit               The maximum number of jobs to notify.
+ *                                  If set to a negative value, all available
+ *                                  jobs are notified.
+ *
+ * @retval >=0 for the number of jobs notified to UA.
+ * @retval -1 for error
+ */
+
+int hw_poll_job_ring(struct sec_job_ring_t *job_ring, int32_t limit);
+
+/* @brief Poll the HW for already processed jobs in the JR
+ * and silently discard the available jobs or notify them to UA
+ * with indicated error code.
+ *
+ * @param [in,out]  job_ring        The job ring to poll.
+ * @param [in]  do_notify           Can be #TRUE or #FALSE.
+ *                                  Indicates whether descriptors are to be
+ *                                  discarded or notified to UA with the given
+ *                                  error_code.
+ * @param [in]  error_code          The detailed SEC error code.
+ * @param [out] notified_descs        Number of notified descriptors.
+ *                                 Can be NULL if do_notify is #FALSE
+ */
+void hw_flush_job_ring(struct sec_job_ring_t *job_ring,
+		       uint32_t do_notify,
+		       uint32_t error_code, uint32_t *notified_descs);
+
+/*
+ * @brief Flush job rings of any processed descs.
+ * The processed descs are silently dropped,
+ *  WITHOUT being notified to UA.
+ */
+void flush_job_rings(void);
+
+/*
+ * @brief Handle desc that generated error in SEC engine.
+ * Identify the exact type of error and handle the error.
+ * Depending on the error type, the job ring could be reset.
+ * All descs that are submitted for processing on this job ring
+ * are notified to User Application with error status and detailed error code.
+ *
+ * @param [in]  job_ring            Job ring
+ * @param [in]  sec_error_code      Error code read from job ring's Channel
+ *                                 Status Register
+ * @param [out] notified_descs      Number of notified descs. Can be NULL if
+ *                                 do_notify is #FALSE
+ * @param [out] do_driver_shutdown  If set to #TRUE, then the UA is returned
+ *                                  the code #SEC_PROCESSING_ERROR, which is
+ *                                  an indication that the UA must call
+ *                                  sec_release() after this.
+ */
+void sec_handle_desc_error(struct sec_job_ring_t *job_ring,
+			   uint32_t sec_error_code,
+			   uint32_t *notified_descs,
+			   uint32_t *do_driver_shutdown);
+
+/*
+ * @brief Release the software and hardware resources tied to a job ring.
+ * @param [in] job_ring The job ring
+ * @retval  0 for success
+ * @retval  -1 for error
+ */
+int shutdown_job_ring(struct sec_job_ring_t *job_ring);
+
+/*
+ * @brief Enable irqs on associated job ring.
+ * @param [in] job_ring The job ring
+ * @retval  0 for success
+ * @retval  -1 for error
+ */
+int jr_enable_irqs(struct sec_job_ring_t *job_ring);
+
+/*
+ * @brief Disable irqs on associated job ring.
+ * @param [in] job_ring The job ring
+ * @retval  0 for success
+ * @retval  -1 for error
+ */
+int jr_disable_irqs(struct sec_job_ring_t *job_ring);
+
+ /*
+  * IRJA - Input Ring Jobs Added Register shows
+  * how many new jobs were added to the Input Ring.
+  */
+static inline void hw_enqueue_desc_on_job_ring(struct jobring_regs *regs,
+					       int num)
+{
+	sec_out32(&regs->irja, num);
+}
+
+#endif /* _SEC_HW_SPECIFIC_H_ */
diff --git a/drivers/nxp/crypto/caam/include/sec_jr_driver.h b/drivers/nxp/crypto/caam/include/sec_jr_driver.h
new file mode 100644
index 0000000..1381eab
--- /dev/null
+++ b/drivers/nxp/crypto/caam/include/sec_jr_driver.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2017-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef _JR_DRIVER_H_
+#define _JR_DRIVER_H_
+
+#include "jr_driver_config.h"
+
+/* The maximum size of a SEC descriptor, in WORDs (32 bits). */
+#define MAX_DESC_SIZE_WORDS		64
+
+#define CAAM_TIMEOUT   200000	/* ms */
+
+/* Return codes for JR user space driver APIs */
+typedef enum sec_return_code_e {
+	SEC_SUCCESS = 0,
+	SEC_INVALID_INPUT_PARAM,
+	SEC_OUT_OF_MEMORY,
+	SEC_DESCRIPTOR_IN_FLIGHT,
+	SEC_LAST_DESCRIPTOR_IN_FLIGHT,
+	SEC_PROCESSING_ERROR,
+	SEC_DESC_PROCESSING_ERROR,
+	SEC_JR_IS_FULL,
+	SEC_DRIVER_RELEASE_IN_PROGRESS,
+	SEC_DRIVER_ALREADY_INITIALIZED,
+	SEC_DRIVER_NOT_INITIALIZED,
+	SEC_JOB_RING_RESET_IN_PROGRESS,
+	SEC_RESET_ENGINE_FAILED,
+	SEC_ENABLE_IRQS_FAILED,
+	SEC_DISABLE_IRQS_FAILED,
+	SEC_RETURN_CODE_MAX_VALUE,
+} sec_return_code_t;
+
+/* STRUCTURES AND OTHER TYPEDEFS */
+
+/*
+ * @brief Function called by JR User Space driver to notify every processed
+ *         descriptor.
+ *
+ * Callback provided by the User Application.
+ * Callback is invoked by JR User Space driver for each descriptor processed by
+ * SEC.
+ * @param [in] desc              Pointer to the descriptor that was processed.
+ * @param [in] status          Status word indicating processing result for
+ *                                this descriptor.
+ * @param [in] arg               Opaque data passed by User Application
+ *                                It is opaque from JR driver's point of view.
+ * @param [in] job_ring           The job ring handle on which the processed
+ *                               descriptor word was enqueued
+ */
+typedef void (*user_callback) (uint32_t *desc, uint32_t status,
+			       void *arg, void *job_ring);
+
+/*
+ * Structure encompassing a job descriptor which is to be processed
+ * by SEC. The user should also initialize this structure with the callback
+ * function pointer, which will be called by the driver after receiving the
+ * processed descriptor from SEC. User data is also passed in this data
+ * structure and will be sent as an argument to the user callback function.
+ */
+struct job_descriptor {
+	uint32_t desc[MAX_DESC_SIZE_WORDS];
+	void *arg;
+	user_callback callback;
+};
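+
+/*
+ * Usage sketch (an assumption for illustration, not mandated by this
+ * header): the caller builds the CAAM descriptor in desc[], then attaches
+ * a completion callback and opaque data before enqueueing. The names
+ * my_done() and my_ctx below are hypothetical.
+ *
+ *	struct job_descriptor jd;
+ *
+ *	// fill jd.desc[] with a valid CAAM descriptor, then:
+ *	jd.callback = my_done;
+ *	jd.arg = &my_ctx;
+ */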
+
+/*
+ * @brief Initialize the JR User Space driver.
+ * This function will handle initialization of the SEC library
+ * along with registering platform specific callbacks,
+ * as well as local data initialization.
+ * Call once during application startup.
+ * @note Global SEC initialization is done in SEC kernel driver.
+ * @note The hardware IDs of the initialized Job Rings are opaque to the UA.
+ * The exact Job Rings used by this library are decided between SEC user
+ * space driver and SEC kernel driver. A static partitioning of Job Rings is
+ * assumed, configured in the DTS (device tree source) file.
+ * @retval ::0                 for successful execution
+ * @retval ::-1                failure
+ */
+int sec_jr_lib_init(void);
+
+/*
+ * @brief Initialize the software and hardware resources tied to a job ring.
+ * @param [in] jr_mode         Mode to be used by SEC Driver to receive
+ *                             notifications from SEC. Can be either
+ *                             SEC_NOTIFICATION_TYPE_IRQ or
+ *                             SEC_NOTIFICATION_TYPE_POLL
+ * @param [in] irq_coalescing_timer This value determines the maximum
+ *                                     amount of time after processing a
+ *                                     descriptor before raising an interrupt.
+ * @param [in] irq_coalescing_count This value determines how many
+ *                                     descriptors are completed before
+ *                                     raising an interrupt.
+ * @param [in] reg_base_addr   Base address of the job ring registers
+ * @param [in] irq_id          The job ring interrupt identification number.
+ * @retval  job_ring_handle for successful job ring configuration
+ * @retval  NULL on error
+ */
+void *init_job_ring(uint8_t jr_mode,
+		    uint16_t irq_coalescing_timer,
+		    uint8_t irq_coalescing_count,
+		    void *reg_base_addr, uint32_t irq_id);
+
+/*
+ * @brief Release the resources used by the JR User Space driver.
+ * Reset and release SEC's job rings indicated by the User Application at
+ * init_job_ring() and free any memory allocated internally.
+ * Call once during application tear down.
+ * @note In case there are any descriptors in-flight (descriptors received by
+ * JR driver for processing and for which no response was yet provided to UA),
+ * the descriptors are discarded without any notifications to User Application.
+ * @retval ::0                 is returned for a successful execution
+ * @retval ::-1                is returned if JR driver release is in progress
+ */
+int sec_release(void);
+
+/*
+ * @brief Submit a descriptor for SEC processing.
+ * This function creates a "job" which is meant to instruct SEC HW
+ * to perform the processing on the input buffer. The "job" is enqueued
+ * in the associated Job Ring. The function will return after the "job"
+ * enqueue is finished. The function will not wait for SEC to
+ * start or/and finish the "job" processing.
+ * After the processing is finished the SEC HW writes the processing result
+ * to the provided output buffer.
+ * The caller must poll the JR driver using dequeue_jr()
+ * to receive notifications of the processing completion
+ * status. The notifications are received by caller by means of callback
+ * (see ::user_callback).
+ * @param [in]  job_ring_handle   The handle of the job ring on which
+ *                                descriptor is to be enqueued
+ * @param [in]  job_descriptor    The job descriptor structure of type
+ *                                struct job_descriptor. This structure
+ *                                should be filled with job descriptor along
+ *                                with callback function to be called after
+ *                                processing of descriptor and some
+ *                                opaque data to be passed to the
+ *                                callback function
+ *
+ * @retval ::0                 is returned for successful execution
+ * @retval ::-1                is returned if there is some enqueue failure
+ */
+int enq_jr_desc(void *job_ring_handle, struct job_descriptor *jobdescr);
+
+/*
+ * @brief Polls for available descriptors processed by SEC on a specific
+ * Job Ring
+ * This function polls the SEC Job Rings and delivers processed descriptors
+ * Each processed descriptor has a user_callback registered.
+ * This user_callback is invoked for each processed descriptor.
+ * The polling is stopped when "limit" descriptors are notified or when
+ * there are no more descriptors to notify.
+ * @note The dequeue_jr() API cannot be called from within a user_callback
+ * function
+ * @param [in]  job_ring_handle    The Job Ring handle.
+ * @param [in]  limit              This value represents the maximum number
+ *                                 of processed descriptors that can be
+ *                                 notified during this API call on this
+ *                                 Job Ring.
+ *                                 Note that fewer descriptors may be notified
+ *                                 if enough processed descriptors are not
+ *                                 available.
+ *                                 If limit has a negative value, then all
+ *                                 ready descriptors will be notified.
+ *
+ * @retval :: >=0                  is returned where retval is the total
+ *                                 Number of descriptors notified
+ *                                 during this function call.
+ * @retval :: -1                   is returned in case of some error
+ */
+int dequeue_jr(void *job_ring_handle, int32_t limit);
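+
+/*
+ * Typical call sequence (illustrative; caam.c in this series wires the
+ * driver up this way). The reg_base and jd variables are hypothetical.
+ *
+ *	void *jr = init_job_ring(SEC_NOTIFICATION_TYPE_POLL, 0, 0,
+ *				 reg_base, 0);
+ *
+ *	if (jr != NULL && enq_jr_desc(jr, &jd) == 0) {
+ *		(void)dequeue_jr(jr, -1);	// notify all completed jobs
+ *	}
+ *
+ *	sec_release();
+ */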
+
+#endif /* _JR_DRIVER_H_  */
diff --git a/drivers/nxp/crypto/caam/src/auth/auth.mk b/drivers/nxp/crypto/caam/src/auth/auth.mk
new file mode 100644
index 0000000..d1f8c75
--- /dev/null
+++ b/drivers/nxp/crypto/caam/src/auth/auth.mk
@@ -0,0 +1,12 @@
+#
+# Copyright 2018-2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+
+SEC_DRIVERS_PATH	:=	drivers/nxp/crypto/caam
+
+ifeq (${TRUSTED_BOARD_BOOT},1)
+AUTH_SOURCES +=  $(wildcard $(SEC_DRIVERS_PATH)/src/auth/*.c)
+endif
diff --git a/drivers/nxp/crypto/caam/src/auth/hash.c b/drivers/nxp/crypto/caam/src/auth/hash.c
new file mode 100644
index 0000000..1665df1
--- /dev/null
+++ b/drivers/nxp/crypto/caam/src/auth/hash.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include "caam.h"
+#include <common/debug.h>
+#include <drivers/auth/crypto_mod.h>
+
+#include "hash.h"
+#include "jobdesc.h"
+#include "sec_hw_specific.h"
+
+/* Since no allocator is available, a global static ctx is used.
+ * This means that only one ctx can be active at a time.
+ */
+
+static struct hash_ctx glbl_ctx;
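+
+/*
+ * Call-sequence sketch (illustrative; this is how verify_hash() in
+ * nxp_crypto.c drives this module):
+ *
+ *	void *ctx;
+ *
+ *	hash_init(SHA256, &ctx);
+ *	hash_update(SHA256, ctx, data, len);	// repeatable, bounded by MAX_SG
+ *	hash_final(SHA256, ctx, out, SHA256_BYTES);
+ */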
+
+static void hash_done(uint32_t *desc, uint32_t status, void *arg,
+		      void *job_ring)
+{
+	INFO("Hash Desc SUCCESS with status %x\n", status);
+}
+
+/***************************************************************************
+ * Function	: hash_init
+ * Arguments	: algo - hash algorithm
+ *		  ctx - SHA context
+ * Return	: 0 on success, -1 if a context is already active
+ * Description	: This function initializes the context for SHA calculation
+ ***************************************************************************/
+int hash_init(enum hash_algo algo, void **ctx)
+{
+	if (glbl_ctx.active == false) {
+		memset(&glbl_ctx, 0, sizeof(struct hash_ctx));
+		glbl_ctx.active = true;
+		glbl_ctx.algo = algo;
+		*ctx = &glbl_ctx;
+		return 0;
+	} else {
+		return -1;
+	}
+}
+
+/***************************************************************************
+ * Function	: hash_update
+ * Arguments	: ctx - SHA context
+ *		  buffer - Data
+ *		  length - Length
+ * Return	: -1 on error
+ *		  0 on SUCCESS
+ * Description	: This function creates SG entry of the data provided
+ ***************************************************************************/
+int hash_update(enum hash_algo algo, void *context, void *data_ptr,
+		unsigned int data_len)
+{
+	struct hash_ctx *ctx = context;
+	/* MAX_SG would be MAX_SG_ENTRIES + key + hdr + sg table */
+	if (ctx->sg_num >= MAX_SG) {
+		ERROR("Reached limit for calling %s\n", __func__);
+		ctx->active = false;
+		return -EINVAL;
+
+	}
+
+	if (ctx->algo != algo) {
+		ERROR("ctx for algo not correct\n");
+		ctx->active = false;
+		return -EINVAL;
+	}
+
+#if defined(SEC_MEM_NON_COHERENT) && defined(IMAGE_BL2)
+	flush_dcache_range((uintptr_t)data_ptr, data_len);
+	dmbsy();
+#endif
+
+#ifdef CONFIG_PHYS_64BIT
+	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_hi,
+		  (uint32_t) ((uintptr_t) data_ptr >> 32));
+#else
+	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_hi, 0x0);
+#endif
+	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_lo, (uintptr_t) data_ptr);
+
+	sec_out32(&ctx->sg_tbl[ctx->sg_num].len_flag,
+		  (data_len & SG_ENTRY_LENGTH_MASK));
+
+	ctx->sg_num++;
+
+	ctx->len += data_len;
+
+	return 0;
+}
+
+/***************************************************************************
+ * Function	: hash_final
+ * Arguments	: ctx - SHA context
+ * Return	: SUCCESS or FAILURE
+ * Description	: This function sets the final bit and enqueues the descriptor
+ ***************************************************************************/
+int hash_final(enum hash_algo algo, void *context, void *hash_ptr,
+	       unsigned int hash_len)
+{
+	int ret = 0;
+	struct hash_ctx *ctx = context;
+	uint32_t final = 0U;
+
+	struct job_descriptor jobdesc __aligned(CACHE_WRITEBACK_GRANULE);
+
+	jobdesc.arg = NULL;
+	jobdesc.callback = hash_done;
+
+	if (ctx->algo != algo) {
+		ERROR("ctx for algo not correct\n");
+		ctx->active = false;
+		return -EINVAL;
+	}
+
+	final = sec_in32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag) |
+	    SG_ENTRY_FINAL_BIT;
+	sec_out32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag, final);
+
+	dsb();
+
+	/* Construct the hash job descriptor */
+	cnstr_hash_jobdesc(jobdesc.desc, (uint8_t *) ctx->sg_tbl,
+			   ctx->len, hash_ptr);
+
+#if defined(SEC_MEM_NON_COHERENT) && defined(IMAGE_BL2)
+	flush_dcache_range((uintptr_t)ctx->sg_tbl,
+			   (sizeof(struct sg_entry) * MAX_SG));
+	inv_dcache_range((uintptr_t)hash_ptr, hash_len);
+
+	dmbsy();
+#endif
+
+	/* Finally, run the descriptor to compute the hash */
+	ret = run_descriptor_jr(&jobdesc);
+	if (ret != 0) {
+		ERROR("Error in running descriptor\n");
+		ret = -1;
+	}
+	ctx->active = false;
+	return ret;
+}
diff --git a/drivers/nxp/crypto/caam/src/auth/nxp_crypto.c b/drivers/nxp/crypto/caam/src/auth/nxp_crypto.c
new file mode 100644
index 0000000..646e981
--- /dev/null
+++ b/drivers/nxp/crypto/caam/src/auth/nxp_crypto.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <stddef.h>
+#include <string.h>
+
+#include "caam.h"
+#include <common/debug.h>
+#include <drivers/auth/crypto_mod.h>
+
+#include "hash.h"
+#include "rsa.h"
+
+#define LIB_NAME		"NXP crypto"
+
+/*
+ * Initialize the library and export the descriptor
+ */
+static void init(void)
+{
+	/* Initialize the NXP crypto library */
+	NOTICE("Initializing & configuring SEC block.\n");
+
+	if (config_sec_block() < 0) {
+		ERROR("Init & config failure for caam.\n");
+	}
+}
+
+/*
+ * Verify a signature.
+ *
+ * For IMG_PLAT - data points to a PKCS#1 v1.5 encoded HASH
+ * sig_alg will be RSA or ECC
+ * Parameters are passed using the DER encoding format following the ASN.1
+ * structures detailed above.
+ */
+static int verify_signature(void *data_ptr, unsigned int data_len,
+			    void *sig_ptr, unsigned int sig_len,
+			    void *sign_alg, unsigned int sig_alg_len,
+			    void *pk_ptr, unsigned int pk_len)
+{
+	int ret = CRYPTO_SUCCESS;
+
+	enum sig_alg alg = *(enum sig_alg *)sign_alg;
+
+	switch (alg) {
+	case RSA:
+		NOTICE("Verifying RSA\n");
+		ret = rsa_verify_signature(data_ptr, data_len, sig_ptr, sig_len,
+					   pk_ptr, pk_len);
+		break;
+	case ECC:
+	default:
+		ret = CRYPTO_ERR_SIGNATURE;
+		break;
+	}
+
+	if (ret != 0) {
+		ERROR("Signature verification failed\n");
+	}
+	return ret;
+
+}
+
+/*
+ * Match a hash
+ *
+ * Digest info is passed as a table of SHA-256 hashes and digest_info_len
+ * is number of entries in the table
+ * This implementation is very specific to the CSF header parser ROTPK
+ * comparison.
+ */
+static int verify_hash(void *data_ptr, unsigned int data_len,
+		       void *digest_info_ptr, unsigned int digest_info_len)
+{
+	void *ctx = NULL;
+	int i = 0, ret = 0;
+	enum hash_algo algo = SHA256;
+	uint8_t hash[SHA256_BYTES] __aligned(CACHE_WRITEBACK_GRANULE) = {0};
+	uint32_t digest_size = SHA256_BYTES;
+	uint8_t *hash_tbl = digest_info_ptr;
+
+	NOTICE("Verifying hash\n");
+	ret = hash_init(algo, &ctx);
+	if (ret != 0) {
+		return CRYPTO_ERR_HASH;
+	}
+
+	/* Update hash with that of SRK table */
+	ret = hash_update(algo, ctx, data_ptr, data_len);
+	if (ret != 0) {
+		return CRYPTO_ERR_HASH;
+	}
+
+	/* Copy hash at destination buffer */
+	ret = hash_final(algo, ctx, hash, digest_size);
+	if (ret != 0) {
+		return CRYPTO_ERR_HASH;
+	}
+
+	VERBOSE("%s Calculated hash\n", __func__);
+	for (i = 0; i < SHA256_BYTES/4; i++) {
+		VERBOSE("%x\n", *((uint32_t *)hash + i));
+	}
+
+	for (i = 0; i < digest_info_len; i++) {
+		if (memcmp(hash, (hash_tbl + (i * digest_size)),
+			   digest_size) == 0) {
+			return CRYPTO_SUCCESS;
+		}
+	}
+
+	return CRYPTO_ERR_HASH;
+}
+
+/*
+ * Register crypto library descriptor
+ */
+REGISTER_CRYPTO_LIB(LIB_NAME, init, verify_signature, verify_hash, NULL);
diff --git a/drivers/nxp/crypto/caam/src/auth/rsa.c b/drivers/nxp/crypto/caam/src/auth/rsa.c
new file mode 100644
index 0000000..0c44462
--- /dev/null
+++ b/drivers/nxp/crypto/caam/src/auth/rsa.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include "caam.h"
+#include <common/debug.h>
+#include <drivers/auth/crypto_mod.h>
+
+#include "jobdesc.h"
+#include "rsa.h"
+#include "sec_hw_specific.h"
+
+/* This array contains DER value for SHA-256 */
+static const uint8_t hash_identifier[] = {
+	0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60,
+	0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00,
+	0x04, 0x20
+};
+
+static void rsa_done(uint32_t *desc, uint32_t status, void *arg,
+		     void *job_ring)
+{
+	INFO("RSA Desc SUCCESS with status %x\n", status);
+}
+
+static int rsa_public_verif_sec(uint8_t *sign, uint8_t *to,
+				uint8_t *rsa_pub_key, uint32_t klen)
+{
+	int ret = 0;
+	struct rsa_context ctx __aligned(CACHE_WRITEBACK_GRANULE);
+	struct job_descriptor jobdesc __aligned(CACHE_WRITEBACK_GRANULE);
+
+	jobdesc.arg = NULL;
+	jobdesc.callback = rsa_done;
+
+	memset(&ctx, 0, sizeof(struct rsa_context));
+
+	ctx.pkin.a = sign;
+	ctx.pkin.a_siz = klen;
+	ctx.pkin.n = rsa_pub_key;
+	ctx.pkin.n_siz = klen;
+	ctx.pkin.e = rsa_pub_key + klen;
+	ctx.pkin.e_siz = klen;
+
+	cnstr_jobdesc_pkha_rsaexp(jobdesc.desc, &ctx.pkin, to, klen);
+
+#if defined(SEC_MEM_NON_COHERENT) && defined(IMAGE_BL2)
+	flush_dcache_range((uintptr_t)sign, klen);
+	flush_dcache_range((uintptr_t)rsa_pub_key, 2 * klen);
+	flush_dcache_range((uintptr_t)&ctx.pkin, sizeof(ctx.pkin));
+	inv_dcache_range((uintptr_t)to, klen);
+
+	dmbsy();
+	dsbsy();
+	isb();
+#endif
+
+	/* Finally, run the descriptor to perform the RSA public key operation */
+	ret = run_descriptor_jr(&jobdesc);
+	if (ret != 0) {
+		ERROR("Error in running descriptor\n");
+		ret = -1;
+	}
+#if defined(SEC_MEM_NON_COHERENT) && defined(IMAGE_BL2)
+	inv_dcache_range((uintptr_t)to, klen);
+	dmbsy();
+	dsbsy();
+	isb();
+#endif
+	return ret;
+}
+
+/*
+ * Construct the encoded hash EM' as per PKCS#1 v1.5. This function calculates
+ * pointers for padding, DER value and hash. And finally, constructs EM'
+ * which includes hash of complete CSF header and ESBC image. If SG flag
+ * is on, hash of SG table and entries is also included.
+ */
+static int construct_img_encoded_hash_second(uint8_t *hash, uint8_t hash_len,
+					     uint8_t *encoded_hash_second,
+					     unsigned int key_len)
+{
+	/*
+	 * RSA PKCSv1.5 encoding format for encoded message is below
+	 * EM = 0x0 || 0x1 || PS || 0x0 || DER || Hash
+	 * PS is Padding String
+	 * DER is DER value for SHA-256
+	 * Hash is SHA-256 hash
+	 * *********************************************************
+	 * representative points to first byte of EM initially and is
+	 * filled with 0x0
+	 * representative is incremented by 1 and second byte is filled
+	 * with 0x1
+	 * padding points to third byte of EM
+	 * digest points to full length of EM - 32 bytes
+	 * hash_id (DER value) points to 19 bytes before digest
+	 * separator is one byte which separates padding and DER
+	 */
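+	/*
+	 * Worked example (assuming a 2048-bit modulus, i.e. key_len / 2 ==
+	 * 256): len = 255 and EM is 256 bytes laid out as
+	 * 0x00 | 0x01 | 202 bytes of 0xff | 0x00 | 19-byte DER | 32-byte hash
+	 * (1 + 1 + 202 + 1 + 19 + 32 = 256 bytes).
+	 */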
+
+	unsigned int len;
+	uint8_t *representative;
+	uint8_t *padding, *digest;
+	uint8_t *hash_id, *separator;
+	int i;
+	int ret = 0;
+
+	if (hash_len != SHA256_BYTES) {
+		return -1;
+	}
+
+	/* key_len covers modulus plus exponent, so modulus length is key_len / 2 */
+	len = (key_len / 2U) - 1U;
+	representative = encoded_hash_second;
+	representative[0] = 0U;
+	representative[1] = 1U;	/* block type 1 */
+
+	padding = &representative[2];
+	digest = &representative[1] + len - 32;
+	hash_id = digest - sizeof(hash_identifier);
+	separator = hash_id - 1;
+
+	/* fill padding area pointed by padding with 0xff */
+	memset(padding, 0xff, separator - padding);
+
+	/* fill byte pointed by separator */
+	*separator = 0U;
+
+	/* fill SHA-256 DER value  pointed by HashId */
+	memcpy(hash_id, hash_identifier, sizeof(hash_identifier));
+
+	/* fill hash pointed by Digest */
+	for (i = 0; i < SHA256_BYTES; i++) {
+		digest[i] = hash[i];
+	}
+
+	return ret;
+}
+
+int rsa_verify_signature(void *hash_ptr, unsigned int hash_len,
+			 void *sig_ptr, unsigned int sig_len,
+			 void *pk_ptr, unsigned int pk_len)
+{
+	uint8_t img_encoded_hash_second[RSA_4K_KEY_SZ_BYTES];
+	uint8_t encoded_hash[RSA_4K_KEY_SZ_BYTES] __aligned(CACHE_WRITEBACK_GRANULE);
+	int ret = 0;
+
+	ret = construct_img_encoded_hash_second(hash_ptr, hash_len,
+						img_encoded_hash_second,
+						pk_len);
+	if (ret != 0) {
+		ERROR("Encoded Hash Failure\n");
+		return CRYPTO_ERR_SIGNATURE;
+	}
+
+	ret = rsa_public_verif_sec(sig_ptr, encoded_hash, pk_ptr, pk_len / 2);
+	if (ret != 0) {
+		ERROR("RSA signature Failure\n");
+		return CRYPTO_ERR_SIGNATURE;
+	}
+
+	ret = memcmp(img_encoded_hash_second, encoded_hash, sig_len);
+	if (ret != 0) {
+		ERROR("Comparison Failure\n");
+		return CRYPTO_ERR_SIGNATURE;
+	}
+
+	return CRYPTO_SUCCESS;
+}
diff --git a/drivers/nxp/crypto/caam/src/caam.c b/drivers/nxp/crypto/caam/src/caam.c
new file mode 100644
index 0000000..e594f7b
--- /dev/null
+++ b/drivers/nxp/crypto/caam/src/caam.c
@@ -0,0 +1,339 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include "caam.h"
+#include <common/debug.h>
+#include "jobdesc.h"
+#include "sec_hw_specific.h"
+
+static uintptr_t g_nxp_caam_addr;
+static void *job_ring;
+
+uintptr_t get_caam_addr(void)
+{
+	if (g_nxp_caam_addr == 0) {
+		ERROR("Sec Init is not done.\n");
+		panic();
+	}
+	return g_nxp_caam_addr;
+}
+
+/* This function sets the TZ bit for the Job ring number passed as @num */
+static void config_tz(int num)
+{
+	uint32_t jricid;
+
+	/* Setting TZ bit of job ring */
+	switch (num) {
+	case 0:
+		jricid = sec_in32(g_nxp_caam_addr + SEC_REG_JR0ICIDR_MS_OFFSET);
+		sec_out32(g_nxp_caam_addr + SEC_REG_JR0ICIDR_MS_OFFSET,
+			  jricid | JRICID_MS_TZ);
+		break;
+	case 1:
+		jricid = sec_in32(g_nxp_caam_addr + SEC_REG_JR1ICIDR_MS_OFFSET);
+		sec_out32(g_nxp_caam_addr + SEC_REG_JR1ICIDR_MS_OFFSET,
+			  jricid | JRICID_MS_TZ);
+		break;
+	case 2:
+		jricid = sec_in32(g_nxp_caam_addr + SEC_REG_JR2ICIDR_MS_OFFSET);
+		sec_out32(g_nxp_caam_addr + SEC_REG_JR2ICIDR_MS_OFFSET,
+			  jricid | JRICID_MS_TZ);
+		break;
+	case 3:
+		jricid = sec_in32(g_nxp_caam_addr + SEC_REG_JR3ICIDR_MS_OFFSET);
+		sec_out32(g_nxp_caam_addr + SEC_REG_JR3ICIDR_MS_OFFSET,
+			  jricid | JRICID_MS_TZ);
+		break;
+	default:
+		break;
+	}
+}
+
+/* This function checks if virtualization is enabled for the JR and
+ * accordingly sets the bit for starting JR<num> in the JRSTARTR register.
+ */
+static inline void start_jr(int num)
+{
+	uint32_t ctpr = sec_in32((g_nxp_caam_addr + SEC_REG_CTPR_MS_OFFSET));
+	uint32_t tmp = sec_in32((g_nxp_caam_addr + SEC_REG_JRSTARTR_OFFSET));
+	uint32_t scfgr = sec_in32((g_nxp_caam_addr + SEC_REG_SCFGR_OFFSET));
+	bool start = false;
+
+	if ((ctpr & CTPR_VIRT_EN_INC) != 0U) {
+		if (((ctpr & CTPR_VIRT_EN_POR) != 0U) ||
+		    ((scfgr & SCFGR_VIRT_EN) != 0U)) {
+			start = true;
+		}
+	} else {
+		if ((ctpr & CTPR_VIRT_EN_POR) != 0U) {
+			start = true;
+		}
+	}
+
+	if (start == true) {
+		switch (num) {
+		case 0:
+			tmp |= JRSTARTR_STARTJR0;
+			break;
+		case 1:
+			tmp |= JRSTARTR_STARTJR1;
+			break;
+		case 2:
+			tmp |= JRSTARTR_STARTJR2;
+			break;
+		case 3:
+			tmp |= JRSTARTR_STARTJR3;
+			break;
+		default:
+			break;
+		}
+	}
+	sec_out32((g_nxp_caam_addr + SEC_REG_JRSTARTR_OFFSET), tmp);
+}
+
+/* This function configures the Job Ring.
+ * JR3 is reserved for use by the Secure world.
+ */
+static int configure_jr(int num)
+{
+	int ret;
+	void *reg_base_addr;
+
+	switch (num) {
+	case 0:
+		reg_base_addr = (void *)(g_nxp_caam_addr + CAAM_JR0_OFFSET);
+		break;
+	case 1:
+		reg_base_addr = (void *)(g_nxp_caam_addr + CAAM_JR1_OFFSET);
+		break;
+	case 2:
+		reg_base_addr = (void *)(g_nxp_caam_addr + CAAM_JR2_OFFSET);
+		break;
+	case 3:
+		reg_base_addr = (void *)(g_nxp_caam_addr + CAAM_JR3_OFFSET);
+		break;
+	default:
+		/* Invalid job ring number, avoid using an uninitialized base */
+		return -1;
+	}
+
+	/* Initialize the JR library */
+	ret = sec_jr_lib_init();
+	if (ret != 0) {
+		ERROR("Error in sec_jr_lib_init");
+		return -1;
+	}
+
+	start_jr(num);
+
+	/* Do HW configuration of the JR */
+	job_ring = init_job_ring(SEC_NOTIFICATION_TYPE_POLL, 0, 0,
+				 reg_base_addr, 0);
+
+	if (job_ring == NULL) {
+		ERROR("Error in init_job_ring");
+		return -1;
+	}
+
+	return ret;
+}
+
+/* TBD - Configures and locks the ICID values for various JR */
+static inline void configure_icid(void)
+{
+}
+
+/* TBD configures the TZ settings of RTIC */
+static inline void configure_rtic(void)
+{
+}
+
+int sec_init(uintptr_t nxp_caam_addr)
+{
+	g_nxp_caam_addr = nxp_caam_addr;
+	return config_sec_block();
+}
+
+/* This function configures the SEC block:
+ * - It does basic parameter setting
+ * - Configures the default Job ring assigned to TZ /secure world
+ * - Instantiates the RNG
+ */
+int config_sec_block(void)
+{
+	int ret = 0;
+	uint32_t mcfgr;
+
+	if (g_nxp_caam_addr == 0) {
+		ERROR("Sec Init is not done.\n");
+		return -1;
+	} else if (job_ring != NULL) {
+		NOTICE("Sec is already initialized and configured.\n");
+		return ret;
+	}
+
+	mcfgr = sec_in32(g_nxp_caam_addr + SEC_REG_MCFGR_OFFSET);
+
+	/* Modify CAAM Read/Write attributes
+	 * AXI Write - Cacheable, WB and WA
+	 * AXI Read - Cacheable, RA
+	 */
+#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS2088A)
+	mcfgr = (mcfgr & ~MCFGR_AWCACHE_MASK) | (0xb << MCFGR_AWCACHE_SHIFT);
+	mcfgr = (mcfgr & ~MCFGR_ARCACHE_MASK) | (0x6 << MCFGR_ARCACHE_SHIFT);
+#else
+	mcfgr = (mcfgr & ~MCFGR_AWCACHE_MASK) | (0x2 << MCFGR_AWCACHE_SHIFT);
+#endif
+
+	/* Set PS bit to 1 */
+#ifdef CONFIG_PHYS_64BIT
+	mcfgr |= (1 << MCFGR_PS_SHIFT);
+#endif
+	sec_out32(g_nxp_caam_addr + SEC_REG_MCFGR_OFFSET, mcfgr);
+
+	/* Assign ICID to all Job rings and lock them for usage */
+	configure_icid();
+
+	/* Configure the RTIC */
+	configure_rtic();
+
+	/* Configure the default JR for usage */
+	ret = configure_jr(DEFAULT_JR);
+	if (ret != 0) {
+		ERROR("\nFSL_JR: configuration failure\n");
+		return -1;
+	}
+	/* Do TZ configuration of default JR for sec firmware */
+	config_tz(DEFAULT_JR);
+
+#ifdef CONFIG_RNG_INIT
+	/* Instantiate the RNG */
+	ret = hw_rng_instantiate();
+	if (ret != 0) {
+		ERROR("\nRNG Instantiation failure\n");
+		return -1;
+	}
+#endif
+
+	return ret;
+}
+
+/* This function is used for submitting a job to the Job Ring
+ * [param] [in] - jobdesc to be submitted
+ * Return - -1 in case of error and 0 in case of SUCCESS
+ */
+int run_descriptor_jr(struct job_descriptor *jobdesc)
+{
+	int i = 0, ret = 0;
+	uint32_t *desc_addr = jobdesc->desc;
+	uint32_t desc_len = desc_length(jobdesc->desc);
+	uint32_t desc_word;
+
+	for (i = 0; i < desc_len; i++) {
+		desc_word = desc_addr[i];
+		VERBOSE("%x\n", desc_word);
+		sec_out32((uint32_t *)&desc_addr[i], desc_word);
+	}
+	dsb();
+
+#if defined(SEC_MEM_NON_COHERENT) && defined(IMAGE_BL2)
+	flush_dcache_range((uintptr_t)desc_addr, desc_len * 4);
+	dmbsy();
+	dsbsy();
+	isb();
+#endif
+
+	ret = enq_jr_desc(job_ring, jobdesc);
+	if (ret == 0) {
+		VERBOSE("JR enqueue done...\n");
+	} else {
+		ERROR("Error in Enqueue\n");
+		return ret;
+	}
+
+	VERBOSE("Dequeue in progress");
+
+	ret = dequeue_jr(job_ring, -1);
+	if (ret >= 0) {
+		VERBOSE("Dequeue of %x desc success\n", ret);
+		ret = 0;
+	} else {
+		ERROR("deq_ret %x\n", ret);
+		ret = -1;
+	}
+
+	return ret;
+}
+
+/* This function returns a random number using the HW RNG algorithm.
+ * In case of failure, the random number returned is 0.
+ * rngWidth = 0 - 32 bit random number
+ * rngWidth > 0 means 64 bit random number
+ */
+unsigned long long get_random(int rngWidth)
+{
+	unsigned long long result = 0;
+	uint8_t rand_byte[64] __aligned(CACHE_WRITEBACK_GRANULE);
+	uint8_t rand_byte_swp[8];
+	int bytes = 0;
+	int i = 0;
+	int ret = 0;
+
+#ifdef CAAM_TEST
+	rand_byte[0] = U(0x12);
+	rand_byte[1] = U(0x34);
+	rand_byte[2] = U(0x56);
+	rand_byte[3] = U(0x78);
+	rand_byte[4] = U(0x9a);
+	rand_byte[5] = U(0xbc);
+	rand_byte[6] = U(0xde);
+	rand_byte[7] = U(0xf1);
+#endif
+
+	if (rngWidth == 0U) {
+		bytes = 4;
+	} else {
+		bytes = 8;
+	}
+
+	memset(rand_byte, 0, 64);
+
+	ret = get_rand_bytes_hw(rand_byte, bytes);
+
+	for (i = 0; i < bytes; i++) {
+		if (ret != 0) {
+			/* Return 0 in case of failure */
+			rand_byte_swp[i] = 0;
+		} else {
+			rand_byte_swp[i] = rand_byte[bytes - i - 1];
+			result = (result << 8) | rand_byte_swp[i];
+		}
+	}
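+
+	/*
+	 * Illustrative example: if rand_byte[] held { 0x12, 0x34, 0x56, 0x78,
+	 * 0x9a, 0xbc, 0xde, 0xf1 } (the CAAM_TEST pattern) and bytes == 8,
+	 * the byte reversal above would yield result == 0xf1debc9a78563412.
+	 */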
+
+	INFO("result %llx\n", result);
+
+	return result;
+
+} /* get_random() */
+
+unsigned int _get_hw_unq_key(uint64_t hw_key_phy_addr, unsigned int size)
+{
+	int ret = 0;
+	uint8_t *hw_key = (uint8_t *) ptov((phys_addr_t *) hw_key_phy_addr);
+
+	ret = get_hw_unq_key_blob_hw(hw_key, size);
+
+	return ret;
+}
diff --git a/drivers/nxp/crypto/caam/src/hw_key_blob.c b/drivers/nxp/crypto/caam/src/hw_key_blob.c
new file mode 100644
index 0000000..0720695
--- /dev/null
+++ b/drivers/nxp/crypto/caam/src/hw_key_blob.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "caam.h"
+#include <common/debug.h>
+#include "jobdesc.h"
+#include "sec_hw_specific.h"
+
+
+/* Callback function called after the blob descriptor is submitted to SEC
+ */
+static void blob_done(uint32_t *desc, uint32_t status, void *arg,
+		      void *job_ring)
+{
+	INFO("Blob Desc SUCCESS with status %x\n", status);
+}
+
+/* @brief Submit descriptor to create blob
+ * @retval 0 on success
+ * @retval -1 on error
+ */
+int get_hw_unq_key_blob_hw(uint8_t *hw_key, int size)
+{
+	int ret = 0;
+	int i = 0;
+
+	uint32_t key_sz = KEY_IDNFR_SZ_BYTES;
+	uint8_t key_data[KEY_IDNFR_SZ_BYTES];
+	uint8_t in_data[16];
+	uint8_t out_data[16 + KEY_BLOB_SIZE + MAC_SIZE];
+	struct job_descriptor desc __aligned(CACHE_WRITEBACK_GRANULE);
+	struct job_descriptor *jobdesc = &desc;
+	uint32_t in_sz = 16U;
+
+	/* Output blob will have 32 bytes key blob in beginning and
+	 * 16 byte HMAC identifier at end of data blob
+	 */
+	uint32_t out_sz = in_sz + KEY_BLOB_SIZE + MAC_SIZE;
+
+	uint32_t operation = CMD_OPERATION | OP_TYPE_ENCAP_PROTOCOL |
+	    OP_PCLID_BLOB | BLOB_PROTO_INFO;
+
+	memset(key_data, 0xff, KEY_IDNFR_SZ_BYTES);
+	memset(in_data, 0x00, in_sz);
+	memset(out_data, 0x00, in_sz);
+
+	jobdesc->arg = NULL;
+	jobdesc->callback = blob_done;
+
+	INFO("\nGenerating Master Key Verification Blob.\n");
+
+	/* Construct the blob encapsulation descriptor */
+	ret = cnstr_hw_encap_blob_jobdesc(jobdesc->desc, key_data, key_sz,
+					  CLASS_2, in_data, in_sz, out_data,
+					  out_sz, operation);
+
+	/* Finally, generate the blob. */
+	ret = run_descriptor_jr(jobdesc);
+	if (ret != 0) {
+		ERROR("Error in running hw unq key blob descriptor\n");
+		return -1;
+	}
+	/* Copying alternate bytes of the Master Key Verification Blob.
+	 */
+	for (i = 0; i < size; i++) {
+		hw_key[i] = out_data[2 * i];
+	}
+
+	return ret;
+}
diff --git a/drivers/nxp/crypto/caam/src/jobdesc.c b/drivers/nxp/crypto/caam/src/jobdesc.c
new file mode 100644
index 0000000..9c235af
--- /dev/null
+++ b/drivers/nxp/crypto/caam/src/jobdesc.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2017-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "caam.h"
+#include <common/debug.h>
+#include "jobdesc.h"
+#include "rsa.h"
+#include "sec_hw_specific.h"
+
+
+/* Return the length of the descriptor from its first word */
+uint32_t desc_length(uint32_t *desc)
+{
+	return desc[0] & DESC_LEN_MASK;
+}
+
+/* Update start index in first word of descriptor */
+void desc_update_start_index(uint32_t *desc, uint32_t index)
+{
+	desc[0] |= (index << DESC_START_SHIFT);
+}
+
+/* Initialize the descriptor */
+void desc_init(uint32_t *desc)
+{
+	*desc = 0;
+}
+
+/* Add word in the descriptor and increment the length */
+void desc_add_word(uint32_t *desc, uint32_t word)
+{
+	uint32_t len = desc_length(desc);
+
+	/* Add Word at Last */
+	uint32_t *last = desc + len;
+	*last = word;
+
+	/* Increase the length */
+	desc[0] += 1;
+}
+
+/* Add Pointer to the descriptor */
+void desc_add_ptr(uint32_t *desc, phys_addr_t *ptr)
+{
+	uint32_t len = desc_length(desc);
+
+	/* Add Word at Last */
+	phys_addr_t *last = (phys_addr_t *) (desc + len);
+
+#ifdef CONFIG_PHYS_64BIT
+	ptr_addr_t *ptr_addr = (ptr_addr_t *) last;
+
+	ptr_addr->m_halves.high = PHYS_ADDR_HI(ptr);
+	ptr_addr->m_halves.low = PHYS_ADDR_LO(ptr);
+#else
+	*last = ptr;
+#endif
+
+	/* Increase the length */
+	desc[0] += (uint32_t) (sizeof(phys_addr_t) / sizeof(uint32_t));
+}
+
+/* Descriptor to generate Random words */
+int cnstr_rng_jobdesc(uint32_t *desc, uint32_t state_handle,
+		      uint32_t *add_inp, uint32_t add_ip_len,
+		      uint8_t *out_data, uint32_t len)
+{
+	phys_addr_t *phys_addr_out = vtop(out_data);
+
+	/* Current descriptor supports only 64K length */
+	if (len > U(0xffff)) {
+		return -1;
+	}
+	/* Additional Input not supported by current descriptor */
+	if (add_ip_len > 0U) {
+		return -1;
+	}
+
+	VERBOSE("Constructing descriptor\n");
+	desc_init(desc);
+	/* Class1 Alg Operation,RNG Optype, Generate */
+	desc_add_word(desc, U(0xb0800000));
+	desc_add_word(desc, U(0x82500000) | (state_handle << ALG_AAI_SH_SHIFT));
+	desc_add_word(desc, U(0x60340000) | len);
+	desc_add_ptr(desc, phys_addr_out);
+
+	return 0;
+
+}
+
+/* Construct descriptor to instantiate RNG */
+int cnstr_rng_instantiate_jobdesc(uint32_t *desc)
+{
+	desc_init(desc);
+	desc_add_word(desc, U(0xb0800000));
+	/* Class1 Alg Operation,RNG Optype, Instantiate */
+	desc_add_word(desc, U(0x82500004));
+	/* Wait for done */
+	desc_add_word(desc, U(0xa2000001));
+	/* Load to clear written */
+	desc_add_word(desc, U(0x10880004));
+	/* Pri Mode Reg clear */
+	desc_add_word(desc, U(0x00000001));
+	/* Generate secure keys */
+	desc_add_word(desc, U(0x82501000));
+
+	return 0;
+}
+
+/* Construct descriptor to generate hw key blob */
+int cnstr_hw_encap_blob_jobdesc(uint32_t *desc,
+				uint8_t *key_idnfr, uint32_t key_sz,
+				uint32_t key_class, uint8_t *plain_txt,
+				uint32_t in_sz, uint8_t *enc_blob,
+				uint32_t out_sz, uint32_t operation)
+{
+	phys_addr_t *phys_key_idnfr, *phys_addr_in, *phys_addr_out;
+	int i = 0;
+
+	phys_key_idnfr = vtop((void *)key_idnfr);
+	phys_addr_in = vtop((void *)plain_txt);
+	phys_addr_out = vtop((void *)enc_blob);
+
+	desc_init(desc);
+
+	desc_add_word(desc, U(0xb0800000));
+
+	/* Key Identifier */
+	desc_add_word(desc, (key_class | key_sz));
+	desc_add_ptr(desc, phys_key_idnfr);
+
+	/* Source Address */
+	desc_add_word(desc, U(0xf0400000));
+	desc_add_ptr(desc, phys_addr_in);
+
+	/* In Size = 0x10 */
+	desc_add_word(desc, in_sz);
+
+	/* Out Address */
+	desc_add_word(desc, U(0xf8400000));
+	desc_add_ptr(desc, phys_addr_out);
+
+	/* Out Size = 0x10 */
+	desc_add_word(desc, out_sz);
+
+	/* Operation */
+	desc_add_word(desc, operation);
+
+	for (i = 0; i < 15; i++) {
+		VERBOSE("desc word %x\n", desc[i]);
+	}
+
+	return 0;
+}
+
+/***************************************************************************
+ * Function	: cnstr_jobdesc_pkha_rsaexp
+ * Arguments	: desc - Pointer to Descriptor
+ *		  pkin - Pointer to Input Params
+ *		  out - Pointer to Output
+ *		  out_siz - Output Size
+ * Return	: Void
+ * Description	: Creates the descriptor for PKHA RSA
+ ***************************************************************************/
+void cnstr_jobdesc_pkha_rsaexp(uint32_t *desc,
+			       struct pk_in_params *pkin, uint8_t *out,
+			       uint32_t out_siz)
+{
+	phys_addr_t *ptr_addr_e, *ptr_addr_a, *ptr_addr_n, *ptr_addr_out;
+
+	ptr_addr_e = vtop((void *)(pkin->e));
+	ptr_addr_a = vtop((void *)(pkin->a));
+	ptr_addr_n = vtop((void *)(pkin->n));
+	ptr_addr_out = vtop((void *)(out));
+
+	desc_init(desc);
+	desc_add_word(desc, U(0xb0800000));
+	desc_add_word(desc, U(0x02010000) | pkin->e_siz);
+	desc_add_ptr(desc, ptr_addr_e);
+	desc_add_word(desc, U(0x220c0000) | pkin->a_siz);
+	desc_add_ptr(desc, ptr_addr_a);
+	desc_add_word(desc, U(0x22080000) | pkin->n_siz);
+	desc_add_ptr(desc, ptr_addr_n);
+	desc_add_word(desc, U(0x81800006));
+	desc_add_word(desc, U(0x620d0000) | out_siz);
+	desc_add_ptr(desc, ptr_addr_out);
+}
+
+/***************************************************************************
+ * Function	: cnstr_hash_jobdesc
+ * Arguments	: desc - Pointer to Descriptor
+ *		  msg - Pointer to SG Table
+ *		  msgsz - Size of SG Table
+ *		  digest - Pointer to Output Digest
+ * Return	: Void
+ * Description	: Creates the descriptor for SHA256 HASH calculation
+ ***************************************************************************/
+void cnstr_hash_jobdesc(uint32_t *desc, uint8_t *msg, uint32_t msgsz,
+			uint8_t *digest)
+{
+	/* SHA-256: output is 32 bytes long */
+	phys_addr_t *ptr_addr_in, *ptr_addr_out;
+
+	ptr_addr_in = (void *)vtop(msg);
+	ptr_addr_out = (void *)vtop(digest);
+
+	desc_init(desc);
+	desc_add_word(desc, U(0xb0800000));
+
+	/* Operation Command
+	 * OP_TYPE_CLASS2_ALG | OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HASH |
+	 * OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT | OP_ALG_ICV_OFF)
+	 */
+	desc_add_word(desc, U(0x8443000d));
+
+	if (msgsz > U(0xffff)) {
+		desc_add_word(desc, U(0x25540000));	/* FIFO Load */
+		desc_add_ptr(desc, ptr_addr_in);	/* Pointer to msg */
+		desc_add_word(desc, msgsz);	/* Size */
+		desc_add_word(desc, U(0x54200020));	/* FIFO Store */
+		desc_add_ptr(desc, ptr_addr_out);	/* Pointer to Result */
+	} else {
+		desc_add_word(desc, U(0x25140000) | msgsz);
+		desc_add_ptr(desc, ptr_addr_in);
+		desc_add_word(desc, U(0x54200020));
+		desc_add_ptr(desc, ptr_addr_out);
+	}
+
+}
diff --git a/drivers/nxp/crypto/caam/src/rng.c b/drivers/nxp/crypto/caam/src/rng.c
new file mode 100644
index 0000000..0b9d87d
--- /dev/null
+++ b/drivers/nxp/crypto/caam/src/rng.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <arch_helpers.h>
+#include "caam.h"
+#include <common/debug.h>
+#include "jobdesc.h"
+#include "sec_hw_specific.h"
+
+
+/* Callback function called after an RNG descriptor is submitted to SEC */
+static void rng_done(uint32_t *desc, uint32_t status, void *arg,
+		     void *job_ring)
+{
+	INFO("RNG Desc SUCCESS with status %x\n", status);
+}
+
+/* Is the HW RNG instantiated?
+ * Return code:
+ * 0 - Not in the instantiated state
+ * 1 - In the instantiated state
+ * state_handle - 0 for SH0, 1 for SH1
+ */
+static int is_hw_rng_instantiated(uint32_t *state_handle)
+{
+	int ret_code = 0;
+	uint32_t rdsta;
+
+	rdsta = sec_in32(get_caam_addr() + RNG_REG_RDSTA_OFFSET);
+
+	/* Check if either of the two state handles has been instantiated */
+	if (rdsta & RNG_STATE0_HANDLE_INSTANTIATED) {
+		*state_handle = 0;
+		ret_code = 1;
+	} else if (rdsta & RNG_STATE1_HANDLE_INSTANTIATED) {
+		*state_handle = 1;
+		ret_code = 1;
+	}
+
+	return ret_code;
+}
+
+/* @brief Kick the TRNG block of the RNG HW Engine
+ * @param [in] ent_delay       Entropy delay to be used
+ *        By default, the TRNG runs for 200 clocks per sample;
+ *        1200 clocks per sample generates better entropy.
+ */
+static void kick_trng(int ent_delay)
+{
+	uint32_t val;
+
+	/* put RNG4 into program mode */
+	val = sec_in32(get_caam_addr() + RNG_REG_RTMCTL_OFFSET);
+	val = val | RTMCTL_PRGM;
+	sec_out32(get_caam_addr() + RNG_REG_RTMCTL_OFFSET, val);
+
+	/* rtsdctl bits 0-15 contain the "Entropy Delay", which defines the
+	 * length (in system clocks) of each Entropy sample taken.
+	 */
+	val = sec_in32(get_caam_addr() + RNG_REG_RTSDCTL_OFFSET);
+	val = (val & ~RTSDCTL_ENT_DLY_MASK) |
+	    (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
+	sec_out32(get_caam_addr() + RNG_REG_RTSDCTL_OFFSET, val);
+	/* min. freq. count, equal to 1/4 of the entropy sample length */
+	sec_out32(get_caam_addr() + RNG_REG_RTFRQMIN_OFFSET, ent_delay >> 2);
+	/* disable maximum frequency count */
+	sec_out32(get_caam_addr() + RNG_REG_RTFRQMAX_OFFSET, RTFRQMAX_DISABLE);
+
+	/* select raw sampling in both entropy shifter
+	 *  and statistical checker
+	 */
+	val = sec_in32(get_caam_addr() + RNG_REG_RTMCTL_OFFSET);
+	val = val | RTMCTL_SAMP_MODE_RAW_ES_SC;
+	sec_out32(get_caam_addr() + RNG_REG_RTMCTL_OFFSET, val);
+
+	/* put RNG4 into run mode */
+	val = sec_in32(get_caam_addr() + RNG_REG_RTMCTL_OFFSET);
+	val = val & ~RTMCTL_PRGM;
+	sec_out32(get_caam_addr() + RNG_REG_RTMCTL_OFFSET, val);
+}
+
+/* @brief Submit descriptor to instantiate the RNG
+ * @retval 0 on success
+ * @retval -1 on error
+ */
+static int instantiate_rng(void)
+{
+	int ret = 0;
+	struct job_descriptor desc __aligned(CACHE_WRITEBACK_GRANULE);
+	struct job_descriptor *jobdesc = &desc;
+
+	jobdesc->arg = NULL;
+	jobdesc->callback = rng_done;
+
+	/* create the hw_rng descriptor */
+	cnstr_rng_instantiate_jobdesc(jobdesc->desc);
+
+	/* Finally, generate the requested random data bytes */
+	ret = run_descriptor_jr(jobdesc);
+	if (ret != 0) {
+		ERROR("Error in running descriptor\n");
+		ret = -1;
+	}
+	return ret;
+}
+
+/* Generate Random Data using HW RNG
+ * Parameters:
+ * uint8_t* add_input   - user specified optional input byte array
+ * uint32_t add_input_len - number of bytes of additional input
+ * uint8_t* out                   - user specified output byte array
+ * uint32_t out_len       - number of bytes to store in output byte array
+ * Return code:
+ * 0 - SUCCESS
+ * -1 - ERROR
+ */
+static int
+hw_rng_generate(uint32_t *add_input, uint32_t add_input_len,
+		uint8_t *out, uint32_t out_len, uint32_t state_handle)
+{
+	int ret = 0;
+	struct job_descriptor desc __aligned(CACHE_WRITEBACK_GRANULE);
+	struct job_descriptor *jobdesc = &desc;
+
+	jobdesc->arg = NULL;
+	jobdesc->callback = rng_done;
+
+#if defined(SEC_MEM_NON_COHERENT) && defined(IMAGE_BL2)
+	inv_dcache_range((uintptr_t)out, out_len);
+	dmbsy();
+#endif
+
+	/* create the hw_rng descriptor */
+	ret = cnstr_rng_jobdesc(jobdesc->desc, state_handle,
+				add_input, add_input_len, out, out_len);
+	if (ret != 0) {
+		ERROR("Descriptor construction failed\n");
+		ret = -1;
+		goto out;
+	}
+	/* Finally, generate the requested random data bytes */
+	ret = run_descriptor_jr(jobdesc);
+	if (ret != 0) {
+		ERROR("Error in running descriptor\n");
+		ret = -1;
+	}
+
+out:
+	return ret;
+}
+
+/* this function instantiates the rng
+ *
+ * Return code:
+ *  0 - All is well
+ * <0 - Error occurred somewhere
+ */
+int hw_rng_instantiate(void)
+{
+	int ret = 0;
+	int ent_delay = RTSDCTL_ENT_DLY_MIN;
+	uint32_t state_handle;
+
+	ret = is_hw_rng_instantiated(&state_handle);
+	if (ret != 0) {
+		NOTICE("RNG already instantiated\n");
+		return 0;
+	}
+	do {
+		kick_trng(ent_delay);
+		ent_delay += 400;
+		/* If instantiate_rng(...) fails, the loop will rerun
+		 * and the kick_trng(...) function will modify the
+		 * upper and lower limits of the entropy sampling
+		 * interval, leading to a successful initialization of
+		 * the RNG.
+		 */
+		ret = instantiate_rng();
+	} while ((ret == -1) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
+	if (ret != 0) {
+		ERROR("RNG: Failed to instantiate RNG\n");
+		return ret;
+	}
+
+	NOTICE("RNG: INSTANTIATED\n");
+
+	/* Enabling the RDB bit would make the RNG work faster; it is left
+	 * disabled here:
+	 * sec_setbits32(&sec->scfgr, SEC_SCFGR_RDBENABLE);
+	 */
+
+	return ret;
+}
+
+/* Generate random bytes, and stuff them into the bytes buffer
+ *
+ * If the HW RNG has not already been instantiated,
+ *  it will be instantiated before data is generated.
+ *
+ * Parameters:
+ * uint8_t* bytes  - byte buffer large enough to hold the requested random data
+ * int byte_len - number of random bytes to generate
+ *
+ * Return code:
+ *  0 - All is well
+ *  ~0 - Error occurred somewhere
+ */
+int get_rand_bytes_hw(uint8_t *bytes, int byte_len)
+{
+	int ret_code = 0;
+	uint32_t state_handle;
+
+	/* If this is the first time this routine is called,
+	 *  then the hash_drbg will not already be instantiated.
+	 * Therefore, before generating data, instantiate the hash_drbg
+	 */
+	ret_code = is_hw_rng_instantiated(&state_handle);
+	if (ret_code == 0) {
+		INFO("Instantiating the HW RNG\n");
+
+		/* Instantiate the hw RNG */
+		ret_code = hw_rng_instantiate();
+		if (ret_code != 0) {
+			ERROR("HW RNG Instantiate failed\n");
+			return ret_code;
+		}
+	}
+	/* If the HW RNG is still not instantiated, something must have gone
+	 * wrong; it must be in the error state, and we will not generate any
+	 * random data.
+	 */
+	if (is_hw_rng_instantiated(&state_handle) == 0) {
+		ERROR("HW RNG is in an Error state, and cannot be used\n");
+		return -1;
+	}
+	/* Generate the requested number of random bytes */
+	ret_code = hw_rng_generate(0, 0, bytes, byte_len, state_handle);
+	if (ret_code != 0) {
+		ERROR("HW RNG Generate failed\n");
+		return ret_code;
+	}
+
+	return ret_code;
+}
diff --git a/drivers/nxp/crypto/caam/src/sec_hw_specific.c b/drivers/nxp/crypto/caam/src/sec_hw_specific.c
new file mode 100644
index 0000000..92b7762
--- /dev/null
+++ b/drivers/nxp/crypto/caam/src/sec_hw_specific.c
@@ -0,0 +1,635 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <arch_helpers.h>
+#include "caam.h"
+#include <common/debug.h>
+#include "jobdesc.h"
+#include "sec_hw_specific.h"
+
+
+/* Job rings used for communication with SEC HW */
+extern struct sec_job_ring_t g_job_rings[MAX_SEC_JOB_RINGS];
+
+/* The current state of SEC user space driver */
+extern volatile sec_driver_state_t g_driver_state;
+
+/* The number of job rings used by SEC user space driver */
+extern int g_job_rings_no;
+
+/* LOCAL FUNCTIONS */
+static inline void hw_set_input_ring_start_addr(struct jobring_regs *regs,
+						phys_addr_t *start_addr)
+{
+#if defined(CONFIG_PHYS_64BIT)
+	sec_out32(&regs->irba_h, PHYS_ADDR_HI(start_addr));
+#else
+	sec_out32(&regs->irba_h, 0);
+#endif
+	sec_out32(&regs->irba_l, PHYS_ADDR_LO(start_addr));
+}
+
+static inline void hw_set_output_ring_start_addr(struct jobring_regs *regs,
+						 phys_addr_t *start_addr)
+{
+#if defined(CONFIG_PHYS_64BIT)
+	sec_out32(&regs->orba_h, PHYS_ADDR_HI(start_addr));
+#else
+	sec_out32(&regs->orba_h, 0);
+#endif
+	sec_out32(&regs->orba_l, PHYS_ADDR_LO(start_addr));
+}
+
+/* ORJR - Output Ring Jobs Removed Register shows how many jobs were
+ * removed from the Output Ring for processing by software. This is done after
+ * the software has processed the entries.
+ */
+static inline void hw_remove_entries(sec_job_ring_t *jr, int num)
+{
+	struct jobring_regs *regs =
+	    (struct jobring_regs *)jr->register_base_addr;
+
+	sec_out32(&regs->orjr, num);
+}
+
+/* IRSA - Input Ring Slots Available register holds the number of free slots
+ * in the Job Ring's input ring. Once a job is enqueued, the value returned is
+ * decremented by the hardware by the number of jobs enqueued.
+ */
+static inline int hw_get_available_slots(sec_job_ring_t *jr)
+{
+	struct jobring_regs *regs =
+	    (struct jobring_regs *)jr->register_base_addr;
+
+	return sec_in32(&regs->irsa);
+}
+
+/* ORSFR - Output Ring Slots Full register holds the number of jobs which were
+ * processed by the SEC and can be retrieved by the software. Once a job has
+ * been processed by software, the user will call hw_remove_entries() in order
+ * to notify the SEC that the entry was processed.
+ */
+static inline int hw_get_no_finished_jobs(sec_job_ring_t *jr)
+{
+	struct jobring_regs *regs =
+	    (struct jobring_regs *)jr->register_base_addr;
+
+	return sec_in32(&regs->orsf);
+}
+
+/* @brief Process Jump Halt Condition related errors
+ * @param [in]  error_code The error code in the descriptor status word
+ */
+static inline void hw_handle_jmp_halt_cond_err(union hw_error_code error_code)
+{
+	ERROR("JMP %x\n", error_code.error_desc.jmp_halt_cond_src.jmp);
+	ERROR("Descriptor Index: %d\n",
+	      error_code.error_desc.jmp_halt_cond_src.desc_idx);
+	ERROR(" Condition %x\n", error_code.error_desc.jmp_halt_cond_src.cond);
+}
+
+/* @brief Process DECO related errors
+ * @param [in]  error_code      The error code in the descriptor status word
+ */
+static inline void hw_handle_deco_err(union hw_error_code error_code)
+{
+	ERROR("JMP %x\n", error_code.error_desc.deco_src.jmp);
+	ERROR("Descriptor Index: 0x%x",
+	      error_code.error_desc.deco_src.desc_idx);
+
+	switch (error_code.error_desc.deco_src.desc_err) {
+	case SEC_HW_ERR_DECO_HFN_THRESHOLD:
+		WARN(" Descriptor completed but exceeds the Threshold");
+		break;
+	default:
+		ERROR("Error 0x%04x not implemented",
+		      error_code.error_desc.deco_src.desc_err);
+		break;
+	}
+}
+
+/* @brief Process  Jump Halt User Status related errors
+ * @param [in]  error_code      The error code in the descriptor status word
+ */
+static inline void hw_handle_jmp_halt_user_err(union hw_error_code error_code)
+{
+	WARN(" Not implemented");
+}
+
+/* @brief Process CCB related errors
+ * @param [in]  error_code      The error code in the descriptor status word
+ */
+static inline void hw_handle_ccb_err(union hw_error_code hw_error_code)
+{
+	WARN(" Not implemented");
+}
+
+/* @brief Process Job Ring related errors
+ * @param [in]  error_code      The error code in the descriptor status word
+ */
+static inline void hw_handle_jr_err(union hw_error_code hw_error_code)
+{
+	WARN(" Not implemented");
+}
+
+/* GLOBAL FUNCTIONS */
+
+int hw_reset_job_ring(sec_job_ring_t *job_ring)
+{
+	int ret = 0;
+	struct jobring_regs *regs =
+	    (struct jobring_regs *)job_ring->register_base_addr;
+
+	/* First reset the job ring in hw */
+	ret = hw_shutdown_job_ring(job_ring);
+	if (ret != 0) {
+		ERROR("Failed resetting job ring in hardware");
+		return ret;
+	}
+	/* In order to have the HW JR in a workable state
+	 * after a reset, we need to re-write the input
+	 * queue size, input start address, output queue
+	 * size and output start address.
+	 * Write the JR input queue size to the HW register.
+	 */
+	sec_out32(&regs->irs, SEC_JOB_RING_SIZE);
+
+	/* Write the JR output queue size to the HW register */
+	sec_out32(&regs->ors, SEC_JOB_RING_SIZE);
+
+	/* Write the JR input queue start address */
+	hw_set_input_ring_start_addr(regs, vtop(job_ring->input_ring));
+
+	/* Write the JR output queue start address */
+	hw_set_output_ring_start_addr(regs, vtop(job_ring->output_ring));
+
+	return 0;
+}
+
+int hw_shutdown_job_ring(sec_job_ring_t *job_ring)
+{
+	struct jobring_regs *regs =
+	    (struct jobring_regs *)job_ring->register_base_addr;
+	unsigned int timeout = SEC_TIMEOUT;
+	uint32_t tmp = 0U;
+
+	VERBOSE("Resetting Job ring\n");
+
+	/*
+	 * Mask interrupts since we are going to poll
+	 * for reset completion status
+	 * Also, at POR, interrupts are ENABLED on a JR, thus
+	 * this is the point where I can disable them without
+	 * changing the code logic too much
+	 */
+
+	jr_disable_irqs(job_ring);
+
+	/* initiate flush (required prior to reset) */
+	sec_out32(&regs->jrcr, JR_REG_JRCR_VAL_RESET);
+
+	/* dummy read */
+	tmp = sec_in32(&regs->jrcr);
+
+	do {
+		tmp = sec_in32(&regs->jrint);
+	} while (((tmp & JRINT_ERR_HALT_MASK) ==
+		  JRINT_ERR_HALT_INPROGRESS) && ((--timeout) != 0U));
+
+	if ((tmp & JRINT_ERR_HALT_MASK) != JRINT_ERR_HALT_COMPLETE ||
+	    timeout == 0U) {
+		ERROR("Failed to flush hw job ring %x %u\n", tmp, timeout);
+		/* unmask interrupts */
+		if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) {
+			jr_enable_irqs(job_ring);
+		}
+		return -1;
+	}
+	/* Initiate reset */
+	timeout = SEC_TIMEOUT;
+	sec_out32(&regs->jrcr, JR_REG_JRCR_VAL_RESET);
+
+	do {
+		tmp = sec_in32(&regs->jrcr);
+	} while (((tmp & JR_REG_JRCR_VAL_RESET) != 0U) &&
+		 ((--timeout) != 0U));
+
+	if (timeout == 0U) {
+		ERROR("Failed to reset hw job ring\n");
+		/* unmask interrupts */
+		if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) {
+			jr_enable_irqs(job_ring);
+		}
+		return -1;
+	}
+	/* unmask interrupts */
+	if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) {
+		jr_enable_irqs(job_ring);
+	}
+	return 0;
+
+}
+
+void hw_handle_job_ring_error(sec_job_ring_t *job_ring, uint32_t error_code)
+{
+	union hw_error_code hw_err_code;
+
+	hw_err_code.error = error_code;
+
+	switch (hw_err_code.error_desc.value.ssrc) {
+	case SEC_HW_ERR_SSRC_NO_SRC:
+		INFO("No Status Source ");
+		break;
+	case SEC_HW_ERR_SSRC_CCB_ERR:
+		INFO("CCB Status Source");
+		hw_handle_ccb_err(hw_err_code);
+		break;
+	case SEC_HW_ERR_SSRC_JMP_HALT_U:
+		INFO("Jump Halt User Status Source");
+		hw_handle_jmp_halt_user_err(hw_err_code);
+		break;
+	case SEC_HW_ERR_SSRC_DECO:
+		INFO("DECO Status Source");
+		hw_handle_deco_err(hw_err_code);
+		break;
+	case SEC_HW_ERR_SSRC_JR:
+		INFO("Job Ring Status Source");
+		hw_handle_jr_err(hw_err_code);
+		break;
+	case SEC_HW_ERR_SSRC_JMP_HALT_COND:
+		INFO("Jump Halt Condition Codes");
+		hw_handle_jmp_halt_cond_err(hw_err_code);
+		break;
+	default:
+		INFO("Unknown SSRC");
+		break;
+	}
+}
+
+int hw_job_ring_error(sec_job_ring_t *job_ring)
+{
+	uint32_t jrint_error_code;
+	struct jobring_regs *regs =
+	    (struct jobring_regs *)job_ring->register_base_addr;
+
+	if (JR_REG_JRINT_JRE_EXTRACT(sec_in32(&regs->jrint)) == 0) {
+		return 0;
+	}
+
+	jrint_error_code =
+	    JR_REG_JRINT_ERR_TYPE_EXTRACT(sec_in32(&regs->jrint));
+	switch (jrint_error_code) {
+	case JRINT_ERR_WRITE_STATUS:
+		ERROR("Error writing status to Output Ring ");
+		break;
+	case JRINT_ERR_BAD_INPUT_BASE:
+		ERROR("Bad Input Ring Base (not on a 4-byte boundary)\n");
+		break;
+	case JRINT_ERR_BAD_OUTPUT_BASE:
+		ERROR("Bad Output Ring Base (not on a 4-byte boundary)\n");
+		break;
+	case JRINT_ERR_WRITE_2_IRBA:
+		ERROR("Invalid write to Input Ring Base Address Register\n");
+		break;
+	case JRINT_ERR_WRITE_2_ORBA:
+		ERROR("Invalid write to Output Ring Base Address Register\n");
+		break;
+	case JRINT_ERR_RES_B4_HALT:
+		ERROR("Job Ring released before Job Ring is halted\n");
+		break;
+	case JRINT_ERR_REM_TOO_MANY:
+		ERROR("Removed too many jobs from job ring\n");
+		break;
+	case JRINT_ERR_ADD_TOO_MANY:
+		ERROR("Added too many jobs on job ring\n");
+		break;
+	default:
+		ERROR("Unknown SEC JR Error :%d\n", jrint_error_code);
+		break;
+	}
+	return jrint_error_code;
+}
+
+int hw_job_ring_set_coalescing_param(sec_job_ring_t *job_ring,
+				     uint16_t irq_coalescing_timer,
+				     uint8_t irq_coalescing_count)
+{
+	uint32_t reg_val = 0U;
+	struct jobring_regs *regs =
+	    (struct jobring_regs *)job_ring->register_base_addr;
+
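+	/* Note: this only programs the coalescing thresholds; coalescing
+	 * itself is armed separately via hw_job_ring_enable_coalescing(),
+	 * as done in init_job_ring().
+	 */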
+	/* Set descriptor count coalescing */
+	reg_val |= (irq_coalescing_count << JR_REG_JRCFG_LO_ICDCT_SHIFT);
+
+	/* Set coalescing timer value */
+	reg_val |= (irq_coalescing_timer << JR_REG_JRCFG_LO_ICTT_SHIFT);
+
+	/* Update parameters in HW */
+	sec_out32(&regs->jrcfg1, reg_val);
+
+	VERBOSE("Set coalescing params on jr\n");
+
+	return 0;
+}
+
+int hw_job_ring_enable_coalescing(sec_job_ring_t *job_ring)
+{
+	uint32_t reg_val = 0U;
+	struct jobring_regs *regs =
+	    (struct jobring_regs *)job_ring->register_base_addr;
+
+	/* Get the current value of the register */
+	reg_val = sec_in32(&regs->jrcfg1);
+
+	/* Enable coalescing */
+	reg_val |= JR_REG_JRCFG_LO_ICEN_EN;
+
+	/* Write in hw */
+	sec_out32(&regs->jrcfg1, reg_val);
+
+	VERBOSE("Enabled coalescing on jr\n");
+
+	return 0;
+}
+
+int hw_job_ring_disable_coalescing(sec_job_ring_t *job_ring)
+{
+	uint32_t reg_val = 0U;
+	struct jobring_regs *regs =
+	    (struct jobring_regs *)job_ring->register_base_addr;
+
+	/* Get the current value of the register */
+	reg_val = sec_in32(&regs->jrcfg1);
+
+	/* Disable coalescing */
+	reg_val &= ~JR_REG_JRCFG_LO_ICEN_EN;
+
+	/* Write in hw */
+	sec_out32(&regs->jrcfg1, reg_val);
+
+	VERBOSE("Disabled coalescing on jr");
+
+	return 0;
+
+}
+
+void hw_flush_job_ring(struct sec_job_ring_t *job_ring,
+		       uint32_t do_notify,
+		       uint32_t error_code, uint32_t *notified_descs)
+{
+	int32_t jobs_no_to_discard = 0;
+	int32_t discarded_descs_no = 0;
+	int32_t number_of_jobs_available = 0;
+
+	VERBOSE("JR pi[%d]i ci[%d]\n", job_ring->pidx, job_ring->cidx);
+	VERBOSE("error code %x\n", error_code);
+	VERBOSE("Notify_desc = %d\n", do_notify);
+
+	number_of_jobs_available = hw_get_no_finished_jobs(job_ring);
+
+	/* Discard all jobs */
+	jobs_no_to_discard = number_of_jobs_available;
+
+	VERBOSE("JR pi[%d]i ci[%d]\n", job_ring->pidx, job_ring->cidx);
+	VERBOSE("Discarding desc = %d\n", jobs_no_to_discard);
+
+	while (jobs_no_to_discard > discarded_descs_no) {
+		discarded_descs_no++;
+		/* Increment the consumer index for the current job ring,
+		 * AFTER the job has been saved in a temporary location.
+		 */
+
+		job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
+						      SEC_JOB_RING_SIZE);
+
+		hw_remove_entries(job_ring, 1);
+	}
+
+	if (do_notify == true) {
+		if (notified_descs == NULL) {
+			return;
+		}
+		*notified_descs = discarded_descs_no;
+	}
+}
+
+/* Returns: >0 - number of jobs notified to the caller
+ *          -1 - error reported by the SEC block
+ *           0 - no job processed by SEC yet, or the descriptor
+ *               returned on dequeue is NULL
+ */
+int hw_poll_job_ring(struct sec_job_ring_t *job_ring, int32_t limit)
+{
+	int32_t jobs_no_to_notify = 0;
+	int32_t number_of_jobs_available = 0;
+	int32_t notified_descs_no = 0;
+	uint32_t error_descs_no = 0U;
+	uint32_t sec_error_code = 0U;
+	uint32_t do_driver_shutdown = false;
+	phys_addr_t *fnptr, *arg_addr;
+	user_callback usercall = NULL;
+	uint8_t *current_desc;
+	void *arg;
+	uintptr_t current_desc_addr;
+	phys_addr_t current_desc_loc;
+
+#if defined(SEC_MEM_NON_COHERENT) && defined(IMAGE_BL2)
+	inv_dcache_range((uintptr_t)job_ring->register_base_addr, sizeof(struct jobring_regs));
+	dmbsy();
+#endif
+
+	/* check here if any JR error that cannot be written
+	 * in the output status word has occurred
+	 */
+	sec_error_code = hw_job_ring_error(job_ring);
+	if (unlikely(sec_error_code != 0)) {
+		ERROR("Job ring error before dequeue: %x\n", sec_error_code);
+		return -1;
+	}
+	/* Compute the number of notifications that need to be raised to UA
+	 * If limit < 0 -> notify all done jobs
+	 * If limit > total number of done jobs -> notify all done jobs
+	 * If limit = 0 -> error
+	 * If limit > 0 && limit < total number of done jobs -> notify a number
+	 * of done jobs equal with limit
+	 */
+
+	/* Compute the number of jobs available in the job ring based on
+	 * the producer and consumer index values.
+	 */
+
+	number_of_jobs_available = hw_get_no_finished_jobs(job_ring);
+	jobs_no_to_notify = (limit < 0 || limit > number_of_jobs_available) ?
+	    number_of_jobs_available : limit;
+	VERBOSE("JR - pi %d, ci %d, ", job_ring->pidx, job_ring->cidx);
+	VERBOSE("Jobs submitted %d", number_of_jobs_available);
+	VERBOSE("Jobs to notify %d\n", jobs_no_to_notify);
+
+	while (jobs_no_to_notify > notified_descs_no) {
+
+#if defined(SEC_MEM_NON_COHERENT) && defined(IMAGE_BL2)
+		inv_dcache_range(
+			(uintptr_t)(&job_ring->output_ring[job_ring->cidx]),
+			sizeof(struct sec_outring_entry));
+		dmbsy();
+#endif
+
+		/* Get job status here */
+		sec_error_code =
+		    sec_in32(&(job_ring->output_ring[job_ring->cidx].status));
+
+		/* Get the completed descriptor */
+		current_desc_loc = (uintptr_t)
+		    &job_ring->output_ring[job_ring->cidx].desc;
+		current_desc_addr = sec_read_addr(current_desc_loc);
+
+		current_desc = ptov((phys_addr_t *) current_desc_addr);
+		if (current_desc == 0) {
+			ERROR("No descriptor returned from SEC");
+			assert(current_desc);
+			return 0;
+		}
+		/* now increment the consumer index for the current job ring,
+		 * AFTER saving job in temporary location!
+		 */
+		job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
+						      SEC_JOB_RING_SIZE);
+
+		if (sec_error_code != 0) {
+			ERROR("desc at cidx %d\n ", job_ring->cidx);
+			ERROR("generated error %x\n", sec_error_code);
+
+			sec_handle_desc_error(job_ring,
+					      sec_error_code,
+					      &error_descs_no,
+					      &do_driver_shutdown);
+			hw_remove_entries(job_ring, 1);
+
+			return -1;
+		}
+		/* Signal that the job has been processed & the slot is free */
+		hw_remove_entries(job_ring, 1);
+		notified_descs_no++;
+
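+		/* The caller's argument and callback pointer are read from
+		 * the words that follow the descriptor, which is expected to
+		 * match the job_descriptor layout used on the enqueue side
+		 * (see jobdesc.h).
+		 */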
+		arg_addr = (phys_addr_t *) (current_desc +
+				(MAX_DESC_SIZE_WORDS * sizeof(uint32_t)));
+
+		fnptr = (phys_addr_t *) (current_desc +
+					(MAX_DESC_SIZE_WORDS * sizeof(uint32_t)
+					+  sizeof(void *)));
+
+		arg = (void *)*(arg_addr);
+		if (*fnptr != 0) {
+			VERBOSE("Callback Function called\n");
+			usercall = (user_callback) *(fnptr);
+			(*usercall) ((uint32_t *) current_desc,
+				     sec_error_code, arg, job_ring);
+		}
+	}
+
+	return notified_descs_no;
+}
+
+void sec_handle_desc_error(sec_job_ring_t *job_ring,
+			   uint32_t sec_error_code,
+			   uint32_t *notified_descs,
+			   uint32_t *do_driver_shutdown)
+{
+	/* Analyze the SEC error on this job ring */
+	hw_handle_job_ring_error(job_ring, sec_error_code);
+}
+
+void flush_job_rings(void)
+{
+	struct sec_job_ring_t *job_ring = NULL;
+	int i = 0;
+
+	for (i = 0; i < g_job_rings_no; i++) {
+		job_ring = &g_job_rings[i];
+		/* Producer index is frozen. If consumer index is not equal
+		 * with producer index, then we have descs to flush.
+		 */
+		while (job_ring->pidx != job_ring->cidx) {
+			hw_flush_job_ring(job_ring, false, 0,	/* no error */
+					  NULL);
+		}
+	}
+}
+
+int shutdown_job_ring(struct sec_job_ring_t *job_ring)
+{
+	int ret = 0;
+
+	ret = hw_shutdown_job_ring(job_ring);
+	if (ret != 0) {
+		ERROR("Failed to shutdown hardware job ring\n");
+		return ret;
+	}
+
+	if (job_ring->coalescing_en != 0) {
+		hw_job_ring_disable_coalescing(job_ring);
+	}
+
+	if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) {
+		ret = jr_disable_irqs(job_ring);
+		if (ret != 0) {
+			ERROR("Failed to disable irqs for job ring");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+int jr_enable_irqs(struct sec_job_ring_t *job_ring)
+{
+	uint32_t reg_val = 0U;
+	struct jobring_regs *regs =
+	    (struct jobring_regs *)job_ring->register_base_addr;
+
+	/* Get the current value of the register */
+	reg_val = sec_in32(&regs->jrcfg1);
+
+	/* Enable interrupts by disabling interrupt masking*/
+	reg_val &= ~JR_REG_JRCFG_LO_IMSK_EN;
+
+	/* Update parameters in HW */
+	sec_out32(&regs->jrcfg1, reg_val);
+
+	VERBOSE("Enable interrupts on JR\n");
+
+	return 0;
+}
+
+int jr_disable_irqs(struct sec_job_ring_t *job_ring)
+{
+	uint32_t reg_val = 0U;
+	struct jobring_regs *regs =
+	    (struct jobring_regs *)job_ring->register_base_addr;
+
+	/* Get the current value of the register */
+	reg_val = sec_in32(&regs->jrcfg1);
+
+	/* Disable interrupts by enabling interrupt masking*/
+	reg_val |= JR_REG_JRCFG_LO_IMSK_EN;
+
+	/* Update parameters in HW */
+	sec_out32(&regs->jrcfg1, reg_val);
+
+	VERBOSE("Disable interrupts on JR\n");
+
+	return 0;
+}
diff --git a/drivers/nxp/crypto/caam/src/sec_jr_driver.c b/drivers/nxp/crypto/caam/src/sec_jr_driver.c
new file mode 100644
index 0000000..1fe7007
--- /dev/null
+++ b/drivers/nxp/crypto/caam/src/sec_jr_driver.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include "caam.h"
+#include <common/debug.h>
+#include "jobdesc.h"
+#include "nxp_timer.h"
+#include "sec_hw_specific.h"
+#include "sec_jr_driver.h"
+
+
+/* Job rings used for communication with SEC HW  */
+struct sec_job_ring_t g_job_rings[MAX_SEC_JOB_RINGS];
+
+/* The current state of SEC user space driver */
+volatile sec_driver_state_t g_driver_state = SEC_DRIVER_STATE_IDLE;
+
+int g_job_rings_no;
+
+uint8_t ip_ring[SEC_DMA_MEM_INPUT_RING_SIZE] __aligned(CACHE_WRITEBACK_GRANULE);
+uint8_t op_ring[SEC_DMA_MEM_OUTPUT_RING_SIZE] __aligned(CACHE_WRITEBACK_GRANULE);
+
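+/*
+ * Typical call sequence for this driver:
+ *   sec_jr_lib_init()  - initialise driver state
+ *   init_job_ring()    - configure and start a job ring
+ *   enq_jr_desc()      - submit a job descriptor to a ring
+ *   dequeue_jr()       - poll for and notify completed jobs
+ *   sec_release()      - flush and shut down all job rings
+ */
+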
+void *init_job_ring(uint8_t jr_mode,
+		    uint16_t irq_coalescing_timer,
+		    uint8_t irq_coalescing_count,
+		    void *reg_base_addr, uint32_t irq_id)
+{
+	struct sec_job_ring_t *job_ring = &g_job_rings[g_job_rings_no++];
+	int ret = 0;
+
+	job_ring->register_base_addr = reg_base_addr;
+	job_ring->jr_mode = jr_mode;
+	job_ring->irq_fd = irq_id;
+
+	job_ring->input_ring = vtop(ip_ring);
+	memset(job_ring->input_ring, 0, SEC_DMA_MEM_INPUT_RING_SIZE);
+
+	job_ring->output_ring = (struct sec_outring_entry *)vtop(op_ring);
+	memset(job_ring->output_ring, 0, SEC_DMA_MEM_OUTPUT_RING_SIZE);
+
+	dsb();
+
+#if defined(SEC_MEM_NON_COHERENT) && defined(IMAGE_BL2)
+	flush_dcache_range((uintptr_t)(job_ring->input_ring),
+				       SEC_DMA_MEM_INPUT_RING_SIZE);
+	flush_dcache_range((uintptr_t)(job_ring->output_ring),
+				       SEC_DMA_MEM_OUTPUT_RING_SIZE);
+
+	dmbsy();
+#endif
+	/* Reset job ring in SEC hw and configure job ring registers */
+	ret = hw_reset_job_ring(job_ring);
+	if (ret != 0) {
+		ERROR("Failed to reset hardware job ring\n");
+		return NULL;
+	}
+
+	if (jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
+		/* Enable IRQ if the driver works in interrupt mode */
+		ERROR("Enabling DONE IRQ generation on job ring\n");
+		ret = jr_enable_irqs(job_ring);
+		if (ret != 0) {
+			ERROR("Failed to enable irqs for job ring\n");
+			return NULL;
+		}
+	}
+	if ((irq_coalescing_timer != 0) || (irq_coalescing_count != 0)) {
+		hw_job_ring_set_coalescing_param(job_ring,
+						 irq_coalescing_timer,
+						 irq_coalescing_count);
+
+		hw_job_ring_enable_coalescing(job_ring);
+		job_ring->coalescing_en = 1;
+	}
+
+	job_ring->jr_state = SEC_JOB_RING_STATE_STARTED;
+
+	return job_ring;
+}
+
+int sec_release(void)
+{
+	int i;
+
+	/* Validate driver state */
+	if (g_driver_state == SEC_DRIVER_STATE_RELEASE) {
+		ERROR("Driver release is already in progress");
+		return SEC_DRIVER_RELEASE_IN_PROGRESS;
+	}
+	/* Update driver state */
+	g_driver_state = SEC_DRIVER_STATE_RELEASE;
+
+	/* If any descriptors are in flight, poll and wait
+	 * until all descriptors are received and silently discarded.
+	 */
+
+	flush_job_rings();
+
+	for (i = 0; i < g_job_rings_no; i++) {
+		shutdown_job_ring(&g_job_rings[i]);
+	}
+	g_job_rings_no = 0;
+	g_driver_state = SEC_DRIVER_STATE_IDLE;
+
+	return SEC_SUCCESS;
+}
+
+int sec_jr_lib_init(void)
+{
+	/* Validate driver state */
+	if (g_driver_state != SEC_DRIVER_STATE_IDLE) {
+		ERROR("Driver already initialized\n");
+		return 0;
+	}
+
+	memset(g_job_rings, 0, sizeof(g_job_rings));
+	g_job_rings_no = 0;
+
+	/* Update driver state */
+	g_driver_state = SEC_DRIVER_STATE_STARTED;
+	return 0;
+}
+
+int dequeue_jr(void *job_ring_handle, int32_t limit)
+{
+	int ret = 0;
+	int notified_descs_no = 0;
+	struct sec_job_ring_t *job_ring = (sec_job_ring_t *) job_ring_handle;
+	uint64_t start_time;
+
+	/* Validate driver state */
+	if (g_driver_state != SEC_DRIVER_STATE_STARTED) {
+		ERROR("Driver release in progress or driver not initialized\n");
+		return -1;
+	}
+
+	/* Validate input arguments */
+	if (job_ring == NULL) {
+		ERROR("job_ring_handle is NULL\n");
+		return -1;
+	}
+	if (((limit == 0) || (limit > SEC_JOB_RING_SIZE))) {
+		ERROR("Invalid limit parameter configuration\n");
+		return -1;
+	}
+
+	VERBOSE("JR Polling limit[%d]\n", limit);
+
+	/* Poll job ring
+	 * If limit < 0 -> poll JR until no more notifications are available.
+	 * If limit > 0 -> poll JR until limit is reached.
+	 */
+
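+	/* The wait below is additionally bounded by CAAM_TIMEOUT, so a
+	 * non-responsive SEC engine cannot stall the caller indefinitely.
+	 */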
+	start_time = get_timer_val(0);
+
+	while (notified_descs_no == 0) {
+		/* Run hw poll job ring */
+		notified_descs_no = hw_poll_job_ring(job_ring, limit);
+		if (notified_descs_no < 0) {
+			ERROR("Error polling SEC engine job ring ");
+			return notified_descs_no;
+		}
+		VERBOSE("Jobs notified[%d]. ", notified_descs_no);
+
+		if (get_timer_val(start_time) >= CAAM_TIMEOUT) {
+			break;
+		}
+	}
+
+	if (job_ring->jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
+
+		/* Always enable IRQ generation when in pure IRQ mode */
+		ret = jr_enable_irqs(job_ring);
+		if (ret != 0) {
+			ERROR("Failed to enable irqs for job ring");
+			return ret;
+		}
+	}
+	return notified_descs_no;
+}
+
+int enq_jr_desc(void *job_ring_handle, struct job_descriptor *jobdescr)
+{
+	struct sec_job_ring_t *job_ring;
+
+	job_ring = (struct sec_job_ring_t *)job_ring_handle;
+
+	/* Validate driver state */
+	if (g_driver_state != SEC_DRIVER_STATE_STARTED) {
+		ERROR("Driver release in progress or driver not initialized\n");
+		return -1;
+	}
+
+	/* Check job ring state */
+	if (job_ring->jr_state != SEC_JOB_RING_STATE_STARTED) {
+		ERROR("Job ring is currently resetting\n");
+		return -1;
+	}
+
+	if (SEC_JOB_RING_IS_FULL(job_ring->pidx, job_ring->cidx,
+				 SEC_JOB_RING_SIZE, SEC_JOB_RING_SIZE)) {
+		ERROR("Job ring is full\n");
+		return -1;
+	}
+
+	/* Set ptr in input ring to current descriptor  */
+	sec_write_addr(&job_ring->input_ring[job_ring->pidx],
+		       (phys_addr_t) vtop(jobdescr->desc));
+
+	dsb();
+
+#if defined(SEC_MEM_NON_COHERENT) && defined(IMAGE_BL2)
+	flush_dcache_range((uintptr_t)(&job_ring->input_ring[job_ring->pidx]),
+			   sizeof(phys_addr_t));
+
+	inv_dcache_range((uintptr_t)(&job_ring->output_ring[job_ring->cidx]),
+			   sizeof(struct sec_outring_entry));
+	dmbsy();
+#endif
+	/* Notify HW that a new job is enqueued  */
+	hw_enqueue_desc_on_job_ring(
+			(struct jobring_regs *)job_ring->register_base_addr, 1);
+
+	/* increment the producer index for the current job ring */
+	job_ring->pidx = SEC_CIRCULAR_COUNTER(job_ring->pidx,
+					      SEC_JOB_RING_SIZE);
+
+	return 0;
+}
diff --git a/drivers/nxp/csu/csu.c b/drivers/nxp/csu/csu.c
new file mode 100644
index 0000000..9f90fe0
--- /dev/null
+++ b/drivers/nxp/csu/csu.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <endian.h>
+
+#include <common/debug.h>
+#include <csu.h>
+#include <lib/mmio.h>
+
+void enable_layerscape_ns_access(struct csu_ns_dev_st *csu_ns_dev,
+				 uint32_t num, uintptr_t nxp_csu_addr)
+{
+	uint32_t *base = (uint32_t *)nxp_csu_addr;
+	uint32_t *reg;
+	uint32_t val;
+	int i;
+
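+	/* Each 32-bit CSL register packs the permissions of two devices,
+	 * 16 bits each: 'ind / 2' selects the register and 'ind % 2'
+	 * selects the half-word. The registers are big-endian, hence the
+	 * be32toh()/htobe32() conversions.
+	 */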
+	for (i = 0; i < num; i++) {
+		reg = base + csu_ns_dev[i].ind / 2U;
+		val = be32toh(mmio_read_32((uintptr_t)reg));
+		if (csu_ns_dev[i].ind % 2U == 0U) {
+			val &= 0x0000ffffU;
+			val |= csu_ns_dev[i].val << 16U;
+		} else {
+			val &= 0xffff0000U;
+			val |= csu_ns_dev[i].val;
+		}
+		mmio_write_32((uintptr_t)reg, htobe32(val));
+	}
+}
diff --git a/drivers/nxp/csu/csu.h b/drivers/nxp/csu/csu.h
new file mode 100644
index 0000000..9f82feb
--- /dev/null
+++ b/drivers/nxp/csu/csu.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef CSU_H
+#define CSU_H
+
+#define CSU_SEC_ACCESS_REG_OFFSET	(0x0021CU)
+
+/* Macros defining access permissions to configure
+ * the regions controlled by Central Security Unit.
+ */
+enum csu_cslx_access {
+	CSU_NS_SUP_R = (0x8U),
+	CSU_NS_SUP_W = (0x80U),
+	CSU_NS_SUP_RW = (0x88U),
+	CSU_NS_USER_R = (0x4U),
+	CSU_NS_USER_W = (0x40U),
+	CSU_NS_USER_RW = (0x44U),
+	CSU_S_SUP_R = (0x2U),
+	CSU_S_SUP_W = (0x20U),
+	CSU_S_SUP_RW = (0x22U),
+	CSU_S_USER_R = (0x1U),
+	CSU_S_USER_W = (0x10U),
+	CSU_S_USER_RW = (0x11U),
+	CSU_ALL_RW = (0xffU),
+};
+
+struct csu_ns_dev_st {
+	uintptr_t ind;
+	uint32_t val;
+};
+
+void enable_layerscape_ns_access(struct csu_ns_dev_st *csu_ns_dev,
+				 uint32_t num, uintptr_t nxp_csu_addr);
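+
+/*
+ * Illustrative usage (CSLX_DUART1 and NXP_CSU_ADDR below are
+ * platform-specific placeholders, not symbols defined by this driver):
+ *
+ *	static struct csu_ns_dev_st ns_dev[] = {
+ *		{ CSLX_DUART1, CSU_ALL_RW },
+ *	};
+ *	enable_layerscape_ns_access(ns_dev, ARRAY_SIZE(ns_dev), NXP_CSU_ADDR);
+ */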
+
+#endif
diff --git a/drivers/nxp/csu/csu.mk b/drivers/nxp/csu/csu.mk
new file mode 100644
index 0000000..ebdf674
--- /dev/null
+++ b/drivers/nxp/csu/csu.mk
@@ -0,0 +1,28 @@
+#
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-----------------------------------------------------------------------------
+ifeq (${CSU_ADDED},)
+
+CSU_ADDED		:= 1
+
+CSU_DRIVERS_PATH	:=  ${PLAT_DRIVERS_PATH}/csu
+
+PLAT_INCLUDES		+= -I$(CSU_DRIVERS_PATH)
+
+CSU_SOURCES		+= $(CSU_DRIVERS_PATH)/csu.c
+
+ifeq (${BL_COMM_CSU_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${CSU_SOURCES}
+else
+ifeq (${BL2_CSU_NEEDED},yes)
+BL2_SOURCES		+= ${CSU_SOURCES}
+endif
+ifeq (${BL31_CSU_NEEDED},yes)
+BL31_SOURCES		+= ${CSU_SOURCES}
+endif
+endif
+
+endif
diff --git a/drivers/nxp/dcfg/dcfg.c b/drivers/nxp/dcfg/dcfg.c
new file mode 100644
index 0000000..2e813e7
--- /dev/null
+++ b/drivers/nxp/dcfg/dcfg.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <common/debug.h>
+#include "dcfg.h"
+#include <lib/mmio.h>
+#ifdef NXP_SFP_ENABLED
+#include <sfp.h>
+#endif
+
+static soc_info_t soc_info = {0};
+static devdisr5_info_t devdisr5_info = {0};
+static dcfg_init_info_t *dcfg_init_info;
+
+/* Read the PORSR1 register */
+uint32_t read_reg_porsr1(void)
+{
+	unsigned int *porsr1_addr = NULL;
+
+	if (dcfg_init_info->porsr1 != 0U) {
+		return dcfg_init_info->porsr1;
+	}
+
+	porsr1_addr = (void *)
+			(dcfg_init_info->g_nxp_dcfg_addr + DCFG_PORSR1_OFFSET);
+	dcfg_init_info->porsr1 = gur_in32(porsr1_addr);
+
+	return dcfg_init_info->porsr1;
+}
+
+
+const soc_info_t *get_soc_info(void)
+{
+	uint32_t reg;
+
+	if (soc_info.is_populated == true) {
+		return (const soc_info_t *) &soc_info;
+	}
+
+	reg = gur_in32(dcfg_init_info->g_nxp_dcfg_addr + DCFG_SVR_OFFSET);
+
+	soc_info.mfr_id = (reg & SVR_MFR_ID_MASK) >> SVR_MFR_ID_SHIFT;
+#if defined(CONFIG_CHASSIS_3_2)
+	soc_info.family = (reg & SVR_FAMILY_MASK) >> SVR_FAMILY_SHIFT;
+	soc_info.dev_id = (reg & SVR_DEV_ID_MASK) >> SVR_DEV_ID_SHIFT;
+#endif
+	/* zero means SEC enabled. */
+	soc_info.sec_enabled =
+		(((reg & SVR_SEC_MASK) >> SVR_SEC_SHIFT) == 0) ? true : false;
+
+	soc_info.personality = (reg & SVR_PERSONALITY_MASK)
+				>> SVR_PERSONALITY_SHIFT;
+	soc_info.maj_ver = (reg & SVR_MAJ_VER_MASK) >> SVR_MAJ_VER_SHIFT;
+	soc_info.min_ver = reg & SVR_MIN_VER_MASK;
+
+	soc_info.is_populated = true;
+	return (const soc_info_t *) &soc_info;
+}
+
+void dcfg_init(dcfg_init_info_t *dcfg_init_data)
+{
+	dcfg_init_info = dcfg_init_data;
+	read_reg_porsr1();
+	get_soc_info();
+}
+
+bool is_sec_enabled(void)
+{
+	return soc_info.sec_enabled;
+}
+
+const devdisr5_info_t *get_devdisr5_info(void)
+{
+	uint32_t reg;
+
+	if (devdisr5_info.is_populated == true) {
+		return (const devdisr5_info_t *) &devdisr5_info;
+	}
+
+	reg = gur_in32(dcfg_init_info->g_nxp_dcfg_addr + DCFG_DEVDISR5_OFFSET);
+
+#if defined(CONFIG_CHASSIS_3_2)
+	devdisr5_info.ddrc1_present = (reg & DISR5_DDRC1_MASK) ? 0 : 1;
+	devdisr5_info.ddrc2_present = (reg & DISR5_DDRC2_MASK) ? 0 : 1;
+	devdisr5_info.ocram_present = (reg & DISR5_OCRAM_MASK) ? 0 : 1;
+#elif defined(CONFIG_CHASSIS_2)
+	devdisr5_info.ddrc1_present = (reg & DISR5_DDRC1_MASK) ? 0 : 1;
+	devdisr5_info.ocram_present = (reg & DISR5_OCRAM_MASK) ? 0 : 1;
+#endif
+	devdisr5_info.is_populated = true;
+
+	return (const devdisr5_info_t *) &devdisr5_info;
+}
+
+int get_clocks(struct sysinfo *sys)
+{
+	unsigned int *rcwsr0 = NULL;
+	const unsigned long sysclk = dcfg_init_info->nxp_sysclk_freq;
+	const unsigned long ddrclk = dcfg_init_info->nxp_ddrclk_freq;
+
+	rcwsr0 = (void *)(dcfg_init_info->g_nxp_dcfg_addr + RCWSR0_OFFSET);
+	sys->freq_platform = sysclk;
+	sys->freq_ddr_pll0 = ddrclk;
+	sys->freq_ddr_pll1 = ddrclk;
+
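+	/* Platform clock = SYSCLK * SYS PLL ratio / platform clock divider;
+	 * each DDR PLL clock = DDRCLK * the corresponding MEM PLL ratio,
+	 * with the ratios taken from the RCWSR0 fields read below.
+	 */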
+	sys->freq_platform *= (gur_in32(rcwsr0) >>
+				RCWSR0_SYS_PLL_RAT_SHIFT) &
+				RCWSR0_SYS_PLL_RAT_MASK;
+
+	sys->freq_platform /= dcfg_init_info->nxp_plat_clk_divider;
+
+	sys->freq_ddr_pll0 *= (gur_in32(rcwsr0) >>
+				RCWSR0_MEM_PLL_RAT_SHIFT) &
+				RCWSR0_MEM_PLL_RAT_MASK;
+	sys->freq_ddr_pll1 *= (gur_in32(rcwsr0) >>
+				RCWSR0_MEM2_PLL_RAT_SHIFT) &
+				RCWSR0_MEM2_PLL_RAT_MASK;
+	if (sys->freq_platform == 0) {
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+#ifdef NXP_SFP_ENABLED
+/*******************************************************************************
+ * Returns true if secure boot is enabled on the board
+ * mode = 0  (development mode - sb_en = 1)
+ * mode = 1 (production mode - ITS = 1)
+ ******************************************************************************/
+bool check_boot_mode_secure(uint32_t *mode)
+{
+	uint32_t val = 0U;
+	uint32_t *rcwsr = NULL;
+	*mode = 0U;
+
+	if (sfp_check_its() == 1) {
+		/* ITS =1 , Production mode */
+		*mode = 1U;
+		return true;
+	}
+
+	rcwsr = (void *)(dcfg_init_info->g_nxp_dcfg_addr + RCWSR_SB_EN_OFFSET);
+
+	val = (gur_in32(rcwsr) >> RCWSR_SBEN_SHIFT) &
+				RCWSR_SBEN_MASK;
+
+	if (val == RCWSR_SBEN_MASK) {
+		*mode = 0U;
+		return true;
+	}
+
+	return false;
+}
+#endif
+
+void error_handler(int error_code)
+{
+	 /* Dump error code in SCRATCH4 register */
+	INFO("Error in Fuse Provisioning: %x\n", error_code);
+	gur_out32((void *)
+		  (dcfg_init_info->g_nxp_dcfg_addr + DCFG_SCRATCH4_OFFSET),
+		  error_code);
+}
diff --git a/drivers/nxp/dcfg/dcfg.h b/drivers/nxp/dcfg/dcfg.h
new file mode 100644
index 0000000..161e295
--- /dev/null
+++ b/drivers/nxp/dcfg/dcfg.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef DCFG_H
+#define DCFG_H
+
+#include <endian.h>
+
+#if defined(CONFIG_CHASSIS_2)
+#include <dcfg_lsch2.h>
+#elif defined(CONFIG_CHASSIS_3_2)
+#include <dcfg_lsch3.h>
+#endif
+
+#ifdef NXP_GUR_BE
+#define gur_in32(a)		bswap32(mmio_read_32((uintptr_t)(a)))
+#define gur_out32(a, v)		mmio_write_32((uintptr_t)(a), bswap32(v))
+#elif defined(NXP_GUR_LE)
+#define gur_in32(a)		mmio_read_32((uintptr_t)(a))
+#define gur_out32(a, v)		mmio_write_32((uintptr_t)(a), v)
+#else
+#error Please define CCSR GUR register endianness
+#endif
+
+typedef struct {
+	bool is_populated;
+	uint8_t mfr_id;
+#if defined(CONFIG_CHASSIS_3_2)
+	uint8_t family;
+	uint8_t dev_id;
+#endif
+	uint8_t personality;
+	bool sec_enabled;
+	uint8_t maj_ver;
+	uint8_t min_ver;
+} soc_info_t;
+
+typedef struct {
+	bool is_populated;
+	uint8_t ocram_present;
+	uint8_t ddrc1_present;
+#if defined(CONFIG_CHASSIS_3_2)
+	uint8_t ddrc2_present;
+#endif
+} devdisr5_info_t;
+
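+/*
+ * Filled by the platform (DCFG base address, SYSCLK/DDRCLK frequencies
+ * and platform clock divider) and passed to dcfg_init() before any of
+ * the other accessors are used, since they read their register base and
+ * clock inputs from it.
+ */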
+typedef struct {
+	uint32_t porsr1;
+	uintptr_t g_nxp_dcfg_addr;
+	unsigned long nxp_sysclk_freq;
+	unsigned long nxp_ddrclk_freq;
+	unsigned int nxp_plat_clk_divider;
+} dcfg_init_info_t;
+
+
+struct sysinfo {
+	unsigned long freq_platform;
+	unsigned long freq_ddr_pll0;
+	unsigned long freq_ddr_pll1;
+};
+
+int get_clocks(struct sysinfo *sys);
+
+/* Read the PORSR1 register */
+uint32_t read_reg_porsr1(void);
+
+/*******************************************************************************
+ * Returns true if secure boot is enabled on the board
+ * mode = 0  (development mode - sb_en = 1)
+ * mode = 1 (production mode - ITS = 1)
+ ******************************************************************************/
+bool check_boot_mode_secure(uint32_t *mode);
+
+const soc_info_t *get_soc_info(void);
+const devdisr5_info_t *get_devdisr5_info(void);
+
+void dcfg_init(dcfg_init_info_t *dcfg_init_data);
+bool is_sec_enabled(void);
+
+void error_handler(int error_code);
+#endif /*	DCFG_H	*/
diff --git a/drivers/nxp/dcfg/dcfg.mk b/drivers/nxp/dcfg/dcfg.mk
new file mode 100644
index 0000000..61d1850
--- /dev/null
+++ b/drivers/nxp/dcfg/dcfg.mk
@@ -0,0 +1,28 @@
+#
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${ADD_DCFG},)
+
+ADD_DCFG		:= 1
+
+DCFG_DRIVERS_PATH	:=  ${PLAT_DRIVERS_PATH}/dcfg
+
+PLAT_INCLUDES		+= -I$(DCFG_DRIVERS_PATH)
+
+DCFG_SOURCES		+= $(DCFG_DRIVERS_PATH)/dcfg.c
+
+ifeq (${BL_COMM_DCFG_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${DCFG_SOURCES}
+else
+ifeq (${BL2_DCFG_NEEDED},yes)
+BL2_SOURCES		+= ${DCFG_SOURCES}
+endif
+ifeq (${BL31_DCFG_NEEDED},yes)
+BL31_SOURCES		+= ${DCFG_SOURCES}
+endif
+endif
+
+endif
diff --git a/drivers/nxp/dcfg/dcfg_lsch2.h b/drivers/nxp/dcfg/dcfg_lsch2.h
new file mode 100644
index 0000000..c021aa1
--- /dev/null
+++ b/drivers/nxp/dcfg/dcfg_lsch2.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef DCFG_LSCH2_H
+#define DCFG_LSCH2_H
+
+/* dcfg block register offsets and bitfields */
+#define DCFG_PORSR1_OFFSET		0x00
+#define DCFG_DEVDISR1_OFFSET		0x070
+#define DCFG_DEVDISR4_OFFSET		0x07C
+#define DCFG_DEVDISR5_OFFSET		0x080
+#define DCFG_COREDISR_OFFSET		0x094
+#define RCWSR0_OFFSET			0x100
+#define RCWSR5_OFFSET			0x118
+#define DCFG_BOOTLOCPTRL_OFFSET		0x400
+#define DCFG_BOOTLOCPTRH_OFFSET		0x404
+#define DCFG_COREDISABLEDSR_OFFSET	0x990
+#define DCFG_SCRATCH4_OFFSET		0x20C
+#define DCFG_SVR_OFFSET			0x0A4
+#define DCFG_BRR_OFFSET			0x0E4
+
+#define DCFG_RSTCR_OFFSET		0x0B0
+#define RSTCR_RESET_REQ			0x2
+
+#define DCFG_RSTRQSR1_OFFSET		0x0C8
+#define DCFG_RSTRQMR1_OFFSET		0x0C0
+
+/* DCFG DCSR Macros */
+#define DCFG_DCSR_PORCR1_OFFSET		0x0
+
+#define SVR_MFR_ID_MASK			0xF0000000
+#define SVR_MFR_ID_SHIFT		28
+#define SVR_FAMILY_MASK			0xF000000
+#define SVR_FAMILY_SHIFT		24
+#define SVR_DEV_ID_MASK			0x3F0000
+#define SVR_DEV_ID_SHIFT		16
+#define SVR_PERSONALITY_MASK		0x3E00
+#define SVR_PERSONALITY_SHIFT		9
+#define SVR_SEC_MASK			0x100
+#define SVR_SEC_SHIFT			8
+#define SVR_MAJ_VER_MASK		0xF0
+#define SVR_MAJ_VER_SHIFT		4
+#define SVR_MIN_VER_MASK		0xF
+
+#define DISR5_DDRC1_MASK		0x1
+#define DISR5_OCRAM_MASK		0x40
+
+/* DCFG registers bit masks */
+#define RCWSR0_SYS_PLL_RAT_SHIFT	25
+#define RCWSR0_SYS_PLL_RAT_MASK		0x1f
+#define RCWSR0_MEM_PLL_RAT_SHIFT	16
+#define RCWSR0_MEM_PLL_RAT_MASK		0x3f
+#define RCWSR0_MEM2_PLL_RAT_SHIFT	18
+#define RCWSR0_MEM2_PLL_RAT_MASK	0x3f
+
+#define RCWSR_SB_EN_OFFSET		RCWSR5_OFFSET
+#define RCWSR_SBEN_MASK			0x1
+#define RCWSR_SBEN_SHIFT		21
+
+/* RCW SRC NAND */
+#define RCW_SRC_NAND_MASK		(0x100)
+#define RCW_SRC_NAND_VAL		(0x100)
+#define NAND_RESERVED_MASK		(0xFC)
+#define NAND_RESERVED_1			(0x0)
+#define NAND_RESERVED_2			(0x80)
+
+/* RCW SRC NOR */
+#define RCW_SRC_NOR_MASK		(0x1F0)
+#define NOR_8B_VAL			(0x10)
+#define NOR_16B_VAL			(0x20)
+#define SD_VAL				(0x40)
+#define QSPI_VAL1			(0x44)
+#define QSPI_VAL2			(0x45)
+
+#endif /*	DCFG_LSCH2_H	*/
diff --git a/drivers/nxp/dcfg/dcfg_lsch3.h b/drivers/nxp/dcfg/dcfg_lsch3.h
new file mode 100644
index 0000000..8144542
--- /dev/null
+++ b/drivers/nxp/dcfg/dcfg_lsch3.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef DCFG_LSCH3_H
+#define DCFG_LSCH3_H
+
+/* dcfg block register offsets and bitfields */
+#define DCFG_PORSR1_OFFSET			0x00
+
+#define DCFG_DEVDISR1_OFFSET			0x70
+#define DCFG_DEVDISR1_SEC	(1 << 22)
+
+#define DCFG_DEVDISR2_OFFSET			0x74
+
+#define DCFG_DEVDISR3_OFFSET			0x78
+#define DCFG_DEVDISR3_QBMAIN	(1 << 12)
+
+#define DCFG_DEVDISR4_OFFSET			0x7C
+#define DCFG_DEVDISR4_SPI_QSPI	(1 << 4 | 1 << 5)
+
+#define DCFG_DEVDISR5_OFFSET			0x80
+#define DISR5_DDRC1_MASK	0x1
+#define DISR5_DDRC2_MASK	0x2
+#define DISR5_OCRAM_MASK	0x1000
+#define DEVDISR5_MASK_ALL_MEM	0x00001003
+#define DEVDISR5_MASK_DDR	0x00000003
+#define DEVDISR5_MASK_DBG	0x00000400
+
+#define DCFG_DEVDISR6_OFFSET			0x84
+/* #define DEVDISR6_MASK             0x00000001 */
+
+#define DCFG_COREDISR_OFFSET			0x94
+
+#define DCFG_SVR_OFFSET				0x0A4
+#define SVR_MFR_ID_MASK		0xF0000000
+#define SVR_MFR_ID_SHIFT	28
+#define SVR_FAMILY_MASK		0xF000000
+#define SVR_FAMILY_SHIFT	24
+#define SVR_DEV_ID_MASK		0x3F0000
+#define SVR_DEV_ID_SHIFT	16
+#define SVR_PERSONALITY_MASK	0x3E00
+#define SVR_PERSONALITY_SHIFT	9
+#define SVR_SEC_MASK		0x100
+#define SVR_SEC_SHIFT		8
+#define SVR_MAJ_VER_MASK	0xF0
+#define SVR_MAJ_VER_SHIFT	4
+#define SVR_MIN_VER_MASK	0xF
+
+#define RCWSR0_OFFSET				0x100
+#define RCWSR0_SYS_PLL_RAT_SHIFT	2
+#define RCWSR0_SYS_PLL_RAT_MASK		0x1f
+#define RCWSR0_MEM_PLL_RAT_SHIFT	10
+#define RCWSR0_MEM_PLL_RAT_MASK		0x3f
+#define RCWSR0_MEM2_PLL_RAT_SHIFT	18
+#define RCWSR0_MEM2_PLL_RAT_MASK	0x3f
+
+#define RCWSR5_OFFSET				0x110
+#define RCWSR9_OFFSET				0x120
+#define RCWSR_SB_EN_OFFSET	RCWSR9_OFFSET
+#define RCWSR_SBEN_MASK		0x1
+#define RCWSR_SBEN_SHIFT	10
+
+#define RCW_SR27_OFFSET				0x168
+/* DCFG register to dump error code */
+#define DCFG_SCRATCH4_OFFSET			0x20C
+#define DCFG_SCRATCHRW5_OFFSET			0x210
+#define DCFG_SCRATCHRW6_OFFSET			0x214
+#define DCFG_SCRATCHRW7_OFFSET			0x218
+#define DCFG_BOOTLOCPTRL_OFFSET			0x400
+#define DCFG_BOOTLOCPTRH_OFFSET			0x404
+#define DCFG_COREDISABLEDSR_OFFSET		0x990
+
+#endif /*	DCFG_LSCH3_H	*/
diff --git a/drivers/nxp/dcfg/scfg.h b/drivers/nxp/dcfg/scfg.h
new file mode 100644
index 0000000..81df9a6
--- /dev/null
+++ b/drivers/nxp/dcfg/scfg.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef SCFG_H
+#define SCFG_H
+
+#ifdef CONFIG_CHASSIS_2
+
+/* SCFG register offsets */
+#define SCFG_CORE0_SFT_RST_OFFSET	0x0130
+#define SCFG_SNPCNFGCR_OFFSET		0x01A4
+#define SCFG_CORESRENCR_OFFSET		0x0204
+#define SCFG_RVBAR0_0_OFFSET		0x0220
+#define SCFG_RVBAR0_1_OFFSET		0x0224
+#define SCFG_COREBCR_OFFSET		0x0680
+#define SCFG_RETREQCR_OFFSET		0x0424
+
+#define SCFG_COREPMCR_OFFSET		0x042C
+#define COREPMCR_WFIL2			0x1
+
+#define SCFG_GIC400_ADDR_ALIGN_OFFSET	0x0188
+#define SCFG_BOOTLOCPTRH_OFFSET		0x0600
+#define SCFG_BOOTLOCPTRL_OFFSET		0x0604
+#define SCFG_SCRATCHRW2_OFFSET		0x0608
+#define SCFG_SCRATCHRW3_OFFSET		0x060C
+
+/* SCFG bit fields */
+#define SCFG_SNPCNFGCR_SECRDSNP		0x80000000
+#define SCFG_SNPCNFGCR_SECWRSNP         0x40000000
+#endif /* CONFIG_CHASSIS_2 */
+
+#ifndef __ASSEMBLER__
+#include <endian.h>
+#include <lib/mmio.h>
+
+#ifdef NXP_SCFG_BE
+#define scfg_in32(a)		bswap32(mmio_read_32((uintptr_t)(a)))
+#define scfg_out32(a, v)	mmio_write_32((uintptr_t)(a), bswap32(v))
+#define scfg_setbits32(a, v)	mmio_setbits_32((uintptr_t)(a), v)
+#define scfg_clrbits32(a, v)	mmio_clrbits_32((uintptr_t)(a), v)
+#define scfg_clrsetbits32(a, clear, set)	\
+				mmio_clrsetbits_32((uintptr_t)(a), clear, set)
+#elif defined(NXP_GUR_LE)
+#define scfg_in32(a)		mmio_read_32((uintptr_t)(a))
+#define scfg_out32(a, v)	mmio_write_32((uintptr_t)(a), v)
+#define scfg_setbits32(a, v)	mmio_setbits_32((uintptr_t)(a), v)
+#define scfg_clrbits32(a, v)	mmio_clrbits_32((uintptr_t)(a), v)
+#define scfg_clrsetbits32(a, clear, set)	\
+				mmio_clrsetbits_32((uintptr_t)(a), clear, set)
+#else
+#error Please define CCSR SCFG register endianness
+#endif
+#endif	/*	__ASSEMBLER__	*/
+
+#endif	/* SCFG_H */
diff --git a/drivers/nxp/ddr/fsl-mmdc/ddr.mk b/drivers/nxp/ddr/fsl-mmdc/ddr.mk
new file mode 100644
index 0000000..e6cc7c1
--- /dev/null
+++ b/drivers/nxp/ddr/fsl-mmdc/ddr.mk
@@ -0,0 +1,19 @@
+#
+# Copyright 2021 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-----------------------------------------------------------------------------
+
+# MMDC ddr cntlr driver files
+
+DDR_DRIVERS_PATH	:=	drivers/nxp/ddr
+
+DDR_CNTLR_SOURCES	:=	${DDR_DRIVERS_PATH}/fsl-mmdc/fsl_mmdc.c \
+				${DDR_DRIVERS_PATH}/nxp-ddr/utility.c	\
+				${DDR_DRIVERS_PATH}/nxp-ddr/ddr.c	\
+				${DDR_DRIVERS_PATH}/nxp-ddr/ddrc.c
+
+PLAT_INCLUDES		+=	-I$(DDR_DRIVERS_PATH)/include	\
+				-I$(DDR_DRIVERS_PATH)/fsl-mmdc
+#------------------------------------------------
diff --git a/drivers/nxp/ddr/fsl-mmdc/fsl_mmdc.c b/drivers/nxp/ddr/fsl-mmdc/fsl_mmdc.c
new file mode 100644
index 0000000..7e6504e
--- /dev/null
+++ b/drivers/nxp/ddr/fsl-mmdc/fsl_mmdc.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+/*
+ * Generic driver for Freescale MMDC(Multi Mode DDR Controller).
+ */
+
+#include <errno.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include "ddr_io.h"
+#include <drivers/delay_timer.h>
+#include <fsl_mmdc.h>
+
+static void set_wait_for_bits_clear(void *ptr, unsigned int value,
+				    unsigned int bits)
+{
+	int timeout = 1000;
+
+	ddr_out32(ptr, value);
+
+	while (((ddr_in32(ptr) & bits) != 0) && (timeout > 0)) {
+		udelay(100);
+		timeout--;
+	}
+	if (timeout <= 0) {
+		INFO("Error: %llx", (unsigned long long)ptr);
+		INFO(" wait for clear timeout.\n");
+	}
+}
+
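+/*
+ * Minimal usage sketch (illustrative only: the mmdc_cfg values and the
+ * NXP_DDR_ADDR base address are board-specific assumptions, not values
+ * supplied by this driver):
+ *
+ *	static const struct fsl_mmdc_info mmdc_cfg = {
+ *		.mdctl = ..., .mdcfg0 = ..., .mdref = ..., [etc.]
+ *	};
+ *	mmdc_init(&mmdc_cfg, NXP_DDR_ADDR);
+ */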
+void mmdc_init(const struct fsl_mmdc_info *priv, uintptr_t nxp_ddr_addr)
+{
+	struct mmdc_regs *mmdc = (struct mmdc_regs *)nxp_ddr_addr;
+	unsigned int tmp;
+
+	/* 1. set configuration request */
+	ddr_out32(&mmdc->mdscr, MDSCR_ENABLE_CON_REQ);
+
+	/* 2. configure the desired timing parameters */
+	ddr_out32(&mmdc->mdotc, priv->mdotc);
+	ddr_out32(&mmdc->mdcfg0, priv->mdcfg0);
+	ddr_out32(&mmdc->mdcfg1, priv->mdcfg1);
+	ddr_out32(&mmdc->mdcfg2, priv->mdcfg2);
+
+	/* 3. configure DDR type and other miscellaneous parameters */
+	ddr_out32(&mmdc->mdmisc, priv->mdmisc);
+	ddr_out32(&mmdc->mpmur0, MMDC_MPMUR0_FRC_MSR);
+	ddr_out32(&mmdc->mdrwd, priv->mdrwd);
+	ddr_out32(&mmdc->mpodtctrl, priv->mpodtctrl);
+
+	/* 4. configure the required delay while leaving reset */
+	ddr_out32(&mmdc->mdor, priv->mdor);
+
+	/* 5. configure DDR physical parameters */
+	/* set row/column address width, burst length, data bus width */
+	tmp = priv->mdctl & ~(MDCTL_SDE0 | MDCTL_SDE1);
+	ddr_out32(&mmdc->mdctl, tmp);
+	/* configure address space partition */
+	ddr_out32(&mmdc->mdasp, priv->mdasp);
+
+	/* 6. perform a ZQ calibration - not needed here, doing in #8b */
+
+	/* 7. enable MMDC with the desired chip select */
+#if (DDRC_NUM_CS == 1)
+	ddr_out32(&mmdc->mdctl, tmp | MDCTL_SDE0);
+#elif (DDRC_NUM_CS == 2)
+	ddr_out32(&mmdc->mdctl, tmp | MDCTL_SDE0 | MDCTL_SDE1);
+#else
+#error "Unsupported DDRC_NUM_CS"
+#endif
+
+	/* 8a. dram init sequence: update MRs for ZQ, ODT, PRE, etc */
+	ddr_out32(&mmdc->mdscr, CMD_ADDR_LSB_MR_ADDR(8) |
+				MDSCR_ENABLE_CON_REQ |
+				CMD_LOAD_MODE_REG |
+				CMD_BANK_ADDR_2);
+
+	ddr_out32(&mmdc->mdscr, CMD_ADDR_LSB_MR_ADDR(0) |
+				MDSCR_ENABLE_CON_REQ |
+				CMD_LOAD_MODE_REG |
+				CMD_BANK_ADDR_3);
+
+	ddr_out32(&mmdc->mdscr, CMD_ADDR_LSB_MR_ADDR(4) |
+				MDSCR_ENABLE_CON_REQ |
+				CMD_LOAD_MODE_REG |
+				CMD_BANK_ADDR_1);
+
+	ddr_out32(&mmdc->mdscr, CMD_ADDR_MSB_MR_OP(0x19) |
+				CMD_ADDR_LSB_MR_ADDR(0x30) |
+				MDSCR_ENABLE_CON_REQ |
+				CMD_LOAD_MODE_REG | CMD_BANK_ADDR_0);
+
+	/* 8b. ZQ calibration */
+	ddr_out32(&mmdc->mdscr, CMD_ADDR_MSB_MR_OP(0x4) |
+				MDSCR_ENABLE_CON_REQ |
+				CMD_ZQ_CALIBRATION | CMD_BANK_ADDR_0);
+
+	set_wait_for_bits_clear(&mmdc->mpzqhwctrl, priv->mpzqhwctrl,
+				MPZQHWCTRL_ZQ_HW_FORCE);
+
+	/* 9a. calibrations now, wr lvl */
+	ddr_out32(&mmdc->mdscr,  CMD_ADDR_LSB_MR_ADDR(0x84) | MDSCR_WL_EN |
+				MDSCR_ENABLE_CON_REQ |
+				CMD_LOAD_MODE_REG | CMD_BANK_ADDR_1);
+
+	set_wait_for_bits_clear(&mmdc->mpwlgcr, MPWLGCR_HW_WL_EN,
+				MPWLGCR_HW_WL_EN);
+
+	mdelay(1);
+
+	ddr_out32(&mmdc->mdscr, CMD_ADDR_LSB_MR_ADDR(4) |
+				MDSCR_ENABLE_CON_REQ |
+				CMD_LOAD_MODE_REG | CMD_BANK_ADDR_1);
+
+	ddr_out32(&mmdc->mdscr, MDSCR_ENABLE_CON_REQ);
+
+	mdelay(1);
+
+	/* 9b. read DQS gating calibration */
+	ddr_out32(&mmdc->mdscr, CMD_ADDR_MSB_MR_OP(4) | MDSCR_ENABLE_CON_REQ |
+				CMD_PRECHARGE_BANK_OPEN | CMD_BANK_ADDR_0);
+
+	ddr_out32(&mmdc->mdscr, CMD_ADDR_LSB_MR_ADDR(4) | MDSCR_ENABLE_CON_REQ |
+				CMD_LOAD_MODE_REG | CMD_BANK_ADDR_3);
+
+	ddr_out32(&mmdc->mppdcmpr2, MPPDCMPR2_MPR_COMPARE_EN);
+
+	/* set absolute read delay offset */
+	if (priv->mprddlctl != 0) {
+		ddr_out32(&mmdc->mprddlctl, priv->mprddlctl);
+	} else {
+		ddr_out32(&mmdc->mprddlctl, MMDC_MPRDDLCTL_DEFAULT_DELAY);
+	}
+
+	set_wait_for_bits_clear(&mmdc->mpdgctrl0,
+				AUTO_RD_DQS_GATING_CALIBRATION_EN,
+				AUTO_RD_DQS_GATING_CALIBRATION_EN);
+
+	ddr_out32(&mmdc->mdscr,  MDSCR_ENABLE_CON_REQ | CMD_LOAD_MODE_REG |
+				CMD_BANK_ADDR_3);
+
+	/* 9c. read calibration */
+	ddr_out32(&mmdc->mdscr, CMD_ADDR_MSB_MR_OP(4) | MDSCR_ENABLE_CON_REQ |
+				CMD_PRECHARGE_BANK_OPEN | CMD_BANK_ADDR_0);
+	ddr_out32(&mmdc->mdscr, CMD_ADDR_LSB_MR_ADDR(4) | MDSCR_ENABLE_CON_REQ |
+				CMD_LOAD_MODE_REG | CMD_BANK_ADDR_3);
+	ddr_out32(&mmdc->mppdcmpr2,  MPPDCMPR2_MPR_COMPARE_EN);
+	set_wait_for_bits_clear(&mmdc->mprddlhwctl,
+				MPRDDLHWCTL_AUTO_RD_CALIBRATION_EN,
+				MPRDDLHWCTL_AUTO_RD_CALIBRATION_EN);
+
+	ddr_out32(&mmdc->mdscr, MDSCR_ENABLE_CON_REQ | CMD_LOAD_MODE_REG |
+				CMD_BANK_ADDR_3);
+
+	/* 10. configure power-down, self-refresh entry, exit parameters */
+	ddr_out32(&mmdc->mdpdc, priv->mdpdc);
+	ddr_out32(&mmdc->mapsr, MMDC_MAPSR_PWR_SAV_CTRL_STAT);
+
+	/* 11. ZQ config again? do nothing here */
+
+	/* 12. refresh scheme */
+	set_wait_for_bits_clear(&mmdc->mdref, priv->mdref,
+				MDREF_START_REFRESH);
+
+	/* 13. disable CON_REQ */
+	ddr_out32(&mmdc->mdscr, MDSCR_DISABLE_CFG_REQ);
+}
diff --git a/drivers/nxp/ddr/fsl-mmdc/fsl_mmdc.h b/drivers/nxp/ddr/fsl-mmdc/fsl_mmdc.h
new file mode 100644
index 0000000..31db552
--- /dev/null
+++ b/drivers/nxp/ddr/fsl-mmdc/fsl_mmdc.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef FSL_MMDC_H
+#define FSL_MMDC_H
+
+/* PHY Write Leveling Configuration and Error Status Register (MPWLGCR) */
+#define MPWLGCR_HW_WL_EN		(1 << 0)
+
+/* PHY Pre-defined Compare and CA delay-line Configuration (MPPDCMPR2) */
+#define MPPDCMPR2_MPR_COMPARE_EN	(1 << 0)
+
+
+/* MMDC PHY Read DQS gating control register 0 (MPDGCTRL0) */
+#define AUTO_RD_DQS_GATING_CALIBRATION_EN	(1 << 28)
+
+/* MMDC PHY Read Delay HW Calibration Control Register (MPRDDLHWCTL) */
+#define MPRDDLHWCTL_AUTO_RD_CALIBRATION_EN	(1 << 4)
+
+/* MMDC Core Power Saving Control and Status Register (MMDC_MAPSR) */
+#define MMDC_MAPSR_PWR_SAV_CTRL_STAT	0x00001067
+
+/* MMDC Core Refresh Control Register (MMDC_MDREF) */
+#define MDREF_START_REFRESH	(1 << 0)
+
+/* MMDC Core Special Command Register (MDSCR) */
+#define CMD_ADDR_MSB_MR_OP(x)	((x) << 24)
+#define CMD_ADDR_LSB_MR_ADDR(x)	((x) << 16)
+#define MDSCR_DISABLE_CFG_REQ	(0 << 15)
+#define MDSCR_ENABLE_CON_REQ	(1 << 15)
+#define MDSCR_CON_ACK		(1 << 14)
+#define MDSCR_WL_EN		(1 << 9)
+#define	CMD_NORMAL		(0 << 4)
+#define	CMD_PRECHARGE		(1 << 4)
+#define	CMD_AUTO_REFRESH	(2 << 4)
+#define	CMD_LOAD_MODE_REG	(3 << 4)
+#define	CMD_ZQ_CALIBRATION	(4 << 4)
+#define	CMD_PRECHARGE_BANK_OPEN	(5 << 4)
+#define	CMD_MRR			(6 << 4)
+#define CMD_BANK_ADDR_0		0x0
+#define CMD_BANK_ADDR_1		0x1
+#define CMD_BANK_ADDR_2		0x2
+#define CMD_BANK_ADDR_3		0x3
+#define CMD_BANK_ADDR_4		0x4
+#define CMD_BANK_ADDR_5		0x5
+#define CMD_BANK_ADDR_6		0x6
+#define CMD_BANK_ADDR_7		0x7
+
+/* MMDC Core Control Register (MDCTL) */
+#define MDCTL_SDE0		(U(1) << 31)
+#define MDCTL_SDE1		(1 << 30)
+
+/* MMDC PHY ZQ HW control register (MMDC_MPZQHWCTRL) */
+#define MPZQHWCTRL_ZQ_HW_FORCE	(1 << 16)
+
+/* MMDC PHY Measure Unit Register (MMDC_MPMUR0) */
+#define MMDC_MPMUR0_FRC_MSR	(1 << 11)
+
+/* MMDC PHY Read delay-lines Configuration Register (MMDC_MPRDDLCTL) */
+/* default 64 for a quarter cycle delay */
+#define MMDC_MPRDDLCTL_DEFAULT_DELAY	0x40404040
+
+/* MMDC Registers */
+struct mmdc_regs {
+	unsigned int mdctl;
+	unsigned int mdpdc;
+	unsigned int mdotc;
+	unsigned int mdcfg0;
+	unsigned int mdcfg1;
+	unsigned int mdcfg2;
+	unsigned int mdmisc;
+	unsigned int mdscr;
+	unsigned int mdref;
+	unsigned int res1[2];
+	unsigned int mdrwd;
+	unsigned int mdor;
+	unsigned int mdmrr;
+	unsigned int mdcfg3lp;
+	unsigned int mdmr4;
+	unsigned int mdasp;
+	unsigned int res2[239];
+	unsigned int maarcr;
+	unsigned int mapsr;
+	unsigned int maexidr0;
+	unsigned int maexidr1;
+	unsigned int madpcr0;
+	unsigned int madpcr1;
+	unsigned int madpsr0;
+	unsigned int madpsr1;
+	unsigned int madpsr2;
+	unsigned int madpsr3;
+	unsigned int madpsr4;
+	unsigned int madpsr5;
+	unsigned int masbs0;
+	unsigned int masbs1;
+	unsigned int res3[2];
+	unsigned int magenp;
+	unsigned int res4[239];
+	unsigned int mpzqhwctrl;
+	unsigned int mpzqswctrl;
+	unsigned int mpwlgcr;
+	unsigned int mpwldectrl0;
+	unsigned int mpwldectrl1;
+	unsigned int mpwldlst;
+	unsigned int mpodtctrl;
+	unsigned int mprddqby0dl;
+	unsigned int mprddqby1dl;
+	unsigned int mprddqby2dl;
+	unsigned int mprddqby3dl;
+	unsigned int mpwrdqby0dl;
+	unsigned int mpwrdqby1dl;
+	unsigned int mpwrdqby2dl;
+	unsigned int mpwrdqby3dl;
+	unsigned int mpdgctrl0;
+	unsigned int mpdgctrl1;
+	unsigned int mpdgdlst0;
+	unsigned int mprddlctl;
+	unsigned int mprddlst;
+	unsigned int mpwrdlctl;
+	unsigned int mpwrdlst;
+	unsigned int mpsdctrl;
+	unsigned int mpzqlp2ctl;
+	unsigned int mprddlhwctl;
+	unsigned int mpwrdlhwctl;
+	unsigned int mprddlhwst0;
+	unsigned int mprddlhwst1;
+	unsigned int mpwrdlhwst0;
+	unsigned int mpwrdlhwst1;
+	unsigned int mpwlhwerr;
+	unsigned int mpdghwst0;
+	unsigned int mpdghwst1;
+	unsigned int mpdghwst2;
+	unsigned int mpdghwst3;
+	unsigned int mppdcmpr1;
+	unsigned int mppdcmpr2;
+	unsigned int mpswdar0;
+	unsigned int mpswdrdr0;
+	unsigned int mpswdrdr1;
+	unsigned int mpswdrdr2;
+	unsigned int mpswdrdr3;
+	unsigned int mpswdrdr4;
+	unsigned int mpswdrdr5;
+	unsigned int mpswdrdr6;
+	unsigned int mpswdrdr7;
+	unsigned int mpmur0;
+	unsigned int mpwrcadl;
+	unsigned int mpdccr;
+};
+
+struct fsl_mmdc_info {
+	unsigned int mdctl;
+	unsigned int mdpdc;
+	unsigned int mdotc;
+	unsigned int mdcfg0;
+	unsigned int mdcfg1;
+	unsigned int mdcfg2;
+	unsigned int mdmisc;
+	unsigned int mdref;
+	unsigned int mdrwd;
+	unsigned int mdor;
+	unsigned int mdasp;
+	unsigned int mpodtctrl;
+	unsigned int mpzqhwctrl;
+	unsigned int mprddlctl;
+};
+
+void mmdc_init(const struct fsl_mmdc_info *priv, uintptr_t nxp_ddr_addr);
+
+#endif /* FSL_MMDC_H */
diff --git a/drivers/nxp/ddr/include/ddr.h b/drivers/nxp/ddr/include/ddr.h
new file mode 100644
index 0000000..0ef2870
--- /dev/null
+++ b/drivers/nxp/ddr/include/ddr.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef DDR_H
+#define DDR_H
+
+#include "ddr_io.h"
+#include "dimm.h"
+#include "immap.h"
+
+#ifndef DDRC_NUM_CS
+#define DDRC_NUM_CS 4
+#endif
+
+/*
+ * These maximums are fixed regardless of the number of DDR controllers
+ * or DIMMs actually used:
+ *   Max controllers = 2
+ *   Max DIMMs per controller = 2
+ *   Max chip selects (CS) = 4
+ * Not to be changed.
+ */
+#define MAX_DDRC_NUM	2
+#define MAX_DIMM_NUM	2
+#define MAX_CS_NUM	4
+
+#include "opts.h"
+#include "regs.h"
+#include "utility.h"
+
+#ifdef DDR_DEBUG
+#define debug(...) INFO(__VA_ARGS__)
+#else
+#define debug(...) VERBOSE(__VA_ARGS__)
+#endif
+
+#ifndef DDRC_NUM_DIMM
+#define DDRC_NUM_DIMM 1
+#endif
+
+#define CONFIG_CS_PER_SLOT \
+	(DDRC_NUM_CS / DDRC_NUM_DIMM)
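+/* e.g. with the defaults above (4 chip selects, 1 DIMM per controller),
+ * each DIMM slot owns all four chip selects.
+ */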
+
+/* Record of register values computed */
+struct ddr_cfg_regs {
+	struct {
+		unsigned int bnds;
+		unsigned int config;
+		unsigned int config_2;
+	} cs[MAX_CS_NUM];
+	unsigned int dec[10];
+	unsigned int timing_cfg[10];
+	unsigned int sdram_cfg[3];
+	unsigned int sdram_mode[16];
+	unsigned int md_cntl;
+	unsigned int interval;
+	unsigned int data_init;
+	unsigned int clk_cntl;
+	unsigned int init_addr;
+	unsigned int init_ext_addr;
+	unsigned int zq_cntl;
+	unsigned int wrlvl_cntl[3];
+	unsigned int ddr_sr_cntr;
+	unsigned int sdram_rcw[6];
+	unsigned int dq_map[4];
+	unsigned int eor;
+	unsigned int cdr[2];
+	unsigned int err_disable;
+	unsigned int err_int_en;
+	unsigned int tx_cfg[4];
+	unsigned int debug[64];
+};
+
+struct ddr_conf {
+	int dimm_in_use[MAX_DIMM_NUM];
+	int cs_in_use;	/* bitmask, bit 0 for cs0, bit 1 for cs1, etc. */
+	int cs_on_dimm[MAX_DIMM_NUM];	/* bitmask */
+	unsigned long long cs_base_addr[MAX_CS_NUM];
+	unsigned long long cs_size[MAX_CS_NUM];
+	unsigned long long base_addr;
+	unsigned long long total_mem;
+};
+
+struct ddr_info {
+	unsigned long clk;
+	unsigned long long mem_base;
+	unsigned int num_ctlrs;
+	unsigned int dimm_on_ctlr;
+	struct dimm_params dimm;
+	struct memctl_opt opt;
+	struct ddr_conf conf;
+	struct ddr_cfg_regs ddr_reg;
+	struct ccsr_ddr *ddr[MAX_DDRC_NUM];
+	uint16_t *phy[MAX_DDRC_NUM];
+	int *spd_addr;
+	unsigned int ip_rev;
+	uintptr_t phy_gen2_fw_img_buf;
+	void *img_loadr;
+	int warm_boot_flag;
+};
+
+struct rc_timing {
+	unsigned int speed_bin;
+	unsigned int clk_adj;
+	unsigned int wrlvl;
+};
+
+struct board_timing {
+	unsigned int rc;
+	struct rc_timing const *p;
+	unsigned int add1;
+	unsigned int add2;
+};
+
+enum warm_boot {
+	DDR_COLD_BOOT = 0,
+	DDR_WARM_BOOT = 1,
+	DDR_WRM_BOOT_NT_SUPPORTED = -1,
+};
+
+int disable_unused_ddrc(struct ddr_info *priv, int mask,
+			uintptr_t nxp_ccn_hn_f0_addr);
+int ddr_board_options(struct ddr_info *priv);
+int compute_ddrc(const unsigned long clk,
+		 const struct memctl_opt *popts,
+		 const struct ddr_conf *conf,
+		 struct ddr_cfg_regs *ddr,
+		 const struct dimm_params *dimm_params,
+		 const unsigned int ip_rev);
+int compute_ddr_phy(struct ddr_info *priv);
+int ddrc_set_regs(const unsigned long clk,
+		  const struct ddr_cfg_regs *regs,
+		  const struct ccsr_ddr *ddr,
+		  int twopass);
+int cal_board_params(struct ddr_info *priv,
+		     const struct board_timing *dimm,
+		     int len);
+/* return bit mask of used DIMM(s) */
+int ddr_get_ddr_params(struct dimm_params *pdimm, struct ddr_conf *conf);
+long long dram_init(struct ddr_info *priv
+#if defined(NXP_HAS_CCN504) || defined(NXP_HAS_CCN508)
+		    , uintptr_t nxp_ccn_hn_f0_addr
+#endif
+		);
+long long board_static_ddr(struct ddr_info *info);
+
+#endif	/* DDR_H */
diff --git a/drivers/nxp/ddr/include/ddr_io.h b/drivers/nxp/ddr/include/ddr_io.h
new file mode 100644
index 0000000..fbd7e97
--- /dev/null
+++ b/drivers/nxp/ddr/include/ddr_io.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef DDR_IO_H
+#define DDR_IO_H
+
+#include <endian.h>
+
+#include <lib/mmio.h>
+
+#define min(a, b)  (((a) > (b)) ? (b) : (a))
+
+#define max(a, b)  (((a) > (b)) ? (a) : (b))
+
+/* macro for memory barrier */
+#define mb()		asm volatile("dsb sy" : : : "memory")
+
+#ifdef NXP_DDR_BE
+#define ddr_in32(a)			bswap32(mmio_read_32((uintptr_t)(a)))
+#define ddr_out32(a, v)			mmio_write_32((uintptr_t)(a),\
+							bswap32(v))
+#elif defined(NXP_DDR_LE)
+#define ddr_in32(a)			mmio_read_32((uintptr_t)(a))
+#define ddr_out32(a, v)			mmio_write_32((uintptr_t)(a), v)
+#else
+#error Please define CCSR DDR register endianness
+#endif
+
+#define ddr_setbits32(a, v)		ddr_out32((a), ddr_in32(a) | (v))
+#define ddr_clrbits32(a, v)		ddr_out32((a), ddr_in32(a) & ~(v))
+#define ddr_clrsetbits32(a, c, s)	ddr_out32((a), (ddr_in32(a) & ~(c)) \
+						  | (s))
+
+#endif /*	DDR_IO_H	*/
diff --git a/drivers/nxp/ddr/include/dimm.h b/drivers/nxp/ddr/include/dimm.h
new file mode 100644
index 0000000..fcae179
--- /dev/null
+++ b/drivers/nxp/ddr/include/dimm.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef DIMM_H
+#define DIMM_H
+
+#define SPD_MEMTYPE_DDR4        0x0C
+
+#define DDR4_SPD_MODULETYPE_MASK        0x0f
+#define DDR4_SPD_MODULETYPE_EXT         0x00
+#define DDR4_SPD_RDIMM			0x01
+#define DDR4_SPD_UDIMM			0x02
+#define DDR4_SPD_SO_DIMM		0x03
+#define DDR4_SPD_LRDIMM			0x04
+#define DDR4_SPD_MINI_RDIMM		0x05
+#define DDR4_SPD_MINI_UDIMM		0x06
+#define DDR4_SPD_72B_SO_RDIMM		0x08
+#define DDR4_SPD_72B_SO_UDIMM		0x09
+#define DDR4_SPD_16B_SO_DIMM		0x0c
+#define DDR4_SPD_32B_SO_DIMM		0x0d
+
+#define SPD_SPA0_ADDRESS		0x36
+#define SPD_SPA1_ADDRESS		0x37
+
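+/* Convert an SPD timing value to picoseconds: 'mtb' medium-timebase units
+ * plus an 'ftb' fine-timebase correction (tenths of a ps per unit). The
+ * macro expects a 'pdimm' pointer (struct dimm_params *) providing the
+ * mtb_ps and ftb_10th_ps timebase values to be in scope at the call site.
+ */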
+#define spd_to_ps(mtb, ftb)	\
+	((mtb) * pdimm->mtb_ps + ((ftb) * pdimm->ftb_10th_ps) / 10)
+
+#ifdef DDR_DEBUG
+#define dump_spd(spd, len) {				\
+	register int i;					\
+	register unsigned char *buf = (void *)(spd);	\
+							\
+	for (i = 0; i < (len); i++) {			\
+		print_uint(i);				\
+		puts("\t: 0x");				\
+		print_hex(buf[i]);			\
+		puts("\n");				\
+	}						\
+}
+#else
+#define dump_spd(spd, len) {}
+#endif
+
+/* From JEEC Standard No. 21-C release 23A */
+struct ddr4_spd {
+	/* General Section: Bytes 0-127 */
+	unsigned char info_size_crc;	/*  0 # SPD bytes used / total */
+	unsigned char spd_rev;		/*  1 SPD revision */
+	unsigned char mem_type;		/*  2 Key Byte / mem type */
+	unsigned char module_type;	/*  3 Key Byte / Module Type */
+	unsigned char density_banks;	/*  4 Density and Banks	*/
+	unsigned char addressing;	/*  5 Addressing */
+	unsigned char package_type;	/*  6 Package type */
+	unsigned char opt_feature;	/*  7 Optional features */
+	unsigned char thermal_ref;	/*  8 Thermal and refresh */
+	unsigned char oth_opt_features;	/*  9 Other optional features */
+	unsigned char res_10;		/* 10 Reserved */
+	unsigned char module_vdd;	/* 11 Module nominal voltage */
+	unsigned char organization;	/* 12 Module Organization */
+	unsigned char bus_width;	/* 13 Module Memory Bus Width */
+	unsigned char therm_sensor;	/* 14 Module Thermal Sensor */
+	unsigned char ext_type;		/* 15 Extended module type */
+	unsigned char res_16;
+	unsigned char timebases;	/* 17 MTb and FTB */
+	unsigned char tck_min;		/* 18 tCKAVGmin */
+	unsigned char tck_max;		/* 19 TCKAVGmax */
+	unsigned char caslat_b1;	/* 20 CAS latencies, 1st byte */
+	unsigned char caslat_b2;	/* 21 CAS latencies, 2nd byte */
+	unsigned char caslat_b3;	/* 22 CAS latencies, 3rd byte */
+	unsigned char caslat_b4;	/* 23 CAS latencies, 4th byte */
+	unsigned char taa_min;		/* 24 Min CAS Latency Time */
+	unsigned char trcd_min;		/* 25 Min RAS# to CAS# Delay Time */
+	unsigned char trp_min;		/* 26 Min Row Precharge Delay Time */
+	unsigned char tras_trc_ext;	/* 27 Upper Nibbles for tRAS and tRC */
+	unsigned char tras_min_lsb;	/* 28 tRASmin, lsb */
+	unsigned char trc_min_lsb;	/* 29 tRCmin, lsb */
+	unsigned char trfc1_min_lsb;	/* 30 Min Refresh Recovery Delay Time */
+	unsigned char trfc1_min_msb;	/* 31 Min Refresh Recovery Delay Time */
+	unsigned char trfc2_min_lsb;	/* 32 Min Refresh Recovery Delay Time */
+	unsigned char trfc2_min_msb;	/* 33 Min Refresh Recovery Delay Time */
+	unsigned char trfc4_min_lsb;	/* 34 Min Refresh Recovery Delay Time */
+	unsigned char trfc4_min_msb;	/* 35 Min Refresh Recovery Delay Time */
+	unsigned char tfaw_msb;		/* 36 Upper Nibble for tFAW */
+	unsigned char tfaw_min;		/* 37 tFAW, lsb */
+	unsigned char trrds_min;	/* 38 tRRD_Smin, MTB */
+	unsigned char trrdl_min;	/* 39 tRRD_Lmin, MTB */
+	unsigned char tccdl_min;	/* 40 tCCD_Lmin, MTB */
+	unsigned char res_41[60-41];	/* 41 Reserved */
+	unsigned char mapping[78-60];	/* 60~77 Connector to SDRAM bit map */
+	unsigned char res_78[117-78];	/* 78~116, Reserved */
+	signed char fine_tccdl_min;	/* 117 Fine offset for tCCD_Lmin */
+	signed char fine_trrdl_min;	/* 118 Fine offset for tRRD_Lmin */
+	signed char fine_trrds_min;	/* 119 Fine offset for tRRD_Smin */
+	signed char fine_trc_min;	/* 120 Fine offset for tRCmin */
+	signed char fine_trp_min;	/* 121 Fine offset for tRPmin */
+	signed char fine_trcd_min;	/* 122 Fine offset for tRCDmin */
+	signed char fine_taa_min;	/* 123 Fine offset for tAAmin */
+	signed char fine_tck_max;	/* 124 Fine offset for tCKAVGmax */
+	signed char fine_tck_min;	/* 125 Fine offset for tCKAVGmin */
+	/* CRC: Bytes 126-127 */
+	unsigned char crc[2];		/* 126-127 SPD CRC */
+
+	/* Module-Specific Section: Bytes 128-255 */
+	union {
+		struct {
+			/* 128 (Unbuffered) Module Nominal Height */
+			unsigned char mod_height;
+			/* 129 (Unbuffered) Module Maximum Thickness */
+			unsigned char mod_thickness;
+			/* 130 (Unbuffered) Reference Raw Card Used */
+			unsigned char ref_raw_card;
+			/* 131 (Unbuffered) Address Mapping from
+			 *     Edge Connector to DRAM
+			 */
+			unsigned char addr_mapping;
+			/* 132~253 (Unbuffered) Reserved */
+			unsigned char res_132[254-132];
+			/* 254~255 CRC */
+			unsigned char crc[2];
+		} unbuffered;
+		struct {
+			/* 128 (Registered) Module Nominal Height */
+			unsigned char mod_height;
+			/* 129 (Registered) Module Maximum Thickness */
+			unsigned char mod_thickness;
+			/* 130 (Registered) Reference Raw Card Used */
+			unsigned char ref_raw_card;
+			/* 131 DIMM Module Attributes */
+			unsigned char modu_attr;
+			/* 132 RDIMM Thermal Heat Spreader Solution */
+			unsigned char thermal;
+			/* 133 Register Manufacturer ID Code, LSB */
+			unsigned char reg_id_lo;
+			/* 134 Register Manufacturer ID Code, MSB */
+			unsigned char reg_id_hi;
+			/* 135 Register Revision Number */
+			unsigned char reg_rev;
+			/* 136 Address mapping from register to DRAM */
+			unsigned char reg_map;
+			unsigned char ca_stren;
+			unsigned char clk_stren;
+			/* 139~253 Reserved */
+			unsigned char res_139[254-139];
+			/* 254~255 CRC */
+			unsigned char crc[2];
+		} registered;
+		struct {
+			/* 128 (Loadreduced) Module Nominal Height */
+			unsigned char mod_height;
+			/* 129 (Loadreduced) Module Maximum Thickness */
+			unsigned char mod_thickness;
+			/* 130 (Loadreduced) Reference Raw Card Used */
+			unsigned char ref_raw_card;
+			/* 131 DIMM Module Attributes */
+			unsigned char modu_attr;
+			/* 132 RDIMM Thermal Heat Spreader Solution */
+			unsigned char thermal;
+			/* 133 Register Manufacturer ID Code, LSB */
+			unsigned char reg_id_lo;
+			/* 134 Register Manufacturer ID Code, MSB */
+			unsigned char reg_id_hi;
+			/* 135 Register Revision Number */
+			unsigned char reg_rev;
+			/* 136 Address mapping from register to DRAM */
+			unsigned char reg_map;
+			/* 137 Register Output Drive Strength for CMD/Add*/
+			unsigned char reg_drv;
+			/* 138 Register Output Drive Strength for CK */
+			unsigned char reg_drv_ck;
+			/* 139 Data Buffer Revision Number */
+			unsigned char data_buf_rev;
+			/* 140 DRAM VrefDQ for Package Rank 0 */
+			unsigned char vrefqe_r0;
+			/* 141 DRAM VrefDQ for Package Rank 1 */
+			unsigned char vrefqe_r1;
+			/* 142 DRAM VrefDQ for Package Rank 2 */
+			unsigned char vrefqe_r2;
+			/* 143 DRAM VrefDQ for Package Rank 3 */
+			unsigned char vrefqe_r3;
+			/* 144 Data Buffer VrefDQ for DRAM Interface */
+			unsigned char data_intf;
+			/*
+			 * 145 Data Buffer MDQ Drive Strength and RTT
+			 * for data rate <= 1866
+			 */
+			unsigned char data_drv_1866;
+			/*
+			 * 146 Data Buffer MDQ Drive Strength and RTT
+			 * for 1866 < data rate <= 2400
+			 */
+			unsigned char data_drv_2400;
+			/*
+			 * 147 Data Buffer MDQ Drive Strength and RTT
+			 * for 2400 < data rate <= 3200
+			 */
+			unsigned char data_drv_3200;
+			/* 148 DRAM Drive Strength */
+			unsigned char dram_drv;
+			/*
+			 * 149 DRAM ODT (RTT_WR, RTT_NOM)
+			 * for data rate <= 1866
+			 */
+			unsigned char dram_odt_1866;
+			/*
+			 * 150 DRAM ODT (RTT_WR, RTT_NOM)
+			 * for 1866 < data rate <= 2400
+			 */
+			unsigned char dram_odt_2400;
+			/*
+			 * 151 DRAM ODT (RTT_WR, RTT_NOM)
+			 * for 2400 < data rate <= 3200
+			 */
+			unsigned char dram_odt_3200;
+			/*
+			 * 152 DRAM ODT (RTT_PARK)
+			 * for data rate <= 1866
+			 */
+			unsigned char dram_odt_park_1866;
+			/*
+			 * 153 DRAM ODT (RTT_PARK)
+			 * for 1866 < data rate <= 2400
+			 */
+			unsigned char dram_odt_park_2400;
+			/*
+			 * 154 DRAM ODT (RTT_PARK)
+			 * for 2400 < data rate <= 3200
+			 */
+			unsigned char dram_odt_park_3200;
+			unsigned char res_155[254-155];	/* Reserved */
+			/* 254~255 CRC */
+			unsigned char crc[2];
+		} loadreduced;
+		unsigned char uc[128]; /* 128-255 Module-Specific Section */
+	} mod_section;
+
+	unsigned char res_256[320-256];	/* 256~319 Reserved */
+
+	/* Module supplier's data: Byte 320~383 */
+	unsigned char mmid_lsb;		/* 320 Module MfgID Code LSB */
+	unsigned char mmid_msb;		/* 321 Module MfgID Code MSB */
+	unsigned char mloc;		/* 322 Mfg Location */
+	unsigned char mdate[2];		/* 323~324 Mfg Date */
+	unsigned char sernum[4];	/* 325~328 Module Serial Number */
+	unsigned char mpart[20];	/* 329~348 Mfg's Module Part Number */
+	unsigned char mrev;		/* 349 Module Revision Code */
+	unsigned char dmid_lsb;		/* 350 DRAM MfgID Code LSB */
+	unsigned char dmid_msb;		/* 351 DRAM MfgID Code MSB */
+	unsigned char stepping;		/* 352 DRAM stepping */
+	unsigned char msd[29];		/* 353~381 Mfg's Specific Data */
+	unsigned char res_382[2];	/* 382~383 Reserved */
+};
+
+/* Parameters for a DDR dimm computed from the SPD */
+struct dimm_params {
+	/* DIMM organization parameters */
+	char mpart[19];		/* guaranteed null terminated */
+
+	unsigned int n_ranks;
+	unsigned int die_density;
+	unsigned long long rank_density;
+	unsigned long long capacity;
+	unsigned int primary_sdram_width;
+	unsigned int ec_sdram_width;
+	unsigned int rdimm;
+	unsigned int package_3ds;	/* number of dies in 3DS */
+	unsigned int device_width;	/* x4, x8, x16 components */
+	unsigned int rc;
+
+	/* SDRAM device parameters */
+	unsigned int n_row_addr;
+	unsigned int n_col_addr;
+	unsigned int edc_config;	/* 0 = none, 1 = parity, 2 = ECC */
+	unsigned int bank_addr_bits;
+	unsigned int bank_group_bits;
+	unsigned int burst_lengths_bitmask;	/* BL=4 -> bit 2, BL=8 -> bit 3 */
+
+	/* mirrored DIMMs */
+	unsigned int mirrored_dimm;	/* only for ddr3 */
+
+	/* DIMM timing parameters */
+
+	int mtb_ps;	/* medium timebase ps */
+	int ftb_10th_ps; /* fine timebase, in 1/10 ps */
+	int taa_ps;	/* minimum CAS latency time */
+	int tfaw_ps;	/* four active window delay */
+
+	/*
+	 * SDRAM clock periods
+	 * The range for these is 1000-10000, so a short should be sufficient
+	 */
+	int tckmin_x_ps;
+	int tckmax_ps;
+
+	/* SPD-defined CAS latencies */
+	unsigned int caslat_x;
+
+	/* basic timing parameters */
+	int trcd_ps;
+	int trp_ps;
+	int tras_ps;
+
+	int trfc1_ps;
+	int trfc2_ps;
+	int trfc4_ps;
+	int trrds_ps;
+	int trrdl_ps;
+	int tccdl_ps;
+	int trfc_slr_ps;
+
+	int trc_ps;	/* maximum = 254 ns + .75 ns = 254750 ps */
+	int twr_ps;	/* 15ns  for all speed bins */
+
+	unsigned int refresh_rate_ps;
+	unsigned int extended_op_srt;
+
+	/* RDIMM */
+	unsigned char rcw[16];	/* Register Control Word 0-15 */
+	unsigned int dq_mapping[18];
+	unsigned int dq_mapping_ors;
+};
+
+int read_spd(unsigned char chip, void *buf, int len);
+int crc16(unsigned char *ptr, int count);
+int cal_dimm_params(const struct ddr4_spd *spd, struct dimm_params *pdimm);
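+
+/*
+ * Typical usage, mirroring parse_spd() in nxp-ddr/ddr.c (the SPD EEPROM
+ * address below is illustrative):
+ *
+ *	struct ddr4_spd spd;
+ *	struct dimm_params pdimm;
+ *
+ *	if (read_spd(spd_addr, &spd, sizeof(spd)) == 0 &&
+ *	    cal_dimm_params(&spd, &pdimm) == 0) {
+ *		... pdimm now describes the DIMM ...
+ *	}
+ */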
+
+#endif /* DIMM_H */
diff --git a/drivers/nxp/ddr/include/immap.h b/drivers/nxp/ddr/include/immap.h
new file mode 100644
index 0000000..83b4de6
--- /dev/null
+++ b/drivers/nxp/ddr/include/immap.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef DDR_IMMAP_H
+#define DDR_IMMAP_H
+
+#define	DDR_DBUS_64		0
+#define	DDR_DBUS_32		1
+#define	DDR_DBUS_16		2
+
+/*
+ * DDRC register file for DDRC 5.0 and above
+ */
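+/*
+ * Reserved members (res_<offset>) are sized so that each named register
+ * lands at the offset encoded in its name; for example, res_20[0x40 - 0x20]
+ * pads from offset 0x20 up to the dec[] array at offset 0x40.
+ */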
+struct ccsr_ddr {
+	struct {
+		unsigned int a;		 /* 0x0, 0x8, 0x10, 0x18 */
+		unsigned int res;	 /* 0x4, 0xc, 0x14, 0x1c */
+	} bnds[4];
+	unsigned char	res_20[0x40 - 0x20];
+	unsigned int	dec[10];	 /* 0x40 */
+	unsigned char	res_68[0x80 - 0x68];
+	unsigned int	csn_cfg[4];	 /* 0x80, 0x84, 0x88, 0x8c */
+	unsigned char	res_90[48];
+	unsigned int	csn_cfg_2[4];	 /* 0xc0, 0xc4, 0xc8, 0xcc */
+	unsigned char	res_d0[48];
+	unsigned int	timing_cfg_3;	 /* SDRAM Timing Configuration 3 */
+	unsigned int	timing_cfg_0;	 /* SDRAM Timing Configuration 0 */
+	unsigned int	timing_cfg_1;	 /* SDRAM Timing Configuration 1 */
+	unsigned int	timing_cfg_2;	 /* SDRAM Timing Configuration 2 */
+	unsigned int	sdram_cfg;	 /* SDRAM Control Configuration */
+	unsigned int	sdram_cfg_2;	 /* SDRAM Control Configuration 2 */
+	unsigned int	sdram_mode;	 /* SDRAM Mode Configuration */
+	unsigned int	sdram_mode_2;	 /* SDRAM Mode Configuration 2 */
+	unsigned int	sdram_md_cntl;	 /* SDRAM Mode Control */
+	unsigned int	sdram_interval;	 /* SDRAM Interval Configuration */
+	unsigned int	sdram_data_init; /* SDRAM Data initialization */
+	unsigned char	res_12c[4];
+	unsigned int	sdram_clk_cntl;	 /* SDRAM Clock Control */
+	unsigned char	res_134[20];
+	unsigned int	init_addr;	 /* training init addr */
+	unsigned int	init_ext_addr;	 /* training init extended addr */
+	unsigned char	res_150[16];
+	unsigned int	timing_cfg_4;	 /* SDRAM Timing Configuration 4 */
+	unsigned int	timing_cfg_5;	 /* SDRAM Timing Configuration 5 */
+	unsigned int	timing_cfg_6;	 /* SDRAM Timing Configuration 6 */
+	unsigned int	timing_cfg_7;	 /* SDRAM Timing Configuration 7 */
+	unsigned int	zq_cntl;	 /* ZQ calibration control*/
+	unsigned int	wrlvl_cntl;	 /* write leveling control*/
+	unsigned char	reg_178[4];
+	unsigned int	ddr_sr_cntr;	 /* self refresh counter */
+	unsigned int	ddr_sdram_rcw_1; /* Control Words 1 */
+	unsigned int	ddr_sdram_rcw_2; /* Control Words 2 */
+	unsigned char	reg_188[8];
+	unsigned int	ddr_wrlvl_cntl_2; /* write leveling control 2 */
+	unsigned int	ddr_wrlvl_cntl_3; /* write leveling control 3 */
+	unsigned char	res_198[0x1a0-0x198];
+	unsigned int	ddr_sdram_rcw_3;
+	unsigned int	ddr_sdram_rcw_4;
+	unsigned int	ddr_sdram_rcw_5;
+	unsigned int	ddr_sdram_rcw_6;
+	unsigned char	res_1b0[0x200-0x1b0];
+	unsigned int	sdram_mode_3;	 /* SDRAM Mode Configuration 3 */
+	unsigned int	sdram_mode_4;	 /* SDRAM Mode Configuration 4 */
+	unsigned int	sdram_mode_5;	 /* SDRAM Mode Configuration 5 */
+	unsigned int	sdram_mode_6;	 /* SDRAM Mode Configuration 6 */
+	unsigned int	sdram_mode_7;	 /* SDRAM Mode Configuration 7 */
+	unsigned int	sdram_mode_8;	 /* SDRAM Mode Configuration 8 */
+	unsigned char	res_218[0x220-0x218];
+	unsigned int	sdram_mode_9;	 /* SDRAM Mode Configuration 9 */
+	unsigned int	sdram_mode_10;	 /* SDRAM Mode Configuration 10 */
+	unsigned int	sdram_mode_11;	 /* SDRAM Mode Configuration 11 */
+	unsigned int	sdram_mode_12;	 /* SDRAM Mode Configuration 12 */
+	unsigned int	sdram_mode_13;	 /* SDRAM Mode Configuration 13 */
+	unsigned int	sdram_mode_14;	 /* SDRAM Mode Configuration 14 */
+	unsigned int	sdram_mode_15;	 /* SDRAM Mode Configuration 15 */
+	unsigned int	sdram_mode_16;	 /* SDRAM Mode Configuration 16 */
+	unsigned char	res_240[0x250-0x240];
+	unsigned int	timing_cfg_8;	 /* SDRAM Timing Configuration 8 */
+	unsigned int	timing_cfg_9;	 /* SDRAM Timing Configuration 9 */
+	unsigned int	timing_cfg_10;	 /* SDRAM Timing Configuration 10 */
+	unsigned char   res_258[0x260-0x25c];
+	unsigned int	sdram_cfg_3;
+	unsigned char	res_264[0x270-0x264];
+	unsigned int	sdram_md_cntl_2;
+	unsigned char	res_274[0x400-0x274];
+	unsigned int	dq_map[4];
+	unsigned char	res_410[0x800-0x410];
+	unsigned int	tx_cfg[4];
+	unsigned char	res_810[0xb20-0x810];
+	unsigned int	ddr_dsr1;	 /* Debug Status 1 */
+	unsigned int	ddr_dsr2;	 /* Debug Status 2 */
+	unsigned int	ddr_cdr1;	 /* Control Driver 1 */
+	unsigned int	ddr_cdr2;	 /* Control Driver 2 */
+	unsigned char	res_b30[200];
+	unsigned int	ip_rev1;	 /* IP Block Revision 1 */
+	unsigned int	ip_rev2;	 /* IP Block Revision 2 */
+	unsigned int	eor;		 /* Enhanced Optimization Register */
+	unsigned char	res_c04[252];
+	unsigned int	mtcr;		 /* Memory Test Control Register */
+	unsigned char	res_d04[28];
+	unsigned int	mtp[10];	 /* Memory Test Patterns */
+	unsigned char	res_d48[184];
+	unsigned int	data_err_inject_hi; /* Data Path Err Injection Mask Hi*/
+	unsigned int	data_err_inject_lo;/* Data Path Err Injection Mask Lo*/
+	unsigned int	ecc_err_inject;	 /* Data Path Err Injection Mask ECC */
+	unsigned char	res_e0c[20];
+	unsigned int	capture_data_hi; /* Data Path Read Capture High */
+	unsigned int	capture_data_lo; /* Data Path Read Capture Low */
+	unsigned int	capture_ecc;	 /* Data Path Read Capture ECC */
+	unsigned char	res_e2c[20];
+	unsigned int	err_detect;	 /* Error Detect */
+	unsigned int	err_disable;	 /* Error Disable */
+	unsigned int	err_int_en;
+	unsigned int	capture_attributes; /* Error Attrs Capture */
+	unsigned int	capture_address; /* Error Addr Capture */
+	unsigned int	capture_ext_address; /* Error Extended Addr Capture */
+	unsigned int	err_sbe;	 /* Single-Bit ECC Error Management */
+	unsigned char	res_e5c[164];
+	unsigned int	debug[64];	 /* debug_1 to debug_64 */
+};
+#endif /* DDR_IMMAP_H */
diff --git a/drivers/nxp/ddr/include/opts.h b/drivers/nxp/ddr/include/opts.h
new file mode 100644
index 0000000..f32891b
--- /dev/null
+++ b/drivers/nxp/ddr/include/opts.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef DDR_OPTS_H
+#define DDR_OPTS_H
+
+#define SDRAM_TYPE_DDR4		5	/* sdram_cfg register */
+
+#define DDR_BC4			4	/* burst chop */
+#define DDR_OTF			6	/* on-the-fly BC4 and BL8 */
+#define DDR_BL8			8	/* burst length 8 */
+
+#define DDR4_RTT_OFF		0
+#define DDR4_RTT_60_OHM		1	/* RZQ/4 */
+#define DDR4_RTT_120_OHM	2	/* RZQ/2 */
+#define DDR4_RTT_40_OHM		3	/* RZQ/6 */
+#define DDR4_RTT_240_OHM	4	/* RZQ/1 */
+#define DDR4_RTT_48_OHM		5	/* RZQ/5 */
+#define DDR4_RTT_80_OHM		6	/* RZQ/3 */
+#define DDR4_RTT_34_OHM		7	/* RZQ/7 */
+#define DDR4_RTT_WR_OFF		0
+#define DDR4_RTT_WR_120_OHM	1
+#define DDR4_RTT_WR_240_OHM	2
+#define DDR4_RTT_WR_HZ		3
+#define DDR4_RTT_WR_80_OHM	4
+#define DDR_ODT_NEVER		0x0
+#define DDR_ODT_CS		0x1
+#define DDR_ODT_ALL_OTHER_CS	0x2
+#define DDR_ODT_OTHER_DIMM	0x3
+#define DDR_ODT_ALL		0x4
+#define DDR_ODT_SAME_DIMM	0x5
+#define DDR_ODT_CS_AND_OTHER_DIMM 0x6
+#define DDR_ODT_OTHER_CS_ONSAMEDIMM 0x7
+#define DDR_BA_INTLV_CS01	0x40
+#define DDR_BA_INTLV_CS0123	0x64
+#define DDR_BA_NONE		0
+#define DDR_256B_INTLV		0x8
+
+struct memctl_opt {
+	int rdimm;
+	unsigned int dbw_cap_shift;
+	struct local_opts_s {
+		unsigned int auto_precharge;
+		unsigned int odt_rd_cfg;
+		unsigned int odt_wr_cfg;
+		unsigned int odt_rtt_norm;
+		unsigned int odt_rtt_wr;
+	} cs_odt[DDRC_NUM_CS];
+	int ctlr_intlv;
+	unsigned int ctlr_intlv_mode;
+	unsigned int ba_intlv;
+	int addr_hash;
+	int ecc_mode;
+	int ctlr_init_ecc;
+	int self_refresh_in_sleep;
+	int self_refresh_irq_en;
+	int dynamic_power;
+	/* memory data width 0 = 64-bit, 1 = 32-bit, 2 = 16-bit */
+	unsigned int data_bus_dimm;
+	unsigned int data_bus_used;	/* on individual board */
+	unsigned int burst_length;	/* BC4, OTF and BL8 */
+	int otf_burst_chop_en;
+	int mirrored_dimm;
+	int quad_rank_present;
+	int output_driver_impedance;
+	int ap_en;
+	int x4_en;
+
+	int caslat_override;
+	unsigned int caslat_override_value;
+	int addt_lat_override;
+	unsigned int addt_lat_override_value;
+
+	unsigned int clk_adj;
+	unsigned int cpo_sample;
+	unsigned int wr_data_delay;
+
+	unsigned int cswl_override;
+	unsigned int wrlvl_override;
+	unsigned int wrlvl_sample;
+	unsigned int wrlvl_start;
+	unsigned int wrlvl_ctl_2;
+	unsigned int wrlvl_ctl_3;
+
+	int half_strength_drive_en;
+	int twot_en;
+	int threet_en;
+	unsigned int bstopre;
+	unsigned int tfaw_ps;
+
+	int rtt_override;
+	unsigned int rtt_override_value;
+	unsigned int rtt_wr_override_value;
+	unsigned int rtt_park;
+
+	int auto_self_refresh_en;
+	unsigned int sr_it;
+	unsigned int ddr_cdr1;
+	unsigned int ddr_cdr2;
+
+	unsigned int trwt_override;
+	unsigned int trwt;
+	unsigned int twrt;
+	unsigned int trrt;
+	unsigned int twwt;
+
+	unsigned int vref_phy;
+	unsigned int vref_dimm;
+	unsigned int odt;
+	unsigned int phy_tx_impedance;
+	unsigned int phy_atx_impedance;
+	unsigned int skip2d;
+};
+
+#endif /* DDR_OPTS_H */
diff --git a/drivers/nxp/ddr/include/regs.h b/drivers/nxp/ddr/include/regs.h
new file mode 100644
index 0000000..e85fd8f
--- /dev/null
+++ b/drivers/nxp/ddr/include/regs.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef DDR_REG_H
+#define DDR_REG_H
+
+#define SDRAM_CS_CONFIG_EN		0x80000000
+
+/* DDR_SDRAM_CFG - DDR SDRAM Control Configuration
+ */
+#define SDRAM_CFG_MEM_EN		0x80000000
+#define SDRAM_CFG_SREN			0x40000000
+#define SDRAM_CFG_ECC_EN		0x20000000
+#define SDRAM_CFG_RD_EN			0x10000000
+#define SDRAM_CFG_SDRAM_TYPE_MASK	0x07000000
+#define SDRAM_CFG_SDRAM_TYPE_SHIFT	24
+#define SDRAM_CFG_DYN_PWR		0x00200000
+#define SDRAM_CFG_DBW_MASK		0x00180000
+#define SDRAM_CFG_DBW_SHIFT		19
+#define SDRAM_CFG_32_BW			0x00080000
+#define SDRAM_CFG_16_BW			0x00100000
+#define SDRAM_CFG_8_BW			0x00180000
+#define SDRAM_CFG_8_BE			0x00040000
+#define SDRAM_CFG_2T_EN			0x00008000
+#define SDRAM_CFG_MEM_HLT		0x00000002
+#define SDRAM_CFG_BI			0x00000001
+
+#define SDRAM_CFG2_FRC_SR		0x80000000
+#define SDRAM_CFG2_FRC_SR_CLEAR		~(SDRAM_CFG2_FRC_SR)
+#define SDRAM_CFG2_D_INIT		0x00000010
+#define SDRAM_CFG2_AP_EN		0x00000020
+#define SDRAM_CFG2_ODT_ONLY_READ	2
+
+#define SDRAM_CFG3_DDRC_RST		0x80000000
+
+#define SDRAM_INTERVAL_REFINT	0xFFFF0000
+#define SDRAM_INTERVAL_REFINT_CLEAR	~(SDRAM_INTERVAL_REFINT)
+#define SDRAM_INTERVAL_BSTOPRE	0x3FFF
+
+/* DDR_MD_CNTL */
+#define MD_CNTL_MD_EN		0x80000000
+#define MD_CNTL_CS_SEL(x)	(((x) & 0x7) << 28)
+#define MD_CNTL_MD_SEL(x)	(((x) & 0xf) << 24)
+#define MD_CNTL_CKE(x)		(((x) & 0x3) << 20)
+
+/* DDR_CDR1 */
+#define DDR_CDR1_DHC_EN	0x80000000
+#define DDR_CDR1_ODT_SHIFT	17
+#define DDR_CDR1_ODT_MASK	0x6
+#define DDR_CDR2_ODT_MASK	0x1
+#define DDR_CDR1_ODT(x) ((x & DDR_CDR1_ODT_MASK) << DDR_CDR1_ODT_SHIFT)
+#define DDR_CDR2_ODT(x) (x & DDR_CDR2_ODT_MASK)
+#define DDR_CDR2_VREF_OVRD(x)	(0x00008080 | ((((x) - 37) & 0x3F) << 8))
+#define DDR_CDR2_VREF_TRAIN_EN	0x00000080
+#define DDR_CDR2_VREF_RANGE_2	0x00000040
+#define DDR_CDR_ODT_OFF		0x0
+#define DDR_CDR_ODT_100ohm	0x1
+#define DDR_CDR_ODT_120OHM	0x2
+#define DDR_CDR_ODT_80ohm	0x3
+#define DDR_CDR_ODT_60ohm	0x4
+#define DDR_CDR_ODT_40ohm	0x5
+#define DDR_CDR_ODT_50ohm	0x6
+#define DDR_CDR_ODT_30ohm	0x7
+
+
+/* DDR ERR_DISABLE */
+#define DDR_ERR_DISABLE_APED	(1 << 8)  /* Address parity error disable */
+#define DDR_ERR_DISABLE_SBED	(1 << 2)  /* Single-bit ECC error disable */
+#define DDR_ERR_DISABLE_MBED	(1 << 3)  /* Multi-bit ECC error disable */
+
+/* Mode Registers */
+#define DDR_MR5_CA_PARITY_LAT_4_CLK	0x1 /* for DDR4-1600/1866/2133 */
+#define DDR_MR5_CA_PARITY_LAT_5_CLK	0x2 /* for DDR4-2400 */
+
+/* DDR DSR2  register */
+#define DDR_DSR_2_PHY_INIT_CMPLT	0x4
+
+/* SDRAM TIMING_CFG_10 register */
+#define DDR_TIMING_CFG_10_T_STAB	0x7FFF
+
+/* DEBUG 2 register */
+#define DDR_DBG_2_MEM_IDLE		0x00000002
+
+/* DEBUG 26 register */
+#define DDR_DEBUG_26_BIT_6		(0x1 << 6)
+#define DDR_DEBUG_26_BIT_7		(0x1 << 7)
+#define DDR_DEBUG_26_BIT_12		(0x1 << 12)
+#define DDR_DEBUG_26_BIT_13		(0x1 << 13)
+#define DDR_DEBUG_26_BIT_14		(0x1 << 14)
+#define DDR_DEBUG_26_BIT_15		(0x1 << 15)
+#define DDR_DEBUG_26_BIT_16		(0x1 << 16)
+#define DDR_DEBUG_26_BIT_17		(0x1 << 17)
+#define DDR_DEBUG_26_BIT_18		(0x1 << 18)
+#define DDR_DEBUG_26_BIT_19		(0x1 << 19)
+#define DDR_DEBUG_26_BIT_24		(0x1 << 24)
+#define DDR_DEBUG_26_BIT_25		(0x1 << 25)
+
+#define DDR_DEBUG_26_BIT_24_CLEAR	~(DDR_DEBUG_26_BIT_24)
+
+/* DEBUG_29 register */
+#define DDR_TX_BD_DIS	(1 << 10) /* Transmit Bit Deskew Disable */
+
+#define DDR_INIT_ADDR_EXT_UIA	(1 << 31)
+
+#endif /* DDR_REG_H */
diff --git a/drivers/nxp/ddr/include/utility.h b/drivers/nxp/ddr/include/utility.h
new file mode 100644
index 0000000..2e22ad5
--- /dev/null
+++ b/drivers/nxp/ddr/include/utility.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef UTILITY_H
+#define UTILITY_H
+
+#include <dcfg.h>
+
+#if defined(NXP_HAS_CCN504) || defined(NXP_HAS_CCN508)
+#define CCN_HN_F_SAM_CTL		0x8
+#define CCN_HN_F_REGION_SIZE		0x10000
+#endif
+
+unsigned long get_ddr_freq(struct sysinfo *sys, int ctrl_num);
+unsigned int get_memory_clk_ps(unsigned long clk);
+unsigned int picos_to_mclk(unsigned long data_rate, unsigned int picos);
+unsigned int get_ddrc_version(const struct ccsr_ddr *ddr);
+void print_ddr_info(struct ccsr_ddr *ddr);
+
+#endif
diff --git a/drivers/nxp/ddr/nxp-ddr/README.odt b/drivers/nxp/ddr/nxp-ddr/README.odt
new file mode 100644
index 0000000..8796302
--- /dev/null
+++ b/drivers/nxp/ddr/nxp-ddr/README.odt
@@ -0,0 +1,31 @@
+Table for dynamic ODT for DDR4 with PHY generation 2
+====================================================
+Two-slot system
+Only symmetric configurations are supported for interleaving. Non-symmetric
+configurations are possible but not covered here. Leaving the first slot empty
+is possible but prohibited here for simplicity.
++-----------------------+-------------+---------------+-----------------------------+-----------------------------+
+|     Configuration     |             |DRAM controller|           Slot 1            |           Slot 2            |
++-----------+-----------+-------------+-------+-------+--------------+--------------+--------------+--------------+
+|           |           |             |       |       |    Rank 1    |   Rank 2     |   Rank 1     |    Rank 2    |
+|  Slot 1   |  Slot 2   | Write/Read  | Write | Read  |-------+------+-------+------+-------+------+-------+------+
+|           |           |             |       |       | Write | Read | Write | Read | Write | Read | Write | Read |
++-----------+-----------+------+------+-------+-------+-------+------+-------+------+-------+------+-------+------+
+|           |           |      |Rank 1|  off  |  60   |  240  | off  |   60  | 240  |   60  |  60  |   60  |  60  |
+|           |           |Slot 1|------+-------+-------+-------+------+-------+------+-------+------+-------+------+
+|           |           |      |Rank 2|  off  |  60   |   60  | 240  |  240  | off  |   60  |  60  |   60  |  60  |
+| Dual Rank | Dual Rank |------+------+-------+-------+-------+------+-------+------+-------+------+-------+------+
+|           |           |      |Rank 1|  off  |  60   |   60  |  60  |   60  |  60  |  240  | off  |   60  | 240  |
+|           |           |Slot 2|------+-------+-------+-------+------+-------+------+-------+------+-------+------+
+|           |           |      |Rank 2|  off  |  60   |   60  |  60  |   60  |  60  |   60  | 240  |  240  | off  |
++-----------+-----------+------+------+-------+-------+-------+------+-------+------+-------+------+-------+------+
+|           |           |  Slot 1     |  off  |  60   |   80  |  off |       |      |       |      |       |      |
+|Single Rank|Single Rank|-------------+-------+-------+-------+------+-------+------+-------+------+-------+------+
+|           |           |  Slot 2     |  off  |  60   |       |      |       |      |   80  | off  |
++-----------+-----------+------+------+-------+-------+-------+------+-------+------+-------+------+
+|           |           |      |Rank 1|  off  |  80   |   80  | off  |  off  | off  |
+| Dual Rank |           |Slot 1|------+-------+-------+-------+------+-------+------+
+|           |           |      |Rank 2|  off  |  80   |   80  | off  |  off  | off  |
++-----------+-----------+-------------+-------+-------+-------+------+-------+------+
+|Single Rank|           |  Slot 1     |  off  |  80   |   80  | off  |
++-----------+-----------+-------------+-------+-------+-------+------+
diff --git a/drivers/nxp/ddr/nxp-ddr/ddr.c b/drivers/nxp/ddr/nxp-ddr/ddr.c
new file mode 100644
index 0000000..216e05c
--- /dev/null
+++ b/drivers/nxp/ddr/nxp-ddr/ddr.c
@@ -0,0 +1,930 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <errno.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <ddr.h>
+#ifndef CONFIG_DDR_NODIMM
+#include <i2c.h>
+#endif
+#include <nxp_timer.h>
+
+struct dynamic_odt {
+	unsigned int odt_rd_cfg;
+	unsigned int odt_wr_cfg;
+	unsigned int odt_rtt_norm;
+	unsigned int odt_rtt_wr;
+};
+
+#ifndef CONFIG_STATIC_DDR
+#if defined(PHY_GEN2_FW_IMAGE_BUFFER) && !defined(NXP_DDR_PHY_GEN2)
+#error Missing NXP_DDR_PHY_GEN2
+#endif
+#ifdef NXP_DDR_PHY_GEN2
+static const struct dynamic_odt single_D[4] = {
+	{	/* cs0 */
+		DDR_ODT_NEVER,
+		DDR_ODT_ALL,
+		DDR4_RTT_80_OHM,
+		DDR4_RTT_WR_OFF
+	},
+	{	/* cs1 */
+		DDR_ODT_NEVER,
+		DDR_ODT_NEVER,
+		DDR4_RTT_OFF,
+		DDR4_RTT_WR_OFF
+	},
+	{},
+	{}
+};
+
+static const struct dynamic_odt single_S[4] = {
+	{	/* cs0 */
+		DDR_ODT_NEVER,
+		DDR_ODT_ALL,
+		DDR4_RTT_80_OHM,
+		DDR4_RTT_WR_OFF
+	},
+	{},
+	{},
+	{},
+};
+
+static const struct dynamic_odt dual_DD[4] = {
+	{	/* cs0 */
+		DDR_ODT_OTHER_DIMM,
+		DDR_ODT_ALL,
+		DDR4_RTT_60_OHM,
+		DDR4_RTT_WR_240_OHM
+	},
+	{	/* cs1 */
+		DDR_ODT_OTHER_DIMM,
+		DDR_ODT_ALL,
+		DDR4_RTT_60_OHM,
+		DDR4_RTT_WR_240_OHM
+	},
+	{	/* cs2 */
+		DDR_ODT_OTHER_DIMM,
+		DDR_ODT_ALL,
+		DDR4_RTT_60_OHM,
+		DDR4_RTT_WR_240_OHM
+	},
+	{	/* cs3 */
+		DDR_ODT_OTHER_DIMM,
+		DDR_ODT_ALL,
+		DDR4_RTT_60_OHM,
+		DDR4_RTT_WR_240_OHM
+	}
+};
+
+static const struct dynamic_odt dual_SS[4] = {
+	{	/* cs0 */
+		DDR_ODT_NEVER,
+		DDR_ODT_ALL,
+		DDR4_RTT_80_OHM,
+		DDR4_RTT_WR_OFF
+	},
+	{},
+	{	/* cs2 */
+		DDR_ODT_NEVER,
+		DDR_ODT_ALL,
+		DDR4_RTT_80_OHM,
+		DDR4_RTT_WR_OFF
+	},
+	{}
+};
+
+static const struct dynamic_odt dual_D0[4] = {
+	{	/* cs0 */
+		DDR_ODT_NEVER,
+		DDR_ODT_SAME_DIMM,
+		DDR4_RTT_80_OHM,
+		DDR4_RTT_WR_OFF
+	},
+	{	/* cs1 */
+		DDR_ODT_NEVER,
+		DDR_ODT_NEVER,
+		DDR4_RTT_80_OHM,
+		DDR4_RTT_WR_OFF
+	},
+	{},
+	{}
+};
+
+static const struct dynamic_odt dual_S0[4] = {
+	{	/* cs0 */
+		DDR_ODT_NEVER,
+		DDR_ODT_CS,
+		DDR4_RTT_80_OHM,
+		DDR4_RTT_WR_OFF
+	},
+	{},
+	{},
+	{}
+};
+#else
+static const struct dynamic_odt single_D[4] = {
+	{	/* cs0 */
+		DDR_ODT_NEVER,
+		DDR_ODT_ALL,
+		DDR4_RTT_40_OHM,
+		DDR4_RTT_WR_OFF
+	},
+	{	/* cs1 */
+		DDR_ODT_NEVER,
+		DDR_ODT_NEVER,
+		DDR4_RTT_OFF,
+		DDR4_RTT_WR_OFF
+	},
+	{},
+	{}
+};
+
+static const struct dynamic_odt single_S[4] = {
+	{	/* cs0 */
+		DDR_ODT_NEVER,
+		DDR_ODT_ALL,
+		DDR4_RTT_40_OHM,
+		DDR4_RTT_WR_OFF
+	},
+	{},
+	{},
+	{},
+};
+
+static const struct dynamic_odt dual_DD[4] = {
+	{	/* cs0 */
+		DDR_ODT_NEVER,
+		DDR_ODT_SAME_DIMM,
+		DDR4_RTT_120_OHM,
+		DDR4_RTT_WR_OFF
+	},
+	{	/* cs1 */
+		DDR_ODT_OTHER_DIMM,
+		DDR_ODT_OTHER_DIMM,
+		DDR4_RTT_34_OHM,
+		DDR4_RTT_WR_OFF
+	},
+	{	/* cs2 */
+		DDR_ODT_NEVER,
+		DDR_ODT_SAME_DIMM,
+		DDR4_RTT_120_OHM,
+		DDR4_RTT_WR_OFF
+	},
+	{	/* cs3 */
+		DDR_ODT_OTHER_DIMM,
+		DDR_ODT_OTHER_DIMM,
+		DDR4_RTT_34_OHM,
+		DDR4_RTT_WR_OFF
+	}
+};
+
+static const struct dynamic_odt dual_SS[4] = {
+	{	/* cs0 */
+		DDR_ODT_OTHER_DIMM,
+		DDR_ODT_ALL,
+		DDR4_RTT_34_OHM,
+		DDR4_RTT_WR_120_OHM
+	},
+	{},
+	{	/* cs2 */
+		DDR_ODT_OTHER_DIMM,
+		DDR_ODT_ALL,
+		DDR4_RTT_34_OHM,
+		DDR4_RTT_WR_120_OHM
+	},
+	{}
+};
+
+static const struct dynamic_odt dual_D0[4] = {
+	{	/* cs0 */
+		DDR_ODT_NEVER,
+		DDR_ODT_SAME_DIMM,
+		DDR4_RTT_40_OHM,
+		DDR4_RTT_WR_OFF
+	},
+	{	/* cs1 */
+		DDR_ODT_NEVER,
+		DDR_ODT_NEVER,
+		DDR4_RTT_OFF,
+		DDR4_RTT_WR_OFF
+	},
+	{},
+	{}
+};
+
+static const struct dynamic_odt dual_S0[4] = {
+	{	/* cs0 */
+		DDR_ODT_NEVER,
+		DDR_ODT_CS,
+		DDR4_RTT_40_OHM,
+		DDR4_RTT_WR_OFF
+	},
+	{},
+	{},
+	{}
+};
+#endif /* NXP_DDR_PHY_GEN2 */
+
+/*
+ * Automatically select bank interleaving mode based on DIMMs
+ * in this order: cs0_cs1_cs2_cs3, cs0_cs1, null.
+ * This function only deals with one or two slots per controller.
+ */
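+/*
+ * Example: two dual-rank DIMMs populate cs0-cs3 (cs_in_use == 0xf) and
+ * select DDR_BA_INTLV_CS0123; a single dual-rank DIMM (cs_in_use == 0x3)
+ * selects DDR_BA_INTLV_CS01; single-rank cases fall back to DDR_BA_NONE.
+ */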
+static inline unsigned int auto_bank_intlv(const int cs_in_use,
+					   const struct dimm_params *pdimm)
+{
+	switch (cs_in_use) {
+	case 0xf:
+		return DDR_BA_INTLV_CS0123;
+	case 0x3:
+		return DDR_BA_INTLV_CS01;
+	case 0x1:
+		return DDR_BA_NONE;
+	case 0x5:
+		return DDR_BA_NONE;
+	default:
+		break;
+	}
+
+	return 0U;
+}
+
+static int cal_odt(const unsigned int clk,
+		   struct memctl_opt *popts,
+		   struct ddr_conf *conf,
+		   struct dimm_params *pdimm,
+		   const int dimm_slot_per_ctrl)
+
+{
+	unsigned int i;
+	const struct dynamic_odt *pdodt = NULL;
+
+	static const struct dynamic_odt *table[2][5] = {
+		{single_S, single_D, NULL, NULL},
+		{dual_SS, dual_DD, NULL, NULL},
+	};
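+	/*
+	 * The table is indexed as [dimm_slot_per_ctrl - 1][n_ranks - 1];
+	 * the generic dual_SS and dual_DD entries are refined below based
+	 * on which chip selects are actually in use.
+	 */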
+
+	if (dimm_slot_per_ctrl != 1 && dimm_slot_per_ctrl != 2) {
+		ERROR("Unsupported number of DIMMs\n");
+		return -EINVAL;
+	}
+
+	pdodt = table[dimm_slot_per_ctrl - 1][pdimm->n_ranks - 1];
+	if (pdodt == dual_SS) {
+		pdodt = (conf->cs_in_use == 0x5) ? dual_SS :
+			((conf->cs_in_use == 0x1) ? dual_S0 : NULL);
+	} else if (pdodt == dual_DD) {
+		pdodt = (conf->cs_in_use == 0xf) ? dual_DD :
+			((conf->cs_in_use == 0x3) ? dual_D0 : NULL);
+	}
+	if (pdodt == dual_DD && pdimm->package_3ds) {
+		ERROR("Too many 3DS DIMMs.\n");
+		return -EINVAL;
+	}
+
+	if (pdodt == NULL) {
+		ERROR("Error determing ODT.\n");
+		return -EINVAL;
+	}
+
+	/* Pick chip-select local options. */
+	for (i = 0U; i < DDRC_NUM_CS; i++) {
+		debug("cs %d\n", i);
+		popts->cs_odt[i].odt_rd_cfg = pdodt[i].odt_rd_cfg;
+		debug("     odt_rd_cfg 0x%x\n",
+			  popts->cs_odt[i].odt_rd_cfg);
+		popts->cs_odt[i].odt_wr_cfg = pdodt[i].odt_wr_cfg;
+		debug("     odt_wr_cfg 0x%x\n",
+			  popts->cs_odt[i].odt_wr_cfg);
+		popts->cs_odt[i].odt_rtt_norm = pdodt[i].odt_rtt_norm;
+		debug("     odt_rtt_norm 0x%x\n",
+			  popts->cs_odt[i].odt_rtt_norm);
+		popts->cs_odt[i].odt_rtt_wr = pdodt[i].odt_rtt_wr;
+		debug("     odt_rtt_wr 0x%x\n",
+			  popts->cs_odt[i].odt_rtt_wr);
+		popts->cs_odt[i].auto_precharge = 0;
+		debug("     auto_precharge %d\n",
+			  popts->cs_odt[i].auto_precharge);
+	}
+
+	return 0;
+}
+
+static int cal_opts(const unsigned int clk,
+		    struct memctl_opt *popts,
+		    struct ddr_conf *conf,
+		    struct dimm_params *pdimm,
+		    const int dimm_slot_per_ctrl,
+		    const unsigned int ip_rev)
+{
+	popts->rdimm = pdimm->rdimm;
+	popts->mirrored_dimm = pdimm->mirrored_dimm;
+#ifdef CONFIG_DDR_ECC_EN
+	popts->ecc_mode = pdimm->edc_config == 0x02 ? 1 : 0;
+#endif
+	popts->ctlr_init_ecc = popts->ecc_mode;
+	debug("ctlr_init_ecc %d\n", popts->ctlr_init_ecc);
+	popts->self_refresh_in_sleep = 1;
+	popts->dynamic_power = 0;
+
+	/*
+	 * check sdram width, allow platform override
+	 * 0 = 64-bit, 1 = 32-bit, 2 = 16-bit
+	 */
+	if (pdimm->primary_sdram_width == 64) {
+		popts->data_bus_dimm = DDR_DBUS_64;
+		popts->otf_burst_chop_en = 1;
+	} else if (pdimm->primary_sdram_width == 32) {
+		popts->data_bus_dimm = DDR_DBUS_32;
+		popts->otf_burst_chop_en = 0;
+	} else if (pdimm->primary_sdram_width == 16) {
+		popts->data_bus_dimm = DDR_DBUS_16;
+		popts->otf_burst_chop_en = 0;
+	} else {
+		ERROR("primary sdram width invalid!\n");
+		return -EINVAL;
+	}
+	popts->data_bus_used = popts->data_bus_dimm;
+	popts->x4_en = (pdimm->device_width == 4) ? 1 : 0;
+	debug("x4_en %d\n", popts->x4_en);
+
+	/* for RDIMM and DDR4 UDIMM/discrete memory, address parity enable */
+	if (popts->rdimm != 0) {
+		popts->ap_en = 1; /* 0 = disable,  1 = enable */
+	} else {
+		popts->ap_en = 0; /* disabled for DDR4 UDIMM/discrete default */
+	}
+
+	if (ip_rev == 0x50500) {
+		popts->ap_en = 0;
+	}
+
+	debug("ap_en %d\n", popts->ap_en);
+
+	/* BSTOPRE precharge interval uses 1/4 of refint value. */
+	popts->bstopre = picos_to_mclk(clk, pdimm->refresh_rate_ps) >> 2;
+	popts->tfaw_ps = pdimm->tfaw_ps;
+
+	return 0;
+}
+
+static void cal_intlv(const int num_ctlrs,
+		      struct memctl_opt *popts,
+		      struct ddr_conf *conf,
+		      struct dimm_params *pdimm)
+{
+#ifdef NXP_DDR_INTLV_256B
+	if (num_ctlrs == 2) {
+		popts->ctlr_intlv = 1;
+		popts->ctlr_intlv_mode = DDR_256B_INTLV;
+	}
+#endif
+	debug("ctlr_intlv %d\n", popts->ctlr_intlv);
+	debug("ctlr_intlv_mode %d\n", popts->ctlr_intlv_mode);
+
+	popts->ba_intlv = auto_bank_intlv(conf->cs_in_use, pdimm);
+	debug("ba_intlv 0x%x\n", popts->ba_intlv);
+}
+
+static int update_burst_length(struct memctl_opt *popts)
+{
+	/* Choose burst length. */
+	if ((popts->data_bus_used == DDR_DBUS_32) ||
+	    (popts->data_bus_used == DDR_DBUS_16)) {
+		/* 32-bit or 16-bit bus */
+		popts->otf_burst_chop_en = 0;
+		popts->burst_length = DDR_BL8;
+	} else if (popts->otf_burst_chop_en != 0) { /* on-the-fly burst chop */
+		popts->burst_length = DDR_OTF;	/* on-the-fly BC4 and BL8 */
+	} else {
+		popts->burst_length = DDR_BL8;
+	}
+	debug("data_bus_used %d\n", popts->data_bus_used);
+	debug("otf_burst_chop_en %d\n", popts->otf_burst_chop_en);
+	debug("burst_length 0x%x\n", popts->burst_length);
+	/*
+	 * If a reduced data width is requested, but the SPD
+	 * specifies a physically wider device, adjust the
+	 * computed dimm capacities accordingly before
+	 * assigning addresses.
+	 * 0 = 64-bit, 1 = 32-bit, 2 = 16-bit
+	 */
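+	/*
+	 * Example: a 64-bit DIMM (DDR_DBUS_64) used on a 32-bit bus
+	 * (DDR_DBUS_32) gives dbw_cap_shift = 1, so the usable rank
+	 * density is halved when addresses are assigned.
+	 */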
+	if (popts->data_bus_dimm > popts->data_bus_used) {
+		ERROR("Data bus configuration error\n");
+		return -EINVAL;
+	}
+	popts->dbw_cap_shift = popts->data_bus_used - popts->data_bus_dimm;
+	debug("dbw_cap_shift %d\n", popts->dbw_cap_shift);
+
+	return 0;
+}
+
+int cal_board_params(struct ddr_info *priv,
+		     const struct board_timing *dimm,
+		     int len)
+{
+	const unsigned long speed = priv->clk / 1000000;
+	const struct dimm_params *pdimm = &priv->dimm;
+	struct memctl_opt *popts = &priv->opt;
+	struct rc_timing const *prt = NULL;
+	struct rc_timing const *chosen = NULL;
+	int i;
+
+	for (i = 0; i < len; i++) {
+		if (pdimm->rc == dimm[i].rc) {
+			prt = dimm[i].p;
+			break;
+		}
+	}
+	if (prt == NULL) {
+		ERROR("Board parameters no match.\n");
+		return -EINVAL;
+	}
+	while (prt->speed_bin != 0) {
+		if (speed <= prt->speed_bin) {
+			chosen = prt;
+			break;
+		}
+		prt++;
+	}
+	if (chosen == NULL) {
+		ERROR("timing no match for speed %lu\n", speed);
+		return -EINVAL;
+	}
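+	/*
+	 * Replicate the selected write leveling start value into all four
+	 * byte lanes (multiply by 0x01010101) and apply the add1/add2
+	 * byte-lane adjustments from the matched board_timing entry.
+	 */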
+	popts->clk_adj = prt->clk_adj;
+	popts->wrlvl_start = prt->wrlvl;
+	popts->wrlvl_ctl_2 = (prt->wrlvl * 0x01010101 + dimm[i].add1) &
+			     0xFFFFFFFF;
+	popts->wrlvl_ctl_3 = (prt->wrlvl * 0x01010101 + dimm[i].add2) &
+			     0xFFFFFFFF;
+
+	return 0;
+}
+
+static int synthesize_ctlr(struct ddr_info *priv)
+{
+	int ret;
+
+	ret = cal_odt(priv->clk,
+		      &priv->opt,
+		      &priv->conf,
+		      &priv->dimm,
+		      priv->dimm_on_ctlr);
+	if (ret != 0) {
+		return ret;
+	}
+
+	ret = cal_opts(priv->clk,
+		       &priv->opt,
+		       &priv->conf,
+		       &priv->dimm,
+		       priv->dimm_on_ctlr,
+		       priv->ip_rev);
+
+	if (ret != 0) {
+		return ret;
+	}
+
+	cal_intlv(priv->num_ctlrs, &priv->opt, &priv->conf, &priv->dimm);
+	ret = ddr_board_options(priv);
+	if (ret != 0) {
+		ERROR("Failed matching board timing.\n");
+	}
+
+	ret = update_burst_length(&priv->opt);
+
+	return ret;
+}
+
+/* Return the bit mask of valid DIMMs found */
+static int parse_spd(struct ddr_info *priv)
+{
+	struct ddr_conf *conf = &priv->conf;
+	struct dimm_params *dimm = &priv->dimm;
+	int j, valid_mask = 0;
+
+#ifdef CONFIG_DDR_NODIMM
+	valid_mask = ddr_get_ddr_params(dimm, conf);
+	if (valid_mask < 0) {
+		ERROR("DDR params error\n");
+		return valid_mask;
+	}
+#else
+	const int *spd_addr = priv->spd_addr;
+	const int num_ctlrs = priv->num_ctlrs;
+	const int num_dimm = priv->dimm_on_ctlr;
+	struct ddr4_spd spd[2];
+	unsigned int spd_checksum[2];
+	int addr_idx = 0;
+	int spd_idx = 0;
+	int ret, addr, i;
+
+	/* Scan all DIMMs */
+	for (i = 0; i < num_ctlrs; i++) {
+		debug("Controller %d\n", i);
+		for (j = 0; j < num_dimm; j++, addr_idx++) {
+			debug("DIMM %d\n", j);
+			addr = spd_addr[addr_idx];
+			if (addr == 0) {
+				if (j == 0) {
+					ERROR("First SPD addr wrong.\n");
+					return -EINVAL;
+				}
+				continue;
+			}
+			debug("addr 0x%x\n", addr);
+			ret = read_spd(addr, &spd[spd_idx],
+				       sizeof(struct ddr4_spd));
+			if (ret != 0) {	/* invalid */
+				debug("Invalid SPD at address 0x%x\n", addr);
+				continue;
+			}
+
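+			/*
+			 * Combine the base section CRC (SPD bytes 126-127)
+			 * with the module-specific section CRC (bytes
+			 * 254-255) into one word; it is used below to verify
+			 * that all populated DIMMs are identical.
+			 */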
+			spd_checksum[spd_idx] =
+				(spd[spd_idx].crc[1] << 24) |
+				(spd[spd_idx].crc[0] << 16) |
+				(spd[spd_idx].mod_section.uc[127] << 8) |
+				(spd[spd_idx].mod_section.uc[126] << 0);
+			debug("checksum 0x%x\n", spd_checksum[spd_idx]);
+			if (spd_checksum[spd_idx] == 0) {
+				debug("Bad checksum, ignored.\n");
+				continue;
+			}
+			if (spd_idx == 0) {
+				/* first valid SPD */
+				ret = cal_dimm_params(&spd[0], dimm);
+				if (ret != 0) {
+					ERROR("SPD calculation error\n");
+					return -EINVAL;
+				}
+			}
+
+			if (spd_idx != 0 && spd_checksum[0] !=
+			    spd_checksum[spd_idx]) {
+				ERROR("Not identical DIMMs.\n");
+				return -EINVAL;
+			}
+			conf->dimm_in_use[j] = 1;
+			valid_mask |= 1 << addr_idx;
+			spd_idx = 1;
+		}
+		debug("done with controller %d\n", i);
+	}
+	switch (num_ctlrs) {
+	case 1:
+		if ((valid_mask & 0x1) == 0) {
+			ERROR("First slot cannot be empty.\n");
+			return -EINVAL;
+		}
+		break;
+	case 2:
+		switch (num_dimm) {
+		case 1:
+			if (valid_mask == 0) {
+				ERROR("Both slot empty\n");
+				return -EINVAL;
+			}
+			break;
+		case 2:
+			if (valid_mask != 0x5 &&
+			    valid_mask != 0xf &&
+			    (valid_mask & 0x7) != 0x4 &&
+			    (valid_mask & 0xd) != 0x1) {
+				ERROR("Invalid DIMM combination.\n");
+				return -EINVAL;
+			}
+			break;
+		default:
+			ERROR("Invalid number of DIMMs.\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		ERROR("Invalid number of controllers.\n");
+		return -EINVAL;
+	}
+	/* now we have valid and identical DIMMs on controllers */
+#endif	/* CONFIG_DDR_NODIMM */
+
+	debug("cal cs\n");
+	conf->cs_in_use = 0;
+	for (j = 0; j < DDRC_NUM_DIMM; j++) {
+		if (conf->dimm_in_use[j] == 0) {
+			continue;
+		}
+		switch (dimm->n_ranks) {
+		case 4:
+			ERROR("Quad-rank DIMM not supported\n");
+			return -EINVAL;
+		case 2:
+			conf->cs_on_dimm[j] = 0x3 << (j * CONFIG_CS_PER_SLOT);
+			conf->cs_in_use |= conf->cs_on_dimm[j];
+			break;
+		case 1:
+			conf->cs_on_dimm[j] = 0x1 << (j * CONFIG_CS_PER_SLOT);
+			conf->cs_in_use |= conf->cs_on_dimm[j];
+			break;
+		default:
+			ERROR("SPD error with n_ranks\n");
+			return -EINVAL;
+		}
+		debug("cs_in_use = %x\n", conf->cs_in_use);
+		debug("cs_on_dimm[%d] = %x\n", j, conf->cs_on_dimm[j]);
+	}
+#ifndef CONFIG_DDR_NODIMM
+	if (priv->dimm.rdimm != 0) {
+		NOTICE("RDIMM %s\n", priv->dimm.mpart);
+	} else {
+		NOTICE("UDIMM %s\n", priv->dimm.mpart);
+	}
+#else
+	NOTICE("%s\n", priv->dimm.mpart);
+#endif
+
+	return valid_mask;
+}
+
+static unsigned long long assign_intlv_addr(
+	const struct dimm_params *pdimm,
+	const struct memctl_opt *opt,
+	struct ddr_conf *conf,
+	const unsigned long long current_mem_base)
+{
+	int i;
+	int ctlr_density_mul = 0;
+	const unsigned long long rank_density = pdimm->rank_density >>
+						opt->dbw_cap_shift;
+	unsigned long long total_ctlr_mem;
+
+	debug("rank density 0x%llx\n", rank_density);
+	switch (opt->ba_intlv & DDR_BA_INTLV_CS0123) {
+	case DDR_BA_INTLV_CS0123:
+		ctlr_density_mul = 4;
+		break;
+	case DDR_BA_INTLV_CS01:
+		ctlr_density_mul = 2;
+		break;
+	default:
+		ctlr_density_mul = 1;
+		break;
+	}
+	debug("ctlr density mul %d\n", ctlr_density_mul);
+	switch (opt->ctlr_intlv_mode) {
+	case DDR_256B_INTLV:
+		total_ctlr_mem = 2 * ctlr_density_mul * rank_density;
+		break;
+	default:
+		ERROR("Unknown interleaving mode");
+		return 0;
+	}
+	conf->base_addr = current_mem_base;
+	conf->total_mem = total_ctlr_mem;
+
+	/* overwrite cs_in_use bitmask with controller interleaving */
+	conf->cs_in_use = (1 << ctlr_density_mul) - 1;
+	debug("Overwrite cs_in_use as %x\n", conf->cs_in_use);
+
+	/* Fill addr with each cs in use */
+	for (i = 0; i < ctlr_density_mul; i++) {
+		conf->cs_base_addr[i] = current_mem_base;
+		conf->cs_size[i] = total_ctlr_mem;
+		debug("CS %d\n", i);
+		debug("    base_addr 0x%llx\n", conf->cs_base_addr[i]);
+		debug("    size 0x%llx\n", conf->cs_size[i]);
+	}
+
+	return total_ctlr_mem;
+}
+
+static unsigned long long assign_non_intlv_addr(
+	const struct dimm_params *pdimm,
+	const struct memctl_opt *opt,
+	struct ddr_conf *conf,
+	unsigned long long current_mem_base)
+{
+	int i;
+	const unsigned long long rank_density = pdimm->rank_density >>
+						opt->dbw_cap_shift;
+	unsigned long long total_ctlr_mem = 0ULL;
+
+	debug("rank density 0x%llx\n", rank_density);
+	conf->base_addr = current_mem_base;
+
+	/* assign each cs */
+	switch (opt->ba_intlv & DDR_BA_INTLV_CS0123) {
+	case DDR_BA_INTLV_CS0123:
+		for (i = 0; i < DDRC_NUM_CS; i++) {
+			conf->cs_base_addr[i] = current_mem_base;
+			conf->cs_size[i] = rank_density << 2;
+			total_ctlr_mem += rank_density;
+		}
+		break;
+	case DDR_BA_INTLV_CS01:
+		for (i = 0; ((conf->cs_in_use & (1 << i)) != 0) && i < 2; i++) {
+			conf->cs_base_addr[i] = current_mem_base;
+			conf->cs_size[i] = rank_density << 1;
+			total_ctlr_mem += rank_density;
+		}
+		current_mem_base += total_ctlr_mem;
+		for (; ((conf->cs_in_use & (1 << i)) != 0) && i < DDRC_NUM_CS;
+		     i++) {
+			conf->cs_base_addr[i] = current_mem_base;
+			conf->cs_size[i] = rank_density;
+			total_ctlr_mem += rank_density;
+			current_mem_base += rank_density;
+		}
+		break;
+	case DDR_BA_NONE:
+		for (i = 0; ((conf->cs_in_use & (1 << i)) != 0) &&
+			     (i < DDRC_NUM_CS); i++) {
+			conf->cs_base_addr[i] = current_mem_base;
+			conf->cs_size[i] = rank_density;
+			current_mem_base += rank_density;
+			total_ctlr_mem += rank_density;
+		}
+		break;
+	default:
+		ERROR("Unsupported bank interleaving\n");
+		return 0;
+	}
+	for (i = 0; ((conf->cs_in_use & (1 << i)) != 0) &&
+		     (i < DDRC_NUM_CS); i++) {
+		debug("CS %d\n", i);
+		debug("    base_addr 0x%llx\n", conf->cs_base_addr[i]);
+		debug("    size 0x%llx\n", conf->cs_size[i]);
+	}
+
+	return total_ctlr_mem;
+}
+
+unsigned long long assign_addresses(struct ddr_info *priv)
+		   __attribute__ ((weak));
+
+unsigned long long assign_addresses(struct ddr_info *priv)
+{
+	struct memctl_opt *opt = &priv->opt;
+	const struct dimm_params *dimm = &priv->dimm;
+	struct ddr_conf *conf = &priv->conf;
+	unsigned long long current_mem_base = priv->mem_base;
+	unsigned long long total_mem;
+
+	total_mem = 0ULL;
+	debug("ctlr_intlv %d\n", opt->ctlr_intlv);
+	if (opt->ctlr_intlv != 0) {
+		total_mem = assign_intlv_addr(dimm, opt, conf,
+					      current_mem_base);
+	} else {
+		/*
+		 * Simple linear assignment if memory controllers are not
+		 * interleaved. This is only valid for SoCs with single DDRC.
+		 */
+		total_mem = assign_non_intlv_addr(dimm, opt, conf,
+						  current_mem_base);
+	}
+	conf->total_mem = total_mem;
+	debug("base 0x%llx\n", current_mem_base);
+	debug("Total mem by assignment is 0x%llx\n", total_mem);
+
+	return total_mem;
+}
+
+static int cal_ddrc_regs(struct ddr_info *priv)
+{
+	int ret;
+
+	ret = compute_ddrc(priv->clk,
+			   &priv->opt,
+			   &priv->conf,
+			   &priv->ddr_reg,
+			   &priv->dimm,
+			   priv->ip_rev);
+	if (ret != 0) {
+		ERROR("Calculating DDR registers failed\n");
+	}
+
+	return ret;
+}
+
+#endif /* CONFIG_STATIC_DDR */
+
+static int write_ddrc_regs(struct ddr_info *priv)
+{
+	int i;
+	int ret;
+
+	for (i = 0; i < priv->num_ctlrs; i++) {
+		ret = ddrc_set_regs(priv->clk, &priv->ddr_reg, priv->ddr[i], 0);
+		if (ret != 0) {
+			ERROR("Writing DDR register(s) failed\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+long long dram_init(struct ddr_info *priv
+#if defined(NXP_HAS_CCN504) || defined(NXP_HAS_CCN508)
+		    , uintptr_t nxp_ccn_hn_f0_addr
+#endif
+		)
+{
+	uint64_t time __unused;
+	long long dram_size;
+	int ret;
+	const uint64_t time_base = get_timer_val(0);
+	unsigned int ip_rev = get_ddrc_version(priv->ddr[0]);
+
+	int valid_spd_mask __unused;
+	int scratch = 0x0;
+
+	priv->ip_rev = ip_rev;
+
+#ifndef CONFIG_STATIC_DDR
+	INFO("time base %llu ms\n", time_base);
+	debug("Parse DIMM SPD(s)\n");
+	valid_spd_mask = parse_spd(priv);
+
+	if (valid_spd_mask < 0) {
+		ERROR("Parsing DIMM Error\n");
+		return valid_spd_mask;
+	}
+
+#if defined(NXP_HAS_CCN504) || defined(NXP_HAS_CCN508)
+	if (priv->num_ctlrs == 2 || priv->num_ctlrs == 1) {
+		ret = disable_unused_ddrc(priv, valid_spd_mask,
+					  nxp_ccn_hn_f0_addr);
+		if (ret != 0) {
+			return ret;
+		}
+	}
+#endif
+
+	time = get_timer_val(time_base);
+	INFO("Time after parsing SPD %llu ms\n", time);
+	debug("Synthesize configurations\n");
+	ret = synthesize_ctlr(priv);
+	if (ret != 0) {
+		ERROR("Synthesize config error\n");
+		return ret;
+	}
+
+	debug("Assign binding addresses\n");
+	dram_size = assign_addresses(priv);
+	if (dram_size == 0) {
+		ERROR("Assigning address error\n");
+		return -EINVAL;
+	}
+
+	debug("Calculate controller registers\n");
+	ret = cal_ddrc_regs(priv);
+	if (ret != 0) {
+		ERROR("Calculate register error\n");
+		return ret;
+	}
+
+	ret = compute_ddr_phy(priv);
+	if (ret != 0) {
+		ERROR("Calculating DDR PHY registers failed.\n");
+	}
+
+#else
+	dram_size = board_static_ddr(priv);
+	if (dram_size == 0) {
+		ERROR("Error getting static DDR settings.\n");
+		return -EINVAL;
+	}
+#endif
+
+	if (priv->warm_boot_flag == DDR_WARM_BOOT) {
+		scratch = (priv->ddr_reg).sdram_cfg[1];
+		scratch = scratch & ~(SDRAM_CFG2_D_INIT);
+		priv->ddr_reg.sdram_cfg[1] = scratch;
+	}
+
+	time = get_timer_val(time_base);
+	INFO("Time before programming controller %llu ms\n", time);
+	debug("Program controller registers\n");
+	ret = write_ddrc_regs(priv);
+	if (ret != 0) {
+		ERROR("Programing DDRC error\n");
+		return ret;
+	}
+
+	puts("");
+	NOTICE("%lld GB ", dram_size >> 30);
+	print_ddr_info(priv->ddr[0]);
+
+	time = get_timer_val(time_base);
+	INFO("Time used by DDR driver %llu ms\n", time);
+
+	return dram_size;
+}
diff --git a/drivers/nxp/ddr/nxp-ddr/ddr.mk b/drivers/nxp/ddr/nxp-ddr/ddr.mk
new file mode 100644
index 0000000..866c092
--- /dev/null
+++ b/drivers/nxp/ddr/nxp-ddr/ddr.mk
@@ -0,0 +1,79 @@
+#
+# Copyright 2021 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+DDR_DRIVERS_PATH	:= ${PLAT_DRIVERS_PATH}/ddr
+
+ifeq ($(PLAT_DDR_PHY), PHY_GEN2)
+$(eval $(call add_define, PHY_GEN2))
+PLAT_DDR_PHY_DIR		:= phy-gen2
+ifeq (${APPLY_MAX_CDD},yes)
+$(eval $(call add_define,NXP_APPLY_MAX_CDD))
+endif
+
+ifeq (${ERRATA_DDR_A011396}, 1)
+$(eval $(call add_define,ERRATA_DDR_A011396))
+endif
+
+ifeq (${ERRATA_DDR_A050450}, 1)
+$(eval $(call add_define,ERRATA_DDR_A050450))
+endif
+
+endif
+
+ifeq ($(PLAT_DDR_PHY), PHY_GEN1)
+PLAT_DDR_PHY_DIR		:= phy-gen1
+
+ifeq (${ERRATA_DDR_A008511},1)
+$(eval $(call add_define,ERRATA_DDR_A008511))
+endif
+
+ifeq (${ERRATA_DDR_A009803},1)
+$(eval $(call add_define,ERRATA_DDR_A009803))
+endif
+
+ifeq (${ERRATA_DDR_A009942},1)
+$(eval $(call add_define,ERRATA_DDR_A009942))
+endif
+
+ifeq (${ERRATA_DDR_A010165},1)
+$(eval $(call add_define,ERRATA_DDR_A010165))
+endif
+
+endif
+
+ifeq ($(DDR_BIST), yes)
+$(eval $(call add_define, BIST_EN))
+endif
+
+ifeq ($(DDR_DEBUG), yes)
+$(eval $(call add_define, DDR_DEBUG))
+endif
+
+ifeq ($(DDR_PHY_DEBUG), yes)
+$(eval $(call add_define, DDR_PHY_DEBUG))
+endif
+
+ifeq ($(DEBUG_PHY_IO), yes)
+$(eval $(call add_define, DEBUG_PHY_IO))
+endif
+
+ifeq ($(DEBUG_WARM_RESET), yes)
+$(eval $(call add_define, DEBUG_WARM_RESET))
+endif
+
+ifeq ($(DEBUG_DDR_INPUT_CONFIG), yes)
+$(eval $(call add_define, DEBUG_DDR_INPUT_CONFIG))
+endif
+
+DDR_CNTLR_SOURCES	:= $(DDR_DRIVERS_PATH)/nxp-ddr/ddr.c \
+			   $(DDR_DRIVERS_PATH)/nxp-ddr/ddrc.c \
+			   $(DDR_DRIVERS_PATH)/nxp-ddr/dimm.c \
+			   $(DDR_DRIVERS_PATH)/nxp-ddr/regs.c \
+			   $(DDR_DRIVERS_PATH)/nxp-ddr/utility.c \
+			   $(DDR_DRIVERS_PATH)/$(PLAT_DDR_PHY_DIR)/phy.c
+
+PLAT_INCLUDES		+= -I$(DDR_DRIVERS_PATH)/nxp-ddr \
+			   -I$(DDR_DRIVERS_PATH)/include
diff --git a/drivers/nxp/ddr/nxp-ddr/ddrc.c b/drivers/nxp/ddr/nxp-ddr/ddrc.c
new file mode 100644
index 0000000..17a2b6a
--- /dev/null
+++ b/drivers/nxp/ddr/nxp-ddr/ddrc.c
@@ -0,0 +1,594 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <common/debug.h>
+#include <ddr.h>
+#include <drivers/delay_timer.h>
+#include <immap.h>
+
+#define BIST_CR		0x80060000
+#define BIST_CR_EN	0x80000000
+#define BIST_CR_STAT	0x00000001
+#define CTLR_INTLV_MASK	0x20000000
+
+#pragma weak run_bist
+
+bool run_bist(void)
+{
+#ifdef BIST_EN
+	return true;
+#else
+	return false;
+#endif
+}
+
+/*
+ * Perform built-in self test on memory
+ * timeout value is in units of 10 ms
+ */
+int bist(const struct ccsr_ddr *ddr, int timeout)
+{
+	const unsigned int test_pattern[10] = {
+		0xffffffff,
+		0x00000000,
+		0xaaaaaaaa,
+		0x55555555,
+		0xcccccccc,
+		0x33333333,
+		0x12345678,
+		0xabcdef01,
+		0xaa55aa55,
+		0x55aa55aa
+	};
+	unsigned int mtcr, err_detect, err_sbe;
+	unsigned int cs0_config;
+	unsigned int csn_bnds[4];
+	int ret = 0;
+	uint32_t i;
+#ifdef CONFIG_DDR_ADDR_DEC
+	uint32_t dec_9 = ddr_in32(&ddr->dec[9]);
+	uint32_t pos = 0U;
+	uint32_t map_save = 0U;
+	uint32_t temp32 = 0U;
+	uint32_t map, shift, highest;
+#endif
+
+	cs0_config = ddr_in32(&ddr->csn_cfg[0]);
+	if ((cs0_config & CTLR_INTLV_MASK) != 0U) {
+		/* set bnds to non-interleaving */
+		for (i = 0U; i < 4U; i++) {
+			csn_bnds[i] = ddr_in32(&ddr->bnds[i].a);
+			ddr_out32(&ddr->bnds[i].a,
+				  (csn_bnds[i] & U(0xfffefffe)) >> 1U);
+		}
+		ddr_out32(&ddr->csn_cfg[0], cs0_config & ~CTLR_INTLV_MASK);
+#ifdef CONFIG_DDR_ADDR_DEC
+		if ((dec_9 & 0x1U) != 0U) {
+			highest = (dec_9 >> 26U) == U(0x3F) ? 0U : dec_9 >> 26U;
+			pos = 37U;
+			for (i = 0U; i < 36U; i++) {      /* Go through all 37 */
+				if ((i % 4U) == 0U) {
+					temp32 = ddr_in32(&ddr->dec[i >> 2U]);
+				}
+				shift = (3U - i % 4U) * 8U + 2U;
+				map = (temp32 >> shift) & U(0x3F);
+				if (map > highest && map != U(0x3F)) {
+					highest = map;
+					pos = i;
+				}
+			}
+			debug("\nFound highest position %d, mapping to %d, ",
+			      pos, highest);
+			map_save = ddr_in32(&ddr->dec[pos >> 2]);
+			shift = (3U - pos % 4U) * 8U + 2U;
+			debug("in dec[%d], bit %d (0x%x)\n",
+			      pos >> 2U, shift, map_save);
+			temp32 = map_save & ~(U(0x3F) << shift);
+			temp32 |= 8U << shift;
+			ddr_out32(&ddr->dec[pos >> 2U], temp32);
+			timeout <<= 2U;
+			debug("Increase wait time to %d ms\n", timeout * 10);
+		}
+#endif
+	}
+	for (i = 0U; i < 10U; i++) {
+		ddr_out32(&ddr->mtp[i], test_pattern[i]);
+	}
+	mtcr = BIST_CR;
+	ddr_out32(&ddr->mtcr, mtcr);
+	do {
+		mdelay(10);
+		mtcr = ddr_in32(&ddr->mtcr);
+	} while (timeout-- > 0 && ((mtcr & BIST_CR_EN) != 0));
+	if (timeout <= 0) {
+		ERROR("Timeout\n");
+	} else {
+		debug("Timer remains %d\n", timeout);
+	}
+
+	err_detect = ddr_in32(&ddr->err_detect);
+	err_sbe = ddr_in32(&ddr->err_sbe);
+	if (err_detect != 0U || ((err_sbe & U(0xffff)) != 0U)) {
+		ERROR("ECC error detected\n");
+		ret = -EIO;
+	}
+
+	if ((cs0_config & CTLR_INTLV_MASK) != 0) {
+		for (i = 0U; i < 4U; i++) {
+			ddr_out32(&ddr->bnds[i].a, csn_bnds[i]);
+		}
+		ddr_out32(&ddr->csn_cfg[0], cs0_config);
+#ifdef CONFIG_DDR_ADDR_DEC
+		if ((dec_9 & U(0x1)) != 0U) {
+			ddr_out32(&ddr->dec[pos >> 2], map_save);
+		}
+#endif
+	}
+	if ((mtcr & BIST_CR_STAT) != 0) {
+		ERROR("Built-in self test failed\n");
+		ret = -EIO;
+	} else {
+		NOTICE("Build-in self test passed\n");
+	}
+
+	return ret;
+}
+
+void dump_ddrc(unsigned int *ddr)
+{
+#ifdef DDR_DEBUG
+	uint32_t i;
+	unsigned long val;
+
+	for (i = 0U; i < U(0x400); i++, ddr++) {
+		val = ddr_in32(ddr);
+		if (val != 0U) {	/* skip zeros */
+			debug("*0x%lx = 0x%lx\n", (unsigned long)ddr, val);
+		}
+	}
+#endif
+}
+
+#ifdef ERRATA_DDR_A009803
+static void set_wait_for_bits_clear(const void *ptr,
+				    unsigned int value,
+				    unsigned int bits)
+{
+	int timeout = 1000;
+
+	ddr_out32(ptr, value);
+	do {
+		udelay(100);
+	} while (timeout-- > 0 && ((ddr_in32(ptr) & bits) != 0));
+
+	if (timeout <= 0) {
+		ERROR("wait for clear timeout.\n");
+	}
+}
+#endif
+
+#if (DDRC_NUM_CS > 4)
+#error Invalid setting for DDRC_NUM_CS
+#endif
+
+/*
+ * If supported by the platform, writing to DDR controller takes two
+ * passes to deassert DDR reset to comply with JEDEC specs for RDIMMs.
+ */
+int ddrc_set_regs(const unsigned long clk,
+		  const struct ddr_cfg_regs *regs,
+		  const struct ccsr_ddr *ddr,
+		  int twopass)
+{
+	unsigned int i, bus_width;
+	unsigned int temp_sdram_cfg;
+	unsigned int total_mem_per_ctrl, total_mem_per_ctrl_adj;
+	const int mod_bnds = regs->cs[0].config & CTLR_INTLV_MASK;
+	int timeout;
+	int ret = 0;
+#if defined(ERRATA_DDR_A009942) || defined(ERRATA_DDR_A010165)
+	unsigned long ddr_freq;
+	unsigned int tmp;
+#ifdef ERRATA_DDR_A009942
+	unsigned int check;
+	unsigned int cpo_min = U(0xff);
+	unsigned int cpo_max = 0U;
+#endif
+#endif
+
+	if (twopass == 2U) {
+		goto after_reset;
+	}
+
+	/* Set cdr1 first in case 0.9v VDD is enabled for some SoCs*/
+	ddr_out32(&ddr->ddr_cdr1, regs->cdr[0]);
+
+	ddr_out32(&ddr->sdram_clk_cntl, regs->clk_cntl);
+
+	for (i = 0U; i < DDRC_NUM_CS; i++) {
+		if (mod_bnds != 0U) {
+			ddr_out32(&ddr->bnds[i].a,
+				  (regs->cs[i].bnds & U(0xfffefffe)) >> 1U);
+		} else {
+			ddr_out32(&ddr->bnds[i].a, regs->cs[i].bnds);
+		}
+		ddr_out32(&ddr->csn_cfg_2[i], regs->cs[i].config_2);
+	}
+
+	ddr_out32(&ddr->timing_cfg_0, regs->timing_cfg[0]);
+	ddr_out32(&ddr->timing_cfg_1, regs->timing_cfg[1]);
+	ddr_out32(&ddr->timing_cfg_2, regs->timing_cfg[2]);
+	ddr_out32(&ddr->timing_cfg_3, regs->timing_cfg[3]);
+	ddr_out32(&ddr->timing_cfg_4, regs->timing_cfg[4]);
+	ddr_out32(&ddr->timing_cfg_5, regs->timing_cfg[5]);
+	ddr_out32(&ddr->timing_cfg_6, regs->timing_cfg[6]);
+	ddr_out32(&ddr->timing_cfg_7, regs->timing_cfg[7]);
+	ddr_out32(&ddr->timing_cfg_8, regs->timing_cfg[8]);
+	ddr_out32(&ddr->timing_cfg_9, regs->timing_cfg[9]);
+	ddr_out32(&ddr->zq_cntl, regs->zq_cntl);
+	for (i = 0U; i < 4U; i++) {
+		ddr_out32(&ddr->dq_map[i], regs->dq_map[i]);
+	}
+	ddr_out32(&ddr->sdram_cfg_3, regs->sdram_cfg[2]);
+	ddr_out32(&ddr->sdram_mode, regs->sdram_mode[0]);
+	ddr_out32(&ddr->sdram_mode_2, regs->sdram_mode[1]);
+	ddr_out32(&ddr->sdram_mode_3, regs->sdram_mode[2]);
+	ddr_out32(&ddr->sdram_mode_4, regs->sdram_mode[3]);
+	ddr_out32(&ddr->sdram_mode_5, regs->sdram_mode[4]);
+	ddr_out32(&ddr->sdram_mode_6, regs->sdram_mode[5]);
+	ddr_out32(&ddr->sdram_mode_7, regs->sdram_mode[6]);
+	ddr_out32(&ddr->sdram_mode_8, regs->sdram_mode[7]);
+	ddr_out32(&ddr->sdram_mode_9, regs->sdram_mode[8]);
+	ddr_out32(&ddr->sdram_mode_10, regs->sdram_mode[9]);
+	ddr_out32(&ddr->sdram_mode_11, regs->sdram_mode[10]);
+	ddr_out32(&ddr->sdram_mode_12, regs->sdram_mode[11]);
+	ddr_out32(&ddr->sdram_mode_13, regs->sdram_mode[12]);
+	ddr_out32(&ddr->sdram_mode_14, regs->sdram_mode[13]);
+	ddr_out32(&ddr->sdram_mode_15, regs->sdram_mode[14]);
+	ddr_out32(&ddr->sdram_mode_16, regs->sdram_mode[15]);
+	ddr_out32(&ddr->sdram_md_cntl, regs->md_cntl);
+#ifdef ERRATA_DDR_A009663
+	ddr_out32(&ddr->sdram_interval,
+		  regs->interval & ~SDRAM_INTERVAL_BSTOPRE);
+#else
+	ddr_out32(&ddr->sdram_interval, regs->interval);
+#endif
+	ddr_out32(&ddr->sdram_data_init, regs->data_init);
+	if (regs->eor != 0) {
+		ddr_out32(&ddr->eor, regs->eor);
+	}
+
+	ddr_out32(&ddr->wrlvl_cntl, regs->wrlvl_cntl[0]);
+#ifndef NXP_DDR_EMU
+	/*
+	 * Skip these two registers if running on emulator
+	 * because emulator doesn't have skew between bytes.
+	 */
+
+	if (regs->wrlvl_cntl[1] != 0) {
+		ddr_out32(&ddr->ddr_wrlvl_cntl_2, regs->wrlvl_cntl[1]);
+	}
+	if (regs->wrlvl_cntl[2] != 0) {
+		ddr_out32(&ddr->ddr_wrlvl_cntl_3, regs->wrlvl_cntl[2]);
+	}
+#endif
+
+	ddr_out32(&ddr->ddr_sr_cntr, regs->ddr_sr_cntr);
+	ddr_out32(&ddr->ddr_sdram_rcw_1, regs->sdram_rcw[0]);
+	ddr_out32(&ddr->ddr_sdram_rcw_2, regs->sdram_rcw[1]);
+	ddr_out32(&ddr->ddr_sdram_rcw_3, regs->sdram_rcw[2]);
+	ddr_out32(&ddr->ddr_sdram_rcw_4, regs->sdram_rcw[3]);
+	ddr_out32(&ddr->ddr_sdram_rcw_5, regs->sdram_rcw[4]);
+	ddr_out32(&ddr->ddr_sdram_rcw_6, regs->sdram_rcw[5]);
+	ddr_out32(&ddr->ddr_cdr2, regs->cdr[1]);
+	ddr_out32(&ddr->sdram_cfg_2, regs->sdram_cfg[1]);
+	ddr_out32(&ddr->init_addr, regs->init_addr);
+	ddr_out32(&ddr->init_ext_addr, regs->init_ext_addr);
+
+#ifdef ERRATA_DDR_A009803
+	/* part 1 of 2 */
+	if ((regs->sdram_cfg[1] & SDRAM_CFG2_AP_EN) != 0) {
+		if ((regs->sdram_cfg[0] & SDRAM_CFG_RD_EN) != 0) {
+			ddr_out32(&ddr->ddr_sdram_rcw_2,
+				  regs->sdram_rcw[1] & ~0xf0);
+		}
+
+		ddr_out32(&ddr->err_disable,
+				regs->err_disable | DDR_ERR_DISABLE_APED);
+	}
+#else
+	ddr_out32(&ddr->err_disable, regs->err_disable);
+#endif
+	ddr_out32(&ddr->err_int_en, regs->err_int_en);
+
+	/* For DDRC 5.05 only */
+	if (get_ddrc_version(ddr) == 0x50500) {
+		ddr_out32(&ddr->tx_cfg[1], 0x1f1f1f1f);
+		ddr_out32(&ddr->debug[3], 0x124a02c0);
+	}
+
+	for (i = 0U; i < 4U; i++) {
+		if (regs->tx_cfg[i] != 0) {
+			ddr_out32(&ddr->tx_cfg[i], regs->tx_cfg[i]);
+		}
+	}
+	for (i = 0U; i < 64U; i++) {
+		if (regs->debug[i] != 0) {
+#ifdef ERRATA_DDR_A009942
+			if (i == 28U) {
+				continue;
+			}
+#endif
+			ddr_out32(&ddr->debug[i], regs->debug[i]);
+		}
+	}
+#ifdef CONFIG_DDR_ADDR_DEC
+	if ((regs->dec[9] & 1) != 0U) {
+		for (i = 0U; i < 10U; i++) {
+			ddr_out32(&ddr->dec[i], regs->dec[i]);
+		}
+		if (mod_bnds != 0) {
+			debug("Disable address decoding\n");
+			ddr_out32(&ddr->dec[9], 0);
+		}
+	}
+#endif
+
+#ifdef ERRATA_DDR_A008511
+	/* Part 1 of 2 */
+	/* This erratum only applies to version 5.2.1 */
+	if (get_ddrc_version(ddr) == 0x50200) {
+		ERROR("Unsupported SoC.\n");
+	} else if (get_ddrc_version(ddr) == 0x50201) {
+		ddr_out32(&ddr->debug[37], (U(1) << 31));
+		ddr_out32(&ddr->ddr_cdr2,
+			  regs->cdr[1] | DDR_CDR2_VREF_TRAIN_EN);
+	} else {
+		debug("Erratum A008511 doesn't apply.\n");
+	}
+#endif
+
+#ifdef ERRATA_DDR_A009942
+	ddr_freq = clk / 1000000U;
+	tmp = ddr_in32(&ddr->debug[28]);
+	tmp &= U(0xff0fff00);
+	tmp |= ddr_freq <= 1333U ? U(0x0080006a) :
+		(ddr_freq <= 1600U ? U(0x0070006f) :
+		 (ddr_freq <= 1867U ? U(0x00700076) : U(0x0060007b)));
+	if (regs->debug[28] != 0) {
+		tmp &= ~0xff;
+		tmp |= regs->debug[28] & 0xff;
+	} else {
+		WARN("Warning: Optimal CPO value not set.\n");
+	}
+	ddr_out32(&ddr->debug[28], tmp);
+#endif
+
+#ifdef ERRATA_DDR_A010165
+	ddr_freq = clk / 1000000U;
+	if ((ddr_freq > 1900) && (ddr_freq < 2300)) {
+		tmp = ddr_in32(&ddr->debug[28]);
+		ddr_out32(&ddr->debug[28], tmp | 0x000a0000);
+	}
+#endif
+	/*
+	 * For RDIMMs, JEDEC spec requires clocks to be stable before reset is
+	 * deasserted. Clocks start when any chip select is enabled and clock
+	 * control register is set. Because all DDR components are connected to
+	 * one reset signal, this needs to be done in two steps. Step 1 is to
+	 * get the clocks started. Step 2 resumes after reset signal is
+	 * deasserted.
+	 */
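+	/*
+	 * twopass == 0: single call, program everything in one pass.
+	 * twopass == 1: step 1 only, return after the clocks are started.
+	 * twopass == 2: step 2 only, jump to after_reset and finish once
+	 * reset has been deasserted.
+	 */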
+	if (twopass == 1) {
+		udelay(200);
+		return 0;
+	}
+
+	/* As per the new sequence flow, the CSn_CONFIG registers need to be
+	 * set after all the other DDR controller registers, then poll for
+	 * PHY_INIT_CMPLT = 1, then wait at least 100 us, and only then set
+	 * MEM_EN = 1.
+	 */
+	for (i = 0U; i < DDRC_NUM_CS; i++) {
+		if (mod_bnds != 0U && i == 0U) {
+			ddr_out32(&ddr->csn_cfg[i],
+					(regs->cs[i].config & ~CTLR_INTLV_MASK));
+		} else {
+			ddr_out32(&ddr->csn_cfg[i], regs->cs[i].config);
+		}
+	}
+
+after_reset:
+	/* Set, but do not enable the memory */
+	temp_sdram_cfg = regs->sdram_cfg[0];
+	temp_sdram_cfg &= ~(SDRAM_CFG_MEM_EN);
+	ddr_out32(&ddr->sdram_cfg, temp_sdram_cfg);
+
+	if (get_ddrc_version(ddr) < U(0x50500)) {
+		/*
+		 * 500 painful microseconds must elapse between the DDR
+		 * clock setup and the DDR config enable. The spec requires
+		 * 200 us for DDR2 and 500 us for DDR3; use the larger
+		 * value, 500 us, for all cases.
+		 */
+		udelay(500);
+		/* apply memory barrier */
+		mb();
+		isb();
+	} else {
+		/* wait for PHY complete */
+		timeout = 40;
+		while (((ddr_in32(&ddr->ddr_dsr2) & 0x4) != 0) &&
+		       (timeout > 0)) {
+			udelay(500);
+			timeout--;
+		}
+		if (timeout <= 0) {
+			printf("PHY handshake timeout, ddr_dsr2 = %x\n",
+			       ddr_in32(&ddr->ddr_dsr2));
+		} else {
+			debug("PHY handshake completed, timer remains %d\n",
+			      timeout);
+		}
+	}
+
+	temp_sdram_cfg = ddr_in32(&ddr->sdram_cfg);
+	/* Let the controller go */
+	udelay(100);
+	ddr_out32(&ddr->sdram_cfg, temp_sdram_cfg | SDRAM_CFG_MEM_EN);
+
+	/* apply memory barrier */
+	mb();
+	isb();
+
+	total_mem_per_ctrl = 0;
+	for (i = 0; i < DDRC_NUM_CS; i++) {
+		if ((regs->cs[i].config & 0x80000000) == 0) {
+			continue;
+		}
+		total_mem_per_ctrl += 1 << (
+			((regs->cs[i].config >> 14) & 0x3) + 2 +
+			((regs->cs[i].config >> 8) & 0x7) + 12 +
+			((regs->cs[i].config >> 4) & 0x3) + 0 +
+			((regs->cs[i].config >> 0) & 0x7) + 8 +
+			((regs->sdram_cfg[2] >> 4) & 0x3) +
+			3 - ((regs->sdram_cfg[0] >> 19) & 0x3) -
+			26);		/* minus 26 to count in 64 MB units */
+	}
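+	/*
+	 * Illustrative example (assumed values, not tied to a specific board):
+	 * a rank with 2 bank bits, 16 row bits, 2 bank-group bits and 10
+	 * column bits on a 64-bit bus gives 2^(2+16+2+10+3) = 8 GB, i.e.
+	 * 1 << 7 after subtracting 26, counting in 64 MB units.
+	 */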
+	total_mem_per_ctrl_adj = total_mem_per_ctrl;
+	/*
+	 * total memory / bus width = transactions needed
+	 * transactions needed / data rate = seconds
+	 * to add plenty of buffer, double the time
+	 * For example, 2GB on 666MT/s 64-bit bus takes about 402ms
+	 * Let's wait for 800ms
+	 */
+	bus_width = 3 - ((ddr_in32(&ddr->sdram_cfg) & SDRAM_CFG_DBW_MASK)
+			>> SDRAM_CFG_DBW_SHIFT);
+	timeout = ((total_mem_per_ctrl_adj << (6 - bus_width)) * 100 /
+		   (clk >> 20)) << 2;
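+	/*
+	 * Worked example for the comment above (illustrative): 2 GB on a
+	 * 64-bit bus is 2 GB / 8 B = 268M transactions; at 666 MT/s that is
+	 * roughly 0.4 s, so waiting several times that leaves ample margin.
+	 */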
+	total_mem_per_ctrl_adj >>= 4;	/* shift down to GB units */
+	if ((ddr_in32(&ddr->sdram_cfg_2) & SDRAM_CFG2_D_INIT) != 0) {
+		debug("total size %d GB\n", total_mem_per_ctrl_adj);
+		debug("Need to wait up to %d ms\n", timeout * 10);
+
+		do {
+			mdelay(10);
+		} while (timeout-- > 0 &&
+			 ((ddr_in32(&ddr->sdram_cfg_2) & SDRAM_CFG2_D_INIT)) != 0);
+
+		if (timeout <= 0) {
+			if (ddr_in32(&ddr->debug[1]) & 0x3d00) {
+				ERROR("Found training error(s): 0x%x\n",
+				      ddr_in32(&ddr->debug[1]));
+			}
+			ERROR("Error: Waiting for D_INIT timeout.\n");
+			return -EIO;
+		}
+	}
+
+	if (mod_bnds != 0U) {
+		debug("Restore original bnds\n");
+		for (i = 0U; i < DDRC_NUM_CS; i++) {
+			ddr_out32(&ddr->bnds[i].a, regs->cs[i].bnds);
+		}
+		ddr_out32(&ddr->csn_cfg[0], regs->cs[0].config);
+#ifdef CONFIG_DDR_ADDR_DEC
+		if ((regs->dec[9] & U(0x1)) != 0U) {
+			debug("Restore address decoding\n");
+			ddr_out32(&ddr->dec[9], regs->dec[9]);
+		}
+#endif
+	}
+
+#ifdef ERRATA_DDR_A009803
+	/* Part 2 of 2 */
+	if ((regs->sdram_cfg[1] & SDRAM_CFG2_AP_EN) != 0) {
+		timeout = 400;
+		do {
+			mdelay(1);
+		} while (timeout-- > 0 && ((ddr_in32(&ddr->debug[1]) & 0x2) == 0));
+
+		if ((regs->sdram_cfg[0] & SDRAM_CFG_RD_EN) != 0) {
+			for (i = 0U; i < DDRC_NUM_CS; i++) {
+				if ((regs->cs[i].config & SDRAM_CS_CONFIG_EN) == 0) {
+					continue;
+				}
+				set_wait_for_bits_clear(&ddr->sdram_md_cntl,
+						MD_CNTL_MD_EN |
+						MD_CNTL_CS_SEL(i) |
+						0x070000ed,
+						MD_CNTL_MD_EN);
+				udelay(1);
+			}
+		}
+
+		ddr_out32(&ddr->err_disable,
+			  regs->err_disable & ~DDR_ERR_DISABLE_APED);
+	}
+#endif
+
+#ifdef ERRATA_DDR_A009663
+	ddr_out32(&ddr->sdram_interval, regs->interval);
+#endif
+
+#ifdef ERRATA_DDR_A009942
+	timeout = 400;
+	do {
+		mdelay(1);
+	} while (timeout-- > 0 && ((ddr_in32(&ddr->debug[1]) & 0x2) == 0));
+	tmp = (regs->sdram_cfg[0] >> 19) & 0x3;
+	check = (tmp == DDR_DBUS_64) ? 4 : ((tmp == DDR_DBUS_32) ? 2 : 1);
+	for (i = 0; i < check; i++) {
+		tmp = ddr_in32(&ddr->debug[9 + i]);
+		debug("Reading debug[%d] as 0x%x\n", i + 9, tmp);
+		cpo_min = min(cpo_min,
+			      min((tmp >> 24) & 0xff, (tmp >> 8) & 0xff));
+		cpo_max = max(cpo_max,
+			      max((tmp >> 24) & 0xff, (tmp >> 8) & 0xff));
+	}
+	if ((regs->sdram_cfg[0] & SDRAM_CFG_ECC_EN) != 0) {
+		tmp = ddr_in32(&ddr->debug[13]);
+		cpo_min = min(cpo_min, (tmp >> 24) & 0xff);
+		cpo_max = max(cpo_max, (tmp >> 24) & 0xff);
+	}
+	debug("cpo_min 0x%x\n", cpo_min);
+	debug("cpo_max 0x%x\n", cpo_max);
+	tmp = ddr_in32(&ddr->debug[28]);
+	debug("debug[28] 0x%x\n", tmp);
+	if ((cpo_min + 0x3B) < (tmp & 0xff)) {
+		WARN("Warning: A009942 requires setting cpo_sample to 0x%x\n",
+		     (cpo_min + cpo_max) / 2 + 0x27);
+	} else {
+		debug("Optimal cpo_sample 0x%x\n",
+			(cpo_min + cpo_max) / 2 + 0x27);
+	}
+#endif
+	if (run_bist() != 0) {
+		if ((ddr_in32(&ddr->debug[1]) &
+		    ((get_ddrc_version(ddr) == 0x50500) ? 0x3c00 : 0x3d00)) != 0) {
+			ERROR("Found training error(s): 0x%x\n",
+			     ddr_in32(&ddr->debug[1]));
+			return -EIO;
+		}
+		INFO("Running built-in self test ...\n");
+		/* give it 10x time to cover whole memory */
+		timeout = ((total_mem_per_ctrl << (6 - bus_width)) *
+			   100 / (clk >> 20)) * 10;
+		INFO("\tWait up to %d ms\n", timeout * 10);
+		ret = bist(ddr, timeout);
+	}
+	dump_ddrc((void *)ddr);
+
+	return ret;
+}
diff --git a/drivers/nxp/ddr/nxp-ddr/dimm.c b/drivers/nxp/ddr/nxp-ddr/dimm.c
new file mode 100644
index 0000000..16efcba
--- /dev/null
+++ b/drivers/nxp/ddr/nxp-ddr/dimm.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <ddr.h>
+#include <dimm.h>
+#include <i2c.h>
+#include <lib/utils.h>
+
+int read_spd(unsigned char chip, void *buf, int len)
+{
+	unsigned char dummy = 0U;
+	int ret;
+
+	if (len < 256) {
+		ERROR("Invalid SPD length\n");
+		return -EINVAL;
+	}
+
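+	/*
+	 * DDR4 SPD EEPROMs are 512 bytes split into two 256-byte pages;
+	 * a dummy write to the SPA0/SPA1 address selects the page before
+	 * reading (assumption based on the common DDR4 SPD/EE1004 scheme).
+	 */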
+	i2c_write(SPD_SPA0_ADDRESS, 0, 1, &dummy, 1);
+	ret = i2c_read(chip, 0, 1, buf, 256);
+	if (ret == 0) {
+		i2c_write(SPD_SPA1_ADDRESS, 0, 1, &dummy, 1);
+		ret = i2c_read(chip, 0, 1, buf + 256, min(256, len - 256));
+	}
+	if (ret != 0) {
+		zeromem(buf, len);
+	}
+
+	return ret;
+}
+
+int crc16(unsigned char *ptr, int count)
+{
+	int i;
+	int crc = 0;
+
+	while (--count >= 0) {
+		crc = crc ^ (int)*ptr++ << 8;
+		for (i = 0; i < 8; ++i) {
+			if ((crc & 0x8000) != 0) {
+				crc = crc << 1 ^ 0x1021;
+			} else {
+				crc = crc << 1;
+			}
+		}
+	}
+	return crc & 0xffff;
+}
+
+static int ddr4_spd_check(const struct ddr4_spd *spd)
+{
+	void *p = (void *)spd;
+	int csum16;
+	int len;
+	char crc_lsb;	/* byte 126 */
+	char crc_msb;	/* byte 127 */
+
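+	/* Base section: CRC over bytes 0-125, stored LSB-first in bytes 126-127 */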
+	len = 126;
+	csum16 = crc16(p, len);
+
+	crc_lsb = (char) (csum16 & 0xff);
+	crc_msb = (char) (csum16 >> 8);
+
+	if (spd->crc[0] != crc_lsb || spd->crc[1] != crc_msb) {
+		ERROR("SPD CRC = 0x%x%x, computed CRC = 0x%x%x\n",
+		      spd->crc[1], spd->crc[0], crc_msb, crc_lsb);
+		return -EINVAL;
+	}
+
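+	/* Module-specific section: CRC over bytes 128-253, stored in bytes 254-255 */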
+	p = (void *)spd + 128;
+	len = 126;
+	csum16 = crc16(p, len);
+
+	crc_lsb = (char) (csum16 & 0xff);
+	crc_msb = (char) (csum16 >> 8);
+
+	if (spd->mod_section.uc[126] != crc_lsb ||
+	    spd->mod_section.uc[127] != crc_msb) {
+		ERROR("SPD CRC = 0x%x%x, computed CRC = 0x%x%x\n",
+		      spd->mod_section.uc[127], spd->mod_section.uc[126],
+		      crc_msb, crc_lsb);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static unsigned long long
+compute_ranksize(const struct ddr4_spd *spd)
+{
+	unsigned long long bsize;
+
+	int nbit_sdram_cap_bsize = 0;
+	int nbit_primary_bus_width = 0;
+	int nbit_sdram_width = 0;
+	int die_count = 0;
+	bool package_3ds;
+
+	if ((spd->density_banks & 0xf) <= 7) {
+		nbit_sdram_cap_bsize = (spd->density_banks & 0xf) + 28;
+	}
+	if ((spd->bus_width & 0x7) < 4) {
+		nbit_primary_bus_width = (spd->bus_width & 0x7) + 3;
+	}
+	if ((spd->organization & 0x7) < 4) {
+		nbit_sdram_width = (spd->organization & 0x7) + 2;
+	}
+	package_3ds = (spd->package_type & 0x3) == 0x2;
+	if (package_3ds) {
+		die_count = (spd->package_type >> 4) & 0x7;
+	}
+
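+	/*
+	 * Example (illustrative): 8 Gb dies (density code 5 -> 33), x8 parts
+	 * (width code 1 -> 3) on a 64-bit bus (code 3 -> 6), non-3DS:
+	 * rank size = 1ULL << (33 - 3 + 6 - 3) = 8 GB.
+	 */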
+	bsize = 1ULL << (nbit_sdram_cap_bsize - 3 +
+			 nbit_primary_bus_width - nbit_sdram_width +
+			 die_count);
+
+	return bsize;
+}
+
+int cal_dimm_params(const struct ddr4_spd *spd, struct dimm_params *pdimm)
+{
+	int ret;
+	int i;
+	static const unsigned char udimm_rc_e_dq[18] = {
+		0x0c, 0x2c, 0x15, 0x35, 0x15, 0x35, 0x0b, 0x2c, 0x15,
+		0x35, 0x0b, 0x35, 0x0b, 0x2c, 0x0b, 0x35, 0x15, 0x36
+	};
+	int spd_error = 0;
+	unsigned char *ptr;
+	unsigned char val;
+
+	if (spd->mem_type != SPD_MEMTYPE_DDR4) {
+		ERROR("Not a DDR4 DIMM.\n");
+		return -EINVAL;
+	}
+
+	ret = ddr4_spd_check(spd);
+	if (ret != 0) {
+		ERROR("DIMM SPD checksum mismatch\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * The part name in ASCII in the SPD EEPROM is not null terminated.
+	 * Guarantee null termination here by presetting all bytes to 0
+	 * and copying the part name in ASCII from the SPD onto it
+	 */
+	if ((spd->info_size_crc & 0xF) > 2) {
+		memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);
+	}
+
+	/* DIMM organization parameters */
+	pdimm->n_ranks = ((spd->organization >> 3) & 0x7) + 1;
+	debug("n_ranks %d\n", pdimm->n_ranks);
+	pdimm->rank_density = compute_ranksize(spd);
+	if (pdimm->rank_density == 0) {
+		return -EINVAL;
+	}
+
+	debug("rank_density 0x%llx\n", pdimm->rank_density);
+	pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
+	debug("capacity 0x%llx\n", pdimm->capacity);
+	pdimm->die_density = spd->density_banks & 0xf;
+	debug("die density 0x%x\n", pdimm->die_density);
+	pdimm->primary_sdram_width = 1 << (3 + (spd->bus_width & 0x7));
+	debug("primary_sdram_width %d\n", pdimm->primary_sdram_width);
+	if (((spd->bus_width >> 3) & 0x3) != 0) {
+		pdimm->ec_sdram_width = 8;
+	} else {
+		pdimm->ec_sdram_width = 0;
+	}
+	debug("ec_sdram_width %d\n", pdimm->ec_sdram_width);
+	pdimm->device_width = 1 << ((spd->organization & 0x7) + 2);
+	debug("device_width %d\n", pdimm->device_width);
+	pdimm->package_3ds = (spd->package_type & 0x3) == 0x2 ?
+			     (spd->package_type >> 4) & 0x7 : 0;
+	debug("package_3ds %d\n", pdimm->package_3ds);
+
+	switch (spd->module_type & DDR4_SPD_MODULETYPE_MASK) {
+	case DDR4_SPD_RDIMM:
+	case DDR4_SPD_MINI_RDIMM:
+	case DDR4_SPD_72B_SO_RDIMM:
+		pdimm->rdimm = 1;
+		pdimm->rc = spd->mod_section.registered.ref_raw_card & 0x8f;
+		if ((spd->mod_section.registered.reg_map & 0x1) != 0) {
+			pdimm->mirrored_dimm = 1;
+		}
+		val = spd->mod_section.registered.ca_stren;
+		pdimm->rcw[3] = val >> 4;
+		pdimm->rcw[4] = ((val & 0x3) << 2) | ((val & 0xc) >> 2);
+		val = spd->mod_section.registered.clk_stren;
+		pdimm->rcw[5] = ((val & 0x3) << 2) | ((val & 0xc) >> 2);
+		pdimm->rcw[6] = 0xf;
+		/* A17 used for 16Gb+, C[2:0] used for 3DS */
+		pdimm->rcw[8] = pdimm->die_density >= 0x6 ? 0x0 : 0x8 |
+				(pdimm->package_3ds > 0x3 ? 0x0 :
+				 (pdimm->package_3ds > 0x1 ? 0x1 :
+				  (pdimm->package_3ds > 0 ? 0x2 : 0x3)));
+		if (pdimm->package_3ds != 0 || pdimm->n_ranks != 4) {
+			pdimm->rcw[13] = 0x4;
+		} else {
+			pdimm->rcw[13] = 0x5;
+		}
+		pdimm->rcw[13] |= pdimm->mirrored_dimm ? 0x8 : 0;
+		break;
+
+	case DDR4_SPD_UDIMM:
+	case DDR4_SPD_SO_DIMM:
+	case DDR4_SPD_MINI_UDIMM:
+	case DDR4_SPD_72B_SO_UDIMM:
+	case DDR4_SPD_16B_SO_DIMM:
+	case DDR4_SPD_32B_SO_DIMM:
+		pdimm->rc = spd->mod_section.unbuffered.ref_raw_card & 0x8f;
+		if ((spd->mod_section.unbuffered.addr_mapping & 0x1) != 0) {
+			pdimm->mirrored_dimm = 1;
+		}
+		if ((spd->mod_section.unbuffered.mod_height & 0xe0) == 0 &&
+		    (spd->mod_section.unbuffered.ref_raw_card == 0x04)) {
+			/* Fix SPD error found on DIMMs with raw card E0 */
+			for (i = 0; i < 18; i++) {
+				if (spd->mapping[i] == udimm_rc_e_dq[i]) {
+					continue;
+				}
+				spd_error = 1;
+				ptr = (unsigned char *)&spd->mapping[i];
+				*ptr = udimm_rc_e_dq[i];
+			}
+			if (spd_error != 0) {
+				INFO("SPD DQ mapping error fixed\n");
+			}
+		}
+		break;
+
+	default:
+		ERROR("Unknown module_type 0x%x\n", spd->module_type);
+		return -EINVAL;
+	}
+	debug("rdimm %d\n", pdimm->rdimm);
+	debug("mirrored_dimm %d\n", pdimm->mirrored_dimm);
+	debug("rc 0x%x\n", pdimm->rc);
+
+	/* SDRAM device parameters */
+	pdimm->n_row_addr = ((spd->addressing >> 3) & 0x7) + 12;
+	debug("n_row_addr %d\n", pdimm->n_row_addr);
+	pdimm->n_col_addr = (spd->addressing & 0x7) + 9;
+	debug("n_col_addr %d\n", pdimm->n_col_addr);
+	pdimm->bank_addr_bits = (spd->density_banks >> 4) & 0x3;
+	debug("bank_addr_bits %d\n", pdimm->bank_addr_bits);
+	pdimm->bank_group_bits = (spd->density_banks >> 6) & 0x3;
+	debug("bank_group_bits %d\n", pdimm->bank_group_bits);
+
+	if (pdimm->ec_sdram_width != 0) {
+		pdimm->edc_config = 0x02;
+	} else {
+		pdimm->edc_config = 0x00;
+	}
+	debug("edc_config %d\n", pdimm->edc_config);
+
+	/* Per the DDR4 spec, BL8 is bit 3 and BC4 is bit 2 */
+	pdimm->burst_lengths_bitmask = 0x0c;
+	debug("burst_lengths_bitmask 0x%x\n", pdimm->burst_lengths_bitmask);
+
+	/*
+	 * MTB - medium timebase: 125 ps in the SPD spec.
+	 *
+	 * FTB - fine timebase: use 1/10th of a picosecond as the unit to
+	 * avoid floating point, e.g. 10 for 1 ps, 25 for 2.5 ps, 50 for 5 ps.
+	 */
+	if ((spd->timebases & 0xf) == 0x0) {
+		pdimm->mtb_ps = 125;
+		pdimm->ftb_10th_ps = 10;
+
+	} else {
+		ERROR("Unknown Timebases\n");
+		return -EINVAL;
+	}
+
+	/* sdram minimum cycle time */
+	pdimm->tckmin_x_ps = spd_to_ps(spd->tck_min, spd->fine_tck_min);
+	debug("tckmin_x_ps %d\n", pdimm->tckmin_x_ps);
+
+	/* sdram max cycle time */
+	pdimm->tckmax_ps = spd_to_ps(spd->tck_max, spd->fine_tck_max);
+	debug("tckmax_ps %d\n", pdimm->tckmax_ps);
+
+	/*
+	 * CAS latencies supported, from the SPD CAS latency bytes:
+	 * caslat_b1 bit0 - CL7
+	 * caslat_b1 bit4 - CL11
+	 * caslat_b2 bit0 - CL15
+	 * caslat_b2 bit4 - CL19
+	 * caslat_b3 bit0 - CL23
+	 */
+	pdimm->caslat_x  = (spd->caslat_b1 << 7)	|
+			   (spd->caslat_b2 << 15)	|
+			   (spd->caslat_b3 << 23);
+	debug("caslat_x 0x%x\n", pdimm->caslat_x);
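+	/* With this packing, bit n of caslat_x corresponds to CL n. */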
+
+	if (spd->caslat_b4 != 0) {
+		WARN("Unhandled caslat_b4 value\n");
+	}
+
+	/*
+	 * min CAS latency time
+	 */
+	pdimm->taa_ps = spd_to_ps(spd->taa_min, spd->fine_taa_min);
+	debug("taa_ps %d\n", pdimm->taa_ps);
+
+	/*
+	 * min RAS to CAS delay time
+	 */
+	pdimm->trcd_ps = spd_to_ps(spd->trcd_min, spd->fine_trcd_min);
+	debug("trcd_ps %d\n", pdimm->trcd_ps);
+
+	/*
+	 * Min Row Precharge Delay Time
+	 */
+	pdimm->trp_ps = spd_to_ps(spd->trp_min, spd->fine_trp_min);
+	debug("trp_ps %d\n", pdimm->trp_ps);
+
+	/* min active to precharge delay time */
+	pdimm->tras_ps = (((spd->tras_trc_ext & 0xf) << 8) +
+			  spd->tras_min_lsb) * pdimm->mtb_ps;
+	debug("tras_ps %d\n", pdimm->tras_ps);
+
+	/* min active to active/refresh delay time */
+	pdimm->trc_ps = spd_to_ps((((spd->tras_trc_ext & 0xf0) << 4) +
+				   spd->trc_min_lsb), spd->fine_trc_min);
+	debug("trc_ps %d\n", pdimm->trc_ps);
+	/* Min Refresh Recovery Delay Time */
+	pdimm->trfc1_ps = ((spd->trfc1_min_msb << 8) | (spd->trfc1_min_lsb)) *
+		       pdimm->mtb_ps;
+	debug("trfc1_ps %d\n", pdimm->trfc1_ps);
+	pdimm->trfc2_ps = ((spd->trfc2_min_msb << 8) | (spd->trfc2_min_lsb)) *
+		       pdimm->mtb_ps;
+	debug("trfc2_ps %d\n", pdimm->trfc2_ps);
+	pdimm->trfc4_ps = ((spd->trfc4_min_msb << 8) | (spd->trfc4_min_lsb)) *
+			pdimm->mtb_ps;
+	debug("trfc4_ps %d\n", pdimm->trfc4_ps);
+	/* min four active window delay time */
+	pdimm->tfaw_ps = (((spd->tfaw_msb & 0xf) << 8) | spd->tfaw_min) *
+			pdimm->mtb_ps;
+	debug("tfaw_ps %d\n", pdimm->tfaw_ps);
+
+	/* min row active to row active delay time, different bank group */
+	pdimm->trrds_ps = spd_to_ps(spd->trrds_min, spd->fine_trrds_min);
+	debug("trrds_ps %d\n", pdimm->trrds_ps);
+	/* min row active to row active delay time, same bank group */
+	pdimm->trrdl_ps = spd_to_ps(spd->trrdl_min, spd->fine_trrdl_min);
+	debug("trrdl_ps %d\n", pdimm->trrdl_ps);
+	/* min CAS to CAS Delay Time (tCCD_Lmin), same bank group */
+	pdimm->tccdl_ps = spd_to_ps(spd->tccdl_min, spd->fine_tccdl_min);
+	debug("tccdl_ps %d\n", pdimm->tccdl_ps);
+	if (pdimm->package_3ds != 0) {
+		if (pdimm->die_density > 5) {
+			debug("Unsupported logical rank density 0x%x\n",
+				  pdimm->die_density);
+			return -EINVAL;
+		}
+		pdimm->trfc_slr_ps = (pdimm->die_density <= 4) ?
+				     260000 : 350000;
+	}
+	debug("trfc_slr_ps %d\n", pdimm->trfc_slr_ps);
+
+	/* 15ns for all speed bins */
+	pdimm->twr_ps = 15000;
+	debug("twr_ps %d\n", pdimm->twr_ps);
+
+	/*
+	 * Average periodic refresh interval
+	 * tREFI = 7.8 us at normal temperature range
+	 */
+	pdimm->refresh_rate_ps = 7800000;
+	debug("refresh_rate_ps %d\n", pdimm->refresh_rate_ps);
+
+	for (i = 0; i < 18; i++) {
+		pdimm->dq_mapping[i] = spd->mapping[i];
+		debug("dq_mapping 0x%x\n", pdimm->dq_mapping[i]);
+	}
+
+	pdimm->dq_mapping_ors = ((spd->mapping[0] >> 6) & 0x3) == 0 ? 1 : 0;
+	debug("dq_mapping_ors %d\n", pdimm->dq_mapping_ors);
+
+	return 0;
+}
diff --git a/drivers/nxp/ddr/nxp-ddr/regs.c b/drivers/nxp/ddr/nxp-ddr/regs.c
new file mode 100644
index 0000000..cedd7ca
--- /dev/null
+++ b/drivers/nxp/ddr/nxp-ddr/regs.c
@@ -0,0 +1,1394 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <common/debug.h>
+#include <ddr.h>
+#include <lib/utils.h>
+
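+/*
+ * CAS write latency from the memory clock period; e.g. at 2400 MT/s the
+ * clock period is ~833 ps, which selects CWL = 12 (illustrative mapping).
+ */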
+static inline unsigned int cal_cwl(const unsigned long clk)
+{
+	const unsigned int mclk_ps = get_memory_clk_ps(clk);
+
+	return mclk_ps >= 1250U ? 9U :
+		(mclk_ps >= 1070U ? 10U :
+		 (mclk_ps >= 935U ? 11U :
+		  (mclk_ps >= 833U ? 12U :
+		   (mclk_ps >= 750U ? 14U :
+		    (mclk_ps >= 625U ? 16U : 18U)))));
+}
+
+static void cal_csn_config(int i,
+			   struct ddr_cfg_regs *regs,
+			   const struct memctl_opt *popts,
+			   const struct dimm_params *pdimm)
+{
+	unsigned int intlv_en = 0U;
+	unsigned int intlv_ctl = 0U;
+	const unsigned int cs_n_en = 1U;
+	const unsigned int ap_n_en = popts->cs_odt[i].auto_precharge;
+	const unsigned int odt_rd_cfg = popts->cs_odt[i].odt_rd_cfg;
+	const unsigned int odt_wr_cfg = popts->cs_odt[i].odt_wr_cfg;
+	const unsigned int ba_bits_cs_n = pdimm->bank_addr_bits;
+	const unsigned int row_bits_cs_n = pdimm->n_row_addr - 12U;
+	const unsigned int col_bits_cs_n = pdimm->n_col_addr - 8U;
+	const unsigned int bg_bits_cs_n = pdimm->bank_group_bits;
+
+	if (i == 0) {
+		/* These fields only available in CS0_CONFIG */
+		if (popts->ctlr_intlv != 0) {
+			switch (popts->ctlr_intlv_mode) {
+			case DDR_256B_INTLV:
+				intlv_en = popts->ctlr_intlv;
+				intlv_ctl = popts->ctlr_intlv_mode;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+	regs->cs[i].config = ((cs_n_en & 0x1) << 31)		|
+			    ((intlv_en & 0x3) << 29)		|
+			    ((intlv_ctl & 0xf) << 24)		|
+			    ((ap_n_en & 0x1) << 23)		|
+			    ((odt_rd_cfg & 0x7) << 20)		|
+			    ((odt_wr_cfg & 0x7) << 16)		|
+			    ((ba_bits_cs_n & 0x3) << 14)	|
+			    ((row_bits_cs_n & 0x7) << 8)	|
+			    ((bg_bits_cs_n & 0x3) << 4)		|
+			    ((col_bits_cs_n & 0x7) << 0);
+	debug("cs%d\n", i);
+	debug("   _config = 0x%x\n", regs->cs[i].config);
+}
+
+static inline int avoid_odt_overlap(const struct ddr_conf *conf,
+				    const struct dimm_params *pdimm)
+{
+	if ((conf->cs_in_use == 0xf) != 0) {
+		return 2;
+	}
+
+#if DDRC_NUM_DIMM >= 2
+	if (conf->dimm_in_use[0] != 0 && conf->dimm_in_use[1] != 0) {
+		return 1;
+	}
+#endif
+	return 0;
+}
+
+/* Requires rcw2 set first */
+static void cal_timing_cfg(const unsigned long clk,
+			   struct ddr_cfg_regs *regs,
+			   const struct memctl_opt *popts,
+			   const struct dimm_params *pdimm,
+			   const struct ddr_conf *conf,
+			   unsigned int cas_latency,
+			   unsigned int additive_latency)
+{
+	const unsigned int mclk_ps = get_memory_clk_ps(clk);
+	/* tXP=max(4nCK, 6ns) */
+	const int txp = max((int)mclk_ps * 4, 6000);
+	/* DDR4 supports 10, 12, 14, 16, 18, 20, 24 */
+	static const int wrrec_table[] = {
+		10, 10, 10, 10, 10,
+		10, 10, 10, 10, 10,
+		12, 12, 14, 14, 16,
+		16, 18, 18, 20, 20,
+		24, 24, 24, 24,
+	};
+	int trwt_mclk = (clk / 1000000 > 1900) ? 3 : 2;
+	int twrt_mclk;
+	int trrt_mclk;
+	int twwt_mclk;
+	const int act_pd_exit_mclk = picos_to_mclk(clk, txp);
+	const int pre_pd_exit_mclk = act_pd_exit_mclk;
+	const int taxpd_mclk = 0;
+	/*
+	 * MRS_CYC = max(tMRD, tMOD)
+	 * tMRD = 8nCK, tMOD = max(24nCK, 15ns)
+	 */
+	const int tmrd_mclk = max(24U, picos_to_mclk(clk, 15000));
+	const int pretoact_mclk = picos_to_mclk(clk, pdimm->trp_ps);
+	const int acttopre_mclk = picos_to_mclk(clk, pdimm->tras_ps);
+	const int acttorw_mclk = picos_to_mclk(clk, pdimm->trcd_ps);
+	const int caslat_ctrl = (cas_latency - 1) << 1;
+	const int trfc1_min = pdimm->die_density >= 0x3 ? 16000 :
+			      (pdimm->die_density == 0x4 ? 26000 :
+			       (pdimm->die_density == 0x5 ? 35000 :
+				55000));
+	const int refrec_ctrl = picos_to_mclk(clk,
+							pdimm->trfc1_ps) - 8;
+	int wrrec_mclk = picos_to_mclk(clk, pdimm->twr_ps);
+	const int acttoact_mclk = max(picos_to_mclk(clk,
+							      pdimm->trrds_ps),
+						4U);
+	int wrtord_mclk = max(2U, picos_to_mclk(clk, 2500));
+	const unsigned int cpo = 0U;
+	const int wr_lat = cal_cwl(clk);
+	int rd_to_pre = picos_to_mclk(clk, 7500);
+	const int wr_data_delay = popts->wr_data_delay;
+	const int cke_pls = max(3U, picos_to_mclk(clk, 5000));
+#ifdef ERRATA_DDR_A050450
+	const unsigned short four_act = ((popts->twot_en == 0) &&
+					 (popts->threet_en == 0) &&
+					 (popts->tfaw_ps % 2 == 0)) ?
+						(picos_to_mclk(clk, popts->tfaw_ps) + 1) :
+						picos_to_mclk(clk, popts->tfaw_ps);
+#else
+	const unsigned short four_act = picos_to_mclk(clk,
+					 popts->tfaw_ps);
+#endif
+	const unsigned int cntl_adj = 0U;
+	const unsigned int ext_pretoact = picos_to_mclk(clk,
+							pdimm->trp_ps) >> 4U;
+	const unsigned int ext_acttopre = picos_to_mclk(clk,
+							pdimm->tras_ps) >> 4U;
+	const unsigned int ext_acttorw = picos_to_mclk(clk,
+						       pdimm->trcd_ps) >> 4U;
+	const unsigned int ext_caslat = (2U * cas_latency - 1U) >> 4U;
+	const unsigned int ext_add_lat = additive_latency >> 4U;
+	const unsigned int ext_refrec = (picos_to_mclk(clk,
+					       pdimm->trfc1_ps) - 8U) >> 4U;
+	const unsigned int ext_wrrec = (picos_to_mclk(clk, pdimm->twr_ps) +
+				  (popts->otf_burst_chop_en ? 2U : 0U)) >> 4U;
+	const unsigned int rwt_same_cs = 0U;
+	const unsigned int wrt_same_cs = 0U;
+	const unsigned int rrt_same_cs = popts->burst_length == DDR_BL8 ? 0U : 2U;
+	const unsigned int wwt_same_cs = popts->burst_length == DDR_BL8 ? 0U : 2U;
+	const unsigned int dll_lock = 2U;
+	unsigned int rodt_on = 0U;
+	const unsigned int rodt_off = 4U;
+	const unsigned int wodt_on = 1U;
+	const unsigned int wodt_off = 4U;
+	const unsigned int hs_caslat = 0U;
+	const unsigned int hs_wrlat = 0U;
+	const unsigned int hs_wrrec = 0U;
+	const unsigned int hs_clkadj = 0U;
+	const unsigned int hs_wrlvl_start = 0U;
+	const unsigned int txpr = max(5U,
+				      picos_to_mclk(clk,
+						    pdimm->trfc1_ps + 10000U));
+	const unsigned int tcksre = max(5U, picos_to_mclk(clk, 10000U));
+	const unsigned int tcksrx = max(5U, picos_to_mclk(clk, 10000U));
+	const unsigned int cs_to_cmd = 0U;
+	const unsigned int cke_rst = txpr <= 200U ? 0U :
+				     (txpr <= 256U ? 1U :
+				      (txpr <= 512U ? 2U : 3U));
+	const unsigned int cksre = tcksre <= 19U ? tcksre - 5U : 15U;
+	const unsigned int cksrx = tcksrx <= 19U ? tcksrx - 5U : 15U;
+	unsigned int par_lat = 0U;
+	const int tccdl = max(5U, picos_to_mclk(clk, pdimm->tccdl_ps));
+	int rwt_bg = cas_latency + 2 + 4 - wr_lat;
+	int wrt_bg = wr_lat + 4 + 1 - cas_latency;
+	const int rrt_bg = popts->burst_length == DDR_BL8 ?
+				tccdl - 4 : tccdl - 2;
+	const int wwt_bg = popts->burst_length == DDR_BL8 ?
+					tccdl - 4 : tccdl - 2;
+	const unsigned int acttoact_bg = picos_to_mclk(clk, pdimm->trrdl_ps);
+	const unsigned int wrtord_bg = max(4U, picos_to_mclk(clk, 7500)) +
+				       (popts->otf_burst_chop_en ? 2 : 0);
+	const unsigned int pre_all_rec = 0;
+	const unsigned int refrec_cid_mclk = pdimm->package_3ds ?
+				picos_to_mclk(clk, pdimm->trfc_slr_ps) : 0;
+	const unsigned int acttoact_cid_mclk = pdimm->package_3ds ? 4U : 0;
+
+	/* for two dual-rank DIMMs to avoid ODT overlap */
+	if (avoid_odt_overlap(conf, pdimm) == 2) {
+		twrt_mclk = 2;
+		twwt_mclk = 2;
+		trrt_mclk = 2;
+	} else {
+		twrt_mclk = 1;
+		twwt_mclk = 1;
+		trrt_mclk = 0;
+	}
+
+	if (popts->trwt_override != 0) {
+		trwt_mclk = popts->trwt;
+		if (popts->twrt != 0) {
+			twrt_mclk = popts->twrt;
+		}
+		if (popts->trrt != 0) {
+			trrt_mclk = popts->trrt;
+		}
+		if (popts->twwt != 0) {
+			twwt_mclk = popts->twwt;
+		}
+	}
+	regs->timing_cfg[0] = (((trwt_mclk & 0x3) << 30)		|
+			     ((twrt_mclk & 0x3) << 28)			|
+			     ((trrt_mclk & 0x3) << 26)			|
+			     ((twwt_mclk & 0x3) << 24)			|
+			     ((act_pd_exit_mclk & 0xf) << 20)		|
+			     ((pre_pd_exit_mclk & 0xF) << 16)		|
+			     ((taxpd_mclk & 0xf) << 8)			|
+			     ((tmrd_mclk & 0x1f) << 0));
+	debug("timing_cfg[0] = 0x%x\n", regs->timing_cfg[0]);
+
+	if ((wrrec_mclk < 1) || (wrrec_mclk > 24)) {
+		ERROR("WRREC doesn't support clock %d\n", wrrec_mclk);
+	} else {
+		wrrec_mclk = wrrec_table[wrrec_mclk - 1];
+	}
+
+	if (popts->otf_burst_chop_en != 0) {
+		wrrec_mclk += 2;
+		wrtord_mclk += 2;
+	}
+
+	if (pdimm->trfc1_ps < trfc1_min) {
+		ERROR("trfc1_ps (%d) < %d\n", pdimm->trfc1_ps, trfc1_min);
+	}
+
+	regs->timing_cfg[1] = (((pretoact_mclk & 0x0F) << 28)		|
+			     ((acttopre_mclk & 0x0F) << 24)		|
+			     ((acttorw_mclk & 0xF) << 20)		|
+			     ((caslat_ctrl & 0xF) << 16)		|
+			     ((refrec_ctrl & 0xF) << 12)		|
+			     ((wrrec_mclk & 0x0F) << 8)			|
+			     ((acttoact_mclk & 0x0F) << 4)		|
+			     ((wrtord_mclk & 0x0F) << 0));
+	debug("timing_cfg[1] = 0x%x\n", regs->timing_cfg[1]);
+
+	if (rd_to_pre < 4) {
+		rd_to_pre = 4;
+	}
+	if (popts->otf_burst_chop_en) {
+		rd_to_pre += 2;
+	}
+
+	regs->timing_cfg[2] = (((additive_latency & 0xf) << 28)		|
+			     ((cpo & 0x1f) << 23)			|
+			     ((wr_lat & 0xf) << 19)			|
+			     (((wr_lat & 0x10) >> 4) << 18)		|
+			     ((rd_to_pre & 0xf) << 13)			|
+			     ((wr_data_delay & 0xf) << 9)		|
+			     ((cke_pls & 0x7) << 6)			|
+			     ((four_act & 0x3f) << 0));
+	debug("timing_cfg[2] = 0x%x\n", regs->timing_cfg[2]);
+
+	regs->timing_cfg[3] = (((ext_pretoact & 0x1) << 28)		|
+			     ((ext_acttopre & 0x3) << 24)		|
+			     ((ext_acttorw & 0x1) << 22)		|
+			     ((ext_refrec & 0x3F) << 16)		|
+			     ((ext_caslat & 0x3) << 12)			|
+			     ((ext_add_lat & 0x1) << 10)		|
+			     ((ext_wrrec & 0x1) << 8)			|
+			     ((cntl_adj & 0x7) << 0));
+	debug("timing_cfg[3] = 0x%x\n", regs->timing_cfg[3]);
+
+	regs->timing_cfg[4] = (((rwt_same_cs & 0xf) << 28)		|
+			     ((wrt_same_cs & 0xf) << 24)		|
+			     ((rrt_same_cs & 0xf) << 20)		|
+			     ((wwt_same_cs & 0xf) << 16)		|
+			     ((trwt_mclk & 0xc) << 12)			|
+			     ((twrt_mclk & 0x4) << 10)			|
+			     ((trrt_mclk & 0x4) << 8)			|
+			     ((twwt_mclk & 0x4) << 6)			|
+			     (dll_lock & 0x3));
+	debug("timing_cfg[4] = 0x%x\n", regs->timing_cfg[4]);
+
+	/* rodt_on = timing_cfg_1[caslat] - timing_cfg_2[wrlat] + 1 */
+	if (cas_latency >= wr_lat) {
+		rodt_on = cas_latency - wr_lat + 1;
+	}
+
+	regs->timing_cfg[5] = (((rodt_on & 0x1f) << 24)			|
+			     ((rodt_off & 0x7) << 20)			|
+			     ((wodt_on & 0x1f) << 12)			|
+			     (wodt_off & 0x7) << 8);
+	debug("timing_cfg[5] = 0x%x\n", regs->timing_cfg[5]);
+
+	regs->timing_cfg[6] = (((hs_caslat & 0x1f) << 24)		|
+			     ((hs_wrlat & 0x1f) << 19)			|
+			     ((hs_wrrec & 0x1f) << 12)			|
+			     ((hs_clkadj & 0x1f) << 6)			|
+			     ((hs_wrlvl_start & 0x1f) << 0));
+	debug("timing_cfg[6] = 0x%x\n", regs->timing_cfg[6]);
+
+	if (popts->ap_en != 0) {
+		par_lat = (regs->sdram_rcw[1] & 0xf) + 1;
+		debug("PAR_LAT = 0x%x\n", par_lat);
+	}
+
+	regs->timing_cfg[7] = (((cke_rst & 0x3) << 28)			|
+			     ((cksre & 0xf) << 24)			|
+			     ((cksrx & 0xf) << 20)			|
+			     ((par_lat & 0xf) << 16)			|
+			     ((cs_to_cmd & 0xf) << 4));
+	debug("timing_cfg[7] = 0x%x\n", regs->timing_cfg[7]);
+
+	if (rwt_bg < tccdl) {
+		rwt_bg = tccdl - rwt_bg;
+	} else {
+		rwt_bg = 0;
+	}
+	if (wrt_bg < tccdl) {
+		wrt_bg = tccdl - wrt_bg;
+	} else {
+		wrt_bg = 0;
+	}
+	regs->timing_cfg[8] = (((rwt_bg & 0xf) << 28)			|
+			     ((wrt_bg & 0xf) << 24)			|
+			     ((rrt_bg & 0xf) << 20)			|
+			     ((wwt_bg & 0xf) << 16)			|
+			     ((acttoact_bg & 0xf) << 12)		|
+			     ((wrtord_bg & 0xf) << 8)			|
+			     ((pre_all_rec & 0x1f) << 0));
+	debug("timing_cfg[8] = 0x%x\n", regs->timing_cfg[8]);
+
+	regs->timing_cfg[9] = (refrec_cid_mclk & 0x3ff) << 16		|
+			      (acttoact_cid_mclk & 0xf) << 8;
+	debug("timing_cfg[9] = 0x%x\n", regs->timing_cfg[9]);
+}
+
+static void cal_ddr_sdram_rcw(const unsigned long clk,
+			      struct ddr_cfg_regs *regs,
+			      const struct memctl_opt *popts,
+			      const struct dimm_params *pdimm)
+{
+	const unsigned int freq = clk / 1000000U;
+	unsigned int rc0a, rc0f;
+
+	if (pdimm->rdimm == 0) {
+		return;
+	}
+
+	rc0a = freq > 3200U ? 7U :
+	       (freq > 2933U ? 6U :
+		(freq > 2666U ? 5U :
+		 (freq > 2400U ? 4U :
+		  (freq > 2133U ? 3U :
+		   (freq > 1866U ? 2U :
+		    (freq > 1600U ? 1U : 0U))))));
+	rc0f = freq > 3200U ? 3U :
+		(freq > 2400U ? 2U :
+		 (freq > 2133U ? 1U : 0U));
+	rc0f = (regs->sdram_cfg[1] & SDRAM_CFG2_AP_EN) ? rc0f : 4;
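+	/*
+	 * Example (illustrative): a 2400 MT/s RDIMM gives rc0a = 3; with
+	 * C/A parity enabled rc0f = 1, otherwise rc0f is forced to 4.
+	 */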
+	regs->sdram_rcw[0] =
+		pdimm->rcw[0] << 28	|
+		pdimm->rcw[1] << 24	|
+		pdimm->rcw[2] << 20	|
+		pdimm->rcw[3] << 16	|
+		pdimm->rcw[4] << 12	|
+		pdimm->rcw[5] << 8	|
+		pdimm->rcw[6] << 4	|
+		pdimm->rcw[7];
+	regs->sdram_rcw[1] =
+		pdimm->rcw[8] << 28	|
+		pdimm->rcw[9] << 24	|
+		rc0a << 20		|
+		pdimm->rcw[11] << 16	|
+		pdimm->rcw[12] << 12	|
+		pdimm->rcw[13] << 8	|
+		pdimm->rcw[14] << 4	|
+		rc0f;
+	regs->sdram_rcw[2] =
+		((freq - 1260 + 19) / 20) << 8;
+
+	debug("sdram_rcw[0] = 0x%x\n", regs->sdram_rcw[0]);
+	debug("sdram_rcw[1] = 0x%x\n", regs->sdram_rcw[1]);
+	debug("sdram_rcw[2] = 0x%x\n", regs->sdram_rcw[2]);
+}
+
+static void cal_ddr_sdram_cfg(const unsigned long clk,
+			      struct ddr_cfg_regs *regs,
+			      const struct memctl_opt *popts,
+			      const struct dimm_params *pdimm,
+			      const unsigned int ip_rev)
+{
+	const unsigned int mem_en = 1U;
+	const unsigned int sren = popts->self_refresh_in_sleep;
+	const unsigned int ecc_en = popts->ecc_mode;
+	const unsigned int rd_en = (pdimm->rdimm != 0U) ? 1U : 0U;
+	const unsigned int dyn_pwr = popts->dynamic_power;
+	const unsigned int dbw = popts->data_bus_used;
+	const unsigned int eight_be = (dbw == 1U ||
+				       popts->burst_length == DDR_BL8) ? 1U : 0U;
+	const unsigned int ncap = 0U;
+	const unsigned int threet_en = popts->threet_en;
+	const unsigned int twot_en = pdimm->rdimm ?
+					0U : popts->twot_en;
+	const unsigned int ba_intlv = popts->ba_intlv;
+	const unsigned int x32_en = 0U;
+	const unsigned int pchb8 = 0U;
+	const unsigned int hse = popts->half_strength_drive_en;
+	const unsigned int acc_ecc_en = (dbw != 0U && ecc_en == 1U) ? 1U : 0U;
+	const unsigned int mem_halt = 0U;
+#ifdef PHY_GEN2
+	const unsigned int bi = 1U;
+#else
+	const unsigned int bi = 0U;
+#endif
+	const unsigned int sdram_type = SDRAM_TYPE_DDR4;
+	unsigned int odt_cfg = 0U;
+	const unsigned int frc_sr = 0U;
+	const unsigned int sr_ie = popts->self_refresh_irq_en;
+	const unsigned int num_pr = pdimm->package_3ds + 1U;
+	const unsigned int slow = (clk < 1249000000U) ? 1U : 0U;
+	const unsigned int x4_en = popts->x4_en;
+	const unsigned int obc_cfg = popts->otf_burst_chop_en;
+	const unsigned int ap_en = ip_rev == 0x50500U ? 0U : popts->ap_en;
+	const unsigned int d_init = popts->ctlr_init_ecc;
+	const unsigned int rcw_en = popts->rdimm;
+	const unsigned int md_en = popts->mirrored_dimm;
+	const unsigned int qd_en = popts->quad_rank_present;
+	const unsigned int unq_mrs_en = ip_rev < 0x50500U ? 1U : 0U;
+	const unsigned int rd_pre = popts->quad_rank_present;
+	int i;
+
+	regs->sdram_cfg[0] = ((mem_en & 0x1) << 31)		|
+				((sren & 0x1) << 30)		|
+				((ecc_en & 0x1) << 29)		|
+				((rd_en & 0x1) << 28)		|
+				((sdram_type & 0x7) << 24)	|
+				((dyn_pwr & 0x1) << 21)		|
+				((dbw & 0x3) << 19)		|
+				((eight_be & 0x1) << 18)	|
+				((ncap & 0x1) << 17)		|
+				((threet_en & 0x1) << 16)	|
+				((twot_en & 0x1) << 15)		|
+				((ba_intlv & 0x7F) << 8)	|
+				((x32_en & 0x1) << 5)		|
+				((pchb8 & 0x1) << 4)		|
+				((hse & 0x1) << 3)		|
+				((acc_ecc_en & 0x1) << 2)	|
+				((mem_halt & 0x1) << 1)		|
+				((bi & 0x1) << 0);
+	debug("sdram_cfg[0] = 0x%x\n", regs->sdram_cfg[0]);
+
+	for (i = 0; i < DDRC_NUM_CS; i++) {
+		if (popts->cs_odt[i].odt_rd_cfg != 0 ||
+		    popts->cs_odt[i].odt_wr_cfg != 0) {
+			odt_cfg = SDRAM_CFG2_ODT_ONLY_READ;
+			break;
+		}
+	}
+
+	regs->sdram_cfg[1] = (0
+		| ((frc_sr & 0x1) << 31)
+		| ((sr_ie & 0x1) << 30)
+		| ((odt_cfg & 0x3) << 21)
+		| ((num_pr & 0xf) << 12)
+		| ((slow & 1) << 11)
+		| (x4_en << 10)
+		| (qd_en << 9)
+		| (unq_mrs_en << 8)
+		| ((obc_cfg & 0x1) << 6)
+		| ((ap_en & 0x1) << 5)
+		| ((d_init & 0x1) << 4)
+		| ((rcw_en & 0x1) << 2)
+		| ((md_en & 0x1) << 0)
+		);
+	debug("sdram_cfg[1] = 0x%x\n", regs->sdram_cfg[1]);
+
+	regs->sdram_cfg[2] = (rd_pre & 0x1) << 16	|
+				 (popts->rdimm ? 1 : 0);
+	if (pdimm->package_3ds != 0) {
+		if (((pdimm->package_3ds + 1) & 0x1) != 0) {
+			WARN("Unsupported 3DS DIMM\n");
+		} else {
+			regs->sdram_cfg[2] |= ((pdimm->package_3ds + 1) >> 1)
+						  << 4;
+		}
+	}
+	debug("sdram_cfg[2] = 0x%x\n", regs->sdram_cfg[2]);
+}
+
+static void cal_ddr_sdram_interval(const unsigned long clk,
+				   struct ddr_cfg_regs *regs,
+				   const struct memctl_opt *popts,
+				   const struct dimm_params *pdimm)
+{
+	const unsigned int refint = picos_to_mclk(clk, pdimm->refresh_rate_ps);
+	const unsigned int bstopre = popts->bstopre;
+
+	regs->interval = ((refint & 0xFFFF) << 16)	|
+				  ((bstopre & 0x3FFF) << 0);
+	debug("interval = 0x%x\n", regs->interval);
+}
+
+/* Requires cs and cfg to be set first */
+static void cal_ddr_sdram_mode(const unsigned long clk,
+			       struct ddr_cfg_regs *regs,
+			       const struct memctl_opt *popts,
+			       const struct ddr_conf *conf,
+			       const struct dimm_params *pdimm,
+			       unsigned int cas_latency,
+			       unsigned int additive_latency,
+			       const unsigned int ip_rev)
+{
+	int i;
+	unsigned short esdmode;		/* Extended SDRAM mode */
+	unsigned short sdmode;		/* SDRAM mode */
+
+	/* Mode Register - MR1 */
+	const unsigned int qoff = 0;
+	const unsigned int tdqs_en = 0;
+	unsigned int rtt;
+	const unsigned int wrlvl_en = 0;
+	unsigned int al = 0;
+	unsigned int dic = 0;
+	const unsigned int dll_en = 1;
+
+	/* Mode Register - MR0 */
+	unsigned int wr = 0;
+	const unsigned int dll_rst = 0;
+	const unsigned int mode = 0;
+	unsigned int caslat = 4;	/* CL field, set from cas_latency below */
+	/* BT: Burst Type (0=Nibble Sequential, 1=Interleaved) */
+	const unsigned int bt = 0;
+	const unsigned int bl = popts->burst_length == DDR_BL8 ? 0 :
+				 (popts->burst_length == DDR_BC4 ? 2 : 1);
+
+	const unsigned int wr_mclk = picos_to_mclk(clk, pdimm->twr_ps);
+	/* DDR4 supports WR 10, 12, 14, 16, 18, 20, 24 */
+	static const int wr_table[] = {
+		0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 6
+	};
+	/* DDR4 supports CAS 9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 22, 24 */
+	static const int cas_latency_table[] = {
+		0, 1, 2, 3, 4, 5, 6, 7, 13, 8,
+		14, 9, 15, 10, 12, 11, 16, 17,
+		18, 19, 20, 21, 22, 23
+	};
+	const unsigned int unq_mrs_en = ip_rev < U(0x50500) ? 1U : 0U;
+	unsigned short esdmode2 = 0U;
+	unsigned short esdmode3 = 0U;
+	const unsigned int wr_crc = 0U;
+	unsigned int rtt_wr = 0U;
+	const unsigned int srt = 0U;
+	unsigned int cwl = cal_cwl(clk);
+	const unsigned int mpr = 0U;
+	const unsigned int mclk_ps = get_memory_clk_ps(clk);
+	const unsigned int wc_lat = 0U;
+	unsigned short esdmode4 = 0U;
+	unsigned short esdmode5;
+	int rtt_park_all = 0;
+	unsigned int rtt_park;
+	const bool four_cs = conf->cs_in_use == 0xf ? true : false;
+	unsigned short esdmode6 = 0U;	/* Extended SDRAM mode 6 */
+	unsigned short esdmode7 = 0U;	/* Extended SDRAM mode 7 */
+	const unsigned int tccdl_min = max(5U,
+					   picos_to_mclk(clk, pdimm->tccdl_ps));
+
+	if (popts->rtt_override != 0U) {
+		rtt = popts->rtt_override_value;
+	} else {
+		rtt = popts->cs_odt[0].odt_rtt_norm;
+	}
+
+	if (additive_latency == (cas_latency - 1)) {
+		al = 1;
+	}
+	if (additive_latency == (cas_latency - 2)) {
+		al = 2;
+	}
+
+	if (popts->quad_rank_present != 0 || popts->output_driver_impedance != 0) {
+		dic = 1;	/* output driver impedance 240/7 ohm */
+	}
+
+	esdmode = (((qoff & 0x1) << 12)				|
+		   ((tdqs_en & 0x1) << 11)			|
+		   ((rtt & 0x7) << 8)				|
+		   ((wrlvl_en & 0x1) << 7)			|
+		   ((al & 0x3) << 3)				|
+		   ((dic & 0x3) << 1)				|
+		   ((dll_en & 0x1) << 0));
+
+	if (wr_mclk >= 10 && wr_mclk <= 24) {
+		wr = wr_table[wr_mclk - 10];
+	} else {
+		ERROR("unsupported wr_mclk = %d for mode register\n", wr_mclk);
+	}
+
+	/* look up table to get the cas latency bits */
+	if (cas_latency >= 9 && cas_latency <= 32) {
+		caslat = cas_latency_table[cas_latency - 9];
+	} else {
+		WARN("Unsupported CAS latency for mode register\n");
+	}
+
+	sdmode = (((caslat & 0x10) << 8)			|
+		  ((wr & 0x7) << 9)				|
+		  ((dll_rst & 0x1) << 8)			|
+		  ((mode & 0x1) << 7)				|
+		  (((caslat >> 1) & 0x7) << 4)			|
+		  ((bt & 0x1) << 3)				|
+		  ((caslat & 1) << 2)				|
+		  ((bl & 0x3) << 0));
+
+	regs->sdram_mode[0] = (((esdmode & 0xFFFF) << 16)	|
+				 ((sdmode & 0xFFFF) << 0));
+	debug("sdram_mode[0] = 0x%x\n", regs->sdram_mode[0]);
+
+	switch (cwl) {
+	case 9:
+	case 10:
+	case 11:
+	case 12:
+		cwl -= 9;
+		break;
+	case 14:
+		cwl -= 10;
+		break;
+	case 16:
+		cwl -= 11;
+		break;
+	case 18:
+		cwl -= 12;
+		break;
+	case 20:
+		cwl -= 13;
+		break;
+	default:
+		ERROR("Unsupported CWL %u\n", cwl);
+		break;
+	}
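+	/* e.g. CWL = 12 encodes as 3 and CWL = 14 as 4, per the DDR4 MR2 CWL field */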
+
+	if (popts->rtt_override != 0) {
+		rtt_wr = popts->rtt_wr_override_value;
+	} else {
+		rtt_wr = popts->cs_odt[0].odt_rtt_wr;
+	}
+
+	esdmode2 = ((wr_crc & 0x1) << 12)			|
+		   ((rtt_wr & 0x7) << 9)			|
+		   ((srt & 0x3) << 6)				|
+		   ((cwl & 0x7) << 3);
+	esdmode3 = ((mpr & 0x3) << 11) | ((wc_lat & 0x3) << 9);
+
+	regs->sdram_mode[1] = ((esdmode2 & 0xFFFF) << 16)	|
+				((esdmode3 & 0xFFFF) << 0);
+	debug("sdram_mode[1] = 0x%x\n", regs->sdram_mode[1]);
+
+	esdmode6 = ((tccdl_min - 4) & 0x7) << 10;
+	if (popts->vref_dimm != 0) {
+		esdmode6 |= popts->vref_dimm & 0x7f;
+	} else if ((popts->ddr_cdr2 & DDR_CDR2_VREF_RANGE_2) != 0) {
+		esdmode6 |= 1 << 6;	/* Range 2 */
+	}
+
+	regs->sdram_mode[9] = ((esdmode6 & 0xffff) << 16)	|
+				 ((esdmode7 & 0xffff) << 0);
+	debug("sdram_mode[9] = 0x%x\n", regs->sdram_mode[9]);
+
+	rtt_park = (popts->rtt_park != 0) ? popts->rtt_park : 240;
+	switch (rtt_park) {
+	case 240:
+		rtt_park = 0x4;
+		break;
+	case 120:
+		rtt_park = 0x2;
+		break;
+	case 80:
+		rtt_park = 0x6;
+		break;
+	case 60:
+		rtt_park = 0x1;
+		break;
+	case 48:
+		rtt_park = 0x5;
+		break;
+	case 40:
+		rtt_park = 0x3;
+		break;
+	case 34:
+		rtt_park = 0x7;
+		break;
+	default:
+		rtt_park = 0;
+		break;
+	}
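+	/* The codes above follow the DDR4 MR5 RTT_PARK encoding (ohms -> field value) */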
+
+	for (i = 0; i < DDRC_NUM_CS; i++) {
+		if (i != 0 && unq_mrs_en == 0) {
+			break;
+		}
+
+		if (popts->rtt_override != 0) {
+			rtt = popts->rtt_override_value;
+			rtt_wr = popts->rtt_wr_override_value;
+		} else {
+			rtt = popts->cs_odt[i].odt_rtt_norm;
+			rtt_wr = popts->cs_odt[i].odt_rtt_wr;
+		}
+
+		esdmode &= 0xF8FF;	/* clear bit 10,9,8 for rtt */
+		esdmode |= (rtt & 0x7) << 8;
+		esdmode2 &= 0xF9FF;	/* clear bit 10, 9 */
+		esdmode2 |= (rtt_wr & 0x3) << 9;
+		esdmode5 = (popts->x4_en) ? 0 : 0x400; /* data mask */
+
+		if (rtt_park_all == 0 &&
+		    ((regs->cs[i].config & SDRAM_CS_CONFIG_EN) != 0)) {
+			esdmode5 |= rtt_park << 6;
+			rtt_park_all = four_cs ? 0 : 1;
+		}
+
+		if (((regs->sdram_cfg[1] & SDRAM_CFG2_AP_EN) != 0) &&
+		    (popts->rdimm == 0)) {
+			if (mclk_ps >= 935) {
+				esdmode5 |= DDR_MR5_CA_PARITY_LAT_4_CLK;
+			} else if (mclk_ps >= 833) {
+				esdmode5 |= DDR_MR5_CA_PARITY_LAT_5_CLK;
+			} else {
+				esdmode5 |= DDR_MR5_CA_PARITY_LAT_5_CLK;
+				WARN("Unsupported mclk_ps %d\n", mclk_ps);
+			}
+		}
+
+		switch (i) {
+		case 0:
+			regs->sdram_mode[8] = ((esdmode4 & 0xffff) << 16) |
+						((esdmode5 & 0xffff) << 0);
+			debug("sdram_mode[8] = 0x%x\n", regs->sdram_mode[8]);
+			break;
+		case 1:
+			regs->sdram_mode[2] = (((esdmode & 0xFFFF) << 16) |
+					      ((sdmode & 0xFFFF) << 0));
+			regs->sdram_mode[3] = ((esdmode2 & 0xFFFF) << 16) |
+					      ((esdmode3 & 0xFFFF) << 0);
+			regs->sdram_mode[10] = ((esdmode4 & 0xFFFF) << 16) |
+					       ((esdmode5 & 0xFFFF) << 0);
+			regs->sdram_mode[11] = ((esdmode6 & 0xFFFF) << 16) |
+					       ((esdmode7 & 0xFFFF) << 0);
+			debug("sdram_mode[2] = 0x%x\n", regs->sdram_mode[2]);
+			debug("sdram_mode[3] = 0x%x\n", regs->sdram_mode[3]);
+			debug("sdram_mode[10] = 0x%x\n", regs->sdram_mode[10]);
+			debug("sdram_mode[11] = 0x%x\n", regs->sdram_mode[11]);
+			break;
+		case 2:
+			regs->sdram_mode[4] = (((esdmode & 0xFFFF) << 16) |
+					      ((sdmode & 0xFFFF) << 0));
+			regs->sdram_mode[5] = ((esdmode2 & 0xFFFF) << 16) |
+					      ((esdmode3 & 0xFFFF) << 0);
+			regs->sdram_mode[12] = ((esdmode4 & 0xFFFF) << 16) |
+					       ((esdmode5 & 0xFFFF) << 0);
+			regs->sdram_mode[13] = ((esdmode6 & 0xFFFF) << 16) |
+					       ((esdmode7 & 0xFFFF) << 0);
+			debug("sdram_mode[4] = 0x%x\n", regs->sdram_mode[4]);
+			debug("sdram_mode[5] = 0x%x\n", regs->sdram_mode[5]);
+			debug("sdram_mode[12] = 0x%x\n", regs->sdram_mode[12]);
+			debug("sdram_mode[13] = 0x%x\n", regs->sdram_mode[13]);
+			break;
+		case 3:
+			regs->sdram_mode[6] = (((esdmode & 0xFFFF) << 16) |
+					      ((sdmode & 0xFFFF) << 0));
+			regs->sdram_mode[7] = ((esdmode2 & 0xFFFF) << 16) |
+					      ((esdmode3 & 0xFFFF) << 0);
+			regs->sdram_mode[14] = ((esdmode4 & 0xFFFF) << 16) |
+					       ((esdmode5 & 0xFFFF) << 0);
+			regs->sdram_mode[15] = ((esdmode6 & 0xFFFF) << 16) |
+					       ((esdmode7 & 0xFFFF) << 0);
+			debug("sdram_mode[6] = 0x%x\n", regs->sdram_mode[6]);
+			debug("sdram_mode[7] = 0x%x\n", regs->sdram_mode[7]);
+			debug("sdram_mode[14] = 0x%x\n", regs->sdram_mode[14]);
+			debug("sdram_mode[15] = 0x%x\n", regs->sdram_mode[15]);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+#ifndef CONFIG_MEM_INIT_VALUE
+#define CONFIG_MEM_INIT_VALUE 0xDEADBEEF
+#endif
+static void cal_ddr_data_init(struct ddr_cfg_regs *regs)
+{
+	regs->data_init = CONFIG_MEM_INIT_VALUE;
+}
+
+static void cal_ddr_dq_mapping(struct ddr_cfg_regs *regs,
+			       const struct dimm_params *pdimm)
+{
+	const unsigned int acc_ecc_en = (regs->sdram_cfg[0] >> 2) & 0x1;
+/* FIXME: revert the dq mapping from DIMM */
+	regs->dq_map[0] = ((pdimm->dq_mapping[0] & 0x3F) << 26)	|
+			 ((pdimm->dq_mapping[1] & 0x3F) << 20)	|
+			 ((pdimm->dq_mapping[2] & 0x3F) << 14)	|
+			 ((pdimm->dq_mapping[3] & 0x3F) << 8)	|
+			 ((pdimm->dq_mapping[4] & 0x3F) << 2);
+
+	regs->dq_map[1] = ((pdimm->dq_mapping[5] & 0x3F) << 26)	|
+			 ((pdimm->dq_mapping[6] & 0x3F) << 20)	|
+			 ((pdimm->dq_mapping[7] & 0x3F) << 14)	|
+			 ((pdimm->dq_mapping[10] & 0x3F) << 8)	|
+			 ((pdimm->dq_mapping[11] & 0x3F) << 2);
+
+	regs->dq_map[2] = ((pdimm->dq_mapping[12] & 0x3F) << 26)	|
+			 ((pdimm->dq_mapping[13] & 0x3F) << 20)		|
+			 ((pdimm->dq_mapping[14] & 0x3F) << 14)		|
+			 ((pdimm->dq_mapping[15] & 0x3F) << 8)		|
+			 ((pdimm->dq_mapping[16] & 0x3F) << 2);
+
+	/* dq_map for ECC[4:7] is set to 0 if accumulated ECC is enabled */
+	regs->dq_map[3] = ((pdimm->dq_mapping[17] & 0x3F) << 26)	|
+			 ((pdimm->dq_mapping[8] & 0x3F) << 20)		|
+			 ((acc_ecc_en != 0) ? 0 :
+			  (pdimm->dq_mapping[9] & 0x3F) << 14)		|
+			 pdimm->dq_mapping_ors;
+	debug("dq_map[0] = 0x%x\n", regs->dq_map[0]);
+	debug("dq_map[1] = 0x%x\n", regs->dq_map[1]);
+	debug("dq_map[2] = 0x%x\n", regs->dq_map[2]);
+	debug("dq_map[3] = 0x%x\n", regs->dq_map[3]);
+}
+
+static void cal_ddr_zq_cntl(struct ddr_cfg_regs *regs)
+{
+	const unsigned int zqinit = 10U;	/* 1024 clocks */
+	const unsigned int zqoper = 9U;		/* 512 clocks */
+	const unsigned int zqcs = 7U;		/* 128 clocks */
+	const unsigned int zqcs_init = 5U;	/* 1024 refresh sequences */
+	const unsigned int zq_en = 1U;		/* enabled */
+
+	regs->zq_cntl = ((zq_en & 0x1) << 31)			|
+			   ((zqinit & 0xF) << 24)		|
+			   ((zqoper & 0xF) << 16)		|
+			   ((zqcs & 0xF) << 8)			|
+			   ((zqcs_init & 0xF) << 0);
+	debug("zq_cntl = 0x%x\n", regs->zq_cntl);
+}
+
+static void cal_ddr_sr_cntr(struct ddr_cfg_regs *regs,
+			    const struct memctl_opt *popts)
+{
+	const unsigned int sr_it = (popts->auto_self_refresh_en) ?
+					popts->sr_it : 0;
+
+	regs->ddr_sr_cntr = (sr_it & 0xF) << 16;
+	debug("ddr_sr_cntr = 0x%x\n", regs->ddr_sr_cntr);
+}
+
+static void cal_ddr_eor(struct ddr_cfg_regs *regs,
+			const struct memctl_opt *popts)
+{
+	if (popts->addr_hash != 0) {
+		regs->eor = 0x40000000;	/* address hash enable */
+		debug("eor = 0x%x\n", regs->eor);
+	}
+}
+
+static void cal_ddr_csn_bnds(struct ddr_cfg_regs *regs,
+			     const struct memctl_opt *popts,
+			     const struct ddr_conf *conf,
+			     const struct dimm_params *pdimm)
+{
+	int i;
+	unsigned long long ea, sa;
+
+	/* Chip Select Memory Bounds (CSn_BNDS) */
+	for (i = 0;
+		i < DDRC_NUM_CS && conf->cs_size[i];
+		i++) {
+		debug("cs_in_use = 0x%x\n", conf->cs_in_use);
+		if (conf->cs_in_use != 0) {
+			sa = conf->cs_base_addr[i];
+			ea = sa + conf->cs_size[i] - 1;
+			sa >>= 24;
+			ea >>= 24;
+			regs->cs[i].bnds = ((sa & 0xffff) << 16) |
+					   ((ea & 0xffff) << 0);
+			cal_csn_config(i, regs, popts, pdimm);
+		} else {
+			/* setting bnds to 0xffffffff for inactive CS */
+			regs->cs[i].bnds = 0xffffffff;
+		}
+
+		debug("cs[%d].bnds = 0x%x\n", i, regs->cs[i].bnds);
+	}
+}
+
+static void cal_ddr_addr_dec(struct ddr_cfg_regs *regs)
+{
+#ifdef CONFIG_DDR_ADDR_DEC
+	unsigned int ba_bits __unused;
+	char p __unused;
+	const unsigned int cs0_config = regs->cs[0].config;
+	const int cacheline = PLATFORM_CACHE_LINE_SHIFT;
+	unsigned int bg_bits;
+	unsigned int row_bits;
+	unsigned int col_bits;
+	unsigned int cs;
+	unsigned int map_row[18];
+	unsigned int map_col[11];
+	unsigned int map_ba[2];
+	unsigned int map_cid[2] = {0x3F, 0x3F};
+	unsigned int map_bg[2] = {0x3F, 0x3F};
+	unsigned int map_cs[2] = {0x3F, 0x3F};
+	unsigned int dbw;
+	unsigned int ba_intlv;
+	int placement;
+	int intlv;
+	int abort = 0;
+	int i;
+	int j;
+
+	col_bits = (cs0_config >> 0) & 0x7;
+	if (col_bits < 4) {
+		col_bits += 8;
+	} else if (col_bits < 7 || col_bits > 10) {
+		ERROR("%s: invalid col_bits = %d\n", __func__, col_bits);
+	}
+	row_bits = ((cs0_config >> 8) & 0x7) + 12;
+	ba_bits = ((cs0_config >> 14) & 0x3) + 2;
+	bg_bits = ((cs0_config >> 4) & 0x3) + 0;
+	intlv = (cs0_config >> 24) & 0xf;
+	ba_intlv = (regs->sdram_cfg[0] >> 8) & 0x7f;
+	switch (ba_intlv) {
+	case DDR_BA_INTLV_CS01:
+		cs = 1;
+		break;
+	case DDR_BA_INTLV_CS0123:
+		cs = 2;
+		break;
+	case DDR_BA_NONE:
+		cs = 0;
+		break;
+	default:
+		ERROR("%s ba_intlv 0x%x\n", __func__, ba_intlv);
+		return;
+	}
+	debug("col %d, row %d, ba %d, bg %d, intlv %d\n",
+			col_bits, row_bits, ba_bits, bg_bits, intlv);
+	/*
+	 * Example mapping of 15x2x2x10
+	 * ---- --rr rrrr rrrr rrrr rCBB Gccc cccI cGcc cbbb
+	 */
+	dbw = (regs->sdram_cfg[0] >> 19) & 0x3;
+	switch (dbw) {
+	case 0:	/* 64-bit */
+		placement = 3;
+		break;
+	case 1:	/* 32-bit */
+		placement = 2;
+		break;
+	default:
+		ERROR("%s dbw = %d\n", __func__, dbw);
+		return;
+	}
+	debug("cacheline size %d\n", cacheline);
+	for (i = 0; placement < cacheline; i++) {
+		map_col[i] = placement++;
+	}
+	map_bg[0] = placement++;
+	for ( ; i < col_bits; i++) {
+		map_col[i] = placement++;
+		if (placement == intlv) {
+			placement++;
+		}
+	}
+	for ( ; i < 11; i++) {
+		map_col[i] = 0x3F;	/* unused col bits */
+	}
+
+	if (bg_bits >= 2) {
+		map_bg[1] = placement++;
+	}
+	map_ba[0] = placement++;
+	map_ba[1] = placement++;
+	if (cs != 0U) {
+		map_cs[0] = placement++;
+		if (cs == 2U) {
+			map_cs[1] = placement++;
+		}
+	} else {
+		map_cs[0] = U(0x3F);
+	}
+
+	for (i = 0; i < row_bits; i++) {
+		map_row[i] = placement++;
+	}
+
+	for ( ; i < 18; i++) {
+		map_row[i] = 0x3F;	/* unused row bits */
+	}
+
+	for (i = 39; i >= 0 ; i--) {
+		if (i == intlv) {
+			placement = 8;
+			p = 'I';
+		} else if (i < 3) {
+			p = 'b';
+			placement = 0;
+		} else {
+			placement = 0;
+			p = '-';
+		}
+		for (j = 0; j < 18; j++) {
+			if (map_row[j] != i) {
+				continue;
+			}
+			if (placement != 0) {
+				abort = 1;
+				ERROR("%s wrong address bit %d\n", __func__, i);
+			}
+			placement = i;
+			p = 'r';
+		}
+		for (j = 0; j < 11; j++) {
+			if (map_col[j] != i) {
+				continue;
+			}
+			if (placement != 0) {
+				abort = 1;
+				ERROR("%s wrong address bit %d\n", __func__, i);
+			}
+			placement = i;
+			p = 'c';
+		}
+		for (j = 0; j < 2; j++) {
+			if (map_ba[j] != i) {
+				continue;
+			}
+			if (placement != 0) {
+				abort = 1;
+				ERROR("%s wrong address bit %d\n", __func__, i);
+			}
+			placement = i;
+			p = 'B';
+		}
+		for (j = 0; j < 2; j++) {
+			if (map_bg[j] != i) {
+				continue;
+			}
+			if (placement != 0) {
+				abort = 1;
+				ERROR("%s wrong address bit %d\n", __func__, i);
+			}
+			placement = i;
+			p = 'G';
+		}
+		for (j = 0; j < 2; j++) {
+			if (map_cs[j] != i) {
+				continue;
+			}
+			if (placement != 0) {
+				abort = 1;
+				ERROR("%s wrong address bit %d\n", __func__, i);
+			}
+			placement = i;
+			p = 'C';
+		}
+#ifdef DDR_DEBUG
+		printf("%c", p);
+		if ((i % 4) == 0) {
+			printf(" ");
+		}
+#endif
+	}
+#ifdef DDR_DEBUG
+	puts("\n");
+#endif
+
+	if (abort != 0) {
+		return;
+	}
+
+	regs->dec[0] = map_row[17] << 26		|
+		      map_row[16] << 18			|
+		      map_row[15] << 10			|
+		      map_row[14] << 2;
+	regs->dec[1] = map_row[13] << 26		|
+		      map_row[12] << 18			|
+		      map_row[11] << 10			|
+		      map_row[10] << 2;
+	regs->dec[2] = map_row[9] << 26			|
+		      map_row[8] << 18			|
+		      map_row[7] << 10			|
+		      map_row[6] << 2;
+	regs->dec[3] = map_row[5] << 26			|
+		      map_row[4] << 18			|
+		      map_row[3] << 10			|
+		      map_row[2] << 2;
+	regs->dec[4] = map_row[1] << 26			|
+		      map_row[0] << 18			|
+		      map_col[10] << 10			|
+		      map_col[9] << 2;
+	regs->dec[5] = map_col[8] << 26			|
+		      map_col[7] << 18			|
+		      map_col[6] << 10			|
+		      map_col[5] << 2;
+	regs->dec[6] = map_col[4] << 26			|
+		      map_col[3] << 18			|
+		      map_col[2] << 10			|
+		      map_col[1] << 2;
+	regs->dec[7] = map_col[0] << 26			|
+		      map_ba[1] << 18			|
+		      map_ba[0] << 10			|
+		      map_cid[1] << 2;
+	regs->dec[8] = map_cid[1] << 26			|
+		      map_cs[1] << 18			|
+		      map_cs[0] << 10			|
+		      map_bg[1] << 2;
+	regs->dec[9] = map_bg[0] << 26			|
+		      1;
+	for (i = 0; i < 10; i++) {
+		debug("dec[%d] = 0x%x\n", i, regs->dec[i]);
+	}
+#endif
+}
+
+static unsigned int skip_caslat(unsigned int tckmin_ps,
+				unsigned int taamin_ps,
+				unsigned int mclk_ps,
+				unsigned int package_3ds)
+{
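+	/*
+	 * Returns a mask in the same bit positions as caslat_x (bit n = CL n);
+	 * set bits mark CAS latencies to be skipped for this speed bin.
+	 */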
+	int i, j, k;
+	struct cas {
+		const unsigned int tckmin_ps;
+		const unsigned int caslat[4];
+	};
+	struct speed {
+		const struct cas *cl;
+		const unsigned int taamin_ps[4];
+	};
+	const struct cas cl_3200[] = {
+		{625,	{0xa00000, 0xb00000, 0xf000000,} },
+		{750,	{ 0x20000,  0x60000,  0xe00000,} },
+		{833,	{  0x8000,  0x18000,   0x38000,} },
+		{937,	{  0x4000,   0x4000,    0xc000,} },
+		{1071,	{  0x1000,   0x1000,    0x3000,} },
+		{1250,	{   0x400,    0x400,     0xc00,} },
+		{1500,	{       0,    0x600,     0x200,} },
+	};
+	const struct cas cl_2933[] = {
+		{682,	{       0,  0x80000, 0x180000, 0x380000} },
+		{750,	{ 0x20000,  0x60000,  0x60000,  0xe0000} },
+		{833,	{  0x8000,  0x18000,  0x18000,  0x38000} },
+		{937,	{  0x4000,   0x4000,   0x4000,   0xc000} },
+		{1071,	{  0x1000,   0x1000,   0x1000,   0x3000} },
+		{1250,	{   0x400,    0x400,    0x400,    0xc00} },
+		{1500,	{       0,    0x200,    0x200,    0x200} },
+	};
+	const struct cas cl_2666[] = {
+		{750,	{       0,  0x20000,  0x60000,  0xe0000} },
+		{833,	{  0x8000,  0x18000,  0x18000,  0x38000} },
+		{937,	{  0x4000,   0x4000,   0x4000,   0xc000} },
+		{1071,	{  0x1000,   0x1000,   0x1000,   0x3000} },
+		{1250,	{   0x400,    0x400,    0x400,    0xc00} },
+		{1500,	{       0,        0,    0x200,    0x200} },
+	};
+	const struct cas cl_2400[] = {
+		{833,	{       0,   0x8000,  0x18000,  0x38000} },
+		{937,	{  0xc000,   0x4000,   0x4000,   0xc000} },
+		{1071,	{  0x3000,   0x1000,   0x1000,   0x3000} },
+		{1250,	{   0xc00,    0x400,    0x400,    0xc00} },
+		{1500,	{       0,    0x400,    0x200,    0x200} },
+	};
+	const struct cas cl_2133[] = {
+		{937,	{       0,   0x4000,   0xc000,} },
+		{1071,	{  0x2000,        0,   0x2000,} },
+		{1250,	{   0x800,        0,    0x800,} },
+		{1500,	{       0,    0x400,    0x200,} },
+	};
+	const struct cas cl_1866[] = {
+		{1071,	{       0,   0x1000,   0x3000,} },
+		{1250,	{   0xc00,    0x400,    0xc00,} },
+		{1500,	{       0,    0x400,    0x200,} },
+	};
+	const struct cas cl_1600[] = {
+		{1250,	{       0,    0x400,    0xc00,} },
+		{1500,	{       0,    0x400,    0x200,} },
+	};
+	const struct speed bin_0[] = {
+		{cl_3200, {12500, 13750, 15000,} },
+		{cl_2933, {12960, 13640, 13750, 15000,} },
+		{cl_2666, {12750, 13500, 13750, 15000,} },
+		{cl_2400, {12500, 13320, 13750, 15000,} },
+		{cl_2133, {13130, 13500, 15000,} },
+		{cl_1866, {12850, 13500, 15000,} },
+		{cl_1600, {12500, 13500, 15000,} }
+	};
+	const struct cas cl_3200_3ds[] = {
+		{625,	{ 0xa000000, 0xb000000, 0xf000000,} },
+		{750,	{ 0xaa00000, 0xab00000, 0xef00000,} },
+		{833,	{ 0xaac0000, 0xaac0000, 0xebc0000,} },
+		{937,	{ 0xaab0000, 0xaab0000, 0xeaf0000,} },
+		{1071,	{ 0xaaa4000, 0xaaac000, 0xeaec000,} },
+		{1250,	{ 0xaaa0000, 0xaaa2000, 0xeaeb000,} },
+	};
+	const struct cas cl_2666_3ds[] = {
+		{750,	{ 0xa00000, 0xb00000, 0xf00000,} },
+		{833,	{ 0xac0000, 0xac0000, 0xbc0000,} },
+		{937,	{ 0xab0000, 0xab0000, 0xaf0000,} },
+		{1071,	{ 0xaa4000, 0xaac000, 0xaac000,} },
+		{1250,	{ 0xaa0000, 0xaaa000, 0xaaa000,} },
+	};
+	const struct cas cl_2400_3ds[] = {
+		{833,	{ 0xe00000, 0xe40000, 0xec0000, 0xb00000} },
+		{937,	{ 0xe00000, 0xe00000, 0xea0000, 0xae0000} },
+		{1071,	{ 0xe00000, 0xe04000, 0xeac000, 0xaec000} },
+		{1250,	{ 0xe00000, 0xe00000, 0xeaa000, 0xae2000} },
+	};
+	const struct cas cl_2133_3ds[] = {
+		{937,	{  0x90000,  0xb0000,  0xf0000,} },
+		{1071,	{  0x84000,  0xac000,  0xec000,} },
+		{1250,	{  0x80000,  0xa2000,  0xe2000,} },
+	};
+	const struct cas cl_1866_3ds[] = {
+		{1071,	{        0,   0x4000,   0xc000,} },
+		{1250,	{        0,   0x1000,   0x3000,} },
+	};
+	const struct cas cl_1600_3ds[] = {
+		{1250,	{        0,   0x1000,   0x3000,} },
+	};
+	const struct speed bin_3ds[] = {
+		{cl_3200_3ds, {15000, 16250, 17140,} },
+		{cl_2666_3ds, {15000, 16500, 17140,} },
+		{cl_2400_3ds, {15000, 15830, 16670, 17140} },
+		{cl_2133_3ds, {15950, 16880, 17140,} },
+		{cl_1866_3ds, {15000, 16070, 17140,} },
+		{cl_1600_3ds, {15000, 16250, 17500,} },
+	};
+	const struct speed *bin;
+	int size;
+	unsigned int taamin_max, tck_max;
+
+	if (taamin_ps > ((package_3ds != 0) ? 21500 : 18000)) {
+		ERROR("taamin_ps %u invalid\n", taamin_ps);
+		return 0;
+	}
+	if (package_3ds != 0) {
+		bin = bin_3ds;
+		size = ARRAY_SIZE(bin_3ds);
+		taamin_max = 1250;
+		tck_max = 1500;
+	} else {
+		bin = bin_0;
+		size = ARRAY_SIZE(bin_0);
+		taamin_max = 1500;
+		tck_max = 1600;
+	}
+	if (mclk_ps < 625 || mclk_ps > tck_max) {
+		ERROR("mclk %u invalid\n", mclk_ps);
+		return 0;
+	}
+
+	for (i = 0; i < size; i++) {
+		if (bin[i].cl[0].tckmin_ps >= tckmin_ps) {
+			break;
+		}
+	}
+	if (i >= size) {
+		ERROR("speed bin not found\n");
+		return 0;
+	}
+	if (bin[i].cl[0].tckmin_ps > tckmin_ps && i > 0) {
+		i--;
+	}
+
+	for (j = 0; j < 4; j++) {
+		if ((bin[i].taamin_ps[j] == 0) ||
+		    bin[i].taamin_ps[j] >= taamin_ps) {
+			break;
+		}
+	}
+
+	if (j >= 4) {
+		ERROR("taamin_ps out of range.\n");
+		return 0;
+	}
+
+	if ((bin[i].taamin_ps[j] == 0) ||
+	    (bin[i].taamin_ps[j] > taamin_ps && j > 0)) {
+		j--;
+	}
+
+	for (k = 0; bin[i].cl[k].tckmin_ps < mclk_ps &&
+		    bin[i].cl[k].tckmin_ps < taamin_max; k++)
+		;
+	if (bin[i].cl[k].tckmin_ps > mclk_ps && k > 0) {
+		k--;
+	}
+
+	debug("Skip CL mask for this speed 0x%x\n", bin[i].cl[k].caslat[j]);
+
+	return bin[i].cl[k].caslat[j];
+}
+
+int compute_ddrc(const unsigned long clk,
+		 const struct memctl_opt *popts,
+		 const struct ddr_conf *conf,
+		 struct ddr_cfg_regs *regs,
+		 const struct dimm_params *pdimm,
+		 unsigned int ip_rev)
+{
+	unsigned int cas_latency;
+	unsigned int caslat_skip;
+	unsigned int additive_latency;
+	const unsigned int mclk_ps = get_memory_clk_ps(clk);
+	int i;
+
+	zeromem(regs, sizeof(struct ddr_cfg_regs));
+
+	if (mclk_ps < pdimm->tckmin_x_ps) {
+		ERROR("DDR Clk: MCLK cycle is %u ps.\n", mclk_ps);
+		ERROR("DDR Clk is faster than DIMM can support.\n");
+	}
+
+	/* calculate cas latency, override first */
+	cas_latency = (popts->caslat_override != 0) ?
+			popts->caslat_override_value :
+			(pdimm->taa_ps + mclk_ps - 1) / mclk_ps;
+
+	/* skip unsupported caslat based on speed bin */
+	caslat_skip = skip_caslat(pdimm->tckmin_x_ps,
+				  pdimm->taa_ps,
+				  mclk_ps,
+				  pdimm->package_3ds);
+	debug("Skip caslat 0x%x\n", caslat_skip);
+
+	/* Check if DIMM supports the cas latency */
+	i = 24;
+	while (((pdimm->caslat_x & ~caslat_skip & (1 << cas_latency)) == 0) &&
+	       (i-- > 0)) {
+		cas_latency++;
+	}
+
+	if (i <= 0) {
+		ERROR("Failed to find a proper cas latency\n");
+		return -EINVAL;
+	}
+	/* Verify cas latency does not exceed 18ns for DDR4 */
+	if (cas_latency * mclk_ps > 18000) {
+		ERROR("cas latency is too large %d\n", cas_latency);
+		return -EINVAL;
+	}
+
+	additive_latency = (popts->addt_lat_override != 0) ?
+				popts->addt_lat_override_value : 0;
+
+	cal_ddr_csn_bnds(regs, popts, conf, pdimm);
+	cal_ddr_sdram_cfg(clk, regs, popts, pdimm, ip_rev);
+	cal_ddr_sdram_rcw(clk, regs, popts, pdimm);
+	cal_timing_cfg(clk, regs, popts, pdimm, conf, cas_latency,
+		       additive_latency);
+	cal_ddr_dq_mapping(regs, pdimm);
+
+	if (ip_rev >= 0x50500) {
+		cal_ddr_addr_dec(regs);
+	}
+
+	cal_ddr_sdram_mode(clk, regs, popts, conf, pdimm, cas_latency,
+			   additive_latency, ip_rev);
+	cal_ddr_eor(regs, popts);
+	cal_ddr_data_init(regs);
+	cal_ddr_sdram_interval(clk, regs, popts, pdimm);
+	cal_ddr_zq_cntl(regs);
+	cal_ddr_sr_cntr(regs, popts);
+
+	return 0;
+}
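
For reference, the CAS latency selection above reduces to a ceiling division of tAAmin by the clock period, followed by a walk up the DIMM's supported-CL bitmask while ignoring the bits masked out by skip_caslat(), capped at 18 ns. The following is a minimal standalone sketch of that logic; the mask and timing values are illustrative only and not taken from the patch.

#include <stdio.h>

static unsigned int pick_cas_latency(unsigned int taa_ps, unsigned int mclk_ps,
				     unsigned int caslat_mask,
				     unsigned int caslat_skip)
{
	/* ceil(tAAmin / tCK), as in compute_ddrc() */
	unsigned int cl = (taa_ps + mclk_ps - 1U) / mclk_ps;

	/* bump CL until the DIMM advertises it and it is not skipped */
	while ((cl < 32U) &&
	       ((caslat_mask & ~caslat_skip & (1U << cl)) == 0U)) {
		cl++;
	}

	/* reject if nothing usable was found or tAA would exceed 18 ns */
	if ((cl >= 32U) || (cl * mclk_ps > 18000U)) {
		return 0U;
	}
	return cl;
}

int main(void)
{
	/* e.g. DDR4-2400 (tCK = 833 ps), tAAmin = 13750 ps, CLs 10..21 supported */
	printf("CL = %u\n", pick_cas_latency(13750U, 833U, 0x003ffc00U, 0U));
	return 0;
}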
diff --git a/drivers/nxp/ddr/nxp-ddr/utility.c b/drivers/nxp/ddr/nxp-ddr/utility.c
new file mode 100644
index 0000000..d33ad77
--- /dev/null
+++ b/drivers/nxp/ddr/nxp-ddr/utility.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <errno.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <common/debug.h>
+#include <ddr.h>
+#include <immap.h>
+#include <lib/mmio.h>
+
+#define UL_5POW12	244140625UL
+#define ULL_2E12	2000000000000ULL
+#define UL_2POW13	(1UL << 13)
+#define ULL_8FS		0xFFFFFFFFULL
+
+#define do_div(n, base) ({				\
+	unsigned int __base = (base);			\
+	unsigned int __rem;				\
+	__rem = ((unsigned long long)(n)) % __base;	\
+	(n) = ((unsigned long long)(n)) / __base;	\
+	__rem;						\
+})
+
+#define CCN_HN_F_SAM_NODEID_MASK	0x7f
+#ifdef NXP_HAS_CCN504
+#define CCN_HN_F_SAM_NODEID_DDR0	0x4
+#define CCN_HN_F_SAM_NODEID_DDR1	0xe
+#elif defined(NXP_HAS_CCN508)
+#define CCN_HN_F_SAM_NODEID_DDR0	0x8
+#define CCN_HN_F_SAM_NODEID_DDR1	0x18
+#endif
+
+unsigned long get_ddr_freq(struct sysinfo *sys, int ctrl_num)
+{
+	if (sys->freq_ddr_pll0 == 0) {
+		get_clocks(sys);
+	}
+
+	switch (ctrl_num) {
+	case 0:
+		return sys->freq_ddr_pll0;
+	case 1:
+		return sys->freq_ddr_pll0;
+	case 2:
+		return sys->freq_ddr_pll1;
+	}
+
+	return 0;
+}
+
+unsigned int get_memory_clk_ps(const unsigned long data_rate)
+{
+	unsigned int result;
+	/* Round to nearest 10ps, being careful about 64-bit multiply/divide */
+	unsigned long long rem, mclk_ps = ULL_2E12;
+
+	/* Now perform the big divide, the result fits in 32-bits */
+	rem = do_div(mclk_ps, data_rate);
+	result = (rem >= (data_rate >> 1)) ? mclk_ps + 1 : mclk_ps;
+
+	return result;
+}
+
+unsigned int picos_to_mclk(unsigned long data_rate, unsigned int picos)
+{
+	unsigned long long clks, clks_rem;
+
+	/* Short circuit for zero picos */
+	if ((picos == 0U) || (data_rate == 0UL)) {
+		return 0U;
+	}
+
+	/* First multiply the time by the data rate (32x32 => 64) */
+	clks = picos * (unsigned long long)data_rate;
+	/*
+	 * Now divide by 5^12 and track the 32-bit remainder, then divide
+	 * by 2*(2^12) using shifts (and updating the remainder).
+	 */
+	clks_rem = do_div(clks, UL_5POW12);
+	clks_rem += (clks & (UL_2POW13-1)) * UL_5POW12;
+	clks >>= 13U;
+
+	/* If we had a remainder greater than the 1ps error, then round up */
+	if (clks_rem > data_rate) {
+		clks++;
+	}
+
+	/* Clamp to the maximum representable value */
+	if (clks > ULL_8FS) {
+		clks = ULL_8FS;
+	}
+	return (unsigned int) clks;
+}
+
+/* valid_spd_mask has been checked by parse_spd */
+int disable_unused_ddrc(struct ddr_info *priv,
+			int valid_spd_mask, uintptr_t nxp_ccn_hn_f0_addr)
+{
+#if defined(NXP_HAS_CCN504) || defined(NXP_HAS_CCN508)
+	void *hnf_sam_ctrl = (void *)(nxp_ccn_hn_f0_addr + CCN_HN_F_SAM_CTL);
+	uint32_t val, nodeid;
+#ifdef NXP_HAS_CCN504
+	uint32_t num_hnf_nodes = 4U;
+#else
+	uint32_t num_hnf_nodes = 8U;
+#endif
+	int disable_ddrc = 0;
+	int i;
+
+	if (priv->num_ctlrs < 2) {
+		debug("%s: nothing to do.\n", __func__);
+	}
+
+	switch (priv->dimm_on_ctlr) {
+	case 1:
+		disable_ddrc = ((valid_spd_mask & 0x2) == 0) ? 2 : 0;
+		disable_ddrc = ((valid_spd_mask & 0x1) == 0) ? 1 : disable_ddrc;
+		break;
+	case 2:
+		disable_ddrc = ((valid_spd_mask & 0x4) == 0) ? 2 : 0;
+		disable_ddrc = ((valid_spd_mask & 0x1) == 0) ? 1 : disable_ddrc;
+		break;
+	default:
+		ERROR("Invalid number of DIMMs %d\n", priv->dimm_on_ctlr);
+		return -EINVAL;
+	}
+
+	if (disable_ddrc != 0) {
+		debug("valid_spd_mask = 0x%x\n", valid_spd_mask);
+	}
+
+	switch (disable_ddrc) {
+	case 1:
+		priv->num_ctlrs = 1;
+		priv->spd_addr = &priv->spd_addr[priv->dimm_on_ctlr];
+		priv->ddr[0] = priv->ddr[1];
+		priv->ddr[1] = NULL;
+		priv->phy[0] = priv->phy[1];
+		priv->phy[1] = NULL;
+		debug("Disable first DDR controller\n");
+		break;
+	case 2:
+		priv->num_ctlrs = 1;
+		priv->ddr[1] = NULL;
+		priv->phy[1] = NULL;
+		debug("Disable second DDR controller\n");
+		/* fallthrough */
+	case 0:
+		break;
+	default:
+		ERROR("Program error.\n");
+		return -EINVAL;
+	}
+
+	if (disable_ddrc == 0) {
+		debug("Both controllers in use.\n");
+		return 0;
+	}
+
+	for (i = 0; i < num_hnf_nodes; i++) {
+		val = mmio_read_64((uintptr_t)hnf_sam_ctrl);
+		nodeid = disable_ddrc == 1 ? CCN_HN_F_SAM_NODEID_DDR1 :
+			 (disable_ddrc == 2 ? CCN_HN_F_SAM_NODEID_DDR0 :
+			  (i < 4 ? CCN_HN_F_SAM_NODEID_DDR0
+				 : CCN_HN_F_SAM_NODEID_DDR1));
+		if (nodeid != (val & CCN_HN_F_SAM_NODEID_MASK)) {
+			debug("Setting HN-F node %d\n", i);
+			debug("nodeid = 0x%x\n", nodeid);
+			val &= ~CCN_HN_F_SAM_NODEID_MASK;
+			val |= nodeid;
+			mmio_write_64((uintptr_t)hnf_sam_ctrl, val);
+		}
+		hnf_sam_ctrl += CCN_HN_F_REGION_SIZE;
+	}
+#endif
+	return 0;
+}
+
+unsigned int get_ddrc_version(const struct ccsr_ddr *ddr)
+{
+	unsigned int ver;
+
+	ver = (ddr_in32(&ddr->ip_rev1) & 0xFFFF) << 8U;
+	ver |= (ddr_in32(&ddr->ip_rev2) & 0xFF00) >> 8U;
+
+	return ver;
+}
+
+void print_ddr_info(struct ccsr_ddr *ddr)
+{
+	unsigned int cs0_config = ddr_in32(&ddr->csn_cfg[0]);
+	unsigned int sdram_cfg = ddr_in32(&ddr->sdram_cfg);
+	int cas_lat;
+
+	if ((sdram_cfg & SDRAM_CFG_MEM_EN) == 0U) {
+		printf(" (DDR not enabled)\n");
+		return;
+	}
+
+	printf("DDR");
+	switch ((sdram_cfg & SDRAM_CFG_SDRAM_TYPE_MASK) >>
+		SDRAM_CFG_SDRAM_TYPE_SHIFT) {
+	case SDRAM_TYPE_DDR4:
+		printf("4");
+		break;
+	default:
+		printf("?");
+		break;
+	}
+
+	switch (sdram_cfg & SDRAM_CFG_DBW_MASK) {
+	case SDRAM_CFG_32_BW:
+		printf(", 32-bit");
+		break;
+	case SDRAM_CFG_16_BW:
+		printf(", 16-bit");
+		break;
+	case SDRAM_CFG_8_BW:
+		printf(", 8-bit");
+		break;
+	default:
+		printf(", 64-bit");
+		break;
+	}
+
+	/* Calculate CAS latency based on timing cfg values */
+	cas_lat = ((ddr_in32(&ddr->timing_cfg_1) >> 16) & 0xf);
+	cas_lat += 2;	/* for DDRC newer than 4.4 */
+	cas_lat += ((ddr_in32(&ddr->timing_cfg_3) >> 12) & 3) << 4;
+	printf(", CL=%d", cas_lat >> 1);
+	if ((cas_lat & 0x1) != 0) {
+		printf(".5");
+	}
+
+	if ((sdram_cfg & SDRAM_CFG_ECC_EN) != 0) {
+		printf(", ECC on");
+	} else {
+		printf(", ECC off");
+	}
+
+	if ((cs0_config & 0x20000000) != 0) {
+		printf(", ");
+		switch ((cs0_config >> 24) & 0xf) {
+		case DDR_256B_INTLV:
+			printf("256B");
+			break;
+		default:
+			printf("invalid");
+			break;
+		}
+	}
+
+	if (((sdram_cfg >> 8) & 0x7f) != 0) {
+		printf(", ");
+		switch ((sdram_cfg >> 8) & 0x7f) {
+		case DDR_BA_INTLV_CS0123:
+			printf("CS0+CS1+CS2+CS3");
+			break;
+		case DDR_BA_INTLV_CS01:
+			printf("CS0+CS1");
+			break;
+		default:
+			printf("invalid");
+			break;
+		}
+	}
+	printf("\n");
+}
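
The fixed-point arithmetic in picos_to_mclk() relies on 2*10^12 factoring as 5^12 * 2^13: it divides by 5^12 first, then shifts right by 13, and rounds up unless the remainder is within 1 ps of an exact multiple. The plain 64-bit cross-check below is assumed to agree for typical JEDEC timing values; it is only a sanity sketch, not part of the patch.

#include <stdio.h>
#include <stdint.h>

/* straight ceiling of picos * data_rate / 2e12 */
static unsigned int ref_picos_to_mclk(unsigned long data_rate, unsigned int picos)
{
	uint64_t num = (uint64_t)picos * data_rate;

	return (unsigned int)((num + 2000000000000ULL - 1ULL) / 2000000000000ULL);
}

int main(void)
{
	/* DDR4-3200 (3.2 GT/s): tRCD = 13.75 ns -> 22 memory clocks */
	printf("%u\n", ref_picos_to_mclk(3200000000UL, 13750U));
	return 0;
}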
diff --git a/drivers/nxp/ddr/phy-gen1/phy.c b/drivers/nxp/ddr/phy-gen1/phy.c
new file mode 100644
index 0000000..4b66d38
--- /dev/null
+++ b/drivers/nxp/ddr/phy-gen1/phy.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <common/debug.h>
+#include <ddr.h>
+
+static void cal_ddr_sdram_clk_cntl(struct ddr_cfg_regs *regs,
+					 const struct memctl_opt *popts)
+{
+	const unsigned int clk_adj = popts->clk_adj;
+	const unsigned int ss_en = 0U;
+
+	regs->clk_cntl = ((ss_en & U(0x1)) << 31U)		|
+				  ((clk_adj & U(0x1F)) << 22U);
+	debug("clk_cntl = 0x%x\n", regs->clk_cntl);
+}
+
+static void cal_ddr_cdr(struct ddr_cfg_regs *regs,
+			const struct memctl_opt *popts)
+{
+	regs->cdr[0] = popts->ddr_cdr1;
+	regs->cdr[1] = popts->ddr_cdr2;
+	debug("cdr[0] = 0x%x\n", regs->cdr[0]);
+	debug("cdr[1] = 0x%x\n", regs->cdr[1]);
+}
+
+static void cal_ddr_wrlvl_cntl(struct ddr_cfg_regs *regs,
+				const struct memctl_opt *popts)
+{
+	const unsigned int wrlvl_en = 1U;	/* enabled */
+	const unsigned int wrlvl_mrd = U(0x6);	/* > 40nCK */
+	const unsigned int wrlvl_odten = U(0x7);	/* 128 */
+	const unsigned int wrlvl_dqsen = U(0x5);	/* > 25nCK */
+	const unsigned int wrlvl_wlr = U(0x6);	/* > tWLO + 6 */
+	const unsigned int wrlvl_smpl = popts->wrlvl_override ?
+					popts->wrlvl_sample : U(0xf);
+	const unsigned int wrlvl_start = popts->wrlvl_start;
+
+	regs->wrlvl_cntl[0] = ((wrlvl_en & U(0x1)) << 31U)	|
+				  ((wrlvl_mrd & U(0x7)) << 24U)	|
+				  ((wrlvl_odten & U(0x7)) << 20U)	|
+				  ((wrlvl_dqsen & U(0x7)) << 16U)	|
+				  ((wrlvl_smpl & U(0xf)) << 12U)	|
+				  ((wrlvl_wlr & U(0x7)) << 8U)	|
+				  ((wrlvl_start & U(0x1F)) << 0U);
+	regs->wrlvl_cntl[1] = popts->wrlvl_ctl_2;
+	regs->wrlvl_cntl[2] = popts->wrlvl_ctl_3;
+	debug("wrlvl_cntl[0] = 0x%x\n", regs->wrlvl_cntl[0]);
+	debug("wrlvl_cntl[1] = 0x%x\n", regs->wrlvl_cntl[1]);
+	debug("wrlvl_cntl[2] = 0x%x\n", regs->wrlvl_cntl[2]);
+}
+
+static void cal_ddr_dbg(struct ddr_cfg_regs *regs,
+			const struct memctl_opt *popts)
+{
+	if (popts->cswl_override != 0) {
+		regs->debug[18] = popts->cswl_override;
+	}
+
+#ifdef CONFIG_SYS_FSL_DDR_EMU
+	/* disable DDR training for emulator */
+	regs->debug[2] = U(0x00000400);
+	regs->debug[4] = U(0xff800800);
+	regs->debug[5] = U(0x08000800);
+	regs->debug[6] = U(0x08000800);
+	regs->debug[7] = U(0x08000800);
+	regs->debug[8] = U(0x08000800);
+#endif
+	if (popts->cpo_sample != 0U) {
+		regs->debug[28] = popts->cpo_sample;
+		debug("debug[28] = 0x%x\n", regs->debug[28]);
+	}
+}
+
+int compute_ddr_phy(struct ddr_info *priv)
+{
+	const struct memctl_opt *popts = &priv->opt;
+	struct ddr_cfg_regs *regs = &priv->ddr_reg;
+
+	cal_ddr_sdram_clk_cntl(regs, popts);
+	cal_ddr_cdr(regs, popts);
+	cal_ddr_wrlvl_cntl(regs, popts);
+	cal_ddr_dbg(regs, popts);
+
+	return 0;
+}
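
With the defaults hard-coded in cal_ddr_wrlvl_cntl() above and wrlvl_override disabled, the packed write-leveling control word works out as shown below. The field positions follow the shifts in the code; wrlvl_start is assumed to be 0 for the example, and the snippet is illustrative only, not part of the patch.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t val = (1U << 31)   |	/* wrlvl_en */
		       (6U << 24)   |	/* wrlvl_mrd */
		       (7U << 20)   |	/* wrlvl_odten */
		       (5U << 16)   |	/* wrlvl_dqsen */
		       (0xfU << 12) |	/* wrlvl_smpl (no override) */
		       (6U << 8)    |	/* wrlvl_wlr */
		       (0U << 0);	/* wrlvl_start (assumed 0) */

	printf("wrlvl_cntl[0] = 0x%08x\n", val);	/* 0x8675f600 */
	return 0;
}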
diff --git a/drivers/nxp/ddr/phy-gen2/csr.h b/drivers/nxp/ddr/phy-gen2/csr.h
new file mode 100644
index 0000000..ee7b4d8
--- /dev/null
+++ b/drivers/nxp/ddr/phy-gen2/csr.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2021 NXP
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef CSR_H
+#define CSR_H
+
+#define t_anib					0
+#define t_dbyte					0x10000
+#define t_master				0x20000
+#define t_acsm					0x40000
+#define t_initeng				0x90000
+#define t_drtub					0xc0000
+#define t_apbonly				0xd0000
+#define csr_dbyte_misc_mode_addr		0x00
+#define csr_micro_cont_mux_sel_addr		0x00
+#define csr_uct_shadow_regs			0x04
+#define csr_cal_uclk_info_addr			0x08
+#define csr_seq0bdly0_addr			0x0b
+#define csr_seq0bdly1_addr			0x0c
+#define csr_seq0bdly2_addr			0x0d
+#define csr_seq0bdly3_addr			0x0e
+#define csr_seq0bdisable_flag0_addr		0x0c
+#define csr_seq0bdisable_flag1_addr		0x0d
+#define csr_seq0bdisable_flag2_addr		0x0e
+#define csr_seq0bdisable_flag3_addr		0x0f
+#define csr_seq0bdisable_flag4_addr		0x10
+#define csr_seq0bdisable_flag5_addr		0x11
+#define csr_seq0bdisable_flag6_addr		0x12
+#define csr_seq0bdisable_flag7_addr		0x13
+#define csr_dfi_mode_addr			0x18
+#define csr_tristate_mode_ca_addr		0x19
+#define csr_dfiphyupd_addr			0x21
+#define csr_dqs_preamble_control_addr		0x24
+#define csr_master_x4config_addr		0x25
+#define csr_enable_cs_multicast_addr		0x27
+#define csr_acx4_anib_dis_addr			0x2c
+#define csr_dmipin_present_addr			0x2d
+#define csr_ard_ptr_init_val_addr		0x2e
+#define csr_dct_write_prot			0x31
+#define csr_uct_write_only_shadow		0x32
+#define csr_uct_write_prot			0x33
+#define csr_uct_dat_write_only_shadow		0x34
+#define	csr_dbyte_dll_mode_cntrl_addr		0x3a
+#define csr_atx_impedance_addr			0x43
+#define csr_dq_dqs_rcv_cntrl_addr		0x43
+#define csr_cal_offsets_addr			0x45
+#define csr_tx_impedance_ctrl1_addr		0x49
+#define csr_dq_dqs_rcv_cntrl1_addr		0x4a
+#define csr_tx_odt_drv_stren_addr		0x4d
+#define csr_cal_drv_str0_addr			0x50
+#define csr_atx_slew_rate_addr			0x55
+#define csr_proc_odt_time_ctl_addr		0x56
+#define csr_mem_alert_control_addr		0x5b
+#define csr_mem_alert_control2_addr		0x5c
+#define csr_tx_slew_rate_addr			0x5f
+#define csr_mem_reset_l_addr			0x60
+#define csr_dfi_camode_addr			0x75
+#define csr_dll_gain_ctl_addr			0x7c
+#define csr_dll_lockparam_addr			0x7d
+#define csr_ucclk_hclk_enables_addr		0x80
+#define csr_acsm_playback0x0_addr		0x80
+#define csr_acsm_playback1x0_addr		0x81
+#define csr_cal_rate_addr			0x88
+#define csr_cal_zap_addr			0x89
+#define csr_cal_misc2_addr			0x98
+#define csr_micro_reset_addr			0x99
+#define csr_dfi_rd_data_cs_dest_map_addr	0xb0
+#define csr_vref_in_global_addr			0xb2
+#define csr_dfi_wr_data_cs_dest_map_addr	0xb4
+#define csr_pll_pwr_dn_addr			0xc3
+#define csr_pll_ctrl2_addr			0xc5
+#define csr_pll_ctrl1_addr			0xc7
+#define csr_pll_test_mode_addr			0xca
+#define csr_pll_ctrl4_addr			0xcc
+#define csr_dfi_freq_xlat0_addr			0xf0
+#define csr_acsm_ctrl0_addr			0xf0
+#define csr_dfi_freq_ratio_addr			0xfa
+#define csr_acsm_ctrl13_addr			0xfd
+#define csr_tx_pre_drv_mode_lsb			8
+#define csr_tx_pre_n_lsb			4
+#define csr_tx_pre_p_lsb			0
+#define csr_atx_pre_drv_mode_lsb		8
+#define csr_atx_pre_n_lsb			4
+#define csr_atx_pre_p_lsb			0
+#define csr_wdqsextension_lsb			8
+#define csr_lp4sttc_pre_bridge_rx_en_lsb	7
+#define csr_lp4postamble_ext_lsb		6
+#define csr_lp4tgl_two_tck_tx_dqs_pre_lsb	5
+#define csr_position_dfe_init_lsb		2
+#define csr_two_tck_tx_dqs_pre_lsb		1
+#define csr_two_tck_rx_dqs_pre_lsb		0
+#define csr_dll_rx_preamble_mode_lsb		1
+#define csr_odtstren_n_lsb			6
+#define csr_drv_stren_fsdq_n_lsb		6
+#define	csr_drv_stren_fsdq_p_lsb		0
+#define csr_adrv_stren_n_lsb			5
+#define csr_adrv_stren_p_lsb			0
+#define csr_cal_drv_str_pu50_lsb		4
+#define csr_cal_once_lsb			5
+#define csr_cal_interval_lsb			0
+#define csr_cal_run_lsb				4
+#define csr_global_vref_in_dac_lsb		3
+#define csr_gain_curr_adj_lsb			7
+#define csr_major_mode_dbyte_lsb		4
+#define csr_dfe_ctrl_lsb			2
+#define csr_ext_vref_range_lsb			1
+#define csr_sel_analog_vref_lsb			0
+#define csr_malertsync_bypass_lsb		0
+#define csr_ck_dis_val_lsb			2
+#define csr_ddr2tmode_lsb			1
+#define csr_dis_dyn_adr_tri_lsb			0
+#define	csr_dbyte_disable_lsb			2
+#define csr_power_down_rcvr_lsb			0
+#define csr_power_down_rcvr_dqs_lsb		9
+#define csr_rx_pad_standby_en_lsb		10
+#define csr_rx_pad_standby_en_mask		0x400
+#define csr_x4tg_lsb				0
+#define csr_reset_to_micro_mask			0x8
+#define csr_protect_mem_reset_mask		0x2
+#define csr_stall_to_micro_mask			0x1
+#define uct_write_prot_shadow_mask		0x1
+#define csr_acsm_par_mode_mask			0x4000
+#define csr_acsm_cke_enb_lsb			0
+#define csr_dfiphyupd_threshold_lsb		8
+#define csr_dfiphyupd_threshold_msb		11
+#define csr_dfiphyupd_threshold_mask		0xf00
+#define csr_dfi_rd_destm0_lsb			0
+#define csr_dfi_rd_destm1_lsb			2
+#define csr_dfi_rd_destm2_lsb			4
+#define csr_dfi_rd_destm3_lsb			6
+#define csr_dfi_wr_destm0_lsb			0
+#define csr_dfi_wr_destm1_lsb			2
+#define csr_dfi_wr_destm2_lsb			4
+#define csr_dfi_wr_destm3_lsb			6
+#define csr_acsm_2t_mode_mask			0x40
+#define csr_cal_misc2_err_dis			13
+#define csr_cal_offset_pdc_lsb			6
+#define csr_cal_offset_pdc_msb			9
+#define csr_cal_offset_pdc_mask			0xe0
+#define csr_cal_drv_pdth_mask			0x3c0
+
+struct impedance_mapping {
+	int ohm;
+	int code;
+};
+
+#endif
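
The t_* constants above look like per-block base offsets and the csr_*_addr values like register indices within a block, so a full CSR offset is presumably formed by OR-ing the two. This is an assumption for illustration only, not something stated in the patch:

#include <stdio.h>

#define t_master			0x20000
#define csr_vref_in_global_addr		0xb2

int main(void)
{
	/* MASTER block, VrefInGlobal register -> 0x200b2 */
	printf("0x%x\n", t_master | csr_vref_in_global_addr);
	return 0;
}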
diff --git a/drivers/nxp/ddr/phy-gen2/ddr4fw.h b/drivers/nxp/ddr/phy-gen2/ddr4fw.h
new file mode 100644
index 0000000..f17f2e7
--- /dev/null
+++ b/drivers/nxp/ddr/phy-gen2/ddr4fw.h
@@ -0,0 +1,2897 @@
+/*
+ * Copyright 2021 NXP
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef DDR4FW
+#define DDR4FW
+
+#define PHY_GEN2_MAX_IMAGE_SIZE		32768
+#define PHY_GEN2_IMEM_ADDR		0x50000
+#define PHY_GEN2_DMEM_ADDR		0x54000
+
+struct ddr4u1d {
+	uint8_t  reserved00;
+	uint8_t  msg_misc;
+	uint16_t pmu_revision;
+	uint8_t  pstate;
+	uint8_t  pll_bypass_en;
+	uint16_t dramfreq;
+	uint8_t  dfi_freq_ratio;
+	uint8_t  bpznres_val;
+	uint8_t  phy_odt_impedance;
+	uint8_t  phy_drv_impedance;
+	uint8_t  phy_vref;
+	uint8_t  dram_type;
+	uint8_t  disabled_dbyte;
+	uint8_t  enabled_dqs;
+	uint8_t  cs_present;
+	uint8_t  cs_present_d0;
+	uint8_t  cs_present_d1;
+	uint8_t  addr_mirror;
+	uint8_t  cs_test_fail;
+	uint8_t  phy_cfg;
+	uint16_t sequence_ctrl;
+	uint8_t  hdt_ctrl;
+	uint8_t  reserved19[0x1B - 0x19];
+	uint8_t  share2dvref_result;
+	uint8_t  reserved1c[0x22 - 0x1c];
+	uint16_t phy_config_override;
+	uint8_t  dfimrlmargin;
+	int8_t   cdd_rr_3_2;
+	int8_t   cdd_rr_3_1;
+	int8_t   cdd_rr_3_0;
+	int8_t   cdd_rr_2_3;
+	int8_t   cdd_rr_2_1;
+	int8_t   cdd_rr_2_0;
+	int8_t   cdd_rr_1_3;
+	int8_t   cdd_rr_1_2;
+	int8_t   cdd_rr_1_0;
+	int8_t   cdd_rr_0_3;
+	int8_t   cdd_rr_0_2;
+	int8_t   cdd_rr_0_1;
+	int8_t   cdd_ww_3_2;
+	int8_t   cdd_ww_3_1;
+	int8_t   cdd_ww_3_0;
+	int8_t   cdd_ww_2_3;
+	int8_t   cdd_ww_2_1;
+	int8_t   cdd_ww_2_0;
+	int8_t   cdd_ww_1_3;
+	int8_t   cdd_ww_1_2;
+	int8_t   cdd_ww_1_0;
+	int8_t   cdd_ww_0_3;
+	int8_t   cdd_ww_0_2;
+	int8_t   cdd_ww_0_1;
+	int8_t   cdd_rw_3_3;
+	int8_t   cdd_rw_3_2;
+	int8_t   cdd_rw_3_1;
+	int8_t   cdd_rw_3_0;
+	int8_t   cdd_rw_2_3;
+	int8_t   cdd_rw_2_2;
+	int8_t   cdd_rw_2_1;
+	int8_t   cdd_rw_2_0;
+	int8_t   cdd_rw_1_3;
+	int8_t   cdd_rw_1_2;
+	int8_t   cdd_rw_1_1;
+	int8_t   cdd_rw_1_0;
+	int8_t   cdd_rw_0_3;
+	int8_t   cdd_rw_0_2;
+	int8_t   cdd_rw_0_1;
+	int8_t   cdd_rw_0_0;
+	int8_t   cdd_wr_3_3;
+	int8_t   cdd_wr_3_2;
+	int8_t   cdd_wr_3_1;
+	int8_t   cdd_wr_3_0;
+	int8_t   cdd_wr_2_3;
+	int8_t   cdd_wr_2_2;
+	int8_t   cdd_wr_2_1;
+	int8_t   cdd_wr_2_0;
+	int8_t   cdd_wr_1_3;
+	int8_t   cdd_wr_1_2;
+	int8_t   cdd_wr_1_1;
+	int8_t   cdd_wr_1_0;
+	int8_t   cdd_wr_0_3;
+	int8_t   cdd_wr_0_2;
+	int8_t   cdd_wr_0_1;
+	int8_t   cdd_wr_0_0;
+	uint8_t  reserved5d;
+	uint16_t mr0;
+	uint16_t mr1;
+	uint16_t mr2;
+	uint16_t mr3;
+	uint16_t mr4;
+	uint16_t mr5;
+	uint16_t mr6;
+	uint8_t  x16present;
+	uint8_t  cs_setup_gddec;
+	uint16_t rtt_nom_wr_park0;
+	uint16_t rtt_nom_wr_park1;
+	uint16_t rtt_nom_wr_park2;
+	uint16_t rtt_nom_wr_park3;
+	uint16_t rtt_nom_wr_park4;
+	uint16_t rtt_nom_wr_park5;
+	uint16_t rtt_nom_wr_park6;
+	uint16_t rtt_nom_wr_park7;
+	uint8_t  acsm_odt_ctrl0;
+	uint8_t  acsm_odt_ctrl1;
+	uint8_t  acsm_odt_ctrl2;
+	uint8_t  acsm_odt_ctrl3;
+	uint8_t  acsm_odt_ctrl4;
+	uint8_t  acsm_odt_ctrl5;
+	uint8_t  acsm_odt_ctrl6;
+	uint8_t  acsm_odt_ctrl7;
+	uint8_t  vref_dq_r0nib0;
+	uint8_t  vref_dq_r0nib1;
+	uint8_t  vref_dq_r0nib2;
+	uint8_t  vref_dq_r0nib3;
+	uint8_t  vref_dq_r0nib4;
+	uint8_t  vref_dq_r0nib5;
+	uint8_t  vref_dq_r0nib6;
+	uint8_t  vref_dq_r0nib7;
+	uint8_t  vref_dq_r0nib8;
+	uint8_t  vref_dq_r0nib9;
+	uint8_t  vref_dq_r0nib10;
+	uint8_t  vref_dq_r0nib11;
+	uint8_t  vref_dq_r0nib12;
+	uint8_t  vref_dq_r0nib13;
+	uint8_t  vref_dq_r0nib14;
+	uint8_t  vref_dq_r0nib15;
+	uint8_t  vref_dq_r0nib16;
+	uint8_t  vref_dq_r0nib17;
+	uint8_t  vref_dq_r0nib18;
+	uint8_t  vref_dq_r0nib19;
+	uint8_t  vref_dq_r1nib0;
+	uint8_t  vref_dq_r1nib1;
+	uint8_t  vref_dq_r1nib2;
+	uint8_t  vref_dq_r1nib3;
+	uint8_t  vref_dq_r1nib4;
+	uint8_t  vref_dq_r1nib5;
+	uint8_t  vref_dq_r1nib6;
+	uint8_t  vref_dq_r1nib7;
+	uint8_t  vref_dq_r1nib8;
+	uint8_t  vref_dq_r1nib9;
+	uint8_t  vref_dq_r1nib10;
+	uint8_t  vref_dq_r1nib11;
+	uint8_t  vref_dq_r1nib12;
+	uint8_t  vref_dq_r1nib13;
+	uint8_t  vref_dq_r1nib14;
+	uint8_t  vref_dq_r1nib15;
+	uint8_t  vref_dq_r1nib16;
+	uint8_t  vref_dq_r1nib17;
+	uint8_t  vref_dq_r1nib18;
+	uint8_t  vref_dq_r1nib19;
+	uint8_t  vref_dq_r2nib0;
+	uint8_t  vref_dq_r2nib1;
+	uint8_t  vref_dq_r2nib2;
+	uint8_t  vref_dq_r2nib3;
+	uint8_t  vref_dq_r2nib4;
+	uint8_t  vref_dq_r2nib5;
+	uint8_t  vref_dq_r2nib6;
+	uint8_t  vref_dq_r2nib7;
+	uint8_t  vref_dq_r2nib8;
+	uint8_t  vref_dq_r2nib9;
+	uint8_t  vref_dq_r2nib10;
+	uint8_t  vref_dq_r2nib11;
+	uint8_t  vref_dq_r2nib12;
+	uint8_t  vref_dq_r2nib13;
+	uint8_t  vref_dq_r2nib14;
+	uint8_t  vref_dq_r2nib15;
+	uint8_t  vref_dq_r2nib16;
+	uint8_t  vref_dq_r2nib17;
+	uint8_t  vref_dq_r2nib18;
+	uint8_t  vref_dq_r2nib19;
+	uint8_t  vref_dq_r3nib0;
+	uint8_t  vref_dq_r3nib1;
+	uint8_t  vref_dq_r3nib2;
+	uint8_t  vref_dq_r3nib3;
+	uint8_t  vref_dq_r3nib4;
+	uint8_t  vref_dq_r3nib5;
+	uint8_t  vref_dq_r3nib6;
+	uint8_t  vref_dq_r3nib7;
+	uint8_t  vref_dq_r3nib8;
+	uint8_t  vref_dq_r3nib9;
+	uint8_t  vref_dq_r3nib10;
+	uint8_t  vref_dq_r3nib11;
+	uint8_t  vref_dq_r3nib12;
+	uint8_t  vref_dq_r3nib13;
+	uint8_t  vref_dq_r3nib14;
+	uint8_t  vref_dq_r3nib15;
+	uint8_t  vref_dq_r3nib16;
+	uint8_t  vref_dq_r3nib17;
+	uint8_t  vref_dq_r3nib18;
+	uint8_t  vref_dq_r3nib19;
+	uint8_t  reserved_d6[0x3f6 - 0xd6];
+	uint16_t alt_cas_l;
+	uint8_t  alt_wcas_l;
+	uint8_t  d4misc;
+} __packed;
+
+struct ddr4u2d {
+	uint8_t  reserved00;
+	uint8_t  msg_misc;
+	uint16_t pmu_revision;
+	uint8_t  pstate;
+	uint8_t  pll_bypass_en;
+	uint16_t dramfreq;
+	uint8_t  dfi_freq_ratio;
+	uint8_t  bpznres_val;
+	uint8_t  phy_odt_impedance;
+	uint8_t  phy_drv_impedance;
+	uint8_t  phy_vref;
+	uint8_t  dram_type;
+	uint8_t  disabled_dbyte;
+	uint8_t  enabled_dqs;
+	uint8_t  cs_present;
+	uint8_t  cs_present_d0;
+	uint8_t  cs_present_d1;
+	uint8_t  addr_mirror;
+	uint8_t  cs_test_fail;
+	uint8_t  phy_cfg;
+	uint16_t sequence_ctrl;
+	uint8_t  hdt_ctrl;
+	uint8_t  rx2d_train_opt;
+	uint8_t  tx2d_train_opt;
+	uint8_t  share2dvref_result;
+	uint8_t  delay_weight2d;
+	uint8_t  voltage_weight2d;
+	uint8_t  reserved1e[0x22 - 0x1e];
+	uint16_t phy_config_override;
+	uint8_t  dfimrlmargin;
+	uint8_t  r0_rx_clk_dly_margin;
+	uint8_t  r0_vref_dac_margin;
+	uint8_t  r0_tx_dq_dly_margin;
+	uint8_t  r0_device_vref_margin;
+	uint8_t  reserved29[0x33 - 0x29];
+	uint8_t  r1_rx_clk_dly_margin;
+	uint8_t  r1_vref_dac_margin;
+	uint8_t  r1_tx_dq_dly_margin;
+	uint8_t  r1_device_vref_margin;
+	uint8_t  reserved37[0x41 - 0x37];
+	uint8_t  r2_rx_clk_dly_margin;
+	uint8_t  r2_vref_dac_margin;
+	uint8_t  r2_tx_dq_dly_margin;
+	uint8_t  r2_device_vref_margin;
+	uint8_t  reserved45[0x4f - 0x45];
+	uint8_t  r3_rx_clk_dly_margin;
+	uint8_t  r3_vref_dac_margin;
+	uint8_t  r3_tx_dq_dly_margin;
+	uint8_t  r3_device_vref_margin;
+	uint8_t  reserved53[0x5e - 0x53];
+	uint16_t mr0;
+	uint16_t mr1;
+	uint16_t mr2;
+	uint16_t mr3;
+	uint16_t mr4;
+	uint16_t mr5;
+	uint16_t mr6;
+	uint8_t  x16present;
+	uint8_t  cs_setup_gddec;
+	uint16_t rtt_nom_wr_park0;
+	uint16_t rtt_nom_wr_park1;
+	uint16_t rtt_nom_wr_park2;
+	uint16_t rtt_nom_wr_park3;
+	uint16_t rtt_nom_wr_park4;
+	uint16_t rtt_nom_wr_park5;
+	uint16_t rtt_nom_wr_park6;
+	uint16_t rtt_nom_wr_park7;
+	uint8_t  acsm_odt_ctrl0;
+	uint8_t  acsm_odt_ctrl1;
+	uint8_t  acsm_odt_ctrl2;
+	uint8_t  acsm_odt_ctrl3;
+	uint8_t  acsm_odt_ctrl4;
+	uint8_t  acsm_odt_ctrl5;
+	uint8_t  acsm_odt_ctrl6;
+	uint8_t  acsm_odt_ctrl7;
+	uint8_t  vref_dq_r0nib0;
+	uint8_t  vref_dq_r0nib1;
+	uint8_t  vref_dq_r0nib2;
+	uint8_t  vref_dq_r0nib3;
+	uint8_t  vref_dq_r0nib4;
+	uint8_t  vref_dq_r0nib5;
+	uint8_t  vref_dq_r0nib6;
+	uint8_t  vref_dq_r0nib7;
+	uint8_t  vref_dq_r0nib8;
+	uint8_t  vref_dq_r0nib9;
+	uint8_t  vref_dq_r0nib10;
+	uint8_t  vref_dq_r0nib11;
+	uint8_t  vref_dq_r0nib12;
+	uint8_t  vref_dq_r0nib13;
+	uint8_t  vref_dq_r0nib14;
+	uint8_t  vref_dq_r0nib15;
+	uint8_t  vref_dq_r0nib16;
+	uint8_t  vref_dq_r0nib17;
+	uint8_t  vref_dq_r0nib18;
+	uint8_t  vref_dq_r0nib19;
+	uint8_t  vref_dq_r1nib0;
+	uint8_t  vref_dq_r1nib1;
+	uint8_t  vref_dq_r1nib2;
+	uint8_t  vref_dq_r1nib3;
+	uint8_t  vref_dq_r1nib4;
+	uint8_t  vref_dq_r1nib5;
+	uint8_t  vref_dq_r1nib6;
+	uint8_t  vref_dq_r1nib7;
+	uint8_t  vref_dq_r1nib8;
+	uint8_t  vref_dq_r1nib9;
+	uint8_t  vref_dq_r1nib10;
+	uint8_t  vref_dq_r1nib11;
+	uint8_t  vref_dq_r1nib12;
+	uint8_t  vref_dq_r1nib13;
+	uint8_t  vref_dq_r1nib14;
+	uint8_t  vref_dq_r1nib15;
+	uint8_t  vref_dq_r1nib16;
+	uint8_t  vref_dq_r1nib17;
+	uint8_t  vref_dq_r1nib18;
+	uint8_t  vref_dq_r1nib19;
+	uint8_t  vref_dq_r2nib0;
+	uint8_t  vref_dq_r2nib1;
+	uint8_t  vref_dq_r2nib2;
+	uint8_t  vref_dq_r2nib3;
+	uint8_t  vref_dq_r2nib4;
+	uint8_t  vref_dq_r2nib5;
+	uint8_t  vref_dq_r2nib6;
+	uint8_t  vref_dq_r2nib7;
+	uint8_t  vref_dq_r2nib8;
+	uint8_t  vref_dq_r2nib9;
+	uint8_t  vref_dq_r2nib10;
+	uint8_t  vref_dq_r2nib11;
+	uint8_t  vref_dq_r2nib12;
+	uint8_t  vref_dq_r2nib13;
+	uint8_t  vref_dq_r2nib14;
+	uint8_t  vref_dq_r2nib15;
+	uint8_t  vref_dq_r2nib16;
+	uint8_t  vref_dq_r2nib17;
+	uint8_t  vref_dq_r2nib18;
+	uint8_t  vref_dq_r2nib19;
+	uint8_t  vref_dq_r3nib0;
+	uint8_t  vref_dq_r3nib1;
+	uint8_t  vref_dq_r3nib2;
+	uint8_t  vref_dq_r3nib3;
+	uint8_t  vref_dq_r3nib4;
+	uint8_t  vref_dq_r3nib5;
+	uint8_t  vref_dq_r3nib6;
+	uint8_t  vref_dq_r3nib7;
+	uint8_t  vref_dq_r3nib8;
+	uint8_t  vref_dq_r3nib9;
+	uint8_t  vref_dq_r3nib10;
+	uint8_t  vref_dq_r3nib11;
+	uint8_t  vref_dq_r3nib12;
+	uint8_t  vref_dq_r3nib13;
+	uint8_t  vref_dq_r3nib14;
+	uint8_t  vref_dq_r3nib15;
+	uint8_t  vref_dq_r3nib16;
+	uint8_t  vref_dq_r3nib17;
+	uint8_t  vref_dq_r3nib18;
+	uint8_t  vref_dq_r3nib19;
+	uint8_t  reserved_d6[0x3f6 - 0xd6];
+	uint16_t alt_cas_l;
+	uint8_t  alt_wcas_l;
+	uint8_t  d4misc;
+} __packed;
+
+struct ddr4r1d {
+	uint8_t  reserved00;
+	uint8_t  msg_misc;
+	uint16_t pmu_revision;
+	uint8_t  pstate;
+	uint8_t  pll_bypass_en;
+	uint16_t dramfreq;
+	uint8_t  dfi_freq_ratio;
+	uint8_t  bpznres_val;
+	uint8_t  phy_odt_impedance;
+	uint8_t  phy_drv_impedance;
+	uint8_t  phy_vref;
+	uint8_t  dram_type;
+	uint8_t  disabled_dbyte;
+	uint8_t  enabled_dqs;
+	uint8_t  cs_present;
+	uint8_t  cs_present_d0;
+	uint8_t  cs_present_d1;
+	uint8_t  addr_mirror;
+	uint8_t  cs_test_fail;
+	uint8_t  phy_cfg;
+	uint16_t sequence_ctrl;
+	uint8_t  hdt_ctrl;
+	uint8_t  reserved19[0x22 - 0x19];
+	uint16_t phy_config_override;
+	uint8_t  dfimrlmargin;
+	int8_t   cdd_rr_3_2;
+	int8_t   cdd_rr_3_1;
+	int8_t   cdd_rr_3_0;
+	int8_t   cdd_rr_2_3;
+	int8_t   cdd_rr_2_1;
+	int8_t   cdd_rr_2_0;
+	int8_t   cdd_rr_1_3;
+	int8_t   cdd_rr_1_2;
+	int8_t   cdd_rr_1_0;
+	int8_t   cdd_rr_0_3;
+	int8_t   cdd_rr_0_2;
+	int8_t   cdd_rr_0_1;
+	int8_t   cdd_ww_3_2;
+	int8_t   cdd_ww_3_1;
+	int8_t   cdd_ww_3_0;
+	int8_t   cdd_ww_2_3;
+	int8_t   cdd_ww_2_1;
+	int8_t   cdd_ww_2_0;
+	int8_t   cdd_ww_1_3;
+	int8_t   cdd_ww_1_2;
+	int8_t   cdd_ww_1_0;
+	int8_t   cdd_ww_0_3;
+	int8_t   cdd_ww_0_2;
+	int8_t   cdd_ww_0_1;
+	int8_t   cdd_rw_3_3;
+	int8_t   cdd_rw_3_2;
+	int8_t   cdd_rw_3_1;
+	int8_t   cdd_rw_3_0;
+	int8_t   cdd_rw_2_3;
+	int8_t   cdd_rw_2_2;
+	int8_t   cdd_rw_2_1;
+	int8_t   cdd_rw_2_0;
+	int8_t   cdd_rw_1_3;
+	int8_t   cdd_rw_1_2;
+	int8_t   cdd_rw_1_1;
+	int8_t   cdd_rw_1_0;
+	int8_t   cdd_rw_0_3;
+	int8_t   cdd_rw_0_2;
+	int8_t   cdd_rw_0_1;
+	int8_t   cdd_rw_0_0;
+	int8_t   cdd_wr_3_3;
+	int8_t   cdd_wr_3_2;
+	int8_t   cdd_wr_3_1;
+	int8_t   cdd_wr_3_0;
+	int8_t   cdd_wr_2_3;
+	int8_t   cdd_wr_2_2;
+	int8_t   cdd_wr_2_1;
+	int8_t   cdd_wr_2_0;
+	int8_t   cdd_wr_1_3;
+	int8_t   cdd_wr_1_2;
+	int8_t   cdd_wr_1_1;
+	int8_t   cdd_wr_1_0;
+	int8_t   cdd_wr_0_3;
+	int8_t   cdd_wr_0_2;
+	int8_t   cdd_wr_0_1;
+	int8_t   cdd_wr_0_0;
+	uint8_t  reserved5d;
+	uint16_t mr0;
+	uint16_t mr1;
+	uint16_t mr2;
+	uint16_t mr3;
+	uint16_t mr4;
+	uint16_t mr5;
+	uint16_t mr6;
+	uint8_t  x16present;
+	uint8_t  cs_setup_gddec;
+	uint16_t rtt_nom_wr_park0;
+	uint16_t rtt_nom_wr_park1;
+	uint16_t rtt_nom_wr_park2;
+	uint16_t rtt_nom_wr_park3;
+	uint16_t rtt_nom_wr_park4;
+	uint16_t rtt_nom_wr_park5;
+	uint16_t rtt_nom_wr_park6;
+	uint16_t rtt_nom_wr_park7;
+	uint8_t  acsm_odt_ctrl0;
+	uint8_t  acsm_odt_ctrl1;
+	uint8_t  acsm_odt_ctrl2;
+	uint8_t  acsm_odt_ctrl3;
+	uint8_t  acsm_odt_ctrl4;
+	uint8_t  acsm_odt_ctrl5;
+	uint8_t  acsm_odt_ctrl6;
+	uint8_t  acsm_odt_ctrl7;
+	uint8_t  vref_dq_r0nib0;
+	uint8_t  vref_dq_r0nib1;
+	uint8_t  vref_dq_r0nib2;
+	uint8_t  vref_dq_r0nib3;
+	uint8_t  vref_dq_r0nib4;
+	uint8_t  vref_dq_r0nib5;
+	uint8_t  vref_dq_r0nib6;
+	uint8_t  vref_dq_r0nib7;
+	uint8_t  vref_dq_r0nib8;
+	uint8_t  vref_dq_r0nib9;
+	uint8_t  vref_dq_r0nib10;
+	uint8_t  vref_dq_r0nib11;
+	uint8_t  vref_dq_r0nib12;
+	uint8_t  vref_dq_r0nib13;
+	uint8_t  vref_dq_r0nib14;
+	uint8_t  vref_dq_r0nib15;
+	uint8_t  vref_dq_r0nib16;
+	uint8_t  vref_dq_r0nib17;
+	uint8_t  vref_dq_r0nib18;
+	uint8_t  vref_dq_r0nib19;
+	uint8_t  vref_dq_r1nib0;
+	uint8_t  vref_dq_r1nib1;
+	uint8_t  vref_dq_r1nib2;
+	uint8_t  vref_dq_r1nib3;
+	uint8_t  vref_dq_r1nib4;
+	uint8_t  vref_dq_r1nib5;
+	uint8_t  vref_dq_r1nib6;
+	uint8_t  vref_dq_r1nib7;
+	uint8_t  vref_dq_r1nib8;
+	uint8_t  vref_dq_r1nib9;
+	uint8_t  vref_dq_r1nib10;
+	uint8_t  vref_dq_r1nib11;
+	uint8_t  vref_dq_r1nib12;
+	uint8_t  vref_dq_r1nib13;
+	uint8_t  vref_dq_r1nib14;
+	uint8_t  vref_dq_r1nib15;
+	uint8_t  vref_dq_r1nib16;
+	uint8_t  vref_dq_r1nib17;
+	uint8_t  vref_dq_r1nib18;
+	uint8_t  vref_dq_r1nib19;
+	uint8_t  vref_dq_r2nib0;
+	uint8_t  vref_dq_r2nib1;
+	uint8_t  vref_dq_r2nib2;
+	uint8_t  vref_dq_r2nib3;
+	uint8_t  vref_dq_r2nib4;
+	uint8_t  vref_dq_r2nib5;
+	uint8_t  vref_dq_r2nib6;
+	uint8_t  vref_dq_r2nib7;
+	uint8_t  vref_dq_r2nib8;
+	uint8_t  vref_dq_r2nib9;
+	uint8_t  vref_dq_r2nib10;
+	uint8_t  vref_dq_r2nib11;
+	uint8_t  vref_dq_r2nib12;
+	uint8_t  vref_dq_r2nib13;
+	uint8_t  vref_dq_r2nib14;
+	uint8_t  vref_dq_r2nib15;
+	uint8_t  vref_dq_r2nib16;
+	uint8_t  vref_dq_r2nib17;
+	uint8_t  vref_dq_r2nib18;
+	uint8_t  vref_dq_r2nib19;
+	uint8_t  vref_dq_r3nib0;
+	uint8_t  vref_dq_r3nib1;
+	uint8_t  vref_dq_r3nib2;
+	uint8_t  vref_dq_r3nib3;
+	uint8_t  vref_dq_r3nib4;
+	uint8_t  vref_dq_r3nib5;
+	uint8_t  vref_dq_r3nib6;
+	uint8_t  vref_dq_r3nib7;
+	uint8_t  vref_dq_r3nib8;
+	uint8_t  vref_dq_r3nib9;
+	uint8_t  vref_dq_r3nib10;
+	uint8_t  vref_dq_r3nib11;
+	uint8_t  vref_dq_r3nib12;
+	uint8_t  vref_dq_r3nib13;
+	uint8_t  vref_dq_r3nib14;
+	uint8_t  vref_dq_r3nib15;
+	uint8_t  vref_dq_r3nib16;
+	uint8_t  vref_dq_r3nib17;
+	uint8_t  vref_dq_r3nib18;
+	uint8_t  vref_dq_r3nib19;
+	uint8_t  f0rc00_d0;
+	uint8_t  f0rc01_d0;
+	uint8_t  f0rc02_d0;
+	uint8_t  f0rc03_d0;
+	uint8_t  f0rc04_d0;
+	uint8_t  f0rc05_d0;
+	uint8_t  f0rc06_d0;
+	uint8_t  f0rc07_d0;
+	uint8_t  f0rc08_d0;
+	uint8_t  f0rc09_d0;
+	uint8_t  f0rc0a_d0;
+	uint8_t  f0rc0b_d0;
+	uint8_t  f0rc0c_d0;
+	uint8_t  f0rc0d_d0;
+	uint8_t  f0rc0e_d0;
+	uint8_t  f0rc0f_d0;
+	uint8_t  f0rc1x_d0;
+	uint8_t  f0rc2x_d0;
+	uint8_t  f0rc3x_d0;
+	uint8_t  f0rc4x_d0;
+	uint8_t  f0rc5x_d0;
+	uint8_t  f0rc6x_d0;
+	uint8_t  f0rc7x_d0;
+	uint8_t  f0rc8x_d0;
+	uint8_t  f0rc9x_d0;
+	uint8_t  f0rcax_d0;
+	uint8_t  f0rcbx_d0;
+	uint8_t  f1rc00_d0;
+	uint8_t  f1rc01_d0;
+	uint8_t  f1rc02_d0;
+	uint8_t  f1rc03_d0;
+	uint8_t  f1rc04_d0;
+	uint8_t  f1rc05_d0;
+	uint8_t  f1rc06_d0;
+	uint8_t  f1rc07_d0;
+	uint8_t  f1rc08_d0;
+	uint8_t  f1rc09_d0;
+	uint8_t  f1rc0a_d0;
+	uint8_t  f1rc0b_d0;
+	uint8_t  f1rc0c_d0;
+	uint8_t  f1rc0d_d0;
+	uint8_t  f1rc0e_d0;
+	uint8_t  f1rc0f_d0;
+	uint8_t  f1rc1x_d0;
+	uint8_t  f1rc2x_d0;
+	uint8_t  f1rc3x_d0;
+	uint8_t  f1rc4x_d0;
+	uint8_t  f1rc5x_d0;
+	uint8_t  f1rc6x_d0;
+	uint8_t  f1rc7x_d0;
+	uint8_t  f1rc8x_d0;
+	uint8_t  f1rc9x_d0;
+	uint8_t  f1rcax_d0;
+	uint8_t  f1rcbx_d0;
+	uint8_t  f0rc00_d1;
+	uint8_t  f0rc01_d1;
+	uint8_t  f0rc02_d1;
+	uint8_t  f0rc03_d1;
+	uint8_t  f0rc04_d1;
+	uint8_t  f0rc05_d1;
+	uint8_t  f0rc06_d1;
+	uint8_t  f0rc07_d1;
+	uint8_t  f0rc08_d1;
+	uint8_t  f0rc09_d1;
+	uint8_t  f0rc0a_d1;
+	uint8_t  f0rc0b_d1;
+	uint8_t  f0rc0c_d1;
+	uint8_t  f0rc0d_d1;
+	uint8_t  f0rc0e_d1;
+	uint8_t  f0rc0f_d1;
+	uint8_t  f0rc1x_d1;
+	uint8_t  f0rc2x_d1;
+	uint8_t  f0rc3x_d1;
+	uint8_t  f0rc4x_d1;
+	uint8_t  f0rc5x_d1;
+	uint8_t  f0rc6x_d1;
+	uint8_t  f0rc7x_d1;
+	uint8_t  f0rc8x_d1;
+	uint8_t  f0rc9x_d1;
+	uint8_t  f0rcax_d1;
+	uint8_t  f0rcbx_d1;
+	uint8_t  f1rc00_d1;
+	uint8_t  f1rc01_d1;
+	uint8_t  f1rc02_d1;
+	uint8_t  f1rc03_d1;
+	uint8_t  f1rc04_d1;
+	uint8_t  f1rc05_d1;
+	uint8_t  f1rc06_d1;
+	uint8_t  f1rc07_d1;
+	uint8_t  f1rc08_d1;
+	uint8_t  f1rc09_d1;
+	uint8_t  f1rc0a_d1;
+	uint8_t  f1rc0b_d1;
+	uint8_t  f1rc0c_d1;
+	uint8_t  f1rc0d_d1;
+	uint8_t  f1rc0e_d1;
+	uint8_t  f1rc0f_d1;
+	uint8_t  f1rc1x_d1;
+	uint8_t  f1rc2x_d1;
+	uint8_t  f1rc3x_d1;
+	uint8_t  f1rc4x_d1;
+	uint8_t  f1rc5x_d1;
+	uint8_t  f1rc6x_d1;
+	uint8_t  f1rc7x_d1;
+	uint8_t  f1rc8x_d1;
+	uint8_t  f1rc9x_d1;
+	uint8_t  f1rcax_d1;
+	uint8_t  f1rcbx_d1;
+	uint8_t  reserved142[0x3f6 - 0x142];
+	uint16_t alt_cas_l;
+	uint8_t  alt_wcas_l;
+	uint8_t  d4misc;
+} __packed;
+
+struct ddr4r2d {
+	uint8_t  reserved00;
+	uint8_t  msg_misc;
+	uint16_t pmu_revision;
+	uint8_t  pstate;
+	uint8_t  pll_bypass_en;
+	uint16_t dramfreq;
+	uint8_t  dfi_freq_ratio;
+	uint8_t  bpznres_val;
+	uint8_t  phy_odt_impedance;
+	uint8_t  phy_drv_impedance;
+	uint8_t  phy_vref;
+	uint8_t  dram_type;
+	uint8_t  disabled_dbyte;
+	uint8_t  enabled_dqs;
+	uint8_t  cs_present;
+	uint8_t  cs_present_d0;
+	uint8_t  cs_present_d1;
+	uint8_t  addr_mirror;
+	uint8_t  cs_test_fail;
+	uint8_t  phy_cfg;
+	uint16_t sequence_ctrl;
+	uint8_t  hdt_ctrl;
+	uint8_t  rx2d_train_opt;
+	uint8_t  tx2d_train_opt;
+	uint8_t  share2dvref_result;
+	uint8_t  delay_weight2d;
+	uint8_t  voltage_weight2d;
+	uint8_t  reserved1e[0x22-0x1e];
+	uint16_t phy_config_override;
+	uint8_t  dfimrlmargin;
+	uint8_t  r0_rx_clk_dly_margin;
+	uint8_t  r0_vref_dac_margin;
+	uint8_t  r0_tx_dq_dly_margin;
+	uint8_t  r0_device_vref_margin;
+	uint8_t  reserved29[0x33-0x29];
+	uint8_t  r1_rx_clk_dly_margin;
+	uint8_t  r1_vref_dac_margin;
+	uint8_t  r1_tx_dq_dly_margin;
+	uint8_t  r1_device_vref_margin;
+	uint8_t  reserved37[0x41-0x37];
+	uint8_t  r2_rx_clk_dly_margin;
+	uint8_t  r2_vref_dac_margin;
+	uint8_t  r2_tx_dq_dly_margin;
+	uint8_t  r2_device_vref_margin;
+	uint8_t  reserved45[0x4f - 0x45];
+	uint8_t  r3_rx_clk_dly_margin;
+	uint8_t  r3_vref_dac_margin;
+	uint8_t  r3_tx_dq_dly_margin;
+	uint8_t  r3_device_vref_margin;
+	uint8_t  reserved53[0x5e - 0x53];
+	uint16_t mr0;
+	uint16_t mr1;
+	uint16_t mr2;
+	uint16_t mr3;
+	uint16_t mr4;
+	uint16_t mr5;
+	uint16_t mr6;
+	uint8_t  x16present;
+	uint8_t  cs_setup_gddec;
+	uint16_t rtt_nom_wr_park0;
+	uint16_t rtt_nom_wr_park1;
+	uint16_t rtt_nom_wr_park2;
+	uint16_t rtt_nom_wr_park3;
+	uint16_t rtt_nom_wr_park4;
+	uint16_t rtt_nom_wr_park5;
+	uint16_t rtt_nom_wr_park6;
+	uint16_t rtt_nom_wr_park7;
+	uint8_t  acsm_odt_ctrl0;
+	uint8_t  acsm_odt_ctrl1;
+	uint8_t  acsm_odt_ctrl2;
+	uint8_t  acsm_odt_ctrl3;
+	uint8_t  acsm_odt_ctrl4;
+	uint8_t  acsm_odt_ctrl5;
+	uint8_t  acsm_odt_ctrl6;
+	uint8_t  acsm_odt_ctrl7;
+	uint8_t  vref_dq_r0nib0;
+	uint8_t  vref_dq_r0nib1;
+	uint8_t  vref_dq_r0nib2;
+	uint8_t  vref_dq_r0nib3;
+	uint8_t  vref_dq_r0nib4;
+	uint8_t  vref_dq_r0nib5;
+	uint8_t  vref_dq_r0nib6;
+	uint8_t  vref_dq_r0nib7;
+	uint8_t  vref_dq_r0nib8;
+	uint8_t  vref_dq_r0nib9;
+	uint8_t  vref_dq_r0nib10;
+	uint8_t  vref_dq_r0nib11;
+	uint8_t  vref_dq_r0nib12;
+	uint8_t  vref_dq_r0nib13;
+	uint8_t  vref_dq_r0nib14;
+	uint8_t  vref_dq_r0nib15;
+	uint8_t  vref_dq_r0nib16;
+	uint8_t  vref_dq_r0nib17;
+	uint8_t  vref_dq_r0nib18;
+	uint8_t  vref_dq_r0nib19;
+	uint8_t  vref_dq_r1nib0;
+	uint8_t  vref_dq_r1nib1;
+	uint8_t  vref_dq_r1nib2;
+	uint8_t  vref_dq_r1nib3;
+	uint8_t  vref_dq_r1nib4;
+	uint8_t  vref_dq_r1nib5;
+	uint8_t  vref_dq_r1nib6;
+	uint8_t  vref_dq_r1nib7;
+	uint8_t  vref_dq_r1nib8;
+	uint8_t  vref_dq_r1nib9;
+	uint8_t  vref_dq_r1nib10;
+	uint8_t  vref_dq_r1nib11;
+	uint8_t  vref_dq_r1nib12;
+	uint8_t  vref_dq_r1nib13;
+	uint8_t  vref_dq_r1nib14;
+	uint8_t  vref_dq_r1nib15;
+	uint8_t  vref_dq_r1nib16;
+	uint8_t  vref_dq_r1nib17;
+	uint8_t  vref_dq_r1nib18;
+	uint8_t  vref_dq_r1nib19;
+	uint8_t  vref_dq_r2nib0;
+	uint8_t  vref_dq_r2nib1;
+	uint8_t  vref_dq_r2nib2;
+	uint8_t  vref_dq_r2nib3;
+	uint8_t  vref_dq_r2nib4;
+	uint8_t  vref_dq_r2nib5;
+	uint8_t  vref_dq_r2nib6;
+	uint8_t  vref_dq_r2nib7;
+	uint8_t  vref_dq_r2nib8;
+	uint8_t  vref_dq_r2nib9;
+	uint8_t  vref_dq_r2nib10;
+	uint8_t  vref_dq_r2nib11;
+	uint8_t  vref_dq_r2nib12;
+	uint8_t  vref_dq_r2nib13;
+	uint8_t  vref_dq_r2nib14;
+	uint8_t  vref_dq_r2nib15;
+	uint8_t  vref_dq_r2nib16;
+	uint8_t  vref_dq_r2nib17;
+	uint8_t  vref_dq_r2nib18;
+	uint8_t  vref_dq_r2nib19;
+	uint8_t  vref_dq_r3nib0;
+	uint8_t  vref_dq_r3nib1;
+	uint8_t  vref_dq_r3nib2;
+	uint8_t  vref_dq_r3nib3;
+	uint8_t  vref_dq_r3nib4;
+	uint8_t  vref_dq_r3nib5;
+	uint8_t  vref_dq_r3nib6;
+	uint8_t  vref_dq_r3nib7;
+	uint8_t  vref_dq_r3nib8;
+	uint8_t  vref_dq_r3nib9;
+	uint8_t  vref_dq_r3nib10;
+	uint8_t  vref_dq_r3nib11;
+	uint8_t  vref_dq_r3nib12;
+	uint8_t  vref_dq_r3nib13;
+	uint8_t  vref_dq_r3nib14;
+	uint8_t  vref_dq_r3nib15;
+	uint8_t  vref_dq_r3nib16;
+	uint8_t  vref_dq_r3nib17;
+	uint8_t  vref_dq_r3nib18;
+	uint8_t  vref_dq_r3nib19;
+	uint8_t  f0rc00_d0;
+	uint8_t  f0rc01_d0;
+	uint8_t  f0rc02_d0;
+	uint8_t  f0rc03_d0;
+	uint8_t  f0rc04_d0;
+	uint8_t  f0rc05_d0;
+	uint8_t  f0rc06_d0;
+	uint8_t  f0rc07_d0;
+	uint8_t  f0rc08_d0;
+	uint8_t  f0rc09_d0;
+	uint8_t  f0rc0a_d0;
+	uint8_t  f0rc0b_d0;
+	uint8_t  f0rc0c_d0;
+	uint8_t  f0rc0d_d0;
+	uint8_t  f0rc0e_d0;
+	uint8_t  f0rc0f_d0;
+	uint8_t  f0rc1x_d0;
+	uint8_t  f0rc2x_d0;
+	uint8_t  f0rc3x_d0;
+	uint8_t  f0rc4x_d0;
+	uint8_t  f0rc5x_d0;
+	uint8_t  f0rc6x_d0;
+	uint8_t  f0rc7x_d0;
+	uint8_t  f0rc8x_d0;
+	uint8_t  f0rc9x_d0;
+	uint8_t  f0rcax_d0;
+	uint8_t  f0rcbx_d0;
+	uint8_t  f1rc00_d0;
+	uint8_t  f1rc01_d0;
+	uint8_t  f1rc02_d0;
+	uint8_t  f1rc03_d0;
+	uint8_t  f1rc04_d0;
+	uint8_t  f1rc05_d0;
+	uint8_t  f1rc06_d0;
+	uint8_t  f1rc07_d0;
+	uint8_t  f1rc08_d0;
+	uint8_t  f1rc09_d0;
+	uint8_t  f1rc0a_d0;
+	uint8_t  f1rc0b_d0;
+	uint8_t  f1rc0c_d0;
+	uint8_t  f1rc0d_d0;
+	uint8_t  f1rc0e_d0;
+	uint8_t  f1rc0f_d0;
+	uint8_t  f1rc1x_d0;
+	uint8_t  f1rc2x_d0;
+	uint8_t  f1rc3x_d0;
+	uint8_t  f1rc4x_d0;
+	uint8_t  f1rc5x_d0;
+	uint8_t  f1rc6x_d0;
+	uint8_t  f1rc7x_d0;
+	uint8_t  f1rc8x_d0;
+	uint8_t  f1rc9x_d0;
+	uint8_t  f1rcax_d0;
+	uint8_t  f1rcbx_d0;
+	uint8_t  f0rc00_d1;
+	uint8_t  f0rc01_d1;
+	uint8_t  f0rc02_d1;
+	uint8_t  f0rc03_d1;
+	uint8_t  f0rc04_d1;
+	uint8_t  f0rc05_d1;
+	uint8_t  f0rc06_d1;
+	uint8_t  f0rc07_d1;
+	uint8_t  f0rc08_d1;
+	uint8_t  f0rc09_d1;
+	uint8_t  f0rc0a_d1;
+	uint8_t  f0rc0b_d1;
+	uint8_t  f0rc0c_d1;
+	uint8_t  f0rc0d_d1;
+	uint8_t  f0rc0e_d1;
+	uint8_t  f0rc0f_d1;
+	uint8_t  f0rc1x_d1;
+	uint8_t  f0rc2x_d1;
+	uint8_t  f0rc3x_d1;
+	uint8_t  f0rc4x_d1;
+	uint8_t  f0rc5x_d1;
+	uint8_t  f0rc6x_d1;
+	uint8_t  f0rc7x_d1;
+	uint8_t  f0rc8x_d1;
+	uint8_t  f0rc9x_d1;
+	uint8_t  f0rcax_d1;
+	uint8_t  f0rcbx_d1;
+	uint8_t  f1rc00_d1;
+	uint8_t  f1rc01_d1;
+	uint8_t  f1rc02_d1;
+	uint8_t  f1rc03_d1;
+	uint8_t  f1rc04_d1;
+	uint8_t  f1rc05_d1;
+	uint8_t  f1rc06_d1;
+	uint8_t  f1rc07_d1;
+	uint8_t  f1rc08_d1;
+	uint8_t  f1rc09_d1;
+	uint8_t  f1rc0a_d1;
+	uint8_t  f1rc0b_d1;
+	uint8_t  f1rc0c_d1;
+	uint8_t  f1rc0d_d1;
+	uint8_t  f1rc0e_d1;
+	uint8_t  f1rc0f_d1;
+	uint8_t  f1rc1x_d1;
+	uint8_t  f1rc2x_d1;
+	uint8_t  f1rc3x_d1;
+	uint8_t  f1rc4x_d1;
+	uint8_t  f1rc5x_d1;
+	uint8_t  f1rc6x_d1;
+	uint8_t  f1rc7x_d1;
+	uint8_t  f1rc8x_d1;
+	uint8_t  f1rc9x_d1;
+	uint8_t  f1rcax_d1;
+	uint8_t  f1rcbx_d1;
+	uint8_t  reserved142[0x3f6 - 0x142];
+	uint16_t alt_cas_l;
+	uint8_t  alt_wcas_l;
+	uint8_t  d4misc;
+} __packed;
+
+struct ddr4lr1d {
+	uint8_t  reserved00;
+	uint8_t  msg_misc;
+	uint16_t pmu_revision;
+	uint8_t  pstate;
+	uint8_t  pll_bypass_en;
+	uint16_t dramfreq;
+	uint8_t  dfi_freq_ratio;
+	uint8_t  bpznres_val;
+	uint8_t  phy_odt_impedance;
+	uint8_t  phy_drv_impedance;
+	uint8_t  phy_vref;
+	uint8_t  dram_type;
+	uint8_t  disabled_dbyte;
+	uint8_t  enabled_dqs;
+	uint8_t  cs_present;
+	uint8_t  cs_present_d0;
+	uint8_t  cs_present_d1;
+	uint8_t  addr_mirror;
+	uint8_t  cs_test_fail;
+	uint8_t  phy_cfg;
+	uint16_t sequence_ctrl;
+	uint8_t  hdt_ctrl;
+	uint8_t  reserved19[0x22 - 0x19];
+	uint16_t phy_config_override;
+	uint8_t  dfimrlmargin;
+	int8_t   cdd_rr_3_2;
+	int8_t   cdd_rr_3_1;
+	int8_t   cdd_rr_3_0;
+	int8_t   cdd_rr_2_3;
+	int8_t   cdd_rr_2_1;
+	int8_t   cdd_rr_2_0;
+	int8_t   cdd_rr_1_3;
+	int8_t   cdd_rr_1_2;
+	int8_t   cdd_rr_1_0;
+	int8_t   cdd_rr_0_3;
+	int8_t   cdd_rr_0_2;
+	int8_t   cdd_rr_0_1;
+	int8_t   cdd_ww_3_2;
+	int8_t   cdd_ww_3_1;
+	int8_t   cdd_ww_3_0;
+	int8_t   cdd_ww_2_3;
+	int8_t   cdd_ww_2_1;
+	int8_t   cdd_ww_2_0;
+	int8_t   cdd_ww_1_3;
+	int8_t   cdd_ww_1_2;
+	int8_t   cdd_ww_1_0;
+	int8_t   cdd_ww_0_3;
+	int8_t   cdd_ww_0_2;
+	int8_t   cdd_ww_0_1;
+	int8_t   cdd_rw_3_3;
+	int8_t   cdd_rw_3_2;
+	int8_t   cdd_rw_3_1;
+	int8_t   cdd_rw_3_0;
+	int8_t   cdd_rw_2_3;
+	int8_t   cdd_rw_2_2;
+	int8_t   cdd_rw_2_1;
+	int8_t   cdd_rw_2_0;
+	int8_t   cdd_rw_1_3;
+	int8_t   cdd_rw_1_2;
+	int8_t   cdd_rw_1_1;
+	int8_t   cdd_rw_1_0;
+	int8_t   cdd_rw_0_3;
+	int8_t   cdd_rw_0_2;
+	int8_t   cdd_rw_0_1;
+	int8_t   cdd_rw_0_0;
+	int8_t   cdd_wr_3_3;
+	int8_t   cdd_wr_3_2;
+	int8_t   cdd_wr_3_1;
+	int8_t   cdd_wr_3_0;
+	int8_t   cdd_wr_2_3;
+	int8_t   cdd_wr_2_2;
+	int8_t   cdd_wr_2_1;
+	int8_t   cdd_wr_2_0;
+	int8_t   cdd_wr_1_3;
+	int8_t   cdd_wr_1_2;
+	int8_t   cdd_wr_1_1;
+	int8_t   cdd_wr_1_0;
+	int8_t   cdd_wr_0_3;
+	int8_t   cdd_wr_0_2;
+	int8_t   cdd_wr_0_1;
+	int8_t   cdd_wr_0_0;
+	uint8_t  reserved5d;
+	uint16_t mr0;
+	uint16_t mr1;
+	uint16_t mr2;
+	uint16_t mr3;
+	uint16_t mr4;
+	uint16_t mr5;
+	uint16_t mr6;
+	uint8_t  x16present;
+	uint8_t  cs_setup_gddec;
+	uint16_t rtt_nom_wr_park0;
+	uint16_t rtt_nom_wr_park1;
+	uint16_t rtt_nom_wr_park2;
+	uint16_t rtt_nom_wr_park3;
+	uint16_t rtt_nom_wr_park4;
+	uint16_t rtt_nom_wr_park5;
+	uint16_t rtt_nom_wr_park6;
+	uint16_t rtt_nom_wr_park7;
+	uint8_t  acsm_odt_ctrl0;
+	uint8_t  acsm_odt_ctrl1;
+	uint8_t  acsm_odt_ctrl2;
+	uint8_t  acsm_odt_ctrl3;
+	uint8_t  acsm_odt_ctrl4;
+	uint8_t  acsm_odt_ctrl5;
+	uint8_t  acsm_odt_ctrl6;
+	uint8_t  acsm_odt_ctrl7;
+	uint8_t  vref_dq_r0nib0;
+	uint8_t  vref_dq_r0nib1;
+	uint8_t  vref_dq_r0nib2;
+	uint8_t  vref_dq_r0nib3;
+	uint8_t  vref_dq_r0nib4;
+	uint8_t  vref_dq_r0nib5;
+	uint8_t  vref_dq_r0nib6;
+	uint8_t  vref_dq_r0nib7;
+	uint8_t  vref_dq_r0nib8;
+	uint8_t  vref_dq_r0nib9;
+	uint8_t  vref_dq_r0nib10;
+	uint8_t  vref_dq_r0nib11;
+	uint8_t  vref_dq_r0nib12;
+	uint8_t  vref_dq_r0nib13;
+	uint8_t  vref_dq_r0nib14;
+	uint8_t  vref_dq_r0nib15;
+	uint8_t  vref_dq_r0nib16;
+	uint8_t  vref_dq_r0nib17;
+	uint8_t  vref_dq_r0nib18;
+	uint8_t  vref_dq_r0nib19;
+	uint8_t  vref_dq_r1nib0;
+	uint8_t  vref_dq_r1nib1;
+	uint8_t  vref_dq_r1nib2;
+	uint8_t  vref_dq_r1nib3;
+	uint8_t  vref_dq_r1nib4;
+	uint8_t  vref_dq_r1nib5;
+	uint8_t  vref_dq_r1nib6;
+	uint8_t  vref_dq_r1nib7;
+	uint8_t  vref_dq_r1nib8;
+	uint8_t  vref_dq_r1nib9;
+	uint8_t  vref_dq_r1nib10;
+	uint8_t  vref_dq_r1nib11;
+	uint8_t  vref_dq_r1nib12;
+	uint8_t  vref_dq_r1nib13;
+	uint8_t  vref_dq_r1nib14;
+	uint8_t  vref_dq_r1nib15;
+	uint8_t  vref_dq_r1nib16;
+	uint8_t  vref_dq_r1nib17;
+	uint8_t  vref_dq_r1nib18;
+	uint8_t  vref_dq_r1nib19;
+	uint8_t  vref_dq_r2nib0;
+	uint8_t  vref_dq_r2nib1;
+	uint8_t  vref_dq_r2nib2;
+	uint8_t  vref_dq_r2nib3;
+	uint8_t  vref_dq_r2nib4;
+	uint8_t  vref_dq_r2nib5;
+	uint8_t  vref_dq_r2nib6;
+	uint8_t  vref_dq_r2nib7;
+	uint8_t  vref_dq_r2nib8;
+	uint8_t  vref_dq_r2nib9;
+	uint8_t  vref_dq_r2nib10;
+	uint8_t  vref_dq_r2nib11;
+	uint8_t  vref_dq_r2nib12;
+	uint8_t  vref_dq_r2nib13;
+	uint8_t  vref_dq_r2nib14;
+	uint8_t  vref_dq_r2nib15;
+	uint8_t  vref_dq_r2nib16;
+	uint8_t  vref_dq_r2nib17;
+	uint8_t  vref_dq_r2nib18;
+	uint8_t  vref_dq_r2nib19;
+	uint8_t  vref_dq_r3nib0;
+	uint8_t  vref_dq_r3nib1;
+	uint8_t  vref_dq_r3nib2;
+	uint8_t  vref_dq_r3nib3;
+	uint8_t  vref_dq_r3nib4;
+	uint8_t  vref_dq_r3nib5;
+	uint8_t  vref_dq_r3nib6;
+	uint8_t  vref_dq_r3nib7;
+	uint8_t  vref_dq_r3nib8;
+	uint8_t  vref_dq_r3nib9;
+	uint8_t  vref_dq_r3nib10;
+	uint8_t  vref_dq_r3nib11;
+	uint8_t  vref_dq_r3nib12;
+	uint8_t  vref_dq_r3nib13;
+	uint8_t  vref_dq_r3nib14;
+	uint8_t  vref_dq_r3nib15;
+	uint8_t  vref_dq_r3nib16;
+	uint8_t  vref_dq_r3nib17;
+	uint8_t  vref_dq_r3nib18;
+	uint8_t  vref_dq_r3nib19;
+	uint8_t  f0rc00_d0;
+	uint8_t  f0rc01_d0;
+	uint8_t  f0rc02_d0;
+	uint8_t  f0rc03_d0;
+	uint8_t  f0rc04_d0;
+	uint8_t  f0rc05_d0;
+	uint8_t  f0rc06_d0;
+	uint8_t  f0rc07_d0;
+	uint8_t  f0rc08_d0;
+	uint8_t  f0rc09_d0;
+	uint8_t  f0rc0a_d0;
+	uint8_t  f0rc0b_d0;
+	uint8_t  f0rc0c_d0;
+	uint8_t  f0rc0d_d0;
+	uint8_t  f0rc0e_d0;
+	uint8_t  f0rc0f_d0;
+	uint8_t  f0rc1x_d0;
+	uint8_t  f0rc2x_d0;
+	uint8_t  f0rc3x_d0;
+	uint8_t  f0rc4x_d0;
+	uint8_t  f0rc5x_d0;
+	uint8_t  f0rc6x_d0;
+	uint8_t  f0rc7x_d0;
+	uint8_t  f0rc8x_d0;
+	uint8_t  f0rc9x_d0;
+	uint8_t  f0rcax_d0;
+	uint8_t  f0rcbx_d0;
+	uint8_t  f1rc00_d0;
+	uint8_t  f1rc01_d0;
+	uint8_t  f1rc02_d0;
+	uint8_t  f1rc03_d0;
+	uint8_t  f1rc04_d0;
+	uint8_t  f1rc05_d0;
+	uint8_t  f1rc06_d0;
+	uint8_t  f1rc07_d0;
+	uint8_t  f1rc08_d0;
+	uint8_t  f1rc09_d0;
+	uint8_t  f1rc0a_d0;
+	uint8_t  f1rc0b_d0;
+	uint8_t  f1rc0c_d0;
+	uint8_t  f1rc0d_d0;
+	uint8_t  f1rc0e_d0;
+	uint8_t  f1rc0f_d0;
+	uint8_t  f1rc1x_d0;
+	uint8_t  f1rc2x_d0;
+	uint8_t  f1rc3x_d0;
+	uint8_t  f1rc4x_d0;
+	uint8_t  f1rc5x_d0;
+	uint8_t  f1rc6x_d0;
+	uint8_t  f1rc7x_d0;
+	uint8_t  f1rc8x_d0;
+	uint8_t  f1rc9x_d0;
+	uint8_t  f1rcax_d0;
+	uint8_t  f1rcbx_d0;
+	uint8_t  f0rc00_d1;
+	uint8_t  f0rc01_d1;
+	uint8_t  f0rc02_d1;
+	uint8_t  f0rc03_d1;
+	uint8_t  f0rc04_d1;
+	uint8_t  f0rc05_d1;
+	uint8_t  f0rc06_d1;
+	uint8_t  f0rc07_d1;
+	uint8_t  f0rc08_d1;
+	uint8_t  f0rc09_d1;
+	uint8_t  f0rc0a_d1;
+	uint8_t  f0rc0b_d1;
+	uint8_t  f0rc0c_d1;
+	uint8_t  f0rc0d_d1;
+	uint8_t  f0rc0e_d1;
+	uint8_t  f0rc0f_d1;
+	uint8_t  f0rc1x_d1;
+	uint8_t  f0rc2x_d1;
+	uint8_t  f0rc3x_d1;
+	uint8_t  f0rc4x_d1;
+	uint8_t  f0rc5x_d1;
+	uint8_t  f0rc6x_d1;
+	uint8_t  f0rc7x_d1;
+	uint8_t  f0rc8x_d1;
+	uint8_t  f0rc9x_d1;
+	uint8_t  f0rcax_d1;
+	uint8_t  f0rcbx_d1;
+	uint8_t  f1rc00_d1;
+	uint8_t  f1rc01_d1;
+	uint8_t  f1rc02_d1;
+	uint8_t  f1rc03_d1;
+	uint8_t  f1rc04_d1;
+	uint8_t  f1rc05_d1;
+	uint8_t  f1rc06_d1;
+	uint8_t  f1rc07_d1;
+	uint8_t  f1rc08_d1;
+	uint8_t  f1rc09_d1;
+	uint8_t  f1rc0a_d1;
+	uint8_t  f1rc0b_d1;
+	uint8_t  f1rc0c_d1;
+	uint8_t  f1rc0d_d1;
+	uint8_t  f1rc0e_d1;
+	uint8_t  f1rc0f_d1;
+	uint8_t  f1rc1x_d1;
+	uint8_t  f1rc2x_d1;
+	uint8_t  f1rc3x_d1;
+	uint8_t  f1rc4x_d1;
+	uint8_t  f1rc5x_d1;
+	uint8_t  f1rc6x_d1;
+	uint8_t  f1rc7x_d1;
+	uint8_t  f1rc8x_d1;
+	uint8_t  f1rc9x_d1;
+	uint8_t  f1rcax_d1;
+	uint8_t  f1rcbx_d1;
+	uint8_t  bc00_d0;
+	uint8_t  bc01_d0;
+	uint8_t  bc02_d0;
+	uint8_t  bc03_d0;
+	uint8_t  bc04_d0;
+	uint8_t  bc05_d0;
+	uint8_t  bc06_d0;
+	uint8_t  bc07_d0;
+	uint8_t  bc08_d0;
+	uint8_t  bc09_d0;
+	uint8_t  bc0a_d0;
+	uint8_t  bc0b_d0;
+	uint8_t  bc0c_d0;
+	uint8_t  bc0d_d0;
+	uint8_t  bc0e_d0;
+	uint8_t  f0bc6x_d0;
+	uint8_t  f0bccx_d0;
+	uint8_t  f0bcdx_d0;
+	uint8_t  f0bcex_d0;
+	uint8_t  f0bcfx_d0;
+	uint8_t  f1bccx_d0;
+	uint8_t  f1bcdx_d0;
+	uint8_t  f1bcex_d0;
+	uint8_t  f1bcfx_d0;
+	uint8_t  f0bc2x_b0_d0;
+	uint8_t  f0bc3x_b0_d0;
+	uint8_t  f0bc4x_b0_d0;
+	uint8_t  f0bc5x_b0_d0;
+	uint8_t  f0bc8x_b0_d0;
+	uint8_t  f0bc9x_b0_d0;
+	uint8_t  f0bcax_b0_d0;
+	uint8_t  f0bcbx_b0_d0;
+	uint8_t  f1bc2x_b0_d0;
+	uint8_t  f1bc3x_b0_d0;
+	uint8_t  f1bc4x_b0_d0;
+	uint8_t  f1bc5x_b0_d0;
+	uint8_t  f1bc8x_b0_d0;
+	uint8_t  f1bc9x_b0_d0;
+	uint8_t  f1bcax_b0_d0;
+	uint8_t  f1bcbx_b0_d0;
+	uint8_t  f2bc2x_b0_d0;
+	uint8_t  f2bc3x_b0_d0;
+	uint8_t  f2bc4x_b0_d0;
+	uint8_t  f2bc5x_b0_d0;
+	uint8_t  f2bc8x_b0_d0;
+	uint8_t  f2bc9x_b0_d0;
+	uint8_t  f2bcax_b0_d0;
+	uint8_t  f2bcbx_b0_d0;
+	uint8_t  f3bc2x_b0_d0;
+	uint8_t  f3bc3x_b0_d0;
+	uint8_t  f3bc4x_b0_d0;
+	uint8_t  f3bc5x_b0_d0;
+	uint8_t  f3bc8x_b0_d0;
+	uint8_t  f3bc9x_b0_d0;
+	uint8_t  f3bcax_b0_d0;
+	uint8_t  f3bcbx_b0_d0;
+	uint8_t  f0bc2x_b1_d0;
+	uint8_t  f0bc3x_b1_d0;
+	uint8_t  f0bc4x_b1_d0;
+	uint8_t  f0bc5x_b1_d0;
+	uint8_t  f0bc8x_b1_d0;
+	uint8_t  f0bc9x_b1_d0;
+	uint8_t  f0bcax_b1_d0;
+	uint8_t  f0bcbx_b1_d0;
+	uint8_t  f1bc2x_b1_d0;
+	uint8_t  f1bc3x_b1_d0;
+	uint8_t  f1bc4x_b1_d0;
+	uint8_t  f1bc5x_b1_d0;
+	uint8_t  f1bc8x_b1_d0;
+	uint8_t  f1bc9x_b1_d0;
+	uint8_t  f1bcax_b1_d0;
+	uint8_t  f1bcbx_b1_d0;
+	uint8_t  f2bc2x_b1_d0;
+	uint8_t  f2bc3x_b1_d0;
+	uint8_t  f2bc4x_b1_d0;
+	uint8_t  f2bc5x_b1_d0;
+	uint8_t  f2bc8x_b1_d0;
+	uint8_t  f2bc9x_b1_d0;
+	uint8_t  f2bcax_b1_d0;
+	uint8_t  f2bcbx_b1_d0;
+	uint8_t  f3bc2x_b1_d0;
+	uint8_t  f3bc3x_b1_d0;
+	uint8_t  f3bc4x_b1_d0;
+	uint8_t  f3bc5x_b1_d0;
+	uint8_t  f3bc8x_b1_d0;
+	uint8_t  f3bc9x_b1_d0;
+	uint8_t  f3bcax_b1_d0;
+	uint8_t  f3bcbx_b1_d0;
+	uint8_t  f0bc2x_b2_d0;
+	uint8_t  f0bc3x_b2_d0;
+	uint8_t  f0bc4x_b2_d0;
+	uint8_t  f0bc5x_b2_d0;
+	uint8_t  f0bc8x_b2_d0;
+	uint8_t  f0bc9x_b2_d0;
+	uint8_t  f0bcax_b2_d0;
+	uint8_t  f0bcbx_b2_d0;
+	uint8_t  f1bc2x_b2_d0;
+	uint8_t  f1bc3x_b2_d0;
+	uint8_t  f1bc4x_b2_d0;
+	uint8_t  f1bc5x_b2_d0;
+	uint8_t  f1bc8x_b2_d0;
+	uint8_t  f1bc9x_b2_d0;
+	uint8_t  f1bcax_b2_d0;
+	uint8_t  f1bcbx_b2_d0;
+	uint8_t  f2bc2x_b2_d0;
+	uint8_t  f2bc3x_b2_d0;
+	uint8_t  f2bc4x_b2_d0;
+	uint8_t  f2bc5x_b2_d0;
+	uint8_t  f2bc8x_b2_d0;
+	uint8_t  f2bc9x_b2_d0;
+	uint8_t  f2bcax_b2_d0;
+	uint8_t  f2bcbx_b2_d0;
+	uint8_t  f3bc2x_b2_d0;
+	uint8_t  f3bc3x_b2_d0;
+	uint8_t  f3bc4x_b2_d0;
+	uint8_t  f3bc5x_b2_d0;
+	uint8_t  f3bc8x_b2_d0;
+	uint8_t  f3bc9x_b2_d0;
+	uint8_t  f3bcax_b2_d0;
+	uint8_t  f3bcbx_b2_d0;
+	uint8_t  f0bc2x_b3_d0;
+	uint8_t  f0bc3x_b3_d0;
+	uint8_t  f0bc4x_b3_d0;
+	uint8_t  f0bc5x_b3_d0;
+	uint8_t  f0bc8x_b3_d0;
+	uint8_t  f0bc9x_b3_d0;
+	uint8_t  f0bcax_b3_d0;
+	uint8_t  f0bcbx_b3_d0;
+	uint8_t  f1bc2x_b3_d0;
+	uint8_t  f1bc3x_b3_d0;
+	uint8_t  f1bc4x_b3_d0;
+	uint8_t  f1bc5x_b3_d0;
+	uint8_t  f1bc8x_b3_d0;
+	uint8_t  f1bc9x_b3_d0;
+	uint8_t  f1bcax_b3_d0;
+	uint8_t  f1bcbx_b3_d0;
+	uint8_t  f2bc2x_b3_d0;
+	uint8_t  f2bc3x_b3_d0;
+	uint8_t  f2bc4x_b3_d0;
+	uint8_t  f2bc5x_b3_d0;
+	uint8_t  f2bc8x_b3_d0;
+	uint8_t  f2bc9x_b3_d0;
+	uint8_t  f2bcax_b3_d0;
+	uint8_t  f2bcbx_b3_d0;
+	uint8_t  f3bc2x_b3_d0;
+	uint8_t  f3bc3x_b3_d0;
+	uint8_t  f3bc4x_b3_d0;
+	uint8_t  f3bc5x_b3_d0;
+	uint8_t  f3bc8x_b3_d0;
+	uint8_t  f3bc9x_b3_d0;
+	uint8_t  f3bcax_b3_d0;
+	uint8_t  f3bcbx_b3_d0;
+	uint8_t  f0bc2x_b4_d0;
+	uint8_t  f0bc3x_b4_d0;
+	uint8_t  f0bc4x_b4_d0;
+	uint8_t  f0bc5x_b4_d0;
+	uint8_t  f0bc8x_b4_d0;
+	uint8_t  f0bc9x_b4_d0;
+	uint8_t  f0bcax_b4_d0;
+	uint8_t  f0bcbx_b4_d0;
+	uint8_t  f1bc2x_b4_d0;
+	uint8_t  f1bc3x_b4_d0;
+	uint8_t  f1bc4x_b4_d0;
+	uint8_t  f1bc5x_b4_d0;
+	uint8_t  f1bc8x_b4_d0;
+	uint8_t  f1bc9x_b4_d0;
+	uint8_t  f1bcax_b4_d0;
+	uint8_t  f1bcbx_b4_d0;
+	uint8_t  f2bc2x_b4_d0;
+	uint8_t  f2bc3x_b4_d0;
+	uint8_t  f2bc4x_b4_d0;
+	uint8_t  f2bc5x_b4_d0;
+	uint8_t  f2bc8x_b4_d0;
+	uint8_t  f2bc9x_b4_d0;
+	uint8_t  f2bcax_b4_d0;
+	uint8_t  f2bcbx_b4_d0;
+	uint8_t  f3bc2x_b4_d0;
+	uint8_t  f3bc3x_b4_d0;
+	uint8_t  f3bc4x_b4_d0;
+	uint8_t  f3bc5x_b4_d0;
+	uint8_t  f3bc8x_b4_d0;
+	uint8_t  f3bc9x_b4_d0;
+	uint8_t  f3bcax_b4_d0;
+	uint8_t  f3bcbx_b4_d0;
+	uint8_t  f0bc2x_b5_d0;
+	uint8_t  f0bc3x_b5_d0;
+	uint8_t  f0bc4x_b5_d0;
+	uint8_t  f0bc5x_b5_d0;
+	uint8_t  f0bc8x_b5_d0;
+	uint8_t  f0bc9x_b5_d0;
+	uint8_t  f0bcax_b5_d0;
+	uint8_t  f0bcbx_b5_d0;
+	uint8_t  f1bc2x_b5_d0;
+	uint8_t  f1bc3x_b5_d0;
+	uint8_t  f1bc4x_b5_d0;
+	uint8_t  f1bc5x_b5_d0;
+	uint8_t  f1bc8x_b5_d0;
+	uint8_t  f1bc9x_b5_d0;
+	uint8_t  f1bcax_b5_d0;
+	uint8_t  f1bcbx_b5_d0;
+	uint8_t  f2bc2x_b5_d0;
+	uint8_t  f2bc3x_b5_d0;
+	uint8_t  f2bc4x_b5_d0;
+	uint8_t  f2bc5x_b5_d0;
+	uint8_t  f2bc8x_b5_d0;
+	uint8_t  f2bc9x_b5_d0;
+	uint8_t  f2bcax_b5_d0;
+	uint8_t  f2bcbx_b5_d0;
+	uint8_t  f3bc2x_b5_d0;
+	uint8_t  f3bc3x_b5_d0;
+	uint8_t  f3bc4x_b5_d0;
+	uint8_t  f3bc5x_b5_d0;
+	uint8_t  f3bc8x_b5_d0;
+	uint8_t  f3bc9x_b5_d0;
+	uint8_t  f3bcax_b5_d0;
+	uint8_t  f3bcbx_b5_d0;
+	uint8_t  f0bc2x_b6_d0;
+	uint8_t  f0bc3x_b6_d0;
+	uint8_t  f0bc4x_b6_d0;
+	uint8_t  f0bc5x_b6_d0;
+	uint8_t  f0bc8x_b6_d0;
+	uint8_t  f0bc9x_b6_d0;
+	uint8_t  f0bcax_b6_d0;
+	uint8_t  f0bcbx_b6_d0;
+	uint8_t  f1bc2x_b6_d0;
+	uint8_t  f1bc3x_b6_d0;
+	uint8_t  f1bc4x_b6_d0;
+	uint8_t  f1bc5x_b6_d0;
+	uint8_t  f1bc8x_b6_d0;
+	uint8_t  f1bc9x_b6_d0;
+	uint8_t  f1bcax_b6_d0;
+	uint8_t  f1bcbx_b6_d0;
+	uint8_t  f2bc2x_b6_d0;
+	uint8_t  f2bc3x_b6_d0;
+	uint8_t  f2bc4x_b6_d0;
+	uint8_t  f2bc5x_b6_d0;
+	uint8_t  f2bc8x_b6_d0;
+	uint8_t  f2bc9x_b6_d0;
+	uint8_t  f2bcax_b6_d0;
+	uint8_t  f2bcbx_b6_d0;
+	uint8_t  f3bc2x_b6_d0;
+	uint8_t  f3bc3x_b6_d0;
+	uint8_t  f3bc4x_b6_d0;
+	uint8_t  f3bc5x_b6_d0;
+	uint8_t  f3bc8x_b6_d0;
+	uint8_t  f3bc9x_b6_d0;
+	uint8_t  f3bcax_b6_d0;
+	uint8_t  f3bcbx_b6_d0;
+	uint8_t  f0bc2x_b7_d0;
+	uint8_t  f0bc3x_b7_d0;
+	uint8_t  f0bc4x_b7_d0;
+	uint8_t  f0bc5x_b7_d0;
+	uint8_t  f0bc8x_b7_d0;
+	uint8_t  f0bc9x_b7_d0;
+	uint8_t  f0bcax_b7_d0;
+	uint8_t  f0bcbx_b7_d0;
+	uint8_t  f1bc2x_b7_d0;
+	uint8_t  f1bc3x_b7_d0;
+	uint8_t  f1bc4x_b7_d0;
+	uint8_t  f1bc5x_b7_d0;
+	uint8_t  f1bc8x_b7_d0;
+	uint8_t  f1bc9x_b7_d0;
+	uint8_t  f1bcax_b7_d0;
+	uint8_t  f1bcbx_b7_d0;
+	uint8_t  f2bc2x_b7_d0;
+	uint8_t  f2bc3x_b7_d0;
+	uint8_t  f2bc4x_b7_d0;
+	uint8_t  f2bc5x_b7_d0;
+	uint8_t  f2bc8x_b7_d0;
+	uint8_t  f2bc9x_b7_d0;
+	uint8_t  f2bcax_b7_d0;
+	uint8_t  f2bcbx_b7_d0;
+	uint8_t  f3bc2x_b7_d0;
+	uint8_t  f3bc3x_b7_d0;
+	uint8_t  f3bc4x_b7_d0;
+	uint8_t  f3bc5x_b7_d0;
+	uint8_t  f3bc8x_b7_d0;
+	uint8_t  f3bc9x_b7_d0;
+	uint8_t  f3bcax_b7_d0;
+	uint8_t  f3bcbx_b7_d0;
+	uint8_t  f0bc2x_b8_d0;
+	uint8_t  f0bc3x_b8_d0;
+	uint8_t  f0bc4x_b8_d0;
+	uint8_t  f0bc5x_b8_d0;
+	uint8_t  f0bc8x_b8_d0;
+	uint8_t  f0bc9x_b8_d0;
+	uint8_t  f0bcax_b8_d0;
+	uint8_t  f0bcbx_b8_d0;
+	uint8_t  f1bc2x_b8_d0;
+	uint8_t  f1bc3x_b8_d0;
+	uint8_t  f1bc4x_b8_d0;
+	uint8_t  f1bc5x_b8_d0;
+	uint8_t  f1bc8x_b8_d0;
+	uint8_t  f1bc9x_b8_d0;
+	uint8_t  f1bcax_b8_d0;
+	uint8_t  f1bcbx_b8_d0;
+	uint8_t  f2bc2x_b8_d0;
+	uint8_t  f2bc3x_b8_d0;
+	uint8_t  f2bc4x_b8_d0;
+	uint8_t  f2bc5x_b8_d0;
+	uint8_t  f2bc8x_b8_d0;
+	uint8_t  f2bc9x_b8_d0;
+	uint8_t  f2bcax_b8_d0;
+	uint8_t  f2bcbx_b8_d0;
+	uint8_t  f3bc2x_b8_d0;
+	uint8_t  f3bc3x_b8_d0;
+	uint8_t  f3bc4x_b8_d0;
+	uint8_t  f3bc5x_b8_d0;
+	uint8_t  f3bc8x_b8_d0;
+	uint8_t  f3bc9x_b8_d0;
+	uint8_t  f3bcax_b8_d0;
+	uint8_t  f3bcbx_b8_d0;
+	uint8_t  f5bc5x_d0;
+	uint8_t  f5bc6x_d0;
+	uint8_t  f4bc8x_d0;
+	uint8_t  f4bc9x_d0;
+	uint8_t  f4bcax_d0;
+	uint8_t  f4bcbx_d0;
+	uint8_t  f4bccx_d0;
+	uint8_t  f4bcdx_d0;
+	uint8_t  f4bcex_d0;
+	uint8_t  f4bcfx_d0;
+	uint8_t  f5bc8x_d0;
+	uint8_t  f5bc9x_d0;
+	uint8_t  f5bcax_d0;
+	uint8_t  f5bcbx_d0;
+	uint8_t  f5bccx_d0;
+	uint8_t  f5bcdx_d0;
+	uint8_t  f5bcex_d0;
+	uint8_t  f5bcfx_d0;
+	uint8_t  f6bc8x_d0;
+	uint8_t  f6bc9x_d0;
+	uint8_t  f6bcax_d0;
+	uint8_t  f6bcbx_d0;
+	uint8_t  f6bccx_d0;
+	uint8_t  f6bcdx_d0;
+	uint8_t  f6bcex_d0;
+	uint8_t  f6bcfx_d0;
+	uint8_t  f7bc8x_d0;
+	uint8_t  f7bc9x_d0;
+	uint8_t  f7bcax_d0;
+	uint8_t  f7bcbx_d0;
+	uint8_t  f7bccx_d0;
+	uint8_t  f7bcdx_d0;
+	uint8_t  f7bcex_d0;
+	uint8_t  f7bcfx_d0;
+	uint8_t  bc00_d1;
+	uint8_t  bc01_d1;
+	uint8_t  bc02_d1;
+	uint8_t  bc03_d1;
+	uint8_t  bc04_d1;
+	uint8_t  bc05_d1;
+	uint8_t  bc06_d1;
+	uint8_t  bc07_d1;
+	uint8_t  bc08_d1;
+	uint8_t  bc09_d1;
+	uint8_t  bc0a_d1;
+	uint8_t  bc0b_d1;
+	uint8_t  bc0c_d1;
+	uint8_t  bc0d_d1;
+	uint8_t  bc0e_d1;
+	uint8_t  f0bc6x_d1;
+	uint8_t  f0bccx_d1;
+	uint8_t  f0bcdx_d1;
+	uint8_t  f0bcex_d1;
+	uint8_t  f0bcfx_d1;
+	uint8_t  f1bccx_d1;
+	uint8_t  f1bcdx_d1;
+	uint8_t  f1bcex_d1;
+	uint8_t  f1bcfx_d1;
+	uint8_t  f0bc2x_b0_d1;
+	uint8_t  f0bc3x_b0_d1;
+	uint8_t  f0bc4x_b0_d1;
+	uint8_t  f0bc5x_b0_d1;
+	uint8_t  f0bc8x_b0_d1;
+	uint8_t  f0bc9x_b0_d1;
+	uint8_t  f0bcax_b0_d1;
+	uint8_t  f0bcbx_b0_d1;
+	uint8_t  f1bc2x_b0_d1;
+	uint8_t  f1bc3x_b0_d1;
+	uint8_t  f1bc4x_b0_d1;
+	uint8_t  f1bc5x_b0_d1;
+	uint8_t  f1bc8x_b0_d1;
+	uint8_t  f1bc9x_b0_d1;
+	uint8_t  f1bcax_b0_d1;
+	uint8_t  f1bcbx_b0_d1;
+	uint8_t  f2bc2x_b0_d1;
+	uint8_t  f2bc3x_b0_d1;
+	uint8_t  f2bc4x_b0_d1;
+	uint8_t  f2bc5x_b0_d1;
+	uint8_t  f2bc8x_b0_d1;
+	uint8_t  f2bc9x_b0_d1;
+	uint8_t  f2bcax_b0_d1;
+	uint8_t  f2bcbx_b0_d1;
+	uint8_t  f3bc2x_b0_d1;
+	uint8_t  f3bc3x_b0_d1;
+	uint8_t  f3bc4x_b0_d1;
+	uint8_t  f3bc5x_b0_d1;
+	uint8_t  f3bc8x_b0_d1;
+	uint8_t  f3bc9x_b0_d1;
+	uint8_t  f3bcax_b0_d1;
+	uint8_t  f3bcbx_b0_d1;
+	uint8_t  f0bc2x_b1_d1;
+	uint8_t  f0bc3x_b1_d1;
+	uint8_t  f0bc4x_b1_d1;
+	uint8_t  f0bc5x_b1_d1;
+	uint8_t  f0bc8x_b1_d1;
+	uint8_t  f0bc9x_b1_d1;
+	uint8_t  f0bcax_b1_d1;
+	uint8_t  f0bcbx_b1_d1;
+	uint8_t  f1bc2x_b1_d1;
+	uint8_t  f1bc3x_b1_d1;
+	uint8_t  f1bc4x_b1_d1;
+	uint8_t  f1bc5x_b1_d1;
+	uint8_t  f1bc8x_b1_d1;
+	uint8_t  f1bc9x_b1_d1;
+	uint8_t  f1bcax_b1_d1;
+	uint8_t  f1bcbx_b1_d1;
+	uint8_t  f2bc2x_b1_d1;
+	uint8_t  f2bc3x_b1_d1;
+	uint8_t  f2bc4x_b1_d1;
+	uint8_t  f2bc5x_b1_d1;
+	uint8_t  f2bc8x_b1_d1;
+	uint8_t  f2bc9x_b1_d1;
+	uint8_t  f2bcax_b1_d1;
+	uint8_t  f2bcbx_b1_d1;
+	uint8_t  f3bc2x_b1_d1;
+	uint8_t  f3bc3x_b1_d1;
+	uint8_t  f3bc4x_b1_d1;
+	uint8_t  f3bc5x_b1_d1;
+	uint8_t  f3bc8x_b1_d1;
+	uint8_t  f3bc9x_b1_d1;
+	uint8_t  f3bcax_b1_d1;
+	uint8_t  f3bcbx_b1_d1;
+	uint8_t  f0bc2x_b2_d1;
+	uint8_t  f0bc3x_b2_d1;
+	uint8_t  f0bc4x_b2_d1;
+	uint8_t  f0bc5x_b2_d1;
+	uint8_t  f0bc8x_b2_d1;
+	uint8_t  f0bc9x_b2_d1;
+	uint8_t  f0bcax_b2_d1;
+	uint8_t  f0bcbx_b2_d1;
+	uint8_t  f1bc2x_b2_d1;
+	uint8_t  f1bc3x_b2_d1;
+	uint8_t  f1bc4x_b2_d1;
+	uint8_t  f1bc5x_b2_d1;
+	uint8_t  f1bc8x_b2_d1;
+	uint8_t  f1bc9x_b2_d1;
+	uint8_t  f1bcax_b2_d1;
+	uint8_t  f1bcbx_b2_d1;
+	uint8_t  f2bc2x_b2_d1;
+	uint8_t  f2bc3x_b2_d1;
+	uint8_t  f2bc4x_b2_d1;
+	uint8_t  f2bc5x_b2_d1;
+	uint8_t  f2bc8x_b2_d1;
+	uint8_t  f2bc9x_b2_d1;
+	uint8_t  f2bcax_b2_d1;
+	uint8_t  f2bcbx_b2_d1;
+	uint8_t  f3bc2x_b2_d1;
+	uint8_t  f3bc3x_b2_d1;
+	uint8_t  f3bc4x_b2_d1;
+	uint8_t  f3bc5x_b2_d1;
+	uint8_t  f3bc8x_b2_d1;
+	uint8_t  f3bc9x_b2_d1;
+	uint8_t  f3bcax_b2_d1;
+	uint8_t  f3bcbx_b2_d1;
+	uint8_t  f0bc2x_b3_d1;
+	uint8_t  f0bc3x_b3_d1;
+	uint8_t  f0bc4x_b3_d1;
+	uint8_t  f0bc5x_b3_d1;
+	uint8_t  f0bc8x_b3_d1;
+	uint8_t  f0bc9x_b3_d1;
+	uint8_t  f0bcax_b3_d1;
+	uint8_t  f0bcbx_b3_d1;
+	uint8_t  f1bc2x_b3_d1;
+	uint8_t  f1bc3x_b3_d1;
+	uint8_t  f1bc4x_b3_d1;
+	uint8_t  f1bc5x_b3_d1;
+	uint8_t  f1bc8x_b3_d1;
+	uint8_t  f1bc9x_b3_d1;
+	uint8_t  f1bcax_b3_d1;
+	uint8_t  f1bcbx_b3_d1;
+	uint8_t  f2bc2x_b3_d1;
+	uint8_t  f2bc3x_b3_d1;
+	uint8_t  f2bc4x_b3_d1;
+	uint8_t  f2bc5x_b3_d1;
+	uint8_t  f2bc8x_b3_d1;
+	uint8_t  f2bc9x_b3_d1;
+	uint8_t  f2bcax_b3_d1;
+	uint8_t  f2bcbx_b3_d1;
+	uint8_t  f3bc2x_b3_d1;
+	uint8_t  f3bc3x_b3_d1;
+	uint8_t  f3bc4x_b3_d1;
+	uint8_t  f3bc5x_b3_d1;
+	uint8_t  f3bc8x_b3_d1;
+	uint8_t  f3bc9x_b3_d1;
+	uint8_t  f3bcax_b3_d1;
+	uint8_t  f3bcbx_b3_d1;
+	uint8_t  f0bc2x_b4_d1;
+	uint8_t  f0bc3x_b4_d1;
+	uint8_t  f0bc4x_b4_d1;
+	uint8_t  f0bc5x_b4_d1;
+	uint8_t  f0bc8x_b4_d1;
+	uint8_t  f0bc9x_b4_d1;
+	uint8_t  f0bcax_b4_d1;
+	uint8_t  f0bcbx_b4_d1;
+	uint8_t  f1bc2x_b4_d1;
+	uint8_t  f1bc3x_b4_d1;
+	uint8_t  f1bc4x_b4_d1;
+	uint8_t  f1bc5x_b4_d1;
+	uint8_t  f1bc8x_b4_d1;
+	uint8_t  f1bc9x_b4_d1;
+	uint8_t  f1bcax_b4_d1;
+	uint8_t  f1bcbx_b4_d1;
+	uint8_t  f2bc2x_b4_d1;
+	uint8_t  f2bc3x_b4_d1;
+	uint8_t  f2bc4x_b4_d1;
+	uint8_t  f2bc5x_b4_d1;
+	uint8_t  f2bc8x_b4_d1;
+	uint8_t  f2bc9x_b4_d1;
+	uint8_t  f2bcax_b4_d1;
+	uint8_t  f2bcbx_b4_d1;
+	uint8_t  f3bc2x_b4_d1;
+	uint8_t  f3bc3x_b4_d1;
+	uint8_t  f3bc4x_b4_d1;
+	uint8_t  f3bc5x_b4_d1;
+	uint8_t  f3bc8x_b4_d1;
+	uint8_t  f3bc9x_b4_d1;
+	uint8_t  f3bcax_b4_d1;
+	uint8_t  f3bcbx_b4_d1;
+	uint8_t  f0bc2x_b5_d1;
+	uint8_t  f0bc3x_b5_d1;
+	uint8_t  f0bc4x_b5_d1;
+	uint8_t  f0bc5x_b5_d1;
+	uint8_t  f0bc8x_b5_d1;
+	uint8_t  f0bc9x_b5_d1;
+	uint8_t  f0bcax_b5_d1;
+	uint8_t  f0bcbx_b5_d1;
+	uint8_t  f1bc2x_b5_d1;
+	uint8_t  f1bc3x_b5_d1;
+	uint8_t  f1bc4x_b5_d1;
+	uint8_t  f1bc5x_b5_d1;
+	uint8_t  f1bc8x_b5_d1;
+	uint8_t  f1bc9x_b5_d1;
+	uint8_t  f1bcax_b5_d1;
+	uint8_t  f1bcbx_b5_d1;
+	uint8_t  f2bc2x_b5_d1;
+	uint8_t  f2bc3x_b5_d1;
+	uint8_t  f2bc4x_b5_d1;
+	uint8_t  f2bc5x_b5_d1;
+	uint8_t  f2bc8x_b5_d1;
+	uint8_t  f2bc9x_b5_d1;
+	uint8_t  f2bcax_b5_d1;
+	uint8_t  f2bcbx_b5_d1;
+	uint8_t  f3bc2x_b5_d1;
+	uint8_t  f3bc3x_b5_d1;
+	uint8_t  f3bc4x_b5_d1;
+	uint8_t  f3bc5x_b5_d1;
+	uint8_t  f3bc8x_b5_d1;
+	uint8_t  f3bc9x_b5_d1;
+	uint8_t  f3bcax_b5_d1;
+	uint8_t  f3bcbx_b5_d1;
+	uint8_t  f0bc2x_b6_d1;
+	uint8_t  f0bc3x_b6_d1;
+	uint8_t  f0bc4x_b6_d1;
+	uint8_t  f0bc5x_b6_d1;
+	uint8_t  f0bc8x_b6_d1;
+	uint8_t  f0bc9x_b6_d1;
+	uint8_t  f0bcax_b6_d1;
+	uint8_t  f0bcbx_b6_d1;
+	uint8_t  f1bc2x_b6_d1;
+	uint8_t  f1bc3x_b6_d1;
+	uint8_t  f1bc4x_b6_d1;
+	uint8_t  f1bc5x_b6_d1;
+	uint8_t  f1bc8x_b6_d1;
+	uint8_t  f1bc9x_b6_d1;
+	uint8_t  f1bcax_b6_d1;
+	uint8_t  f1bcbx_b6_d1;
+	uint8_t  f2bc2x_b6_d1;
+	uint8_t  f2bc3x_b6_d1;
+	uint8_t  f2bc4x_b6_d1;
+	uint8_t  f2bc5x_b6_d1;
+	uint8_t  f2bc8x_b6_d1;
+	uint8_t  f2bc9x_b6_d1;
+	uint8_t  f2bcax_b6_d1;
+	uint8_t  f2bcbx_b6_d1;
+	uint8_t  f3bc2x_b6_d1;
+	uint8_t  f3bc3x_b6_d1;
+	uint8_t  f3bc4x_b6_d1;
+	uint8_t  f3bc5x_b6_d1;
+	uint8_t  f3bc8x_b6_d1;
+	uint8_t  f3bc9x_b6_d1;
+	uint8_t  f3bcax_b6_d1;
+	uint8_t  f3bcbx_b6_d1;
+	uint8_t  f0bc2x_b7_d1;
+	uint8_t  f0bc3x_b7_d1;
+	uint8_t  f0bc4x_b7_d1;
+	uint8_t  f0bc5x_b7_d1;
+	uint8_t  f0bc8x_b7_d1;
+	uint8_t  f0bc9x_b7_d1;
+	uint8_t  f0bcax_b7_d1;
+	uint8_t  f0bcbx_b7_d1;
+	uint8_t  f1bc2x_b7_d1;
+	uint8_t  f1bc3x_b7_d1;
+	uint8_t  f1bc4x_b7_d1;
+	uint8_t  f1bc5x_b7_d1;
+	uint8_t  f1bc8x_b7_d1;
+	uint8_t  f1bc9x_b7_d1;
+	uint8_t  f1bcax_b7_d1;
+	uint8_t  f1bcbx_b7_d1;
+	uint8_t  f2bc2x_b7_d1;
+	uint8_t  f2bc3x_b7_d1;
+	uint8_t  f2bc4x_b7_d1;
+	uint8_t  f2bc5x_b7_d1;
+	uint8_t  f2bc8x_b7_d1;
+	uint8_t  f2bc9x_b7_d1;
+	uint8_t  f2bcax_b7_d1;
+	uint8_t  f2bcbx_b7_d1;
+	uint8_t  f3bc2x_b7_d1;
+	uint8_t  f3bc3x_b7_d1;
+	uint8_t  f3bc4x_b7_d1;
+	uint8_t  f3bc5x_b7_d1;
+	uint8_t  f3bc8x_b7_d1;
+	uint8_t  f3bc9x_b7_d1;
+	uint8_t  f3bcax_b7_d1;
+	uint8_t  f3bcbx_b7_d1;
+	uint8_t  f0bc2x_b8_d1;
+	uint8_t  f0bc3x_b8_d1;
+	uint8_t  f0bc4x_b8_d1;
+	uint8_t  f0bc5x_b8_d1;
+	uint8_t  f0bc8x_b8_d1;
+	uint8_t  f0bc9x_b8_d1;
+	uint8_t  f0bcax_b8_d1;
+	uint8_t  f0bcbx_b8_d1;
+	uint8_t  f1bc2x_b8_d1;
+	uint8_t  f1bc3x_b8_d1;
+	uint8_t  f1bc4x_b8_d1;
+	uint8_t  f1bc5x_b8_d1;
+	uint8_t  f1bc8x_b8_d1;
+	uint8_t  f1bc9x_b8_d1;
+	uint8_t  f1bcax_b8_d1;
+	uint8_t  f1bcbx_b8_d1;
+	uint8_t  f2bc2x_b8_d1;
+	uint8_t  f2bc3x_b8_d1;
+	uint8_t  f2bc4x_b8_d1;
+	uint8_t  f2bc5x_b8_d1;
+	uint8_t  f2bc8x_b8_d1;
+	uint8_t  f2bc9x_b8_d1;
+	uint8_t  f2bcax_b8_d1;
+	uint8_t  f2bcbx_b8_d1;
+	uint8_t  f3bc2x_b8_d1;
+	uint8_t  f3bc3x_b8_d1;
+	uint8_t  f3bc4x_b8_d1;
+	uint8_t  f3bc5x_b8_d1;
+	uint8_t  f3bc8x_b8_d1;
+	uint8_t  f3bc9x_b8_d1;
+	uint8_t  f3bcax_b8_d1;
+	uint8_t  f3bcbx_b8_d1;
+	uint8_t  f5bc5x_d1;
+	uint8_t  f5bc6x_d1;
+	uint8_t  f4bc8x_d1;
+	uint8_t  f4bc9x_d1;
+	uint8_t  f4bcax_d1;
+	uint8_t  f4bcbx_d1;
+	uint8_t  f4bccx_d1;
+	uint8_t  f4bcdx_d1;
+	uint8_t  f4bcex_d1;
+	uint8_t  f4bcfx_d1;
+	uint8_t  f5bc8x_d1;
+	uint8_t  f5bc9x_d1;
+	uint8_t  f5bcax_d1;
+	uint8_t  f5bcbx_d1;
+	uint8_t  f5bccx_d1;
+	uint8_t  f5bcdx_d1;
+	uint8_t  f5bcex_d1;
+	uint8_t  f5bcfx_d1;
+	uint8_t  f6bc8x_d1;
+	uint8_t  f6bc9x_d1;
+	uint8_t  f6bcax_d1;
+	uint8_t  f6bcbx_d1;
+	uint8_t  f6bccx_d1;
+	uint8_t  f6bcdx_d1;
+	uint8_t  f6bcex_d1;
+	uint8_t  f6bcfx_d1;
+	uint8_t  f7bc8x_d1;
+	uint8_t  f7bc9x_d1;
+	uint8_t  f7bcax_d1;
+	uint8_t  f7bcbx_d1;
+	uint8_t  f7bccx_d1;
+	uint8_t  f7bcdx_d1;
+	uint8_t  f7bcex_d1;
+	uint8_t  f7bcfx_d1;
+	uint16_t alt_cas_l;
+	uint8_t  alt_wcas_l;
+	uint8_t  d4misc;
+} __packed;
+
+struct ddr4lr2d {
+	uint8_t  reserved00;
+	uint8_t  msg_misc;
+	uint16_t pmu_revision;
+	uint8_t  pstate;
+	uint8_t  pll_bypass_en;
+	uint16_t dramfreq;
+	uint8_t  dfi_freq_ratio;
+	uint8_t  bpznres_val;
+	uint8_t  phy_odt_impedance;
+	uint8_t  phy_drv_impedance;
+	uint8_t  phy_vref;
+	uint8_t  dram_type;
+	uint8_t  disabled_dbyte;
+	uint8_t  enabled_dqs;
+	uint8_t  cs_present;
+	uint8_t  cs_present_d0;
+	uint8_t  cs_present_d1;
+	uint8_t  addr_mirror;
+	uint8_t  cs_test_fail;
+	uint8_t  phy_cfg;
+	uint16_t sequence_ctrl;
+	uint8_t  hdt_ctrl;
+	uint8_t  rx2d_train_opt;
+	uint8_t  tx2d_train_opt;
+	uint8_t  share2dvref_result;
+	uint8_t  delay_weight2d;
+	uint8_t  voltage_weight2d;
+	uint8_t  reserved1e[0x22 - 0x1e];
+	uint16_t phy_config_override;
+	uint8_t  dfimrlmargin;
+	uint8_t  r0_rx_clk_dly_margin;
+	uint8_t  r0_vref_dac_margin;
+	uint8_t  r0_tx_dq_dly_margin;
+	uint8_t  r0_device_vref_margin;
+	uint8_t  reserved29[0x33 - 0x29];
+	uint8_t  r1_rx_clk_dly_margin;
+	uint8_t  r1_vref_dac_margin;
+	uint8_t  r1_tx_dq_dly_margin;
+	uint8_t  r1_device_vref_margin;
+	uint8_t  reserved37[0x41 - 0x37];
+	uint8_t  r2_rx_clk_dly_margin;
+	uint8_t  r2_vref_dac_margin;
+	uint8_t  r2_tx_dq_dly_margin;
+	uint8_t  r2_device_vref_margin;
+	uint8_t  reserved45[0x4f - 0x45];
+	uint8_t  r3_rx_clk_dly_margin;
+	uint8_t  r3_vref_dac_margin;
+	uint8_t  r3_tx_dq_dly_margin;
+	uint8_t  r3_device_vref_margin;
+	uint8_t  reserved53[0x5e - 0x53];
+	uint16_t mr0;
+	uint16_t mr1;
+	uint16_t mr2;
+	uint16_t mr3;
+	uint16_t mr4;
+	uint16_t mr5;
+	uint16_t mr6;
+	uint8_t  x16present;
+	uint8_t  cs_setup_gddec;
+	uint16_t rtt_nom_wr_park0;
+	uint16_t rtt_nom_wr_park1;
+	uint16_t rtt_nom_wr_park2;
+	uint16_t rtt_nom_wr_park3;
+	uint16_t rtt_nom_wr_park4;
+	uint16_t rtt_nom_wr_park5;
+	uint16_t rtt_nom_wr_park6;
+	uint16_t rtt_nom_wr_park7;
+	uint8_t  acsm_odt_ctrl0;
+	uint8_t  acsm_odt_ctrl1;
+	uint8_t  acsm_odt_ctrl2;
+	uint8_t  acsm_odt_ctrl3;
+	uint8_t  acsm_odt_ctrl4;
+	uint8_t  acsm_odt_ctrl5;
+	uint8_t  acsm_odt_ctrl6;
+	uint8_t  acsm_odt_ctrl7;
+	uint8_t  vref_dq_r0nib0;
+	uint8_t  vref_dq_r0nib1;
+	uint8_t  vref_dq_r0nib2;
+	uint8_t  vref_dq_r0nib3;
+	uint8_t  vref_dq_r0nib4;
+	uint8_t  vref_dq_r0nib5;
+	uint8_t  vref_dq_r0nib6;
+	uint8_t  vref_dq_r0nib7;
+	uint8_t  vref_dq_r0nib8;
+	uint8_t  vref_dq_r0nib9;
+	uint8_t  vref_dq_r0nib10;
+	uint8_t  vref_dq_r0nib11;
+	uint8_t  vref_dq_r0nib12;
+	uint8_t  vref_dq_r0nib13;
+	uint8_t  vref_dq_r0nib14;
+	uint8_t  vref_dq_r0nib15;
+	uint8_t  vref_dq_r0nib16;
+	uint8_t  vref_dq_r0nib17;
+	uint8_t  vref_dq_r0nib18;
+	uint8_t  vref_dq_r0nib19;
+	uint8_t  vref_dq_r1nib0;
+	uint8_t  vref_dq_r1nib1;
+	uint8_t  vref_dq_r1nib2;
+	uint8_t  vref_dq_r1nib3;
+	uint8_t  vref_dq_r1nib4;
+	uint8_t  vref_dq_r1nib5;
+	uint8_t  vref_dq_r1nib6;
+	uint8_t  vref_dq_r1nib7;
+	uint8_t  vref_dq_r1nib8;
+	uint8_t  vref_dq_r1nib9;
+	uint8_t  vref_dq_r1nib10;
+	uint8_t  vref_dq_r1nib11;
+	uint8_t  vref_dq_r1nib12;
+	uint8_t  vref_dq_r1nib13;
+	uint8_t  vref_dq_r1nib14;
+	uint8_t  vref_dq_r1nib15;
+	uint8_t  vref_dq_r1nib16;
+	uint8_t  vref_dq_r1nib17;
+	uint8_t  vref_dq_r1nib18;
+	uint8_t  vref_dq_r1nib19;
+	uint8_t  vref_dq_r2nib0;
+	uint8_t  vref_dq_r2nib1;
+	uint8_t  vref_dq_r2nib2;
+	uint8_t  vref_dq_r2nib3;
+	uint8_t  vref_dq_r2nib4;
+	uint8_t  vref_dq_r2nib5;
+	uint8_t  vref_dq_r2nib6;
+	uint8_t  vref_dq_r2nib7;
+	uint8_t  vref_dq_r2nib8;
+	uint8_t  vref_dq_r2nib9;
+	uint8_t  vref_dq_r2nib10;
+	uint8_t  vref_dq_r2nib11;
+	uint8_t  vref_dq_r2nib12;
+	uint8_t  vref_dq_r2nib13;
+	uint8_t  vref_dq_r2nib14;
+	uint8_t  vref_dq_r2nib15;
+	uint8_t  vref_dq_r2nib16;
+	uint8_t  vref_dq_r2nib17;
+	uint8_t  vref_dq_r2nib18;
+	uint8_t  vref_dq_r2nib19;
+	uint8_t  vref_dq_r3nib0;
+	uint8_t  vref_dq_r3nib1;
+	uint8_t  vref_dq_r3nib2;
+	uint8_t  vref_dq_r3nib3;
+	uint8_t  vref_dq_r3nib4;
+	uint8_t  vref_dq_r3nib5;
+	uint8_t  vref_dq_r3nib6;
+	uint8_t  vref_dq_r3nib7;
+	uint8_t  vref_dq_r3nib8;
+	uint8_t  vref_dq_r3nib9;
+	uint8_t  vref_dq_r3nib10;
+	uint8_t  vref_dq_r3nib11;
+	uint8_t  vref_dq_r3nib12;
+	uint8_t  vref_dq_r3nib13;
+	uint8_t  vref_dq_r3nib14;
+	uint8_t  vref_dq_r3nib15;
+	uint8_t  vref_dq_r3nib16;
+	uint8_t  vref_dq_r3nib17;
+	uint8_t  vref_dq_r3nib18;
+	uint8_t  vref_dq_r3nib19;
+	uint8_t  f0rc00_d0;
+	uint8_t  f0rc01_d0;
+	uint8_t  f0rc02_d0;
+	uint8_t  f0rc03_d0;
+	uint8_t  f0rc04_d0;
+	uint8_t  f0rc05_d0;
+	uint8_t  f0rc06_d0;
+	uint8_t  f0rc07_d0;
+	uint8_t  f0rc08_d0;
+	uint8_t  f0rc09_d0;
+	uint8_t  f0rc0a_d0;
+	uint8_t  f0rc0b_d0;
+	uint8_t  f0rc0c_d0;
+	uint8_t  f0rc0d_d0;
+	uint8_t  f0rc0e_d0;
+	uint8_t  f0rc0f_d0;
+	uint8_t  f0rc1x_d0;
+	uint8_t  f0rc2x_d0;
+	uint8_t  f0rc3x_d0;
+	uint8_t  f0rc4x_d0;
+	uint8_t  f0rc5x_d0;
+	uint8_t  f0rc6x_d0;
+	uint8_t  f0rc7x_d0;
+	uint8_t  f0rc8x_d0;
+	uint8_t  f0rc9x_d0;
+	uint8_t  f0rcax_d0;
+	uint8_t  f0rcbx_d0;
+	uint8_t  f1rc00_d0;
+	uint8_t  f1rc01_d0;
+	uint8_t  f1rc02_d0;
+	uint8_t  f1rc03_d0;
+	uint8_t  f1rc04_d0;
+	uint8_t  f1rc05_d0;
+	uint8_t  f1rc06_d0;
+	uint8_t  f1rc07_d0;
+	uint8_t  f1rc08_d0;
+	uint8_t  f1rc09_d0;
+	uint8_t  f1rc0a_d0;
+	uint8_t  f1rc0b_d0;
+	uint8_t  f1rc0c_d0;
+	uint8_t  f1rc0d_d0;
+	uint8_t  f1rc0e_d0;
+	uint8_t  f1rc0f_d0;
+	uint8_t  f1rc1x_d0;
+	uint8_t  f1rc2x_d0;
+	uint8_t  f1rc3x_d0;
+	uint8_t  f1rc4x_d0;
+	uint8_t  f1rc5x_d0;
+	uint8_t  f1rc6x_d0;
+	uint8_t  f1rc7x_d0;
+	uint8_t  f1rc8x_d0;
+	uint8_t  f1rc9x_d0;
+	uint8_t  f1rcax_d0;
+	uint8_t  f1rcbx_d0;
+	uint8_t  f0rc00_d1;
+	uint8_t  f0rc01_d1;
+	uint8_t  f0rc02_d1;
+	uint8_t  f0rc03_d1;
+	uint8_t  f0rc04_d1;
+	uint8_t  f0rc05_d1;
+	uint8_t  f0rc06_d1;
+	uint8_t  f0rc07_d1;
+	uint8_t  f0rc08_d1;
+	uint8_t  f0rc09_d1;
+	uint8_t  f0rc0a_d1;
+	uint8_t  f0rc0b_d1;
+	uint8_t  f0rc0c_d1;
+	uint8_t  f0rc0d_d1;
+	uint8_t  f0rc0e_d1;
+	uint8_t  f0rc0f_d1;
+	uint8_t  f0rc1x_d1;
+	uint8_t  f0rc2x_d1;
+	uint8_t  f0rc3x_d1;
+	uint8_t  f0rc4x_d1;
+	uint8_t  f0rc5x_d1;
+	uint8_t  f0rc6x_d1;
+	uint8_t  f0rc7x_d1;
+	uint8_t  f0rc8x_d1;
+	uint8_t  f0rc9x_d1;
+	uint8_t  f0rcax_d1;
+	uint8_t  f0rcbx_d1;
+	uint8_t  f1rc00_d1;
+	uint8_t  f1rc01_d1;
+	uint8_t  f1rc02_d1;
+	uint8_t  f1rc03_d1;
+	uint8_t  f1rc04_d1;
+	uint8_t  f1rc05_d1;
+	uint8_t  f1rc06_d1;
+	uint8_t  f1rc07_d1;
+	uint8_t  f1rc08_d1;
+	uint8_t  f1rc09_d1;
+	uint8_t  f1rc0a_d1;
+	uint8_t  f1rc0b_d1;
+	uint8_t  f1rc0c_d1;
+	uint8_t  f1rc0d_d1;
+	uint8_t  f1rc0e_d1;
+	uint8_t  f1rc0f_d1;
+	uint8_t  f1rc1x_d1;
+	uint8_t  f1rc2x_d1;
+	uint8_t  f1rc3x_d1;
+	uint8_t  f1rc4x_d1;
+	uint8_t  f1rc5x_d1;
+	uint8_t  f1rc6x_d1;
+	uint8_t  f1rc7x_d1;
+	uint8_t  f1rc8x_d1;
+	uint8_t  f1rc9x_d1;
+	uint8_t  f1rcax_d1;
+	uint8_t  f1rcbx_d1;
+	uint8_t  bc00_d0;
+	uint8_t  bc01_d0;
+	uint8_t  bc02_d0;
+	uint8_t  bc03_d0;
+	uint8_t  bc04_d0;
+	uint8_t  bc05_d0;
+	uint8_t  bc06_d0;
+	uint8_t  bc07_d0;
+	uint8_t  bc08_d0;
+	uint8_t  bc09_d0;
+	uint8_t  bc0a_d0;
+	uint8_t  bc0b_d0;
+	uint8_t  bc0c_d0;
+	uint8_t  bc0d_d0;
+	uint8_t  bc0e_d0;
+	uint8_t  f0bc6x_d0;
+	uint8_t  f0bccx_d0;
+	uint8_t  f0bcdx_d0;
+	uint8_t  f0bcex_d0;
+	uint8_t  f0bcfx_d0;
+	uint8_t  f1bccx_d0;
+	uint8_t  f1bcdx_d0;
+	uint8_t  f1bcex_d0;
+	uint8_t  f1bcfx_d0;
+	uint8_t  f0bc2x_b0_d0;
+	uint8_t  f0bc3x_b0_d0;
+	uint8_t  f0bc4x_b0_d0;
+	uint8_t  f0bc5x_b0_d0;
+	uint8_t  f0bc8x_b0_d0;
+	uint8_t  f0bc9x_b0_d0;
+	uint8_t  f0bcax_b0_d0;
+	uint8_t  f0bcbx_b0_d0;
+	uint8_t  f1bc2x_b0_d0;
+	uint8_t  f1bc3x_b0_d0;
+	uint8_t  f1bc4x_b0_d0;
+	uint8_t  f1bc5x_b0_d0;
+	uint8_t  f1bc8x_b0_d0;
+	uint8_t  f1bc9x_b0_d0;
+	uint8_t  f1bcax_b0_d0;
+	uint8_t  f1bcbx_b0_d0;
+	uint8_t  f2bc2x_b0_d0;
+	uint8_t  f2bc3x_b0_d0;
+	uint8_t  f2bc4x_b0_d0;
+	uint8_t  f2bc5x_b0_d0;
+	uint8_t  f2bc8x_b0_d0;
+	uint8_t  f2bc9x_b0_d0;
+	uint8_t  f2bcax_b0_d0;
+	uint8_t  f2bcbx_b0_d0;
+	uint8_t  f3bc2x_b0_d0;
+	uint8_t  f3bc3x_b0_d0;
+	uint8_t  f3bc4x_b0_d0;
+	uint8_t  f3bc5x_b0_d0;
+	uint8_t  f3bc8x_b0_d0;
+	uint8_t  f3bc9x_b0_d0;
+	uint8_t  f3bcax_b0_d0;
+	uint8_t  f3bcbx_b0_d0;
+	uint8_t  f0bc2x_b1_d0;
+	uint8_t  f0bc3x_b1_d0;
+	uint8_t  f0bc4x_b1_d0;
+	uint8_t  f0bc5x_b1_d0;
+	uint8_t  f0bc8x_b1_d0;
+	uint8_t  f0bc9x_b1_d0;
+	uint8_t  f0bcax_b1_d0;
+	uint8_t  f0bcbx_b1_d0;
+	uint8_t  f1bc2x_b1_d0;
+	uint8_t  f1bc3x_b1_d0;
+	uint8_t  f1bc4x_b1_d0;
+	uint8_t  f1bc5x_b1_d0;
+	uint8_t  f1bc8x_b1_d0;
+	uint8_t  f1bc9x_b1_d0;
+	uint8_t  f1bcax_b1_d0;
+	uint8_t  f1bcbx_b1_d0;
+	uint8_t  f2bc2x_b1_d0;
+	uint8_t  f2bc3x_b1_d0;
+	uint8_t  f2bc4x_b1_d0;
+	uint8_t  f2bc5x_b1_d0;
+	uint8_t  f2bc8x_b1_d0;
+	uint8_t  f2bc9x_b1_d0;
+	uint8_t  f2bcax_b1_d0;
+	uint8_t  f2bcbx_b1_d0;
+	uint8_t  f3bc2x_b1_d0;
+	uint8_t  f3bc3x_b1_d0;
+	uint8_t  f3bc4x_b1_d0;
+	uint8_t  f3bc5x_b1_d0;
+	uint8_t  f3bc8x_b1_d0;
+	uint8_t  f3bc9x_b1_d0;
+	uint8_t  f3bcax_b1_d0;
+	uint8_t  f3bcbx_b1_d0;
+	uint8_t  f0bc2x_b2_d0;
+	uint8_t  f0bc3x_b2_d0;
+	uint8_t  f0bc4x_b2_d0;
+	uint8_t  f0bc5x_b2_d0;
+	uint8_t  f0bc8x_b2_d0;
+	uint8_t  f0bc9x_b2_d0;
+	uint8_t  f0bcax_b2_d0;
+	uint8_t  f0bcbx_b2_d0;
+	uint8_t  f1bc2x_b2_d0;
+	uint8_t  f1bc3x_b2_d0;
+	uint8_t  f1bc4x_b2_d0;
+	uint8_t  f1bc5x_b2_d0;
+	uint8_t  f1bc8x_b2_d0;
+	uint8_t  f1bc9x_b2_d0;
+	uint8_t  f1bcax_b2_d0;
+	uint8_t  f1bcbx_b2_d0;
+	uint8_t  f2bc2x_b2_d0;
+	uint8_t  f2bc3x_b2_d0;
+	uint8_t  f2bc4x_b2_d0;
+	uint8_t  f2bc5x_b2_d0;
+	uint8_t  f2bc8x_b2_d0;
+	uint8_t  f2bc9x_b2_d0;
+	uint8_t  f2bcax_b2_d0;
+	uint8_t  f2bcbx_b2_d0;
+	uint8_t  f3bc2x_b2_d0;
+	uint8_t  f3bc3x_b2_d0;
+	uint8_t  f3bc4x_b2_d0;
+	uint8_t  f3bc5x_b2_d0;
+	uint8_t  f3bc8x_b2_d0;
+	uint8_t  f3bc9x_b2_d0;
+	uint8_t  f3bcax_b2_d0;
+	uint8_t  f3bcbx_b2_d0;
+	uint8_t  f0bc2x_b3_d0;
+	uint8_t  f0bc3x_b3_d0;
+	uint8_t  f0bc4x_b3_d0;
+	uint8_t  f0bc5x_b3_d0;
+	uint8_t  f0bc8x_b3_d0;
+	uint8_t  f0bc9x_b3_d0;
+	uint8_t  f0bcax_b3_d0;
+	uint8_t  f0bcbx_b3_d0;
+	uint8_t  f1bc2x_b3_d0;
+	uint8_t  f1bc3x_b3_d0;
+	uint8_t  f1bc4x_b3_d0;
+	uint8_t  f1bc5x_b3_d0;
+	uint8_t  f1bc8x_b3_d0;
+	uint8_t  f1bc9x_b3_d0;
+	uint8_t  f1bcax_b3_d0;
+	uint8_t  f1bcbx_b3_d0;
+	uint8_t  f2bc2x_b3_d0;
+	uint8_t  f2bc3x_b3_d0;
+	uint8_t  f2bc4x_b3_d0;
+	uint8_t  f2bc5x_b3_d0;
+	uint8_t  f2bc8x_b3_d0;
+	uint8_t  f2bc9x_b3_d0;
+	uint8_t  f2bcax_b3_d0;
+	uint8_t  f2bcbx_b3_d0;
+	uint8_t  f3bc2x_b3_d0;
+	uint8_t  f3bc3x_b3_d0;
+	uint8_t  f3bc4x_b3_d0;
+	uint8_t  f3bc5x_b3_d0;
+	uint8_t  f3bc8x_b3_d0;
+	uint8_t  f3bc9x_b3_d0;
+	uint8_t  f3bcax_b3_d0;
+	uint8_t  f3bcbx_b3_d0;
+	uint8_t  f0bc2x_b4_d0;
+	uint8_t  f0bc3x_b4_d0;
+	uint8_t  f0bc4x_b4_d0;
+	uint8_t  f0bc5x_b4_d0;
+	uint8_t  f0bc8x_b4_d0;
+	uint8_t  f0bc9x_b4_d0;
+	uint8_t  f0bcax_b4_d0;
+	uint8_t  f0bcbx_b4_d0;
+	uint8_t  f1bc2x_b4_d0;
+	uint8_t  f1bc3x_b4_d0;
+	uint8_t  f1bc4x_b4_d0;
+	uint8_t  f1bc5x_b4_d0;
+	uint8_t  f1bc8x_b4_d0;
+	uint8_t  f1bc9x_b4_d0;
+	uint8_t  f1bcax_b4_d0;
+	uint8_t  f1bcbx_b4_d0;
+	uint8_t  f2bc2x_b4_d0;
+	uint8_t  f2bc3x_b4_d0;
+	uint8_t  f2bc4x_b4_d0;
+	uint8_t  f2bc5x_b4_d0;
+	uint8_t  f2bc8x_b4_d0;
+	uint8_t  f2bc9x_b4_d0;
+	uint8_t  f2bcax_b4_d0;
+	uint8_t  f2bcbx_b4_d0;
+	uint8_t  f3bc2x_b4_d0;
+	uint8_t  f3bc3x_b4_d0;
+	uint8_t  f3bc4x_b4_d0;
+	uint8_t  f3bc5x_b4_d0;
+	uint8_t  f3bc8x_b4_d0;
+	uint8_t  f3bc9x_b4_d0;
+	uint8_t  f3bcax_b4_d0;
+	uint8_t  f3bcbx_b4_d0;
+	uint8_t  f0bc2x_b5_d0;
+	uint8_t  f0bc3x_b5_d0;
+	uint8_t  f0bc4x_b5_d0;
+	uint8_t  f0bc5x_b5_d0;
+	uint8_t  f0bc8x_b5_d0;
+	uint8_t  f0bc9x_b5_d0;
+	uint8_t  f0bcax_b5_d0;
+	uint8_t  f0bcbx_b5_d0;
+	uint8_t  f1bc2x_b5_d0;
+	uint8_t  f1bc3x_b5_d0;
+	uint8_t  f1bc4x_b5_d0;
+	uint8_t  f1bc5x_b5_d0;
+	uint8_t  f1bc8x_b5_d0;
+	uint8_t  f1bc9x_b5_d0;
+	uint8_t  f1bcax_b5_d0;
+	uint8_t  f1bcbx_b5_d0;
+	uint8_t  f2bc2x_b5_d0;
+	uint8_t  f2bc3x_b5_d0;
+	uint8_t  f2bc4x_b5_d0;
+	uint8_t  f2bc5x_b5_d0;
+	uint8_t  f2bc8x_b5_d0;
+	uint8_t  f2bc9x_b5_d0;
+	uint8_t  f2bcax_b5_d0;
+	uint8_t  f2bcbx_b5_d0;
+	uint8_t  f3bc2x_b5_d0;
+	uint8_t  f3bc3x_b5_d0;
+	uint8_t  f3bc4x_b5_d0;
+	uint8_t  f3bc5x_b5_d0;
+	uint8_t  f3bc8x_b5_d0;
+	uint8_t  f3bc9x_b5_d0;
+	uint8_t  f3bcax_b5_d0;
+	uint8_t  f3bcbx_b5_d0;
+	uint8_t  f0bc2x_b6_d0;
+	uint8_t  f0bc3x_b6_d0;
+	uint8_t  f0bc4x_b6_d0;
+	uint8_t  f0bc5x_b6_d0;
+	uint8_t  f0bc8x_b6_d0;
+	uint8_t  f0bc9x_b6_d0;
+	uint8_t  f0bcax_b6_d0;
+	uint8_t  f0bcbx_b6_d0;
+	uint8_t  f1bc2x_b6_d0;
+	uint8_t  f1bc3x_b6_d0;
+	uint8_t  f1bc4x_b6_d0;
+	uint8_t  f1bc5x_b6_d0;
+	uint8_t  f1bc8x_b6_d0;
+	uint8_t  f1bc9x_b6_d0;
+	uint8_t  f1bcax_b6_d0;
+	uint8_t  f1bcbx_b6_d0;
+	uint8_t  f2bc2x_b6_d0;
+	uint8_t  f2bc3x_b6_d0;
+	uint8_t  f2bc4x_b6_d0;
+	uint8_t  f2bc5x_b6_d0;
+	uint8_t  f2bc8x_b6_d0;
+	uint8_t  f2bc9x_b6_d0;
+	uint8_t  f2bcax_b6_d0;
+	uint8_t  f2bcbx_b6_d0;
+	uint8_t  f3bc2x_b6_d0;
+	uint8_t  f3bc3x_b6_d0;
+	uint8_t  f3bc4x_b6_d0;
+	uint8_t  f3bc5x_b6_d0;
+	uint8_t  f3bc8x_b6_d0;
+	uint8_t  f3bc9x_b6_d0;
+	uint8_t  f3bcax_b6_d0;
+	uint8_t  f3bcbx_b6_d0;
+	uint8_t  f0bc2x_b7_d0;
+	uint8_t  f0bc3x_b7_d0;
+	uint8_t  f0bc4x_b7_d0;
+	uint8_t  f0bc5x_b7_d0;
+	uint8_t  f0bc8x_b7_d0;
+	uint8_t  f0bc9x_b7_d0;
+	uint8_t  f0bcax_b7_d0;
+	uint8_t  f0bcbx_b7_d0;
+	uint8_t  f1bc2x_b7_d0;
+	uint8_t  f1bc3x_b7_d0;
+	uint8_t  f1bc4x_b7_d0;
+	uint8_t  f1bc5x_b7_d0;
+	uint8_t  f1bc8x_b7_d0;
+	uint8_t  f1bc9x_b7_d0;
+	uint8_t  f1bcax_b7_d0;
+	uint8_t  f1bcbx_b7_d0;
+	uint8_t  f2bc2x_b7_d0;
+	uint8_t  f2bc3x_b7_d0;
+	uint8_t  f2bc4x_b7_d0;
+	uint8_t  f2bc5x_b7_d0;
+	uint8_t  f2bc8x_b7_d0;
+	uint8_t  f2bc9x_b7_d0;
+	uint8_t  f2bcax_b7_d0;
+	uint8_t  f2bcbx_b7_d0;
+	uint8_t  f3bc2x_b7_d0;
+	uint8_t  f3bc3x_b7_d0;
+	uint8_t  f3bc4x_b7_d0;
+	uint8_t  f3bc5x_b7_d0;
+	uint8_t  f3bc8x_b7_d0;
+	uint8_t  f3bc9x_b7_d0;
+	uint8_t  f3bcax_b7_d0;
+	uint8_t  f3bcbx_b7_d0;
+	uint8_t  f0bc2x_b8_d0;
+	uint8_t  f0bc3x_b8_d0;
+	uint8_t  f0bc4x_b8_d0;
+	uint8_t  f0bc5x_b8_d0;
+	uint8_t  f0bc8x_b8_d0;
+	uint8_t  f0bc9x_b8_d0;
+	uint8_t  f0bcax_b8_d0;
+	uint8_t  f0bcbx_b8_d0;
+	uint8_t  f1bc2x_b8_d0;
+	uint8_t  f1bc3x_b8_d0;
+	uint8_t  f1bc4x_b8_d0;
+	uint8_t  f1bc5x_b8_d0;
+	uint8_t  f1bc8x_b8_d0;
+	uint8_t  f1bc9x_b8_d0;
+	uint8_t  f1bcax_b8_d0;
+	uint8_t  f1bcbx_b8_d0;
+	uint8_t  f2bc2x_b8_d0;
+	uint8_t  f2bc3x_b8_d0;
+	uint8_t  f2bc4x_b8_d0;
+	uint8_t  f2bc5x_b8_d0;
+	uint8_t  f2bc8x_b8_d0;
+	uint8_t  f2bc9x_b8_d0;
+	uint8_t  f2bcax_b8_d0;
+	uint8_t  f2bcbx_b8_d0;
+	uint8_t  f3bc2x_b8_d0;
+	uint8_t  f3bc3x_b8_d0;
+	uint8_t  f3bc4x_b8_d0;
+	uint8_t  f3bc5x_b8_d0;
+	uint8_t  f3bc8x_b8_d0;
+	uint8_t  f3bc9x_b8_d0;
+	uint8_t  f3bcax_b8_d0;
+	uint8_t  f3bcbx_b8_d0;
+	uint8_t  f5bc5x_d0;
+	uint8_t  f5bc6x_d0;
+	uint8_t  f4bc8x_d0;
+	uint8_t  f4bc9x_d0;
+	uint8_t  f4bcax_d0;
+	uint8_t  f4bcbx_d0;
+	uint8_t  f4bccx_d0;
+	uint8_t  f4bcdx_d0;
+	uint8_t  f4bcex_d0;
+	uint8_t  f4bcfx_d0;
+	uint8_t  f5bc8x_d0;
+	uint8_t  f5bc9x_d0;
+	uint8_t  f5bcax_d0;
+	uint8_t  f5bcbx_d0;
+	uint8_t  f5bccx_d0;
+	uint8_t  f5bcdx_d0;
+	uint8_t  f5bcex_d0;
+	uint8_t  f5bcfx_d0;
+	uint8_t  f6bc8x_d0;
+	uint8_t  f6bc9x_d0;
+	uint8_t  f6bcax_d0;
+	uint8_t  f6bcbx_d0;
+	uint8_t  f6bccx_d0;
+	uint8_t  f6bcdx_d0;
+	uint8_t  f6bcex_d0;
+	uint8_t  f6bcfx_d0;
+	uint8_t  f7bc8x_d0;
+	uint8_t  f7bc9x_d0;
+	uint8_t  f7bcax_d0;
+	uint8_t  f7bcbx_d0;
+	uint8_t  f7bccx_d0;
+	uint8_t  f7bcdx_d0;
+	uint8_t  f7bcex_d0;
+	uint8_t  f7bcfx_d0;
+	uint8_t  bc00_d1;
+	uint8_t  bc01_d1;
+	uint8_t  bc02_d1;
+	uint8_t  bc03_d1;
+	uint8_t  bc04_d1;
+	uint8_t  bc05_d1;
+	uint8_t  bc06_d1;
+	uint8_t  bc07_d1;
+	uint8_t  bc08_d1;
+	uint8_t  bc09_d1;
+	uint8_t  bc0a_d1;
+	uint8_t  bc0b_d1;
+	uint8_t  bc0c_d1;
+	uint8_t  bc0d_d1;
+	uint8_t  bc0e_d1;
+	uint8_t  f0bc6x_d1;
+	uint8_t  f0bccx_d1;
+	uint8_t  f0bcdx_d1;
+	uint8_t  f0bcex_d1;
+	uint8_t  f0bcfx_d1;
+	uint8_t  f1bccx_d1;
+	uint8_t  f1bcdx_d1;
+	uint8_t  f1bcex_d1;
+	uint8_t  f1bcfx_d1;
+	uint8_t  f0bc2x_b0_d1;
+	uint8_t  f0bc3x_b0_d1;
+	uint8_t  f0bc4x_b0_d1;
+	uint8_t  f0bc5x_b0_d1;
+	uint8_t  f0bc8x_b0_d1;
+	uint8_t  f0bc9x_b0_d1;
+	uint8_t  f0bcax_b0_d1;
+	uint8_t  f0bcbx_b0_d1;
+	uint8_t  f1bc2x_b0_d1;
+	uint8_t  f1bc3x_b0_d1;
+	uint8_t  f1bc4x_b0_d1;
+	uint8_t  f1bc5x_b0_d1;
+	uint8_t  f1bc8x_b0_d1;
+	uint8_t  f1bc9x_b0_d1;
+	uint8_t  f1bcax_b0_d1;
+	uint8_t  f1bcbx_b0_d1;
+	uint8_t  f2bc2x_b0_d1;
+	uint8_t  f2bc3x_b0_d1;
+	uint8_t  f2bc4x_b0_d1;
+	uint8_t  f2bc5x_b0_d1;
+	uint8_t  f2bc8x_b0_d1;
+	uint8_t  f2bc9x_b0_d1;
+	uint8_t  f2bcax_b0_d1;
+	uint8_t  f2bcbx_b0_d1;
+	uint8_t  f3bc2x_b0_d1;
+	uint8_t  f3bc3x_b0_d1;
+	uint8_t  f3bc4x_b0_d1;
+	uint8_t  f3bc5x_b0_d1;
+	uint8_t  f3bc8x_b0_d1;
+	uint8_t  f3bc9x_b0_d1;
+	uint8_t  f3bcax_b0_d1;
+	uint8_t  f3bcbx_b0_d1;
+	uint8_t  f0bc2x_b1_d1;
+	uint8_t  f0bc3x_b1_d1;
+	uint8_t  f0bc4x_b1_d1;
+	uint8_t  f0bc5x_b1_d1;
+	uint8_t  f0bc8x_b1_d1;
+	uint8_t  f0bc9x_b1_d1;
+	uint8_t  f0bcax_b1_d1;
+	uint8_t  f0bcbx_b1_d1;
+	uint8_t  f1bc2x_b1_d1;
+	uint8_t  f1bc3x_b1_d1;
+	uint8_t  f1bc4x_b1_d1;
+	uint8_t  f1bc5x_b1_d1;
+	uint8_t  f1bc8x_b1_d1;
+	uint8_t  f1bc9x_b1_d1;
+	uint8_t  f1bcax_b1_d1;
+	uint8_t  f1bcbx_b1_d1;
+	uint8_t  f2bc2x_b1_d1;
+	uint8_t  f2bc3x_b1_d1;
+	uint8_t  f2bc4x_b1_d1;
+	uint8_t  f2bc5x_b1_d1;
+	uint8_t  f2bc8x_b1_d1;
+	uint8_t  f2bc9x_b1_d1;
+	uint8_t  f2bcax_b1_d1;
+	uint8_t  f2bcbx_b1_d1;
+	uint8_t  f3bc2x_b1_d1;
+	uint8_t  f3bc3x_b1_d1;
+	uint8_t  f3bc4x_b1_d1;
+	uint8_t  f3bc5x_b1_d1;
+	uint8_t  f3bc8x_b1_d1;
+	uint8_t  f3bc9x_b1_d1;
+	uint8_t  f3bcax_b1_d1;
+	uint8_t  f3bcbx_b1_d1;
+	uint8_t  f0bc2x_b2_d1;
+	uint8_t  f0bc3x_b2_d1;
+	uint8_t  f0bc4x_b2_d1;
+	uint8_t  f0bc5x_b2_d1;
+	uint8_t  f0bc8x_b2_d1;
+	uint8_t  f0bc9x_b2_d1;
+	uint8_t  f0bcax_b2_d1;
+	uint8_t  f0bcbx_b2_d1;
+	uint8_t  f1bc2x_b2_d1;
+	uint8_t  f1bc3x_b2_d1;
+	uint8_t  f1bc4x_b2_d1;
+	uint8_t  f1bc5x_b2_d1;
+	uint8_t  f1bc8x_b2_d1;
+	uint8_t  f1bc9x_b2_d1;
+	uint8_t  f1bcax_b2_d1;
+	uint8_t  f1bcbx_b2_d1;
+	uint8_t  f2bc2x_b2_d1;
+	uint8_t  f2bc3x_b2_d1;
+	uint8_t  f2bc4x_b2_d1;
+	uint8_t  f2bc5x_b2_d1;
+	uint8_t  f2bc8x_b2_d1;
+	uint8_t  f2bc9x_b2_d1;
+	uint8_t  f2bcax_b2_d1;
+	uint8_t  f2bcbx_b2_d1;
+	uint8_t  f3bc2x_b2_d1;
+	uint8_t  f3bc3x_b2_d1;
+	uint8_t  f3bc4x_b2_d1;
+	uint8_t  f3bc5x_b2_d1;
+	uint8_t  f3bc8x_b2_d1;
+	uint8_t  f3bc9x_b2_d1;
+	uint8_t  f3bcax_b2_d1;
+	uint8_t  f3bcbx_b2_d1;
+	uint8_t  f0bc2x_b3_d1;
+	uint8_t  f0bc3x_b3_d1;
+	uint8_t  f0bc4x_b3_d1;
+	uint8_t  f0bc5x_b3_d1;
+	uint8_t  f0bc8x_b3_d1;
+	uint8_t  f0bc9x_b3_d1;
+	uint8_t  f0bcax_b3_d1;
+	uint8_t  f0bcbx_b3_d1;
+	uint8_t  f1bc2x_b3_d1;
+	uint8_t  f1bc3x_b3_d1;
+	uint8_t  f1bc4x_b3_d1;
+	uint8_t  f1bc5x_b3_d1;
+	uint8_t  f1bc8x_b3_d1;
+	uint8_t  f1bc9x_b3_d1;
+	uint8_t  f1bcax_b3_d1;
+	uint8_t  f1bcbx_b3_d1;
+	uint8_t  f2bc2x_b3_d1;
+	uint8_t  f2bc3x_b3_d1;
+	uint8_t  f2bc4x_b3_d1;
+	uint8_t  f2bc5x_b3_d1;
+	uint8_t  f2bc8x_b3_d1;
+	uint8_t  f2bc9x_b3_d1;
+	uint8_t  f2bcax_b3_d1;
+	uint8_t  f2bcbx_b3_d1;
+	uint8_t  f3bc2x_b3_d1;
+	uint8_t  f3bc3x_b3_d1;
+	uint8_t  f3bc4x_b3_d1;
+	uint8_t  f3bc5x_b3_d1;
+	uint8_t  f3bc8x_b3_d1;
+	uint8_t  f3bc9x_b3_d1;
+	uint8_t  f3bcax_b3_d1;
+	uint8_t  f3bcbx_b3_d1;
+	uint8_t  f0bc2x_b4_d1;
+	uint8_t  f0bc3x_b4_d1;
+	uint8_t  f0bc4x_b4_d1;
+	uint8_t  f0bc5x_b4_d1;
+	uint8_t  f0bc8x_b4_d1;
+	uint8_t  f0bc9x_b4_d1;
+	uint8_t  f0bcax_b4_d1;
+	uint8_t  f0bcbx_b4_d1;
+	uint8_t  f1bc2x_b4_d1;
+	uint8_t  f1bc3x_b4_d1;
+	uint8_t  f1bc4x_b4_d1;
+	uint8_t  f1bc5x_b4_d1;
+	uint8_t  f1bc8x_b4_d1;
+	uint8_t  f1bc9x_b4_d1;
+	uint8_t  f1bcax_b4_d1;
+	uint8_t  f1bcbx_b4_d1;
+	uint8_t  f2bc2x_b4_d1;
+	uint8_t  f2bc3x_b4_d1;
+	uint8_t  f2bc4x_b4_d1;
+	uint8_t  f2bc5x_b4_d1;
+	uint8_t  f2bc8x_b4_d1;
+	uint8_t  f2bc9x_b4_d1;
+	uint8_t  f2bcax_b4_d1;
+	uint8_t  f2bcbx_b4_d1;
+	uint8_t  f3bc2x_b4_d1;
+	uint8_t  f3bc3x_b4_d1;
+	uint8_t  f3bc4x_b4_d1;
+	uint8_t  f3bc5x_b4_d1;
+	uint8_t  f3bc8x_b4_d1;
+	uint8_t  f3bc9x_b4_d1;
+	uint8_t  f3bcax_b4_d1;
+	uint8_t  f3bcbx_b4_d1;
+	uint8_t  f0bc2x_b5_d1;
+	uint8_t  f0bc3x_b5_d1;
+	uint8_t  f0bc4x_b5_d1;
+	uint8_t  f0bc5x_b5_d1;
+	uint8_t  f0bc8x_b5_d1;
+	uint8_t  f0bc9x_b5_d1;
+	uint8_t  f0bcax_b5_d1;
+	uint8_t  f0bcbx_b5_d1;
+	uint8_t  f1bc2x_b5_d1;
+	uint8_t  f1bc3x_b5_d1;
+	uint8_t  f1bc4x_b5_d1;
+	uint8_t  f1bc5x_b5_d1;
+	uint8_t  f1bc8x_b5_d1;
+	uint8_t  f1bc9x_b5_d1;
+	uint8_t  f1bcax_b5_d1;
+	uint8_t  f1bcbx_b5_d1;
+	uint8_t  f2bc2x_b5_d1;
+	uint8_t  f2bc3x_b5_d1;
+	uint8_t  f2bc4x_b5_d1;
+	uint8_t  f2bc5x_b5_d1;
+	uint8_t  f2bc8x_b5_d1;
+	uint8_t  f2bc9x_b5_d1;
+	uint8_t  f2bcax_b5_d1;
+	uint8_t  f2bcbx_b5_d1;
+	uint8_t  f3bc2x_b5_d1;
+	uint8_t  f3bc3x_b5_d1;
+	uint8_t  f3bc4x_b5_d1;
+	uint8_t  f3bc5x_b5_d1;
+	uint8_t  f3bc8x_b5_d1;
+	uint8_t  f3bc9x_b5_d1;
+	uint8_t  f3bcax_b5_d1;
+	uint8_t  f3bcbx_b5_d1;
+	uint8_t  f0bc2x_b6_d1;
+	uint8_t  f0bc3x_b6_d1;
+	uint8_t  f0bc4x_b6_d1;
+	uint8_t  f0bc5x_b6_d1;
+	uint8_t  f0bc8x_b6_d1;
+	uint8_t  f0bc9x_b6_d1;
+	uint8_t  f0bcax_b6_d1;
+	uint8_t  f0bcbx_b6_d1;
+	uint8_t  f1bc2x_b6_d1;
+	uint8_t  f1bc3x_b6_d1;
+	uint8_t  f1bc4x_b6_d1;
+	uint8_t  f1bc5x_b6_d1;
+	uint8_t  f1bc8x_b6_d1;
+	uint8_t  f1bc9x_b6_d1;
+	uint8_t  f1bcax_b6_d1;
+	uint8_t  f1bcbx_b6_d1;
+	uint8_t  f2bc2x_b6_d1;
+	uint8_t  f2bc3x_b6_d1;
+	uint8_t  f2bc4x_b6_d1;
+	uint8_t  f2bc5x_b6_d1;
+	uint8_t  f2bc8x_b6_d1;
+	uint8_t  f2bc9x_b6_d1;
+	uint8_t  f2bcax_b6_d1;
+	uint8_t  f2bcbx_b6_d1;
+	uint8_t  f3bc2x_b6_d1;
+	uint8_t  f3bc3x_b6_d1;
+	uint8_t  f3bc4x_b6_d1;
+	uint8_t  f3bc5x_b6_d1;
+	uint8_t  f3bc8x_b6_d1;
+	uint8_t  f3bc9x_b6_d1;
+	uint8_t  f3bcax_b6_d1;
+	uint8_t  f3bcbx_b6_d1;
+	uint8_t  f0bc2x_b7_d1;
+	uint8_t  f0bc3x_b7_d1;
+	uint8_t  f0bc4x_b7_d1;
+	uint8_t  f0bc5x_b7_d1;
+	uint8_t  f0bc8x_b7_d1;
+	uint8_t  f0bc9x_b7_d1;
+	uint8_t  f0bcax_b7_d1;
+	uint8_t  f0bcbx_b7_d1;
+	uint8_t  f1bc2x_b7_d1;
+	uint8_t  f1bc3x_b7_d1;
+	uint8_t  f1bc4x_b7_d1;
+	uint8_t  f1bc5x_b7_d1;
+	uint8_t  f1bc8x_b7_d1;
+	uint8_t  f1bc9x_b7_d1;
+	uint8_t  f1bcax_b7_d1;
+	uint8_t  f1bcbx_b7_d1;
+	uint8_t  f2bc2x_b7_d1;
+	uint8_t  f2bc3x_b7_d1;
+	uint8_t  f2bc4x_b7_d1;
+	uint8_t  f2bc5x_b7_d1;
+	uint8_t  f2bc8x_b7_d1;
+	uint8_t  f2bc9x_b7_d1;
+	uint8_t  f2bcax_b7_d1;
+	uint8_t  f2bcbx_b7_d1;
+	uint8_t  f3bc2x_b7_d1;
+	uint8_t  f3bc3x_b7_d1;
+	uint8_t  f3bc4x_b7_d1;
+	uint8_t  f3bc5x_b7_d1;
+	uint8_t  f3bc8x_b7_d1;
+	uint8_t  f3bc9x_b7_d1;
+	uint8_t  f3bcax_b7_d1;
+	uint8_t  f3bcbx_b7_d1;
+	uint8_t  f0bc2x_b8_d1;
+	uint8_t  f0bc3x_b8_d1;
+	uint8_t  f0bc4x_b8_d1;
+	uint8_t  f0bc5x_b8_d1;
+	uint8_t  f0bc8x_b8_d1;
+	uint8_t  f0bc9x_b8_d1;
+	uint8_t  f0bcax_b8_d1;
+	uint8_t  f0bcbx_b8_d1;
+	uint8_t  f1bc2x_b8_d1;
+	uint8_t  f1bc3x_b8_d1;
+	uint8_t  f1bc4x_b8_d1;
+	uint8_t  f1bc5x_b8_d1;
+	uint8_t  f1bc8x_b8_d1;
+	uint8_t  f1bc9x_b8_d1;
+	uint8_t  f1bcax_b8_d1;
+	uint8_t  f1bcbx_b8_d1;
+	uint8_t  f2bc2x_b8_d1;
+	uint8_t  f2bc3x_b8_d1;
+	uint8_t  f2bc4x_b8_d1;
+	uint8_t  f2bc5x_b8_d1;
+	uint8_t  f2bc8x_b8_d1;
+	uint8_t  f2bc9x_b8_d1;
+	uint8_t  f2bcax_b8_d1;
+	uint8_t  f2bcbx_b8_d1;
+	uint8_t  f3bc2x_b8_d1;
+	uint8_t  f3bc3x_b8_d1;
+	uint8_t  f3bc4x_b8_d1;
+	uint8_t  f3bc5x_b8_d1;
+	uint8_t  f3bc8x_b8_d1;
+	uint8_t  f3bc9x_b8_d1;
+	uint8_t  f3bcax_b8_d1;
+	uint8_t  f3bcbx_b8_d1;
+	uint8_t  f5bc5x_d1;
+	uint8_t  f5bc6x_d1;
+	uint8_t  f4bc8x_d1;
+	uint8_t  f4bc9x_d1;
+	uint8_t  f4bcax_d1;
+	uint8_t  f4bcbx_d1;
+	uint8_t  f4bccx_d1;
+	uint8_t  f4bcdx_d1;
+	uint8_t  f4bcex_d1;
+	uint8_t  f4bcfx_d1;
+	uint8_t  f5bc8x_d1;
+	uint8_t  f5bc9x_d1;
+	uint8_t  f5bcax_d1;
+	uint8_t  f5bcbx_d1;
+	uint8_t  f5bccx_d1;
+	uint8_t  f5bcdx_d1;
+	uint8_t  f5bcex_d1;
+	uint8_t  f5bcfx_d1;
+	uint8_t  f6bc8x_d1;
+	uint8_t  f6bc9x_d1;
+	uint8_t  f6bcax_d1;
+	uint8_t  f6bcbx_d1;
+	uint8_t  f6bccx_d1;
+	uint8_t  f6bcdx_d1;
+	uint8_t  f6bcex_d1;
+	uint8_t  f6bcfx_d1;
+	uint8_t  f7bc8x_d1;
+	uint8_t  f7bc9x_d1;
+	uint8_t  f7bcax_d1;
+	uint8_t  f7bcbx_d1;
+	uint8_t  f7bccx_d1;
+	uint8_t  f7bcdx_d1;
+	uint8_t  f7bcex_d1;
+	uint8_t  f7bcfx_d1;
+	uint16_t alt_cas_l;
+	uint8_t  alt_wcas_l;
+	uint8_t  d4misc;
+} __packed;
+#endif
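A note on the layout of these message-block structures: the reserved members are sized by offset arithmetic (for example reserved1e[0x22 - 0x1e]), so each field's position doubles as its byte offset within the packed block. A small compile-time check in plain C11 can guard against accidental drift; the two offsets below are simply what the ddr4lr2d declaration above adds up to, not values taken from an external specification.

#include <stddef.h>

/*
 * Layout self-check sketch (plain C11): the reserved members are sized by
 * offset arithmetic, so each named field should land at the byte offset its
 * neighbours imply. Both offsets below are derived by summing the ddr4lr2d
 * declaration itself.
 */
_Static_assert(offsetof(struct ddr4lr2d, phy_config_override) == 0x22,
	       "2D message block: phy_config_override is misplaced");
_Static_assert(offsetof(struct ddr4lr2d, mr0) == 0x5e,
	       "2D message block: mr0 is misplaced");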
diff --git a/drivers/nxp/ddr/phy-gen2/ddrphy.mk b/drivers/nxp/ddr/phy-gen2/ddrphy.mk
new file mode 100644
index 0000000..ba5c774
--- /dev/null
+++ b/drivers/nxp/ddr/phy-gen2/ddrphy.mk
@@ -0,0 +1,20 @@
+#
+# Copyright 2021 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+#-----------------------------------------------------------------------------
+
+# SNPS ddr phy driver files
+
+DDR_PHY_C  =
+DDR_PHY_H  =
+
+$(DDR_PHY_C): $(DDR_PHY_H) $(COMMON_HDRS) src
+	@cp -r "$(DDR_PHY_PATH)/$@" "$(SRC_DIR)/$@"
+
+$(DDR_PHY_H): src
+	@cp -r "$(DDR_PHY_PATH)/$@" "$(SRC_DIR)/$@"
+
+#------------------------------------------------
diff --git a/drivers/nxp/ddr/phy-gen2/input.h b/drivers/nxp/ddr/phy-gen2/input.h
new file mode 100644
index 0000000..dbcd1ae
--- /dev/null
+++ b/drivers/nxp/ddr/phy-gen2/input.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2021 NXP
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef INPUT_H
+#define INPUT_H
+
+enum dram_types {
+	DDR4,
+	DDR3,
+	LPDDR4,
+	LPDDR3,
+	LPDDR2,
+	DDR5,
+};
+
+enum dimm_types {
+	UDIMM,
+	SODIMM,
+	RDIMM,
+	LRDIMM,
+	NODIMM,
+};
+
+struct input_basic {
+	enum dram_types dram_type;
+	enum dimm_types dimm_type;
+	int lp4x_mode;		/* 0x1 = lpddr4x mode, when dram_type is lpddr4.
+				 * Not used for protocols other than lpddr4.
+				 */
+	int num_dbyte;		/* number of dbytes physically instantiated */
+	int num_active_dbyte_dfi0;	/* number of active dbytes to be
+					 * controlled by dfi0
+					 */
+	int num_active_dbyte_dfi1;	/* number of active dbytes to be
+					 * controlled by dfi1. Not used for
+					 * protocols other than lpddr3 and
+					 * lpddr4
+					 */
+	int num_anib;		/* number of anibs physically instantiated */
+	int num_rank_dfi0;	/* number of ranks in dfi0 channel */
+	int num_rank_dfi1;	/* number of ranks in dfi1 channel */
+	int dram_data_width;	/* 4, 8, 16 or 32 depending on protocol and dram
+				 * type
+				 */
+	int num_pstates;
+	int frequency;		/* memclk frequency in mhz -- round up */
+	int pll_bypass;		/* pll bypass enable */
+	int dfi_freq_ratio;	/* selected dfi frequency ratio */
+	int dfi1exists;		/* whether the phy config has a dfi1 channel */
+	int train2d;
+	int hard_macro_ver;
+	int read_dbienable;
+	int dfi_mode;		/* no longer used */
+};
+
+struct input_advanced {
+	int d4rx_preamble_length;
+	int d4tx_preamble_length;
+	int ext_cal_res_val;	/* external pull-down resistor */
+	int is2ttiming;
+	int odtimpedance;
+	int tx_impedance;
+	int atx_impedance;
+	int mem_alert_en;
+	int mem_alert_puimp;
+	int mem_alert_vref_level;
+	int mem_alert_sync_bypass;
+	int dis_dyn_adr_tri;
+	int phy_mstr_train_interval;
+	int phy_mstr_max_req_to_ack;
+	int wdqsext;
+	int cal_interval;
+	int cal_once;
+	int dram_byte_swap;
+	int rx_en_back_off;
+	int train_sequence_ctrl;
+	int phy_gen2_umctl_opt;
+	int phy_gen2_umctl_f0rc5x;
+	int tx_slew_rise_dq;
+	int tx_slew_fall_dq;
+	int tx_slew_rise_ac;
+	int tx_slew_fall_ac;
+	int enable_high_clk_skew_fix;
+	int disable_unused_addr_lns;
+	int phy_init_sequence_num;
+	int cs_mode;		/* rdimm */
+	int cast_cs_to_cid;	/* rdimm */
+};
+
+struct input {
+	struct input_basic basic;
+	struct input_advanced adv;
+	unsigned int mr[7];
+	unsigned int cs_d0;
+	unsigned int cs_d1;
+	unsigned int mirror;
+	unsigned int odt[4];
+	unsigned int rcw[16];
+	unsigned int rcw3x;
+	unsigned int vref;
+};
+
+#endif
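The input structures above are plain configuration records consumed by the PHY training code. As a point of reference, a minimal sketch of filling them in for a single-rank, 72-bit DDR4 UDIMM follows; every numeric value and the example_fill_phy_input() helper are illustrative assumptions, only the field and enum names come from input.h.

#include <string.h>

#include "input.h"

/*
 * Illustrative only: fill the PHY input structure for a single-rank,
 * 72-bit (64-bit data plus ECC) DDR4 UDIMM with a 1600 MHz memclk.
 * All numeric values are placeholders chosen for the example.
 */
static void example_fill_phy_input(struct input *in)
{
	memset(in, 0, sizeof(*in));

	in->basic.dram_type = DDR4;
	in->basic.dimm_type = UDIMM;
	in->basic.num_dbyte = 9;		/* 8 data dbytes + 1 ECC dbyte */
	in->basic.num_active_dbyte_dfi0 = 9;
	in->basic.num_rank_dfi0 = 1;
	in->basic.dram_data_width = 8;		/* x8 DRAM devices */
	in->basic.num_pstates = 1;
	in->basic.frequency = 1600;		/* memclk in MHz, rounded up */
	in->basic.train2d = 1;			/* run the 2D training image */

	in->cs_d0 = 0x1;			/* one chip select on DIMM 0 */
	in->vref = 0x60;			/* placeholder DRAM VrefDQ code */
}

In a real integration these values would typically come from SPD parsing and the memory controller setup rather than constants; the sketch only shows which basic fields need to stay mutually consistent (num_dbyte with num_active_dbyte_dfi0, frequency with the intended data rate).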
diff --git a/drivers/nxp/ddr/phy-gen2/messages.h b/drivers/nxp/ddr/phy-gen2/messages.h
new file mode 100644
index 0000000..7dec7df
--- /dev/null
+++ b/drivers/nxp/ddr/phy-gen2/messages.h
@@ -0,0 +1,2909 @@
+/*
+ * Copyright 2021 NXP
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef MESSAGE_H
+#define MESSAGE_H
+
+#ifdef DEBUG
+struct phy_msg {
+	uint32_t index;
+	const char *msg;
+};
+
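Each table entry pairs a 32-bit PMU message identifier with a printf-style format string; judging by the entries, the low 16 bits of the identifier give the number of arguments the firmware streams with the message (for example 0x00ee000b carries eleven). A rough decoder sketch follows; get_mail() is a hypothetical stand-in for whatever reads the next mailbox word, and only the table lookup reflects this header.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#include "messages.h"	/* struct phy_msg and the message tables (DEBUG builds) */

/* Hypothetical helper: returns the next 32-bit word from the PMU mailbox. */
extern uint32_t get_mail(void);

static void decode_stream_message(const struct phy_msg *table, size_t entries)
{
	uint32_t id = get_mail();		/* message identifier */
	uint32_t nargs = id & 0xffffU;		/* low 16 bits: argument count */
	uint32_t args[16];			/* largest entry here uses 11 */
	const char *fmt = NULL;
	size_t stored;
	size_t i;

	/* Pull the announced number of arguments off the mailbox first. */
	for (stored = 0; stored < nargs &&
	     stored < sizeof(args) / sizeof(args[0]); stored++) {
		args[stored] = get_mail();
	}

	for (i = 0; i < entries; i++) {
		if (table[i].index == id) {
			fmt = table[i].msg;
			break;
		}
	}

	if (fmt == NULL) {
		printf("Unknown PMU message 0x%08x\n", (unsigned int)id);
		return;
	}

	/*
	 * A full decoder would substitute args[] into fmt; printing them
	 * side by side keeps the sketch short.
	 */
	printf("%s", fmt);
	for (i = 0; i < stored; i++) {
		printf("  arg[%zu] = 0x%x", i, (unsigned int)args[i]);
	}
	printf("\n");
}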
+static const struct phy_msg messages_1d[] = {
+	{0x00000001,
+	 "PMU1:prbsGenCtl:%x\n"
+	},
+	{0x00010000,
+	 "PMU1: loading 2D acsm sequence\n"
+	},
+	{0x00020000,
+	 "PMU1: loading 1D acsm sequence\n"
+	},
+	{0x00030002,
+	 "PMU3: %d memclocks @ %d to get half of 300ns\n"
+	},
+	{0x00040000,
+	 "PMU: Error: User requested MPR read pattern for read DQS training in DDR3 Mode\n"
+	},
+	{0x00050000,
+	 "PMU3: Running 1D search for left eye edge\n"
+	},
+	{0x00060001,
+	 "PMU1: In Phase Left Edge Search cs %d\n"
+	},
+	{0x00070001,
+	 "PMU1: Out of Phase Left Edge Search cs %d\n"
+	},
+	{0x00080000,
+	 "PMU3: Running 1D search for right eye edge\n"
+	},
+	{0x00090001,
+	 "PMU1: In Phase Right Edge Search cs %d\n"
+	},
+	{0x000a0001,
+	 "PMU1: Out of Phase Right Edge Search cs %d\n"
+	},
+	{0x000b0001,
+	 "PMU1: mxRdLat training pstate %d\n"
+	},
+	{0x000c0001,
+	 "PMU1: mxRdLat search for cs %d\n"
+	},
+	{0x000d0001,
+	 "PMU0: MaxRdLat non consistent DtsmLoThldXingInd 0x%03x\n"
+	},
+	{0x000e0003,
+	 "PMU4: CS %d Dbyte %d worked with DFIMRL = %d DFICLKs\n"
+	},
+	{0x000f0004,
+	 "PMU3: MaxRdLat Read Lane err mask for csn %d, DFIMRL %2d DFIClks, dbyte %d = 0x%03x\n"
+	},
+	{0x00100003,
+	 "PMU3: MaxRdLat Read Lane err mask for csn %d DFIMRL %2d, All dbytes = 0x%03x\n"
+	},
+	{0x00110001,
+	 "PMU: Error: CS%d failed to find a DFIMRL setting that worked for all bytes during MaxRdLat training\n"
+	},
+	{0x00120002,
+	 "PMU3: Smallest passing DFIMRL for all dbytes in CS%d = %d DFIClks\n"
+	},
+	{0x00130000,
+	 "PMU: Error: No passing DFIMRL value found for any chip select during MaxRdLat training\n"
+	},
+	{0x00140003,
+	 "PMU: Error: Dbyte %d lane %d txDqDly passing region is too small (width = %d)\n"
+	},
+	{0x00150006,
+	 "PMU10: Adjusting rxclkdly db %d nib %d from %d+%d=%d->%d\n"
+	},
+	{0x00160000,
+	 "PMU4: TxDqDly Passing Regions (EyeLeft EyeRight -> EyeCenter) Units=1/32 UI\n"
+	},
+	{0x00170005,
+	 "PMU4: DB %d Lane %d: %3d %3d -> %3d\n"
+	},
+	{0x00180002,
+	 "PMU2: TXDQ delayLeft[%2d] = %3d (DISCONNECTED)\n"
+	},
+	{0x00190004,
+	 "PMU2: TXDQ delayLeft[%2d] = %3d oopScaled = %3d selectOop %d\n"
+	},
+	{0x001a0002,
+	 "PMU2: TXDQ delayRight[%2d] = %3d (DISCONNECTED)\n"
+	},
+	{0x001b0004,
+	 "PMU2: TXDQ delayRight[%2d] = %3d oopScaled = %3d selectOop %d\n"
+	},
+	{0x001c0003,
+	 "PMU: Error: Dbyte %d lane %d txDqDly passing region is too small (width = %d)\n"
+	},
+	{0x001d0000,
+	 "PMU4: TxDqDly Passing Regions (EyeLeft EyeRight -> EyeCenter) Units=1/32 UI\n"
+	},
+	{0x001e0002,
+	 "PMU4: DB %d Lane %d: (DISCONNECTED)\n"
+	},
+	{0x001f0005,
+	 "PMU4: DB %d Lane %d: %3d %3d -> %3d\n"
+	},
+	{0x00200002,
+	 "PMU3: Running 1D search csn %d for DM Right/NotLeft(%d) eye edge\n"
+	},
+	{0x00210002,
+	 "PMU3: WrDq DM byte%2d with Errcnt %d\n"
+	},
+	{0x00220002,
+	 "PMU3: WrDq DM byte%2d avgDly 0x%04x\n"
+	},
+	{0x00230002,
+	 "PMU1: WrDq DM byte%2d with Errcnt %d\n"
+	},
+	{0x00240001,
+	 "PMU: Error: Dbyte %d txDqDly DM training did not start inside the eye\n"
+	},
+	{0x00250000,
+	 "PMU4: DM TxDqDly Passing Regions (EyeLeft EyeRight -> EyeCenter) Units=1/32 UI\n"
+	},
+	{0x00260002,
+	 "PMU4: DB %d Lane %d: (DISCONNECTED)\n"
+	},
+	{0x00270005,
+	 "PMU4: DB %d Lane %d: %3d %3d -> %3d\n"
+	},
+	{0x00280003,
+	 "PMU: Error: Dbyte %d lane %d txDqDly DM passing region is too small (width = %d)\n"
+	},
+	{0x00290004,
+	 "PMU3: Errcnt for MRD/MWD search nib %2d delay = (%d, 0x%02x) = %d\n"
+	},
+	{0x002a0000,
+	 "PMU3: Precharge all open banks\n"
+	},
+	{0x002b0002,
+	 "PMU: Error: Dbyte %d nibble %d found mutliple working coarse delay setting for MRD/MWD\n"
+	},
+	{0x002c0000,
+	 "PMU4: MRD Passing Regions (coarseVal, fineLeft fineRight -> fineCenter)\n"
+	},
+	{0x002d0000,
+	 "PMU4: MWD Passing Regions (coarseVal, fineLeft fineRight -> fineCenter)\n"
+	},
+	{0x002e0004,
+	 "PMU10: Warning: DB %d nibble %d has multiple working coarse delays, %d and %d, choosing the smaller delay\n"
+	},
+	{0x002f0003,
+	 "PMU: Error: Dbyte %d nibble %d MRD/MWD passing region is too small (width = %d)\n"
+	},
+	{0x00300006,
+	 "PMU4: DB %d nibble %d: %3d, %3d %3d -> %3d\n"
+	},
+	{0x00310002,
+	 "PMU1: Start MRD/nMWD %d for csn %d\n"
+	},
+	{0x00320002,
+	 "PMU2: RXDQS delayLeft[%2d] = %3d (DISCONNECTED)\n"
+	},
+	{0x00330006,
+	 "PMU2: RXDQS delayLeft[%2d] = %3d delayOop[%2d] = %3d OopScaled %4d, selectOop %d\n"
+	},
+	{0x00340002,
+	 "PMU2: RXDQS delayRight[%2d] = %3d (DISCONNECTED)\n"
+	},
+	{0x00350006,
+	 "PMU2: RXDQS delayRight[%2d] = %3d delayOop[%2d] = %4d OopScaled %4d, selectOop %d\n"
+	},
+	{0x00360000,
+	 "PMU4: RxClkDly Passing Regions (EyeLeft EyeRight -> EyeCenter)\n"
+	},
+	{0x00370002,
+	 "PMU4: DB %d nibble %d: (DISCONNECTED)\n"
+	},
+	{0x00380005,
+	 "PMU4: DB %d nibble %d: %3d %3d -> %3d\n"
+	},
+	{0x00390003,
+	 "PMU: Error: Dbyte %d nibble %d rxClkDly passing region is too small (width = %d)\n"
+	},
+	{0x003a0002,
+	 "PMU0: goodbar = %d for RDWR_BLEN %d\n"
+	},
+	{0x003b0001,
+	 "PMU3: RxClkDly = %d\n"
+	},
+	{0x003c0005,
+	 "PMU0: db %d l %d absLane %d -> bottom %d top %d\n"
+	},
+	{0x003d0009,
+	 "PMU3: BYTE %d - %3d %3d %3d %3d %3d %3d %3d %3d\n"
+	},
+	{0x003e0002,
+	 "PMU: Error: dbyte %d lane %d's per-lane vrefDAC's had no passing region\n"
+	},
+	{0x003f0004,
+	 "PMU0: db%d l%d - %d %d\n"
+	},
+	{0x00400002,
+	 "PMU0: goodbar = %d for RDWR_BLEN %d\n"
+	},
+	{0x00410004,
+	 "PMU3: db%d l%d saw %d issues at rxClkDly %d\n"
+	},
+	{0x00420003,
+	 "PMU3: db%d l%d first saw a pass->fail edge at rxClkDly %d\n"
+	},
+	{0x00430002,
+	 "PMU3: lane %d PBD = %d\n"
+	},
+	{0x00440003,
+	 "PMU3: db%d l%d first saw a DBI pass->fail edge at rxClkDly %d\n"
+	},
+	{0x00450003,
+	 "PMU2: db%d l%d already passed rxPBD = %d\n"
+	},
+	{0x00460003,
+	 "PMU0: db%d l%d, PBD = %d\n"
+	},
+	{0x00470002,
+	 "PMU: Error: dbyte %d lane %d failed read deskew\n"
+	},
+	{0x00480003,
+	 "PMU0: db%d l%d, inc PBD = %d\n"
+	},
+	{0x00490003,
+	 "PMU1: Running lane deskew on pstate %d csn %d rdDBIEn %d\n"
+	},
+	{0x004a0000,
+	 "PMU: Error: Read deskew training has been requested, but csrMajorModeDbyte[2] is set\n"
+	},
+	{0x004b0002,
+	 "PMU1: AcsmCsMapCtrl%02d 0x%04x\n"
+	},
+	{0x004c0002,
+	 "PMU1: AcsmCsMapCtrl%02d 0x%04x\n"
+	},
+	{0x004d0001,
+	 "PMU: Error: Wrong PMU image loaded. message Block DramType = 0x%02x, but image built for D3U Type\n"
+	},
+	{0x004e0001,
+	 "PMU: Error: Wrong PMU image loaded. message Block DramType = 0x%02x, but image built for D3R Type\n"
+	},
+	{0x004f0001,
+	 "PMU: Error: Wrong PMU image loaded. message Block DramType = 0x%02x, but image built for D4U Type\n"
+	},
+	{0x00500001,
+	 "PMU: Error: Wrong PMU image loaded. message Block DramType = 0x%02x, but image built for D4R Type\n"
+	},
+	{0x00510001,
+	 "PMU: Error: Wrong PMU image loaded. message Block DramType = 0x%02x, but image built for D4LR Type\n"
+	},
+	{0x00520000,
+	 "PMU: Error: Both 2t timing mode and ddr4 geardown mode specified in the messageblock's PhyCfg and MR3 fields. Only one can be enabled\n"
+	},
+	{0x00530003,
+	 "PMU10: PHY TOTALS - NUM_DBYTES %d NUM_NIBBLES %d NUM_ANIBS %d\n"
+	},
+	{0x00540006,
+	 "PMU10: CSA=0x%02x, CSB=0x%02x, TSTAGES=0x%04x, HDTOUT=%d, MMISC=%d DRAMFreq=%dMT DramType=LPDDR3\n"
+	},
+	{0x00550006,
+	 "PMU10: CSA=0x%02x, CSB=0x%02x, TSTAGES=0x%04x, HDTOUT=%d, MMISC=%d DRAMFreq=%dMT DramType=LPDDR4\n"
+	},
+	{0x00560008,
+	 "PMU10: CS=0x%02x, TSTAGES=0x%04x, HDTOUT=%d, 2T=%d, MMISC=%d AddrMirror=%d DRAMFreq=%dMT DramType=%d\n"
+	},
+	{0x00570004,
+	 "PMU10: Pstate%d MR0=0x%04x MR1=0x%04x MR2=0x%04x\n"
+	},
+	{0x00580008,
+	 "PMU10: Pstate%d MRS MR0=0x%04x MR1=0x%04x MR2=0x%04x MR3=0x%04x MR4=0x%04x MR5=0x%04x MR6=0x%04x\n"
+	},
+	{0x00590005,
+	 "PMU10: Pstate%d MRS MR1_A0=0x%04x MR2_A0=0x%04x MR3_A0=0x%04x MR11_A0=0x%04x\n"
+	},
+	{0x005a0000,
+	 "PMU10: UseBroadcastMR set. All ranks and channels use MRXX_A0 for MR settings.\n"
+	},
+	{0x005b0005,
+	 "PMU10: Pstate%d MRS MR01_A0=0x%02x MR02_A0=0x%02x MR03_A0=0x%02x MR11_A0=0x%02x\n"
+	},
+	{0x005c0005,
+	 "PMU10: Pstate%d MRS MR12_A0=0x%02x MR13_A0=0x%02x MR14_A0=0x%02x MR22_A0=0x%02x\n"
+	},
+	{0x005d0005,
+	 "PMU10: Pstate%d MRS MR01_A1=0x%02x MR02_A1=0x%02x MR03_A1=0x%02x MR11_A1=0x%02x\n"
+	},
+	{0x005e0005,
+	 "PMU10: Pstate%d MRS MR12_A1=0x%02x MR13_A1=0x%02x MR14_A1=0x%02x MR22_A1=0x%02x\n"
+	},
+	{0x005f0005,
+	 "PMU10: Pstate%d MRS MR01_B0=0x%02x MR02_B0=0x%02x MR03_B0=0x%02x MR11_B0=0x%02x\n"
+	},
+	{0x00600005,
+	 "PMU10: Pstate%d MRS MR12_B0=0x%02x MR13_B0=0x%02x MR14_B0=0x%02x MR22_B0=0x%02x\n"
+	},
+	{0x00610005,
+	 "PMU10: Pstate%d MRS MR01_B1=0x%02x MR02_B1=0x%02x MR03_B1=0x%02x MR11_B1=0x%02x\n"
+	},
+	{0x00620005,
+	 "PMU10: Pstate%d MRS MR12_B1=0x%02x MR13_B1=0x%02x MR14_B1=0x%02x MR22_B1=0x%02x\n"
+	},
+	{0x00630002,
+	 "PMU1: AcsmOdtCtrl%02d 0x%02x\n"
+	},
+	{0x00640002,
+	 "PMU1: AcsmCsMapCtrl%02d 0x%04x\n"
+	},
+	{0x00650002,
+	 "PMU1: AcsmCsMapCtrl%02d 0x%04x\n"
+	},
+	{0x00660000,
+	 "PMU1: HwtCAMode set\n"
+	},
+	{0x00670001,
+	 "PMU3: DDR4 infinite preamble enter/exit mode %d\n"
+	},
+	{0x00680002,
+	 "PMU1: In rxenb_train() csn=%d pstate=%d\n"
+	},
+	{0x00690000,
+	 "PMU3: Finding DQS falling edge\n"
+	},
+	{0x006a0000,
+	 "PMU3: Searching for DDR3/LPDDR3/LPDDR4 read preamble\n"
+	},
+	{0x006b0009,
+	 "PMU3: dtsm fails Even Nibbles : %2x %2x %2x %2x %2x %2x %2x %2x %2x\n"
+	},
+	{0x006c0009,
+	 "PMU3: dtsm fails Odd  Nibbles : %2x %2x %2x %2x %2x %2x %2x %2x %2x\n"
+	},
+	{0x006d0002,
+	 "PMU3: Preamble search pass=%d anyfail=%d\n"
+	},
+	{0x006e0000,
+	 "PMU: Error: RxEn training preamble not found\n"
+	},
+	{0x006f0000,
+	 "PMU3: Found DQS pre-amble\n"
+	},
+	{0x00700001,
+	 "PMU: Error: Dbyte %d couldn't find the rising edge of DQS during RxEn Training\n"
+	},
+	{0x00710000,
+	 "PMU3: RxEn aligning to first rising edge of burst\n"
+	},
+	{0x00720001,
+	 "PMU3: Decreasing RxEn delay by %d fine step to allow full capture of reads\n"
+	},
+	{0x00730001,
+	 "PMU3: MREP Delay = %d\n"
+	},
+	{0x00740003,
+	 "PMU3: Errcnt for MREP nib %2d delay = %2d is %d\n"
+	},
+	{0x00750002,
+	 "PMU3: MREP nibble %d sampled a 1 at data buffer delay %d\n"
+	},
+	{0x00760002,
+	 "PMU3: MREP nibble %d saw a 0 to 1 transition at data buffer delay %d\n"
+	},
+	{0x00770000,
+	 "PMU2:  MREP did not find a 0 to 1 transition for all nibbles. Failing nibbles assumed to have rising edge close to fine delay 63\n"
+	},
+	{0x00780002,
+	 "PMU2:  Rising edge found in alias window, setting rxDly for nibble %d = %d\n"
+	},
+	{0x00790002,
+	 "PMU: Error: Failed MREP for nib %d with %d one\n"
+	},
+	{0x007a0003,
+	 "PMU2:  Rising edge not found in alias window with %d one, leaving rxDly for nibble %d = %d\n"
+	},
+	{0x007b0002,
+	 "PMU3: Training DIMM %d CSn %d\n"
+	},
+	{0x007c0001,
+	 "PMU3: exitCAtrain_lp3 cs 0x%x\n"
+	},
+	{0x007d0001,
+	 "PMU3: enterCAtrain_lp3 cs 0x%x\n"
+	},
+	{0x007e0001,
+	 "PMU3: CAtrain_switchmsb_lp3 cs 0x%x\n"
+	},
+	{0x007f0001,
+	 "PMU3: CATrain_rdwr_lp3 looking for pattern %x\n"
+	},
+	{0x00800000,
+	 "PMU3: exitCAtrain_lp4\n"
+	},
+	{0x00810001,
+	 "PMU3: DEBUG enterCAtrain_lp4 1: cs 0x%x\n"
+	},
+	{0x00820001,
+	 "PMU3: DEBUG enterCAtrain_lp4 3: Put dbyte %d in async mode\n"
+	},
+	{0x00830000,
+	 "PMU3: DEBUG enterCAtrain_lp4 5: Send MR13 to turn on CA training\n"
+	},
+	{0x00840003,
+	 "PMU3: DEBUG enterCAtrain_lp4 7: idx = %d vref = %x mr12 = %x\n"
+	},
+	{0x00850001,
+	 "PMU3: CATrain_rdwr_lp4 looking for pattern %x\n"
+	},
+	{0x00860004,
+	 "PMU3: Phase %d CAreadbackA db:%d %x xo:%x\n"
+	},
+	{0x00870005,
+	 "PMU3: DEBUG lp4SetCatrVref 1: cs=%d chan=%d mr12=%x vref=%d.%d%%\n"
+	},
+	{0x00880003,
+	 "PMU3: DEBUG lp4SetCatrVref 3: mr12 = %x send vref= %x to db=%d\n"
+	},
+	{0x00890000,
+	 "PMU10:Optimizing vref\n"
+	},
+	{0x008a0004,
+	 "PMU4:mr12:%2x cs:%d chan %d r:%4x\n"
+	},
+	{0x008b0005,
+	 "PMU3: i:%2d bstr:%2d bsto:%2d st:%d r:%d\n"
+	},
+	{0x008c0002,
+	 "Failed to find sufficient CA Vref Passing Region for CS %d ch. %d\n"
+	},
+	{0x008d0005,
+	 "PMU3:Found %d.%d%% MR12:%x for cs:%d chan %d\n"
+	},
+	{0x008e0002,
+	 "PMU3:Calculated %d for AtxImpedence from acx %d.\n"
+	},
+	{0x008f0000,
+	 "PMU3:CA Odt impedence ==0.  Use default vref.\n"
+	},
+	{0x00900003,
+	 "PMU3:Calculated %d.%d%% for Vref MR12=0x%x.\n"
+	},
+	{0x00910000,
+	 "PMU3: CAtrain_lp\n"
+	},
+	{0x00920000,
+	 "PMU3: CAtrain Begins.\n"
+	},
+	{0x00930001,
+	 "PMU3: CAtrain_lp testing dly %d\n"
+	},
+	{0x00940001,
+	 "PMU5: CA bitmap dump for cs %x\n"
+	},
+	{0x00950001,
+	 "PMU5: CAA%d "
+	},
+	{0x00960001, "%02x"
+	},
+	{0x00970000, "\n"
+	},
+	{0x00980001,
+	 "PMU5: CAB%d "
+	},
+	{0x00990001, "%02x"
+	},
+	{0x009a0000, "\n"
+	},
+	{0x009b0003,
+	 "PMU3: anibi=%d, anibichan[anibi]=%d ,chan=%d\n"
+	},
+	{0x009c0001, "%02x"
+	},
+	{0x009d0001, "\nPMU3:Raw CA setting :%x"
+	},
+	{0x009e0002, "\nPMU3:ATxDly setting:%x margin:%d\n"
+	},
+	{0x009f0002, "\nPMU3:InvClk ATxDly setting:%x margin:%d\n"
+	},
+	{0x00a00000, "\nPMU3:No Range found!\n"
+	},
+	{0x00a10003,
+	 "PMU3: 2 anibi=%d, anibichan[anibi]=%d ,chan=%d"
+	},
+	{0x00a20002, "\nPMU3: no neg clock => CA setting anib=%d, :%d\n"
+	},
+	{0x00a30001,
+	 "PMU3:Normal margin:%d\n"
+	},
+	{0x00a40001,
+	 "PMU3:Inverted margin:%d\n"
+	},
+	{0x00a50000,
+	 "PMU3:Using Inverted clock\n"
+	},
+	{0x00a60000,
+	 "PMU3:Using normal clk\n"
+	},
+	{0x00a70003,
+	 "PMU3: 3 anibi=%d, anibichan[anibi]=%d ,chan=%d\n"
+	},
+	{0x00a80002,
+	 "PMU3: Setting ATxDly for anib %x to %x\n"
+	},
+	{0x00a90000,
+	 "PMU: Error: CA Training Failed.\n"
+	},
+	{0x00aa0000,
+	 "PMU1: Writing MRs\n"
+	},
+	{0x00ab0000,
+	 "PMU4:Using MR12 values from 1D CA VREF training.\n"
+	},
+	{0x00ac0000,
+	 "PMU3:Writing all MRs to fsp 1\n"
+	},
+	{0x00ad0000,
+	 "PMU10:Lp4Quickboot mode.\n"
+	},
+	{0x00ae0000,
+	 "PMU3: Writing MRs\n"
+	},
+	{0x00af0001,
+	 "PMU10: Setting boot clock divider to %d\n"
+	},
+	{0x00b00000,
+	 "PMU3: Resetting DRAM\n"
+	},
+	{0x00b10000,
+	 "PMU3: setup for RCD initalization\n"
+	},
+	{0x00b20000,
+	 "PMU3: pmu_exit_SR from dev_init()\n"
+	},
+	{0x00b30000,
+	 "PMU3: initializing RCD\n"
+	},
+	{0x00b40000,
+	 "PMU10: **** Executing 2D Image ****\n"
+	},
+	{0x00b50001,
+	 "PMU10: **** Start DDR4 Training. PMU Firmware Revision 0x%04x ****\n"
+	},
+	{0x00b60001,
+	 "PMU10: **** Start DDR3 Training. PMU Firmware Revision 0x%04x ****\n"
+	},
+	{0x00b70001,
+	 "PMU10: **** Start LPDDR3 Training. PMU Firmware Revision 0x%04x ****\n"
+	},
+	{0x00b80001,
+	 "PMU10: **** Start LPDDR4 Training. PMU Firmware Revision 0x%04x ****\n"
+	},
+	{0x00b90000,
+	 "PMU: Error: Mismatched internal revision between DCCM and ICCM images\n"
+	},
+	{0x00ba0001,
+	 "PMU10: **** Testchip %d Specific Firmware ****\n"
+	},
+	{0x00bb0000,
+	 "PMU1: LRDIMM with EncodedCS mode, one DIMM\n"
+	},
+	{0x00bc0000,
+	 "PMU1: LRDIMM with EncodedCS mode, two DIMMs\n"
+	},
+	{0x00bd0000,
+	 "PMU1: RDIMM with EncodedCS mode, one DIMM\n"
+	},
+	{0x00be0000,
+	 "PMU2: Starting LRDIMM MREP training for all ranks\n"
+	},
+	{0x00bf0000,
+	 "PMU199: LRDIMM MREP training for all ranks completed\n"
+	},
+	{0x00c00000,
+	 "PMU2: Starting LRDIMM DWL training for all ranks\n"
+	},
+	{0x00c10000,
+	 "PMU199: LRDIMM DWL training for all ranks completed\n"
+	},
+	{0x00c20000,
+	 "PMU2: Starting LRDIMM MRD training for all ranks\n"
+	},
+	{0x00c30000,
+	 "PMU199: LRDIMM MRD training for all ranks completed\n"
+	},
+	{0x00c40000,
+	 "PMU2: Starting RXEN training for all ranks\n"
+	},
+	{0x00c50000,
+	 "PMU2: Starting write leveling fine delay training for all ranks\n"
+	},
+	{0x00c60000,
+	 "PMU2: Starting LRDIMM MWD training for all ranks\n"
+	},
+	{0x00c70000,
+	 "PMU199: LRDIMM MWD training for all ranks completed\n"
+	},
+	{0x00c80000,
+	 "PMU2: Starting write leveling fine delay training for all ranks\n"
+	},
+	{0x00c90000,
+	 "PMU2: Starting read deskew training\n"
+	},
+	{0x00ca0000,
+	 "PMU2: Starting SI friendly 1d RdDqs training for all ranks\n"
+	},
+	{0x00cb0000,
+	 "PMU2: Starting write leveling coarse delay training for all ranks\n"
+	},
+	{0x00cc0000,
+	 "PMU2: Starting 1d WrDq training for all ranks\n"
+	},
+	{0x00cd0000,
+	 "PMU2: Running DQS2DQ Oscillator for all ranks\n"
+	},
+	{0x00ce0000,
+	 "PMU2: Starting again read deskew training but with PRBS\n"
+	},
+	{0x00cf0000,
+	 "PMU2: Starting 1d RdDqs training for all ranks\n"
+	},
+	{0x00d00000,
+	 "PMU2: Starting again 1d WrDq training for all ranks\n"
+	},
+	{0x00d10000,
+	 "PMU2: Starting MaxRdLat training\n"
+	},
+	{0x00d20000,
+	 "PMU2: Starting 2d WrDq training for all ranks\n"
+	},
+	{0x00d30000,
+	 "PMU2: Starting 2d RdDqs training for all ranks\n"
+	},
+	{0x00d40002,
+	 "PMU3:read_fifo %x %x\n"
+	},
+	{0x00d50001,
+	 "PMU: Error: Invalid PhyDrvImpedance of 0x%x specified in message block.\n"
+	},
+	{0x00d60001,
+	 "PMU: Error: Invalid PhyOdtImpedance of 0x%x specified in message block.\n"
+	},
+	{0x00d70001,
+	 "PMU: Error: Invalid BPZNResVal of 0x%x specified in message block.\n"
+	},
+	{0x00d80005,
+	 "PMU3: fixRxEnBackOff csn:%d db:%d dn:%d bo:%d dly:%x\n"
+	},
+	{0x00d90001,
+	 "PMU3: fixRxEnBackOff dly:%x\n"
+	},
+	{0x00da0000,
+	 "PMU3: Entering setupPpt\n"
+	},
+	{0x00db0000,
+	 "PMU3: Start lp4PopulateHighLowBytes\n"
+	},
+	{0x00dc0002,
+	 "PMU3:Dbyte Detect: db%d received %x\n"
+	},
+	{0x00dd0002,
+	 "PMU3:getDqs2Dq read %x from dbyte %d\n"
+	},
+	{0x00de0002,
+	 "PMU3:getDqs2Dq(2) read %x from dbyte %d\n"
+	},
+	{0x00df0001,
+	 "PMU: Error: Dbyte %d read 0 from the DQS oscillator it is connected to\n"
+	},
+	{0x00e00002,
+	 "PMU4: Dbyte %d dqs2dq = %d/32 UI\n"
+	},
+	{0x00e10003,
+	 "PMU3:getDqs2Dq set dqs2dq:%d/32 ui (%d ps) from dbyte %d\n"
+	},
+	{0x00e20003,
+	 "PMU3: Setting coarse delay in AtxDly chiplet %d from 0x%02x to 0x%02x\n"
+	},
+	{0x00e30003,
+	 "PMU3: Clearing coarse delay in AtxDly chiplet %d from 0x%02x to 0x%02x\n"
+	},
+	{0x00e40000,
+	 "PMU3: Performing DDR4 geardown sync sequence\n"
+	},
+	{0x00e50000,
+	 "PMU1: Enter self refresh\n"
+	},
+	{0x00e60000,
+	 "PMU1: Exit self refresh\n"
+	},
+	{0x00e70000,
+	 "PMU: Error: No dbiEnable with lp4\n"
+	},
+	{0x00e80000,
+	 "PMU: Error: No dbiDisable with lp4\n"
+	},
+	{0x00e90001,
+	 "PMU1: DDR4 update Rx DBI Setting disable %d\n"
+	},
+	{0x00ea0001,
+	 "PMU1: DDR4 update 2nCk WPre Setting disable %d\n"
+	},
+	{0x00eb0005,
+	 "PMU1: read_delay: db%d lane%d delays[%2d] = 0x%02x (max 0x%02x)\n"
+	},
+	{0x00ec0004,
+	 "PMU1: write_delay: db%d lane%d delays[%2d] = 0x%04x\n"
+	},
+	{0x00ed0001,
+	 "PMU5: ID=%d -- db0  db1  db2  db3  db4  db5  db6  db7  db8  db9 --\n"
+	},
+	{0x00ee000b,
+	 "PMU5: [%d]:0x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n"
+	},
+	{0x00ef0003,
+	 "PMU2: dump delays - pstate=%d dimm=%d csn=%d\n"
+	},
+	{0x00f00000,
+	 "PMU3: Printing Mid-Training Delay Information\n"
+	},
+	{0x00f10001,
+	 "PMU5: CS%d <<KEY>> 0 TrainingCntr <<KEY>> coarse(15:10) fine(9:0)\n"
+	},
+	{0x00f20001,
+	 "PMU5: CS%d <<KEY>> 0 RxEnDly, 1 RxClkDly <<KEY>> coarse(10:6) fine(5:0)\n"
+	},
+	{0x00f30001,
+	 "PMU5: CS%d <<KEY>> 0 TxDqsDly, 1 TxDqDly <<KEY>> coarse(9:6) fine(5:0)\n"
+	},
+	{0x00f40001,
+	 "PMU5: CS%d <<KEY>> 0 RxPBDly <<KEY>> 1 Delay Unit ~= 7ps\n"
+	},
+	{0x00f50000,
+	 "PMU5: all CS <<KEY>> 0 DFIMRL <<KEY>> Units = DFI clocks\n"
+	},
+	{0x00f60000,
+	 "PMU5: all CS <<KEY>> VrefDACs <<KEY>> DAC(6:0)\n"
+	},
+	{0x00f70000,
+	 "PMU1: Set DMD in MR13 and wrDBI in MR3 for training\n"
+	},
+	{0x00f80000,
+	 "PMU: Error: getMaxRxen() failed to find largest rxen nibble delay\n"
+	},
+	{0x00f90003,
+	 "PMU2: getMaxRxen(): maxDly %d maxTg %d maxNib %d\n"
+	},
+	{0x00fa0003,
+	 "PMU2: getRankMaxRxen(): maxDly %d Tg %d maxNib %d\n"
+	},
+	{0x00fb0000,
+	 "PMU1: skipping CDD calculation in 2D image\n"
+	},
+	{0x00fc0001,
+	 "PMU3: Calculating CDDs for pstate %d\n"
+	},
+	{0x00fd0003,
+	 "PMU3: rxFromDly[%d][%d] = %d\n"
+	},
+	{0x00fe0003,
+	 "PMU3: rxToDly  [%d][%d] = %d\n"
+	},
+	{0x00ff0003,
+	 "PMU3: rxDly    [%d][%d] = %d\n"
+	},
+	{0x01000003,
+	 "PMU3: txDly    [%d][%d] = %d\n"
+	},
+	{0x01010003,
+	 "PMU3: allFine CDD_RR_%d_%d = %d\n"
+	},
+	{0x01020003,
+	 "PMU3: allFine CDD_WW_%d_%d = %d\n"
+	},
+	{0x01030003,
+	 "PMU3: CDD_RR_%d_%d = %d\n"
+	},
+	{0x01040003,
+	 "PMU3: CDD_WW_%d_%d = %d\n"
+	},
+	{0x01050003,
+	 "PMU3: allFine CDD_RW_%d_%d = %d\n"
+	},
+	{0x01060003,
+	 "PMU3: allFine CDD_WR_%d_%d = %d\n"
+	},
+	{0x01070003,
+	 "PMU3: CDD_RW_%d_%d = %d\n"
+	},
+	{0x01080003,
+	 "PMU3: CDD_WR_%d_%d = %d\n"
+	},
+	{0x01090004,
+	 "PMU3: F%dBC2x_B%d_D%d = 0x%02x\n"
+	},
+	{0x010a0004,
+	 "PMU3: F%dBC3x_B%d_D%d = 0x%02x\n"
+	},
+	{0x010b0004,
+	 "PMU3: F%dBC4x_B%d_D%d = 0x%02x\n"
+	},
+	{0x010c0004,
+	 "PMU3: F%dBC5x_B%d_D%d = 0x%02x\n"
+	},
+	{0x010d0004,
+	 "PMU3: F%dBC8x_B%d_D%d = 0x%02x\n"
+	},
+	{0x010e0004,
+	 "PMU3: F%dBC9x_B%d_D%d = 0x%02x\n"
+	},
+	{0x010f0004,
+	 "PMU3: F%dBCAx_B%d_D%d = 0x%02x\n"
+	},
+	{0x01100004,
+	 "PMU3: F%dBCBx_B%d_D%d = 0x%02x\n"
+	},
+	{0x01110000,
+	 "PMU10: Entering context_switch_postamble\n"
+	},
+	{0x01120003,
+	 "PMU10: context_switch_postamble is enabled for DIMM %d, RC0A=0x%x, RC3x=0x%x\n"
+	},
+	{0x01130000,
+	 "PMU10: Setting bcw fspace 0\n"
+	},
+	{0x01140001,
+	 "PMU10: Sending BC0A = 0x%x\n"
+	},
+	{0x01150001,
+	 "PMU10: Sending BC6x = 0x%x\n"
+	},
+	{0x01160001,
+	 "PMU10: Sending RC0A = 0x%x\n"
+	},
+	{0x01170001,
+	 "PMU10: Sending RC3x = 0x%x\n"
+	},
+	{0x01180001,
+	 "PMU10: Sending RC0A = 0x%x\n"
+	},
+	{0x01190001,
+	 "PMU1: enter_lp3: DEBUG: pstate = %d\n"
+	},
+	{0x011a0001,
+	 "PMU1: enter_lp3: DEBUG: dfifreqxlat_pstate = %d\n"
+	},
+	{0x011b0001,
+	 "PMU1: enter_lp3: DEBUG: pllbypass = %d\n"
+	},
+	{0x011c0001,
+	 "PMU1: enter_lp3: DEBUG: forcecal = %d\n"
+	},
+	{0x011d0001,
+	 "PMU1: enter_lp3: DEBUG: pllmaxrange = 0x%x\n"
+	},
+	{0x011e0001,
+	 "PMU1: enter_lp3: DEBUG: dacval_out = 0x%x\n"
+	},
+	{0x011f0001,
+	 "PMU1: enter_lp3: DEBUG: pllctrl3 = 0x%x\n"
+	},
+	{0x01200000,
+	 "PMU3: Loading DRAM with BIOS supplied MR values and entering self refresh prior to exiting PMU code.\n"
+	},
+	{0x01210002,
+	 "PMU3: Setting DataBuffer function space of dimmcs 0x%02x to %d\n"
+	},
+	{0x01220002,
+	 "PMU4: Setting RCW FxRC%Xx = 0x%02x\n"
+	},
+	{0x01230002,
+	 "PMU4: Setting RCW FxRC%02x = 0x%02x\n"
+	},
+	{0x01240001,
+	 "PMU1: DDR4 update Rd Pre Setting disable %d\n"
+	},
+	{0x01250002,
+	 "PMU2: Setting BCW FxBC%Xx = 0x%02x\n"
+	},
+	{0x01260002,
+	 "PMU2: Setting BCW BC%02x = 0x%02x\n"
+	},
+	{0x01270002,
+	 "PMU2: Setting BCW PBA mode FxBC%Xx = 0x%02x\n"
+	},
+	{0x01280002,
+	 "PMU2: Setting BCW PBA mode BC%02x = 0x%02x\n"
+	},
+	{0x01290003,
+	 "PMU4: BCW value for dimm %d, fspace %d, addr 0x%04x\n"
+	},
+	{0x012a0002,
+	 "PMU4: DB %d, value 0x%02x\n"
+	},
+	{0x012b0000,
+	 "PMU6: WARNING MREP underflow, set to min value -2 coarse, 0 fine\n"
+	},
+	{0x012c0004,
+	 "PMU6: LRDIMM Writing final data buffer fine delay value nib %2d, trainDly %3d, fineDly code %2d, new MREP fine %2d\n"
+	},
+	{0x012d0003,
+	 "PMU6: LRDIMM Writing final data buffer fine delay value nib %2d, trainDly %3d, fineDly code %2d\n"
+	},
+	{0x012e0003,
+	 "PMU6: LRDIMM Writing data buffer fine delay type %d nib %2d, code %2d\n"
+	},
+	{0x012f0002,
+	 "PMU6: Writing final data buffer coarse delay value dbyte %2d, coarse = 0x%02x\n"
+	},
+	{0x01300003,
+	 "PMU4: data 0x%04x at MB addr 0x%08x saved at CSR addr 0x%08x\n"
+	},
+	{0x01310003,
+	 "PMU4: data 0x%04x at MB addr 0x%08x restored from CSR addr 0x%08x\n"
+	},
+	{0x01320003,
+	 "PMU4: data 0x%04x at MB addr 0x%08x saved at CSR addr 0x%08x\n"
+	},
+	{0x01330003,
+	 "PMU4: data 0x%04x at MB addr 0x%08x restored from CSR addr 0x%08x\n"
+	},
+	{0x01340001,
+	 "PMU3: Update BC00, BC01, BC02 for rank-dimm 0x%02x\n"
+	},
+	{0x01350000,
+	 "PMU3: Writing D4 RDIMM RCD Control words F0RC00 -> F0RC0F\n"
+	},
+	{0x01360000,
+	 "PMU3: Disable parity in F0RC0E\n"
+	},
+	{0x01370000,
+	 "PMU3: Writing D4 RDIMM RCD Control words F1RC00 -> F1RC05\n"
+	},
+	{0x01380000,
+	 "PMU3: Writing D4 RDIMM RCD Control words F1RC1x -> F1RC9x\n"
+	},
+	{0x01390000,
+	 "PMU3: Writing D4 Data buffer Control words BC00 -> BC0E\n"
+	},
+	{0x013a0002,
+	 "PMU1: setAltCL Sending MR0 0x%x cl=%d\n"
+	},
+	{0x013b0002,
+	 "PMU1: restoreFromAltCL Sending MR0 0x%x cl=%d\n"
+	},
+	{0x013c0002,
+	 "PMU1: restoreAcsmFromAltCL Sending MR0 0x%x cl=%d\n"
+	},
+	{0x013d0002,
+	 "PMU2: Setting D3R RC%d = 0x%01x\n"
+	},
+	{0x013e0000,
+	 "PMU3: Writing D3 RDIMM RCD Control words RC0 -> RC11\n"
+	},
+	{0x013f0002,
+	 "PMU0: VrefDAC0/1 vddqStart %d dacToVddq %d\n"
+	},
+	{0x01400001,
+	 "PMU: Error: Messageblock phyVref=0x%x is above the limit for TSMC28's attenuated LPDDR4 receivers. Please see the pub databook\n"
+	},
+	{0x01410001,
+	 "PMU: Error: Messageblock phyVref=0x%x is above the limit for TSMC28's attenuated DDR4 receivers. Please see the pub databook\n"
+	},
+	{0x01420001,
+	 "PMU0: PHY VREF @ (%d/1000) VDDQ\n"
+	},
+	{0x01430002,
+	 "PMU0: initalizing phy vrefDacs to %d ExtVrefRange %x\n"
+	},
+	{0x01440002,
+	 "PMU0: initalizing global vref to %d range %d\n"
+	},
+	{0x01450002,
+	 "PMU4: Setting initial device vrefDQ for CS%d to MR6 = 0x%04x\n"
+	},
+	{0x01460003,
+	 "PMU1: In write_level_fine() csn=%d dimm=%d pstate=%d\n"
+	},
+	{0x01470000,
+	 "PMU3: Fine write leveling hardware search increasing TxDqsDly until full bursts are seen\n"
+	},
+	{0x01480000,
+	 "PMU4: WL normalized pos   : ........................|........................\n"
+	},
+	{0x01490007,
+	 "PMU4: WL margin for nib %2d: %08x%08x%08x%08x%08x%08x\n"
+	},
+	{0x014a0000,
+	 "PMU4: WL normalized pos   : ........................|........................\n"
+	},
+	{0x014b0000,
+	 "PMU3: Exiting write leveling mode\n"
+	},
+	{0x014c0001,
+	 "PMU3: got %d for cl in load_wrlvl_acsm\n"
+	},
+	{0x014d0003,
+	 "PMU1: In write_level_coarse() csn=%d dimm=%d pstate=%d\n"
+	},
+	{0x014e0003,
+	 "PMU3: left eye edge search db:%d ln:%d dly:0x%x\n"
+	},
+	{0x014f0003,
+	 "PMU3: right eye edge search db:%d ln:%d dly:0x%x\n"
+	},
+	{0x01500004,
+	 "PMU3: eye center db:%d ln:%d dly:0x%x (maxdq:%x)\n"
+	},
+	{0x01510003,
+	 "PMU3: Wrote to TxDqDly db:%d ln:%d dly:0x%x\n"
+	},
+	{0x01520003,
+	 "PMU3: Wrote to TxDqDly db:%d ln:%d dly:0x%x\n"
+	},
+	{0x01530002,
+	 "PMU3: Coarse write leveling dbyte%2d is still failing for TxDqsDly=0x%04x\n"
+	},
+	{0x01540002,
+	 "PMU4: Coarse write leveling iteration %d saw %d data miscompares across the entire phy\n"
+	},
+	{0x01550000,
+	 "PMU: Error: Failed write leveling coarse\n"
+	},
+	{0x01560001,
+	 "PMU3: got %d for cl in load_wrlvl_acsm\n"
+	},
+	{0x01570003,
+	 "PMU3: In write_level_coarse() csn=%d dimm=%d pstate=%d\n"
+	},
+	{0x01580003,
+	 "PMU3: left eye edge search db:%d ln:%d dly:0x%x\n"
+	},
+	{0x01590003,
+	 "PMU3: right eye edge search db: %d ln: %d dly: 0x%x\n"
+	},
+	{0x015a0004,
+	 "PMU3: eye center db: %d ln: %d dly: 0x%x (maxdq: 0x%x)\n"
+	},
+	{0x015b0003,
+	 "PMU3: Wrote to TxDqDly db: %d ln: %d dly: 0x%x\n"
+	},
+	{0x015c0003,
+	 "PMU3: Wrote to TxDqDly db: %d ln: %d dly: 0x%x\n"
+	},
+	{0x015d0002,
+	 "PMU3: Coarse write leveling nibble%2d is still failing for TxDqsDly=0x%04x\n"
+	},
+	{0x015e0002,
+	 "PMU4: Coarse write leveling iteration %d saw %d data miscompares across the entire phy\n"
+	},
+	{0x015f0000,
+	 "PMU: Error: Failed write leveling coarse\n"
+	},
+	{0x01600000,
+	 "PMU4: WL normalized pos   : ................................|................................\n"
+	},
+	{0x01610009,
+	 "PMU4: WL margin for nib %2d: %08x%08x%08x%08x%08x%08x%08x%08x\n"
+	},
+	{0x01620000,
+	 "PMU4: WL normalized pos   : ................................|................................\n"
+	},
+	{0x01630001,
+	 "PMU8: Adjust margin after WL coarse to be larger than %d\n"
+	},
+	{0x01640001,
+	 "PMU: Error: All margin after write leveling coarse are smaller than minMargin %d\n"
+	},
+	{0x01650002,
+	 "PMU8: Decrement nib %d TxDqsDly by %d fine step\n"
+	},
+	{0x01660003,
+	 "PMU3: In write_level_coarse() csn=%d dimm=%d pstate=%d\n"
+	},
+	{0x01670005,
+	 "PMU2: Write level: dbyte %d nib%d dq/dmbi %2d dqsfine 0x%04x dqDly 0x%04x\n"
+	},
+	{0x01680002,
+	 "PMU3: Coarse write leveling nibble%2d is still failing for TxDqsDly=0x%04x\n"
+	},
+	{0x01690002,
+	 "PMU4: Coarse write leveling iteration %d saw %d data miscompares across the entire phy\n"
+	},
+	{0x016a0000,
+	 "PMU: Error: Failed write leveling coarse\n"
+	},
+	{0x016b0001,
+	 "PMU3: DWL delay = %d\n"
+	},
+	{0x016c0003,
+	 "PMU3: Errcnt for DWL nib %2d delay = %2d is %d\n"
+	},
+	{0x016d0002,
+	 "PMU3: DWL nibble %d sampled a 1 at delay %d\n"
+	},
+	{0x016e0003,
+	 "PMU3: DWL nibble %d passed at delay %d. Rising edge was at %d\n"
+	},
+	{0x016f0000,
+	 "PMU2: DWL did nto find a rising edge of memclk for all nibbles. Failing nibbles assumed to have rising edge close to fine delay 63\n"
+	},
+	{0x01700002,
+	 "PMU2:  Rising edge found in alias window, setting wrlvlDly for nibble %d = %d\n"
+	},
+	{0x01710002,
+	 "PMU: Error: Failed DWL for nib %d with %d one\n"
+	},
+	{0x01720003,
+	 "PMU2:  Rising edge not found in alias window with %d one, leaving wrlvlDly for nibble %d = %d\n"
+	},
+	{0x04000000,
+	 "PMU: Error:Mailbox Buffer Overflowed.\n"
+	},
+	{0x04010000,
+	 "PMU: Error:Mailbox Buffer Overflowed.\n"
+	},
+	{0x04020000,
+	 "PMU: ***** Assertion Error - terminating *****\n"
+	},
+	{0x04030002,
+	 "PMU1: swapByte db %d by %d\n"
+	},
+	{0x04040003,
+	 "PMU3: get_cmd_dly max(%d ps, %d memclk) = %d\n"
+	},
+	{0x04050002,
+	 "PMU0: Write CSR 0x%06x 0x%04x\n"
+	},
+	{0x04060002,
+	 "PMU0: hwt_init_ppgc_prbs(): Polynomial: %x, Deg: %d\n"
+	},
+	{0x04070001,
+	 "PMU: Error: acsm_set_cmd to non existent instruction address %d\n"
+	},
+	{0x04080001,
+	 "PMU: Error: acsm_set_cmd with unknown ddr cmd 0x%x\n"
+	},
+	{0x0409000c,
+	 "PMU1: acsm_addr %02x, acsm_flgs %04x, ddr_cmd %02x, cmd_dly %02x, ddr_addr %04x, ddr_bnk %02x, ddr_cs %02x, cmd_rcnt %02x, AcsmSeq0/1/2/3 %04x %04x %04x %04x\n"
+	},
+	{0x040a0000,
+	 "PMU: Error: Polling on ACSM done failed to complete in acsm_poll_done()...\n"
+	},
+	{0x040b0000,
+	 "PMU1: acsm RUN\n"
+	},
+	{0x040c0000,
+	 "PMU1: acsm STOPPED\n"
+	},
+	{0x040d0002,
+	 "PMU1: acsm_init: acsm_mode %04x mxrdlat %04x\n"
+	},
+	{0x040e0002,
+	 "PMU: Error: setAcsmCLCWL: cl and cwl must be each >= 2 and 5, resp. CL=%d CWL=%d\n"
+	},
+	{0x040f0002,
+	 "PMU: Error: setAcsmCLCWL: cl and cwl must be each >= 5. CL=%d CWL=%d\n"
+	},
+	{0x04100002,
+	 "PMU1: setAcsmCLCWL: CASL %04d WCASL %04d\n"
+	},
+	{0x04110001,
+	 "PMU: Error: Reserved value of register F0RC0F found in message block: 0x%04x\n"
+	},
+	{0x04120001,
+	 "PMU3: Written MRS to CS=0x%02x\n"
+	},
+	{0x04130001,
+	 "PMU3: Written MRS to CS=0x%02x\n"
+	},
+	{0x04140000,
+	 "PMU3: Entering Boot Freq Mode.\n"
+	},
+	{0x04150001,
+	 "PMU: Error: Boot clock divider setting of %d is too small\n"
+	},
+	{0x04160000,
+	 "PMU3: Exiting Boot Freq Mode.\n"
+	},
+	{0x04170002,
+	 "PMU3: Writing MR%d OP=%x\n"
+	},
+	{0x04180000,
+	 "PMU: Error: Delay too large in slomo\n"
+	},
+	{0x04190001,
+	 "PMU3: Written MRS to CS=0x%02x\n"
+	},
+	{0x041a0000,
+	 "PMU3: Enable Channel A\n"
+	},
+	{0x041b0000,
+	 "PMU3: Enable Channel B\n"
+	},
+	{0x041c0000,
+	 "PMU3: Enable All Channels\n"
+	},
+	{0x041d0002,
+	 "PMU2: Use PDA mode to set MR%d with value 0x%02x\n"
+	},
+	{0x041e0001,
+	 "PMU3: Written Vref with PDA to CS=0x%02x\n"
+	},
+	{0x041f0000,
+	 "PMU1: start_cal: DEBUG: setting CalRun to 1\n"
+	},
+	{0x04200000,
+	 "PMU1: start_cal: DEBUG: setting CalRun to 0\n"
+	},
+	{0x04210001,
+	 "PMU1: lock_pll_dll: DEBUG: pstate = %d\n"
+	},
+	{0x04220001,
+	 "PMU1: lock_pll_dll: DEBUG: dfifreqxlat_pstate = %d\n"
+	},
+	{0x04230001,
+	 "PMU1: lock_pll_dll: DEBUG: pllbypass = %d\n"
+	},
+	{0x04240001,
+	 "PMU3: SaveLcdlSeed: Saving seed %d\n"
+	},
+	{0x04250000,
+	 "PMU1: in phy_defaults()\n"
+	},
+	{0x04260003,
+	 "PMU3: ACXConf:%d MaxNumDbytes:%d NumDfi:%d\n"
+	},
+	{0x04270005,
+	 "PMU1: setAltAcsmCLCWL setting cl=%d cwl=%d\n"
+	},
+};
+
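+/*
+ * Decoder table for the 2D training firmware image. In each entry the
+ * upper 16 bits of the code carry the message ID and the lower 16 bits
+ * the number of arguments consumed by the printf-style format string
+ * that follows.
+ */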
+const static struct phy_msg messages_2d[] = {
+	{0x00000001,
+	 "PMU0: Converting %d into an MR\n"
+	},
+	{0x00010003,
+	 "PMU DEBUG: vref_idx %d -= %d, range_idx = %d\n"
+	},
+	{0x00020002,
+	 "PMU0: vrefIdx. Passing range %d, remaining vrefidx = %d\n"
+	},
+	{0x00030002,
+	 "PMU0: VrefIdx %d -> MR[6:0] 0x%02x\n"
+	},
+	{0x00040001,
+	 "PMU0: Converting MR 0x%04x to vrefIdx\n"
+	},
+	{0x00050002,
+	 "PMU0: DAC %d Range %d\n"
+	},
+	{0x00060003,
+	 "PMU0: Range %d, Range_idx %d, vref_idx offset %d\n"
+	},
+	{0x00070002,
+	 "PMU0: MR 0x%04x -> VrefIdx %d\n"
+	},
+	{0x00080001,
+	 "PMU: Error: Illegal timing group number ,%d, in getPtrVrefDq\n"
+	},
+	{0x00090003,
+	 "PMU1: VrefDqR%dNib%d = %d\n"
+	},
+	{0x000a0003,
+	 "PMU0: VrefDqR%dNib%d = %d\n"
+	},
+	{0x000b0000,
+	 "PMU0: ----------------MARGINS-------\n"
+	},
+	{0x000c0002,
+	 "PMU0: R%d_RxClkDly_Margin = %d\n"
+	},
+	{0x000d0002,
+	 "PMU0: R%d_VrefDac_Margin = %d\n"
+	},
+	{0x000e0002,
+	 "PMU0: R%d_TxDqDly_Margin = %d\n"
+	},
+	{0x000f0002,
+	 "PMU0: R%d_DeviceVref_Margin = %d\n"
+	},
+	{0x00100000,
+	 "PMU0: -----------------------\n"
+	},
+	{0x00110003,
+	 "PMU0: eye %d's for all TG's is [%d ... %d]\n"
+	},
+	{0x00120000,
+	 "PMU0: ------- settingWeight -----\n"
+	},
+	{0x00130002,
+	 "PMU0: Weight %d @ Setting %d\n"
+	},
+	{0x0014001f,
+	 "PMU4: %3d %3d %3d %3d %3d %3d %3d %3d %3d %3d %3d %3d %3d %3d %3d >%3d< %3d %3d %3d %3d %3d %3d %3d %3d %3d %3d %3d %3d %3d %3d %3d\n"
+	},
+	{0x00150002,
+	 "PMU3: Voltage Range = [%d, %d]\n"
+	},
+	{0x00160004,
+	 "PMU4: -- DB%d L%d -- centers: delay = %d, voltage = %d\n"
+	},
+	{0x00170001,
+	 "PMU5: <<KEY>> 0 TxDqDlyTg%d <<KEY>> coarse(6:6) fine(5:0)\n"
+	},
+	{0x00180001,
+	 "PMU5: <<KEY>> 0 messageBlock VrefDqR%d <<KEY>> MR6(6:0)\n"
+	},
+	{0x00190001,
+	 "PMU5: <<KEY>> 0 RxClkDlyTg%d <<KEY>> fine(5:0)\n"
+	},
+	{0x001a0003,
+	 "PMU0: tgToCsn: tg %d + 0x%04x -> csn %d\n"
+	},
+	{0x001b0002,
+	 "PMU: Error: LP4 rank %d cannot be mapped on tg %d\n"
+	},
+	{0x001c0002,
+	 "PMU3: Sending vref %d,  Mr = 0X%05x, to all devices\n"
+	},
+	{0x001d0004,
+	 "PMU4: -------- %dD Write Scanning TG %d (CS 0x%x) Lanes 0x%03x --------\n"
+	},
+	{0x001e0002,
+	 "PMU0: training lanes 0x%03x using lanes 0x%03x\n"
+	},
+	{0x001f0003,
+	 "PMU4: ------- 2D-DFE Read Scanning TG %d (CS 0x%x) Lanes 0x%03x -------\n"
+	},
+	{0x00200004,
+	 "PMU4: ------- %dD Read Scanning TG %d (CS 0x%x) Lanes 0x%03x -------\n"
+	},
+	{0x00210003,
+	 "PMU4: TG%d MR1[13,6,5]=0x%x MR6[13,9,8]=0x%x\n"
+	},
+	{0x00220002,
+	 "PMU0: training lanes 0x%03x using lanes 0x%03x\n"
+	},
+	{0x00230003,
+	 "PMU4: ------- 2D-DFE Read Scanning TG %d (CS 0x%x) Lanes 0x%03x -------\n"
+	},
+	{0x00240004,
+	 "PMU4: ------- %dD Read Scanning TG %d (CS 0x%x) Lanes 0x%03x -------\n"
+	},
+	{0x00250002,
+	 "PMU0: training lanes 0x%03x using lanes 0x%03x\n"
+	},
+	{0x00260002,
+	 "PMU3: Sending vref %d,  Mr = 0X%05x, to all devices\n"
+	},
+	{0x00270004,
+	 "PMU4: -------- %dD Write Scanning TG %d (CS 0x%x) Lanes 0x%03x --------\n"
+	},
+	{0x00280001,
+	 "PMU0: input %d\n"
+	},
+	{0x00290002,
+	 "PMU4: Programmed Voltage Search Range [%d, %d]\n"
+	},
+	{0x002a0002,
+	 "PMU3: Delay Stepsize = %d Fine, Voltage Stepsize = %d DAC\n"
+	},
+	{0x002b0002,
+	 "PMU4: Delay Weight = %d, Voltage Weight = %d\n"
+	},
+	{0x002c0003,
+	 "PMU0: raw 0x%x allFine %d incDec %d"
+	},
+	{0x002d0008,
+	 "PMU0: db%d l%d, voltage 0x%x (u_r %d) delay 0x%x (u_r %d) - lcdl %d mask 0x%x\n"
+	},
+	{0x002e0005,
+	 "PMU0: DB%d L%d, Eye %d, Seed = (0x%x, 0x%x)\n"
+	},
+	{0x002f0002,
+	 "PMU3: 2D Enables       : %d,                    1,                %d\n"
+	},
+	{0x00300006,
+	 "PMU3: 2D Delay   Ranges: OOPL[0x%04x,0x%04x], IP[0x%04x,0x%04x], OOPR[0x%04x,0x%04x]\n"
+	},
+	{0x00310002,
+	 "PMU3: 2D Voltage Search Range : [%d, %d]\n"
+	},
+	{0x00320002,
+	 "PMU4: Found Voltage Search Range [%d, %d]\n"
+	},
+	{0x00330002,
+	 "PMU0: User Weight = %d, Voltage Weight = %d\n"
+	},
+	{0x00340005,
+	 "PMU0: D(%d,%d) V(%d,%d | %d)\n"
+	},
+	{0x00350002,
+	 "PMU0: Norm Weight = %d, Voltage Weight = %d\n"
+	},
+	{0x00360002,
+	 "PMU0: seed 0 = (%d,%d) (center)\n"
+	},
+	{0x00370003,
+	 "PMU0: seed 1 = (%d,%d).min edge at idx %d\n"
+	},
+	{0x00380003,
+	 "PMU0: seed 2 = (%d,%d) max edge at idx %d\n"
+	},
+	{0x00390003,
+	 "PMU0: Search point %d = (%d,%d)\n"
+	},
+	{0x003a0005,
+	 "PMU0: YMARGIN: ^ %d, - %d, v %d. rate %d = %d\n"
+	},
+	{0x003b0003,
+	 "PMU0: XMARGIN: center %d, edge %d. = %d\n"
+	},
+	{0x003c0002,
+	 "PMU0: ----------- weighting (%d,%d) ----------------\n"
+	},
+	{0x003d0003,
+	 "PMU0: X margin - L %d R %d - Min %d\n"
+	},
+	{0x003e0003,
+	 "PMU0: Y margin - L %d R %d - Min %d\n"
+	},
+	{0x003f0003,
+	 "PMU0: center (%d,%d) weight = %d\n"
+	},
+	{0x00400003,
+	 "PMU4: Eye argest blob area %d from %d to %d\n"
+	},
+	{0x00410002,
+	 "PMU0: Compute centroid min_x %d max_x %d\n"
+	},
+	{0x00420003,
+	 "PMU0: Compute centroid sumLnDlyWidth %d sumLnVrefWidth %d sumLnWidht %d\n"
+	},
+	{0x00430000,
+	 "PMU: Error: No passing region found for 1 or more lanes. Set hdtCtrl=4 to see passing regions\n"
+	},
+	{0x00440003,
+	 "PMU0: Centroid ( %d, %d ) found with sumLnWidht %d\n"
+	},
+	{0x00450003,
+	 "PMU0: Optimal allFine Center ( %d + %d ,%d )\n"
+	},
+	{0x00460003,
+	 "PMU3: point %d starting at (%d,%d)\n"
+	},
+	{0x00470002,
+	 "PMU0: picking left (%d > %d)\n"
+	},
+	{0x00480002,
+	 "PMU0: picking right (%d > %d)\n"
+	},
+	{0x00490002,
+	 "PMU0: picking down (%d > %d)\n"
+	},
+	{0x004a0002,
+	 "PMU0: picking up (%d > %d)\n"
+	},
+	{0x004b0009,
+	 "PMU3: new center @ (%3d, %3d). Moved (%2i, %2i) -- L %d, R %d, C %d, U %d, D %d\n"
+	},
+	{0x004c0003,
+	 "PMU3: cordNum %d imporved %d to %d\n"
+	},
+	{0x004d0000,
+	 "PMU: Error: No passing region found for 1 or more lanes. Set hdtCtrl=4 to see passing regions\n"
+	},
+	{0x004e0004,
+	 "PMU0: Optimal allFine Center ( %d + %d ,%d ), found with weight %d.\n"
+	},
+	{0x004f0003,
+	 "PMU0: merging lanes=%d..%d, centerMerge_t %d\n"
+	},
+	{0x00500001,
+	 "PMU0: laneVal %d is disable\n"
+	},
+	{0x00510002,
+	 "PMU0: checking common center %d against current center %d\n"
+	},
+	{0x00520001,
+	 "PMU: Error: getCompoundEye Called on lane%d eye with non-compatible centers\n"
+	},
+	{0x00530001,
+	 "PMU0: laneItr %d is disable\n"
+	},
+	{0x00540005,
+	 "PMU0: lane %d, data_idx %d, offset_idx %d, = [%d..%d]\n"
+	},
+	{0x00550003,
+	 "PMU0: lane %d, data_idx %d, offset_idx %d, offset_idx out of range!\n"
+	},
+	{0x00560003,
+	 "PMU0: mergeData[%d] = max_v_low %d, min_v_high %d\n"
+	},
+	{0x00570005,
+	 "PMU1: writing merged center (%d,%d) back to dataBlock[%d]. doDelay %d, doVoltage %d\n"
+	},
+	{0x00580005,
+	 "PMU0: applying relative (%i,%i) back to dataBlock[%d]. doDelay %d, doVoltage %d\n"
+	},
+	{0x00590002,
+	 "PMU0: drvstren %x is idx %d in the table\n"
+	},
+	{0x005a0000,
+	 "PMU4: truncating FFE drive strength search range. Out of drive strengths to check.\n"
+	},
+	{0x005b0002,
+	 "PMU5: Weak 1 changed to pull-up %5d ohms, pull-down %5d ohms\n"
+	},
+	{0x005c0002,
+	 "PMU5: Weak 0 changed to pull-up %5d ohms, pull-down %5d ohms\n"
+	},
+	{0x005d0003,
+	 "PMU0: dlyMargin L %02d R %02d, min %02d\n"
+	},
+	{0x005e0003,
+	 "PMU0: vrefMargin T %02d B %02d, min %02d\n"
+	},
+	{0x005f0002,
+	 "PMU3: new minimum VrefMargin (%d < %d) recorded\n"
+	},
+	{0x00600002,
+	 "PMU3: new minimum DlyMargin (%d < %d) recorded\n"
+	},
+	{0x00610000,
+	 "PMU0: RX finding the per-nibble, per-tg rxClkDly values\n"
+	},
+	{0x00620003,
+	 "PMU0: Merging collected eyes [%d..%d) and analyzing for nibble %d's optimal rxClkDly\n"
+	},
+	{0x00630002,
+	 "PMU0: -- centers: delay = %d, voltage = %d\n"
+	},
+	{0x00640003,
+	 "PMU0: dumping optimized eye -- centers: delay = %d (%d), voltage = %d\n"
+	},
+	{0x00650000,
+	 "PMU0: TX optimizing txDqDelays\n"
+	},
+	{0x00660001,
+	 "PMU3: Analyzing collected eye %d for a lane's optimal TxDqDly\n"
+	},
+	{0x00670001,
+	 "PMU0: eye-lane %d is disable\n"
+	},
+	{0x00680000,
+	 "PMU0: TX optimizing device voltages\n"
+	},
+	{0x00690002,
+	 "PMU0: Merging collected eyes [%d..%d) and analyzing for optimal device txVref\n"
+	},
+	{0x006a0002,
+	 "PMU0: -- centers: delay = %d, voltage = %d\n"
+	},
+	{0x006b0003,
+	 "PMU0: dumping optimized eye -- centers: delay = %d (%d), voltage = %d\n"
+	},
+	{0x006c0000,
+	 "PMU4: VrefDac (compound all TG) Bottom Top -> Center\n"
+	},
+	{0x006d0005,
+	 "PMU4: DB%d L%d   %3d   %3d  ->  %3d (DISCONNECTED)\n"
+	},
+	{0x006e0005,
+	 "PMU4: DB%d L%d   %3d   %3d  ->  %3d\n"
+	},
+	{0x006f0005,
+	 "PMU0: writing rxClkDelay for tg%d db%1d nib%1d to 0x%02x from eye[%02d] (DISCONNECTED)\n"
+	},
+	{0x00700003,
+	 "PMU: Error: Dbyte %d nibble %d's optimal rxClkDly of 0x%x is out of bounds\n"
+	},
+	{0x00710005,
+	 "PMU0: writing rxClkDelay for tg%d db%1d nib%1d to 0x%02x from eye[%02d]\n"
+	},
+	{0x00720005,
+	 "PMU0: tx voltage for tg%2d nib%2d to %3d (%d) from eye[%02d]\n"
+	},
+	{0x00730001,
+	 "PMU0: vref Sum = %d\n"
+	},
+	{0x00740004,
+	 "PMU0: tx voltage total is %d/%d -> %d -> %d\n"
+	},
+	{0x00750007,
+	 "PMU0: writing txDqDelay for tg%1d db%1d ln%1d to  0x%02x (%d coarse, %d fine) from eye[%02d] (DISCONNECTED)\n"
+	},
+	{0x00760003,
+	 "PMU: Error: Dbyte %d lane %d's optimal txDqDly of 0x%x is out of bounds\n"
+	},
+	{0x00770007,
+	 "PMU0: writing txDqDelay for tg%1d db%1d l%1d to  0x%02x (%d coarse, %d fine) from eye[%02d]\n"
+	},
+	{0x00780002,
+	 "PMU0: %d (0=tx, 1=rx) TgMask for this simulation: %x\n"
+	},
+	{0x00790001,
+	 "PMU0: eye-byte %d is disable\n"
+	},
+	{0x007a0001,
+	 "PMU0: eye-lane %d is disable\n"
+	},
+	{0x007b0003,
+	 "PMU10: Start d4_2d_lrdimm_rx_dfe dimm %d nbTap %d biasStepMode %d\n"
+	},
+	{0x007c0001,
+	 "PMU10: DB DFE feature not fully supported, F2BCEx value is 0x%02x\n"
+	},
+	{0x007d0001,
+	 "PMU10: DB DFE feature fully supported, F2BCEx value is 0x%02x\n"
+	},
+	{0x007e0002,
+	 "PMU8: Start d4_2d_lrdimm_rx_dfe for tap %d biasStepInc %d\n"
+	},
+	{0x007f0001,
+	 "PMU7: Start d4_2d_lrdimm_rx_dfe tapCoff 0x%0x\n"
+	},
+	{0x00800003,
+	 "PMU6: d4_2d_lrdimm_rx_dfe db %d lane %d area %d\n"
+	},
+	{0x00810004,
+	 "PMU7: d4_2d_lrdimm_rx_dfe db %d lane %d max area %d best bias 0x%0x\n"
+	},
+	{0x00820001,
+	 "PMU0: eye-lane %d is disable\n"
+	},
+	{0x00830003,
+	 "PMU5: Setting 0x%x improved rank weight (%4d < %4d)\n"
+	},
+	{0x00840001,
+	 "PMU4: Setting 0x%x still optimal\n"
+	},
+	{0x00850002,
+	 "PMU5: ---- Training CS%d MR%d DRAM Equalization ----\n"
+	},
+	{0x00860001,
+	 "PMU0: eye-lane %d is disable\n"
+	},
+	{0x00870003,
+	 "PMU0: eye %d weight %d allTgWeight %d\n"
+	},
+	{0x00880002,
+	 "PMU5: FFE figure of merit improved from %d to %d\n"
+	},
+	{0x00890002,
+	 "PMU: Error: LP4 rank %d cannot be mapped on tg %d\n"
+	},
+	{0x008a0000,
+	 "PMU4: Adjusting vrefDac0 for just 1->x transitions\n"
+	},
+	{0x008b0000,
+	 "PMU4: Adjusting vrefDac1 for just 0->x transitions\n"
+	},
+	{0x008c0001,
+	 "PMU5: Strong 1, pull-up %d ohms\n"
+	},
+	{0x008d0001,
+	 "PMU5: Strong 0, pull-down %d ohms\n"
+	},
+	{0x008e0000,
+	 "PMU4: Enabling weak drive strengths (FFE)\n"
+	},
+	{0x008f0000,
+	 "PMU5: Changing all weak driver strengths\n"
+	},
+	{0x00900000,
+	 "PMU5: Finalizing weak drive strengths\n"
+	},
+	{0x00910000,
+	 "PMU4: retraining with optimal drive strength settings\n"
+	},
+	{0x00920002,
+	 "PMU0: targeting CsX = %d and CsY = %d\n"
+	},
+	{0x00930001,
+	 "PMU1:prbsGenCtl:%x\n"
+	},
+	{0x00940000,
+	 "PMU1: loading 2D acsm sequence\n"
+	},
+	{0x00950000,
+	 "PMU1: loading 1D acsm sequence\n"
+	},
+	{0x00960002,
+	 "PMU3: %d memclocks @ %d to get half of 300ns\n"
+	},
+	{0x00970000,
+	 "PMU: Error: User requested MPR read pattern for read DQS training in DDR3 Mode\n"
+	},
+	{0x00980000,
+	 "PMU3: Running 1D search for left eye edge\n"
+	},
+	{0x00990001,
+	 "PMU1: In Phase Left Edge Search cs %d\n"
+	},
+	{0x009a0001,
+	 "PMU1: Out of Phase Left Edge Search cs %d\n"
+	},
+	{0x009b0000,
+	 "PMU3: Running 1D search for right eye edge\n"
+	},
+	{0x009c0001,
+	 "PMU1: In Phase Right Edge Search cs %d\n"
+	},
+	{0x009d0001,
+	 "PMU1: Out of Phase Right Edge Search cs %d\n"
+	},
+	{0x009e0001,
+	 "PMU1: mxRdLat training pstate %d\n"
+	},
+	{0x009f0001,
+	 "PMU1: mxRdLat search for cs %d\n"
+	},
+	{0x00a00001,
+	 "PMU0: MaxRdLat non consistent DtsmLoThldXingInd 0x%03x\n"
+	},
+	{0x00a10003,
+	 "PMU4: CS %d Dbyte %d worked with DFIMRL = %d DFICLKs\n"
+	},
+	{0x00a20004,
+	 "PMU3: MaxRdLat Read Lane err mask for csn %d, DFIMRL %2d DFIClks, dbyte %d = 0x%03x\n"
+	},
+	{0x00a30003,
+	 "PMU3: MaxRdLat Read Lane err mask for csn %d DFIMRL %2d, All dbytes = 0x%03x\n"
+	},
+	{0x00a40001,
+	 "PMU: Error: CS%d failed to find a DFIMRL setting that worked for all bytes during MaxRdLat training\n"
+	},
+	{0x00a50002,
+	 "PMU3: Smallest passing DFIMRL for all dbytes in CS%d = %d DFIClks\n"
+	},
+	{0x00a60000,
+	 "PMU: Error: No passing DFIMRL value found for any chip select during MaxRdLat training\n"
+	},
+	{0x00a70003,
+	 "PMU: Error: Dbyte %d lane %d txDqDly passing region is too small (width = %d)\n"
+	},
+	{0x00a80006,
+	 "PMU10: Adjusting rxclkdly db %d nib %d from %d+%d=%d->%d\n"
+	},
+	{0x00a90000,
+	 "PMU4: TxDqDly Passing Regions (EyeLeft EyeRight -> EyeCenter) Units=1/32 UI\n"
+	},
+	{0x00aa0005,
+	 "PMU4: DB %d Lane %d: %3d %3d -> %3d\n"
+	},
+	{0x00ab0002,
+	 "PMU2: TXDQ delayLeft[%2d] = %3d (DISCONNECTED)\n"
+	},
+	{0x00ac0004,
+	 "PMU2: TXDQ delayLeft[%2d] = %3d oopScaled = %3d selectOop %d\n"
+	},
+	{0x00ad0002,
+	 "PMU2: TXDQ delayRight[%2d] = %3d (DISCONNECTED)\n"
+	},
+	{0x00ae0004,
+	 "PMU2: TXDQ delayRight[%2d] = %3d oopScaled = %3d selectOop %d\n"
+	},
+	{0x00af0003,
+	 "PMU: Error: Dbyte %d lane %d txDqDly passing region is too small (width = %d)\n"
+	},
+	{0x00b00000,
+	 "PMU4: TxDqDly Passing Regions (EyeLeft EyeRight -> EyeCenter) Units=1/32 UI\n"
+	},
+	{0x00b10002,
+	 "PMU4: DB %d Lane %d: (DISCONNECTED)\n"
+	},
+	{0x00b20005,
+	 "PMU4: DB %d Lane %d: %3d %3d -> %3d\n"
+	},
+	{0x00b30002,
+	 "PMU3: Running 1D search csn %d for DM Right/NotLeft(%d) eye edge\n"
+	},
+	{0x00b40002,
+	 "PMU3: WrDq DM byte%2d with Errcnt %d\n"
+	},
+	{0x00b50002,
+	 "PMU3: WrDq DM byte%2d avgDly 0x%04x\n"
+	},
+	{0x00b60002,
+	 "PMU1: WrDq DM byte%2d with Errcnt %d\n"
+	},
+	{0x00b70001,
+	 "PMU: Error: Dbyte %d txDqDly DM training did not start inside the eye\n"
+	},
+	{0x00b80000,
+	 "PMU4: DM TxDqDly Passing Regions (EyeLeft EyeRight -> EyeCenter) Units=1/32 UI\n"
+	},
+	{0x00b90002,
+	 "PMU4: DB %d Lane %d: (DISCONNECTED)\n"
+	},
+	{0x00ba0005,
+	 "PMU4: DB %d Lane %d: %3d %3d -> %3d\n"
+	},
+	{0x00bb0003,
+	 "PMU: Error: Dbyte %d lane %d txDqDly DM passing region is too small (width = %d)\n"
+	},
+	{0x00bc0004,
+	 "PMU3: Errcnt for MRD/MWD search nib %2d delay = (%d, 0x%02x) = %d\n"
+	},
+	{0x00bd0000,
+	 "PMU3: Precharge all open banks\n"
+	},
+	{0x00be0002,
+	 "PMU: Error: Dbyte %d nibble %d found mutliple working coarse delay setting for MRD/MWD\n"
+	},
+	{0x00bf0000,
+	 "PMU4: MRD Passing Regions (coarseVal, fineLeft fineRight -> fineCenter)\n"
+	},
+	{0x00c00000,
+	 "PMU4: MWD Passing Regions (coarseVal, fineLeft fineRight -> fineCenter)\n"
+	},
+	{0x00c10004,
+	 "PMU10: Warning: DB %d nibble %d has multiple working coarse delays, %d and %d, choosing the smaller delay\n"
+	},
+	{0x00c20003,
+	 "PMU: Error: Dbyte %d nibble %d MRD/MWD passing region is too small (width = %d)\n"
+	},
+	{0x00c30006,
+	 "PMU4: DB %d nibble %d: %3d, %3d %3d -> %3d\n"
+	},
+	{0x00c40002,
+	 "PMU1: Start MRD/nMWD %d for csn %d\n"
+	},
+	{0x00c50002,
+	 "PMU2: RXDQS delayLeft[%2d] = %3d (DISCONNECTED)\n"
+	},
+	{0x00c60006,
+	 "PMU2: RXDQS delayLeft[%2d] = %3d delayOop[%2d] = %3d OopScaled %4d, selectOop %d\n"
+	},
+	{0x00c70002,
+	 "PMU2: RXDQS delayRight[%2d] = %3d (DISCONNECTED)\n"
+	},
+	{0x00c80006,
+	 "PMU2: RXDQS delayRight[%2d] = %3d delayOop[%2d] = %4d OopScaled %4d, selectOop %d\n"
+	},
+	{0x00c90000,
+	 "PMU4: RxClkDly Passing Regions (EyeLeft EyeRight -> EyeCenter)\n"
+	},
+	{0x00ca0002,
+	 "PMU4: DB %d nibble %d: (DISCONNECTED)\n"
+	},
+	{0x00cb0005,
+	 "PMU4: DB %d nibble %d: %3d %3d -> %3d\n"
+	},
+	{0x00cc0003,
+	 "PMU: Error: Dbyte %d nibble %d rxClkDly passing region is too small (width = %d)\n"
+	},
+	{0x00cd0002,
+	 "PMU0: goodbar = %d for RDWR_BLEN %d\n"
+	},
+	{0x00ce0001,
+	 "PMU3: RxClkDly = %d\n"
+	},
+	{0x00cf0005,
+	 "PMU0: db %d l %d absLane %d -> bottom %d top %d\n"
+	},
+	{0x00d00009,
+	 "PMU3: BYTE %d - %3d %3d %3d %3d %3d %3d %3d %3d\n"
+	},
+	{0x00d10002,
+	 "PMU: Error: dbyte %d lane %d's per-lane vrefDAC's had no passing region\n"
+	},
+	{0x00d20004,
+	 "PMU0: db%d l%d - %d %d\n"
+	},
+	{0x00d30002,
+	 "PMU0: goodbar = %d for RDWR_BLEN %d\n"
+	},
+	{0x00d40004,
+	 "PMU3: db%d l%d saw %d issues at rxClkDly %d\n"
+	},
+	{0x00d50003,
+	 "PMU3: db%d l%d first saw a pass->fail edge at rxClkDly %d\n"
+	},
+	{0x00d60002,
+	 "PMU3: lane %d PBD = %d\n"
+	},
+	{0x00d70003,
+	 "PMU3: db%d l%d first saw a DBI pass->fail edge at rxClkDly %d\n"
+	},
+	{0x00d80003,
+	 "PMU2: db%d l%d already passed rxPBD = %d\n"
+	},
+	{0x00d90003,
+	 "PMU0: db%d l%d, PBD = %d\n"
+	},
+	{0x00da0002,
+	 "PMU: Error: dbyte %d lane %d failed read deskew\n"
+	},
+	{0x00db0003,
+	 "PMU0: db%d l%d, inc PBD = %d\n"
+	},
+	{0x00dc0003,
+	 "PMU1: Running lane deskew on pstate %d csn %d rdDBIEn %d\n"
+	},
+	{0x00dd0000,
+	 "PMU: Error: Read deskew training has been requested, but csrMajorModeDbyte[2] is set\n"
+	},
+	{0x00de0002,
+	 "PMU1: AcsmCsMapCtrl%02d 0x%04x\n"
+	},
+	{0x00df0002,
+	 "PMU1: AcsmCsMapCtrl%02d 0x%04x\n"
+	},
+	{0x00e00001,
+	 "PMU: Error: Wrong PMU image loaded. message Block DramType = 0x%02x, but image built for D3U Type\n"
+	},
+	{0x00e10001,
+	 "PMU: Error: Wrong PMU image loaded. message Block DramType = 0x%02x, but image built for D3R Type\n"
+	},
+	{0x00e20001,
+	 "PMU: Error: Wrong PMU image loaded. message Block DramType = 0x%02x, but image built for D4U Type\n"
+	},
+	{0x00e30001,
+	 "PMU: Error: Wrong PMU image loaded. message Block DramType = 0x%02x, but image built for D4R Type\n"
+	},
+	{0x00e40001,
+	 "PMU: Error: Wrong PMU image loaded. message Block DramType = 0x%02x, but image built for D4LR Type\n"
+	},
+	{0x00e50000,
+	 "PMU: Error: Both 2t timing mode and ddr4 geardown mode specified in the messageblock's PhyCfg and MR3 fields. Only one can be enabled\n"
+	},
+	{0x00e60003,
+	 "PMU10: PHY TOTALS - NUM_DBYTES %d NUM_NIBBLES %d NUM_ANIBS %d\n"
+	},
+	{0x00e70006,
+	 "PMU10: CSA=0x%02x, CSB=0x%02x, TSTAGES=0x%04x, HDTOUT=%d, MMISC=%d DRAMFreq=%dMT DramType=LPDDR3\n"
+	},
+	{0x00e80006,
+	 "PMU10: CSA=0x%02x, CSB=0x%02x, TSTAGES=0x%04x, HDTOUT=%d, MMISC=%d DRAMFreq=%dMT DramType=LPDDR4\n"
+	},
+	{0x00e90008,
+	 "PMU10: CS=0x%02x, TSTAGES=0x%04x, HDTOUT=%d, 2T=%d, MMISC=%d AddrMirror=%d DRAMFreq=%dMT DramType=%d\n"
+	},
+	{0x00ea0004,
+	 "PMU10: Pstate%d MR0=0x%04x MR1=0x%04x MR2=0x%04x\n"
+	},
+	{0x00eb0008,
+	 "PMU10: Pstate%d MRS MR0=0x%04x MR1=0x%04x MR2=0x%04x MR3=0x%04x MR4=0x%04x MR5=0x%04x MR6=0x%04x\n"
+	},
+	{0x00ec0005,
+	 "PMU10: Pstate%d MRS MR1_A0=0x%04x MR2_A0=0x%04x MR3_A0=0x%04x MR11_A0=0x%04x\n"
+	},
+	{0x00ed0000,
+	 "PMU10: UseBroadcastMR set. All ranks and channels use MRXX_A0 for MR settings.\n"
+	},
+	{0x00ee0005,
+	 "PMU10: Pstate%d MRS MR01_A0=0x%02x MR02_A0=0x%02x MR03_A0=0x%02x MR11_A0=0x%02x\n"
+	},
+	{0x00ef0005,
+	 "PMU10: Pstate%d MRS MR12_A0=0x%02x MR13_A0=0x%02x MR14_A0=0x%02x MR22_A0=0x%02x\n"
+	},
+	{0x00f00005,
+	 "PMU10: Pstate%d MRS MR01_A1=0x%02x MR02_A1=0x%02x MR03_A1=0x%02x MR11_A1=0x%02x\n"
+	},
+	{0x00f10005,
+	 "PMU10: Pstate%d MRS MR12_A1=0x%02x MR13_A1=0x%02x MR14_A1=0x%02x MR22_A1=0x%02x\n"
+	},
+	{0x00f20005,
+	 "PMU10: Pstate%d MRS MR01_B0=0x%02x MR02_B0=0x%02x MR03_B0=0x%02x MR11_B0=0x%02x\n"
+	},
+	{0x00f30005,
+	 "PMU10: Pstate%d MRS MR12_B0=0x%02x MR13_B0=0x%02x MR14_B0=0x%02x MR22_B0=0x%02x\n"
+	},
+	{0x00f40005,
+	 "PMU10: Pstate%d MRS MR01_B1=0x%02x MR02_B1=0x%02x MR03_B1=0x%02x MR11_B1=0x%02x\n"
+	},
+	{0x00f50005,
+	 "PMU10: Pstate%d MRS MR12_B1=0x%02x MR13_B1=0x%02x MR14_B1=0x%02x MR22_B1=0x%02x\n"
+	},
+	{0x00f60002,
+	 "PMU1: AcsmOdtCtrl%02d 0x%02x\n"
+	},
+	{0x00f70002,
+	 "PMU1: AcsmCsMapCtrl%02d 0x%04x\n"
+	},
+	{0x00f80002,
+	 "PMU1: AcsmCsMapCtrl%02d 0x%04x\n"
+	},
+	{0x00f90000,
+	 "PMU1: HwtCAMode set\n"
+	},
+	{0x00fa0001,
+	 "PMU3: DDR4 infinite preamble enter/exit mode %d\n"
+	},
+	{0x00fb0002,
+	 "PMU1: In rxenb_train() csn=%d pstate=%d\n"
+	},
+	{0x00fc0000,
+	 "PMU3: Finding DQS falling edge\n"
+	},
+	{0x00fd0000,
+	 "PMU3: Searching for DDR3/LPDDR3/LPDDR4 read preamble\n"
+	},
+	{0x00fe0009,
+	 "PMU3: dtsm fails Even Nibbles : %2x %2x %2x %2x %2x %2x %2x %2x %2x\n"
+	},
+	{0x00ff0009,
+	 "PMU3: dtsm fails Odd  Nibbles : %2x %2x %2x %2x %2x %2x %2x %2x %2x\n"
+	},
+	{0x01000002,
+	 "PMU3: Preamble search pass=%d anyfail=%d\n"
+	},
+	{0x01010000,
+	 "PMU: Error: RxEn training preamble not found\n"
+	},
+	{0x01020000,
+	 "PMU3: Found DQS pre-amble\n"
+	},
+	{0x01030001,
+	 "PMU: Error: Dbyte %d couldn't find the rising edge of DQS during RxEn Training\n"
+	},
+	{0x01040000,
+	 "PMU3: RxEn aligning to first rising edge of burst\n"
+	},
+	{0x01050001,
+	 "PMU3: Decreasing RxEn delay by %d fine step to allow full capture of reads\n"
+	},
+	{0x01060001,
+	 "PMU3: MREP Delay = %d\n"
+	},
+	{0x01070003,
+	 "PMU3: Errcnt for MREP nib %2d delay = %2d is %d\n"
+	},
+	{0x01080002,
+	 "PMU3: MREP nibble %d sampled a 1 at data buffer delay %d\n"
+	},
+	{0x01090002,
+	 "PMU3: MREP nibble %d saw a 0 to 1 transition at data buffer delay %d\n"
+	},
+	{0x010a0000,
+	 "PMU2:  MREP did not find a 0 to 1 transition for all nibbles. Failing nibbles assumed to have rising edge close to fine delay 63\n"
+	},
+	{0x010b0002,
+	 "PMU2:  Rising edge found in alias window, setting rxDly for nibble %d = %d\n"
+	},
+	{0x010c0002,
+	 "PMU: Error: Failed MREP for nib %d with %d one\n"
+	},
+	{0x010d0003,
+	 "PMU2:  Rising edge not found in alias window with %d one, leaving rxDly for nibble %d = %d\n"
+	},
+	{0x010e0002,
+	 "PMU3: Training DIMM %d CSn %d\n"
+	},
+	{0x010f0001,
+	 "PMU3: exitCAtrain_lp3 cs 0x%x\n"
+	},
+	{0x01100001,
+	 "PMU3: enterCAtrain_lp3 cs 0x%x\n"
+	},
+	{0x01110001,
+	 "PMU3: CAtrain_switchmsb_lp3 cs 0x%x\n"
+	},
+	{0x01120001,
+	 "PMU3: CATrain_rdwr_lp3 looking for pattern %x\n"
+	},
+	{0x01130000,
+	 "PMU3: exitCAtrain_lp4\n"
+	},
+	{0x01140001,
+	 "PMU3: DEBUG enterCAtrain_lp4 1: cs 0x%x\n"
+	},
+	{0x01150001,
+	 "PMU3: DEBUG enterCAtrain_lp4 3: Put dbyte %d in async mode\n"
+	},
+	{0x01160000,
+	 "PMU3: DEBUG enterCAtrain_lp4 5: Send MR13 to turn on CA training\n"
+	},
+	{0x01170003,
+	 "PMU3: DEBUG enterCAtrain_lp4 7: idx = %d vref = %x mr12 = %x\n"
+	},
+	{0x01180001,
+	 "PMU3: CATrain_rdwr_lp4 looking for pattern %x\n"
+	},
+	{0x01190004,
+	 "PMU3: Phase %d CAreadbackA db:%d %x xo:%x\n"
+	},
+	{0x011a0005,
+	 "PMU3: DEBUG lp4SetCatrVref 1: cs=%d chan=%d mr12=%x vref=%d.%d%%\n"
+	},
+	{0x011b0003,
+	 "PMU3: DEBUG lp4SetCatrVref 3: mr12 = %x send vref= %x to db=%d\n"
+	},
+	{0x011c0000,
+	 "PMU10:Optimizing vref\n"
+	},
+	{0x011d0004,
+	 "PMU4:mr12:%2x cs:%d chan %d r:%4x\n"
+	},
+	{0x011e0005,
+	 "PMU3: i:%2d bstr:%2d bsto:%2d st:%d r:%d\n"
+	},
+	{0x011f0002,
+	 "Failed to find sufficient CA Vref Passing Region for CS %d ch. %d\n"
+	},
+	{0x01200005,
+	 "PMU3:Found %d.%d%% MR12:%x for cs:%d chan %d\n"
+	},
+	{0x01210002,
+	 "PMU3:Calculated %d for AtxImpedence from acx %d.\n"
+	},
+	{0x01220000,
+	 "PMU3:CA Odt impedence ==0.  Use default vref.\n"
+	},
+	{0x01230003,
+	 "PMU3:Calculated %d.%d%% for Vref MR12=0x%x.\n"
+	},
+	{0x01240000,
+	 "PMU3: CAtrain_lp\n"
+	},
+	{0x01250000,
+	 "PMU3: CAtrain Begins.\n"
+	},
+	{0x01260001,
+	 "PMU3: CAtrain_lp testing dly %d\n"
+	},
+	{0x01270001,
+	 "PMU5: CA bitmap dump for cs %x\n"
+	},
+	{0x01280001,
+	 "PMU5: CAA%d "
+	},
+	{0x01290001, "%02x"
+	},
+	{0x012a0000, "\n"
+	},
+	{0x012b0001,
+	 "PMU5: CAB%d "
+	},
+	{0x012c0001, "%02x"
+	},
+	{0x012d0000, "\n"
+	},
+	{0x012e0003,
+	 "PMU3: anibi=%d, anibichan[anibi]=%d ,chan=%d\n"
+	},
+	{0x012f0001, "%02x"
+	},
+	{0x01300001, "\nPMU3:Raw CA setting :%x"
+	},
+	{0x01310002, "\nPMU3:ATxDly setting:%x margin:%d\n"
+	},
+	{0x01320002, "\nPMU3:InvClk ATxDly setting:%x margin:%d\n"
+	},
+	{0x01330000, "\nPMU3:No Range found!\n"
+	},
+	{0x01340003,
+	 "PMU3: 2 anibi=%d, anibichan[anibi]=%d ,chan=%d"
+	},
+	{0x01350002, "\nPMU3: no neg clock => CA setting anib=%d, :%d\n"
+	},
+	{0x01360001,
+	 "PMU3:Normal margin:%d\n"
+	},
+	{0x01370001,
+	 "PMU3:Inverted margin:%d\n"
+	},
+	{0x01380000,
+	 "PMU3:Using Inverted clock\n"
+	},
+	{0x01390000,
+	 "PMU3:Using normal clk\n"
+	},
+	{0x013a0003,
+	 "PMU3: 3 anibi=%d, anibichan[anibi]=%d ,chan=%d\n"
+	},
+	{0x013b0002,
+	 "PMU3: Setting ATxDly for anib %x to %x\n"
+	},
+	{0x013c0000,
+	 "PMU: Error: CA Training Failed.\n"
+	},
+	{0x013d0000,
+	 "PMU1: Writing MRs\n"
+	},
+	{0x013e0000,
+	 "PMU4:Using MR12 values from 1D CA VREF training.\n"
+	},
+	{0x013f0000,
+	 "PMU3:Writing all MRs to fsp 1\n"
+	},
+	{0x01400000,
+	 "PMU10:Lp4Quickboot mode.\n"
+	},
+	{0x01410000,
+	 "PMU3: Writing MRs\n"
+	},
+	{0x01420001,
+	 "PMU10: Setting boot clock divider to %d\n"
+	},
+	{0x01430000,
+	 "PMU3: Resetting DRAM\n"
+	},
+	{0x01440000,
+	 "PMU3: setup for RCD initalization\n"
+	},
+	{0x01450000,
+	 "PMU3: pmu_exit_SR from dev_init()\n"
+	},
+	{0x01460000,
+	 "PMU3: initializing RCD\n"
+	},
+	{0x01470000,
+	 "PMU10: **** Executing 2D Image ****\n"
+	},
+	{0x01480001,
+	 "PMU10: **** Start DDR4 Training. PMU Firmware Revision 0x%04x ****\n"
+	},
+	{0x01490001,
+	 "PMU10: **** Start DDR3 Training. PMU Firmware Revision 0x%04x ****\n"
+	},
+	{0x014a0001,
+	 "PMU10: **** Start LPDDR3 Training. PMU Firmware Revision 0x%04x ****\n"
+	},
+	{0x014b0001,
+	 "PMU10: **** Start LPDDR4 Training. PMU Firmware Revision 0x%04x ****\n"
+	},
+	{0x014c0000,
+	 "PMU: Error: Mismatched internal revision between DCCM and ICCM images\n"
+	},
+	{0x014d0001,
+	 "PMU10: **** Testchip %d Specific Firmware ****\n"
+	},
+	{0x014e0000,
+	 "PMU1: LRDIMM with EncodedCS mode, one DIMM\n"
+	},
+	{0x014f0000,
+	 "PMU1: LRDIMM with EncodedCS mode, two DIMMs\n"
+	},
+	{0x01500000,
+	 "PMU1: RDIMM with EncodedCS mode, one DIMM\n"
+	},
+	{0x01510000,
+	 "PMU2: Starting LRDIMM MREP training for all ranks\n"
+	},
+	{0x01520000,
+	 "PMU199: LRDIMM MREP training for all ranks completed\n"
+	},
+	{0x01530000,
+	 "PMU2: Starting LRDIMM DWL training for all ranks\n"
+	},
+	{0x01540000,
+	 "PMU199: LRDIMM DWL training for all ranks completed\n"
+	},
+	{0x01550000,
+	 "PMU2: Starting LRDIMM MRD training for all ranks\n"
+	},
+	{0x01560000,
+	 "PMU199: LRDIMM MRD training for all ranks completed\n"
+	},
+	{0x01570000,
+	 "PMU2: Starting RXEN training for all ranks\n"
+	},
+	{0x01580000,
+	 "PMU2: Starting write leveling fine delay training for all ranks\n"
+	},
+	{0x01590000,
+	 "PMU2: Starting LRDIMM MWD training for all ranks\n"
+	},
+	{0x015a0000,
+	 "PMU199: LRDIMM MWD training for all ranks completed\n"
+	},
+	{0x015b0000,
+	 "PMU2: Starting write leveling fine delay training for all ranks\n"
+	},
+	{0x015c0000,
+	 "PMU2: Starting read deskew training\n"
+	},
+	{0x015d0000,
+	 "PMU2: Starting SI friendly 1d RdDqs training for all ranks\n"
+	},
+	{0x015e0000,
+	 "PMU2: Starting write leveling coarse delay training for all ranks\n"
+	},
+	{0x015f0000,
+	 "PMU2: Starting 1d WrDq training for all ranks\n"
+	},
+	{0x01600000,
+	 "PMU2: Running DQS2DQ Oscillator for all ranks\n"
+	},
+	{0x01610000,
+	 "PMU2: Starting again read deskew training but with PRBS\n"
+	},
+	{0x01620000,
+	 "PMU2: Starting 1d RdDqs training for all ranks\n"
+	},
+	{0x01630000,
+	 "PMU2: Starting again 1d WrDq training for all ranks\n"
+	},
+	{0x01640000,
+	 "PMU2: Starting MaxRdLat training\n"
+	},
+	{0x01650000,
+	 "PMU2: Starting 2d WrDq training for all ranks\n"
+	},
+	{0x01660000,
+	 "PMU2: Starting 2d RdDqs training for all ranks\n"
+	},
+	{0x01670002,
+	 "PMU3:read_fifo %x %x\n"
+	},
+	{0x01680001,
+	 "PMU: Error: Invalid PhyDrvImpedance of 0x%x specified in message block.\n"
+	},
+	{0x01690001,
+	 "PMU: Error: Invalid PhyOdtImpedance of 0x%x specified in message block.\n"
+	},
+	{0x016a0001,
+	 "PMU: Error: Invalid BPZNResVal of 0x%x specified in message block.\n"
+	},
+	{0x016b0005,
+	 "PMU3: fixRxEnBackOff csn:%d db:%d dn:%d bo:%d dly:%x\n"
+	},
+	{0x016c0001,
+	 "PMU3: fixRxEnBackOff dly:%x\n"
+	},
+	{0x016d0000,
+	 "PMU3: Entering setupPpt\n"
+	},
+	{0x016e0000,
+	 "PMU3: Start lp4PopulateHighLowBytes\n"
+	},
+	{0x016f0002,
+	 "PMU3:Dbyte Detect: db%d received %x\n"
+	},
+	{0x01700002,
+	 "PMU3:getDqs2Dq read %x from dbyte %d\n"
+	},
+	{0x01710002,
+	 "PMU3:getDqs2Dq(2) read %x from dbyte %d\n"
+	},
+	{0x01720001,
+	 "PMU: Error: Dbyte %d read 0 from the DQS oscillator it is connected to\n"
+	},
+	{0x01730002,
+	 "PMU4: Dbyte %d dqs2dq = %d/32 UI\n"
+	},
+	{0x01740003,
+	 "PMU3:getDqs2Dq set dqs2dq:%d/32 ui (%d ps) from dbyte %d\n"
+	},
+	{0x01750003,
+	 "PMU3: Setting coarse delay in AtxDly chiplet %d from 0x%02x to 0x%02x\n"
+	},
+	{0x01760003,
+	 "PMU3: Clearing coarse delay in AtxDly chiplet %d from 0x%02x to 0x%02x\n"
+	},
+	{0x01770000,
+	 "PMU3: Performing DDR4 geardown sync sequence\n"
+	},
+	{0x01780000,
+	 "PMU1: Enter self refresh\n"
+	},
+	{0x01790000,
+	 "PMU1: Exit self refresh\n"
+	},
+	{0x017a0000,
+	 "PMU: Error: No dbiEnable with lp4\n"
+	},
+	{0x017b0000,
+	 "PMU: Error: No dbiDisable with lp4\n"
+	},
+	{0x017c0001,
+	 "PMU1: DDR4 update Rx DBI Setting disable %d\n"
+	},
+	{0x017d0001,
+	 "PMU1: DDR4 update 2nCk WPre Setting disable %d\n"
+	},
+	{0x017e0005,
+	 "PMU1: read_delay: db%d lane%d delays[%2d] = 0x%02x (max 0x%02x)\n"
+	},
+	{0x017f0004,
+	 "PMU1: write_delay: db%d lane%d delays[%2d] = 0x%04x\n"
+	},
+	{0x01800001,
+	 "PMU5: ID=%d -- db0  db1  db2  db3  db4  db5  db6  db7  db8  db9 --\n"
+	},
+	{0x0181000b,
+	 "PMU5: [%d]:0x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n"
+	},
+	{0x01820003,
+	 "PMU2: dump delays - pstate=%d dimm=%d csn=%d\n"
+	},
+	{0x01830000,
+	 "PMU3: Printing Mid-Training Delay Information\n"
+	},
+	{0x01840001,
+	 "PMU5: CS%d <<KEY>> 0 TrainingCntr <<KEY>> coarse(15:10) fine(9:0)\n"
+	},
+	{0x01850001,
+	 "PMU5: CS%d <<KEY>> 0 RxEnDly, 1 RxClkDly <<KEY>> coarse(10:6) fine(5:0)\n"
+	},
+	{0x01860001,
+	 "PMU5: CS%d <<KEY>> 0 TxDqsDly, 1 TxDqDly <<KEY>> coarse(9:6) fine(5:0)\n"
+	},
+	{0x01870001,
+	 "PMU5: CS%d <<KEY>> 0 RxPBDly <<KEY>> 1 Delay Unit ~= 7ps\n"
+	},
+	{0x01880000,
+	 "PMU5: all CS <<KEY>> 0 DFIMRL <<KEY>> Units = DFI clocks\n"
+	},
+	{0x01890000,
+	 "PMU5: all CS <<KEY>> VrefDACs <<KEY>> DAC(6:0)\n"
+	},
+	{0x018a0000,
+	 "PMU1: Set DMD in MR13 and wrDBI in MR3 for training\n"
+	},
+	{0x018b0000,
+	 "PMU: Error: getMaxRxen() failed to find largest rxen nibble delay\n"
+	},
+	{0x018c0003,
+	 "PMU2: getMaxRxen(): maxDly %d maxTg %d maxNib %d\n"
+	},
+	{0x018d0003,
+	 "PMU2: getRankMaxRxen(): maxDly %d Tg %d maxNib %d\n"
+	},
+	{0x018e0000,
+	 "PMU1: skipping CDD calculation in 2D image\n"
+	},
+	{0x018f0001,
+	 "PMU3: Calculating CDDs for pstate %d\n"
+	},
+	{0x01900003,
+	 "PMU3: rxFromDly[%d][%d] = %d\n"
+	},
+	{0x01910003,
+	 "PMU3: rxToDly  [%d][%d] = %d\n"
+	},
+	{0x01920003,
+	 "PMU3: rxDly    [%d][%d] = %d\n"
+	},
+	{0x01930003,
+	 "PMU3: txDly    [%d][%d] = %d\n"
+	},
+	{0x01940003,
+	 "PMU3: allFine CDD_RR_%d_%d = %d\n"
+	},
+	{0x01950003,
+	 "PMU3: allFine CDD_WW_%d_%d = %d\n"
+	},
+	{0x01960003,
+	 "PMU3: CDD_RR_%d_%d = %d\n"
+	},
+	{0x01970003,
+	 "PMU3: CDD_WW_%d_%d = %d\n"
+	},
+	{0x01980003,
+	 "PMU3: allFine CDD_RW_%d_%d = %d\n"
+	},
+	{0x01990003,
+	 "PMU3: allFine CDD_WR_%d_%d = %d\n"
+	},
+	{0x019a0003,
+	 "PMU3: CDD_RW_%d_%d = %d\n"
+	},
+	{0x019b0003,
+	 "PMU3: CDD_WR_%d_%d = %d\n"
+	},
+	{0x019c0004,
+	 "PMU3: F%dBC2x_B%d_D%d = 0x%02x\n"
+	},
+	{0x019d0004,
+	 "PMU3: F%dBC3x_B%d_D%d = 0x%02x\n"
+	},
+	{0x019e0004,
+	 "PMU3: F%dBC4x_B%d_D%d = 0x%02x\n"
+	},
+	{0x019f0004,
+	 "PMU3: F%dBC5x_B%d_D%d = 0x%02x\n"
+	},
+	{0x01a00004,
+	 "PMU3: F%dBC8x_B%d_D%d = 0x%02x\n"
+	},
+	{0x01a10004,
+	 "PMU3: F%dBC9x_B%d_D%d = 0x%02x\n"
+	},
+	{0x01a20004,
+	 "PMU3: F%dBCAx_B%d_D%d = 0x%02x\n"
+	},
+	{0x01a30004,
+	 "PMU3: F%dBCBx_B%d_D%d = 0x%02x\n"
+	},
+	{0x01a40000,
+	 "PMU10: Entering context_switch_postamble\n"
+	},
+	{0x01a50003,
+	 "PMU10: context_switch_postamble is enabled for DIMM %d, RC0A=0x%x, RC3x=0x%x\n"
+	},
+	{0x01a60000,
+	 "PMU10: Setting bcw fspace 0\n"
+	},
+	{0x01a70001,
+	 "PMU10: Sending BC0A = 0x%x\n"
+	},
+	{0x01a80001,
+	 "PMU10: Sending BC6x = 0x%x\n"
+	},
+	{0x01a90001,
+	 "PMU10: Sending RC0A = 0x%x\n"
+	},
+	{0x01aa0001,
+	 "PMU10: Sending RC3x = 0x%x\n"
+	},
+	{0x01ab0001,
+	 "PMU10: Sending RC0A = 0x%x\n"
+	},
+	{0x01ac0001,
+	 "PMU1: enter_lp3: DEBUG: pstate = %d\n"
+	},
+	{0x01ad0001,
+	 "PMU1: enter_lp3: DEBUG: dfifreqxlat_pstate = %d\n"
+	},
+	{0x01ae0001,
+	 "PMU1: enter_lp3: DEBUG: pllbypass = %d\n"
+	},
+	{0x01af0001,
+	 "PMU1: enter_lp3: DEBUG: forcecal = %d\n"
+	},
+	{0x01b00001,
+	 "PMU1: enter_lp3: DEBUG: pllmaxrange = 0x%x\n"
+	},
+	{0x01b10001,
+	 "PMU1: enter_lp3: DEBUG: dacval_out = 0x%x\n"
+	},
+	{0x01b20001,
+	 "PMU1: enter_lp3: DEBUG: pllctrl3 = 0x%x\n"
+	},
+	{0x01b30000,
+	 "PMU3: Loading DRAM with BIOS supplied MR values and entering self refresh prior to exiting PMU code.\n"
+	},
+	{0x01b40002,
+	 "PMU3: Setting DataBuffer function space of dimmcs 0x%02x to %d\n"
+	},
+	{0x01b50002,
+	 "PMU4: Setting RCW FxRC%Xx = 0x%02x\n"
+	},
+	{0x01b60002,
+	 "PMU4: Setting RCW FxRC%02x = 0x%02x\n"
+	},
+	{0x01b70001,
+	 "PMU1: DDR4 update Rd Pre Setting disable %d\n"
+	},
+	{0x01b80002,
+	 "PMU2: Setting BCW FxBC%Xx = 0x%02x\n"
+	},
+	{0x01b90002,
+	 "PMU2: Setting BCW BC%02x = 0x%02x\n"
+	},
+	{0x01ba0002,
+	 "PMU2: Setting BCW PBA mode FxBC%Xx = 0x%02x\n"
+	},
+	{0x01bb0002,
+	 "PMU2: Setting BCW PBA mode BC%02x = 0x%02x\n"
+	},
+	{0x01bc0003,
+	 "PMU4: BCW value for dimm %d, fspace %d, addr 0x%04x\n"
+	},
+	{0x01bd0002,
+	 "PMU4: DB %d, value 0x%02x\n"
+	},
+	{0x01be0000,
+	 "PMU6: WARNING MREP underflow, set to min value -2 coarse, 0 fine\n"
+	},
+	{0x01bf0004,
+	 "PMU6: LRDIMM Writing final data buffer fine delay value nib %2d, trainDly %3d, fineDly code %2d, new MREP fine %2d\n"
+	},
+	{0x01c00003,
+	 "PMU6: LRDIMM Writing final data buffer fine delay value nib %2d, trainDly %3d, fineDly code %2d\n"
+	},
+	{0x01c10003,
+	 "PMU6: LRDIMM Writing data buffer fine delay type %d nib %2d, code %2d\n"
+	},
+	{0x01c20002,
+	 "PMU6: Writing final data buffer coarse delay value dbyte %2d, coarse = 0x%02x\n"
+	},
+	{0x01c30003,
+	 "PMU4: data 0x%04x at MB addr 0x%08x saved at CSR addr 0x%08x\n"
+	},
+	{0x01c40003,
+	 "PMU4: data 0x%04x at MB addr 0x%08x restored from CSR addr 0x%08x\n"
+	},
+	{0x01c50003,
+	 "PMU4: data 0x%04x at MB addr 0x%08x saved at CSR addr 0x%08x\n"
+	},
+	{0x01c60003,
+	 "PMU4: data 0x%04x at MB addr 0x%08x restored from CSR addr 0x%08x\n"
+	},
+	{0x01c70001,
+	 "PMU3: Update BC00, BC01, BC02 for rank-dimm 0x%02x\n"
+	},
+	{0x01c80000,
+	 "PMU3: Writing D4 RDIMM RCD Control words F0RC00 -> F0RC0F\n"
+	},
+	{0x01c90000,
+	 "PMU3: Disable parity in F0RC0E\n"
+	},
+	{0x01ca0000,
+	 "PMU3: Writing D4 RDIMM RCD Control words F1RC00 -> F1RC05\n"
+	},
+	{0x01cb0000,
+	 "PMU3: Writing D4 RDIMM RCD Control words F1RC1x -> F1RC9x\n"
+	},
+	{0x01cc0000,
+	 "PMU3: Writing D4 Data buffer Control words BC00 -> BC0E\n"
+	},
+	{0x01cd0002,
+	 "PMU1: setAltCL Sending MR0 0x%x cl=%d\n"
+	},
+	{0x01ce0002,
+	 "PMU1: restoreFromAltCL Sending MR0 0x%x cl=%d\n"
+	},
+	{0x01cf0002,
+	 "PMU1: restoreAcsmFromAltCL Sending MR0 0x%x cl=%d\n"
+	},
+	{0x01d00002,
+	 "PMU2: Setting D3R RC%d = 0x%01x\n"
+	},
+	{0x01d10000,
+	 "PMU3: Writing D3 RDIMM RCD Control words RC0 -> RC11\n"
+	},
+	{0x01d20002,
+	 "PMU0: VrefDAC0/1 vddqStart %d dacToVddq %d\n"
+	},
+	{0x01d30001,
+	 "PMU: Error: Messageblock phyVref=0x%x is above the limit for TSMC28's attenuated LPDDR4 receivers. Please see the pub databook\n"
+	},
+	{0x01d40001,
+	 "PMU: Error: Messageblock phyVref=0x%x is above the limit for TSMC28's attenuated DDR4 receivers. Please see the pub databook\n"
+	},
+	{0x01d50001,
+	 "PMU0: PHY VREF @ (%d/1000) VDDQ\n"
+	},
+	{0x01d60002,
+	 "PMU0: initalizing phy vrefDacs to %d ExtVrefRange %x\n"
+	},
+	{0x01d70002,
+	 "PMU0: initalizing global vref to %d range %d\n"
+	},
+	{0x01d80002,
+	 "PMU4: Setting initial device vrefDQ for CS%d to MR6 = 0x%04x\n"
+	},
+	{0x01d90003,
+	 "PMU1: In write_level_fine() csn=%d dimm=%d pstate=%d\n"
+	},
+	{0x01da0000,
+	 "PMU3: Fine write leveling hardware search increasing TxDqsDly until full bursts are seen\n"
+	},
+	{0x01db0000,
+	 "PMU4: WL normalized pos   : ........................|........................\n"
+	},
+	{0x01dc0007,
+	 "PMU4: WL margin for nib %2d: %08x%08x%08x%08x%08x%08x\n"
+	},
+	{0x01dd0000,
+	 "PMU4: WL normalized pos   : ........................|........................\n"
+	},
+	{0x01de0000,
+	 "PMU3: Exiting write leveling mode\n"
+	},
+	{0x01df0001,
+	 "PMU3: got %d for cl in load_wrlvl_acsm\n"
+	},
+	{0x01e00003,
+	 "PMU1: In write_level_coarse() csn=%d dimm=%d pstate=%d\n"
+	},
+	{0x01e10003,
+	 "PMU3: left eye edge search db:%d ln:%d dly:0x%x\n"
+	},
+	{0x01e20003,
+	 "PMU3: right eye edge search db:%d ln:%d dly:0x%x\n"
+	},
+	{0x01e30004,
+	 "PMU3: eye center db:%d ln:%d dly:0x%x (maxdq:%x)\n"
+	},
+	{0x01e40003,
+	 "PMU3: Wrote to TxDqDly db:%d ln:%d dly:0x%x\n"
+	},
+	{0x01e50003,
+	 "PMU3: Wrote to TxDqDly db:%d ln:%d dly:0x%x\n"
+	},
+	{0x01e60002,
+	 "PMU3: Coarse write leveling dbyte%2d is still failing for TxDqsDly=0x%04x\n"
+	},
+	{0x01e70002,
+	 "PMU4: Coarse write leveling iteration %d saw %d data miscompares across the entire phy\n"
+	},
+	{0x01e80000,
+	 "PMU: Error: Failed write leveling coarse\n"
+	},
+	{0x01e90001,
+	 "PMU3: got %d for cl in load_wrlvl_acsm\n"
+	},
+	{0x01ea0003,
+	 "PMU3: In write_level_coarse() csn=%d dimm=%d pstate=%d\n"
+	},
+	{0x01eb0003,
+	 "PMU3: left eye edge search db:%d ln:%d dly:0x%x\n"
+	},
+	{0x01ec0003,
+	 "PMU3: right eye edge search db: %d ln: %d dly: 0x%x\n"
+	},
+	{0x01ed0004,
+	 "PMU3: eye center db: %d ln: %d dly: 0x%x (maxdq: 0x%x)\n"
+	},
+	{0x01ee0003,
+	 "PMU3: Wrote to TxDqDly db: %d ln: %d dly: 0x%x\n"
+	},
+	{0x01ef0003,
+	 "PMU3: Wrote to TxDqDly db: %d ln: %d dly: 0x%x\n"
+	},
+	{0x01f00002,
+	 "PMU3: Coarse write leveling nibble%2d is still failing for TxDqsDly=0x%04x\n"
+	},
+	{0x01f10002,
+	 "PMU4: Coarse write leveling iteration %d saw %d data miscompares across the entire phy\n"
+	},
+	{0x01f20000,
+	 "PMU: Error: Failed write leveling coarse\n"
+	},
+	{0x01f30000,
+	 "PMU4: WL normalized pos   : ................................|................................\n"
+	},
+	{0x01f40009,
+	 "PMU4: WL margin for nib %2d: %08x%08x%08x%08x%08x%08x%08x%08x\n"
+	},
+	{0x01f50000,
+	 "PMU4: WL normalized pos   : ................................|................................\n"
+	},
+	{0x01f60001,
+	 "PMU8: Adjust margin after WL coarse to be larger than %d\n"
+	},
+	{0x01f70001,
+	 "PMU: Error: All margin after write leveling coarse are smaller than minMargin %d\n"
+	},
+	{0x01f80002,
+	 "PMU8: Decrement nib %d TxDqsDly by %d fine step\n"
+	},
+	{0x01f90003,
+	 "PMU3: In write_level_coarse() csn=%d dimm=%d pstate=%d\n"
+	},
+	{0x01fa0005,
+	 "PMU2: Write level: dbyte %d nib%d dq/dmbi %2d dqsfine 0x%04x dqDly 0x%04x\n"
+	},
+	{0x01fb0002,
+	 "PMU3: Coarse write leveling nibble%2d is still failing for TxDqsDly=0x%04x\n"
+	},
+	{0x01fc0002,
+	 "PMU4: Coarse write leveling iteration %d saw %d data miscompares across the entire phy\n"
+	},
+	{0x01fd0000,
+	 "PMU: Error: Failed write leveling coarse\n"
+	},
+	{0x01fe0001,
+	 "PMU3: DWL delay = %d\n"
+	},
+	{0x01ff0003,
+	 "PMU3: Errcnt for DWL nib %2d delay = %2d is %d\n"
+	},
+	{0x02000002,
+	 "PMU3: DWL nibble %d sampled a 1 at delay %d\n"
+	},
+	{0x02010003,
+	 "PMU3: DWL nibble %d passed at delay %d. Rising edge was at %d\n"
+	},
+	{0x02020000,
+	 "PMU2: DWL did nto find a rising edge of memclk for all nibbles. Failing nibbles assumed to have rising edge close to fine delay 63\n"
+	},
+	{0x02030002,
+	 "PMU2:  Rising edge found in alias window, setting wrlvlDly for nibble %d = %d\n"
+	},
+	{0x02040002,
+	 "PMU: Error: Failed DWL for nib %d with %d one\n"
+	},
+	{0x02050003,
+	 "PMU2:  Rising edge not found in alias window with %d one, leaving wrlvlDly for nibble %d = %d\n"
+	},
+	{0x04000000,
+	 "PMU: Error:Mailbox Buffer Overflowed.\n"
+	},
+	{0x04010000,
+	 "PMU: Error:Mailbox Buffer Overflowed.\n"
+	},
+	{0x04020000,
+	 "PMU: ***** Assertion Error - terminating *****\n"
+	},
+	{0x04030002,
+	 "PMU1: swapByte db %d by %d\n"
+	},
+	{0x04040003,
+	 "PMU3: get_cmd_dly max(%d ps, %d memclk) = %d\n"
+	},
+	{0x04050002,
+	 "PMU0: Write CSR 0x%06x 0x%04x\n"
+	},
+	{0x04060002,
+	 "PMU0: hwt_init_ppgc_prbs(): Polynomial: %x, Deg: %d\n"
+	},
+	{0x04070001,
+	 "PMU: Error: acsm_set_cmd to non existent instruction address %d\n"
+	},
+	{0x04080001,
+	 "PMU: Error: acsm_set_cmd with unknown ddr cmd 0x%x\n"
+	},
+	{0x0409000c,
+	 "PMU1: acsm_addr %02x, acsm_flgs %04x, ddr_cmd %02x, cmd_dly %02x, ddr_addr %04x, ddr_bnk %02x, ddr_cs %02x, cmd_rcnt %02x, AcsmSeq0/1/2/3 %04x %04x %04x %04x\n"
+	},
+	{0x040a0000,
+	 "PMU: Error: Polling on ACSM done failed to complete in acsm_poll_done()...\n"
+	},
+	{0x040b0000,
+	 "PMU1: acsm RUN\n"
+	},
+	{0x040c0000,
+	 "PMU1: acsm STOPPED\n"
+	},
+	{0x040d0002,
+	 "PMU1: acsm_init: acsm_mode %04x mxrdlat %04x\n"
+	},
+	{0x040e0002,
+	 "PMU: Error: setAcsmCLCWL: cl and cwl must be each >= 2 and 5, resp. CL=%d CWL=%d\n"
+	},
+	{0x040f0002,
+	 "PMU: Error: setAcsmCLCWL: cl and cwl must be each >= 5. CL=%d CWL=%d\n"
+	},
+	{0x04100002,
+	 "PMU1: setAcsmCLCWL: CASL %04d WCASL %04d\n"
+	},
+	{0x04110001,
+	 "PMU: Error: Reserved value of register F0RC0F found in message block: 0x%04x\n"
+	},
+	{0x04120001,
+	 "PMU3: Written MRS to CS=0x%02x\n"
+	},
+	{0x04130001,
+	 "PMU3: Written MRS to CS=0x%02x\n"
+	},
+	{0x04140000,
+	 "PMU3: Entering Boot Freq Mode.\n"
+	},
+	{0x04150001,
+	 "PMU: Error: Boot clock divider setting of %d is too small\n"
+	},
+	{0x04160000,
+	 "PMU3: Exiting Boot Freq Mode.\n"
+	},
+	{0x04170002,
+	 "PMU3: Writing MR%d OP=%x\n"
+	},
+	{0x04180000,
+	 "PMU: Error: Delay too large in slomo\n"
+	},
+	{0x04190001,
+	 "PMU3: Written MRS to CS=0x%02x\n"
+	},
+	{0x041a0000,
+	 "PMU3: Enable Channel A\n"
+	},
+	{0x041b0000,
+	 "PMU3: Enable Channel B\n"
+	},
+	{0x041c0000,
+	 "PMU3: Enable All Channels\n"
+	},
+	{0x041d0002,
+	 "PMU2: Use PDA mode to set MR%d with value 0x%02x\n"
+	},
+	{0x041e0001,
+	 "PMU3: Written Vref with PDA to CS=0x%02x\n"
+	},
+	{0x041f0000,
+	 "PMU1: start_cal: DEBUG: setting CalRun to 1\n"
+	},
+	{0x04200000,
+	 "PMU1: start_cal: DEBUG: setting CalRun to 0\n"
+	},
+	{0x04210001,
+	 "PMU1: lock_pll_dll: DEBUG: pstate = %d\n"
+	},
+	{0x04220001,
+	 "PMU1: lock_pll_dll: DEBUG: dfifreqxlat_pstate = %d\n"
+	},
+	{0x04230001,
+	 "PMU1: lock_pll_dll: DEBUG: pllbypass = %d\n"
+	},
+	{0x04240001,
+	 "PMU3: SaveLcdlSeed: Saving seed %d\n"
+	},
+	{0x04250000,
+	 "PMU1: in phy_defaults()\n"
+	},
+	{0x04260003,
+	 "PMU3: ACXConf:%d MaxNumDbytes:%d NumDfi:%d\n"
+	},
+	{0x04270005,
+	 "PMU1: setAltAcsmCLCWL setting cl=%d cwl=%d\n"
+	},
+};
+#endif /* DEBUG */
+#endif
diff --git a/drivers/nxp/ddr/phy-gen2/phy.c b/drivers/nxp/ddr/phy-gen2/phy.c
new file mode 100644
index 0000000..97de1ae
--- /dev/null
+++ b/drivers/nxp/ddr/phy-gen2/phy.c
@@ -0,0 +1,2669 @@
+/*
+ * Copyright 2021 NXP
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <errno.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include "csr.h"
+#include <ddr.h>
+#include "ddr4fw.h"
+#include <drivers/delay_timer.h>
+#ifdef NXP_WARM_BOOT
+#include <fspi_api.h>
+#endif
+#include "input.h"
+#include <lib/mmio.h>
+#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#ifdef DDR_PHY_DEBUG
+#include "messages.h"
+#endif
+#ifdef NXP_WARM_BOOT
+#include "phy.h"
+#endif
+#include "pie.h"
+
+#define TIMEOUTDEFAULT 500
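+/*
+ * Flatten the PHY CSR address space: each block type has 'n' 4KB-sized
+ * instances per pstate placed after a fixed page base 'c', so the page
+ * index is (pstate * n) + instance + c and 'offset' selects the
+ * register within that page.
+ */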
+#define MAP_PHY_ADDR(pstate, n, instance, offset, c) \
+		((((pstate * n) + instance + c) << 12) + offset)
+
+static uint32_t map_phy_addr_space(uint32_t addr)
+{
+	/* 23 bit addressing */
+	uint32_t pstate =     (addr & U(0x700000)) >> 20U; /* bit 22:20 */
+	uint32_t block_type = (addr & U(0x0f0000)) >> 16U; /* bit 19:16 */
+	uint32_t instance =   (addr & U(0x00f000)) >> 12U; /* bit 15:12 */
+	uint32_t offset =     (addr & U(0x000fff));        /* bit 11:0 */
+
+	switch (block_type) {
+	case 0x0: /* 0x0 : ANIB */
+		return MAP_PHY_ADDR(pstate, 12, instance, offset, 0);
+	case 0x1: /* 0x1 : DBYTE */
+		return MAP_PHY_ADDR(pstate, 10, instance, offset, 0x30);
+	case 0x2: /* 0x2 : MASTER */
+		return MAP_PHY_ADDR(pstate, 1, 0, offset, 0x58);
+	case 0x4: /* 0x4 : ACSM */
+		return MAP_PHY_ADDR(pstate, 1, 0, offset, 0x5c);
+	case 0x5: /* 0x5 : μCTL Memory */
+		return MAP_PHY_ADDR(pstate, 0, instance, offset, 0x60);
+	case 0x7: /* 0x7 : PPGC */
+		return MAP_PHY_ADDR(pstate, 0, 0, offset, 0x68);
+	case 0x9: /* 0x9 : INITENG */
+		return MAP_PHY_ADDR(pstate, 1, 0, offset, 0x69);
+	case 0xc: /* 0xC : DRTUB */
+		return MAP_PHY_ADDR(pstate, 0, 0, offset, 0x6d);
+	case 0xd: /* 0xD : APB Only */
+		return MAP_PHY_ADDR(pstate, 0, 0, offset, 0x6e);
+	default:
+		printf("ERR: Invalid block_type = 0x%x\n", block_type);
+		return 0;
+	}
+}
+
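+/*
+ * Convert a 23-bit PHY register address into a CPU pointer. The compact
+ * index is scaled by 4 because each 16-bit CSR occupies a 32-bit-aligned
+ * slot in the memory-mapped APB window.
+ */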
+static inline uint16_t *phy_io_addr(void *phy, uint32_t addr)
+{
+	return phy + (map_phy_addr_space(addr) << 2);
+}
+
+static inline void phy_io_write16(uint16_t *phy, uint32_t addr, uint16_t data)
+{
+	mmio_write_16((uintptr_t)phy_io_addr(phy, addr), data);
+#ifdef DEBUG_PHY_IO
+	printf("0x%06x,0x%x\n", addr, data);
+#endif
+}
+
+static inline uint16_t phy_io_read16(uint16_t *phy, uint32_t addr)
+{
+	uint16_t reg = mmio_read_16((uintptr_t) phy_io_addr(phy, addr));
+
+#ifdef DEBUG_PHY_IO
+	printf("R: 0x%06x,0x%x\n", addr, reg);
+#endif
+
+	return reg;
+}
+
+#ifdef NXP_APPLY_MAX_CDD
+
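+/*
+ * The training firmware publishes its Critical Delay Difference (CDD)
+ * results in the message block; 60 bytes are read starting at this CSR
+ * address (see the per-byte layout documented in get_cdd_val() below).
+ */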
+#define CDD_VAL_READ_ADDR (0x054012)
+#define CDD_DATA_LEN    (60)
+
+static void read_phy_reg(uint16_t *phy, uint32_t addr,
+		uint16_t *buf, uint32_t len)
+{
+	uint32_t i = 0U;
+
+	for (i = 0U; i < len/2; i++) {
+		buf[i] = phy_io_read16(phy, (addr + i));
+	}
+}
+
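+/* Translate the chip-select-in-use mask into the number of populated ranks. */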
+static uint32_t findrank(uint32_t cs_in_use)
+{
+	uint32_t val = 0U;
+
+	switch (cs_in_use) {
+	case U(0xf):
+		val = 4U;
+		break;
+	case U(0x3):
+		val = 2U;
+		break;
+	case U(0x1):
+		val = 1U;
+		break;
+	default:
+		printf("Error - Invalid cs_in_use value\n");
+	}
+	return val;
+}
+
+static uint8_t findmax(uint8_t *buf, uint32_t len)
+{
+	uint8_t max = 0U;
+	uint32_t i = 0U;
+
+	for (i = 0U; i < len; i++) {
+		if (buf[i] > max) {
+			max = buf[i];
+		}
+	}
+
+	return max;
+}
+
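+/*
+ * Read the CDD (critical delay difference) results left by the training
+ * firmware at CDD_VAL_READ_ADDR on every controller, take the worst-case
+ * values for the active rank count, and fold them into the timing fields
+ * held in *tcfg0 and *tcfg4, never lowering what is already programmed.
+ */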
+static void get_cdd_val(uint16_t **phy_ptr, uint32_t rank, uint32_t freq,
+		uint32_t *tcfg0, uint32_t *tcfg4)
+{
+	uint8_t cdd[CDD_DATA_LEN+4] = {0U};
+	uint32_t i, val = 0U;
+	uint16_t *phy;
+	uint8_t buf[16] = {U(0x0)};
+	uint8_t trr = 0U, tww = 0U, trw = 0U, twr = 0U;
+	uint8_t rrmax = 0U, wwmax = 0U, rwmax = 0U, wrmax = 0U;
+	uint8_t tmp = U(0x0);
+	uint8_t *c =  NULL;
+
+	for (i = 0U; i < NUM_OF_DDRC; i++) {
+
+		phy = phy_ptr[i];
+		if (phy == NULL) {
+			continue;
+		}
+
+		phy_io_write16(phy, t_apbonly |
+				csr_micro_cont_mux_sel_addr, U(0x0));
+
+		read_phy_reg(phy, CDD_VAL_READ_ADDR,
+				(uint16_t *)&cdd, CDD_DATA_LEN);
+
+		phy_io_write16(phy, t_apbonly |
+				csr_micro_cont_mux_sel_addr, U(0x1));
+
+	/* CDD values and address
+	 *
+	 *   0x054012    0x24    cdd[0]  CDD[X][X]
+	 *   0x054012    0x25    cdd[1]  RR[3][2]
+	 *   0x054013    0x26    cdd[2]  RR[3][1]
+	 *   0x054013    0x27    cdd[3]  RR[3][0]
+	 *   0x054014    0x28    cdd[4]  RR[2][3]
+	 *   0x054014    0x29    cdd[5]  RR[2][1]
+	 *   0x054015    0x2a    cdd[6]  RR[2][0]
+	 *   0x054015    0x2b    cdd[7]  RR[1][3]
+	 *   0x054016    0x2c    cdd[8]  RR[1][2]
+	 *   0x054016    0x2d    cdd[9]  RR[1][0]
+	 *   0x054017    0x2e    cdd[10] RR[0][3]
+	 *   0x054017    0x2f    cdd[11] RR[0][2]
+	 *   0x054018    0x30    cdd[12] RR[0][1]
+	 *
+	 *   0x054018    0x31    cdd[13] WW[3][2]
+	 *   0x054019    0x32    cdd[14] WW[3][1]
+	 *   0x054019    0x33    cdd[15] WW[3][0]
+	 *   0x05401a    0x34    cdd[16] WW[2][3]
+	 *   0x05401a    0x35    cdd[17] WW[2][1]
+	 *   0x05401b    0x36    cdd[18] WW[2][0]
+	 *   0x05401b    0x37    cdd[19] WW[1][3]
+	 *   0x05401c    0x38    cdd[20] WW[1][2]
+	 *   0x05401c    0x39    cdd[21] WW[1][0]
+	 *   0x05401d    0x3a    cdd[22] WW[0][3]
+	 *   0x05401d    0x3b    cdd[23] WW[0][2]
+	 *   0x05401e    0x3c    cdd[24] WW[0][1]
+	 *
+	 *   0x05401e    0x3d    cdd[25] RW[3][3]
+	 *   0x05401f    0x3e    cdd[26] RW[3][2]
+	 *   0x05401f    0x3f    cdd[27] RW[3][1]
+	 *   0x054020    0x40    cdd[28] RW[3][0]
+	 *   0x054020    0x41    cdd[29] RW[2][3]
+	 *   0x054021    0x42    cdd[30] RW[2][2]
+	 *   0x054021    0x43    cdd[31] RW[2][1]
+	 *   0x054022    0x44    cdd[32] RW[2][0]
+	 *   0x054022    0x45    cdd[33] RW[1][3]
+	 *   0x054023    0x46    cdd[34] RW[1][2]
+	 *   0x054023    0x47    cdd[35] RW[1][1]
+	 *   0x054024    0x48    cdd[36] RW[1][0]
+	 *   0x054024    0x49    cdd[37] RW[0][3]
+	 *   0x054025    0x4a    cdd[38] RW[0][2]
+	 *   0x054025    0x4b    cdd[39] RW[0][1]
+	 *   0x054026    0x4c    cdd[40] RW[0][0]
+	 *
+	 *   0x054026    0x4d    cdd[41] WR[3][3]
+	 *   0x054027    0x4e    cdd[42] WR[3][2]
+	 *   0x054027    0x4f    cdd[43] WR[3][1]
+	 *   0x054028    0x50    cdd[44] WR[3][0]
+	 *   0x054028    0x51    cdd[45] WR[2][3]
+	 *   0x054029    0x52    cdd[46] WR[2][2]
+	 *   0x054029    0x53    cdd[47] WR[2][1]
+	 *   0x05402a    0x54    cdd[48] WR[2][0]
+	 *   0x05402a    0x55    cdd[49] WR[1][3]
+	 *   0x05402b    0x56    cdd[50] WR[1][2]
+	 *   0x05402b    0x57    cdd[51] WR[1][1]
+	 *   0x05402c    0x58    cdd[52] WR[1][0]
+	 *   0x05402c    0x59    cdd[53] WR[0][3]
+	 *   0x05402d    0x5a    cdd[54] WR[0][2]
+	 *   0x05402d    0x5b    cdd[55] WR[0][1]
+	 *   0x05402e    0x5c    cdd[56] WR[0][0]
+	 *   0x05402e    0x5d    cdd[57] CDD[Y][Y]
+	 */
+
+		switch (rank) {
+		case 1U:
+			tmp = rwmax;
+			rwmax = cdd[40];
+			if (tmp > rwmax) {
+				rwmax = tmp;
+			}
+
+			tmp = wrmax;
+			wrmax = cdd[56];
+			if (tmp > wrmax) {
+				wrmax = tmp;
+			}
+
+			break;
+
+		case 2U:
+			buf[0] = cdd[12];
+			buf[1] = cdd[9];
+			tmp = rrmax;
+			rrmax = findmax(buf, 2U);
+			if (tmp > rrmax) {
+				rrmax = tmp;
+			}
+
+			buf[0] = cdd[24];
+			buf[1] = cdd[21];
+			tmp = wwmax;
+			wwmax = findmax(buf, 2U);
+			if (tmp > wwmax) {
+				wwmax = tmp;
+			}
+
+			buf[0] = cdd[40];
+			buf[1] = cdd[39];
+			buf[2] = cdd[36];
+			buf[3] = cdd[35];
+			tmp = rwmax;
+			rwmax = findmax(buf, 4U);
+			if (tmp > rwmax) {
+				rwmax = tmp;
+			}
+
+			buf[0] = cdd[56];
+			buf[1] = cdd[55];
+			buf[2] = cdd[52];
+			buf[3] = cdd[51];
+			tmp = wrmax;
+			wrmax = findmax(buf, 4U);
+			if (tmp > wrmax) {
+				wrmax = tmp;
+			}
+
+			break;
+
+		case 4U:
+			tmp = rrmax;
+			c = &cdd[1];
+			rrmax = findmax(c, 12U);
+			if (tmp > rrmax) {
+				rrmax = tmp;
+			}
+
+			tmp = wwmax;
+			c = &cdd[13];
+			wwmax = findmax(c, 12U);
+			if (tmp > wwmax) {
+				wwmax = tmp;
+			}
+
+			tmp = rwmax;
+			c = &cdd[25];
+			rwmax = findmax(c, 16U);
+			if (tmp > rwmax) {
+				rwmax = tmp;
+			}
+
+			tmp = wrmax;
+			c = &cdd[41];
+			wrmax = findmax(c, 16U);
+			if (tmp > wrmax) {
+				wrmax = tmp;
+			}
+
+			break;
+
+		}
+	}
+
+	rrmax += 3U;
+	wwmax += 4U;
+
+	if (wwmax > 7U) {
+		wwmax = 7U;
+	}
+
+	if (rrmax > 7U) {
+		rrmax = 7U;
+	}
+
+	if (wrmax > U(0xf)) {
+		wrmax = 0U;
+	}
+
+	if (rwmax > U(0x7)) {
+		rwmax = U(0x7);
+	}
+
+	val = *tcfg0;
+	tww = (val >> 24U) & U(0x3);
+	trr = (val >> 26U) & U(0x3);
+	twr = (val >> 28U) & U(0x3);
+	trw = (val >> 30U) & U(0x3);
+
+	val = *tcfg4;
+	tww = tww | (((val >> 8U) & U(0x1)) << 2U);
+	trr = trr | (((val >> 10U) & U(0x1)) << 2U);
+	twr = twr | (((val >> 12U) & U(0x1)) << 2U);
+	trw = trw | (((val >> 14U) & U(0x3)) << 2U);
+
+	if (trr > rrmax) {
+		rrmax = trr;
+	}
+
+	if (tww > wwmax) {
+		wwmax = tww;
+	}
+
+	if (trw > rwmax) {
+		rwmax = trw;
+	}
+
+	if (twr > wrmax) {
+		wrmax = twr;
+	}
+
+	debug("CDD rrmax %x wwmax %x rwmax %x wrmax %x\n",
+			rrmax, wwmax, rwmax, wrmax);
+
+	val = ((wwmax & U(0x3)) << 24U)
+		| ((rrmax & U(0x3)) << 26U)
+		| ((wrmax & U(0x3)) << 28U)
+		| ((rwmax & U(0x3)) << 30U);
+
+	*tcfg0 = (*tcfg0 & U(0x00FFFFFF)) | (val);
+
+	val = (((wwmax >> 2U) & U(0x1)) << 8U)
+		| (((rrmax >> 2U) & U(0x1)) << 10U)
+		| (((wrmax >> 2U) & U(0x1)) << 12U)
+		| (((rwmax >> 2U) & U(0x3)) << 14U);
+
+	*tcfg4 = (*tcfg4 & U(0xffff00ff)) | val;
+}
+#endif
+
+#ifdef NXP_WARM_BOOT
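+/*
+ * Save the 1D (and, when train2d is set, 2D) training register values of
+ * every PHY to flash through the xspi API so a warm boot can skip
+ * retraining. The sector at address_to_store is erased first and each PHY
+ * gets its own offset within it.
+ */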
+int save_phy_training_values(uint16_t **phy_ptr, uint32_t address_to_store,
+		uint32_t num_of_phy, int train2d)
+{
+	uint16_t *phy = NULL, value = 0x0;
+	uint32_t size = 1U, num_of_regs = 1U, phy_store = 0U;
+	int i = 0, j = 0, ret = -EINVAL;
+
+	ret = xspi_sector_erase(address_to_store, PHY_ERASE_SIZE);
+	if (ret != 0) {
+		return -EINVAL;
+	}
+
+	for (j = 0; j < num_of_phy; j++) {
+		/* Save training values of all PHYs */
+		phy = phy_ptr[j];
+		size = sizeof(training_1D_values);
+		num_of_regs = ARRAY_SIZE(training_1D_values);
+
+		/* Enable access to the internal CSRs */
+		phy_io_write16(phy, t_apbonly |
+				csr_micro_cont_mux_sel_addr, 0x0);
+		/* Enable clocks in case they were disabled. */
+		phy_io_write16(phy, t_drtub |
+				csr_ucclk_hclk_enables_addr, 0x3);
+		if (train2d != 0) {
+		/* Address to store training values is
+		 * to be appended for next PHY
+		 */
+			phy_store = address_to_store + (j *
+					(sizeof(training_1D_values) +
+					 sizeof(training_2D_values)));
+		} else {
+			phy_store = address_to_store + (j *
+					(sizeof(training_1D_values)));
+		}
+		debug("Saving 1D Training reg val at: %d\n", phy_store);
+		for (i = 0; i < num_of_regs; i++) {
+			value = phy_io_read16(phy, training_1D_values[i].addr);
+#ifdef DEBUG_WARM_RESET
+			debug("%d. Reg: %x, value: %x PHY: %p\n", i,
+					training_1D_values[i].addr, value,
+					phy_io_addr(phy,
+						training_1D_values[i].addr));
+#endif
+			training_1D_values[i].data = value;
+		}
+		/* Storing 1D training values on flash */
+		ret = xspi_write(phy_store, (void *)training_1D_values, size);
+		if (train2d != 0) {
+			phy_store = phy_store+size;
+			size = sizeof(training_2D_values);
+			num_of_regs = ARRAY_SIZE(training_2D_values);
+			debug("Saving 2D Training reg val at:%d\n", phy_store);
+			for (i = 0; i < num_of_regs; i++) {
+				value = phy_io_read16(phy,
+						training_2D_values[i].addr);
+				training_2D_values[i].data = value;
+#ifdef DEBUG_WARM_RESET
+				debug("%d.2D addr:0x%x,val:0x%x,PHY:0x%p\n",
+						i, training_2D_values[i].addr,
+						value, phy_io_addr(phy,
+						training_2D_values[i].addr));
+#endif
+			}
+			/* Storing 2D training values on flash */
+			ret = xspi_write(phy_store, training_2D_values,
+					size);
+		}
+		/* Disable clocks in case they were disabled. */
+		phy_io_write16(phy, t_drtub |
+				csr_ucclk_hclk_enables_addr, 0x0);
+		/* Disable access to the internal CSRs */
+		phy_io_write16(phy, t_apbonly |
+				csr_micro_cont_mux_sel_addr, 0x1);
+	}
+	if (ret != 0) {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
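+/*
+ * Counterpart of save_phy_training_values(): read the saved 1D (and
+ * optionally 2D) training register values back from flash and write them
+ * into each PHY on a warm boot.
+ */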
+int restore_phy_training_values(uint16_t **phy_ptr, uint32_t address_to_restore,
+		uint32_t num_of_phy, int train2d)
+{
+	uint16_t *phy = NULL;
+	uint32_t size = 1U, num_of_regs = 1U, phy_store = 0U;
+	int i = 0, j = 0, ret = -EINVAL;
+
+	debug("Restoring Training register values\n");
+	for (j = 0; j < num_of_phy; j++) {
+		phy = phy_ptr[j];
+		size = sizeof(training_1D_values);
+		num_of_regs = ARRAY_SIZE(training_1D_values);
+		if (train2d != 0) {
+		/* The address to restore training values is
+		 * to be appended for next PHY
+		 */
+			phy_store = address_to_restore + (j *
+					(sizeof(training_1D_values) +
+					 sizeof(training_2D_values)));
+		} else {
+			phy_store = address_to_restore + (j *
+					(sizeof(training_1D_values)));
+		}
+		/* Enable access to the internal CSRs */
+		phy_io_write16(phy, t_apbonly |
+				csr_micro_cont_mux_sel_addr, 0x0);
+		/* Enable clocks in case they were disabled. */
+		phy_io_write16(phy, t_drtub |
+				csr_ucclk_hclk_enables_addr, 0x3);
+
+		/* Reading 1D training values from flash*/
+		ret = xspi_read(phy_store, (uint32_t *)training_1D_values,
+				size);
+		debug("Restoring 1D Training reg val at:%08x\n", phy_store);
+		for (i = 0; i < num_of_regs; i++) {
+			phy_io_write16(phy, training_1D_values[i].addr,
+					training_1D_values[i].data);
+#ifdef DEBUG_WARM_RESET
+			debug("%d. Reg: %x, value: %x PHY: %p\n", i,
+					training_1D_values[i].addr,
+					training_1D_values[i].data,
+					phy_io_addr(phy,
+						training_1D_values[i].addr));
+#endif
+		}
+		if (train2d != 0) {
+			phy_store = phy_store + size;
+			size = sizeof(training_2D_values);
+			num_of_regs = ARRAY_SIZE(training_2D_values);
+			/* Reading 2D training values from flash */
+			ret = xspi_read(phy_store,
+					(uint32_t *)training_2D_values,	size);
+			debug("Restoring 2D Training reg val at:%08x\n",
+					phy_store);
+			for (i = 0; i < num_of_regs; i++) {
+				phy_io_write16(phy, training_2D_values[i].addr,
+						training_2D_values[i].data);
+#ifdef DEBUG_WARM_RESET
+				debug("%d. Reg: %x, value: %x PHY: %p\n", i,
+						training_2D_values[i].addr,
+						training_2D_values[i].data,
+						phy_io_addr(phy,
+						training_2D_values[i].addr));
+#endif
+			}
+		}
+		/* Disable clocks in case they were disabled. */
+		phy_io_write16(phy, t_drtub |
+				csr_ucclk_hclk_enables_addr, 0x0);
+		/* Disable access to the internal CSRs */
+		phy_io_write16(phy, t_apbonly |
+				csr_micro_cont_mux_sel_addr, 0x1);
+	}
+	if (ret != 0) {
+		return -EINVAL;
+	}
+	return 0;
+}
+#endif
+
+static void load_pieimage(uint16_t *phy,
+			  enum dimm_types dimm_type)
+{
+	int i;
+	int size;
+	const struct pie *image = NULL;
+
+	switch (dimm_type) {
+	case UDIMM:
+	case SODIMM:
+	case NODIMM:
+		image = pie_udimm;
+		size = ARRAY_SIZE(pie_udimm);
+		break;
+	case RDIMM:
+		image = pie_rdimm;
+		size = ARRAY_SIZE(pie_rdimm);
+		break;
+	case LRDIMM:
+		image = pie_lrdimm;
+		size = ARRAY_SIZE(pie_lrdimm);
+		break;
+	default:
+		printf("Unsupported DIMM type\n");
+		break;
+	}
+
+	if (image != NULL) {
+		for (i = 0; i < size; i++)
+			phy_io_write16(phy, image[i].addr, image[i].data);
+	}
+}
+
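+/*
+ * RDIMM only: split the F0RC0A, F0RC3X and F0RC5X control words into their
+ * low 10 bits and upper 3 bits and program them into consecutive ACSM
+ * playback0/playback1 register pairs.
+ */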
+static void prog_acsm_playback(uint16_t *phy,
+			       const struct input *input, const void *msg)
+{
+	int vec;
+	const struct ddr4r1d *msg_blk;
+	uint16_t acsmplayback[2][3];
+	uint32_t f0rc0a;
+	uint32_t f0rc3x;
+	uint32_t f0rc5x;
+
+	if (input->basic.dimm_type != RDIMM) {
+		return;
+	}
+
+	msg_blk = msg;
+	f0rc0a = (msg_blk->f0rc0a_d0 & U(0xf)) | U(0xa0);
+	f0rc3x = (msg_blk->f0rc3x_d0 & U(0xff)) | U(0x300);
+	f0rc5x = (input->adv.phy_gen2_umctl_f0rc5x & U(0xff)) | U(0x500);
+
+	acsmplayback[0][0] = U(0x3ff) & f0rc0a;
+	acsmplayback[1][0] = (U(0x1c00) & f0rc0a) >> 10U;
+	acsmplayback[0][1] = U(0x3ff) & f0rc3x;
+	acsmplayback[1][1] = (U(0x1c00) & f0rc3x) >> 10U;
+	acsmplayback[0][2] = U(0x3ff) & f0rc5x;
+	acsmplayback[1][2] = (U(0x1c00) & f0rc5x) >> 10U;
+	for (vec = 0; vec < 3; vec++) {
+		phy_io_write16(phy, t_acsm | (csr_acsm_playback0x0_addr +
+			       (vec << 1)), acsmplayback[0][vec]);
+		phy_io_write16(phy, t_acsm | (csr_acsm_playback1x0_addr +
+			       (vec << 1)), acsmplayback[1][vec]);
+	}
+}
+
+static void prog_acsm_ctr(uint16_t *phy,
+			  const struct input *input)
+{
+	if (input->basic.dimm_type != RDIMM) {
+		return;
+	}
+
+	phy_io_write16(phy, t_acsm | csr_acsm_ctrl13_addr,
+		       0xf << csr_acsm_cke_enb_lsb);
+
+	phy_io_write16(phy, t_acsm | csr_acsm_ctrl0_addr,
+		       csr_acsm_par_mode_mask | csr_acsm_2t_mode_mask);
+}
+
+static void prog_cal_rate_run(uint16_t *phy,
+			  const struct input *input)
+{
+	int cal_rate;
+	int cal_interval;
+	int cal_once;
+	uint32_t addr;
+
+	cal_interval = input->adv.cal_interval;
+	cal_once = input->adv.cal_once;
+	cal_rate = 0x1 << csr_cal_run_lsb		|
+			cal_once << csr_cal_once_lsb	|
+			cal_interval << csr_cal_interval_lsb;
+	addr = t_master | csr_cal_rate_addr;
+	phy_io_write16(phy, addr, cal_rate);
+}
+
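+/*
+ * Program the seq0bdly0..3 delay counters from half the configured
+ * frequency (frq). Going by the in-line comments, seq0bdly0/1/2 hold 0.5,
+ * 1.0 and 10.0 units of frq / 4. Worked example, assuming a frequency of
+ * 1600: frq = 800, so seq0bdly0 = 100, seq0bdly1 = 200 (no low-frequency
+ * adjustment at this rate), seq0bdly2 = 2000 and seq0bdly3 = 44.
+ */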
+static void prog_seq0bdly0(uint16_t *phy,
+		    const struct input *input)
+{
+	int ps_count[4];
+	int frq;
+	uint32_t addr;
+	int lower_freq_opt = 0;
+
+	__unused const soc_info_t *soc_info;
+
+	frq = input->basic.frequency >> 1;
+	ps_count[0] = frq >> 3; /* 0.5 * frq / 4*/
+	if (input->basic.frequency < 400) {
+		lower_freq_opt = (input->basic.dimm_type == RDIMM) ? 7 : 3;
+	} else if (input->basic.frequency < 533) {
+		lower_freq_opt = (input->basic.dimm_type == RDIMM) ? 14 : 11;
+	}
+
+	/* 1.0 * frq / 4 - lower_freq */
+	ps_count[1] = (frq >> 2) - lower_freq_opt;
+	ps_count[2] = (frq << 1) +  (frq >> 1); /* 10.0 * frq / 4 */
+
+#ifdef DDR_PLL_FIX
+	soc_info = get_soc_info();
+	if (soc_info->maj_ver == 1) {
+		ps_count[0] = 0x520; /* seq0bdly0 */
+		ps_count[1] = 0xa41; /* seq0bdly1 */
+		ps_count[2] = 0x668a; /* seq0bdly2 */
+	}
+#endif
+	if (frq > 266) {
+		ps_count[3] = 44;
+	} else if (frq > 200) {
+		ps_count[3] = 33;
+	} else {
+		ps_count[3] = 16;
+	}
+
+	addr = t_master | csr_seq0bdly0_addr;
+	phy_io_write16(phy, addr, ps_count[0]);
+
+	debug("seq0bdly0 = 0x%x\n", phy_io_read16(phy, addr));
+
+	addr = t_master | csr_seq0bdly1_addr;
+	phy_io_write16(phy, addr, ps_count[1]);
+
+	debug("seq0bdly1 = 0x%x\n", phy_io_read16(phy, addr));
+
+	addr = t_master | csr_seq0bdly2_addr;
+	phy_io_write16(phy, addr, ps_count[2]);
+
+	debug("seq0bdly2 = 0x%x\n", phy_io_read16(phy, addr));
+
+	addr = t_master | csr_seq0bdly3_addr;
+	phy_io_write16(phy, addr, ps_count[3]);
+
+	debug("seq0bdly3 = 0x%x\n", phy_io_read16(phy, addr));
+}
+
+/* Only RDIMM requires msg_blk */
+static void i_load_pie(uint16_t **phy_ptr,
+		       const struct input *input,
+		       const void *msg)
+{
+	int i;
+	uint16_t *phy;
+
+	for (i = 0; i < NUM_OF_DDRC; i++) {
+		phy = phy_ptr[i];
+		if (phy == NULL) {
+			continue;
+		}
+
+		phy_io_write16(phy,
+			       t_apbonly | csr_micro_cont_mux_sel_addr,
+			       0U);
+
+		load_pieimage(phy, input->basic.dimm_type);
+
+		prog_seq0bdly0(phy, input);
+		phy_io_write16(phy, t_initeng | csr_seq0bdisable_flag0_addr,
+			       U(0x0000));
+		phy_io_write16(phy, t_initeng | csr_seq0bdisable_flag1_addr,
+			       U(0x0173));
+		phy_io_write16(phy, t_initeng | csr_seq0bdisable_flag2_addr,
+			       U(0x0060));
+		phy_io_write16(phy, t_initeng | csr_seq0bdisable_flag3_addr,
+			       U(0x6110));
+		phy_io_write16(phy, t_initeng | csr_seq0bdisable_flag4_addr,
+			       U(0x2152));
+		phy_io_write16(phy, t_initeng | csr_seq0bdisable_flag5_addr,
+			       U(0xdfbd));
+		phy_io_write16(phy, t_initeng | csr_seq0bdisable_flag6_addr,
+			       input->basic.dimm_type == RDIMM &&
+			       input->adv.phy_gen2_umctl_opt == 1U ?
+			       U(0x6000) : U(0xffff));
+		phy_io_write16(phy, t_initeng | csr_seq0bdisable_flag7_addr,
+			       U(0x6152));
+		prog_acsm_playback(phy, input, msg);		/* rdimm */
+		prog_acsm_ctr(phy, input);			/* rdimm */
+
+		phy_io_write16(phy, t_master | csr_cal_zap_addr, U(0x1));
+		prog_cal_rate_run(phy, input);
+
+		phy_io_write16(phy, t_drtub | csr_ucclk_hclk_enables_addr,
+			       input->basic.dimm_type == RDIMM ? U(0x2) : 0U);
+
+		phy_io_write16(phy, t_apbonly | csr_micro_cont_mux_sel_addr, 1U);
+	}
+}
+
+static void phy_gen2_init_input(struct input *input)
+{
+	int i;
+
+	input->adv.dram_byte_swap		= 0;
+	input->adv.ext_cal_res_val		= 0;
+	input->adv.tx_slew_rise_dq		= 0xf;
+	input->adv.tx_slew_fall_dq		= 0xf;
+	input->adv.tx_slew_rise_ac		= 0xf;
+	input->adv.tx_slew_fall_ac		= 0xf;
+	input->adv.mem_alert_en			= 0;
+	input->adv.mem_alert_puimp		= 5;
+	input->adv.mem_alert_vref_level		= 0x29;
+	input->adv.mem_alert_sync_bypass	= 0;
+	input->adv.cal_interval			= 0x9;
+	input->adv.cal_once			= 0;
+	input->adv.dis_dyn_adr_tri		= 0;
+	input->adv.is2ttiming			= 0;
+	input->adv.d4rx_preamble_length		= 0;
+	input->adv.d4tx_preamble_length		= 0;
+
+	for (i = 0; i < 7; i++) {
+		debug("mr[%d] = 0x%x\n", i, input->mr[i]);
+	}
+
+	debug("input->cs_d0 = 0x%x\n", input->cs_d0);
+	debug("input->cs_d1 = 0x%x\n", input->cs_d1);
+	debug("input->mirror = 0x%x\n", input->mirror);
+	debug("PHY ODT impedance = %d ohm\n", input->adv.odtimpedance);
+	debug("PHY DQ driver impedance = %d ohm\n", input->adv.tx_impedance);
+	debug("PHY Addr driver impedance = %d ohm\n", input->adv.atx_impedance);
+
+	for (i = 0; i < 4; i++) {
+		debug("odt[%d] = 0x%x\n", i, input->odt[i]);
+	}
+
+	if (input->basic.dimm_type == RDIMM) {
+		for (i = 0; i < 16; i++) {
+			debug("input->rcw[%d] = 0x%x\n", i, input->rcw[i]);
+		}
+		debug("input->rcw3x = 0x%x\n", input->rcw3x);
+	}
+}
+
+/*
+ * All protocols share the same base structure of message block.
+ * RDIMM and LRDIMM have more entries defined than UDIMM.
+ * Create message blocks for 1D and 2D training.
+ * Update len with message block size.
+ */
+static int phy_gen2_msg_init(void *msg_1d,
+			     void *msg_2d,
+			     const struct input *input)
+{
+	struct ddr4u1d *msg_blk = msg_1d;
+	struct ddr4u2d *msg_blk_2d = msg_2d;
+	struct ddr4r1d *msg_blk_r;
+	struct ddr4lr1d *msg_blk_lr;
+
+	switch (input->basic.dimm_type) {
+	case UDIMM:
+	case SODIMM:
+	case NODIMM:
+		msg_blk->dram_type	= U(0x2);
+		break;
+	case RDIMM:
+		msg_blk->dram_type	= U(0x4);
+		break;
+	case LRDIMM:
+		msg_blk->dram_type	= U(0x5);
+		break;
+	default:
+		ERROR("Unsupported DIMM type\n");
+		return -EINVAL;
+	}
+	msg_blk->pstate			= 0U;
+
+	/* Enable quickRd2D, a substage of read deskew, to 1D training. */
+	msg_blk->reserved00             = U(0x20);
+
+	/* Enable High-Effort WrDQ1D. */
+	msg_blk->reserved00             |= U(0x40);
+
+	/* Enable 1D extra effort training. */
+	msg_blk->reserved1c[3]		= U(0x3);
+
+	if (input->basic.dimm_type == LRDIMM) {
+		msg_blk->sequence_ctrl	= U(0x3f1f);
+	} else {
+		msg_blk->sequence_ctrl	= U(0x031f);
+	}
+	msg_blk->phy_config_override	= 0U;
+#ifdef DDR_PHY_DEBUG
+	msg_blk->hdt_ctrl		= U(0x5);
+#else
+	msg_blk->hdt_ctrl		= U(0xc9);
+#endif
+	msg_blk->msg_misc		= U(0x0);
+	msg_blk->dfimrlmargin		= U(0x1);
+	msg_blk->phy_vref		= input->vref ? input->vref : U(0x61);
+	msg_blk->cs_present		= input->cs_d0 | input->cs_d1;
+	msg_blk->cs_present_d0		= input->cs_d0;
+	msg_blk->cs_present_d1		= input->cs_d1;
+	if (input->mirror != 0) {
+		msg_blk->addr_mirror	= U(0x0a);	/* odd CS are mirrored */
+	}
+	msg_blk->share2dvref_result	= 1U;
+
+	msg_blk->acsm_odt_ctrl0		= input->odt[0];
+	msg_blk->acsm_odt_ctrl1		= input->odt[1];
+	msg_blk->acsm_odt_ctrl2		= input->odt[2];
+	msg_blk->acsm_odt_ctrl3		= input->odt[3];
+	msg_blk->enabled_dqs = (input->basic.num_active_dbyte_dfi0 +
+				input->basic.num_active_dbyte_dfi1) * 8;
+	msg_blk->x16present		= input->basic.dram_data_width == 0x10 ?
+					  msg_blk->cs_present : 0;
+	msg_blk->d4misc			= U(0x1);
+	msg_blk->cs_setup_gddec		= U(0x1);
+	msg_blk->rtt_nom_wr_park0	= 0U;
+	msg_blk->rtt_nom_wr_park1	= 0U;
+	msg_blk->rtt_nom_wr_park2	= 0U;
+	msg_blk->rtt_nom_wr_park3	= 0U;
+	msg_blk->rtt_nom_wr_park4	= 0U;
+	msg_blk->rtt_nom_wr_park5	= 0U;
+	msg_blk->rtt_nom_wr_park6	= 0U;
+	msg_blk->rtt_nom_wr_park7	= 0U;
+	msg_blk->mr0			= input->mr[0];
+	msg_blk->mr1			= input->mr[1];
+	msg_blk->mr2			= input->mr[2];
+	msg_blk->mr3			= input->mr[3];
+	msg_blk->mr4			= input->mr[4];
+	msg_blk->mr5			= input->mr[5];
+	msg_blk->mr6			= input->mr[6];
+	if ((msg_blk->mr4 & U(0x1c0)) != 0U) {
+		ERROR("Setting DRAM CAL mode is not supported\n");
+	}
+
+	msg_blk->alt_cas_l		= 0U;
+	msg_blk->alt_wcas_l		= 0U;
+
+	msg_blk->dramfreq		= input->basic.frequency * 2U;
+	msg_blk->pll_bypass_en		= input->basic.pll_bypass;
+	msg_blk->dfi_freq_ratio		= input->basic.dfi_freq_ratio == 0U ? 1U :
+					  input->basic.dfi_freq_ratio == 1U ? 2U :
+					  4U;
+	msg_blk->bpznres_val		= input->adv.ext_cal_res_val;
+	msg_blk->disabled_dbyte		= 0U;
+
+	debug("msg_blk->dram_type = 0x%x\n", msg_blk->dram_type);
+	debug("msg_blk->sequence_ctrl = 0x%x\n", msg_blk->sequence_ctrl);
+	debug("msg_blk->phy_cfg = 0x%x\n", msg_blk->phy_cfg);
+	debug("msg_blk->x16present = 0x%x\n", msg_blk->x16present);
+	debug("msg_blk->dramfreq = 0x%x\n", msg_blk->dramfreq);
+	debug("msg_blk->pll_bypass_en = 0x%x\n", msg_blk->pll_bypass_en);
+	debug("msg_blk->dfi_freq_ratio = 0x%x\n", msg_blk->dfi_freq_ratio);
+	debug("msg_blk->phy_odt_impedance = 0x%x\n",
+						msg_blk->phy_odt_impedance);
+	debug("msg_blk->phy_drv_impedance = 0x%x\n",
+						msg_blk->phy_drv_impedance);
+	debug("msg_blk->bpznres_val = 0x%x\n", msg_blk->bpznres_val);
+	debug("msg_blk->enabled_dqs = 0x%x\n", msg_blk->enabled_dqs);
+	debug("msg_blk->acsm_odt_ctrl0 = 0x%x\n", msg_blk->acsm_odt_ctrl0);
+	debug("msg_blk->acsm_odt_ctrl1 = 0x%x\n", msg_blk->acsm_odt_ctrl1);
+	debug("msg_blk->acsm_odt_ctrl2 = 0x%x\n", msg_blk->acsm_odt_ctrl2);
+	debug("msg_blk->acsm_odt_ctrl3 = 0x%x\n", msg_blk->acsm_odt_ctrl3);
+
+	/* RDIMM only */
+	if (input->basic.dimm_type == RDIMM ||
+	    input->basic.dimm_type == LRDIMM) {
+		msg_blk_r = (struct ddr4r1d *)msg_blk;
+		if (msg_blk_r->cs_present_d0 != 0U) {
+			msg_blk_r->f0rc00_d0 = input->rcw[0];
+			msg_blk_r->f0rc01_d0 = input->rcw[1];
+			msg_blk_r->f0rc02_d0 = input->rcw[2];
+			msg_blk_r->f0rc03_d0 = input->rcw[3];
+			msg_blk_r->f0rc04_d0 = input->rcw[4];
+			msg_blk_r->f0rc05_d0 = input->rcw[5];
+			msg_blk_r->f0rc06_d0 = input->rcw[6];
+			msg_blk_r->f0rc07_d0 = input->rcw[7];
+			msg_blk_r->f0rc08_d0 = input->rcw[8];
+			msg_blk_r->f0rc09_d0 = input->rcw[9];
+			msg_blk_r->f0rc0a_d0 = input->rcw[10];
+			msg_blk_r->f0rc0b_d0 = input->rcw[11];
+			msg_blk_r->f0rc0c_d0 = input->rcw[12];
+			msg_blk_r->f0rc0d_d0 = input->rcw[13];
+			msg_blk_r->f0rc0e_d0 = input->rcw[14];
+			msg_blk_r->f0rc0f_d0 = input->rcw[15];
+			msg_blk_r->f0rc3x_d0 = input->rcw3x;
+		}
+		if (msg_blk_r->cs_present_d1 != 0) {
+			msg_blk_r->f0rc00_d1 = input->rcw[0];
+			msg_blk_r->f0rc01_d1 = input->rcw[1];
+			msg_blk_r->f0rc02_d1 = input->rcw[2];
+			msg_blk_r->f0rc03_d1 = input->rcw[3];
+			msg_blk_r->f0rc04_d1 = input->rcw[4];
+			msg_blk_r->f0rc05_d1 = input->rcw[5];
+			msg_blk_r->f0rc06_d1 = input->rcw[6];
+			msg_blk_r->f0rc07_d1 = input->rcw[7];
+			msg_blk_r->f0rc08_d1 = input->rcw[8];
+			msg_blk_r->f0rc09_d1 = input->rcw[9];
+			msg_blk_r->f0rc0a_d1 = input->rcw[10];
+			msg_blk_r->f0rc0b_d1 = input->rcw[11];
+			msg_blk_r->f0rc0c_d1 = input->rcw[12];
+			msg_blk_r->f0rc0d_d1 = input->rcw[13];
+			msg_blk_r->f0rc0e_d1 = input->rcw[14];
+			msg_blk_r->f0rc0f_d1 = input->rcw[15];
+			msg_blk_r->f0rc3x_d1 = input->rcw3x;
+		}
+		if (input->basic.dimm_type == LRDIMM) {
+			msg_blk_lr = (struct ddr4lr1d *)msg_blk;
+			msg_blk_lr->bc0a_d0 = msg_blk_lr->f0rc0a_d0;
+			msg_blk_lr->bc0a_d1 = msg_blk_lr->f0rc0a_d1;
+			msg_blk_lr->f0bc6x_d0 = msg_blk_lr->f0rc3x_d0;
+			msg_blk_lr->f0bc6x_d1 = msg_blk_lr->f0rc3x_d1;
+		}
+	}
+
+	/* below is different for 1D and 2D message block */
+	if (input->basic.train2d != 0) {
+		memcpy(msg_blk_2d, msg_blk, sizeof(struct ddr4u1d));
+		/* High-Effort WrDQ1D is applicable to 2D training also */
+		msg_blk_2d->reserved00          |= U(0x40);
+		msg_blk_2d->sequence_ctrl	= U(0x0061);
+		msg_blk_2d->rx2d_train_opt	= 0U;
+		msg_blk_2d->tx2d_train_opt	= 0U;
+		msg_blk_2d->share2dvref_result	= 1U;
+		msg_blk_2d->delay_weight2d	= U(0x20);
+		msg_blk_2d->voltage_weight2d	= U(0x80);
+		debug("rx2d_train_opt %d, tx2d_train_opt %d\n",
+				msg_blk_2d->rx2d_train_opt,
+				msg_blk_2d->tx2d_train_opt);
+	}
+
+	msg_blk->phy_cfg = (((msg_blk->mr3 & U(0x8)) != 0U) ||
+				((msg_blk_2d->mr3 & 0x8) != 0U)) ? 0U
+				: input->adv.is2ttiming;
+
+	return 0;
+}
+
+static void prog_tx_pre_drv_mode(uint16_t *phy,
+				 const struct input *input)
+{
+	int lane, byte, b_addr, c_addr, p_addr;
+	int tx_slew_rate, tx_pre_p, tx_pre_n;
+	int tx_pre_drv_mode = 0x2;
+	uint32_t addr;
+
+	/* Program TxPreDrvMode with 0x2 */
+	/* FIXME: TxPreDrvMode depends on DramType? */
+	tx_pre_p = input->adv.tx_slew_rise_dq;
+	tx_pre_n = input->adv.tx_slew_fall_dq;
+	tx_slew_rate = tx_pre_drv_mode << csr_tx_pre_drv_mode_lsb	|
+		     tx_pre_p << csr_tx_pre_p_lsb			|
+		     tx_pre_n << csr_tx_pre_n_lsb;
+	p_addr = 0;
+	for (byte = 0; byte < input->basic.num_dbyte; byte++) {
+		c_addr = byte << 12;
+		for (lane = 0; lane <= 1; lane++) {
+			b_addr = lane << 8;
+			addr = p_addr | t_dbyte | c_addr | b_addr |
+					csr_tx_slew_rate_addr;
+			phy_io_write16(phy, addr, tx_slew_rate);
+		}
+	}
+}
+
+static void prog_atx_pre_drv_mode(uint16_t *phy,
+				  const struct input *input)
+{
+	int anib, c_addr;
+	int atx_slew_rate, atx_pre_p, atx_pre_n, atx_pre_drv_mode,
+		ck_anib_inst[2];
+	uint32_t addr;
+
+	atx_pre_n = input->adv.tx_slew_fall_ac;
+	atx_pre_p = input->adv.tx_slew_rise_ac;
+
+	if (input->basic.num_anib == 8) {
+		ck_anib_inst[0] = 1;
+		ck_anib_inst[1] = 1;
+	} else if (input->basic.num_anib == 10 || input->basic.num_anib == 12 ||
+	    input->basic.num_anib == 13) {
+		ck_anib_inst[0] = 4;
+		ck_anib_inst[1] = 5;
+	} else {
+		ERROR("Invalid number of aNIBs: %d\n", input->basic.num_anib);
+		return;
+	}
+
+	for (anib = 0; anib < input->basic.num_anib; anib++) {
+		c_addr = anib << 12;
+		if (anib == ck_anib_inst[0] || anib == ck_anib_inst[1]) {
+			atx_pre_drv_mode = 0;
+		} else {
+			atx_pre_drv_mode = 3;
+		}
+		atx_slew_rate = atx_pre_drv_mode << csr_atx_pre_drv_mode_lsb |
+				atx_pre_n << csr_atx_pre_n_lsb		     |
+				atx_pre_p << csr_atx_pre_p_lsb;
+		addr = t_anib | c_addr | csr_atx_slew_rate_addr;
+		phy_io_write16(phy, addr, atx_slew_rate);
+	}
+}
+
+static void prog_enable_cs_multicast(uint16_t *phy,
+				     const struct input *input)
+{
+	uint32_t addr = t_master | csr_enable_cs_multicast_addr;
+
+	if (input->basic.dimm_type != RDIMM &&
+	    input->basic.dimm_type != LRDIMM) {
+		return;
+	}
+
+	phy_io_write16(phy, addr, input->adv.cast_cs_to_cid);
+}
+
+static void prog_dfi_rd_data_cs_dest_map(uint16_t *phy,
+					 unsigned int ip_rev,
+					 const struct input *input,
+					 const struct ddr4lr1d *msg)
+{
+	const struct ddr4lr1d *msg_blk;
+	uint16_t dfi_xxdestm0 = 0U;
+	uint16_t dfi_xxdestm1 = 0U;
+	uint16_t dfi_xxdestm2 = 0U;
+	uint16_t dfi_xxdestm3 = 0U;
+	uint16_t dfi_rd_data_cs_dest_map;
+	uint16_t dfi_wr_data_cs_dest_map;
+	__unused const soc_info_t *soc_info;
+
+#ifdef ERRATA_DDR_A011396
+	/* Only apply to DDRC 5.05.00 */
+	soc_info = get_soc_info(NXP_DCFG_ADDR);
+	if ((soc_info->maj_ver == 1U) && (ip_rev == U(0x50500))) {
+		phy_io_write16(phy,
+				t_master | csr_dfi_rd_data_cs_dest_map_addr,
+				0U);
+		return;
+	}
+#endif
+
+	msg_blk = msg;
+
+	switch (input->basic.dimm_type) {
+	case UDIMM:
+	case SODIMM:
+	case NODIMM:
+		if ((msg_blk->msg_misc & U(0x40)) != 0U) {
+			dfi_rd_data_cs_dest_map = U(0xa0);
+			dfi_wr_data_cs_dest_map = U(0xa0);
+
+			phy_io_write16(phy,
+				t_master | csr_dfi_rd_data_cs_dest_map_addr,
+				dfi_rd_data_cs_dest_map);
+			phy_io_write16(phy,
+				t_master | csr_dfi_wr_data_cs_dest_map_addr,
+				dfi_wr_data_cs_dest_map);
+		}
+		break;
+	case LRDIMM:
+		if (msg->cs_present_d1 != 0U) {
+			dfi_xxdestm2 = 1U;
+			dfi_xxdestm3 = 1U;
+		}
+
+		dfi_rd_data_cs_dest_map =
+			dfi_xxdestm0 << csr_dfi_rd_destm0_lsb	|
+			dfi_xxdestm1 << csr_dfi_rd_destm1_lsb	|
+			dfi_xxdestm2 << csr_dfi_rd_destm2_lsb	|
+			dfi_xxdestm3 << csr_dfi_rd_destm3_lsb;
+		dfi_wr_data_cs_dest_map =
+			dfi_xxdestm0 << csr_dfi_wr_destm0_lsb	|
+			dfi_xxdestm1 << csr_dfi_wr_destm1_lsb	|
+			dfi_xxdestm2 << csr_dfi_wr_destm2_lsb	|
+			dfi_xxdestm3 << csr_dfi_wr_destm3_lsb;
+		phy_io_write16(phy, t_master | csr_dfi_rd_data_cs_dest_map_addr,
+				dfi_rd_data_cs_dest_map);
+		phy_io_write16(phy, t_master | csr_dfi_wr_data_cs_dest_map_addr,
+				dfi_wr_data_cs_dest_map);
+
+		break;
+	default:
+		break;
+	}
+}
+
+static void prog_pll_ctrl(uint16_t *phy,
+			   const struct input *input)
+{
+	uint32_t addr;
+	int pll_ctrl1 = 0x21; /* 000100001b */
+	int pll_ctrl4 = 0x17f; /* 101111111b */
+	int pll_test_mode = 0x24; /* 00100100b */
+
+	addr = t_master | csr_pll_ctrl1_addr;
+	phy_io_write16(phy, addr, pll_ctrl1);
+
+	debug("pll_ctrl1 = 0x%x\n", phy_io_read16(phy, addr));
+
+	addr = t_master | csr_pll_test_mode_addr;
+	phy_io_write16(phy, addr, pll_test_mode);
+
+	debug("pll_test_mode = 0x%x\n", phy_io_read16(phy, addr));
+
+	addr = t_master | csr_pll_ctrl4_addr;
+	phy_io_write16(phy, addr, pll_ctrl4);
+
+	debug("pll_ctrl4 = 0x%x\n", phy_io_read16(phy, addr));
+}
+
+static void prog_pll_ctrl2(uint16_t *phy,
+			   const struct input *input)
+{
+	int pll_ctrl2;
+	uint32_t addr = t_master | csr_pll_ctrl2_addr;
+
+	if (input->basic.frequency / 2 < 235) {
+		pll_ctrl2 = 0x7;
+	} else if (input->basic.frequency / 2 < 313) {
+		pll_ctrl2 = 0x6;
+	} else if (input->basic.frequency / 2 < 469) {
+		pll_ctrl2 = 0xb;
+	} else if (input->basic.frequency / 2 < 625) {
+		pll_ctrl2 = 0xa;
+	} else if (input->basic.frequency / 2 < 938) {
+		pll_ctrl2 = 0x19;
+	} else if (input->basic.frequency / 2 < 1067) {
+		pll_ctrl2 = 0x18;
+	} else {
+		pll_ctrl2 = 0x19;
+	}
+
+	phy_io_write16(phy, addr, pll_ctrl2);
+
+	debug("pll_ctrl2 = 0x%x\n", phy_io_read16(phy, addr));
+}
+
+static void prog_dll_lck_param(uint16_t *phy, const struct input *input)
+{
+	uint32_t addr = t_master | csr_dll_lockparam_addr;
+
+	phy_io_write16(phy, addr, U(0x212));
+	debug("dll_lck_param = 0x%x\n", phy_io_read16(phy, addr));
+}
+
+static void prog_dll_gain_ctl(uint16_t *phy, const struct input *input)
+{
+	uint32_t addr = t_master | csr_dll_gain_ctl_addr;
+
+	phy_io_write16(phy, addr, U(0x61));
+	debug("dll_gain_ctl = 0x%x\n", phy_io_read16(phy, addr));
+}
+
+static void prog_pll_pwr_dn(uint16_t *phy,
+			   const struct input *input)
+{
+	uint32_t addr;
+
+	addr = t_master | csr_pll_pwr_dn_addr;
+	phy_io_write16(phy, addr, 0U);
+
+	debug("pll_pwrdn = 0x%x\n", phy_io_read16(phy, addr));
+}
+
+static void prog_ard_ptr_init_val(uint16_t *phy,
+				  const struct input *input)
+{
+	int ard_ptr_init_val;
+	uint32_t addr = t_master | csr_ard_ptr_init_val_addr;
+
+	if (input->basic.frequency >= 933) {
+		ard_ptr_init_val = 0x2;
+	} else {
+		ard_ptr_init_val = 0x1;
+	}
+
+	phy_io_write16(phy, addr, ard_ptr_init_val);
+}
+
+static void prog_dqs_preamble_control(uint16_t *phy,
+				      const struct input *input)
+{
+	int data;
+	uint32_t addr = t_master | csr_dqs_preamble_control_addr;
+	const int wdqsextension = 0;
+	const int lp4sttc_pre_bridge_rx_en = 0;
+	const int lp4postamble_ext = 0;
+	const int lp4tgl_two_tck_tx_dqs_pre = 0;
+	const int position_dfe_init = 2;
+	const int dll_rx_preamble_mode = 1;
+	int two_tck_tx_dqs_pre = input->adv.d4tx_preamble_length;
+	int two_tck_rx_dqs_pre = input->adv.d4rx_preamble_length;
+
+	data = wdqsextension << csr_wdqsextension_lsb			|
+	       lp4sttc_pre_bridge_rx_en << csr_lp4sttc_pre_bridge_rx_en_lsb |
+	       lp4postamble_ext << csr_lp4postamble_ext_lsb		|
+	       lp4tgl_two_tck_tx_dqs_pre << csr_lp4tgl_two_tck_tx_dqs_pre_lsb |
+	       position_dfe_init << csr_position_dfe_init_lsb		|
+	       two_tck_tx_dqs_pre << csr_two_tck_tx_dqs_pre_lsb		|
+	       two_tck_rx_dqs_pre << csr_two_tck_rx_dqs_pre_lsb;
+	phy_io_write16(phy, addr, data);
+
+	data = dll_rx_preamble_mode << csr_dll_rx_preamble_mode_lsb;
+	addr = t_master | csr_dbyte_dll_mode_cntrl_addr;
+	phy_io_write16(phy, addr, data);
+}
+
+static void prog_proc_odt_time_ctl(uint16_t *phy,
+				   const struct input *input)
+{
+	int proc_odt_time_ctl;
+	uint32_t addr = t_master | csr_proc_odt_time_ctl_addr;
+
+	if (input->adv.wdqsext != 0) {
+		proc_odt_time_ctl = 0x3;
+	} else if (input->basic.frequency <= 933) {
+		proc_odt_time_ctl = 0xa;
+	} else if (input->basic.frequency <= 1200) {
+		if (input->adv.d4rx_preamble_length == 1) {
+			proc_odt_time_ctl = 0x2;
+		} else {
+			proc_odt_time_ctl = 0x6;
+		}
+	} else {
+		if (input->adv.d4rx_preamble_length == 1) {
+			proc_odt_time_ctl = 0x3;
+		} else {
+			proc_odt_time_ctl = 0x7;
+		}
+	}
+	phy_io_write16(phy, addr, proc_odt_time_ctl);
+}
+
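+/*
+ * Impedance (ohm) to drive-strength code lookup used when the hard macro
+ * version is not 4; map_impedance() returns the code of the first entry
+ * whose ohm value exceeds the requested strength.
+ */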
+static const struct impedance_mapping map[] = {
+	{	29,	0x3f	},
+	{	31,	0x3e	},
+	{	33,	0x3b	},
+	{	36,	0x3a	},
+	{	39,	0x39	},
+	{	42,	0x38	},
+	{	46,	0x1b	},
+	{	51,	0x1a	},
+	{	57,	0x19	},
+	{	64,	0x18	},
+	{	74,	0x0b	},
+	{	88,	0x0a	},
+	{	108,	0x09	},
+	{	140,	0x08	},
+	{	200,	0x03	},
+	{	360,	0x02	},
+	{	481,	0x01	},
+	{}
+};
+
+static int map_impedance(int strength)
+{
+	const struct impedance_mapping *tbl = map;
+	int val = 0;
+
+	if (strength == 0) {
+		return 0;
+	}
+
+	while (tbl->ohm != 0U) {
+		if (strength < tbl->ohm) {
+			val = tbl->code;
+			break;
+		}
+		tbl++;
+	}
+
+	return val;
+}
+
+static int map_odtstren_p(int strength, int hard_macro_ver)
+{
+	int val = -1;
+
+	if (hard_macro_ver == 4) {
+		if (strength == 0) {
+			val = 0;
+		} else if (strength == 120) {
+			val = 0x8;
+		} else if (strength == 60) {
+			val = 0x18;
+		} else if (strength == 40) {
+			val = 0x38;
+		} else {
+			printf("error: unsupported ODTStrenP %d\n", strength);
+		}
+	} else {
+		val = map_impedance(strength);
+	}
+
+	return val;
+}
+
+static void prog_tx_odt_drv_stren(uint16_t *phy,
+				  const struct input *input)
+{
+	int lane, byte, b_addr, c_addr;
+	int tx_odt_drv_stren;
+	int odtstren_p, odtstren_n;
+	uint32_t addr;
+
+	odtstren_p = map_odtstren_p(input->adv.odtimpedance,
+				input->basic.hard_macro_ver);
+	if (odtstren_p < 0) {
+		return;
+	}
+
+	odtstren_n = 0;	/* always high-z */
+	tx_odt_drv_stren = odtstren_n << csr_odtstren_n_lsb | odtstren_p;
+	for (byte = 0; byte < input->basic.num_dbyte; byte++) {
+		c_addr = byte << 12;
+		for (lane = 0; lane <= 1; lane++) {
+			b_addr = lane << 8;
+			addr = t_dbyte | c_addr | b_addr |
+				csr_tx_odt_drv_stren_addr;
+			phy_io_write16(phy, addr, tx_odt_drv_stren);
+		}
+	}
+}
+
+static int map_drvstren_fsdq_p(int strength, int hard_macro_ver)
+{
+	int val = -1;
+
+	if (hard_macro_ver == 4) {
+		if (strength == 0) {
+			val = 0x07;
+		} else if (strength == 120) {
+			val = 0x0F;
+		} else if (strength == 60) {
+			val = 0x1F;
+		} else if (strength == 40) {
+			val = 0x3F;
+		} else {
+			printf("error: unsupported drv_stren_fSDq_p %d\n",
+			       strength);
+		}
+	} else {
+		val = map_impedance(strength);
+	}
+
+	return val;
+}
+
+static int map_drvstren_fsdq_n(int strength, int hard_macro_ver)
+{
+	int val = -1;
+
+	if (hard_macro_ver == 4) {
+		if (strength == 0) {
+			val = 0x00;
+		} else if (strength == 120) {
+			val = 0x08;
+		} else if (strength == 60) {
+			val = 0x18;
+		} else if (strength == 40) {
+			val = 0x38;
+		} else {
+			printf("error: unsupported drvStrenFSDqN %d\n",
+			       strength);
+		}
+	} else {
+		val = map_impedance(strength);
+	}
+
+	return val;
+}
+
+static void prog_tx_impedance_ctrl1(uint16_t *phy,
+				    const struct input *input)
+{
+	int lane, byte, b_addr, c_addr;
+	int tx_impedance_ctrl1;
+	int drv_stren_fsdq_p, drv_stren_fsdq_n;
+	uint32_t addr;
+
+	drv_stren_fsdq_p = map_drvstren_fsdq_p(input->adv.tx_impedance,
+					input->basic.hard_macro_ver);
+	drv_stren_fsdq_n = map_drvstren_fsdq_n(input->adv.tx_impedance,
+					input->basic.hard_macro_ver);
+	tx_impedance_ctrl1 = drv_stren_fsdq_n << csr_drv_stren_fsdq_n_lsb |
+			   drv_stren_fsdq_p << csr_drv_stren_fsdq_p_lsb;
+
+	for (byte = 0; byte < input->basic.num_dbyte; byte++) {
+		c_addr = byte << 12;
+		for (lane = 0; lane <= 1; lane++) {
+			b_addr = lane << 8;
+			addr = t_dbyte | c_addr | b_addr |
+				csr_tx_impedance_ctrl1_addr;
+			phy_io_write16(phy, addr, tx_impedance_ctrl1);
+		}
+	}
+}
+
+static int map_adrv_stren_p(int strength, int hard_macro_ver)
+{
+	int val = -1;
+
+	if (hard_macro_ver == 4) {
+		if (strength == 120) {
+			val = 0x1c;
+		} else if (strength == 60) {
+			val = 0x1d;
+		} else if (strength == 40) {
+			val = 0x1f;
+		} else {
+			printf("error: unsupported aDrv_stren_p %d\n",
+			       strength);
+		}
+	} else {
+		if (strength == 120) {
+			val = 0x00;
+		} else if (strength == 60) {
+			val = 0x01;
+		} else if (strength == 40) {
+			val = 0x03;
+		} else if (strength == 30) {
+			val = 0x07;
+		} else if (strength == 24) {
+			val = 0x0f;
+		} else if (strength == 20) {
+			val = 0x1f;
+		} else {
+			printf("error: unsupported aDrv_stren_p %d\n",
+			       strength);
+		}
+	}
+
+	return val;
+}
+
+static int map_adrv_stren_n(int strength, int hard_macro_ver)
+{
+	int val = -1;
+
+	if (hard_macro_ver == 4) {
+		if (strength == 120) {
+			val = 0x00;
+		} else if (strength == 60) {
+			val = 0x01;
+		} else if (strength == 40) {
+			val = 0x03;
+		} else {
+			printf("Error: unsupported ADrvStrenP %d\n", strength);
+		}
+	} else {
+		if (strength == 120) {
+			val = 0x00;
+		} else if (strength == 60) {
+			val = 0x01;
+		} else if (strength == 40) {
+			val = 0x03;
+		} else if (strength == 30) {
+			val = 0x07;
+		} else if (strength == 24) {
+			val = 0x0f;
+		} else if (strength == 20) {
+			val = 0x1f;
+		} else {
+			printf("Error: unsupported ADrvStrenP %d\n", strength);
+		}
+	}
+
+	return val;
+}
+
+static void prog_atx_impedance(uint16_t *phy,
+			       const struct input *input)
+{
+	int anib, c_addr;
+	int atx_impedance;
+	int adrv_stren_p;
+	int adrv_stren_n;
+	uint32_t addr;
+
+	if (input->basic.hard_macro_ver == 4 &&
+	    input->adv.atx_impedance == 20) {
+		printf("Error: ATxImpedance must be 40 for HardMacroVer 4\n");
+		return;
+	}
+
+	adrv_stren_p = map_adrv_stren_p(input->adv.atx_impedance,
+					input->basic.hard_macro_ver);
+	adrv_stren_n = map_adrv_stren_n(input->adv.atx_impedance,
+					input->basic.hard_macro_ver);
+	atx_impedance = adrv_stren_n << csr_adrv_stren_n_lsb		|
+		       adrv_stren_p << csr_adrv_stren_p_lsb;
+	for (anib = 0; anib < input->basic.num_anib; anib++) {
+		c_addr = anib << 12;
+		addr = t_anib | c_addr | csr_atx_impedance_addr;
+		phy_io_write16(phy, addr, atx_impedance);
+	}
+}
+
+static void prog_dfi_mode(uint16_t *phy,
+			  const struct input *input)
+{
+	int dfi_mode;
+	uint32_t addr;
+
+	if (input->basic.dfi1exists == 1) {
+		dfi_mode = 0x5;	/* DFI1 exists but disabled */
+	} else {
+		dfi_mode = 0x1;	/* DFI1 does not physically exist */
+	}
+	addr = t_master | csr_dfi_mode_addr;
+	phy_io_write16(phy, addr, dfi_mode);
+}
+
+static void prog_acx4_anib_dis(uint16_t *phy, const struct input *input)
+{
+	uint32_t addr;
+
+	addr = t_master | csr_acx4_anib_dis_addr;
+	phy_io_write16(phy, addr, 0x0);
+	debug("%s 0x%x\n", __func__, phy_io_read16(phy, addr));
+}
+
+static void prog_dfi_camode(uint16_t *phy,
+			    const struct input *input)
+{
+	int dfi_camode = 2;
+	uint32_t addr = t_master | csr_dfi_camode_addr;
+
+	phy_io_write16(phy, addr, dfi_camode);
+}
+
+static void prog_cal_drv_str0(uint16_t *phy,
+			      const struct input *input)
+{
+	int cal_drv_str0;
+	int cal_drv_str_pd50;
+	int cal_drv_str_pu50;
+	uint32_t addr;
+
+	cal_drv_str_pu50 = input->adv.ext_cal_res_val;
+	cal_drv_str_pd50 = cal_drv_str_pu50;
+	cal_drv_str0 = cal_drv_str_pu50 << csr_cal_drv_str_pu50_lsb |
+			cal_drv_str_pd50;
+	addr = t_master | csr_cal_drv_str0_addr;
+	phy_io_write16(phy, addr, cal_drv_str0);
+}
+
+static void prog_cal_uclk_info(uint16_t *phy,
+			       const struct input *input)
+{
+	int cal_uclk_ticks_per1u_s;
+	uint32_t addr;
+
+	cal_uclk_ticks_per1u_s = input->basic.frequency >> 1;
+	if (cal_uclk_ticks_per1u_s < 24) {
+		cal_uclk_ticks_per1u_s = 24;
+	}
+
+	addr = t_master | csr_cal_uclk_info_addr;
+	phy_io_write16(phy, addr, cal_uclk_ticks_per1u_s);
+}
+
+static void prog_cal_rate(uint16_t *phy,
+			  const struct input *input)
+{
+	int cal_rate;
+	int cal_interval;
+	int cal_once;
+	uint32_t addr;
+
+	cal_interval = input->adv.cal_interval;
+	cal_once = input->adv.cal_once;
+	cal_rate = cal_once << csr_cal_once_lsb		|
+		  cal_interval << csr_cal_interval_lsb;
+	addr = t_master | csr_cal_rate_addr;
+	phy_io_write16(phy, addr, cal_rate);
+}
+
+static void prog_vref_in_global(uint16_t *phy,
+				const struct input *input,
+				const struct ddr4u1d *msg)
+{
+	int vref_in_global;
+	int global_vref_in_dac = 0;
+	int global_vref_in_sel = 0;
+	uint32_t addr;
+
+	/*
+	 * phy_vref_prcnt = msg->phy_vref / 128.0
+	 *  global_vref_in_dac = (phy_vref_prcnt - 0.345) / 0.005;
+	 */
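+	/*
+	 * Worked example, assuming the default phy_vref of 0x61 (97):
+	 * phy_vref_prcnt = 97 / 128 ~= 0.758, so global_vref_in_dac =
+	 * (97000 - 44160 + 320) / 640 = 83; the extra 320 rounds the
+	 * integer division to the nearest step.
+	 */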
+	global_vref_in_dac = (msg->phy_vref * 1000 - 345 * 128 + 320) /
+			     (5 * 128);
+
+	vref_in_global = global_vref_in_dac << csr_global_vref_in_dac_lsb |
+		       global_vref_in_sel;
+	addr = t_master | csr_vref_in_global_addr;
+	phy_io_write16(phy, addr, vref_in_global);
+}
+
+static void prog_dq_dqs_rcv_cntrl(uint16_t *phy,
+				  const struct input *input)
+{
+	int lane, byte, b_addr, c_addr;
+	int dq_dqs_rcv_cntrl;
+	int gain_curr_adj_defval = 0xb;
+	int major_mode_dbyte = 3;
+	int dfe_ctrl_defval = 0;
+	int ext_vref_range_defval = 0;
+	int sel_analog_vref = 1;
+	uint32_t addr;
+
+	dq_dqs_rcv_cntrl = gain_curr_adj_defval << csr_gain_curr_adj_lsb |
+			major_mode_dbyte << csr_major_mode_dbyte_lsb	|
+			dfe_ctrl_defval << csr_dfe_ctrl_lsb		|
+			ext_vref_range_defval << csr_ext_vref_range_lsb	|
+			sel_analog_vref << csr_sel_analog_vref_lsb;
+	for (byte = 0; byte < input->basic.num_dbyte; byte++) {
+		c_addr = byte << 12;
+		for (lane = 0; lane <= 1; lane++) {
+			b_addr = lane << 8;
+			addr = t_dbyte | c_addr | b_addr |
+					csr_dq_dqs_rcv_cntrl_addr;
+			phy_io_write16(phy, addr, dq_dqs_rcv_cntrl);
+		}
+	}
+}
+
+static void prog_mem_alert_control(uint16_t *phy,
+				   const struct input *input)
+{
+	int mem_alert_control;
+	int mem_alert_control2;
+	int malertpu_en;
+	int malertrx_en;
+	int malertvref_level;
+	int malertpu_stren;
+	int malertsync_bypass;
+	int malertdisable_val_defval = 1;
+	uint32_t addr;
+
+	if (input->basic.dram_type == DDR4 && input->adv.mem_alert_en == 1) {
+		malertpu_en = 1;
+		malertrx_en = 1;
+		malertpu_stren = input->adv.mem_alert_puimp;
+		malertvref_level = input->adv.mem_alert_vref_level;
+		malertsync_bypass = input->adv.mem_alert_sync_bypass;
+		mem_alert_control = malertdisable_val_defval << 14	|
+				  malertrx_en << 13		|
+				  malertpu_en << 12		|
+				  malertpu_stren << 8		|
+				  malertvref_level;
+		mem_alert_control2 = malertsync_bypass <<
+					csr_malertsync_bypass_lsb;
+		addr = t_master | csr_mem_alert_control_addr;
+		phy_io_write16(phy, addr, mem_alert_control);
+		addr = t_master | csr_mem_alert_control2_addr;
+		phy_io_write16(phy, addr, mem_alert_control2);
+	}
+}
+
+static void prog_dfi_freq_ratio(uint16_t *phy,
+				const struct input *input)
+{
+	int dfi_freq_ratio;
+	uint32_t addr = t_master | csr_dfi_freq_ratio_addr;
+
+	dfi_freq_ratio = input->basic.dfi_freq_ratio;
+	phy_io_write16(phy, addr, dfi_freq_ratio);
+}
+
+static void prog_tristate_mode_ca(uint16_t *phy,
+				  const struct input *input)
+{
+	int tristate_mode_ca;
+	int dis_dyn_adr_tri;
+	int ddr2tmode;
+	int ck_dis_val_def = 1;
+	uint32_t addr = t_master | csr_tristate_mode_ca_addr;
+
+	dis_dyn_adr_tri = input->adv.dis_dyn_adr_tri;
+	ddr2tmode = input->adv.is2ttiming;
+	tristate_mode_ca = ck_dis_val_def << csr_ck_dis_val_lsb	|
+			 ddr2tmode << csr_ddr2tmode_lsb		|
+			 dis_dyn_adr_tri << csr_dis_dyn_adr_tri_lsb;
+	phy_io_write16(phy, addr, tristate_mode_ca);
+}
+
+static void prog_dfi_xlat(uint16_t *phy,
+			  const struct input *input)
+{
+	uint16_t loop_vector;
+	int dfifreqxlat_dat;
+	int pllbypass_dat;
+	uint32_t addr;
+
+	/* FIXME: Shall unused P1, P2, P3 be bypassed? */
+	pllbypass_dat = input->basic.pll_bypass; /* only [0] is used */
+	for (loop_vector = 0; loop_vector < 8; loop_vector++) {
+		if (loop_vector == 0) {
+			dfifreqxlat_dat = pllbypass_dat + 0x5555;
+		} else if (loop_vector == 7) {
+			dfifreqxlat_dat = 0xf000;
+		} else {
+			dfifreqxlat_dat = 0x5555;
+		}
+		addr = t_master | (csr_dfi_freq_xlat0_addr + loop_vector);
+		phy_io_write16(phy, addr, dfifreqxlat_dat);
+	}
+}
+
+static void prog_dbyte_misc_mode(uint16_t *phy,
+				 const struct input *input,
+				 const struct ddr4u1d *msg)
+{
+	int dbyte_misc_mode;
+	int dq_dqs_rcv_cntrl1;
+	int dq_dqs_rcv_cntrl1_1;
+	int byte, c_addr;
+	uint32_t addr;
+
+	dbyte_misc_mode = 0x1 << csr_dbyte_disable_lsb;
+	dq_dqs_rcv_cntrl1 = 0x1ff << csr_power_down_rcvr_lsb		|
+			 0x1 << csr_power_down_rcvr_dqs_lsb	|
+			 0x1 << csr_rx_pad_standby_en_lsb;
+	dq_dqs_rcv_cntrl1_1 = (0x100 << csr_power_down_rcvr_lsb |
+			csr_rx_pad_standby_en_mask);
+	for (byte = 0; byte < input->basic.num_dbyte; byte++) {
+		c_addr = byte << 12;
+		if (byte <= input->basic.num_active_dbyte_dfi0 - 1) {
+			/* disable RDBI lane if not used. */
+			if ((input->basic.dram_data_width != 4) &&
+				(((msg->mr5 >> 12) & 0x1) == 0)) {
+				addr = t_dbyte
+					| c_addr
+					| csr_dq_dqs_rcv_cntrl1_addr;
+				phy_io_write16(phy, addr, dq_dqs_rcv_cntrl1_1);
+			}
+		} else {
+			addr = t_dbyte | c_addr | csr_dbyte_misc_mode_addr;
+			phy_io_write16(phy, addr, dbyte_misc_mode);
+			addr = t_dbyte | c_addr | csr_dq_dqs_rcv_cntrl1_addr;
+			phy_io_write16(phy, addr, dq_dqs_rcv_cntrl1);
+		}
+	}
+}
+
+static void prog_master_x4config(uint16_t *phy,
+				 const struct input *input)
+{
+	int master_x4config;
+	int x4tg;
+	uint32_t addr = t_master | csr_master_x4config_addr;
+
+	x4tg = input->basic.dram_data_width == 4 ? 0xf : 0;
+	master_x4config = x4tg << csr_x4tg_lsb;
+	phy_io_write16(phy, addr, master_x4config);
+}
+
+static void prog_dmipin_present(uint16_t *phy,
+				const struct input *input,
+				const struct ddr4u1d *msg)
+{
+	int dmipin_present;
+	uint32_t addr = t_master | csr_dmipin_present_addr;
+
+	dmipin_present = (msg->mr5 >> 12) & 0x1;
+	phy_io_write16(phy, addr, dmipin_present);
+}
+
+static void prog_dfi_phyupd(uint16_t *phy,
+			  const struct input *input)
+{
+	int dfiphyupd_dat;
+	uint32_t addr;
+
+	addr = t_master | (csr_dfiphyupd_addr);
+	dfiphyupd_dat = phy_io_read16(phy, addr) &
+				~csr_dfiphyupd_threshold_mask;
+
+	phy_io_write16(phy, addr, dfiphyupd_dat);
+}
+
+static void prog_cal_misc2(uint16_t *phy,
+			  const struct input *input)
+{
+	int cal_misc2_dat, cal_drv_pdth_data, cal_offsets_dat;
+	uint32_t addr;
+
+	addr = t_master | (csr_cal_misc2_addr);
+	cal_misc2_dat = phy_io_read16(phy, addr) |
+			(1 << csr_cal_misc2_err_dis);
+
+	phy_io_write16(phy, addr, cal_misc2_dat);
+
+	addr = t_master | (csr_cal_offsets_addr);
+
+	cal_drv_pdth_data = 0x9 << 6;
+	cal_offsets_dat = (phy_io_read16(phy, addr) & ~csr_cal_drv_pdth_mask)
+			| cal_drv_pdth_data;
+
+	phy_io_write16(phy, addr, cal_offsets_dat);
+}
+
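+/*
+ * Program the static CSR configuration (slew rates, impedances, ODT, DFI
+ * mode, calibration settings and so on) of every PHY using the helpers
+ * above; presumably the "step C" initialization implied by the name.
+ */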
+static int c_init_phy_config(uint16_t **phy_ptr,
+			     unsigned int ip_rev,
+			     const struct input *input,
+			     const void *msg)
+{
+	int i;
+	uint16_t *phy;
+	__unused const soc_info_t *soc_info;
+
+	for (i = 0; i < NUM_OF_DDRC; i++) {
+		phy = phy_ptr[i];
+		if (phy == NULL) {
+			continue;
+		}
+
+		debug("Initialize PHY %d config\n", i);
+		prog_dfi_phyupd(phy, input);
+		prog_cal_misc2(phy, input);
+		prog_tx_pre_drv_mode(phy, input);
+		prog_atx_pre_drv_mode(phy, input);
+		prog_enable_cs_multicast(phy, input);	/* rdimm and lrdimm */
+		prog_dfi_rd_data_cs_dest_map(phy, ip_rev, input, msg);
+		prog_pll_ctrl2(phy, input);
+#ifdef DDR_PLL_FIX
+		soc_info = get_soc_info();
+		debug("SOC_SI_REV = %x\n", soc_info->maj_ver);
+		if (soc_info->maj_ver == 1) {
+			prog_pll_pwr_dn(phy, input);
+
+			/* Enable FFE aka TxEqualizationMode for rev1 SI */
+			phy_io_write16(phy, 0x010048, 0x1);
+		}
+#endif
+		prog_ard_ptr_init_val(phy, input);
+		prog_dqs_preamble_control(phy, input);
+		prog_dll_lck_param(phy, input);
+		prog_dll_gain_ctl(phy, input);
+		prog_proc_odt_time_ctl(phy, input);
+		prog_tx_odt_drv_stren(phy, input);
+		prog_tx_impedance_ctrl1(phy, input);
+		prog_atx_impedance(phy, input);
+		prog_dfi_mode(phy, input);
+		prog_dfi_camode(phy, input);
+		prog_cal_drv_str0(phy, input);
+		prog_cal_uclk_info(phy, input);
+		prog_cal_rate(phy, input);
+		prog_vref_in_global(phy, input, msg);
+		prog_dq_dqs_rcv_cntrl(phy, input);
+		prog_mem_alert_control(phy, input);
+		prog_dfi_freq_ratio(phy, input);
+		prog_tristate_mode_ca(phy, input);
+		prog_dfi_xlat(phy, input);
+		prog_dbyte_misc_mode(phy, input, msg);
+		prog_master_x4config(phy, input);
+		prog_dmipin_present(phy, input, msg);
+		prog_acx4_anib_dis(phy, input);
+	}
+
+	return 0;
+}
+
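+/*
+ * Mailbox handshake with the training firmware: wait for the write-protect
+ * shadow bit to drop, read the 16-bit message (plus 16 more bits of data
+ * when 'stream' is set), acknowledge via dct_write_prot and wait for the
+ * protection bit to come back. Returns 0xFFFF if the mailbox times out.
+ */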
+static uint32_t get_mail(uint16_t *phy, int stream)
+{
+	int timeout;
+	uint32_t mail = 0U;
+
+	timeout = TIMEOUTDEFAULT;
+	while (((--timeout) != 0) &&
+	       ((phy_io_read16(phy, t_apbonly | csr_uct_shadow_regs)
+		& uct_write_prot_shadow_mask) != 0)) {
+		mdelay(10);
+	}
+	if (timeout == 0) {
+		ERROR("Timeout getting mail from PHY\n");
+		return 0xFFFF;
+	}
+
+	mail = phy_io_read16(phy, t_apbonly |
+			     csr_uct_write_only_shadow);
+	if (stream != 0) {
+		mail |= phy_io_read16(phy, t_apbonly |
+				      csr_uct_dat_write_only_shadow) << 16;
+	}
+
+	/* Ack */
+	phy_io_write16(phy, t_apbonly | csr_dct_write_prot, 0);
+
+	timeout = TIMEOUTDEFAULT;
+	while (((--timeout) != 0) &&
+	       ((phy_io_read16(phy, t_apbonly | csr_uct_shadow_regs)
+		 & uct_write_prot_shadow_mask) == 0)) {
+		mdelay(1);
+	}
+	if (timeout == 0) {
+		ERROR("Timeout ack PHY mail\n");
+	}
+
+	/* completed */
+	phy_io_write16(phy, t_apbonly | csr_dct_write_prot, 1U);
+
+	return mail;
+}
+
+#ifdef DDR_PHY_DEBUG
+static const char *lookup_msg(uint32_t index, int train2d)
+{
+	int i;
+	int size;
+	const struct phy_msg *messages;
+	const char *ptr = NULL;
+
+	if (train2d != 0) {
+		messages = messages_2d;
+		size = ARRAY_SIZE(messages_2d);
+	} else {
+		messages = messages_1d;
+		size = ARRAY_SIZE(messages_1d);
+	}
+	for (i = 0; i < size; i++) {
+		if (messages[i].index == index) {
+			ptr = messages[i].msg;
+			break;
+		}
+	}
+
+	return ptr;
+}
+#endif
+
+#define MAX_ARGS 32
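+/*
+ * A mail value of 0x08 announces a streaming message: the next mail word
+ * carries the message index with the argument count in its low 16 bits,
+ * followed by that many argument words. With DDR_PHY_DEBUG enabled the
+ * index is resolved against the 1D/2D message tables and printed.
+ */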
+static void decode_stream_message(uint16_t *phy, int train2d)
+{
+	uint32_t index __unused;
+
+	__unused const char *format;
+	__unused uint32_t args[MAX_ARGS];
+	__unused int i;
+
+#ifdef DDR_PHY_DEBUG
+	index = get_mail(phy, 1);
+	if ((index & 0xffff) > MAX_ARGS) {	/* up to MAX_ARGS args so far */
+		printf("Program error in %s\n", __func__);
+	}
+	for (i = 0; i < (index & 0xffff) && i < MAX_ARGS; i++) {
+		args[i] = get_mail(phy, 1);
+	}
+
+	format = lookup_msg(index, train2d);
+	if (format != NULL) {
+		printf("0x%08x: ", index);
+		printf(format, args[0], args[1], args[2], args[3], args[4],
+		       args[5], args[6], args[7], args[8], args[9], args[10],
+		       args[11], args[12], args[13], args[14], args[15],
+		       args[16], args[17], args[18], args[19], args[20],
+		       args[21], args[22], args[23], args[24], args[25],
+		       args[26], args[27], args[28], args[29], args[30],
+		       args[31]);
+	}
+#endif
+}
+
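+/*
+ * Poll the firmware mailbox until training completes (0x07), fails (0xff)
+ * or the mailbox itself times out (0xffff); intermediate stage-completion
+ * codes are only logged, and streaming messages (0x08) are decoded as they
+ * arrive.
+ */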
+static int wait_fw_done(uint16_t *phy, int train2d)
+{
+	uint32_t mail = 0U;
+
+	while (mail == U(0x0)) {
+		mail = get_mail(phy, 0);
+		switch (mail) {
+		case U(0x7):
+			debug("%s Training completed\n", train2d ? "2D" : "1D");
+			break;
+		case U(0xff):
+			debug("%s Training failure\n", train2d ? "2D" : "1D");
+			break;
+		case U(0x0):
+			debug("End of initialization\n");
+			mail = 0U;
+			break;
+		case U(0x1):
+			debug("End of fine write leveling\n");
+			mail = 0U;
+			break;
+		case U(0x2):
+			debug("End of read enable training\n");
+			mail = 0U;
+			break;
+		case U(0x3):
+			debug("End of read delay center optimization\n");
+			mail = 0U;
+			break;
+		case U(0x4):
+			debug("End of write delay center optimization\n");
+			mail = 0U;
+			break;
+		case U(0x5):
+			debug("End of 2D read delay/voltage center optimztn\n");
+			mail = 0U;
+			break;
+		case U(0x6):
+			debug("End of 2D write delay/voltage center optmztn\n");
+			mail = 0U;
+			break;
+		case U(0x8):
+			decode_stream_message(phy, train2d);
+			mail = 0U;
+			break;
+		case U(0x9):
+			debug("End of max read latency training\n");
+			mail = 0U;
+			break;
+		case U(0xa):
+			debug("End of read dq deskew training\n");
+			mail = 0U;
+			break;
+		case U(0xc):
+			debug("End of LRDIMM Specific training, including:\n");
+			debug("\tDWL, MREP, MRD and MWD\n");
+			mail = 0U;
+			break;
+		case U(0xd):
+			debug("End of CA training\n");
+			mail = 0U;
+			break;
+		case U(0xfd):
+			debug("End of MPR read delay center optimization\n");
+			mail = 0U;
+			break;
+		case U(0xfe):
+			debug("End of Write leveling coarse delay\n");
+			mail = 0U;
+			break;
+		case U(0xffff):
+			debug("Timed out\n");
+			break;
+		default:
+			mail = 0U;
+			break;
+		}
+	}
+
+	if (mail == U(0x7)) {
+		return 0;
+	} else if (mail == U(0xff)) {
+		return -EIO;
+	} else if (mail == U(0xffff)) {
+		return -ETIMEDOUT;
+	}
+
+	debug("PHY_GEN2 FW: Unexpected mail = 0x%x\n", mail);
+
+	return -EINVAL;
+}
+
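+/*
+ * Start the already-loaded training firmware on every PHY: apply the PLL
+ * settings, step the micro controller reset register through reset+stall,
+ * stall-only and run, then wait for the firmware to report its result.
+ */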
+static int g_exec_fw(uint16_t **phy_ptr, int train2d, struct input *input)
+{
+	int ret = -EINVAL;
+	int i;
+	uint16_t *phy;
+
+	for (i = 0; i < NUM_OF_DDRC; i++) {
+		phy = phy_ptr[i];
+		if (phy == NULL) {
+			continue;
+		}
+		debug("Applying PLL optimal settings\n");
+		prog_pll_ctrl2(phy, input);
+		prog_pll_ctrl(phy, input);
+		phy_io_write16(phy,
+			       t_apbonly | csr_micro_cont_mux_sel_addr,
+			       0x1);
+		phy_io_write16(phy,
+			       t_apbonly | csr_micro_reset_addr,
+			       csr_reset_to_micro_mask |
+			       csr_stall_to_micro_mask);
+		phy_io_write16(phy,
+			       t_apbonly | csr_micro_reset_addr,
+			       csr_stall_to_micro_mask);
+		phy_io_write16(phy,
+			       t_apbonly | csr_micro_reset_addr,
+			       0);
+
+		ret = wait_fw_done(phy, train2d);
+		if (ret == -ETIMEDOUT) {
+			ERROR("Wait timed out: Firmware execution on PHY %d\n",
+			      i);
+		}
+	}
+	return ret;
+}
+
+static inline int send_fw(uint16_t *phy,
+			   uint32_t dst,
+			   uint16_t *img,
+			   uint32_t size)
+{
+	uint32_t i;
+
+	if ((size % 2U) != 0U) {
+		ERROR("Wrong image size 0x%x\n", size);
+		return -EINVAL;
+	}
+
+	for (i = 0U; i < size / 2; i++) {
+		phy_io_write16(phy, dst + i, *(img + i));
+	}
+
+	return 0;
+}
+
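+/*
+ * Load the IMEM and DMEM firmware images selected by DIMM type and training
+ * pass (1D/2D) through the img_loadr callback into a bounce buffer, then
+ * copy them into each PHY over the register interface. The message block
+ * occupies the first 'len' bytes of DMEM and the rest of the DMEM image is
+ * written after it.
+ */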
+static int load_fw(uint16_t **phy_ptr,
+		   struct input *input,
+		   int train2d,
+		   void *msg,
+		   size_t len,
+		   uintptr_t phy_gen2_fw_img_buf,
+		   int (*img_loadr)(unsigned int, uintptr_t *, uint32_t *),
+		   uint32_t warm_boot_flag)
+{
+	uint32_t imem_id, dmem_id;
+	uintptr_t image_buf;
+	uint32_t size;
+	int ret;
+	int i;
+	uint16_t *phy;
+
+	switch (input->basic.dimm_type) {
+	case UDIMM:
+	case SODIMM:
+	case NODIMM:
+		imem_id = train2d ? DDR_IMEM_UDIMM_2D_IMAGE_ID :
+			  DDR_IMEM_UDIMM_1D_IMAGE_ID;
+		dmem_id = train2d ? DDR_DMEM_UDIMM_2D_IMAGE_ID :
+			  DDR_DMEM_UDIMM_1D_IMAGE_ID;
+		break;
+	case RDIMM:
+		imem_id = train2d ? DDR_IMEM_RDIMM_2D_IMAGE_ID :
+			  DDR_IMEM_RDIMM_1D_IMAGE_ID;
+		dmem_id = train2d ? DDR_DMEM_RDIMM_2D_IMAGE_ID :
+			  DDR_DMEM_RDIMM_1D_IMAGE_ID;
+		break;
+	default:
+		ERROR("Unsupported DIMM type\n");
+		return -EINVAL;
+	}
+
+	size = PHY_GEN2_MAX_IMAGE_SIZE;
+	image_buf = (uintptr_t)phy_gen2_fw_img_buf;
+	mmap_add_dynamic_region(phy_gen2_fw_img_buf,
+			phy_gen2_fw_img_buf,
+			PHY_GEN2_MAX_IMAGE_SIZE,
+			MT_MEMORY | MT_RW | MT_SECURE);
+	ret = img_loadr(imem_id, &image_buf, &size);
+	if (ret != 0) {
+		ERROR("Failed to load %d firmware.\n", imem_id);
+		return ret;
+	}
+	debug("Loaded image id %d of size %x at address %lx\n",
+						imem_id, size, image_buf);
+
+	for (i = 0; i < NUM_OF_DDRC; i++) {
+		phy = phy_ptr[i];
+		if (phy == NULL) {
+			continue;
+		}
+
+		if (warm_boot_flag != DDR_WARM_BOOT) {
+			if (train2d == 0) {
+				phy_io_write16(phy, t_master |
+						csr_mem_reset_l_addr,
+						csr_protect_mem_reset_mask);
+			}
+		}
+		/* Enable access to the internal CSRs */
+		phy_io_write16(phy, t_apbonly | csr_micro_cont_mux_sel_addr, 0);
+
+		ret = send_fw(phy, PHY_GEN2_IMEM_ADDR,
+			      (uint16_t *)image_buf, size);
+		if (ret != 0) {
+			return ret;
+		}
+	}
+
+	size = PHY_GEN2_MAX_IMAGE_SIZE;
+	image_buf = (uintptr_t)phy_gen2_fw_img_buf;
+	ret = img_loadr(dmem_id, &image_buf, &size);
+	if (ret != 0) {
+		ERROR("Failed to load %d firmware.\n", dmem_id);
+		return ret;
+	}
+	debug("Loaded Imaged id %d of size %x at address %lx\n",
+						dmem_id, size, image_buf);
+	image_buf += len;
+	size -= len;
+
+	for (i = 0; i < NUM_OF_DDRC; i++) {
+		phy = phy_ptr[i];
+		if (phy == NULL) {
+			continue;
+		}
+
+		ret = send_fw(phy, PHY_GEN2_DMEM_ADDR, msg, len);
+		if (ret != 0) {
+			return ret;
+		}
+
+		ret = send_fw(phy, PHY_GEN2_DMEM_ADDR + len / 2,
+			      (uint16_t *)image_buf, size);
+		if (ret != 0) {
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+static void parse_odt(const unsigned int val,
+		       const int read,
+		       const int i,
+		       const unsigned int cs_d0,
+		       const unsigned int cs_d1,
+		       unsigned int *odt)
+{
+	int shift = read ? 4 : 0;
+	int j;
+
+	if (i < 0 || i > 3) {
+		printf("Error: invalid chip-select value\n");
+	}
+	switch (val) {
+	case DDR_ODT_CS:
+		odt[i] |= (1 << i) << shift;
+		break;
+	case DDR_ODT_ALL_OTHER_CS:
+		for (j = 0; j < DDRC_NUM_CS; j++) {
+			if (i == j) {
+				continue;
+			}
+			if (((cs_d0 | cs_d1) & (1 << j)) == 0) {
+				continue;
+			}
+			odt[j] |= (1 << i) << shift;
+		}
+		break;
+	case DDR_ODT_CS_AND_OTHER_DIMM:
+		odt[i] |= (1 << i) << 4;
+		/* fallthrough */
+	case DDR_ODT_OTHER_DIMM:
+		for (j = 0; j < DDRC_NUM_CS; j++) {
+			if ((((cs_d0 & (1 << i)) != 0) &&
+						((cs_d1 & (1 << j)) != 0)) ||
+			    (((cs_d1 & (1 << i)) != 0) &&
+						((cs_d0 & (1 << j)) != 0))) {
+				odt[j] |= (1 << i) << shift;
+			}
+		}
+		break;
+	case DDR_ODT_ALL:
+		for (j = 0; j < DDRC_NUM_CS; j++) {
+			if (((cs_d0 | cs_d1) & (1 << j)) == 0) {
+				continue;
+			}
+			odt[j] |= (1 << i) << shift;
+		}
+		break;
+	case DDR_ODT_SAME_DIMM:
+		for (j = 0; j < DDRC_NUM_CS; j++) {
+			if ((((cs_d0 & (1 << i)) != 0) &&
+						((cs_d0 & (1 << j)) != 0)) ||
+			    (((cs_d1 & (1 << i)) != 0) &&
+						((cs_d1 & (1 << j)) != 0))) {
+				odt[j] |= (1 << i) << shift;
+			}
+		}
+		break;
+	case DDR_ODT_OTHER_CS_ONSAMEDIMM:
+		for (j = 0; j < DDRC_NUM_CS; j++) {
+			if (i == j) {
+				continue;
+			}
+			if ((((cs_d0 & (1 << i)) != 0) &&
+						((cs_d0 & (1 << j)) != 0)) ||
+			    (((cs_d1 & (1 << i)) != 0) &&
+						((cs_d1 & (1 << j)) != 0))) {
+				odt[j] |= (1 << i) << shift;
+			}
+		}
+		break;
+	case DDR_ODT_NEVER:
+		break;
+	default:
+		break;
+	}
+}
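For reference, parse_odt() effectively transposes the per-CS ODT settings taken from the controller registers into per-target-rank patterns: odt[j] collects, in its low nibble, the chip selects that assert ODT when rank j is written and, in its high nibble, the same for reads (shift is 0 for writes and 4 for reads). A minimal decode sketch; the helper names below are illustrative and not part of this patch:

static inline unsigned int odt_wr_pattern(unsigned int odt_entry)
{
	/* Bitmap of chip selects asserting ODT on writes to this rank */
	return odt_entry & 0x0fU;
}

static inline unsigned int odt_rd_pattern(unsigned int odt_entry)
{
	/* Bitmap of chip selects asserting ODT on reads from this rank */
	return (odt_entry >> 4) & 0x0fU;
}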
+
+#ifdef DEBUG_DDR_INPUT_CONFIG
+char *dram_types_str[] = {
+		"DDR4",
+		"DDR3",
+		"LDDDR4",
+		"LPDDR3",
+		"LPDDR2",
+		"DDR5"
+};
+
+char *dimm_types_str[] = {
+		"UDIMM",
+		"SODIMM",
+		"RDIMM",
+		"LRDIMM",
+		"NODIMM",
+};
+
+static void print_jason_format(struct input *input,
+			       struct ddr4u1d *msg_1d,
+			       struct ddr4u2d *msg_2d)
+{
+
+	printf("\n{");
+	printf("\n    \"dram_type\": \"%s\",", dram_types_str[input->basic.dram_type]);
+	printf("\n    \"dimm_type\": \"%s\",", dimm_types_str[input->basic.dimm_type]);
+	printf("\n    \"hard_macro_ver\": \"%d\",", input->basic.hard_macro_ver);
+	printf("\n    \"num_dbyte\": \"0x%04x\",", (unsigned int)input->basic.num_dbyte);
+	printf("\n    \"num_active_dbyte_dfi0\": \"0x%04x\",", (unsigned int)input->basic.num_active_dbyte_dfi0);
+	printf("\n    \"num_anib\": \"0x%04x\",", (unsigned int)input->basic.num_anib);
+	printf("\n    \"num_rank_dfi0\": \"0x%04x\",", (unsigned int)input->basic.num_rank_dfi0);
+	printf("\n    \"num_pstates\": \"0x%04x\",", (unsigned int)input->basic.num_pstates);
+	printf("\n    \"frequency\": \"%d\",", input->basic.frequency);
+	printf("\n    \"pll_bypass\": \"0x%04x\",", (unsigned int)input->basic.dfi_freq_ratio);
+	printf("\n    \"dfi_freq_ratio\": \"0x%04x\",", (unsigned int)input->basic.dfi_freq_ratio);
+	printf("\n    \"dfi1_exists\":  \"0x%04x\",", (unsigned int)input->basic.dfi1exists);
+	printf("\n    \"dram_data_width\": \"0x%04x\",", (unsigned int)input->basic.dram_data_width);
+	printf("\n    \"dram_byte_swap\": \"0x%04x\",", (unsigned int)input->adv.dram_byte_swap);
+	printf("\n    \"ext_cal_res_val\": \"0x%04x\",", (unsigned int)input->adv.ext_cal_res_val);
+	printf("\n    \"tx_slew_rise_dq\": \"0x%04x\",", (unsigned int)input->adv.tx_slew_rise_dq);
+	printf("\n    \"tx_slew_fall_dq\": \"0x%04x\",", (unsigned int)input->adv.tx_slew_fall_dq);
+	printf("\n    \"tx_slew_rise_ac\": \"0x%04x\",", (unsigned int)input->adv.tx_slew_rise_ac);
+	printf("\n    \"tx_slew_fall_ac\": \"0x%04x\",", (unsigned int)input->adv.tx_slew_fall_ac);
+	printf("\n    \"odt_impedance\": \"%d\",", input->adv.odtimpedance);
+	printf("\n    \"tx_impedance\": \"%d\",", input->adv.tx_impedance);
+	printf("\n    \"atx_impedance\": \"%d\",", input->adv.atx_impedance);
+	printf("\n    \"mem_alert_en\": \"0x%04x\",", (unsigned int)input->adv.mem_alert_en);
+	printf("\n    \"mem_alert_pu_imp\": \"0x%04x\",", (unsigned int)input->adv.mem_alert_puimp);
+	printf("\n    \"mem_alert_vref_level\": \"0x%04x\",", (unsigned int)input->adv.mem_alert_vref_level);
+	printf("\n    \"mem_alert_sync_bypass\": \"0x%04x\",", (unsigned int)input->adv.mem_alert_sync_bypass);
+	printf("\n    \"cal_interval\": \"0x%04x\",", (unsigned int)input->adv.cal_interval);
+	printf("\n    \"cal_once\": \"0x%04x\",", (unsigned int)input->adv.cal_once);
+	printf("\n    \"dis_dyn_adr_tri\": \"0x%04x\",", (unsigned int)input->adv.dis_dyn_adr_tri);
+	printf("\n    \"is2t_timing\": \"0x%04x\",", (unsigned int)input->adv.is2ttiming);
+	printf("\n    \"d4rx_preabmle_length\": \"0x%04x\",", (unsigned int)input->adv.d4rx_preamble_length);
+	printf("\n    \"d4tx_preamble_length\": \"0x%04x\",", (unsigned int)input->adv.d4tx_preamble_length);
+	printf("\n    \"msg_misc\": \"0x%02x\",", (unsigned int)msg_1d->msg_misc);
+	printf("\n    \"reserved00\": \"0x%01x\",", (unsigned int)msg_1d->reserved00);
+	printf("\n    \"hdt_ctrl\": \"0x%02x\",", (unsigned int)msg_1d->hdt_ctrl);
+	printf("\n    \"cs_present\": \"0x%02x\",", (unsigned int)msg_1d->cs_present);
+	printf("\n    \"phy_vref\": \"0x%02x\",", (unsigned int)msg_1d->phy_vref);
+	printf("\n    \"dfi_mrl_margin\": \"0x%02x\",", (unsigned int)msg_1d->dfimrlmargin);
+	printf("\n    \"addr_mirror\": \"0x%02x\",", (unsigned int)msg_1d->addr_mirror);
+	printf("\n    \"wr_odt_pat_rank0\": \"0x%02x\",", (unsigned int)(msg_1d->acsm_odt_ctrl0 & 0x0f));
+	printf("\n    \"wr_odt_pat_rank1\": \"0x%02x\",", (unsigned int)(msg_1d->acsm_odt_ctrl1 & 0x0f));
+	printf("\n    \"wr_odt_pat_rank2\": \"0x%02x\",", (unsigned int)(msg_1d->acsm_odt_ctrl2 & 0x0f));
+	printf("\n    \"wr_odt_pat_rank3\": \"0x%02x\",", (unsigned int)(msg_1d->acsm_odt_ctrl3 & 0x0f));
+	printf("\n    \"rd_odt_pat_rank0\": \"0x%02x\",", (unsigned int)(msg_1d->acsm_odt_ctrl0 & 0xf0));
+	printf("\n    \"rd_odt_pat_rank1\": \"0x%02x\",", (unsigned int)(msg_1d->acsm_odt_ctrl1 & 0xf0));
+	printf("\n    \"rd_odt_pat_rank2\": \"0x%02x\",", (unsigned int)(msg_1d->acsm_odt_ctrl2 & 0xf0));
+	printf("\n    \"rd_odt_pat_rank3\": \"0x%02x\",", (unsigned int)(msg_1d->acsm_odt_ctrl3 & 0xf0));
+	printf("\n    \"d4_misc\": \"0x%01x\",", (unsigned int)msg_1d->d4misc);
+	printf("\n    \"share_2d_vref_results\": \"0x%01x\",", (unsigned int)msg_1d->share2dvref_result);
+	printf("\n    \"sequence_ctrl\": \"0x%04x\",", (unsigned int)msg_1d->sequence_ctrl);
+	printf("\n    \"mr0\": \"0x%04x\",", (unsigned int)msg_1d->mr0);
+	printf("\n    \"mr1\": \"0x%04x\",", (unsigned int)msg_1d->mr1);
+	printf("\n    \"mr2\": \"0x%04x\",", (unsigned int)msg_1d->mr2);
+	printf("\n    \"mr3\": \"0x%04x\",", (unsigned int)msg_1d->mr3);
+	printf("\n    \"mr4\": \"0x%04x\",", (unsigned int)msg_1d->mr4);
+	printf("\n    \"mr5\": \"0x%04x\",", (unsigned int)msg_1d->mr5);
+	printf("\n    \"mr6\": \"0x%04x\",", (unsigned int)msg_1d->mr6);
+	printf("\n    \"alt_cal_l\": \"0x%04x\",", (unsigned int)msg_1d->alt_cas_l);
+	printf("\n    \"alt_wcal_l\": \"0x%04x\",", (unsigned int)msg_1d->alt_wcas_l);
+	printf("\n    \"sequence_ctrl_2d\": \"0x%04x\",", (unsigned int)msg_2d->sequence_ctrl);
+	printf("\n    \"rtt_nom_wr_park0\": \"0x%01x\",", (unsigned int)msg_1d->rtt_nom_wr_park0);
+	printf("\n    \"rtt_nom_wr_park1\": \"0x%01x\",", (unsigned int)msg_1d->rtt_nom_wr_park1);
+	printf("\n    \"rtt_nom_wr_park2\": \"0x%01x\",", (unsigned int)msg_1d->rtt_nom_wr_park2);
+	printf("\n    \"rtt_nom_wr_park3\": \"0x%01x\",", (unsigned int)msg_1d->rtt_nom_wr_park3);
+	printf("\n    \"rtt_nom_wr_park4\": \"0x%01x\",", (unsigned int)msg_1d->rtt_nom_wr_park4);
+	printf("\n    \"rtt_nom_wr_park5\": \"0x%01x\",", (unsigned int)msg_1d->rtt_nom_wr_park5);
+	printf("\n    \"rtt_nom_wr_park6\": \"0x%01x\",", (unsigned int)msg_1d->rtt_nom_wr_park6);
+	printf("\n    \"rtt_nom_wr_park7\": \"0x%01x\"", (unsigned int)msg_1d->rtt_nom_wr_park7);
+	printf("\n}");
+	printf("\n");
+}
+#endif
+
+int compute_ddr_phy(struct ddr_info *priv)
+{
+	const unsigned long clk = priv->clk;
+	const struct memctl_opt *popts = &priv->opt;
+	const struct ddr_conf *conf = &priv->conf;
+	const struct dimm_params *dimm_param = &priv->dimm;
+	struct ddr_cfg_regs *regs = &priv->ddr_reg;
+	int ret;
+	static struct input input;
+	static struct ddr4u1d msg_1d;
+	static struct ddr4u2d msg_2d;
+	unsigned int i;
+	unsigned int odt_rd, odt_wr;
+	__unused const soc_info_t *soc_info;
+#ifdef NXP_APPLY_MAX_CDD
+	unsigned int tcfg0, tcfg4, rank;
+#endif
+
+	if (dimm_param == NULL) {
+		ERROR("Empty DIMM parameters.\n");
+		return -EINVAL;
+	}
+
+	zeromem(&input, sizeof(input));
+	zeromem(&msg_1d, sizeof(msg_1d));
+	zeromem(&msg_2d, sizeof(msg_2d));
+
+	input.basic.dram_type = DDR4;
+	/* FIXME: Add condition for LRDIMM */
+	input.basic.dimm_type = (dimm_param->rdimm != 0) ? RDIMM : UDIMM;
+	input.basic.num_dbyte = dimm_param->primary_sdram_width / 8 +
+				 dimm_param->ec_sdram_width / 8;
+	input.basic.num_active_dbyte_dfi0 = input.basic.num_dbyte;
+	input.basic.num_rank_dfi0 = dimm_param->n_ranks;
+	input.basic.dram_data_width = dimm_param->device_width;
+	input.basic.hard_macro_ver	= 0xa;
+	input.basic.num_pstates	= 1;
+	input.basic.dfi_freq_ratio	= 1;
+	input.basic.num_anib		= 0xc;
+	input.basic.train2d		= popts->skip2d ? 0 : 1;
+	input.basic.frequency = (int) (clk / 2000000ul);
+	debug("frequency = %dMHz\n", input.basic.frequency);
+	input.cs_d0 = conf->cs_on_dimm[0];
+#if DDRC_NUM_DIMM > 1
+	input.cs_d1 = conf->cs_on_dimm[1];
+#endif
+	input.mirror = dimm_param->mirrored_dimm;
+	input.mr[0] = regs->sdram_mode[0] & U(0xffff);
+	input.mr[1] = regs->sdram_mode[0] >> 16U;
+	input.mr[2] = regs->sdram_mode[1] >> 16U;
+	input.mr[3] = regs->sdram_mode[1] & U(0xffff);
+	input.mr[4] = regs->sdram_mode[8] >> 16U;
+	input.mr[5] = regs->sdram_mode[8] & U(0xffff);
+	input.mr[6] = regs->sdram_mode[9] >> 16U;
+	input.vref = popts->vref_phy;
+	debug("Vref_phy = %d percent\n", (input.vref * 100U) >> 7U);
+	for (i = 0U; i < DDRC_NUM_CS; i++) {
+		if ((regs->cs[i].config & SDRAM_CS_CONFIG_EN) == 0U) {
+			continue;
+		}
+		odt_rd = (regs->cs[i].config >> 20U) & U(0x7);
+		odt_wr = (regs->cs[i].config >> 16U) & U(0x7);
+		parse_odt(odt_rd, true, i, input.cs_d0, input.cs_d1,
+			   input.odt);
+		parse_odt(odt_wr, false, i, input.cs_d0, input.cs_d1,
+			   input.odt);
+	}
+
+	/* Do not set sdram_cfg[RD_EN] or sdram_cfg2[RCW_EN] for RDIMM */
+	if (dimm_param->rdimm != 0U) {
+		regs->sdram_cfg[0] &= ~(1 << 28U);
+		regs->sdram_cfg[1] &= ~(1 << 2U);
+		input.rcw[0] = (regs->sdram_rcw[0] >> 28U) & U(0xf);
+		input.rcw[1] = (regs->sdram_rcw[0] >> 24U) & U(0xf);
+		input.rcw[2] = (regs->sdram_rcw[0] >> 20U) & U(0xf);
+		input.rcw[3] = (regs->sdram_rcw[0] >> 16U) & U(0xf);
+		input.rcw[4] = (regs->sdram_rcw[0] >> 12U) & U(0xf);
+		input.rcw[5] = (regs->sdram_rcw[0] >> 8U) & U(0xf);
+		input.rcw[6] = (regs->sdram_rcw[0] >> 4U) & U(0xf);
+		input.rcw[7] = (regs->sdram_rcw[0] >> 0U) & U(0xf);
+		input.rcw[8] = (regs->sdram_rcw[1] >> 28U) & U(0xf);
+		input.rcw[9] = (regs->sdram_rcw[1] >> 24U) & U(0xf);
+		input.rcw[10] = (regs->sdram_rcw[1] >> 20U) & U(0xf);
+		input.rcw[11] = (regs->sdram_rcw[1] >> 16U) & U(0xf);
+		input.rcw[12] = (regs->sdram_rcw[1] >> 12U) & U(0xf);
+		input.rcw[13] = (regs->sdram_rcw[1] >> 8U) & U(0xf);
+		input.rcw[14] = (regs->sdram_rcw[1] >> 4U) & U(0xf);
+		input.rcw[15] = (regs->sdram_rcw[1] >> 0U) & U(0xf);
+		input.rcw3x = (regs->sdram_rcw[2] >> 8U) & U(0xff);
+	}
+
+	input.adv.odtimpedance = popts->odt ? popts->odt : 60;
+	input.adv.tx_impedance = popts->phy_tx_impedance ?
+					popts->phy_tx_impedance : 28;
+	input.adv.atx_impedance = popts->phy_atx_impedance ?
+					popts->phy_atx_impedance : 30;
+
+	debug("Initializing input adv data structure\n");
+	phy_gen2_init_input(&input);
+
+	debug("Initializing message block\n");
+	ret = phy_gen2_msg_init(&msg_1d, &msg_2d, &input);
+	if (ret != 0) {
+		ERROR("Init msg failed (error code %d)\n", ret);
+		return ret;
+	}
+
+	ret = c_init_phy_config(priv->phy, priv->ip_rev, &input, &msg_1d);
+	if (ret != 0) {
+		ERROR("Init PHY failed (error code %d)\n", ret);
+		return ret;
+	}
+#ifdef NXP_WARM_BOOT
+	debug("Warm boot flag value %0x\n", priv->warm_boot_flag);
+	if (priv->warm_boot_flag == DDR_WARM_BOOT) {
+		debug("Restoring the Phy training data\n");
+		/* Restore the training data */
+		ret = restore_phy_training_values(priv->phy,
+						  PHY_TRAINING_REGS_ON_FLASH,
+						  priv->num_ctlrs,
+						  input.basic.train2d);
+		if (ret != 0) {
+			ERROR("Restoring of training data failed %d\n", ret);
+			return ret;
+		}
+	} else {
+#endif
+
+		debug("Load 1D firmware\n");
+		ret = load_fw(priv->phy, &input, 0, &msg_1d,
+			      sizeof(struct ddr4u1d), priv->phy_gen2_fw_img_buf,
+					priv->img_loadr, priv->warm_boot_flag);
+		if (ret != 0) {
+			ERROR("Loading firmware failed (error code %d)\n", ret);
+			return ret;
+		}
+
+		debug("Execute firmware\n");
+		ret = g_exec_fw(priv->phy, 0, &input);
+		if (ret != 0) {
+			ERROR("Execution FW failed (error code %d)\n", ret);
+		}
+
+#ifdef NXP_APPLY_MAX_CDD
+		soc_info = get_soc_info(NXP_DCFG_ADDR);
+		if (soc_info->maj_ver == 2) {
+			tcfg0 = regs->timing_cfg[0];
+			tcfg4 = regs->timing_cfg[4];
+			rank = findrank(conf->cs_in_use);
+			get_cdd_val(priv->phy, rank, input.basic.frequency,
+					&tcfg0, &tcfg4);
+			regs->timing_cfg[0] = tcfg0;
+			regs->timing_cfg[4] = tcfg4;
+		}
+#endif
+
+		if ((ret == 0) && (input.basic.train2d != 0)) {
+			/* 2D training starts here */
+			debug("Load 2D firmware\n");
+			ret = load_fw(priv->phy, &input, 1, &msg_2d,
+				      sizeof(struct ddr4u2d),
+				      priv->phy_gen2_fw_img_buf,
+				      priv->img_loadr,
+				      priv->warm_boot_flag);
+			if (ret != 0) {
+				ERROR("Loading fw failed (err code %d)\n", ret);
+			} else {
+				debug("Execute 2D firmware\n");
+				ret = g_exec_fw(priv->phy, 1, &input);
+				if (ret != 0) {
+					ERROR("Execution FW failed (err %d)\n",
+					       ret);
+				}
+			}
+		}
+#ifdef NXP_WARM_BOOT
+		if (priv->warm_boot_flag != DDR_WRM_BOOT_NT_SUPPORTED &&
+		    ret == 0) {
+			debug("save the phy training data\n");
+			//Save training data TBD
+			ret = save_phy_training_values(priv->phy,
+						PHY_TRAINING_REGS_ON_FLASH,
+						priv->num_ctlrs,
+						input.basic.train2d);
+			if (ret != 0) {
+				ERROR("Saving training data failed.");
+				ERROR("Warm boot will fail. Error=%d.\n", ret);
+			}
+		}
+	} /* else */
+#endif
+
+	if (ret == 0) {
+		debug("Load PIE\n");
+		i_load_pie(priv->phy, &input, &msg_1d);
+
+		NOTICE("DDR4 %s with %d-rank %d-bit bus (x%d)\n",
+		       input.basic.dimm_type == RDIMM ? "RDIMM" :
+		       input.basic.dimm_type == LRDIMM ? "LRDIMM" :
+		       "UDIMM",
+		       dimm_param->n_ranks,
+		       dimm_param->primary_sdram_width,
+		       dimm_param->device_width);
+	}
+#ifdef DEBUG_DDR_INPUT_CONFIG
+	print_jason_format(&input, &msg_1d, &msg_2d);
+#endif
+
+	return ret;
+}
diff --git a/drivers/nxp/ddr/phy-gen2/phy.h b/drivers/nxp/ddr/phy-gen2/phy.h
new file mode 100644
index 0000000..15e80d1
--- /dev/null
+++ b/drivers/nxp/ddr/phy-gen2/phy.h
@@ -0,0 +1,334 @@
+/*
+ * Copyright 2021 NXP
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#if !defined(PHY_H) && defined(NXP_WARM_BOOT)
+#define PHY_H
+
+#include <flash_info.h>
+
+/* Sector size to be erased on flash */
+#define PHY_ERASE_SIZE F_SECTOR_ERASE_SZ
+
+/* Address-data tuple used to store a PHY training register value */
+struct phy_training_values {
+	uint32_t addr;
+	uint16_t data;
+};
+/* Saves the PHY training register values after a cold reset.
+ * @param[in] phy_ptr array holding the base addresses of the PHYs
+ * @param[in] address_to_store flash address at which the PHY training
+ * register values are saved
+ * @param[in] num_of_phy number of PHYs whose training values are to be saved
+ * @param[in] train2d flag indicating whether the 2D training registers are
+ * to be saved as well
+ *
+ * The PHY training values are stored contiguously on flash, per PHY, in the
+ * order: 1D training registers, then 2D training registers.
+ *
+ * If train2d is false, saving the 2D training registers is skipped.
+ */
+int save_phy_training_values(uint16_t **phy_ptr, uint32_t address_to_store,
+		uint32_t num_of_phy, int train2d);
+
+/* Restores the PHY training register values after a warm reset.
+ * @param[in] phy_ptr array holding the base addresses of the PHYs
+ * @param[in] address_to_restore flash address from which the PHY training
+ * register values are retrieved
+ * @param[in] num_of_phy number of PHYs whose training values are to be
+ * restored
+ * @param[in] train2d flag indicating whether the 2D training registers are
+ * to be restored as well
+ *
+ * If train2d is false, restoring the 2D training registers is skipped.
+ */
+
+int restore_phy_training_values(uint16_t **phy_ptr, uint32_t address_to_restore,
+		uint32_t num_of_phy, int train2d);
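A minimal usage sketch of the two helpers above, mirroring the warm-boot path in compute_ddr_phy() later in this patch; phy_ptr, warm_boot_flag, num_of_phy and train2d are assumed to be caller-provided, and error handling is trimmed:

#ifdef NXP_WARM_BOOT
	int ret;

	if (warm_boot_flag == DDR_WARM_BOOT) {
		/* Warm boot: skip training and reuse the values kept on flash */
		ret = restore_phy_training_values(phy_ptr,
						  PHY_TRAINING_REGS_ON_FLASH,
						  num_of_phy, train2d);
	} else {
		/* Cold boot: run training first, then persist the results */
		ret = save_phy_training_values(phy_ptr,
					       PHY_TRAINING_REGS_ON_FLASH,
					       num_of_phy, train2d);
	}
#endif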
+
+/*
+ * Address-data tuples used to store the PHY 1D training register values
+ */
+
+struct phy_training_values training_1D_values[] = {
+	{0x200B2, 0},	{0x200CB, 0},	{0x10043, 0},	{0x11043, 0},
+	{0x12043, 0},	{0x13043, 0},	{0x14043, 0},	{0x15043, 0},
+	{0x16043, 0},	{0x17043, 0},	{0x18043, 0},	{0x10143, 0},
+	{0x11143, 0},	{0x12143, 0},	{0x13143, 0},	{0x14143, 0},
+	{0x15143, 0},	{0x16143, 0},	{0x17143, 0},	{0x18143, 0},
+	{0x10080, 0},	{0x11080, 0},	{0x12080, 0},	{0x13080, 0},
+	{0x14080, 0},	{0x15080, 0},	{0x16080, 0},	{0x17080, 0},
+	{0x18080, 0},	{0x10180, 0},	{0x11180, 0},	{0x12180, 0},
+	{0x13180, 0},	{0x14180, 0},	{0x15180, 0},	{0x16180, 0},
+	{0x17180, 0},	{0x18180, 0},	{0x10081, 0},	{0x11081, 0},
+	{0x12081, 0},	{0x13081, 0},	{0x14081, 0},	{0x15081, 0},
+	{0x16081, 0},	{0x17081, 0},	{0x18081, 0},	{0x10181, 0},
+	{0x11181, 0},	{0x12181, 0},	{0x13181, 0},	{0x14181, 0},
+	{0x15181, 0},	{0x16181, 0},	{0x17181, 0},	{0x18181, 0},
+	{0x10082, 0},	{0x11082, 0},	{0x12082, 0},	{0x13082, 0},
+	{0x14082, 0},	{0x15082, 0},	{0x16082, 0},	{0x17082, 0},
+	{0x18082, 0},	{0x10182, 0},	{0x11182, 0},	{0x12182, 0},
+	{0x13182, 0},	{0x14182, 0},	{0x15182, 0},	{0x16182, 0},
+	{0x17182, 0},	{0x18182, 0},	{0x10083, 0},	{0x11083, 0},
+	{0x12083, 0},	{0x13083, 0},	{0x14083, 0},	{0x15083, 0},
+	{0x16083, 0},	{0x17083, 0},	{0x18083, 0},	{0x10183, 0},
+	{0x11183, 0},	{0x12183, 0},	{0x13183, 0},	{0x14183, 0},
+	{0x15183, 0},	{0x16183, 0},	{0x17183, 0},	{0x18183, 0},
+	{0x100D0, 0},	{0x110D0, 0},	{0x120D0, 0},	{0x130D0, 0},
+	{0x140D0, 0},	{0x150D0, 0},	{0x160D0, 0},	{0x170D0, 0},
+	{0x180D0, 0},	{0x101D0, 0},	{0x111D0, 0},	{0x121D0, 0},
+	{0x131D0, 0},	{0x141D0, 0},	{0x151D0, 0},	{0x161D0, 0},
+	{0x171D0, 0},	{0x181D0, 0},	{0x100D1, 0},	{0x110D1, 0},
+	{0x120D1, 0},	{0x130D1, 0},	{0x140D1, 0},	{0x150D1, 0},
+	{0x160D1, 0},	{0x170D1, 0},	{0x180D1, 0},	{0x101D1, 0},
+	{0x111D1, 0},	{0x121D1, 0},	{0x131D1, 0},	{0x141D1, 0},
+	{0x151D1, 0},	{0x161D1, 0},	{0x171D1, 0},	{0x181D1, 0},
+	{0x100D2, 0},	{0x110D2, 0},	{0x120D2, 0},	{0x130D2, 0},
+	{0x140D2, 0},	{0x150D2, 0},	{0x160D2, 0},	{0x170D2, 0},
+	{0x180D2, 0},	{0x101D2, 0},	{0x111D2, 0},	{0x121D2, 0},
+	{0x131D2, 0},	{0x141D2, 0},	{0x151D2, 0},	{0x161D2, 0},
+	{0x171D2, 0},	{0x181D2, 0},	{0x100D3, 0},	{0x110D3, 0},
+	{0x120D3, 0},	{0x130D3, 0},	{0x140D3, 0},	{0x150D3, 0},
+	{0x160D3, 0},	{0x170D3, 0},	{0x180D3, 0},	{0x101D3, 0},
+	{0x111D3, 0},	{0x121D3, 0},	{0x131D3, 0},	{0x141D3, 0},
+	{0x151D3, 0},	{0x161D3, 0},	{0x171D3, 0},	{0x181D3, 0},
+	{0x10068, 0},	{0x11068, 0},	{0x12068, 0},	{0x13068, 0},
+	{0x14068, 0},	{0x15068, 0},	{0x16068, 0},	{0x17068, 0},
+	{0x18068, 0},	{0x10168, 0},	{0x11168, 0},	{0x12168, 0},
+	{0x13168, 0},	{0x14168, 0},	{0x15168, 0},	{0x16168, 0},
+	{0x17168, 0},	{0x18168, 0},	{0x10268, 0},	{0x11268, 0},
+	{0x12268, 0},	{0x13268, 0},	{0x14268, 0},	{0x15268, 0},
+	{0x16268, 0},	{0x17268, 0},	{0x18268, 0},	{0x10368, 0},
+	{0x11368, 0},	{0x12368, 0},	{0x13368, 0},	{0x14368, 0},
+	{0x15368, 0},	{0x16368, 0},	{0x17368, 0},	{0x18368, 0},
+	{0x10468, 0},	{0x11468, 0},	{0x12468, 0},	{0x13468, 0},
+	{0x14468, 0},	{0x15468, 0},	{0x16468, 0},	{0x17468, 0},
+	{0x18468, 0},	{0x10568, 0},	{0x11568, 0},	{0x12568, 0},
+	{0x13568, 0},	{0x14568, 0},	{0x15568, 0},	{0x16568, 0},
+	{0x17568, 0},	{0x18568, 0},	{0x10668, 0},	{0x11668, 0},
+	{0x12668, 0},	{0x13668, 0},	{0x14668, 0},	{0x15668, 0},
+	{0x16668, 0},	{0x17668, 0},	{0x18668, 0},	{0x10768, 0},
+	{0x11768, 0},	{0x12768, 0},	{0x13768, 0},	{0x14768, 0},
+	{0x15768, 0},	{0x16768, 0},	{0x17768, 0},	{0x18768, 0},
+	{0x10868, 0},	{0x11868, 0},	{0x12868, 0},	{0x13868, 0},
+	{0x14868, 0},	{0x15868, 0},	{0x16868, 0},	{0x17868, 0},
+	{0x18868, 0},	{0x10069, 0},	{0x11069, 0},	{0x12069, 0},
+	{0x13069, 0},	{0x14069, 0},	{0x15069, 0},	{0x16069, 0},
+	{0x17069, 0},	{0x18069, 0},	{0x10169, 0},	{0x11169, 0},
+	{0x12169, 0},	{0x13169, 0},	{0x14169, 0},	{0x15169, 0},
+	{0x16169, 0},	{0x17169, 0},	{0x18169, 0},	{0x10269, 0},
+	{0x11269, 0},	{0x12269, 0},	{0x13269, 0},	{0x14269, 0},
+	{0x15269, 0},	{0x16269, 0},	{0x17269, 0},	{0x18269, 0},
+	{0x10369, 0},	{0x11369, 0},	{0x12369, 0},	{0x13369, 0},
+	{0x14369, 0},	{0x15369, 0},	{0x16369, 0},	{0x17369, 0},
+	{0x18369, 0},	{0x10469, 0},	{0x11469, 0},	{0x12469, 0},
+	{0x13469, 0},	{0x14469, 0},	{0x15469, 0},	{0x16469, 0},
+	{0x17469, 0},	{0x18469, 0},	{0x10569, 0},	{0x11569, 0},
+	{0x12569, 0},	{0x13569, 0},	{0x14569, 0},	{0x15569, 0},
+	{0x16569, 0},	{0x17569, 0},	{0x18569, 0},	{0x10669, 0},
+	{0x11669, 0},	{0x12669, 0},	{0x13669, 0},	{0x14669, 0},
+	{0x15669, 0},	{0x16669, 0},	{0x17669, 0},	{0x18669, 0},
+	{0x10769, 0},	{0x11769, 0},	{0x12769, 0},	{0x13769, 0},
+	{0x14769, 0},	{0x15769, 0},	{0x16769, 0},	{0x17769, 0},
+	{0x18769, 0},	{0x10869, 0},	{0x11869, 0},	{0x12869, 0},
+	{0x13869, 0},	{0x14869, 0},	{0x15869, 0},	{0x16869, 0},
+	{0x17869, 0},	{0x18869, 0},	{0x1006A, 0},	{0x1106A, 0},
+	{0x1206A, 0},	{0x1306A, 0},	{0x1406A, 0},	{0x1506A, 0},
+	{0x1606A, 0},	{0x1706A, 0},	{0x1806A, 0},	{0x1016A, 0},
+	{0x1116A, 0},	{0x1216A, 0},	{0x1316A, 0},	{0x1416A, 0},
+	{0x1516A, 0},	{0x1616A, 0},	{0x1716A, 0},	{0x1816A, 0},
+	{0x1026A, 0},	{0x1126A, 0},	{0x1226A, 0},	{0x1326A, 0},
+	{0x1426A, 0},	{0x1526A, 0},	{0x1626A, 0},	{0x1726A, 0},
+	{0x1826A, 0},	{0x1036A, 0},	{0x1136A, 0},	{0x1236A, 0},
+	{0x1336A, 0},	{0x1436A, 0},	{0x1536A, 0},	{0x1636A, 0},
+	{0x1736A, 0},	{0x1836A, 0},	{0x1046A, 0},	{0x1146A, 0},
+	{0x1246A, 0},	{0x1346A, 0},	{0x1446A, 0},	{0x1546A, 0},
+	{0x1646A, 0},	{0x1746A, 0},	{0x1846A, 0},	{0x1056A, 0},
+	{0x1156A, 0},	{0x1256A, 0},	{0x1356A, 0},	{0x1456A, 0},
+	{0x1556A, 0},	{0x1656A, 0},	{0x1756A, 0},	{0x1856A, 0},
+	{0x1066A, 0},	{0x1166A, 0},	{0x1266A, 0},	{0x1366A, 0},
+	{0x1466A, 0},	{0x1566A, 0},	{0x1666A, 0},	{0x1766A, 0},
+	{0x1866A, 0},	{0x1076A, 0},	{0x1176A, 0},	{0x1276A, 0},
+	{0x1376A, 0},	{0x1476A, 0},	{0x1576A, 0},	{0x1676A, 0},
+	{0x1776A, 0},	{0x1876A, 0},	{0x1086A, 0},	{0x1186A, 0},
+	{0x1286A, 0},	{0x1386A, 0},	{0x1486A, 0},	{0x1586A, 0},
+	{0x1686A, 0},	{0x1786A, 0},	{0x1886A, 0},	{0x1006B, 0},
+	{0x1106B, 0},	{0x1206B, 0},	{0x1306B, 0},	{0x1406B, 0},
+	{0x1506B, 0},	{0x1606B, 0},	{0x1706B, 0},	{0x1806B, 0},
+	{0x1016B, 0},	{0x1116B, 0},	{0x1216B, 0},	{0x1316B, 0},
+	{0x1416B, 0},	{0x1516B, 0},	{0x1616B, 0},	{0x1716B, 0},
+	{0x1816B, 0},	{0x1026B, 0},	{0x1126B, 0},	{0x1226B, 0},
+	{0x1326B, 0},	{0x1426B, 0},	{0x1526B, 0},	{0x1626B, 0},
+	{0x1726B, 0},	{0x1826B, 0},	{0x1036B, 0},	{0x1136B, 0},
+	{0x1236B, 0},	{0x1336B, 0},	{0x1436B, 0},	{0x1536B, 0},
+	{0x1636B, 0},	{0x1736B, 0},	{0x1836B, 0},	{0x1046B, 0},
+	{0x1146B, 0},	{0x1246B, 0},	{0x1346B, 0},	{0x1446B, 0},
+	{0x1546B, 0},	{0x1646B, 0},	{0x1746B, 0},	{0x1846B, 0},
+	{0x1056B, 0},	{0x1156B, 0},	{0x1256B, 0},	{0x1356B, 0},
+	{0x1456B, 0},	{0x1556B, 0},	{0x1656B, 0},	{0x1756B, 0},
+	{0x1856B, 0},	{0x1066B, 0},	{0x1166B, 0},	{0x1266B, 0},
+	{0x1366B, 0},	{0x1466B, 0},	{0x1566B, 0},	{0x1666B, 0},
+	{0x1766B, 0},	{0x1866B, 0},	{0x1076B, 0},	{0x1176B, 0},
+	{0x1276B, 0},	{0x1376B, 0},	{0x1476B, 0},	{0x1576B, 0},
+	{0x1676B, 0},	{0x1776B, 0},	{0x1876B, 0},	{0x1086B, 0},
+	{0x1186B, 0},	{0x1286B, 0},	{0x1386B, 0},	{0x1486B, 0},
+	{0x1586B, 0},	{0x1686B, 0},	{0x1786B, 0},	{0x1886B, 0},
+	{0x1008C, 0},	{0x1108C, 0},	{0x1208C, 0},	{0x1308C, 0},
+	{0x1408C, 0},	{0x1508C, 0},	{0x1608C, 0},	{0x1708C, 0},
+	{0x1808C, 0},	{0x1018C, 0},	{0x1118C, 0},	{0x1218C, 0},
+	{0x1318C, 0},	{0x1418C, 0},	{0x1518C, 0},	{0x1618C, 0},
+	{0x1718C, 0},	{0x1818C, 0},	{0x1008D, 0},	{0x1108D, 0},
+	{0x1208D, 0},	{0x1308D, 0},	{0x1408D, 0},	{0x1508D, 0},
+	{0x1608D, 0},	{0x1708D, 0},	{0x1808D, 0},	{0x1018D, 0},
+	{0x1118D, 0},	{0x1218D, 0},	{0x1318D, 0},	{0x1418D, 0},
+	{0x1518D, 0},	{0x1618D, 0},	{0x1718D, 0},	{0x1818D, 0},
+	{0x1008E, 0},	{0x1108E, 0},	{0x1208E, 0},	{0x1308E, 0},
+	{0x1408E, 0},	{0x1508E, 0},	{0x1608E, 0},	{0x1708E, 0},
+	{0x1808E, 0},	{0x1018E, 0},	{0x1118E, 0},	{0x1218E, 0},
+	{0x1318E, 0},	{0x1418E, 0},	{0x1518E, 0},	{0x1618E, 0},
+	{0x1718E, 0},	{0x1818E, 0},	{0x1008F, 0},	{0x1108F, 0},
+	{0x1208F, 0},	{0x1308F, 0},	{0x1408F, 0},	{0x1508F, 0},
+	{0x1608F, 0},	{0x1708F, 0},	{0x1808F, 0},	{0x1018F, 0},
+	{0x1118F, 0},	{0x1218F, 0},	{0x1318F, 0},	{0x1418F, 0},
+	{0x1518F, 0},	{0x1618F, 0},	{0x1718F, 0},	{0x1818F, 0},
+	{0x100C0, 0},	{0x110C0, 0},	{0x120C0, 0},	{0x130C0, 0},
+	{0x140C0, 0},	{0x150C0, 0},	{0x160C0, 0},	{0x170C0, 0},
+	{0x180C0, 0},	{0x101C0, 0},	{0x111C0, 0},	{0x121C0, 0},
+	{0x131C0, 0},	{0x141C0, 0},	{0x151C0, 0},	{0x161C0, 0},
+	{0x171C0, 0},	{0x181C0, 0},	{0x102C0, 0},	{0x112C0, 0},
+	{0x122C0, 0},	{0x132C0, 0},	{0x142C0, 0},	{0x152C0, 0},
+	{0x162C0, 0},	{0x172C0, 0},	{0x182C0, 0},	{0x103C0, 0},
+	{0x113C0, 0},	{0x123C0, 0},	{0x133C0, 0},	{0x143C0, 0},
+	{0x153C0, 0},	{0x163C0, 0},	{0x173C0, 0},	{0x183C0, 0},
+	{0x104C0, 0},	{0x114C0, 0},	{0x124C0, 0},	{0x134C0, 0},
+	{0x144C0, 0},	{0x154C0, 0},	{0x164C0, 0},	{0x174C0, 0},
+	{0x184C0, 0},	{0x105C0, 0},	{0x115C0, 0},	{0x125C0, 0},
+	{0x135C0, 0},	{0x145C0, 0},	{0x155C0, 0},	{0x165C0, 0},
+	{0x175C0, 0},	{0x185C0, 0},	{0x106C0, 0},	{0x116C0, 0},
+	{0x126C0, 0},	{0x136C0, 0},	{0x146C0, 0},	{0x156C0, 0},
+	{0x166C0, 0},	{0x176C0, 0},	{0x186C0, 0},	{0x107C0, 0},
+	{0x117C0, 0},	{0x127C0, 0},	{0x137C0, 0},	{0x147C0, 0},
+	{0x157C0, 0},	{0x167C0, 0},	{0x177C0, 0},	{0x187C0, 0},
+	{0x108C0, 0},	{0x118C0, 0},	{0x128C0, 0},	{0x138C0, 0},
+	{0x148C0, 0},	{0x158C0, 0},	{0x168C0, 0},	{0x178C0, 0},
+	{0x188C0, 0},	{0x100C1, 0},	{0x110C1, 0},	{0x120C1, 0},
+	{0x130C1, 0},	{0x140C1, 0},	{0x150C1, 0},	{0x160C1, 0},
+	{0x170C1, 0},	{0x180C1, 0},	{0x101C1, 0},	{0x111C1, 0},
+	{0x121C1, 0},	{0x131C1, 0},	{0x141C1, 0},	{0x151C1, 0},
+	{0x161C1, 0},	{0x171C1, 0},	{0x181C1, 0},	{0x102C1, 0},
+	{0x112C1, 0},	{0x122C1, 0},	{0x132C1, 0},	{0x142C1, 0},
+	{0x152C1, 0},	{0x162C1, 0},	{0x172C1, 0},	{0x182C1, 0},
+	{0x103C1, 0},	{0x113C1, 0},	{0x123C1, 0},	{0x133C1, 0},
+	{0x143C1, 0},	{0x153C1, 0},	{0x163C1, 0},	{0x173C1, 0},
+	{0x183C1, 0},	{0x104C1, 0},	{0x114C1, 0},	{0x124C1, 0},
+	{0x134C1, 0},	{0x144C1, 0},	{0x154C1, 0},	{0x164C1, 0},
+	{0x174C1, 0},	{0x184C1, 0},	{0x105C1, 0},	{0x115C1, 0},
+	{0x125C1, 0},	{0x135C1, 0},	{0x145C1, 0},	{0x155C1, 0},
+	{0x165C1, 0},	{0x175C1, 0},	{0x185C1, 0},	{0x106C1, 0},
+	{0x116C1, 0},	{0x126C1, 0},	{0x136C1, 0},	{0x146C1, 0},
+	{0x156C1, 0},	{0x166C1, 0},	{0x176C1, 0},	{0x186C1, 0},
+	{0x107C1, 0},	{0x117C1, 0},	{0x127C1, 0},	{0x137C1, 0},
+	{0x147C1, 0},	{0x157C1, 0},	{0x167C1, 0},	{0x177C1, 0},
+	{0x187C1, 0},	{0x108C1, 0},	{0x118C1, 0},	{0x128C1, 0},
+	{0x138C1, 0},	{0x148C1, 0},	{0x158C1, 0},	{0x168C1, 0},
+	{0x178C1, 0},	{0x188C1, 0},	{0x100C2, 0},	{0x110C2, 0},
+	{0x120C2, 0},	{0x130C2, 0},	{0x140C2, 0},	{0x150C2, 0},
+	{0x160C2, 0},	{0x170C2, 0},	{0x180C2, 0},	{0x101C2, 0},
+	{0x111C2, 0},	{0x121C2, 0},	{0x131C2, 0},	{0x141C2, 0},
+	{0x151C2, 0},	{0x161C2, 0},	{0x171C2, 0},	{0x181C2, 0},
+	{0x102C2, 0},	{0x112C2, 0},	{0x122C2, 0},	{0x132C2, 0},
+	{0x142C2, 0},	{0x152C2, 0},	{0x162C2, 0},	{0x172C2, 0},
+	{0x182C2, 0},	{0x103C2, 0},	{0x113C2, 0},	{0x123C2, 0},
+	{0x133C2, 0},	{0x143C2, 0},	{0x153C2, 0},	{0x163C2, 0},
+	{0x173C2, 0},	{0x183C2, 0},	{0x104C2, 0},	{0x114C2, 0},
+	{0x124C2, 0},	{0x134C2, 0},	{0x144C2, 0},	{0x154C2, 0},
+	{0x164C2, 0},	{0x174C2, 0},	{0x184C2, 0},	{0x105C2, 0},
+	{0x115C2, 0},	{0x125C2, 0},	{0x135C2, 0},	{0x145C2, 0},
+	{0x155C2, 0},	{0x165C2, 0},	{0x175C2, 0},	{0x185C2, 0},
+	{0x106C2, 0},	{0x116C2, 0},	{0x126C2, 0},	{0x136C2, 0},
+	{0x146C2, 0},	{0x156C2, 0},	{0x166C2, 0},	{0x176C2, 0},
+	{0x186C2, 0},	{0x107C2, 0},	{0x117C2, 0},	{0x127C2, 0},
+	{0x137C2, 0},	{0x147C2, 0},	{0x157C2, 0},	{0x167C2, 0},
+	{0x177C2, 0},	{0x187C2, 0},	{0x108C2, 0},	{0x118C2, 0},
+	{0x128C2, 0},	{0x138C2, 0},	{0x148C2, 0},	{0x158C2, 0},
+	{0x168C2, 0},	{0x178C2, 0},	{0x188C2, 0},	{0x100C3, 0},
+	{0x110C3, 0},	{0x120C3, 0},	{0x130C3, 0},	{0x140C3, 0},
+	{0x150C3, 0},	{0x160C3, 0},	{0x170C3, 0},	{0x180C3, 0},
+	{0x101C3, 0},	{0x111C3, 0},	{0x121C3, 0},	{0x131C3, 0},
+	{0x141C3, 0},	{0x151C3, 0},	{0x161C3, 0},	{0x171C3, 0},
+	{0x181C3, 0},	{0x102C3, 0},	{0x112C3, 0},	{0x122C3, 0},
+	{0x132C3, 0},	{0x142C3, 0},	{0x152C3, 0},	{0x162C3, 0},
+	{0x172C3, 0},	{0x182C3, 0},	{0x103C3, 0},	{0x113C3, 0},
+	{0x123C3, 0},	{0x133C3, 0},	{0x143C3, 0},	{0x153C3, 0},
+	{0x163C3, 0},	{0x173C3, 0},	{0x183C3, 0},	{0x104C3, 0},
+	{0x114C3, 0},	{0x124C3, 0},	{0x134C3, 0},	{0x144C3, 0},
+	{0x154C3, 0},	{0x164C3, 0},	{0x174C3, 0},	{0x184C3, 0},
+	{0x105C3, 0},	{0x115C3, 0},	{0x125C3, 0},	{0x135C3, 0},
+	{0x145C3, 0},	{0x155C3, 0},	{0x165C3, 0},	{0x175C3, 0},
+	{0x185C3, 0},	{0x106C3, 0},	{0x116C3, 0},	{0x126C3, 0},
+	{0x136C3, 0},	{0x146C3, 0},	{0x156C3, 0},	{0x166C3, 0},
+	{0x176C3, 0},	{0x186C3, 0},	{0x107C3, 0},	{0x117C3, 0},
+	{0x127C3, 0},	{0x137C3, 0},	{0x147C3, 0},	{0x157C3, 0},
+	{0x167C3, 0},	{0x177C3, 0},	{0x187C3, 0},	{0x108C3, 0},
+	{0x118C3, 0},	{0x128C3, 0},	{0x138C3, 0},	{0x148C3, 0},
+	{0x158C3, 0},	{0x168C3, 0},	{0x178C3, 0},	{0x188C3, 0},
+	{0x10020, 0},	{0x11020, 0},	{0x12020, 0},	{0x13020, 0},
+	{0x14020, 0},	{0x15020, 0},	{0x16020, 0},	{0x17020, 0},
+	{0x18020, 0},	{0x2007D, 0},	{0x20077, 0}
+};
+
+/*
+ * Address-data tuples used to store the PHY 2D training register values
+ */
+struct phy_training_values training_2D_values[] = {
+	{0x1008C, 0},   {0x1108C, 0},   {0x1208C, 0},   {0x1308C, 0},
+	{0x1408C, 0},   {0x1508C, 0},   {0x1608C, 0},   {0x1708C, 0},
+	{0x1808C, 0},   {0x1018C, 0},   {0x1118C, 0},   {0x1218C, 0},
+	{0x1318C, 0},   {0x1418C, 0},   {0x1518C, 0},   {0x1618C, 0},
+	{0x1718C, 0},   {0x1818C, 0},   {0x10040, 0},   {0x11040, 0},
+	{0x12040, 0},   {0x13040, 0},   {0x14040, 0},   {0x15040, 0},
+	{0x16040, 0},   {0x17040, 0},   {0x18040, 0},   {0x10140, 0},
+	{0x11140, 0},   {0x12140, 0},   {0x13140, 0},   {0x14140, 0},
+	{0x15140, 0},   {0x16140, 0},   {0x17140, 0},   {0x18140, 0},
+	{0x10240, 0},   {0x11240, 0},   {0x12240, 0},   {0x13240, 0},
+	{0x14240, 0},   {0x15240, 0},   {0x16240, 0},   {0x17240, 0},
+	{0x18240, 0},   {0x10340, 0},   {0x11340, 0},   {0x12340, 0},
+	{0x13340, 0},   {0x14340, 0},   {0x15340, 0},   {0x16340, 0},
+	{0x17340, 0},   {0x18340, 0},   {0x10440, 0},   {0x11440, 0},
+	{0x12440, 0},   {0x13440, 0},   {0x14440, 0},   {0x15440, 0},
+	{0x16440, 0},   {0x17440, 0},   {0x18440, 0},   {0x10540, 0},
+	{0x11540, 0},   {0x12540, 0},   {0x13540, 0},   {0x14540, 0},
+	{0x15540, 0},   {0x16540, 0},   {0x17540, 0},   {0x18540, 0},
+	{0x10640, 0},   {0x11640, 0},   {0x12640, 0},   {0x13640, 0},
+	{0x14640, 0},   {0x15640, 0},   {0x16640, 0},   {0x17640, 0},
+	{0x18640, 0},   {0x10740, 0},   {0x11740, 0},   {0x12740, 0},
+	{0x13740, 0},   {0x14740, 0},   {0x15740, 0},   {0x16740, 0},
+	{0x17740, 0},   {0x18740, 0},   {0x10840, 0},   {0x11840, 0},
+	{0x12840, 0},   {0x13840, 0},   {0x14840, 0},   {0x15840, 0},
+	{0x16840, 0},   {0x17840, 0},   {0x18840, 0},   {0x10030, 0},
+	{0x11030, 0},   {0x12030, 0},   {0x13030, 0},   {0x14030, 0},
+	{0x15030, 0},   {0x16030, 0},   {0x17030, 0},   {0x18030, 0},
+	{0x10130, 0},   {0x11130, 0},   {0x12130, 0},   {0x13130, 0},
+	{0x14130, 0},   {0x15130, 0},   {0x16130, 0},   {0x17130, 0},
+	{0x18130, 0},   {0x10230, 0},   {0x11230, 0},   {0x12230, 0},
+	{0x13230, 0},   {0x14230, 0},   {0x15230, 0},   {0x16230, 0},
+	{0x17230, 0},   {0x18230, 0},   {0x10330, 0},   {0x11330, 0},
+	{0x12330, 0},   {0x13330, 0},   {0x14330, 0},   {0x15330, 0},
+	{0x16330, 0},   {0x17330, 0},   {0x18330, 0},   {0x10430, 0},
+	{0x11430, 0},   {0x12430, 0},   {0x13430, 0},   {0x14430, 0},
+	{0x15430, 0},   {0x16430, 0},   {0x17430, 0},   {0x18430, 0},
+	{0x10530, 0},   {0x11530, 0},   {0x12530, 0},   {0x13530, 0},
+	{0x14530, 0},   {0x15530, 0},   {0x16530, 0},   {0x17530, 0},
+	{0x18530, 0},   {0x10630, 0},   {0x11630, 0},   {0x12630, 0},
+	{0x13630, 0},   {0x14630, 0},   {0x15630, 0},   {0x16630, 0},
+	{0x17630, 0},   {0x18630, 0},   {0x10730, 0},   {0x11730, 0},
+	{0x12730, 0},   {0x13730, 0},   {0x14730, 0},   {0x15730, 0},
+	{0x16730, 0},   {0x17730, 0},   {0x18730, 0},   {0x10830, 0},
+	{0x11830, 0},   {0x12830, 0},   {0x13830, 0},   {0x14830, 0},
+	{0x15830, 0},   {0x16830, 0},   {0x17830, 0},   {0x18830, 0}
+};
+
+#endif
diff --git a/drivers/nxp/ddr/phy-gen2/pie.h b/drivers/nxp/ddr/phy-gen2/pie.h
new file mode 100644
index 0000000..b89066a
--- /dev/null
+++ b/drivers/nxp/ddr/phy-gen2/pie.h
@@ -0,0 +1,632 @@
+/*
+ * Copyright 2021 NXP
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PIE_H
+#define PIE_H
+
+struct pie {
+	uint32_t addr;
+	uint16_t data;
+};
+
+static const struct pie pie_udimm[] = {
+	{0x90000, 0x10},
+	{0x90001, 0x400},
+	{0x90002, 0x10e},
+	{0x90003, 0x0},
+	{0x90004, 0x0},
+	{0x90005, 0x8},
+	{0x90029, 0xb},
+	{0x9002a, 0x480},
+	{0x9002b, 0x109},
+	{0x9002c, 0x8},
+	{0x9002d, 0x448},
+	{0x9002e, 0x139},
+	{0x9002f, 0x8},
+	{0x90030, 0x478},
+	{0x90031, 0x109},
+	{0x90032, 0x2},
+	{0x90033, 0x10},
+	{0x90034, 0x139},
+	{0x90035, 0xb},
+	{0x90036, 0x7c0},
+	{0x90037, 0x139},
+	{0x90038, 0x44},
+	{0x90039, 0x633},
+	{0x9003a, 0x159},
+	{0x9003b, 0x14f},
+	{0x9003c, 0x630},
+	{0x9003d, 0x159},
+	{0x9003e, 0x47},
+	{0x9003f, 0x633},
+	{0x90040, 0x149},
+	{0x90041, 0x4f},
+	{0x90042, 0x633},
+	{0x90043, 0x179},
+	{0x90044, 0x8},
+	{0x90045, 0xe0},
+	{0x90046, 0x109},
+	{0x90047, 0x0},
+	{0x90048, 0x7c8},
+	{0x90049, 0x109},
+	{0x9004a, 0x0},
+	{0x9004b, 0x1},
+	{0x9004c, 0x8},
+	{0x9004d, 0x0},
+	{0x9004e, 0x45a},
+	{0x9004f, 0x9},
+	{0x90050, 0x0},
+	{0x90051, 0x448},
+	{0x90052, 0x109},
+	{0x90053, 0x40},
+	{0x90054, 0x633},
+	{0x90055, 0x179},
+	{0x90056, 0x1},
+	{0x90057, 0x618},
+	{0x90058, 0x109},
+	{0x90059, 0x40c0},
+	{0x9005a, 0x633},
+	{0x9005b, 0x149},
+	{0x9005c, 0x8},
+	{0x9005d, 0x4},
+	{0x9005e, 0x48},
+	{0x9005f, 0x4040},
+	{0x90060, 0x633},
+	{0x90061, 0x149},
+	{0x90062, 0x0},
+	{0x90063, 0x4},
+	{0x90064, 0x48},
+	{0x90065, 0x40},
+	{0x90066, 0x633},
+	{0x90067, 0x149},
+	{0x90068, 0x10},
+	{0x90069, 0x4},
+	{0x9006a, 0x18},
+	{0x9006b, 0x0},
+	{0x9006c, 0x4},
+	{0x9006d, 0x78},
+	{0x9006e, 0x549},
+	{0x9006f, 0x633},
+	{0x90070, 0x159},
+	{0x90071, 0xd49},
+	{0x90072, 0x633},
+	{0x90073, 0x159},
+	{0x90074, 0x94a},
+	{0x90075, 0x633},
+	{0x90076, 0x159},
+	{0x90077, 0x441},
+	{0x90078, 0x633},
+	{0x90079, 0x149},
+	{0x9007a, 0x42},
+	{0x9007b, 0x633},
+	{0x9007c, 0x149},
+	{0x9007d, 0x1},
+	{0x9007e, 0x633},
+	{0x9007f, 0x149},
+	{0x90080, 0x0},
+	{0x90081, 0xe0},
+	{0x90082, 0x109},
+	{0x90083, 0xa},
+	{0x90084, 0x10},
+	{0x90085, 0x109},
+	{0x90086, 0x9},
+	{0x90087, 0x3c0},
+	{0x90088, 0x149},
+	{0x90089, 0x9},
+	{0x9008a, 0x3c0},
+	{0x9008b, 0x159},
+	{0x9008c, 0x18},
+	{0x9008d, 0x10},
+	{0x9008e, 0x109},
+	{0x9008f, 0x0},
+	{0x90090, 0x3c0},
+	{0x90091, 0x109},
+	{0x90092, 0x18},
+	{0x90093, 0x4},
+	{0x90094, 0x48},
+	{0x90095, 0x18},
+	{0x90096, 0x4},
+	{0x90097, 0x58},
+	{0x90098, 0xb},
+	{0x90099, 0x10},
+	{0x9009a, 0x109},
+	{0x9009b, 0x1},
+	{0x9009c, 0x10},
+	{0x9009d, 0x109},
+	{0x9009e, 0x5},
+	{0x9009f, 0x7c0},
+	{0x900a0, 0x109},
+	{0x900a1, 0x0},
+	{0x900a2, 0x8140},
+	{0x900a3, 0x10c},
+	{0x900a4, 0x10},
+	{0x900a5, 0x8138},
+	{0x900a6, 0x10c},
+	{0x900a7, 0x8},
+	{0x900a8, 0x7c8},
+	{0x900a9, 0x101},
+	{0x900aa, 0x8},
+	{0x900ab, 0x448},
+	{0x900ac, 0x109},
+	{0x900ad, 0xf},
+	{0x900ae, 0x7c0},
+	{0x900af, 0x109},
+	{0x900b0, 0x47},
+	{0x900b1, 0x630},
+	{0x900b2, 0x109},
+	{0x900b3, 0x8},
+	{0x900b4, 0x618},
+	{0x900b5, 0x109},
+	{0x900b6, 0x8},
+	{0x900b7, 0xe0},
+	{0x900b8, 0x109},
+	{0x900b9, 0x0},
+	{0x900ba, 0x7c8},
+	{0x900bb, 0x109},
+	{0x900bc, 0x8},
+	{0x900bd, 0x8140},
+	{0x900be, 0x10c},
+	{0x900bf, 0x0},
+	{0x900c0, 0x478},
+	{0x900c1, 0x109},
+	{0x900c2, 0x0},
+	{0x900c3, 0x1},
+	{0x900c4, 0x8},
+	{0x900c5, 0x8},
+	{0x900c6, 0x4},
+	{0x900c7, 0x8},
+	{0x900c8, 0x8},
+	{0x900c9, 0x7c8},
+	{0x900ca, 0x101},
+	{0x90006, 0x0},
+	{0x90007, 0x0},
+	{0x90008, 0x8},
+	{0x90009, 0x0},
+	{0x9000a, 0x0},
+	{0x9000b, 0x0},
+	{0xd00e7, 0x400},
+	{0x90017, 0x0},
+	{0x90026, 0x2b},
+};
+
+static const struct pie pie_rdimm[] = {
+	{0x90000, 0x10},
+	{0x90001, 0x400},
+	{0x90002, 0x10e},
+	{0x90003, 0x0},
+	{0x90004, 0x0},
+	{0x90005, 0x8},
+	{0x40000, 0x10},
+	{0x40020, 0x0},
+	{0x40040, 0x0},
+	{0x40060, 0x0},
+	{0x40001, 0x70a},
+	{0x40021, 0x7005},
+	{0x40041, 0x0},
+	{0x40061, 0x2001},
+	{0x40002, 0x4010},
+	{0x40022, 0x0},
+	{0x40042, 0x0},
+	{0x40062, 0x0},
+	{0x90029, 0x10},
+	{0x9002a, 0x400},
+	{0x9002b, 0x16e},
+	{0x9002c, 0x8},
+	{0x9002d, 0x370},
+	{0x9002e, 0x169},
+	{0x9002f, 0x8},
+	{0x90030, 0x7aa},
+	{0x90031, 0x6a},
+	{0x90032, 0x10},
+	{0x90033, 0x7b2},
+	{0x90034, 0x6a},
+	{0x90035, 0x0},
+	{0x90036, 0x48a},
+	{0x90037, 0x6a},
+	{0x90038, 0x9},
+	{0x90039, 0x480},
+	{0x9003a, 0x16a},
+	{0x9003b, 0x4},
+	{0x9003c, 0x790},
+	{0x9003d, 0x16a},
+	{0x9003e, 0xc},
+	{0x9003f, 0x408},
+	{0x90040, 0x169},
+	{0x90041, 0xa},
+	{0x90042, 0x0},
+	{0x90043, 0x68},
+	{0x90044, 0x0},
+	{0x90045, 0x408},
+	{0x90046, 0x169},
+	{0x90047, 0x1},
+	{0x90048, 0x480},
+	{0x90049, 0x16a},
+	{0x9004a, 0xb},
+	{0x9004b, 0x480},
+	{0x9004c, 0x109},
+	{0x9004d, 0x8},
+	{0x9004e, 0x448},
+	{0x9004f, 0x139},
+	{0x90050, 0x78},
+	{0x90051, 0x8},
+	{0x90052, 0x139},
+	{0x90053, 0x2},
+	{0x90054, 0x10},
+	{0x90055, 0x139},
+	{0x90056, 0xb},
+	{0x90057, 0x7c0},
+	{0x90058, 0x139},
+	{0x90059, 0x44},
+	{0x9005a, 0x633},
+	{0x9005b, 0x159},
+	{0x9005c, 0x14f},
+	{0x9005d, 0x630},
+	{0x9005e, 0x159},
+	{0x9005f, 0x47},
+	{0x90060, 0x633},
+	{0x90061, 0x149},
+	{0x90062, 0x4f},
+	{0x90063, 0x633},
+	{0x90064, 0x179},
+	{0x90065, 0x8},
+	{0x90066, 0xe0},
+	{0x90067, 0x109},
+	{0x90068, 0x0},
+	{0x90069, 0x7c8},
+	{0x9006a, 0x109},
+	{0x9006b, 0x0},
+	{0x9006c, 0x1},
+	{0x9006d, 0x8},
+	{0x9006e, 0x0},
+	{0x9006f, 0x45a},
+	{0x90070, 0x9},
+	{0x90071, 0x0},
+	{0x90072, 0x448},
+	{0x90073, 0x109},
+	{0x90074, 0x40},
+	{0x90075, 0x633},
+	{0x90076, 0x179},
+	{0x90077, 0x1},
+	{0x90078, 0x618},
+	{0x90079, 0x109},
+	{0x9007a, 0x40c0},
+	{0x9007b, 0x633},
+	{0x9007c, 0x149},
+	{0x9007d, 0x8},
+	{0x9007e, 0x4},
+	{0x9007f, 0x48},
+	{0x90080, 0x4040},
+	{0x90081, 0x633},
+	{0x90082, 0x149},
+	{0x90083, 0x0},
+	{0x90084, 0x4},
+	{0x90085, 0x48},
+	{0x90086, 0x40},
+	{0x90087, 0x633},
+	{0x90088, 0x149},
+	{0x90089, 0x10},
+	{0x9008a, 0x4},
+	{0x9008b, 0x18},
+	{0x9008c, 0x0},
+	{0x9008d, 0x4},
+	{0x9008e, 0x78},
+	{0x9008f, 0x549},
+	{0x90090, 0x633},
+	{0x90091, 0x159},
+	{0x90092, 0xd49},
+	{0x90093, 0x633},
+	{0x90094, 0x159},
+	{0x90095, 0x94a},
+	{0x90096, 0x633},
+	{0x90097, 0x159},
+	{0x90098, 0x441},
+	{0x90099, 0x633},
+	{0x9009a, 0x149},
+	{0x9009b, 0x42},
+	{0x9009c, 0x633},
+	{0x9009d, 0x149},
+	{0x9009e, 0x1},
+	{0x9009f, 0x633},
+	{0x900a0, 0x149},
+	{0x900a1, 0x0},
+	{0x900a2, 0xe0},
+	{0x900a3, 0x109},
+	{0x900a4, 0xa},
+	{0x900a5, 0x10},
+	{0x900a6, 0x109},
+	{0x900a7, 0x9},
+	{0x900a8, 0x3c0},
+	{0x900a9, 0x149},
+	{0x900aa, 0x9},
+	{0x900ab, 0x3c0},
+	{0x900ac, 0x159},
+	{0x900ad, 0x18},
+	{0x900ae, 0x10},
+	{0x900af, 0x109},
+	{0x900b0, 0x0},
+	{0x900b1, 0x3c0},
+	{0x900b2, 0x109},
+	{0x900b3, 0x18},
+	{0x900b4, 0x4},
+	{0x900b5, 0x48},
+	{0x900b6, 0x18},
+	{0x900b7, 0x4},
+	{0x900b8, 0x58},
+	{0x900b9, 0xb},
+	{0x900ba, 0x10},
+	{0x900bb, 0x109},
+	{0x900bc, 0x1},
+	{0x900bd, 0x10},
+	{0x900be, 0x109},
+	{0x900bf, 0x5},
+	{0x900c0, 0x7c0},
+	{0x900c1, 0x109},
+	{0x900c2, 0x3},
+	{0x900c3, 0x370},
+	{0x900c4, 0x169},
+	{0x900c5, 0x3},
+	{0x900c6, 0x8},
+	{0x900c7, 0x139},
+	{0x900c8, 0x0},
+	{0x900c9, 0x400},
+	{0x900ca, 0x16e},
+	{0x900cb, 0x8},
+	{0x900cc, 0x478},
+	{0x900cd, 0x109},
+	{0x900ce, 0x0},
+	{0x900cf, 0x8140},
+	{0x900d0, 0x10c},
+	{0x900d1, 0x10},
+	{0x900d2, 0x8138},
+	{0x900d3, 0x10c},
+	{0x900d4, 0x8},
+	{0x900d5, 0x7c8},
+	{0x900d6, 0x101},
+	{0x900d7, 0x7a},
+	{0x900d8, 0x8},
+	{0x900d9, 0x109},
+	{0x900da, 0x8},
+	{0x900db, 0x448},
+	{0x900dc, 0x109},
+	{0x900dd, 0xf},
+	{0x900de, 0x7c0},
+	{0x900df, 0x109},
+	{0x900e0, 0x47},
+	{0x900e1, 0x630},
+	{0x900e2, 0x109},
+	{0x900e3, 0x8},
+	{0x900e4, 0x618},
+	{0x900e5, 0x109},
+	{0x900e6, 0x8},
+	{0x900e7, 0xe0},
+	{0x900e8, 0x109},
+	{0x900e9, 0x0},
+	{0x900ea, 0x8},
+	{0x900eb, 0x109},
+	{0x900ec, 0x0},
+	{0x900ed, 0x7c8},
+	{0x900ee, 0x109},
+	{0x900ef, 0x8},
+	{0x900f0, 0x8140},
+	{0x900f1, 0x10c},
+	{0x900f2, 0x0},
+	{0x900f3, 0x478},
+	{0x900f4, 0x109},
+	{0x900f5, 0x0},
+	{0x900f6, 0x1},
+	{0x900f7, 0x8},
+	{0x900f8, 0x8},
+	{0x900f9, 0x4},
+	{0x900fa, 0x8},
+	{0x900fb, 0x8},
+	{0x900fc, 0x7c8},
+	{0x900fd, 0x101},
+	{0x90006, 0x0},
+	{0x90007, 0x0},
+	{0x90008, 0x8},
+	{0x90009, 0x0},
+	{0x9000a, 0x0},
+	{0x9000b, 0x0},
+	{0xd00e7, 0x400},
+	{0x90017, 0x0},
+	{0x90026, 0x3a},
+};
+
+static const struct pie pie_lrdimm[] = {
+	{0x90000, 0x10},
+	{0x90001, 0x400},
+	{0x90002, 0x10e},
+	{0x90003, 0x0},
+	{0x90004, 0x0},
+	{0x90005, 0x8},
+	{0x90029, 0xb},
+	{0x9002a, 0x480},
+	{0x9002b, 0x109},
+	{0x9002c, 0x8},
+	{0x9002d, 0x448},
+	{0x9002e, 0x139},
+	{0x9002f, 0x78},
+	{0x90030, 0x8},
+	{0x90031, 0x139},
+	{0x90032, 0x2},
+	{0x90033, 0x10},
+	{0x90034, 0x139},
+	{0x90035, 0xb},
+	{0x90036, 0x7c0},
+	{0x90037, 0x139},
+	{0x90038, 0x44},
+	{0x90039, 0x633},
+	{0x9003a, 0x159},
+	{0x9003b, 0x14f},
+	{0x9003c, 0x630},
+	{0x9003d, 0x159},
+	{0x9003e, 0x47},
+	{0x9003f, 0x633},
+	{0x90040, 0x149},
+	{0x90041, 0x4f},
+	{0x90042, 0x633},
+	{0x90043, 0x179},
+	{0x90044, 0x8},
+	{0x90045, 0xe0},
+	{0x90046, 0x109},
+	{0x90047, 0x0},
+	{0x90048, 0x7c8},
+	{0x90049, 0x109},
+	{0x9004a, 0x0},
+	{0x9004b, 0x1},
+	{0x9004c, 0x8},
+	{0x9004d, 0x0},
+	{0x9004e, 0x45a},
+	{0x9004f, 0x9},
+	{0x90050, 0x0},
+	{0x90051, 0x448},
+	{0x90052, 0x109},
+	{0x90053, 0x40},
+	{0x90054, 0x633},
+	{0x90055, 0x179},
+	{0x90056, 0x1},
+	{0x90057, 0x618},
+	{0x90058, 0x109},
+	{0x90059, 0x40c0},
+	{0x9005a, 0x633},
+	{0x9005b, 0x149},
+	{0x9005c, 0x8},
+	{0x9005d, 0x4},
+	{0x9005e, 0x48},
+	{0x9005f, 0x4040},
+	{0x90060, 0x633},
+	{0x90061, 0x149},
+	{0x90062, 0x0},
+	{0x90063, 0x4},
+	{0x90064, 0x48},
+	{0x90065, 0x40},
+	{0x90066, 0x633},
+	{0x90067, 0x149},
+	{0x90068, 0x10},
+	{0x90069, 0x4},
+	{0x9006a, 0x18},
+	{0x9006b, 0x0},
+	{0x9006c, 0x4},
+	{0x9006d, 0x78},
+	{0x9006e, 0x549},
+	{0x9006f, 0x633},
+	{0x90070, 0x159},
+	{0x90071, 0xd49},
+	{0x90072, 0x633},
+	{0x90073, 0x159},
+	{0x90074, 0x94a},
+	{0x90075, 0x633},
+	{0x90076, 0x159},
+	{0x90077, 0x441},
+	{0x90078, 0x633},
+	{0x90079, 0x149},
+	{0x9007a, 0x42},
+	{0x9007b, 0x633},
+	{0x9007c, 0x149},
+	{0x9007d, 0x1},
+	{0x9007e, 0x633},
+	{0x9007f, 0x149},
+	{0x90080, 0x0},
+	{0x90081, 0xe0},
+	{0x90082, 0x109},
+	{0x90083, 0xa},
+	{0x90084, 0x10},
+	{0x90085, 0x109},
+	{0x90086, 0x9},
+	{0x90087, 0x3c0},
+	{0x90088, 0x149},
+	{0x90089, 0x9},
+	{0x9008a, 0x3c0},
+	{0x9008b, 0x159},
+	{0x9008c, 0x18},
+	{0x9008d, 0x10},
+	{0x9008e, 0x109},
+	{0x9008f, 0x0},
+	{0x90090, 0x3c0},
+	{0x90091, 0x109},
+	{0x90092, 0x18},
+	{0x90093, 0x4},
+	{0x90094, 0x48},
+	{0x90095, 0x18},
+	{0x90096, 0x4},
+	{0x90097, 0x58},
+	{0x90098, 0xb},
+	{0x90099, 0x10},
+	{0x9009a, 0x109},
+	{0x9009b, 0x1},
+	{0x9009c, 0x10},
+	{0x9009d, 0x109},
+	{0x9009e, 0x5},
+	{0x9009f, 0x7c0},
+	{0x900a0, 0x109},
+	{0x900a1, 0x3},
+	{0x900a2, 0x8},
+	{0x900a3, 0x139},
+	{0x900a4, 0x0},
+	{0x900a5, 0x400},
+	{0x900a6, 0x16e},
+	{0x900a7, 0x8},
+	{0x900a8, 0x478},
+	{0x900a9, 0x109},
+	{0x900aa, 0x0},
+	{0x900ab, 0x8140},
+	{0x900ac, 0x10c},
+	{0x900ad, 0x10},
+	{0x900ae, 0x8138},
+	{0x900af, 0x10c},
+	{0x900b0, 0x8},
+	{0x900b1, 0x7c8},
+	{0x900b2, 0x101},
+	{0x900b3, 0x7a},
+	{0x900b4, 0x8},
+	{0x900b5, 0x109},
+	{0x900b6, 0x8},
+	{0x900b7, 0x448},
+	{0x900b8, 0x109},
+	{0x900b9, 0xf},
+	{0x900ba, 0x7c0},
+	{0x900bb, 0x109},
+	{0x900bc, 0x47},
+	{0x900bd, 0x630},
+	{0x900be, 0x109},
+	{0x900bf, 0x8},
+	{0x900c0, 0x618},
+	{0x900c1, 0x109},
+	{0x900c2, 0x8},
+	{0x900c3, 0xe0},
+	{0x900c4, 0x109},
+	{0x900c5, 0x0},
+	{0x900c6, 0x8},
+	{0x900c7, 0x109},
+	{0x900c8, 0x0},
+	{0x900c9, 0x7c8},
+	{0x900ca, 0x109},
+	{0x900cb, 0x8},
+	{0x900cc, 0x8140},
+	{0x900cd, 0x10c},
+	{0x900ce, 0x0},
+	{0x900cf, 0x478},
+	{0x900d0, 0x109},
+	{0x900d1, 0x0},
+	{0x900d2, 0x1},
+	{0x900d3, 0x8},
+	{0x900d4, 0x8},
+	{0x900d5, 0x4},
+	{0x900d6, 0x8},
+	{0x900d7, 0x8},
+	{0x900d8, 0x7c8},
+	{0x900d9, 0x101},
+	{0x90006, 0x0},
+	{0x90007, 0x0},
+	{0x90008, 0x8},
+	{0x90009, 0x0},
+	{0x9000a, 0x0},
+	{0x9000b, 0x0},
+	{0xd00e7, 0x400},
+	{0x90017, 0x0},
+	{0x90026, 0x2e},
+};
+#endif
diff --git a/drivers/nxp/drivers.mk b/drivers/nxp/drivers.mk
new file mode 100644
index 0000000..c6d5541
--- /dev/null
+++ b/drivers/nxp/drivers.mk
@@ -0,0 +1,90 @@
+#
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+
+###############################################################################
+
+
+PLAT_DRIVERS_PATH	:=	drivers/nxp
+
+ifeq (${SMMU_NEEDED},yes)
+PLAT_INCLUDES	+= -Iinclude/drivers/nxp/smmu/
+endif
+
+ifeq (${DCFG_NEEDED},yes)
+include $(PLAT_DRIVERS_PATH)/dcfg/dcfg.mk
+endif
+
+ifeq (${CSU_NEEDED},yes)
+include $(PLAT_DRIVERS_PATH)/csu/csu.mk
+endif
+
+ifeq (${TIMER_NEEDED},yes)
+include $(PLAT_DRIVERS_PATH)/timer/timer.mk
+endif
+
+ifeq (${INTERCONNECT_NEEDED},yes)
+include ${PLAT_DRIVERS_PATH}/interconnect/interconnect.mk
+endif
+
+ifeq (${GIC_NEEDED},yes)
+include ${PLAT_DRIVERS_PATH}/gic/gic.mk
+endif
+
+ifeq (${SD_MMC_NEEDED},yes)
+include $(PLAT_DRIVERS_PATH)/sd/sd_mmc.mk
+endif
+
+ifeq (${CONSOLE_NEEDED},yes)
+include $(PLAT_DRIVERS_PATH)/console/console.mk
+endif
+
+ifeq (${SFP_NEEDED},yes)
+include $(PLAT_DRIVERS_PATH)/sfp/sfp.mk
+endif
+
+ifeq (${XSPI_NEEDED},yes)
+include $(PLAT_DRIVERS_PATH)/flexspi/nor/flexspi_nor.mk
+endif
+
+ifeq (${QSPI_NEEDED},yes)
+include $(PLAT_DRIVERS_PATH)/qspi/qspi.mk
+endif
+
+ifeq (${SNVS_NEEDED},yes)
+include $(PLAT_DRIVERS_PATH)/sec_mon/sec_mon.mk
+endif
+
+ifeq ($(I2C_NEEDED),yes)
+$(eval $(call add_define, I2C_INIT))
+include $(PLAT_DRIVERS_PATH)/i2c/i2c.mk
+endif
+
+ifeq ($(DDR_DRIVER_NEEDED),yes)
+$(eval $(call add_define, DDR_INIT))
+# define DDR_CNTRL_SOURCES
+ifeq ($(DDRCNTLR),MMDC)
+include $(PLAT_DRIVERS_PATH)/ddr/fsl-mmdc/ddr.mk
+else
+include $(PLAT_DRIVERS_PATH)/ddr/nxp-ddr/ddr.mk
+endif # DDR_CNTRL_SOURCES
+endif
+
+ifeq (${PMU_NEEDED},yes)
+include $(PLAT_DRIVERS_PATH)/pmu/pmu.mk
+endif
+
+ifeq (${CRYPTO_NEEDED},yes)
+include $(PLAT_DRIVERS_PATH)/crypto/caam/caam.mk
+endif
+
+ifeq (${TZASC_NEEDED},yes)
+include $(PLAT_DRIVERS_PATH)/tzc/tzc.mk
+endif
+
+ifeq (${GPIO_NEEDED},yes)
+include ${PLAT_DRIVERS_PATH}/gpio/gpio.mk
+endif
diff --git a/drivers/nxp/flexspi/nor/flexspi_nor.c b/drivers/nxp/flexspi/nor/flexspi_nor.c
new file mode 100644
index 0000000..748228d
--- /dev/null
+++ b/drivers/nxp/flexspi/nor/flexspi_nor.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+
+#include <fspi_api.h>
+#include <lib/mmio.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+int flexspi_nor_io_setup(uintptr_t nxp_flexspi_flash_addr,
+			 size_t nxp_flexspi_flash_size, uint32_t fspi_base_reg_addr)
+{
+	int ret = 0;
+
+	ret = fspi_init(fspi_base_reg_addr, nxp_flexspi_flash_addr);
+	/* Adding NOR Memory Map in XLAT Table */
+	mmap_add_region(nxp_flexspi_flash_addr, nxp_flexspi_flash_addr,
+			nxp_flexspi_flash_size, MT_MEMORY | MT_RW);
+
+	return ret;
+}
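A hedged usage sketch of flexspi_nor_io_setup(); the NXP_FLEXSPI_* macros here are placeholders for whatever platform definitions supply the flash window and the controller register base, and are not taken from this patch:

/* Example only: map the NOR flash window and initialise the controller. */
static int plat_flexspi_nor_setup(void)
{
	return flexspi_nor_io_setup(NXP_FLEXSPI_FLASH_ADDR,	/* placeholder */
				    NXP_FLEXSPI_FLASH_SIZE,	/* placeholder */
				    NXP_FLEXSPI_ADDR);		/* placeholder */
}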
diff --git a/drivers/nxp/flexspi/nor/flexspi_nor.h b/drivers/nxp/flexspi/nor/flexspi_nor.h
new file mode 100644
index 0000000..61fc236
--- /dev/null
+++ b/drivers/nxp/flexspi/nor/flexspi_nor.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef FLEXSPI_NOR_H
+#define FLEXSPI_NOR_H
+
+int flexspi_nor_io_setup(uintptr_t nxp_flexspi_flash_addr,
+			 size_t nxp_flexspi_flash_size,
+			 uint32_t fspi_base_reg_addr);
+
+#endif /*	FLEXSPI_NOR_H	*/
diff --git a/drivers/nxp/flexspi/nor/flexspi_nor.mk b/drivers/nxp/flexspi/nor/flexspi_nor.mk
new file mode 100644
index 0000000..6d9eebb
--- /dev/null
+++ b/drivers/nxp/flexspi/nor/flexspi_nor.mk
@@ -0,0 +1,35 @@
+#
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${XSPI_NOR},)
+XSPI_NOR	:= 1
+
+FLEXSPI_DRIVERS_PATH	:=  ${PLAT_DRIVERS_PATH}/flexspi/nor
+
+PLAT_XSPI_INCLUDES	+= -I$(FLEXSPI_DRIVERS_PATH)
+
+XSPI_BOOT_SOURCES	+= $(FLEXSPI_DRIVERS_PATH)/flexspi_nor.c	\
+			   ${FLEXSPI_DRIVERS_PATH}/fspi.c
+ifeq ($(DEBUG),1)
+XSPI_BOOT_SOURCES	+= ${FLEXSPI_DRIVERS_PATH}/test_fspi.c
+endif
+
+PLAT_XSPI_INCLUDES	+= -Iinclude/drivers/nxp/flexspi
+
+PLAT_INCLUDES		+= ${PLAT_XSPI_INCLUDES}
+
+ifeq (${BL_COMM_XSPI_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${XSPI_BOOT_SOURCES}
+else
+ifeq (${BL2_XSPI_NEEDED},yes)
+BL2_SOURCES		+= ${XSPI_BOOT_SOURCES}
+endif
+ifeq (${BL31_XSPI_NEEDED},yes)
+BL31_SOURCES		+= ${XSPI_BOOT_SOURCES}
+endif
+endif
+
+endif
diff --git a/drivers/nxp/flexspi/nor/fspi.c b/drivers/nxp/flexspi/nor/fspi.c
new file mode 100644
index 0000000..7c919b8
--- /dev/null
+++ b/drivers/nxp/flexspi/nor/fspi.c
@@ -0,0 +1,853 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * NXP FlexSPI Controller Driver.
+ * Copyright 2021 NXP
+ *
+ */
+#include <endian.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <flash_info.h>
+#include "fspi.h"
+#include <fspi_api.h>
+#include <xspi_error_codes.h>
+
+#ifdef DEBUG_FLEXSPI
+#define PR printf("In [%s][%d]\n", __func__, __LINE__)
+#define PRA(a, b) printf("In [%s][%d] %s="a"\n", __func__, __LINE__, #b, b)
+#else
+#define PR
+#define PRA(a, b)
+#endif
+
+/*
+ * This erratum applies to all NXP SoCs.
+ */
+#define ERRATA_FLASH_A050272 1
+
+static uintptr_t fspi_base_reg_addr;
+static uintptr_t fspi_flash_base_addr;
+
+static void fspi_RDSR(uint32_t *, const void *, uint32_t);
+
+static void fspi_writel(uint32_t x_addr, uint32_t x_val)
+{
+	fspi_out32((uint32_t *)(fspi_base_reg_addr + x_addr),
+		 (uint32_t) x_val);
+}
+
+static uint32_t fspi_readl(uint32_t x_addr)
+{
+	return fspi_in32((uint32_t *)(fspi_base_reg_addr + x_addr));
+}
+
+static void fspi_MDIS(uint8_t x_disable)
+{
+	uint32_t ui_reg;
+
+	ui_reg = fspi_readl(FSPI_MCR0);
+	if (x_disable != 0U) {
+		ui_reg |= FSPI_MCR0_MDIS;
+	} else {
+		ui_reg &= (uint32_t) (~FSPI_MCR0_MDIS);
+	}
+
+	fspi_writel(FSPI_MCR0, ui_reg);
+}
+
+static void fspi_lock_LUT(void)
+{
+	fspi_writel(FSPI_LUTKEY, FSPI_LUTKEY_VALUE);
+	VERBOSE("%s 0x%x\n", __func__, fspi_readl(FSPI_LCKCR));
+	fspi_writel(FSPI_LCKCR, FSPI_LCKER_LOCK);
+	VERBOSE("%s 0x%x\n", __func__, fspi_readl(FSPI_LCKCR));
+}
+
+static void fspi_unlock_LUT(void)
+{
+	fspi_writel(FSPI_LUTKEY,  FSPI_LUTKEY_VALUE);
+	VERBOSE("%s 0x%x\n", __func__, fspi_readl(FSPI_LCKCR));
+	fspi_writel(FSPI_LCKCR, FSPI_LCKER_UNLOCK);
+	VERBOSE("%s 0x%x\n", __func__, fspi_readl(FSPI_LCKCR));
+}
+
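+/*
+ * Program one LUT sequence. Each sequence occupies four 32-bit LUT words at
+ * FSPI_LUTREG_OFFSET + 0x10 * seq_id: the first word holds the command (and,
+ * for most sequences, the address phase, selecting the 3-byte or 4-byte
+ * opcode based on the flash size), the second word holds the data/dummy
+ * phase, and the last two words are programmed as STOP.
+ */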
+static void fspi_op_setup(uint32_t fspi_op_seq_id, bool ignore_flash_sz)
+{
+	uint32_t x_addr, x_instr0 = 0, x_instr1 = 0, x_instr2 = 0;
+	uint32_t cmd_id1, cmd_id2;
+
+	VERBOSE("In func %s\n", __func__);
+
+	switch (fspi_op_seq_id) {
+	case FSPI_READ_SEQ_ID:
+		cmd_id1 = FSPI_NOR_CMD_READ;
+		cmd_id2 = FSPI_NOR_CMD_READ_4B;
+		x_instr2 = FSPI_INSTR_OPRND0(0) | FSPI_INSTR_PAD0(FSPI_LUT_PAD1)
+				| FSPI_INSTR_OPCODE0(FSPI_LUT_READ);
+		break;
+	case FSPI_FASTREAD_SEQ_ID:
+		cmd_id1 = FSPI_NOR_CMD_FASTREAD;
+		cmd_id2 = FSPI_NOR_CMD_FASTREAD_4B;
+		x_instr2 = FSPI_INSTR_OPRND0(8) | FSPI_INSTR_PAD0(FSPI_LUT_PAD1)
+				| FSPI_INSTR_OPCODE0(FSPI_DUMMY_SDR)
+				| FSPI_INSTR_OPRND1(0)
+				| FSPI_INSTR_PAD1(FSPI_LUT_PAD1)
+				| FSPI_INSTR_OPCODE1(FSPI_LUT_READ);
+		break;
+	case FSPI_WRITE_SEQ_ID:
+		cmd_id1 = FSPI_NOR_CMD_PP;
+		cmd_id2 = FSPI_NOR_CMD_PP_4B;
+		x_instr2 = FSPI_INSTR_OPRND0(0) | FSPI_INSTR_PAD0(FSPI_LUT_PAD1)
+				| FSPI_INSTR_OPCODE0(FSPI_LUT_WRITE);
+		break;
+	case FSPI_WREN_SEQ_ID:
+		cmd_id1 = FSPI_NOR_CMD_WREN;
+		cmd_id2 = FSPI_NOR_CMD_WREN;
+		break;
+	case FSPI_SE_SEQ_ID:
+		cmd_id1 = FSPI_NOR_CMD_SE_64K;
+		cmd_id2 = FSPI_NOR_CMD_SE_64K_4B;
+		break;
+	case FSPI_4K_SEQ_ID:
+		cmd_id1 = FSPI_NOR_CMD_SE_4K;
+		cmd_id2 = FSPI_NOR_CMD_SE_4K_4B;
+		break;
+	case FSPI_BE_SEQ_ID:
+		cmd_id1 = FSPI_NOR_CMD_BE;
+		cmd_id2 = FSPI_NOR_CMD_BE;
+		break;
+	case FSPI_RDSR_SEQ_ID:
+		cmd_id1 = FSPI_NOR_CMD_RDSR;
+		cmd_id2 = FSPI_NOR_CMD_RDSR;
+		break;
+	}
+
+	x_addr = FSPI_LUTREG_OFFSET + (uint32_t)(0x10 * fspi_op_seq_id);
+	if ((F_FLASH_SIZE_BYTES <= SZ_16M_BYTES) || (ignore_flash_sz)) {
+		x_instr0 = FSPI_INSTR_OPRND0(cmd_id1);
+		x_instr1 = FSPI_INSTR_OPRND1(FSPI_LUT_ADDR24BIT);
+		VERBOSE("CMD_ID = %x offset = 0x%x\n", cmd_id1, x_addr);
+	} else {
+		x_instr0 = FSPI_INSTR_OPRND0(cmd_id2);
+		x_instr1 = FSPI_INSTR_OPRND1(FSPI_LUT_ADDR32BIT);
+		VERBOSE("CMD_ID = %x offset = 0x%x\n", cmd_id2, x_addr);
+	}
+	x_instr0 |= FSPI_INSTR_PAD0(FSPI_LUT_PAD1)
+		| FSPI_INSTR_OPCODE0(FSPI_LUT_CMD);
+
+	x_instr1 |= FSPI_INSTR_PAD1(FSPI_LUT_PAD1)
+		| FSPI_INSTR_OPCODE1(FSPI_LUT_ADDR);
+
+	if (fspi_op_seq_id == FSPI_RDSR_SEQ_ID) {
+		x_instr0 |= FSPI_INSTR_OPRND1(1) | FSPI_INSTR_PAD1(FSPI_LUT_PAD1)
+					| FSPI_INSTR_OPCODE1(FSPI_LUT_READ);
+	} else if ((fspi_op_seq_id != FSPI_BE_SEQ_ID)
+			&& (fspi_op_seq_id != FSPI_WREN_SEQ_ID)) {
+		x_instr0 |= x_instr1;
+	}
+
+	fspi_writel((x_addr), x_instr0);
+	fspi_writel((x_addr + U(0x4)), x_instr2);
+	fspi_writel((x_addr + U(0x8)), (uint32_t) 0x0);	/* STOP command */
+	fspi_writel((x_addr + U(0xc)), (uint32_t) 0x0);	/* STOP command */
+}
+
+static void fspi_setup_LUT(void)
+{
+	VERBOSE("In func %s\n", __func__);
+	fspi_unlock_LUT();
+
+	/* LUT Setup for READ Command 3-Byte low Frequency */
+	fspi_op_setup(FSPI_READ_SEQ_ID, false);
+
+	/* LUT Setup for FAST READ Command 3-Byte/4-Byte high Frequency */
+	fspi_op_setup(FSPI_FASTREAD_SEQ_ID, false);
+
+	/* LUT Setup for Page Program */
+	fspi_op_setup(FSPI_WRITE_SEQ_ID, false);
+
+	/* LUT Setup for WREN */
+	fspi_op_setup(FSPI_WREN_SEQ_ID, true);
+
+	/* LUT Setup for Sector_Erase */
+	fspi_op_setup(FSPI_SE_SEQ_ID, false);
+
+	/* LUT Setup for Sub Sector 4K Erase */
+	fspi_op_setup(FSPI_4K_SEQ_ID, false);
+
+	/* LUT Setup for Bulk_Erase */
+	fspi_op_setup(FSPI_BE_SEQ_ID, true);
+
+	/* Read Status */
+	fspi_op_setup(FSPI_RDSR_SEQ_ID, true);
+
+	fspi_lock_LUT();
+}
+
+static inline void fspi_ahb_invalidate(void)
+{
+	uint32_t reg;
+
+	VERBOSE("In func %s %d\n", __func__, __LINE__);
+	reg = fspi_readl(FSPI_MCR0);
+	reg |= FSPI_MCR0_SWRST;
+	fspi_writel(FSPI_MCR0, reg);
+	while ((fspi_readl(FSPI_MCR0) & FSPI_MCR0_SWRST) != 0)
+		;  /* FSPI_MCR0_SWRESET_MASK */
+	VERBOSE("In func %s %d\n", __func__, __LINE__);
+}
+
+#if defined(CONFIG_FSPI_AHB)
+static void fspi_init_ahb(void)
+{
+	uint32_t i, x_flash_cr2, seq_id;
+
+	x_flash_cr2 = 0;
+	/* Reset AHB RX buffer CR configuration */
+	for (i = 0; i < 8; i++) {
+		fspi_writel((FSPI_AHBRX_BUF0CR0 + 4 * i), 0U);
+	}
+
+	/* Set ADATSZ with the maximum AHB buffer size */
+	fspi_writel(FSPI_AHBRX_BUF7CR0,
+			((uint32_t) ((FSPI_RX_MAX_AHBBUF_SIZE / 8U) |
+				    FSPI_AHBRXBUF0CR7_PREF)));
+
+	/*
+	 * Known limitation handling: prefetch and no start address alignment.
+	 */
+	fspi_writel(FSPI_AHBCR, FSPI_AHBCR_PREF_EN);
+	INFO("xAhbcr=0x%x\n", fspi_readl(FSPI_AHBCR));
+
+	/* Set up the AHB READ sequence ID for all flashes. */
+	x_flash_cr2 = fspi_readl(FSPI_FLSHA1CR2);
+	INFO("x_flash_cr2=0x%x\n", x_flash_cr2);
+
+	seq_id = CONFIG_FSPI_FASTREAD ?
+			FSPI_FASTREAD_SEQ_ID : FSPI_READ_SEQ_ID;
+	x_flash_cr2 |= ((seq_id << FSPI_FLSHXCR2_ARDSEQI_SHIFT) & 0x1f);
+
+	INFO("x_flash_cr2=0x%x\n", x_flash_cr2);
+
+	fspi_writel(FSPI_FLSHA1CR2,  x_flash_cr2);
+	x_flash_cr2 = fspi_readl(FSPI_FLSHA1CR2);
+	INFO("x_flash_cr2=0x%x\n", x_flash_cr2);
+}
+#endif
+
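+/*
+ * Read x_size_bytes from flash offset pc_rx_addr into pc_rx_buf, using the
+ * AHB path when CONFIG_FSPI_AHB is enabled and the IP path otherwise.
+ */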
+int xspi_read(uint32_t pc_rx_addr, uint32_t *pc_rx_buf, uint32_t x_size_bytes)
+{
+	if (x_size_bytes == 0) {
+		ERROR("Zero length reads are not allowed\n");
+		return XSPI_READ_FAIL;
+	}
+
+#if defined(CONFIG_FSPI_AHB)
+	return xspi_ahb_read(pc_rx_addr, pc_rx_buf, x_size_bytes);
+#else
+	return xspi_ip_read(pc_rx_addr, pc_rx_buf, x_size_bytes);
+#endif
+}
+#if defined(CONFIG_FSPI_AHB)
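+/*
+ * AHB read: the flash is memory mapped at fspi_flash_base_addr, so the data
+ * is copied directly with memcpy and the AHB buffers are invalidated after.
+ */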
+int xspi_ahb_read(uint32_t pc_rx_addr, uint32_t *pc_rx_buf, uint32_t x_size_bytes)
+{
+	VERBOSE("In func %s 0x%x\n", __func__, (pc_rx_addr));
+
+	if (F_FLASH_SIZE_BYTES <= SZ_16M_BYTES) {
+		pc_rx_addr = ((uint32_t)(pc_rx_addr & MASK_24BIT_ADDRESS));
+	} else {
+		pc_rx_addr = ((uint32_t)(pc_rx_addr & MASK_32BIT_ADDRESS));
+	}
+
+	pc_rx_addr = ((uint32_t)(pc_rx_addr + fspi_flash_base_addr));
+
+	if (((pc_rx_addr % 4) != 0) || (((uintptr_t)pc_rx_buf % 4) != 0)) {
+		WARN("%s: unaligned Start Address src=%ld dst=0x%p\n",
+		     __func__, (pc_rx_addr - fspi_flash_base_addr), pc_rx_buf);
+	}
+
+	/* Directly copy from AHB Buffer */
+	memcpy(pc_rx_buf, (void *)(uintptr_t)pc_rx_addr, x_size_bytes);
+
+	fspi_ahb_invalidate();
+	return XSPI_SUCCESS;
+}
+#endif
+
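+/*
+ * IP read: issue READ/FAST_READ IP commands in chunks of at most
+ * FSPI_RX_IPBUF_SIZE bytes and drain the IP RX FIFO at the 8-byte
+ * watermark level into pv_rx_buf.
+ */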
+int xspi_ip_read(uint32_t pc_rx_addr, uint32_t *pv_rx_buf, uint32_t ui_len)
+{
+
+	uint32_t i = 0U, j = 0U, x_rem = 0U;
+	uint32_t x_iteration = 0U, x_size_rx = 0U, x_size_wm, temp_size;
+	uint32_t data = 0U;
+	uint32_t x_len_bytes;
+	uint32_t x_addr, sts0, intr, seq_id;
+
+	x_addr = (uint32_t) pc_rx_addr;
+	x_len_bytes = ui_len;
+
+	/* Watermark level: 8 bytes (default). */
+	x_size_wm = 8U;
+
+	/* Clear any pending RX watermark interrupt in the INTR register. */
+	fspi_writel(FSPI_INTR, FSPI_INTR_IPRXWA);
+	PRA("0x%x", fspi_readl(FSPI_INTR));
+	/* Invalidate the RX FIFO to run the next IP command: */
+	/* clears data entries in the IP RX FIFO and resets the R/W pointers. */
+	fspi_writel(FSPI_IPRXFCR, FSPI_IPRXFCR_CLR);
+	fspi_writel(FSPI_INTR, FSPI_INTEN_IPCMDDONE);
+
+	while (x_len_bytes) {
+
+		/* FlexSPI can store no more than  FSPI_RX_IPBUF_SIZE */
+		x_size_rx = (x_len_bytes >  FSPI_RX_IPBUF_SIZE) ?
+			   FSPI_RX_IPBUF_SIZE : x_len_bytes;
+
+		/* IP Control Register0 - SF Address to be read */
+		fspi_writel(FSPI_IPCR0, x_addr);
+		PRA("0x%x", fspi_readl(FSPI_IPCR0));
+		/* IP Control Register1 - SEQID_READ operation, Size */
+
+		seq_id = CONFIG_FSPI_FASTREAD ?
+				FSPI_FASTREAD_SEQ_ID : FSPI_READ_SEQ_ID;
+
+		fspi_writel(FSPI_IPCR1,
+			    (uint32_t)(seq_id << FSPI_IPCR1_ISEQID_SHIFT) |
+			    (uint16_t) x_size_rx);
+
+		PRA("0x%x", fspi_readl(FSPI_IPCR1));
+
+		do {
+			sts0 = fspi_readl(FSPI_STS0);
+		} while (((sts0 & FSPI_STS0_ARB_IDLE) == 0) &&
+			 ((sts0 & FSPI_STS0_SEQ_IDLE) == 0));
+
+		/* Trigger IP Read Command */
+		fspi_writel(FSPI_IPCMD, FSPI_IPCMD_TRG_MASK);
+		PRA("0x%x", fspi_readl(FSPI_IPCMD));
+
+		intr = fspi_readl(FSPI_INTR);
+		if (((intr & FSPI_INTR_IPCMDGE) != 0) ||
+		    ((intr & FSPI_INTR_IPCMDERR) != 0)) {
+			ERROR("Error in IP READ INTR=0x%x\n", intr);
+			return -XSPI_IP_READ_FAIL;
+		}
+		/* Read in n iterations of 8 bytes each (watermark level). */
+		x_iteration = x_size_rx / x_size_wm;
+		for (i = 0U; i < x_iteration; i++) {
+			if ((fspi_readl(FSPI_INTR) & FSPI_INTR_IPRXWA_MASK) == 0) {
+				PRA("0x%x", fspi_readl(FSPI_INTR));
+			}
+			/* Wait for IP Rx Watermark Fill event */
+			while (!(fspi_readl(FSPI_INTR) & FSPI_INTR_IPRXWA_MASK)) {
+				PRA("0x%x", fspi_readl(FSPI_INTR));
+			}
+
+			/* Read the RX FIFO (up to the WM level) and copy to the RX buffer */
+			for (j = 0U; j < x_size_wm; j += 4U) {
+				/* Read FIFO Data Register */
+				data = fspi_readl(FSPI_RFDR + j);
+#if FSPI_IPDATA_SWAP /* Swap if required */
+				data = bswap32(data);
+#endif
+				memcpy(pv_rx_buf++, &data, 4);
+			}
+
+			/* Clear IP_RX_WATERMARK Event in INTR register */
+			/* Reset FIFO Read pointer for next iteration.*/
+			fspi_writel(FSPI_INTR, FSPI_INTR_IPRXWA);
+		}
+
+		x_rem = x_size_rx % x_size_wm;
+
+		if (x_rem != 0U) {
+			/* Wait for data filled */
+			while (!(fspi_readl(FSPI_IPRXFSTS) & FSPI_IPRXFSTS_FILL_MASK)) {
+				PRA("0x%x", fspi_readl(FSPI_IPRXFSTS));
+			}
+
+			temp_size = 0;
+			j = 0U;
+			while (x_rem > 0U) {
+				data = 0U;
+				data =  fspi_readl(FSPI_RFDR + j);
+#if FSPI_IPDATA_SWAP /* Swap if required */
+				data = bswap32(data);
+#endif
+				temp_size = (x_rem < 4) ? x_rem : 4;
+				memcpy(pv_rx_buf++, &data, temp_size);
+				x_rem -= temp_size;
+			}
+		}
+
+
+		while (!(fspi_readl(FSPI_INTR) & FSPI_INTR_IPCMDDONE_MASK)) {
+			PRA("0x%x", fspi_readl(FSPI_INTR));
+		}
+
+		/* Invalidate the RX FIFO to run the next IP command */
+		fspi_writel(FSPI_IPRXFCR, FSPI_IPRXFCR_CLR);
+		/* Clear IP Command Done flag in interrupt register*/
+		fspi_writel(FSPI_INTR, FSPI_INTR_IPCMDDONE_MASK);
+
+		/* Update remaining len, Increment x_addr read pointer. */
+		x_len_bytes -= x_size_rx;
+		x_addr += x_size_rx;
+	}
+	PR;
+	return XSPI_SUCCESS;
+}
+
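+/*
+ * IP write: fill the IP TX FIFO at the 8-byte watermark level and issue
+ * page-program IP commands in chunks of at most FSPI_TX_IPBUF_SIZE bytes.
+ * The caller is expected to have sent WREN and to poll the flash status.
+ */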
+void xspi_ip_write(uint32_t pc_wr_addr, uint32_t *pv_wr_buf, uint32_t ui_len)
+{
+
+	uint32_t x_iteration = 0U, x_rem = 0U;
+	uint32_t x_size_tx = 0U, x_size_wm, temp_size;
+	uint32_t i = 0U, j = 0U;
+	uint32_t ui_data = 0U;
+	uint32_t x_addr, x_len_bytes;
+
+
+	x_size_wm = 8U;	/* Default TX WaterMark level: 8 Bytes. */
+	x_addr = (uint32_t)pc_wr_addr;
+	x_len_bytes = ui_len;
+	VERBOSE("In func %s[%d] x_addr =0x%x xLen_bytes=%d\n",
+			__func__, __LINE__, x_addr, x_len_bytes);
+
+	while (x_len_bytes != 0U) {
+
+		x_size_tx = (x_len_bytes >  FSPI_TX_IPBUF_SIZE) ?
+				FSPI_TX_IPBUF_SIZE : x_len_bytes;
+
+		/* IP Control Register0 - SF Address to be read */
+		fspi_writel(FSPI_IPCR0, x_addr);
+		INFO("In func %s[%d] x_addr =0x%x xLen_bytes=%d\n",
+				__func__, __LINE__, x_addr, x_len_bytes);
+
+		/* Fill the TX FIFOs. */
+
+		x_iteration = x_size_tx / x_size_wm;
+		for (i = 0U; i < x_iteration; i++) {
+
+			/* Ensure TX FIFO Watermark Available */
+			while ((fspi_readl(FSPI_INTR) & FSPI_INTR_IPTXWE_MASK) == 0)
+				;
+
+
+			/* Fill the TX FIFO (up to the watermark level) */
+			for (j = 0U; j < x_size_wm; j += 4U) {
+				memcpy(&ui_data, pv_wr_buf++,  4);
+				/* Write TX FIFO Data Register */
+				fspi_writel((FSPI_TFDR + j), ui_data);
+
+			}
+
+			/* Clear IP_TX_WATERMARK Event in INTR register */
+			/* Reset the FIFO Write pointer for next iteration */
+			fspi_writel(FSPI_INTR, FSPI_INTR_IPTXWE);
+		}
+
+		x_rem = x_size_tx % x_size_wm;
+
+		if (x_rem != 0U) {
+			/* Wait for TXFIFO empty */
+			while (!(fspi_readl(FSPI_INTR) & FSPI_INTR_IPTXWE))
+				;
+
+			temp_size = 0U;
+			j = 0U;
+			while (x_rem > 0U) {
+				ui_data = 0U;
+				temp_size = (x_rem < 4U) ? x_rem : 4U;
+				memcpy(&ui_data, pv_wr_buf++, temp_size);
+				INFO("%d ---> pv_wr_buf=0x%p\n", __LINE__, pv_wr_buf);
+				fspi_writel((FSPI_TFDR + j), ui_data);
+				x_rem -= temp_size;
+				j += 4U; /* TODO: may not be needed */
+			}
+			/* Clear IP_TX_WATERMARK Event in INTR register */
+			/* Reset FIFO's Write pointer for next iteration.*/
+			fspi_writel(FSPI_INTR, FSPI_INTR_IPTXWE);
+		}
+
+		/* IP Control Register1 - SEQID_WRITE operation, Size */
+		fspi_writel(FSPI_IPCR1, (uint32_t)(FSPI_WRITE_SEQ_ID << FSPI_IPCR1_ISEQID_SHIFT) | (uint16_t) x_size_tx);
+		/* Trigger IP Write Command */
+		fspi_writel(FSPI_IPCMD, FSPI_IPCMD_TRG_MASK);
+
+		/* Wait for IP Write command done */
+		while (!(fspi_readl(FSPI_INTR) & FSPI_INTR_IPCMDDONE_MASK))
+			;
+
+		/* Invalidate TX FIFOs & acknowledge IP_CMD_DONE event */
+		fspi_writel(FSPI_IPTXFCR, FSPI_IPTXFCR_CLR);
+		fspi_writel(FSPI_INTR, FSPI_INTR_IPCMDDONE_MASK);
+
+		/* for next iteration */
+		x_len_bytes  -=  x_size_tx;
+		x_addr += x_size_tx;
+	}
+
+}
+
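+/*
+ * Write ui_len bytes to flash offset pc_wr_addr, splitting the transfer on
+ * F_PAGE_256 page boundaries: a leading partial page, j full pages and a
+ * trailing partial page, each preceded by WREN and followed by a WIP poll.
+ */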
+int xspi_write(uint32_t pc_wr_addr, void *pv_wr_buf, uint32_t ui_len)
+{
+
+	uint32_t x_addr;
+	uint32_t x_page1_len = 0U, x_page_l_len = 0U;
+	uint32_t i, j = 0U;
+	void *buf = pv_wr_buf;
+
+	VERBOSE("\nIn func %s\n", __func__);
+
+	x_addr = (uint32_t)(pc_wr_addr);
+	if ((ui_len <= F_PAGE_256) && ((x_addr % F_PAGE_256) == 0)) {
+		x_page1_len = ui_len;
+		INFO("%d ---> x_page1_len=0x%x x_page_l_len =0x%x j=0x%x\n", __LINE__, x_page1_len, x_page_l_len, j);
+	} else if ((ui_len <= F_PAGE_256) && ((x_addr % F_PAGE_256) != 0)) {
+		x_page1_len = (F_PAGE_256 - (x_addr % F_PAGE_256));
+		if (ui_len > x_page1_len) {
+			x_page_l_len = (ui_len - x_page1_len) % F_PAGE_256;
+		} else {
+			x_page1_len = ui_len;
+			x_page_l_len = 0;
+		}
+		j = 0U;
+		INFO("%d 0x%x 0x%x\n", x_addr % F_PAGE_256, x_addr % F_PAGE_256, F_PAGE_256);
+		INFO("%d ---> x_page1_len=0x%x x_page_l_len =0x%x j=0x%x\n", __LINE__, x_page1_len, x_page_l_len, j);
+	} else if ((ui_len > F_PAGE_256) && ((x_addr % F_PAGE_256) == 0)) {
+		j = ui_len / F_PAGE_256;
+		x_page_l_len = ui_len % F_PAGE_256;
+		INFO("%d ---> x_page1_len=0x%x x_page_l_len =0x%x j=0x%x\n", __LINE__, x_page1_len, x_page_l_len, j);
+	} else if ((ui_len > F_PAGE_256) && ((x_addr % F_PAGE_256) != 0)) {
+		x_page1_len = (F_PAGE_256 - (x_addr % F_PAGE_256));
+		j = (ui_len - x_page1_len) / F_PAGE_256;
+		x_page_l_len = (ui_len - x_page1_len) % F_PAGE_256;
+		INFO("%d ---> x_page1_len=0x%x x_page_l_len =0x%x j=0x%x\n", __LINE__, x_page1_len, x_page_l_len, j);
+	}
+
+	if (x_page1_len != 0U) {
+		xspi_wren(x_addr);
+		xspi_ip_write(x_addr, (uint32_t *)buf, x_page1_len);
+		while (is_flash_busy())
+			;
+		INFO("%d Initial pc_wr_addr=0x%x, Final x_addr=0x%x, Initial ui_len=0x%x Final ui_len=0x%x\n",
+		     __LINE__, pc_wr_addr, x_addr, ui_len, (x_addr-pc_wr_addr));
+		INFO("Initial Buf pv_wr_buf=%p, final Buf=%p\n", pv_wr_buf, buf);
+		x_addr += x_page1_len;
+		/* TODO: what if the buffer start is not 4-byte aligned? */
+		buf = buf + x_page1_len;
+	}
+
+	for (i = 0U; i < j; i++) {
+		INFO("In for loop Buf pv_wr_buf=%p, final Buf=%p x_addr=0x%x offset_buf %d.\n",
+				pv_wr_buf, buf, x_addr, x_page1_len/4);
+		xspi_wren(x_addr);
+		xspi_ip_write(x_addr, (uint32_t *)buf, F_PAGE_256);
+		while (is_flash_busy())
+			;
+		INFO("%d Initial pc_wr_addr=0x%x, Final x_addr=0x%x, Initial ui_len=0x%x Final ui_len=0x%x\n",
+		     __LINE__, pc_wr_addr, x_addr, ui_len, (x_addr-pc_wr_addr));
+		x_addr += F_PAGE_256;
+		/* TODO: what if the buffer start is not 4-byte aligned? */
+		buf = buf + F_PAGE_256;
+		INFO("Initial Buf pv_wr_buf=%p, final Buf=%p\n", pv_wr_buf, buf);
+	}
+
+	if (x_page_l_len != 0U) {
+		INFO("%d Initial Buf pv_wr_buf=%p, final Buf=%p x_page_l_len=0x%x\n", __LINE__, pv_wr_buf, buf, x_page_l_len);
+		xspi_wren(x_addr);
+		xspi_ip_write(x_addr, (uint32_t *)buf, x_page_l_len);
+		while (is_flash_busy())
+			;
+		INFO("%d Initial pc_wr_addr=0x%x, Final x_addr=0x%x, Initial ui_len=0x%x Final ui_len=0x%x\n",
+				__LINE__, pc_wr_addr, x_addr, ui_len, (x_addr-pc_wr_addr));
+	}
+
+	VERBOSE("Now calling func call Invalidate%s\n", __func__);
+	fspi_ahb_invalidate();
+	return XSPI_SUCCESS;
+}
+
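+/* Send the Write Enable (WREN) command before a program or erase operation. */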
+int xspi_wren(uint32_t pc_wr_addr)
+{
+	VERBOSE("In func %s Addr=0x%x\n", __func__, pc_wr_addr);
+
+	fspi_writel(FSPI_IPTXFCR, FSPI_IPTXFCR_CLR);
+
+	fspi_writel(FSPI_IPCR0, (uint32_t)pc_wr_addr);
+	fspi_writel(FSPI_IPCR1, ((FSPI_WREN_SEQ_ID << FSPI_IPCR1_ISEQID_SHIFT) |  0));
+	fspi_writel(FSPI_IPCMD, FSPI_IPCMD_TRG_MASK);
+
+	while ((fspi_readl(FSPI_INTR) & FSPI_INTR_IPCMDDONE_MASK) == 0)
+		;
+
+	fspi_writel(FSPI_INTR, FSPI_INTR_IPCMDDONE_MASK);
+	return XSPI_SUCCESS;
+}
+
+static void fspi_bbluk_er(void)
+{
+	VERBOSE("In func %s\n", __func__);
+	fspi_writel(FSPI_IPCR0, 0x0);
+	fspi_writel(FSPI_IPCR1, ((FSPI_BE_SEQ_ID << FSPI_IPCR1_ISEQID_SHIFT) | 20));
+	fspi_writel(FSPI_IPCMD, FSPI_IPCMD_TRG_MASK);
+
+	while ((fspi_readl(FSPI_INTR) & FSPI_INTR_IPCMDDONE_MASK) == 0)
+		;
+	fspi_writel(FSPI_INTR, FSPI_INTR_IPCMDDONE_MASK);
+
+}
+
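+/* Read 'size' bytes of the flash status register (RDSR) into rxbuf. */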
+static void fspi_RDSR(uint32_t *rxbuf, const void *p_addr, uint32_t size)
+{
+	uint32_t iprxfcr = 0U;
+	uint32_t data = 0U;
+
+	iprxfcr = fspi_readl(FSPI_IPRXFCR);
+	/* IP RX FIFO would be read by processor */
+	iprxfcr = iprxfcr & (uint32_t)~FSPI_IPRXFCR_CLR;
+	/* Invalidate data entries in the IP RX FIFO */
+	iprxfcr = iprxfcr | FSPI_IPRXFCR_CLR;
+	fspi_writel(FSPI_IPRXFCR, iprxfcr);
+
+	fspi_writel(FSPI_IPCR0, (uintptr_t) p_addr);
+	fspi_writel(FSPI_IPCR1,
+		    (uint32_t) ((FSPI_RDSR_SEQ_ID << FSPI_IPCR1_ISEQID_SHIFT)
+		    | (uint16_t) size));
+	/* Trigger the command */
+	fspi_writel(FSPI_IPCMD, FSPI_IPCMD_TRG_MASK);
+	/* Wait for command done */
+	while ((fspi_readl(FSPI_INTR) & FSPI_INTR_IPCMDDONE_MASK) == 0)
+		;
+	fspi_writel(FSPI_INTR, FSPI_INTR_IPCMDDONE_MASK);
+
+	data = fspi_readl(FSPI_RFDR);
+	memcpy(rxbuf, &data, size);
+
+	/* RX FIFO invalidation needs to be done prior to W1C of the INTR.IPRXWA bit */
+	fspi_writel(FSPI_IPRXFCR, FSPI_IPRXFCR_CLR);
+	fspi_writel(FSPI_INTR, FSPI_INTR_IPRXWA_MASK);
+	fspi_writel(FSPI_INTR, FSPI_INTR_IPCMDDONE_MASK);
+
+}
+
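+/* Poll the flash status register and return true while Write-In-Progress is set. */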
+bool is_flash_busy(void)
+{
+#define FSPI_ONE_BYTE 1
+	uint8_t data[4];
+
+	VERBOSE("In func %s\n\n", __func__);
+	fspi_RDSR((uint32_t *) data, 0, FSPI_ONE_BYTE);
+
+	return !!((uint32_t) data[0] & FSPI_NOR_SR_WIP_MASK);
+}
+
+int xspi_bulk_erase(void)
+{
+	VERBOSE("In func %s\n", __func__);
+	xspi_wren((uint32_t) 0x0);
+	fspi_bbluk_er();
+	while (is_flash_busy())
+		;
+	fspi_ahb_invalidate();
+	return XSPI_SUCCESS;
+}
+
+static void fspi_sec_er(uint32_t pc_wr_addr)
+{
+	uint32_t x_addr;
+
+	VERBOSE("In func %s\n", __func__);
+	x_addr = (uint32_t)(pc_wr_addr);
+
+	fspi_writel(FSPI_IPCR0, x_addr);
+	INFO("In [%s][%d] Erase address 0x%x\n", __func__, __LINE__, (x_addr));
+#if CONFIG_FSPI_ERASE_4K
+	fspi_writel(FSPI_IPCR1, ((FSPI_4K_SEQ_ID << FSPI_IPCR1_ISEQID_SHIFT) | 0));
+#else
+	fspi_writel(FSPI_IPCR1, ((FSPI_SE_SEQ_ID << FSPI_IPCR1_ISEQID_SHIFT) | 0));
+#endif
+	fspi_writel(FSPI_IPCMD, FSPI_IPCMD_TRG_MASK);
+
+	while ((fspi_readl(FSPI_INTR) & FSPI_INTR_IPCMDDONE_MASK) == 0) {
+		PRA("0x%x", fspi_readl(FSPI_INTR));
+	}
+	fspi_writel(FSPI_INTR, FSPI_INTR_IPCMDDONE_MASK);
+}
+
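+/*
+ * Erase ui_len bytes starting at pc_wr_addr in units of F_SECTOR_ERASE_SZ;
+ * the start address must be sector aligned and the length at least one sector.
+ */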
+int xspi_sector_erase(uint32_t pc_wr_addr, uint32_t ui_len)
+{
+	uint32_t x_addr, x_len_bytes, i, num_sector = 0U;
+
+	VERBOSE("In func %s\n", __func__);
+	x_addr = (uint32_t)(pc_wr_addr);
+	if ((x_addr % F_SECTOR_ERASE_SZ) != 0) {
+		ERROR("!!! In func %s, unaligned start address: must be a multiple of 0x%x\n",
+		      __func__, F_SECTOR_ERASE_SZ);
+		return -XSPI_ERASE_FAIL;
+	}
+
+	x_len_bytes = ui_len;
+	if (x_len_bytes < F_SECTOR_ERASE_SZ) {
+		ERROR("!!! In func %s, erase length must be at least one sector of 0x%x bytes\n",
+				__func__, F_SECTOR_ERASE_SZ);
+		return -XSPI_ERASE_FAIL;
+	}
+
+	num_sector = x_len_bytes/F_SECTOR_ERASE_SZ;
+	num_sector += x_len_bytes % F_SECTOR_ERASE_SZ ? 1U : 0U;
+	INFO("F_SECTOR_ERASE_SZ: 0x%08x, num_sector: %d\n", F_SECTOR_ERASE_SZ, num_sector);
+
+	for (i = 0U; i < num_sector ; i++) {
+		xspi_wren(x_addr + (F_SECTOR_ERASE_SZ * i));
+		fspi_sec_er(x_addr + (F_SECTOR_ERASE_SZ * i));
+		while (is_flash_busy())
+			;
+	}
+	fspi_ahb_invalidate();
+	return XSPI_SUCCESS;
+}
+
+
+__attribute__((unused)) static void  fspi_delay_ms(uint32_t x)
+{
+	volatile unsigned long  ul_count;
+
+	for (ul_count = 0U; ul_count < (30U * x); ul_count++)
+		;
+
+}
+
+
+#if defined(DEBUG_FLEXSPI)
+static void fspi_dump_regs(void)
+{
+	uint32_t i;
+
+	VERBOSE("\nRegisters Dump:\n");
+	VERBOSE("Flexspi: Register FSPI_MCR0(0x%x) = 0x%08x\n", FSPI_MCR0, fspi_readl(FSPI_MCR0));
+	VERBOSE("Flexspi: Register FSPI_MCR2(0x%x) = 0x%08x\n", FSPI_MCR2, fspi_readl(FSPI_MCR2));
+	VERBOSE("Flexspi: Register FSPI_DLL_A_CR(0x%x) = 0x%08x\n", FSPI_DLLACR, fspi_readl(FSPI_DLLACR));
+	VERBOSE("\n");
+
+	for (i = 0U; i < 8U; i++) {
+		VERBOSE("Flexspi: Register FSPI_AHBRX_BUF0CR0(0x%x) = 0x%08x\n", FSPI_AHBRX_BUF0CR0 + i * 4, fspi_readl((FSPI_AHBRX_BUF0CR0 + i * 4)));
+	}
+	VERBOSE("\n");
+
+	VERBOSE("Flexspi: Register FSPI_AHBRX_BUF7CR0(0x%x) = 0x%08x\n", FSPI_AHBRX_BUF7CR0, fspi_readl(FSPI_AHBRX_BUF7CR0));
+	VERBOSE("Flexspi: Register FSPI_AHB_CR(0x%x) \t  = 0x%08x\n", FSPI_AHBCR, fspi_readl(FSPI_AHBCR));
+	VERBOSE("\n");
+
+	for (i = 0U; i < 4U; i++) {
+		VERBOSE("Flexspi: Register FSPI_FLSH_A1_CR2,(0x%x) = 0x%08x\n", FSPI_FLSHA1CR2 + i * 4, fspi_readl(FSPI_FLSHA1CR2 + i * 4));
+	}
+}
+#endif
+
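+/*
+ * One-time controller initialization: reset the controller, program the MCR0
+ * timeouts and endianness, reset the DLL registers, configure the flash size
+ * for chip select A1, optionally set up the AHB path and program the LUT
+ * sequences.
+ */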
+int fspi_init(uint32_t base_reg_addr, uint32_t flash_start_addr)
+{
+	uint32_t	mcrx;
+	uint32_t	flash_size;
+
+	if (fspi_base_reg_addr != 0U) {
+		INFO("FSPI is already initialized.\n");
+		return XSPI_SUCCESS;
+	}
+
+	fspi_base_reg_addr = base_reg_addr;
+	fspi_flash_base_addr = flash_start_addr;
+
+	INFO("Flexspi driver: Version v1.0\n");
+	INFO("Flexspi: Default MCR0 = 0x%08x, before reset\n", fspi_readl(FSPI_MCR0));
+	VERBOSE("Flexspi: Resetting controller...\n");
+
+	/* Reset FlexSpi Controller */
+	fspi_writel(FSPI_MCR0, FSPI_MCR0_SWRST);
+	while ((fspi_readl(FSPI_MCR0) & FSPI_MCR0_SWRST))
+		;  /* FSPI_MCR0_SWRESET_MASK */
+
+
+	/*
+	 * Disable the controller module before programming its registers,
+	 * especially MCR0 (Master Control Register 0).
+	 */
+	fspi_MDIS(1);
+	/*
+	 * Program MCR0 with default values: AHB timeout (0xff), IP timeout
+	 * (0xff); i.e. FSPI_MCR0 = 0xFFFF0000.
+	 */
+
+	/* Timeout wait cycle for AHB command grant */
+	mcrx = fspi_readl(FSPI_MCR0);
+	mcrx |= (uint32_t)((FSPI_MAX_TIMEOUT_AHBCMD << FSPI_MCR0_AHBGRANTWAIT_SHIFT) & (FSPI_MCR0_AHBGRANTWAIT_MASK));
+
+	/* Time out wait cycle for IP command grant*/
+	mcrx |= (uint32_t) (FSPI_MAX_TIMEOUT_IPCMD << FSPI_MCR0_IPGRANTWAIT_SHIFT) & (FSPI_MCR0_IPGRANTWAIT_MASK);
+
+	/* TODO: confirm endianness configuration (LE64 is programmed here) */
+	mcrx |= (uint32_t) (FSPI_ENDCFG_LE64 << FSPI_MCR0_ENDCFG_SHIFT) & FSPI_MCR0_ENDCFG_MASK;
+
+	fspi_writel(FSPI_MCR0, mcrx);
+
+	/* Reset the DLL register to default value */
+	fspi_writel(FSPI_DLLACR, FSPI_DLLACR_OVRDEN);
+	fspi_writel(FSPI_DLLBCR, FSPI_DLLBCR_OVRDEN);
+
+#if ERRATA_FLASH_A050272	/* ERRATA DLL */
+	for (uint8_t delay = 100U; delay > 0U; delay--)	{
+		__asm__ volatile ("nop");
+	}
+#endif
+
+	/* Configure flash control registers for different chip select */
+	flash_size = (F_FLASH_SIZE_BYTES * FLASH_NUM) / FSPI_BYTES_PER_KBYTES;
+	fspi_writel(FSPI_FLSHA1CR0, flash_size);
+	fspi_writel(FSPI_FLSHA2CR0, 0U);
+	fspi_writel(FSPI_FLSHB1CR0, 0U);
+	fspi_writel(FSPI_FLSHB2CR0, 0U);
+
+#if defined(CONFIG_FSPI_AHB)
+	fspi_init_ahb();
+#endif
+	/* RE-Enable Controller Module */
+	fspi_MDIS(0);
+	INFO("Flexspi: After MCR0 = 0x%08x,\n", fspi_readl(FSPI_MCR0));
+	fspi_setup_LUT();
+
+	/* Dump all registers; the controller is no longer disabled at this point. */
+#if defined(DEBUG_FLEXSPI)
+	fspi_dump_regs();
+#endif
+
+	INFO("Flexspi: Init done!!\n");
+
+#if DEBUG_FLEXSPI
+
+	uint32_t xspi_addr = SZ_57M;
+
+	/*
+	 * Second argument of fspi_test is the size of buffer(s) passed
+	 * to the function.
+	 * SIZE_BUFFER defined in test_fspi.c is kept large enough to
+	 * accommodate a variety of sizes for regression tests.
+	 */
+	fspi_test(xspi_addr, 0x40, 0);
+	fspi_test(xspi_addr, 0x15, 2);
+	fspi_test(xspi_addr, 0x80, 0);
+	fspi_test(xspi_addr, 0x81, 0);
+	fspi_test(xspi_addr, 0x79, 3);
+
+	fspi_test(xspi_addr + 0x11, 0x15, 0);
+	fspi_test(xspi_addr + 0x11, 0x40, 0);
+	fspi_test(xspi_addr + 0xff, 0x40, 1);
+	fspi_test(xspi_addr + 0x25, 0x81, 2);
+	fspi_test(xspi_addr + 0xef, 0x6f, 3);
+
+	fspi_test((xspi_addr - F_SECTOR_ERASE_SZ), 0x229, 0);
+#endif
+
+	return XSPI_SUCCESS;
+}
diff --git a/drivers/nxp/flexspi/nor/fspi.h b/drivers/nxp/flexspi/nor/fspi.h
new file mode 100644
index 0000000..da2e269
--- /dev/null
+++ b/drivers/nxp/flexspi/nor/fspi.h
@@ -0,0 +1,385 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * FlexSpi Registers & Bits definition.
+ *
+ */
+
+#ifndef FSPI_H
+#define FSPI_H
+
+#ifndef __ASSEMBLER__
+#include <lib/mmio.h>
+
+#ifdef NXP_FSPI_BE
+#define fspi_in32(a)		bswap32(mmio_read_32((uintptr_t)(a)))
+#define fspi_out32(a, v)	mmio_write_32((uintptr_t)(a), bswap32(v))
+#elif defined(NXP_FSPI_LE)
+#define fspi_in32(a)		mmio_read_32((uintptr_t)(a))
+#define fspi_out32(a, v)	mmio_write_32((uintptr_t)(a), v)
+#else
+#error Please define FSPI register endianness
+#endif
+
+#endif
+
+/* All LE, so no swap is needed */
+#define FSPI_IPDATA_SWAP		0U
+#define FSPI_AHBDATA_SWAP		0U
+
+#define CONFIG_FSPI_FASTREAD		1U
+
+#define FSPI_BYTES_PER_KBYTES		0x400U
+#define FLASH_NUM			1U
+
+#define FSPI_READ_SEQ_ID		0U
+#define FSPI_WREN_SEQ_ID		1U
+#define FSPI_WRITE_SEQ_ID		2U
+#define FSPI_SE_SEQ_ID			3U
+#define FSPI_RDSR_SEQ_ID		4U
+#define FSPI_BE_SEQ_ID			5U
+#define FSPI_FASTREAD_SEQ_ID		6U
+#define FSPI_4K_SEQ_ID			7U
+
+/*
+ * LUT register layout:
+ *
+ *  ---------------------------------------------------
+ *  | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
+ *  ---------------------------------------------------
+ *
+ *    INSTR_SHIFT = 10, PAD_SHIFT = 8, OPRND_SHIFT = 0
+ */
+#define FSPI_INSTR_OPRND0_SHIFT		0
+#define FSPI_INSTR_OPRND0(x)		((x) << FSPI_INSTR_OPRND0_SHIFT)
+#define FSPI_INSTR_PAD0_SHIFT		8
+#define FSPI_INSTR_PAD0(x)		((x) << FSPI_INSTR_PAD0_SHIFT)
+#define FSPI_INSTR_OPCODE0_SHIFT	10
+#define FSPI_INSTR_OPCODE0(x)		((x) << FSPI_INSTR_OPCODE0_SHIFT)
+#define FSPI_INSTR_OPRND1_SHIFT		16
+#define FSPI_INSTR_OPRND1(x)		((x) << FSPI_INSTR_OPRND1_SHIFT)
+#define FSPI_INSTR_PAD1_SHIFT		24
+#define FSPI_INSTR_PAD1(x)		((x) << FSPI_INSTR_PAD1_SHIFT)
+#define FSPI_INSTR_OPCODE1_SHIFT	26
+#define FSPI_INSTR_OPCODE1(x)		((x) << FSPI_INSTR_OPCODE1_SHIFT)
+
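+/*
+ * For example, fspi_op_setup() composes the first LUT word of a
+ * 24-bit-address command sequence as:
+ *   FSPI_INSTR_OPCODE0(FSPI_LUT_CMD)  | FSPI_INSTR_PAD0(FSPI_LUT_PAD1) |
+ *   FSPI_INSTR_OPRND0(<flash command opcode>) |
+ *   FSPI_INSTR_OPCODE1(FSPI_LUT_ADDR) | FSPI_INSTR_PAD1(FSPI_LUT_PAD1) |
+ *   FSPI_INSTR_OPRND1(FSPI_LUT_ADDR24BIT)
+ */
+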
+/* Instruction set for the LUT register. */
+#define LUT_STOP			0x00
+#define LUT_CMD				0x01
+#define LUT_ADDR			0x02
+#define LUT_CADDR_SDR			0x03
+#define LUT_MODE			0x04
+#define LUT_MODE2			0x05
+#define LUT_MODE4			0x06
+#define LUT_MODE8			0x07
+#define LUT_NXP_WRITE			0x08
+#define LUT_NXP_READ			0x09
+
+#define LUT_LEARN_SDR			0x0A
+#define LUT_DATSZ_SDR			0x0B
+#define LUT_DUMMY			0x0C
+#define LUT_DUMMY_RWDS_SDR		0x0D
+#define LUT_JMP_ON_CS			0x1F
+#define LUT_CMD_DDR			0x21
+#define LUT_ADDR_DDR			0x22
+#define LUT_CADDR_DDR			0x23
+#define LUT_MODE_DDR			0x24
+#define LUT_MODE2_DDR			0x25
+#define LUT_MODE4_DDR			0x26
+#define LUT_MODE8_DDR			0x27
+#define LUT_WRITE_DDR			0x28
+#define LUT_READ_DDR			0x29
+#define LUT_LEARN_DDR			0x2A
+#define LUT_DATSZ_DDR			0x2B
+#define LUT_DUMMY_DDR			0x2C
+#define LUT_DUMMY_RWDS_DDR		0x2D
+
+#define FSPI_NOR_CMD_READ		0x03
+#define FSPI_NOR_CMD_READ_4B		0x13
+#define FSPI_NOR_CMD_FASTREAD		0x0b
+#define FSPI_NOR_CMD_FASTREAD_4B	0x0c
+#define FSPI_NOR_CMD_PP			0x02
+#define FSPI_NOR_CMD_PP_4B		0x12
+#define FSPI_NOR_CMD_WREN		0x06
+#define FSPI_NOR_CMD_SE_64K		0xd8
+#define FSPI_NOR_CMD_SE_64K_4B		0xdc
+#define FSPI_NOR_CMD_SE_4K		0x20
+#define FSPI_NOR_CMD_SE_4K_4B		0x21
+#define FSPI_NOR_CMD_BE			0x60
+#define FSPI_NOR_CMD_RDSR		0x05
+#define FSPI_NOR_CMD_WREN_STOP		0x04
+
+#define FSPI_LUT_STOP			0x00
+#define FSPI_LUT_CMD			0x01
+#define FSPI_LUT_ADDR			0x02
+
+#define FSPI_LUT_PAD1			0
+#define FSPI_LUT_PAD2			1
+#define FSPI_LUT_PAD4			2
+#define FSPI_LUT_PAD8			3
+
+#define FSPI_LUT_ADDR24BIT		0x18
+#define FSPI_LUT_ADDR32BIT		0x20
+
+#define FSPI_LUT_WRITE			0x08
+#define FSPI_LUT_READ			0x09
+#define FSPI_DUMMY_SDR			0x0c
+
+/* TODO: Check size if functional */
+#define FSPI_RX_IPBUF_SIZE		0x200	/*  64*64 bits  */
+#define FSPI_TX_IPBUF_SIZE		0x400	/* 128*64 bits */
+
+#define FSPI_RX_MAX_AHBBUF_SIZE		0x800 /* 256 * 64bits */
+#define FSPI_TX_MAX_AHBBUF_SIZE		0x40  /* 8 * 64bits   */
+
+#define FSPI_LUTREG_OFFSET			0x200ul
+
+#define FSPI_MAX_TIMEOUT_AHBCMD		0xFFU
+#define FSPI_MAX_TIMEOUT_IPCMD		0xFF
+#define FSPI_SER_CLK_DIV		0x04
+#define FSPI_HSEN			0
+#define FSPI_ENDCFG_BE64		0x01
+#define FSPI_ENDCFG_BE32		0x03
+#define FSPI_ENDCFG_LE32		0x02
+#define FSPI_ENDCFG_LE64		0x0
+
+#define MASK_24BIT_ADDRESS		0x00ffffff
+#define MASK_32BIT_ADDRESS		0xffffffff
+
+/* Registers used by the driver */
+#define FSPI_MCR0			0x0ul
+#define FSPI_MCR0_AHB_TIMEOUT(x)	((x) << 24)
+#define FSPI_MCR0_IP_TIMEOUT(x)		((x) << 16)
+#define FSPI_MCR0_LEARN_EN		BIT(15)
+#define FSPI_MCR0_SCRFRUN_EN		BIT(14)
+#define FSPI_MCR0_OCTCOMB_EN		BIT(13)
+#define FSPI_MCR0_DOZE_EN		BIT(12)
+#define FSPI_MCR0_HSEN			BIT(11)
+#define FSPI_MCR0_SERCLKDIV		BIT(8)
+#define FSPI_MCR0_ATDF_EN		BIT(7)
+#define FSPI_MCR0_ARDF_EN		BIT(6)
+#define FSPI_MCR0_RXCLKSRC(x)		((x) << 4)
+#define FSPI_MCR0_END_CFG(x)		((x) << 2)
+#define FSPI_MCR0_MDIS			BIT(1)
+#define FSPI_MCR0_SWRST			BIT(0)
+
+#define FSPI_MCR0_AHBGRANTWAIT_SHIFT	24
+#define FSPI_MCR0_AHBGRANTWAIT_MASK	(0xFFU << FSPI_MCR0_AHBGRANTWAIT_SHIFT)
+#define FSPI_MCR0_IPGRANTWAIT_SHIFT	16
+#define FSPI_MCR0_IPGRANTWAIT_MASK	(0xFF << FSPI_MCR0_IPGRANTWAIT_SHIFT)
+#define FSPI_MCR0_HSEN_SHIFT		11
+#define FSPI_MCR0_HSEN_MASK		(1 << FSPI_MCR0_HSEN_SHIFT)
+#define FSPI_MCR0_SERCLKDIV_SHIFT	8
+#define FSPI_MCR0_SERCLKDIV_MASK	(7 << FSPI_MCR0_SERCLKDIV_SHIFT)
+#define FSPI_MCR0_ENDCFG_SHIFT		2
+#define FSPI_MCR0_ENDCFG_MASK		(3 << FSPI_MCR0_ENDCFG_SHIFT)
+#define FSPI_MCR0_RXCLKSRC_SHIFT	4
+#define FSPI_MCR0_RXCLKSRC_MASK		(3 << FSPI_MCR0_RXCLKSRC_SHIFT)
+
+#define FSPI_MCR1			0x04
+#define FSPI_MCR1_SEQ_TIMEOUT(x)	((x) << 16)
+#define FSPI_MCR1_AHB_TIMEOUT(x)	(x)
+
+#define FSPI_MCR2			0x08
+#define FSPI_MCR2_IDLE_WAIT(x)		((x) << 24)
+#define FSPI_MCR2_SAMEDEVICEEN		BIT(15)
+#define FSPI_MCR2_CLRLRPHS		BIT(14)
+#define FSPI_MCR2_ABRDATSZ		BIT(8)
+#define FSPI_MCR2_ABRLEARN		BIT(7)
+#define FSPI_MCR2_ABR_READ		BIT(6)
+#define FSPI_MCR2_ABRWRITE		BIT(5)
+#define FSPI_MCR2_ABRDUMMY		BIT(4)
+#define FSPI_MCR2_ABR_MODE		BIT(3)
+#define FSPI_MCR2_ABRCADDR		BIT(2)
+#define FSPI_MCR2_ABRRADDR		BIT(1)
+#define FSPI_MCR2_ABR_CMD		BIT(0)
+
+#define FSPI_AHBCR			0x0c
+#define FSPI_AHBCR_RDADDROPT		BIT(6)
+#define FSPI_AHBCR_PREF_EN		BIT(5)
+#define FSPI_AHBCR_BUFF_EN		BIT(4)
+#define FSPI_AHBCR_CACH_EN		BIT(3)
+#define FSPI_AHBCR_CLRTXBUF		BIT(2)
+#define FSPI_AHBCR_CLRRXBUF		BIT(1)
+#define FSPI_AHBCR_PAR_EN		BIT(0)
+
+#define FSPI_INTEN			0x10
+#define FSPI_INTEN_SCLKSBWR		BIT(9)
+#define FSPI_INTEN_SCLKSBRD		BIT(8)
+#define FSPI_INTEN_DATALRNFL		BIT(7)
+#define FSPI_INTEN_IPTXWE		BIT(6)
+#define FSPI_INTEN_IPRXWA		BIT(5)
+#define FSPI_INTEN_AHBCMDERR		BIT(4)
+#define FSPI_INTEN_IPCMDERR		BIT(3)
+#define FSPI_INTEN_AHBCMDGE		BIT(2)
+#define FSPI_INTEN_IPCMDGE		BIT(1)
+#define FSPI_INTEN_IPCMDDONE		BIT(0)
+
+#define FSPI_INTR			0x14
+#define FSPI_INTR_SCLKSBWR		BIT(9)
+#define FSPI_INTR_SCLKSBRD		BIT(8)
+#define FSPI_INTR_DATALRNFL		BIT(7)
+#define FSPI_INTR_IPTXWE		BIT(6)
+#define FSPI_INTR_IPRXWA		BIT(5)
+#define FSPI_INTR_AHBCMDERR		BIT(4)
+#define FSPI_INTR_IPCMDERR		BIT(3)
+#define FSPI_INTR_AHBCMDGE		BIT(2)
+#define FSPI_INTR_IPCMDGE		BIT(1)
+#define FSPI_INTR_IPCMDDONE		BIT(0)
+
+#define FSPI_LUTKEY			0x18
+#define FSPI_LUTKEY_VALUE		0x5AF05AF0
+
+#define FSPI_LCKCR			0x1C
+
+#define FSPI_LCKER_LOCK			0x1
+#define FSPI_LCKER_UNLOCK		0x2
+
+#define FSPI_BUFXCR_INVALID_MSTRID	0xE
+#define FSPI_AHBRX_BUF0CR0		0x20
+#define FSPI_AHBRX_BUF1CR0		0x24
+#define FSPI_AHBRX_BUF2CR0		0x28
+#define FSPI_AHBRX_BUF3CR0		0x2C
+#define FSPI_AHBRX_BUF4CR0		0x30
+#define FSPI_AHBRX_BUF5CR0		0x34
+#define FSPI_AHBRX_BUF6CR0		0x38
+#define FSPI_AHBRX_BUF7CR0		0x3C
+
+#define FSPI_AHBRXBUF0CR7_PREF		BIT(31)
+
+#define FSPI_AHBRX_BUF0CR1		0x40
+#define FSPI_AHBRX_BUF1CR1		0x44
+#define FSPI_AHBRX_BUF2CR1		0x48
+#define FSPI_AHBRX_BUF3CR1		0x4C
+#define FSPI_AHBRX_BUF4CR1		0x50
+#define FSPI_AHBRX_BUF5CR1		0x54
+#define FSPI_AHBRX_BUF6CR1		0x58
+#define FSPI_AHBRX_BUF7CR1		0x5C
+
+#define FSPI_FLSHA1CR0			0x60
+#define FSPI_FLSHA2CR0			0x64
+#define FSPI_FLSHB1CR0			0x68
+#define FSPI_FLSHB2CR0			0x6C
+#define FSPI_FLSHXCR0_SZ_KB		10
+#define FSPI_FLSHXCR0_SZ(x)		((x) >> FSPI_FLSHXCR0_SZ_KB)
+
+#define FSPI_FLSHA1CR1			0x70
+#define FSPI_FLSHA2CR1			0x74
+#define FSPI_FLSHB1CR1			0x78
+#define FSPI_FLSHB2CR1			0x7C
+#define FSPI_FLSHXCR1_CSINTR(x)		((x) << 16)
+#define FSPI_FLSHXCR1_CAS(x)		((x) << 11)
+#define FSPI_FLSHXCR1_WA		BIT(10)
+#define FSPI_FLSHXCR1_TCSH(x)		((x) << 5)
+#define FSPI_FLSHXCR1_TCSS(x)		(x)
+
+#define FSPI_FLSHXCR1_TCSH_SHIFT	5
+#define FSPI_FLSHXCR1_TCSH_MASK		(0x1F << FSPI_FLSHXCR1_TCSH_SHIFT)
+#define FSPI_FLSHXCR1_TCSS_SHIFT	0
+#define FSPI_FLSHXCR1_TCSS_MASK		(0x1F << FSPI_FLSHXCR1_TCSS_SHIFT)
+
+#define FSPI_FLSHA1CR2			0x80
+#define FSPI_FLSHA2CR2			0x84
+#define FSPI_FLSHB1CR2			0x88
+#define FSPI_FLSHB2CR2			0x8C
+#define FSPI_FLSHXCR2_CLRINSP		BIT(24)
+#define FSPI_FLSHXCR2_AWRWAIT		BIT(16)
+#define FSPI_FLSHXCR2_AWRSEQN_SHIFT	13
+#define FSPI_FLSHXCR2_AWRSEQI_SHIFT	8
+#define FSPI_FLSHXCR2_ARDSEQN_SHIFT	5
+#define FSPI_FLSHXCR2_ARDSEQI_SHIFT	0
+
+#define FSPI_IPCR0			0xA0
+
+#define FSPI_IPCR1			0xA4
+#define FSPI_IPCR1_IPAREN		BIT(31)
+#define FSPI_IPCR1_SEQNUM_SHIFT		24
+#define FSPI_IPCR1_SEQID_SHIFT		16
+#define FSPI_IPCR1_IDATSZ(x)		(x)
+
+#define FSPI_IPCMD			0xB0
+#define FSPI_IPCMD_TRG			BIT(0)
+
+
+/* IP Command Register */
+#define FSPI_IPCMD_TRG_SHIFT		0
+#define FSPI_IPCMD_TRG_MASK		(1 << FSPI_IPCMD_TRG_SHIFT)
+
+#define FSPI_INTR_IPRXWA_SHIFT		5
+#define FSPI_INTR_IPRXWA_MASK		(1 << FSPI_INTR_IPRXWA_SHIFT)
+
+#define FSPI_INTR_IPCMDDONE_SHIFT	0
+#define FSPI_INTR_IPCMDDONE_MASK	(1 << FSPI_INTR_IPCMDDONE_SHIFT)
+
+#define FSPI_INTR_IPTXWE_SHIFT		6
+#define FSPI_INTR_IPTXWE_MASK		(1 << FSPI_INTR_IPTXWE_SHIFT)
+
+#define FSPI_IPTXFSTS_FILL_SHIFT	0
+#define FSPI_IPTXFSTS_FILL_MASK		(0xFF << FSPI_IPTXFSTS_FILL_SHIFT)
+
+#define FSPI_IPCR1_ISEQID_SHIFT		16
+#define FSPI_IPCR1_ISEQID_MASK		(0x1F << FSPI_IPCR1_ISEQID_SHIFT)
+
+#define FSPI_IPRXFSTS_FILL_SHIFT	0
+#define FSPI_IPRXFSTS_FILL_MASK		(0xFF << FSPI_IPRXFSTS_FILL_SHIFT)
+
+#define FSPI_DLPR			0xB4
+
+#define FSPI_IPRXFCR			0xB8
+#define FSPI_IPRXFCR_CLR		BIT(0)
+#define FSPI_IPRXFCR_DMA_EN		BIT(1)
+#define FSPI_IPRXFCR_WMRK(x)		((x) << 2)
+
+#define FSPI_IPTXFCR			0xBC
+#define FSPI_IPTXFCR_CLR		BIT(0)
+#define FSPI_IPTXFCR_DMA_EN		BIT(1)
+#define FSPI_IPTXFCR_WMRK(x)		((x) << 2)
+
+#define FSPI_DLLACR			0xC0
+#define FSPI_DLLACR_OVRDEN		BIT(8)
+
+#define FSPI_DLLBCR			0xC4
+#define FSPI_DLLBCR_OVRDEN		BIT(8)
+
+#define FSPI_STS0			0xE0
+#define FSPI_STS0_DLPHB(x)		((x) << 8)
+#define FSPI_STS0_DLPHA(x)		((x) << 4)
+#define FSPI_STS0_CMD_SRC(x)		((x) << 2)
+#define FSPI_STS0_ARB_IDLE		BIT(1)
+#define FSPI_STS0_SEQ_IDLE		BIT(0)
+
+#define FSPI_STS1			0xE4
+#define FSPI_STS1_IP_ERRCD(x)		((x) << 24)
+#define FSPI_STS1_IP_ERRID(x)		((x) << 16)
+#define FSPI_STS1_AHB_ERRCD(x)		((x) << 8)
+#define FSPI_STS1_AHB_ERRID(x)		(x)
+
+#define FSPI_AHBSPNST			0xEC
+#define FSPI_AHBSPNST_DATLFT(x)		((x) << 16)
+#define FSPI_AHBSPNST_BUFID(x)		((x) << 1)
+#define FSPI_AHBSPNST_ACTIVE		BIT(0)
+
+#define FSPI_IPRXFSTS			0xF0
+#define FSPI_IPRXFSTS_RDCNTR(x)		((x) << 16)
+#define FSPI_IPRXFSTS_FILL(x)		(x)
+
+#define FSPI_IPTXFSTS			0xF4
+#define FSPI_IPTXFSTS_WRCNTR(x)		((x) << 16)
+#define FSPI_IPTXFSTS_FILL(x)		(x)
+
+#define FSPI_NOR_SR_WIP_SHIFT		(0)
+#define FSPI_NOR_SR_WIP_MASK		(1 << FSPI_NOR_SR_WIP_SHIFT)
+
+#define FSPI_RFDR			0x100
+#define FSPI_TFDR			0x180
+
+#define FSPI_LUT_BASE			0x200
+#define FSPI_LUT_OFFSET			(SEQID_LUT * 4 * 4)
+#define FSPI_LUT_REG(idx) \
+	(FSPI_LUT_BASE + FSPI_LUT_OFFSET + (idx) * 4)
+
+/* register map end */
+
+#endif
diff --git a/drivers/nxp/flexspi/nor/test_fspi.c b/drivers/nxp/flexspi/nor/test_fspi.c
new file mode 100644
index 0000000..c36c5b8
--- /dev/null
+++ b/drivers/nxp/flexspi/nor/test_fspi.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+
+#include <common/debug.h>
+#include <flash_info.h>
+#include "fspi.h"
+#include <fspi_api.h>
+
+/*
+ * These macros are used as test vectors for exercising the FlexSPI driver.
+ */
+#define	SIZE_BUFFER			0x250
+
+/*
+ * fspi_swap32 may be defined based on the core endianness and the FlexSPI
+ * IP/AHB buffer endianness set in MCR.
+ */
+#define fspi_swap32(A)			(A)
+
+void fspi_test(uint32_t fspi_test_addr, uint32_t size, int extra)
+{
+	uint32_t buffer[SIZE_BUFFER];
+	uint32_t count = 1;
+	uint32_t failed, i;
+
+	NOTICE("-------------------------- %d----------------------------------\n", count++);
+	INFO("Sector Erase size: 0x%08x, size: %d\n", F_SECTOR_ERASE_SZ, size);
+	/* Test Sector Erase */
+	xspi_sector_erase(fspi_test_addr - fspi_test_addr % F_SECTOR_ERASE_SZ,
+			  F_SECTOR_ERASE_SZ);
+
+	/* Test Erased data using IP read */
+	xspi_ip_read((fspi_test_addr), buffer, size * 4);
+
+	failed = 0;
+	for (i = 0; i < size; i++)
+		if (fspi_swap32(0xffffffff) != buffer[i]) {
+			failed = 1;
+			break;
+		}
+
+	if (failed == 0) {
+		NOTICE("[%d]: Success Erase: data in buffer[%d] 0x%08x\n", __LINE__, i-3, buffer[i-3]);
+	} else {
+		ERROR("Erase: Failed  -->xxx with buffer[%d]=0x%08x\n", i, buffer[i]);
+	}
+
+	for (i = 0; i < SIZE_BUFFER; i++)
+		buffer[i] = 0x12345678;
+
+	/* Write data from buffer to flash */
+	xspi_write(fspi_test_addr, (void *)buffer, (size * 4 + extra));
+	/* Check written data using IP read */
+	xspi_ip_read(fspi_test_addr, buffer, (size * 4 + extra));
+	failed = 0;
+	for (i = 0; i < size; i++)
+		if (fspi_swap32(0x12345678) != buffer[i]) {
+			failed = 1;
+			break;
+		}
+
+	if (failed == 0) {
+		NOTICE("[%d]: Success IpWrite with IP READ in buffer[%d] 0x%08x\n", __LINE__, i-3, buffer[i-3]);
+	} else {
+		ERROR("Write: Failed  -->xxxx with IP READ in buffer[%d]=0x%08x\n", i, buffer[i]);
+		return;
+	}
+
+	/* xspi_read may use AHB read */
+	xspi_read((fspi_test_addr), buffer, (size * 4 + extra));
+	failed = 0;
+	for (i = 0; i < size; i++)
+		if (fspi_swap32(0x12345678) != buffer[i]) {
+			failed = 1;
+			break;
+		}
+
+	if (failed == 0) {
+		NOTICE("[%d]: Success IpWrite with AHB OR IP READ on buffer[%d] 0x%08x\n", __LINE__, i-3, buffer[i-3]);
+	} else {
+		ERROR("Write: Failed  -->xxxx with AHB READ on buffer[%d]=0x%08x\n", i, buffer[i]);
+		return;
+	}
+}
diff --git a/drivers/nxp/gic/gic.mk b/drivers/nxp/gic/gic.mk
new file mode 100644
index 0000000..68091e8
--- /dev/null
+++ b/drivers/nxp/gic/gic.mk
@@ -0,0 +1,46 @@
+# Copyright 2021 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+#------------------------------------------------------------------------------
+#
+# Select the GIC files
+#
+# -----------------------------------------------------------------------------
+
+ifeq (${ADD_GIC},)
+ADD_GIC			:= 1
+ifeq ($(GIC), GIC400)
+include drivers/arm/gic/v2/gicv2.mk
+GIC_SOURCES		+=	${GICV2_SOURCES}
+GIC_SOURCES		+=	${PLAT_DRIVERS_PATH}/gic/ls_gicv2.c	\
+				plat/common/plat_gicv2.c
+
+PLAT_INCLUDES		+=	-I${PLAT_DRIVERS_PATH}/gic/include/gicv2
+else
+ifeq ($(GIC), GIC500)
+include drivers/arm/gic/v3/gicv3.mk
+GIC_SOURCES		+=	${GICV3_SOURCES}
+GIC_SOURCES		+=	${PLAT_DRIVERS_PATH}/gic/ls_gicv3.c	\
+				plat/common/plat_gicv3.c
+
+PLAT_INCLUDES		+=	-I${PLAT_DRIVERS_PATH}/gic/include/gicv3
+else
+    $(error -> GIC type not set!)
+endif
+endif
+
+ifeq (${BL_COMM_GIC_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${GIC_SOURCES}
+else
+ifeq (${BL2_GIC_NEEDED},yes)
+BL2_SOURCES		+= ${GIC_SOURCES}
+endif
+ifeq (${BL31_GIC_NEEDED},yes)
+BL31_SOURCES		+= ${GIC_SOURCES}
+endif
+endif
+endif
+
+# -----------------------------------------------------------------------------
diff --git a/drivers/nxp/gic/include/gicv2/plat_gic.h b/drivers/nxp/gic/include/gicv2/plat_gic.h
new file mode 100644
index 0000000..ff34744
--- /dev/null
+++ b/drivers/nxp/gic/include/gicv2/plat_gic.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PLAT_GICV2_H
+#define PLAT_GICV2_H
+
+#include <drivers/arm/gicv2.h>
+
+ /* register offsets */
+#define GICD_CTLR_OFFSET          0x0
+#define GICD_CPENDSGIR3_OFFSET    0xF1C
+#define GICD_SPENDSGIR3_OFFSET    0xF2C
+#define GICD_SGIR_OFFSET          0xF00
+#define GICD_IGROUPR0_OFFSET      0x080
+#define GICD_TYPER_OFFSET         0x0004
+#define GICD_ISENABLER0_OFFSET    0x0100
+#define GICD_ICENABLER0_OFFSET    0x0180
+#define GICD_IPRIORITYR3_OFFSET   0x040C
+#define GICD_ISENABLERn_OFFSET    0x0100
+#define GICD_ISACTIVER0_OFFSET    0x300
+
+#define GICC_CTLR_OFFSET          0x0
+#define GICC_PMR_OFFSET           0x0004
+#define GICC_IAR_OFFSET           0x000C
+#define GICC_DIR_OFFSET           0x1000
+#define GICC_EOIR_OFFSET          0x0010
+
+ /* bitfield masks */
+#define GICC_CTLR_EN_GRP0           0x1
+#define GICC_CTLR_EN_GRP1           0x2
+#define GICC_CTLR_EOImodeS_MASK     0x200
+#define GICC_CTLR_DIS_BYPASS        0x60
+#define GICC_CTLR_CBPR_MASK         0x10
+#define GICC_CTLR_FIQ_EN_MASK       0x8
+#define GICC_CTLR_ACKCTL_MASK       0x4
+#define GICC_PMR_FILTER             0xFF
+
+#define GICD_CTLR_EN_GRP0           0x1
+#define GICD_CTLR_EN_GRP1           0x2
+#define GICD_IGROUP0_SGI15          0x8000
+#define GICD_ISENABLE0_SGI15        0x8000
+#define GICD_ICENABLE0_SGI15        0x8000
+#define GICD_ISACTIVER0_SGI15       0x8000
+#define GICD_CPENDSGIR_CLR_MASK     0xFF000000
+#define GICD_IPRIORITY_SGI15_MASK   0xFF000000
+#define GICD_SPENDSGIR3_SGI15_MASK  0xFF000000
+#define GICD_SPENDSGIR3_SGI15_OFFSET  0x18
+
+#ifndef __ASSEMBLER__
+
+/* GIC common API's */
+void plat_ls_gic_driver_init(const uintptr_t nxp_gicd_addr,
+			     const uintptr_t nxp_gicc_addr,
+			     uint8_t plat_core_count,
+			     interrupt_prop_t *ls_interrupt_props,
+			     uint8_t ls_interrupt_prop_count,
+			     uint32_t *target_mask_array);
+void plat_ls_gic_init(void);
+void plat_ls_gic_cpuif_enable(void);
+void plat_ls_gic_cpuif_disable(void);
+void plat_ls_gic_redistif_on(void);
+void plat_ls_gic_redistif_off(void);
+void plat_gic_pcpu_init(void);
+/* GIC utility functions */
+void get_gic_offset(uint32_t *gicc_base, uint32_t *gicd_base);
+#endif
+
+#endif /* PLAT_GICV2_H */
diff --git a/drivers/nxp/gic/include/gicv3/plat_gic.h b/drivers/nxp/gic/include/gicv3/plat_gic.h
new file mode 100644
index 0000000..f4e12de
--- /dev/null
+++ b/drivers/nxp/gic/include/gicv3/plat_gic.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PLAT_GICV3_H
+#define PLAT_GICV3_H
+
+#include <drivers/arm/gicv3.h>
+
+ /* offset between redistributors */
+#define GIC_RD_OFFSET       0x00020000
+ /* offset between SGI's */
+#define GIC_SGI_OFFSET      0x00020000
+ /* offset from rd base to sgi base */
+#define GIC_RD_2_SGI_OFFSET 0x00010000
+
+ /* register offsets */
+#define GICD_CTLR_OFFSET        0x0
+#define GICD_CLR_SPI_SR         0x58
+#define GICD_IGROUPR_2          0x88
+#define GICD_ISENABLER_2        0x108
+#define GICD_ICENABLER_2        0x188
+#define GICD_ICPENDR_2          0x288
+#define GICD_ICACTIVER_2        0x388
+#define GICD_IPRIORITYR_22      0x458
+#define GICD_ICFGR_5            0xC14
+#define GICD_IGRPMODR_2         0xD08
+
+#define GICD_IROUTER60_OFFSET   0x61e0
+#define GICD_IROUTER76_OFFSET   0x6260
+#define GICD_IROUTER89_OFFSET   0x62C8
+#define GICD_IROUTER112_OFFSET  0x6380
+#define GICD_IROUTER113_OFFSET  0x6388
+
+#define GICR_ICENABLER0_OFFSET  0x180
+#define GICR_CTLR_OFFSET        0x0
+#define GICR_IGROUPR0_OFFSET    0x80
+#define GICR_IGRPMODR0_OFFSET   0xD00
+#define GICR_IPRIORITYR3_OFFSET 0x40C
+#define GICR_ICPENDR0_OFFSET    0x280
+#define GICR_ISENABLER0_OFFSET  0x100
+#define GICR_TYPER_OFFSET       0x8
+#define GICR_WAKER_OFFSET       0x14
+#define GICR_ICACTIVER0_OFFSET  0x380
+#define GICR_ICFGR0_OFFSET      0xC00
+
+ /* bitfield masks */
+#define GICD_CTLR_EN_GRP_MASK   0x7
+#define GICD_CTLR_EN_GRP_1NS    0x2
+#define GICD_CTLR_EN_GRP_1S     0x4
+#define GICD_CTLR_EN_GRP_0      0x1
+#define GICD_CTLR_ARE_S_MASK    0x10
+#define GICD_CTLR_RWP           0x80000000
+
+#define GICR_ICENABLER0_SGI15   0x00008000
+#define GICR_CTLR_RWP           0x8
+#define GICR_CTLR_DPG0_MASK     0x2000000
+#define GICR_IGROUPR0_SGI15     0x00008000
+#define GICR_IGRPMODR0_SGI15    0x00008000
+#define GICR_ISENABLER0_SGI15   0x00008000
+#define GICR_IPRIORITYR3_SGI15_MASK  0xFF000000
+#define GICR_ICPENDR0_SGI15     0x8000
+
+#define GIC_SPI_89_MASK         0x02000000
+#define GIC_SPI89_PRIORITY_MASK 0xFF00
+#define GIC_IRM_SPI89           0x80000000
+
+#define GICD_IROUTER_VALUE      0x100
+#define GICR_WAKER_SLEEP_BIT    0x2
+#define GICR_WAKER_ASLEEP       (1 << 2 | 1 << 1)
+
+#define ICC_SRE_EL3_SRE          0x1
+#define ICC_IGRPEN0_EL1_EN       0x1
+#define ICC_CTLR_EL3_CBPR_EL1S   0x1
+#define ICC_CTLR_EL3_RM          0x20
+#define ICC_CTLR_EL3_EOIMODE_EL3 0x4
+#define ICC_CTLR_EL3_PMHE        0x40
+#define ICC_PMR_EL1_P_FILTER     0xFF
+#define ICC_IAR0_EL1_SGI15       0xF
+#define ICC_SGI0R_EL1_INTID      0x0F000000
+#define ICC_IAR0_INTID_SPI_89    0x59
+
+#define  ICC_IGRPEN1_EL1 S3_0_C12_C12_7
+#define  ICC_PMR_EL1     S3_0_C4_C6_0
+#define  ICC_SRE_EL3     S3_6_C12_C12_5
+#define  ICC_CTLR_EL3    S3_6_C12_C12_4
+#define  ICC_SRE_EL2     S3_4_C12_C9_5
+#define  ICC_CTLR_EL1    S3_0_C12_C12_4
+
+#ifndef __ASSEMBLER__
+
+/* GIC common API's */
+typedef unsigned int (*my_core_pos_fn)(void);
+
+void plat_ls_gic_driver_init(const uintptr_t nxp_gicd_addr,
+			     const uintptr_t nxp_gicr_addr,
+			     uint8_t plat_core_count,
+			     interrupt_prop_t *ls_interrupt_props,
+			     uint8_t ls_interrupt_prop_count,
+			     uintptr_t *target_mask_array,
+			     mpidr_hash_fn mpidr_to_core_pos);
+void plat_ls_gic_init(void);
+void plat_ls_gic_cpuif_enable(void);
+void plat_ls_gic_cpuif_disable(void);
+void plat_ls_gic_redistif_on(void);
+void plat_ls_gic_redistif_off(void);
+void plat_gic_pcpu_init(void);
+#endif
+
+#endif /* PLAT_GICV3_H */
diff --git a/drivers/nxp/gic/ls_gicv2.c b/drivers/nxp/gic/ls_gicv2.c
new file mode 100644
index 0000000..62bc8db
--- /dev/null
+++ b/drivers/nxp/gic/ls_gicv2.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <gicv2.h>
+#include <plat_gic.h>
+
+
+/*
+ * NXP common helper to initialize the GICv2 driver.
+ */
+void plat_ls_gic_driver_init(uintptr_t nxp_gicd_addr,
+			     uintptr_t nxp_gicc_addr,
+			     uint8_t plat_core_count,
+			     interrupt_prop_t *ls_interrupt_props,
+			     uint8_t ls_interrupt_prop_count,
+			     uint32_t *target_mask_array)
+{
+	static struct gicv2_driver_data ls_gic_data;
+
+	ls_gic_data.gicd_base = nxp_gicd_addr;
+	ls_gic_data.gicc_base = nxp_gicc_addr;
+	ls_gic_data.target_masks = target_mask_array;
+	ls_gic_data.target_masks_num = plat_core_count;
+	ls_gic_data.interrupt_props = ls_interrupt_props;
+	ls_gic_data.interrupt_props_num = ls_interrupt_prop_count;
+
+	gicv2_driver_init(&ls_gic_data);
+}
+
+void plat_ls_gic_init(void)
+{
+	gicv2_distif_init();
+	gicv2_pcpu_distif_init();
+	gicv2_cpuif_enable();
+}
+
+/******************************************************************************
+ * ARM common helper to enable the GICv2 CPU interface
+ *****************************************************************************/
+void plat_ls_gic_cpuif_enable(void)
+{
+	gicv2_cpuif_enable();
+}
+
+/******************************************************************************
+ * ARM common helper to disable the GICv2 CPU interface
+ *****************************************************************************/
+void plat_ls_gic_cpuif_disable(void)
+{
+	gicv2_cpuif_disable();
+}
+
+/******************************************************************************
+ * NXP common helper to initialize GICv2 per cpu
+ *****************************************************************************/
+void plat_gic_pcpu_init(void)
+{
+	gicv2_pcpu_distif_init();
+	gicv2_cpuif_enable();
+}
+
+/******************************************************************************
+ * Stubs for Redistributor power management. Although GICv2 does not have a
+ * Redistributor interface, these are provided for the sake of a uniform GIC API.
+ *****************************************************************************/
+void plat_ls_gic_redistif_on(void)
+{
+}
+
+void plat_ls_gic_redistif_off(void)
+{
+}
diff --git a/drivers/nxp/gic/ls_gicv3.c b/drivers/nxp/gic/ls_gicv3.c
new file mode 100644
index 0000000..9c02bd6
--- /dev/null
+++ b/drivers/nxp/gic/ls_gicv3.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <drivers/arm/gicv3.h>
+#include <plat_gic.h>
+#include <plat/common/platform.h>
+
+/*
+ * NXP common helper to initialize the GICv3 only driver.
+ */
+void plat_ls_gic_driver_init(uintptr_t nxp_gicd_addr,
+			     uintptr_t nxp_gicr_addr,
+			     uint8_t plat_core_count,
+			     interrupt_prop_t *ls_interrupt_props,
+			     uint8_t ls_interrupt_prop_count,
+			     uintptr_t *target_mask_array,
+			     mpidr_hash_fn mpidr_to_core_pos)
+{
+	static struct gicv3_driver_data ls_gic_data;
+
+	ls_gic_data.gicd_base = nxp_gicd_addr;
+	ls_gic_data.gicr_base = nxp_gicr_addr;
+	ls_gic_data.interrupt_props = ls_interrupt_props;
+	ls_gic_data.interrupt_props_num = ls_interrupt_prop_count;
+	ls_gic_data.rdistif_num = plat_core_count;
+	ls_gic_data.rdistif_base_addrs = target_mask_array;
+	ls_gic_data.mpidr_to_core_pos = mpidr_to_core_pos;
+
+	gicv3_driver_init(&ls_gic_data);
+}
+
+void plat_ls_gic_init(void)
+{
+	gicv3_distif_init();
+	gicv3_rdistif_init(plat_my_core_pos());
+	gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+/*
+ * NXP common helper to enable the GICv3 CPU interface
+ */
+void plat_ls_gic_cpuif_enable(void)
+{
+	gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+/*
+ * NXP common helper to disable the GICv3 CPU interface
+ */
+void plat_ls_gic_cpuif_disable(void)
+{
+	gicv3_cpuif_disable(plat_my_core_pos());
+}
+
+/*
+ * NXP common helper to initialize the per cpu distributor interface in GICv3
+ */
+void plat_gic_pcpu_init(void)
+{
+	gicv3_rdistif_init(plat_my_core_pos());
+	gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+/*
+ * Stubs for Redistributor power management; Redistributor initialization is
+ * handled in plat_gic_pcpu_init(). These are provided for the sake of a
+ * uniform GIC API.
+ */
+void plat_ls_gic_redistif_on(void)
+{
+}
+
+void plat_ls_gic_redistif_off(void)
+{
+}
diff --git a/drivers/nxp/gpio/gpio.mk b/drivers/nxp/gpio/gpio.mk
new file mode 100644
index 0000000..157c60a
--- /dev/null
+++ b/drivers/nxp/gpio/gpio.mk
@@ -0,0 +1,30 @@
+#
+# Copyright 2021 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-----------------------------------------------------------------------------
+
+ifeq (${GPIO_ADDED},)
+
+GPIO_ADDED		:= 1
+
+GPIO_DRIVERS_PATH	:=  drivers/nxp/gpio
+
+PLAT_INCLUDES		+=  -I$(GPIO_DRIVERS_PATH)
+
+GPIO_SOURCES		:= $(GPIO_DRIVERS_PATH)/nxp_gpio.c
+
+ifeq (${BL_COMM_GPIO_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${GPIO_SOURCES}
+else
+ifeq (${BL2_GPIO_NEEDED},yes)
+BL2_SOURCES		+= ${GPIO_SOURCES}
+endif
+ifeq (${BL31_GPIO_NEEDED},yes)
+BL31_SOURCES		+= ${GPIO_SOURCES}
+endif
+endif
+
+endif
+#------------------------------------------------
diff --git a/drivers/nxp/gpio/nxp_gpio.c b/drivers/nxp/gpio/nxp_gpio.c
new file mode 100644
index 0000000..28c9db9
--- /dev/null
+++ b/drivers/nxp/gpio/nxp_gpio.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <common/debug.h>
+#include <lib/mmio.h>
+#include <nxp_gpio.h>
+
+static gpio_init_info_t *gpio_init_info;
+
+void gpio_init(gpio_init_info_t *gpio_init_data)
+{
+	gpio_init_info = gpio_init_data;
+}
+
+/* This function sets the GPIO pin for raising POVDD. */
+int set_gpio_bit(uint32_t *gpio_base_addr,
+		 uint32_t bit_num)
+{
+	uint32_t val = 0U;
+	uint32_t *gpdir = NULL;
+	uint32_t *gpdat = NULL;
+
+	if (gpio_init_info == NULL) {
+		ERROR("GPIO is not initialized.\n");
+		return GPIO_FAILURE;
+	}
+
+	gpdir = gpio_base_addr + GPDIR_REG_OFFSET;
+	gpdat = gpio_base_addr + (GPDAT_REG_OFFSET >> 2);
+
+	/*
+	 * Set the corresponding bit in direction register
+	 * to configure the GPIO as output.
+	 */
+	val = gpio_read32(gpdir);
+	val = val | bit_num;
+	gpio_write32(gpdir, val);
+
+	/* Set the corresponding bit in GPIO data register */
+	val = gpio_read32(gpdat);
+	val = val | bit_num;
+	gpio_write32(gpdat, val);
+
+	val = gpio_read32(gpdat);
+
+	if ((val & bit_num) == 0U) {
+		return GPIO_FAILURE;
+	}
+
+	return GPIO_SUCCESS;
+}
+
+/* This function resets the GPIO pin that was set for raising POVDD. */
+int clr_gpio_bit(uint32_t *gpio_base_addr, uint32_t bit_num)
+{
+	uint32_t val = 0U;
+	uint32_t *gpdir = NULL;
+	uint32_t *gpdat = NULL;
+
+
+	if (gpio_init_info == NULL) {
+		ERROR("GPIO is not initialized.\n");
+		return GPIO_FAILURE;
+	}
+
+	gpdir = gpio_base_addr + GPDIR_REG_OFFSET;
+	gpdat = gpio_base_addr + (GPDAT_REG_OFFSET >> 2);
+
+	/*
+	 * Reset the corresponding bit in direction and data register
+	 * to configure the GPIO as input.
+	 */
+	val = gpio_read32(gpdat);
+	val = val & ~(bit_num);
+	gpio_write32(gpdat, val);
+
+	val = gpio_read32(gpdat);
+
+	val = gpio_read32(gpdir);
+	val = val & ~(bit_num);
+	gpio_write32(gpdir, val);
+
+	val = gpio_read32(gpdat);
+
+	if ((val & bit_num) != 0U) {
+		return GPIO_FAILURE;
+	}
+
+	return GPIO_SUCCESS;
+}
+
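+/*
+ * Map the fuse_hdr povdd_gpio value to a GPIO controller base address and
+ * return it, storing the corresponding bit mask in *bit_num.
+ */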
+uint32_t *select_gpio_n_bitnum(uint32_t povdd_gpio, uint32_t *bit_num)
+{
+	uint32_t *ret_gpio;
+	uint32_t povdd_gpio_val = 0U;
+	uint32_t gpio_num = 0U;
+
+	if (gpio_init_info == NULL) {
+		ERROR("GPIO is not initialized.\n");
+		return NULL;
+	}
+	/*
+	 * Subtract 1 from the fuse_hdr povdd_gpio value:
+	 * for value 0x1, bit 0 is to be set;
+	 * for value 0x20 (i.e. 32), bit 31 (0x1f) is to be set.
+	 * 0x00 - 0x1f : GPIO_1
+	 * 0x20 - 0x3f : GPIO_2
+	 * 0x40 - 0x5f : GPIO_3
+	 * 0x60 - 0x7f : GPIO_4
+	 */
+	povdd_gpio_val = (povdd_gpio - 1U) & GPIO_SEL_MASK;
+
+	/* Right shift by 5 to divide by 32 */
+	gpio_num = povdd_gpio_val >> GPIO_ID_BASE_ADDR_SHIFT;
+	*bit_num = 1U << (GPIO_BITS_PER_BASE_REG
+			  - (povdd_gpio_val & GPIO_BIT_MASK)
+			  - 1U);
+
+	switch (gpio_num) {
+	case GPIO_0:
+		ret_gpio = (uint32_t *) gpio_init_info->gpio1_base_addr;
+		break;
+	case GPIO_1:
+		ret_gpio = (uint32_t *) gpio_init_info->gpio2_base_addr;
+		break;
+	case GPIO_2:
+		ret_gpio = (uint32_t *) gpio_init_info->gpio3_base_addr;
+		break;
+	case GPIO_3:
+		ret_gpio = (uint32_t *) gpio_init_info->gpio4_base_addr;
+		break;
+	default:
+		ret_gpio = NULL;
+	}
+
+	if (ret_gpio == NULL) {
+		INFO("GPIO_NUM = %d doesn't exist.\n", gpio_num);
+	}
+
+	return ret_gpio;
+}
diff --git a/drivers/nxp/gpio/nxp_gpio.h b/drivers/nxp/gpio/nxp_gpio.h
new file mode 100644
index 0000000..df75840
--- /dev/null
+++ b/drivers/nxp/gpio/nxp_gpio.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PLAT_GPIO_H
+#define PLAT_GPIO_H
+
+#include <endian.h>
+#include <lib/mmio.h>
+
+/* GPIO Register offset */
+#define GPIO_SEL_MASK		0x7F
+#define GPIO_BIT_MASK		0x1F
+#define GPDIR_REG_OFFSET	0x0
+#define GPDAT_REG_OFFSET	0x8
+
+#define GPIO_ID_BASE_ADDR_SHIFT 5U
+#define GPIO_BITS_PER_BASE_REG	32U
+
+#define GPIO_0			0
+#define GPIO_1			1
+#define GPIO_2			2
+#define GPIO_3			3
+
+#define GPIO_SUCCESS		0x0
+#define GPIO_FAILURE		0x1
+
+#ifdef NXP_GPIO_BE
+#define gpio_read32(a)           bswap32(mmio_read_32((uintptr_t)(a)))
+#define gpio_write32(a, v)       mmio_write_32((uintptr_t)(a), bswap32(v))
+#elif defined(NXP_GPIO_LE)
+#define gpio_read32(a)           mmio_read_32((uintptr_t)(a))
+#define gpio_write32(a, v)       mmio_write_32((uintptr_t)(a), (v))
+#else
+#error Please define GPIO register endianness
+#endif
+
+typedef struct {
+	uintptr_t gpio1_base_addr;
+	uintptr_t gpio2_base_addr;
+	uintptr_t gpio3_base_addr;
+	uintptr_t gpio4_base_addr;
+} gpio_init_info_t;
+
+void gpio_init(gpio_init_info_t *gpio_init_data);
+uint32_t *select_gpio_n_bitnum(uint32_t povdd_gpio, uint32_t *bit_num);
+int clr_gpio_bit(uint32_t *gpio_base_addr, uint32_t bit_num);
+int set_gpio_bit(uint32_t *gpio_base_addr, uint32_t bit_num);
+
+#endif /* PLAT_GPIO_H */
diff --git a/drivers/nxp/i2c/i2c.c b/drivers/nxp/i2c/i2c.c
new file mode 100644
index 0000000..9281409
--- /dev/null
+++ b/drivers/nxp/i2c/i2c.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2016-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <errno.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include "i2c.h"
+#include <nxp_timer.h>
+
+static uintptr_t g_nxp_i2c_addr;
+
+void i2c_init(uintptr_t nxp_i2c_addr)
+{
+	struct ls_i2c *ccsr_i2c = (void *)nxp_i2c_addr;
+
+	g_nxp_i2c_addr = nxp_i2c_addr;
+	/* Presume workaround for erratum a009203 applied */
+	i2c_out(&ccsr_i2c->cr, I2C_CR_DIS);
+	i2c_out(&ccsr_i2c->fd, I2C_FD_CONSERV);
+	i2c_out(&ccsr_i2c->sr, I2C_SR_RST);
+	i2c_out(&ccsr_i2c->cr, I2C_CR_EN);
+}
+
+static int wait_for_state(struct ls_i2c *ccsr_i2c,
+			  unsigned char state, unsigned char mask)
+{
+	unsigned char sr;
+	uint64_t start_time = get_timer_val(0);
+	uint64_t timer;
+
+	do {
+		sr = i2c_in(&ccsr_i2c->sr);
+		if (sr & I2C_SR_AL) {
+			i2c_out(&ccsr_i2c->sr, sr);
+			WARN("I2C arbitration lost\n");
+			return -EIO;
+		}
+		if ((sr & mask) == state) {
+			return (int)sr;
+		}
+
+		timer = get_timer_val(start_time);
+		if (timer > I2C_TIMEOUT)
+			break;
+		mdelay(1);
+	} while (1);
+	WARN("I2C: Timeout waiting for state 0x%x, sr = 0x%x\n", state, sr);
+
+	return -ETIMEDOUT;
+}
+
+static int tx_byte(struct ls_i2c *ccsr_i2c, unsigned char c)
+{
+	int ret;
+
+	i2c_out(&ccsr_i2c->sr, I2C_SR_IF);
+	i2c_out(&ccsr_i2c->dr, c);
+	ret = wait_for_state(ccsr_i2c, I2C_SR_IF, I2C_SR_IF);
+	if (ret < 0) {
+		WARN("%s: state error\n", __func__);
+		return ret;
+	}
+	if (ret & I2C_SR_RX_NAK) {
+		WARN("%s: nodev\n", __func__);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int gen_stop(struct ls_i2c *ccsr_i2c)
+{
+	unsigned char cr;
+	int ret;
+
+	cr = i2c_in(&ccsr_i2c->cr);
+	cr &= ~(I2C_CR_MA | I2C_CR_TX);
+	i2c_out(&ccsr_i2c->cr, cr);
+	ret = wait_for_state(ccsr_i2c, I2C_SR_IDLE, I2C_SR_BB);
+	if (ret < 0) {
+		WARN("I2C: Generating stop failed.\n");
+	}
+	return ret;
+}
+
+static int i2c_write_addr(struct ls_i2c *ccsr_i2c, unsigned char chip,
+			  int addr, int alen)
+{
+	int ret;
+	unsigned char cr;
+
+	if (alen != 1) {
+		WARN("I2C: Unsupported address len [%d]\n", alen);
+		return -EIO;
+	}
+
+	if (i2c_in(&ccsr_i2c->ad) == (chip << 1)) {
+		WARN("I2C: slave address same as self\n");
+		return -ENODEV;
+	}
+	i2c_out(&ccsr_i2c->sr, I2C_SR_IF);
+	ret = wait_for_state(ccsr_i2c, I2C_SR_IDLE, I2C_SR_BB);
+	if (ret < 0) {
+		return ret;
+	}
+
+	cr = i2c_in(&ccsr_i2c->cr);
+	cr |= I2C_CR_MA;
+	i2c_out(&ccsr_i2c->cr, cr);
+	ret = wait_for_state(ccsr_i2c, I2C_SR_BB, I2C_SR_BB);
+	if (ret < 0) {
+		return ret;
+	}
+
+	VERBOSE("Before writing chip %d\n", chip);
+	cr |= I2C_CR_TX | I2C_CR_TX_NAK;
+	i2c_out(&ccsr_i2c->cr, cr);
+	ret = tx_byte(ccsr_i2c, chip << 1);
+	if (ret < 0) {
+		gen_stop(ccsr_i2c);
+		return ret;
+	}
+
+	VERBOSE("Before writing addr\n");
+	while (alen--) {
+		ret = tx_byte(ccsr_i2c, (addr >> (alen << 3)) & 0xff);
+		if (ret < 0) {
+			gen_stop(ccsr_i2c);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int read_data(struct ls_i2c *ccsr_i2c, unsigned char chip,
+		     unsigned char *buf, int len)
+{
+	int i;
+	int ret;
+	unsigned char cr;
+
+	cr = i2c_in(&ccsr_i2c->cr);
+	cr &= ~(I2C_CR_TX | I2C_CR_TX_NAK);
+	if (len == 1) {
+		cr |= I2C_CR_TX_NAK;
+	}
+	i2c_out(&ccsr_i2c->cr, cr);
+	i2c_out(&ccsr_i2c->sr, I2C_SR_IF);
+	i2c_in(&ccsr_i2c->dr);	/* dummy read */
+	for (i = 0; i < len; i++) {
+		ret = wait_for_state(ccsr_i2c, I2C_SR_IF, I2C_SR_IF);
+		if (ret < 0) {
+			gen_stop(ccsr_i2c);
+			return ret;
+		}
+		if (i == (len - 1)) {
+			gen_stop(ccsr_i2c);
+		} else if (i == (len - 2)) {
+			/* Update the command to send a NACK. */
+			cr = i2c_in(&ccsr_i2c->cr);
+			cr |= I2C_CR_TX_NAK;
+			i2c_out(&ccsr_i2c->cr, cr);
+		}
+		i2c_out(&ccsr_i2c->sr, I2C_SR_IF);
+		buf[i] = i2c_in(&ccsr_i2c->dr);
+	}
+
+	return 0;
+}
+
+static int write_data(struct ls_i2c *ccsr_i2c, unsigned char chip,
+		      const unsigned char *buf, int len)
+{
+	int i;
+	int ret;
+
+	for (i = 0; i < len; i++) {
+		ret = tx_byte(ccsr_i2c, buf[i]);
+		if (ret < 0) {
+			break;
+		}
+	}
+	ret = gen_stop(ccsr_i2c);
+
+	return ret;
+}
+
+
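+/*
+ * Read len bytes from offset 'addr' (alen address bytes) of slave device
+ * 'chip' into buf. Returns 0 on success or a negative errno on failure.
+ */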
+int i2c_read(unsigned char chip, int addr, int alen,
+	     unsigned char *buf, int len)
+{
+	int ret;
+	unsigned char cr;
+	struct ls_i2c *ccsr_i2c = (void *)g_nxp_i2c_addr;
+
+	ret = i2c_write_addr(ccsr_i2c, chip, addr, alen);
+	if (ret < 0) {
+		gen_stop(ccsr_i2c);
+		return ret;
+	}
+
+	cr = i2c_in(&ccsr_i2c->cr);
+	cr |= I2C_CR_RSTA;
+	i2c_out(&ccsr_i2c->cr, cr);
+
+	ret = tx_byte(ccsr_i2c, (chip << 1) | 1);
+	if (ret < 0) {
+		gen_stop(ccsr_i2c);
+		return ret;
+	}
+
+	return read_data(ccsr_i2c, chip, buf, len);
+}
+
+int i2c_write(unsigned char chip, int addr, int alen,
+	      const unsigned char *buf, int len)
+{
+	int ret;
+	struct ls_i2c *ccsr_i2c = (void *)g_nxp_i2c_addr;
+
+	ret = i2c_write_addr(ccsr_i2c, chip, addr, alen);
+	if (ret < 0) {
+		return ret;
+	}
+
+	return write_data(ccsr_i2c, chip, buf, len);
+}
+
+int i2c_probe_chip(unsigned char chip)
+{
+	int ret;
+	struct ls_i2c *ccsr_i2c = (void *)g_nxp_i2c_addr;
+
+	ret = i2c_write_addr(ccsr_i2c, chip, 0, 0);
+	if (ret < 0) {
+		WARN("write addr failed\n");
+		return ret;
+	}
+
+	ret = gen_stop(ccsr_i2c);
+	if (ret < 0) {
+		WARN("I2C: Probe not complete.\n");
+	}
+
+	return ret;
+}
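For reference, a minimal usage sketch of the API added above, showing the expected call flow from a boot stage. The controller base and slave address below are placeholders chosen for illustration, not values taken from this patch.

/* Hypothetical caller, e.g. BL2 platform setup code. */
#include <common/debug.h>
#include <i2c.h>

#define EXAMPLE_I2C_BASE	0x02000000UL	/* placeholder controller base */
#define EXAMPLE_PMIC_ADDR	0x4cU		/* placeholder slave address */

static int example_read_pmic_id(void)
{
	unsigned char id;
	int ret;

	/* Program the clock divider/glitch filter and enable the block. */
	i2c_init(EXAMPLE_I2C_BASE);

	/* Address the device with no register byte; expects an ACK. */
	if (i2c_probe_chip(EXAMPLE_PMIC_ADDR) < 0) {
		return -1;
	}

	/* Read one byte from register 0x00 (one-byte register address, alen = 1). */
	ret = i2c_read(EXAMPLE_PMIC_ADDR, 0x00, 1, &id, 1);
	if (ret == 0) {
		INFO("ID register = 0x%x\n", id);
	}

	return ret;
}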
diff --git a/drivers/nxp/i2c/i2c.h b/drivers/nxp/i2c/i2c.h
new file mode 100644
index 0000000..925bbc0
--- /dev/null
+++ b/drivers/nxp/i2c/i2c.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2016-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+
+#ifndef I2C_H
+#define I2C_H
+
+#include <lib/mmio.h>
+
+#define I2C_TIMEOUT	1000	/* ms */
+
+#define I2C_FD_CONSERV	0x7e
+#define I2C_CR_DIS	(1 << 7)
+#define I2C_CR_EN	(0 << 7)
+#define I2C_CR_MA	(1 << 5)
+#define I2C_CR_TX	(1 << 4)
+#define I2C_CR_TX_NAK	(1 << 3)
+#define I2C_CR_RSTA	(1 << 2)
+#define I2C_SR_BB	(1 << 5)
+#define I2C_SR_IDLE	(0 << 5)
+#define I2C_SR_AL	(1 << 4)
+#define I2C_SR_IF	(1 << 1)
+#define I2C_SR_RX_NAK	(1 << 0)
+#define I2C_SR_RST	(I2C_SR_AL | I2C_SR_IF)
+
+#define I2C_GLITCH_EN	0x8
+
+#define i2c_in(a)	mmio_read_8((uintptr_t)(a))
+#define i2c_out(a, v)	mmio_write_8((uintptr_t)(a), (v))
+
+struct ls_i2c {
+	unsigned char ad;	/* I2C Bus Address Register */
+	unsigned char fd;	/* I2C Bus Frequency Divider Register */
+	unsigned char cr;	/* I2C Bus Control Register */
+	unsigned char sr;	/* I2C Bus Status Register */
+	unsigned char dr;	/* I2C Bus Data I/O Register */
+	unsigned char ic;	/* I2C Bus Interrupt Config Register */
+	unsigned char dbg;	/* I2C Bus Debug Register */
+};
+
+void i2c_init(uintptr_t nxp_i2c_addr);
+int i2c_read(unsigned char chip, int addr, int alen,
+	     unsigned char *buf, int len);
+int i2c_write(unsigned char chip, int addr, int alen,
+	      const unsigned char *buf, int len);
+int i2c_probe_chip(unsigned char chip);
+
+#endif /* I2C_H */
diff --git a/drivers/nxp/i2c/i2c.mk b/drivers/nxp/i2c/i2c.mk
new file mode 100644
index 0000000..ae89115
--- /dev/null
+++ b/drivers/nxp/i2c/i2c.mk
@@ -0,0 +1,25 @@
+#
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${ADD_I2C},)
+
+ADD_I2C			:= 1
+I2C_DRIVERS_PATH        := ${PLAT_DRIVERS_PATH}/i2c
+
+I2C_SOURCES		+= $(I2C_DRIVERS_PATH)/i2c.c
+PLAT_INCLUDES		+= -I$(I2C_DRIVERS_PATH)
+
+ifeq (${BL_COMM_I2C_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${I2C_SOURCES}
+else
+ifeq (${BL2_I2C_NEEDED},yes)
+BL2_SOURCES		+= ${I2C_SOURCES}
+endif
+ifeq (${BL31_I2C_NEEDED},yes)
+BL31_SOURCES		+= ${I2C_SOURCES}
+endif
+endif
+endif
diff --git a/drivers/nxp/interconnect/interconnect.mk b/drivers/nxp/interconnect/interconnect.mk
new file mode 100644
index 0000000..81e3fa9
--- /dev/null
+++ b/drivers/nxp/interconnect/interconnect.mk
@@ -0,0 +1,44 @@
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+#------------------------------------------------------------------------------
+#
+# Select the Interconnect files
+#
+# -----------------------------------------------------------------------------
+
+ifeq (${ADD_INTERCONNECT},)
+
+ADD_INTERCONNECT	:= 1
+PLAT_INCLUDES		+= -I${PLAT_DRIVERS_PATH}/interconnect
+
+ifeq (, $(filter $(INTERCONNECT), CCI400 CCN502 CCN504 CCN508))
+    $(error INTERCONNECT is not set or is not one of: CCI400 CCN502 CCN504 CCN508)
+else
+$(eval $(call add_define_val,INTERCONNECT,${INTERCONNECT}))
+ifeq ($(INTERCONNECT), $(filter $(INTERCONNECT), CCN502 CCN504 CCN508))
+INTERCONNECT_SOURCES	:= 	drivers/arm/ccn/ccn.c 		\
+				${PLAT_DRIVERS_PATH}/interconnect/ls_ccn.c
+else
+ifeq ($(INTERCONNECT), CCI400)
+INTERCONNECT_SOURCES	:= 	drivers/arm/cci/cci.c 		\
+				${PLAT_DRIVERS_PATH}/interconnect/ls_cci.c
+endif
+endif
+endif
+
+ifeq (${BL_COMM_INTERCONNECT_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${INTERCONNECT_SOURCES}
+else
+ifeq (${BL2_INTERCONNECT_NEEDED},yes)
+BL2_SOURCES		+= ${INTERCONNECT_SOURCES}
+endif
+ifeq (${BL31_INTERCONNECT_NEEDED},yes)
+BL31_SOURCES		+= ${INTERCONNECT_SOURCES}
+endif
+endif
+endif
+
+# -----------------------------------------------------------------------------
diff --git a/drivers/nxp/interconnect/ls_cci.c b/drivers/nxp/interconnect/ls_cci.c
new file mode 100644
index 0000000..72a898a
--- /dev/null
+++ b/drivers/nxp/interconnect/ls_cci.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <arch.h>
+#include <cci.h>
+
+#include <plat_arm.h>
+
+/******************************************************************************
+ * The following functions are defined as weak to allow a platform to override
+ * the way the Layerscape CCI coherency helpers are used.
+ *****************************************************************************/
+#pragma weak plat_ls_interconnect_enter_coherency
+#pragma weak plat_ls_interconnect_exit_coherency
+
+/******************************************************************************
+ * Helper function to place current master into coherency
+ *****************************************************************************/
+void plat_ls_interconnect_enter_coherency(unsigned int num_clusters)
+{
+	cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+
+	for (uint32_t index = 1U; index < num_clusters; index++) {
+		cci_enable_snoop_dvm_reqs(index);
+	}
+}
+
+/******************************************************************************
+ * Helper function to remove current master from coherency
+ *****************************************************************************/
+void plat_ls_interconnect_exit_coherency(void)
+{
+	cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
diff --git a/drivers/nxp/interconnect/ls_ccn.c b/drivers/nxp/interconnect/ls_ccn.c
new file mode 100644
index 0000000..8f90325
--- /dev/null
+++ b/drivers/nxp/interconnect/ls_ccn.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <arch.h>
+#include <ccn.h>
+
+#include <plat_arm.h>
+
+/******************************************************************************
+ * Helper function to place current master into coherency
+ *****************************************************************************/
+void plat_ls_interconnect_enter_coherency(unsigned int num_clusters)
+{
+	ccn_enter_snoop_dvm_domain(1ULL << MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+
+	for (uint32_t index = 1U; index < num_clusters; index++) {
+		ccn_enter_snoop_dvm_domain(1ULL << index);
+	}
+}
+
+/******************************************************************************
+ * Helper function to remove current master from coherency
+ *****************************************************************************/
+void plat_ls_interconnect_exit_coherency(void)
+{
+	ccn_exit_snoop_dvm_domain(1ULL << MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
diff --git a/drivers/nxp/interconnect/ls_interconnect.h b/drivers/nxp/interconnect/ls_interconnect.h
new file mode 100644
index 0000000..26787fb
--- /dev/null
+++ b/drivers/nxp/interconnect/ls_interconnect.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef LS_INTERCONNECT_H
+#define LS_INTERCONNECT_H
+
+#if (INTERCONNECT == CCI400)
+#define CCI_TERMINATE_BARRIER_TX	0x8
+#endif
+
+/* Interconnect CCI/CCN functions */
+void plat_ls_interconnect_enter_coherency(unsigned int num_clusters);
+void plat_ls_interconnect_exit_coherency(void);
+
+#endif /* LS_INTERCONNECT_H */
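A sketch of how a platform's BL31 code might consume these helpers, under stated assumptions: the hook names and cluster count below are placeholders, not symbols introduced by this patch, and the CCI/CCN driver is assumed to have been initialised already.

#include <ls_interconnect.h>

#define EXAMPLE_NUM_CLUSTERS	2U	/* placeholder cluster count */

/* Primary core, after cci_init()/ccn_init() has run. */
void example_interconnect_setup(void)
{
	/* Enable snoops/DVM for the boot cluster and the remaining clusters. */
	plat_ls_interconnect_enter_coherency(EXAMPLE_NUM_CLUSTERS);
}

/* Last core of a cluster, on the way into a cluster power-down state. */
void example_cluster_pwr_down(void)
{
	plat_ls_interconnect_exit_coherency();
}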
diff --git a/drivers/nxp/pmu/pmu.c b/drivers/nxp/pmu/pmu.c
new file mode 100644
index 0000000..2a907c8
--- /dev/null
+++ b/drivers/nxp/pmu/pmu.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <dcfg.h>
+#include <lib/mmio.h>
+#include <pmu.h>
+
+void enable_timer_base_to_cluster(uintptr_t nxp_pmu_addr)
+{
+	uint32_t *cltbenr = NULL;
+	uint32_t cltbenr_val = 0U;
+
+	cltbenr = (uint32_t *)(nxp_pmu_addr
+				+ CLUST_TIMER_BASE_ENBL_OFFSET);
+
+	cltbenr_val = mmio_read_32((uintptr_t)cltbenr);
+
+	cltbenr_val = cltbenr_val
+			| (1 << MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+
+	mmio_write_32((uintptr_t)cltbenr, cltbenr_val);
+
+	VERBOSE("Enable cluster time base\n");
+}
+
+/*
+ * Enable the core timebase. In certain Layerscape SoCs, the timebase clock of
+ * each core has an enable bit in the PMU Physical Core Time Base Enable
+ * Register (PCTBENR), which allows the watchdog to operate.
+ */
+
+void enable_core_tb(uintptr_t nxp_pmu_addr)
+{
+	uint32_t *pctbenr = (uint32_t *) (nxp_pmu_addr +
+					  CORE_TIMEBASE_ENBL_OFFSET);
+
+	mmio_write_32((uintptr_t)pctbenr, 0xff);
+}
diff --git a/drivers/nxp/pmu/pmu.h b/drivers/nxp/pmu/pmu.h
new file mode 100644
index 0000000..28199e8
--- /dev/null
+++ b/drivers/nxp/pmu/pmu.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PMU_H
+#define PMU_H
+
+/* PMU Registers' OFFSET */
+#define PMU_PCPW20SR_OFFSET		0x830
+#define PMU_CLL2FLUSHSETR_OFFSET	0x1110
+#define PMU_CLSL2FLUSHCLRR_OFFSET	0x1114
+#define PMU_CLL2FLUSHSR_OFFSET		0x1118
+#define PMU_POWMGTCSR_VAL		(1 << 20)
+
+/* PMU Registers */
+#define CORE_TIMEBASE_ENBL_OFFSET	0x8A0
+#define CLUST_TIMER_BASE_ENBL_OFFSET	0x18A0
+
+#define PMU_IDLE_CLUSTER_MASK		0x2
+#define PMU_FLUSH_CLUSTER_MASK		0x2
+#define PMU_IDLE_CORE_MASK		0xfe
+
+/* pmu register offsets and bitmaps */
+#define PMU_POWMGTDCR0_OFFSET		0xC20
+#define PMU_POWMGTCSR_OFFSET		0x4000
+#define PMU_CLAINACTSETR_OFFSET		0x1100
+#define PMU_CLAINACTCLRR_OFFSET		0x1104
+#define PMU_CLSINACTSETR_OFFSET		0x1108
+#define PMU_CLSINACTCLRR_OFFSET		0x110C
+#define PMU_CLL2FLUSHSETR_OFFSET	0x1110
+#define PMU_CLL2FLUSHCLRR_OFFSET	0x1114
+#define PMU_IPPDEXPCR0_OFFSET		0x4040
+#define PMU_IPPDEXPCR1_OFFSET		0x4044
+#define PMU_IPPDEXPCR2_OFFSET		0x4048
+#define PMU_IPPDEXPCR3_OFFSET		0x404C
+#define PMU_IPPDEXPCR4_OFFSET		0x4050
+#define PMU_IPPDEXPCR5_OFFSET		0x4054
+#define PMU_IPPDEXPCR6_OFFSET		0x4058
+#define PMU_IPSTPCR0_OFFSET		0x4120
+#define PMU_IPSTPCR1_OFFSET		0x4124
+#define PMU_IPSTPCR2_OFFSET		0x4128
+#define PMU_IPSTPCR3_OFFSET		0x412C
+#define PMU_IPSTPCR4_OFFSET		0x4130
+#define PMU_IPSTPCR5_OFFSET		0x4134
+#define PMU_IPSTPCR6_OFFSET		0x4138
+#define PMU_IPSTPACKSR0_OFFSET		0x4140
+#define PMU_IPSTPACKSR1_OFFSET		0x4144
+#define PMU_IPSTPACKSR2_OFFSET		0x4148
+#define PMU_IPSTPACKSR3_OFFSET		0x414C
+#define PMU_IPSTPACKSR4_OFFSET		0x4150
+#define PMU_IPSTPACKSR5_OFFSET		0x4154
+#define PMU_IPSTPACKSR6_OFFSET		0x4158
+
+#define CLAINACT_DISABLE_ACP		0xFF
+#define CLSINACT_DISABLE_SKY		0xFF
+#define POWMGTDCR_STP_OV_EN		0x1
+#define POWMGTCSR_LPM20_REQ		0x00100000
+
+/* Used by PMU */
+#define DEVDISR1_MASK			0x024F3504
+#define DEVDISR2_MASK			0x0003FFFF
+#define DEVDISR3_MASK			0x0000303F
+#define DEVDISR4_MASK			0x0000FFFF
+#define DEVDISR5_MASK			0x00F07603
+#define DEVDISR6_MASK			0x00000001
+
+#ifndef __ASSEMBLER__
+void enable_timer_base_to_cluster(uintptr_t nxp_pmu_addr);
+void enable_core_tb(uintptr_t nxp_pmu_addr);
+#endif /* __ASSEMBLER__ */
+
+#endif /* PMU_H */
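A short sketch of how early platform code might call the two helpers declared above; EXAMPLE_PMU_BASE is a placeholder for the SoC's real PMU base address macro.

#include <pmu.h>

#define EXAMPLE_PMU_BASE	0x01e30000UL	/* placeholder PMU base address */

void example_enable_timebases(void)
{
	/* Ungate the cluster timer base for this core's cluster (CLTBENR)... */
	enable_timer_base_to_cluster(EXAMPLE_PMU_BASE);

	/* ...and all per-core timebases (PCTBENR), needed for the watchdog. */
	enable_core_tb(EXAMPLE_PMU_BASE);
}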
diff --git a/drivers/nxp/pmu/pmu.mk b/drivers/nxp/pmu/pmu.mk
new file mode 100644
index 0000000..56b0422
--- /dev/null
+++ b/drivers/nxp/pmu/pmu.mk
@@ -0,0 +1,28 @@
+#
+# Copyright 2021 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-----------------------------------------------------------------------------
+ifeq (${PMU_ADDED},)
+
+PMU_ADDED		:= 1
+
+PMU_DRIVERS_PATH	:=  ${PLAT_DRIVERS_PATH}/pmu
+
+PLAT_INCLUDES		+= -I$(PMU_DRIVERS_PATH)
+
+PMU_SOURCES		+= $(PMU_DRIVERS_PATH)/pmu.c
+
+ifeq (${BL_COMM_PMU_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${PMU_SOURCES}
+else
+ifeq (${BL2_PMU_NEEDED},yes)
+BL2_SOURCES		+= ${PMU_SOURCES}
+endif
+ifeq (${BL31_PMU_NEEDED},yes)
+BL31_SOURCES		+= ${PMU_SOURCES}
+endif
+endif
+endif
+#------------------------------------------------
diff --git a/drivers/nxp/qspi/qspi.c b/drivers/nxp/qspi/qspi.c
new file mode 100644
index 0000000..97b2a19
--- /dev/null
+++ b/drivers/nxp/qspi/qspi.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+
+#include <common/debug.h>
+#include <lib/mmio.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <qspi.h>
+
+int qspi_io_setup(uintptr_t nxp_qspi_flash_addr,
+		  size_t nxp_qspi_flash_size,
+		  uintptr_t fip_offset)
+{
+	uint32_t qspi_mcr_val = qspi_in32(CHS_QSPI_MCR);
+
+	/* Enable and change endianness of QSPI IP */
+	qspi_out32(CHS_QSPI_MCR, (qspi_mcr_val | CHS_QSPI_64LE));
+
+	/* Add the QSPI flash region to the translation tables */
+	mmap_add_region(nxp_qspi_flash_addr, nxp_qspi_flash_addr,
+			nxp_qspi_flash_size, MT_MEMORY | MT_RW);
+
+	return 0;
+}
diff --git a/drivers/nxp/qspi/qspi.h b/drivers/nxp/qspi/qspi.h
new file mode 100644
index 0000000..db11c3b
--- /dev/null
+++ b/drivers/nxp/qspi/qspi.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef QSPI_H
+#define QSPI_H
+
+#include <endian.h>
+#include <lib/mmio.h>
+
+#define CHS_QSPI_MCR			0x01550000
+#define CHS_QSPI_64LE			0xC
+
+#ifdef NXP_QSPI_BE
+#define qspi_in32(a)           bswap32(mmio_read_32((uintptr_t)(a)))
+#define qspi_out32(a, v)       mmio_write_32((uintptr_t)(a), bswap32(v))
+#elif defined(NXP_QSPI_LE)
+#define qspi_in32(a)           mmio_read_32((uintptr_t)(a))
+#define qspi_out32(a, v)       mmio_write_32((uintptr_t)(a), (v))
+#else
+#error Please define CCSR QSPI register endianness
+#endif
+
+int qspi_io_setup(uintptr_t nxp_qspi_flash_addr,
+		  size_t nxp_qspi_flash_size,
+		  uintptr_t fip_offset);
+#endif /* QSPI_H */
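A sketch of the expected call from platform IO setup code, under stated assumptions: the flash window macros below are placeholders, and either NXP_QSPI_BE or NXP_QSPI_LE must be defined by the platform build for the accessors above to compile.

#include <qspi.h>

#define EXAMPLE_QSPI_FLASH_BASE	0x20000000UL	/* placeholder flash window base */
#define EXAMPLE_QSPI_FLASH_SIZE	0x10000000UL	/* placeholder flash window size */
#define EXAMPLE_FIP_OFFSET	0x00100000UL	/* placeholder FIP offset */

int example_qspi_setup(void)
{
	/* Enables the controller, sets 64LE byte order and maps the window. */
	return qspi_io_setup(EXAMPLE_QSPI_FLASH_BASE,
			     EXAMPLE_QSPI_FLASH_SIZE,
			     EXAMPLE_FIP_OFFSET);
}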
diff --git a/drivers/nxp/qspi/qspi.mk b/drivers/nxp/qspi/qspi.mk
new file mode 100644
index 0000000..3e2c735
--- /dev/null
+++ b/drivers/nxp/qspi/qspi.mk
@@ -0,0 +1,28 @@
+#
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${QSPI_ADDED},)
+
+QSPI_ADDED		:= 1
+
+QSPI_DRIVERS_PATH	:=  ${PLAT_DRIVERS_PATH}/qspi
+
+QSPI_SOURCES		:=  $(QSPI_DRIVERS_PATH)/qspi.c
+
+PLAT_INCLUDES		+= -I$(QSPI_DRIVERS_PATH)
+
+ifeq (${BL_COMM_QSPI_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${QSPI_SOURCES}
+else
+ifeq (${BL2_QSPI_NEEDED},yes)
+BL2_SOURCES		+= ${QSPI_SOURCES}
+endif
+ifeq (${BL31_QSPI_NEEDED},yes)
+BL31_SOURCES		+= ${QSPI_SOURCES}
+endif
+endif
+
+endif
diff --git a/drivers/nxp/sd/sd_mmc.c b/drivers/nxp/sd/sd_mmc.c
new file mode 100644
index 0000000..f7f48e7
--- /dev/null
+++ b/drivers/nxp/sd/sd_mmc.c
@@ -0,0 +1,1496 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ *
+ */
+
+#include <endian.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/io/io_block.h>
+#include "nxp_timer.h"
+#include "sd_mmc.h"
+#include <utils.h>
+#include <utils_def.h>
+
+
+/* Private structure for MMC driver data */
+static struct mmc mmc_drv_data;
+
+#ifndef NXP_POLICY_OTA
+/*
+ * For NXP_POLICY_OTA, SD needs to read/write OCRAM, which is secure memory by
+ * default, while SD can only do non-secure DMA. Configuring SD to work in PIO
+ * mode instead of DMA mode makes SD R/W on OCRAM possible.
+ */
+/* To debug without DMA, comment out this macro. */
+#define NXP_SD_DMA_CAPABILITY
+#endif
+#define SD_TIMEOUT        1000 /* ms */
+#define SD_TIMEOUT_HIGH   20000 /* ms */
+#define SD_BLOCK_TIMEOUT  8 /* ms */
+
+#define ERROR_ESDHC_CARD_DETECT_FAIL	-1
+#define ERROR_ESDHC_UNUSABLE_CARD	-2
+#define ERROR_ESDHC_COMMUNICATION_ERROR	-3
+#define ERROR_ESDHC_BLOCK_LENGTH	-4
+#define ERROR_ESDHC_DMA_ERROR		-5
+#define ERROR_ESDHC_BUSY		-6
+
+/***************************************************************
+ * Function    :    set_speed
+ * Arguments   :    mmc - Pointer to mmc struct
+ *                  clock - Clock Value to be set
+ * Return      :    void
+ * Description :    Calculates the SDCLKFS and DVS divider values needed to
+ *                  derive the requested clock, assuming a fixed base clock
+ *                  (MAX_PLATFORM_CLOCK)
+ *****************************************************************/
+static void set_speed(struct mmc *mmc, uint32_t clock)
+{
+	/* sdhc_clk = (base clock) / [(SDCLKFS × 2) × (DVS +1)] */
+
+	uint32_t dvs = 1U;
+	uint32_t sdclkfs = 2U;
+	/* TBD - Change this to actual platform clock by reading via RCW */
+	uint32_t base_clk = MAX_PLATFORM_CLOCK;
+
+	if (base_clk / 16 > clock) {
+		for (sdclkfs = 2U; sdclkfs < 256U; sdclkfs *= 2U) {
+			if ((base_clk / sdclkfs) <= (clock * 16)) {
+				break;
+			}
+		}
+	}
+
+	for (dvs = 1U; dvs <= 16U; dvs++) {
+		if ((base_clk / (dvs * sdclkfs)) <= clock) {
+			break;
+		}
+	}
+
+	sdclkfs >>= 1U;
+	dvs -= 1U;
+
+	esdhc_out32(&mmc->esdhc_regs->sysctl,
+			(ESDHC_SYSCTL_DTOCV(TIMEOUT_COUNTER_SDCLK_2_27) |
+			 ESDHC_SYSCTL_SDCLKFS(sdclkfs) | ESDHC_SYSCTL_DVS(dvs) |
+			 ESDHC_SYSCTL_SDCLKEN));
+}
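+
+/*
+ * Worked example (illustrative numbers only): with base_clk taken as
+ * MAX_PLATFORM_CLOCK = 800 MHz and clock = CARD_IDENTIFICATION_FREQ = 400 kHz,
+ * the first loop settles on a divide value of 128 (800 MHz / 128 = 6.25 MHz
+ * <= 16 x 400 kHz) and the second on dvs = 16 (800 MHz / (128 x 16) ~= 390.6
+ * kHz <= 400 kHz). After the final shift/decrement these are programmed as
+ * SDCLKFS = 64 and DVS = 15, giving an SD clock just under the requested one.
+ */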
+
+/***************************************************************************
+ * Function    :    esdhc_init
+ * Arguments   :    mmc - Pointer to mmc struct
+ *                  card_detect - flag to indicate if card insert needs
+ *                  to be detected or not. For SDHC2 controller, Card detect
+ *                  is not present, so this field will be false
+ * Return      :    SUCCESS or Error Code
+ * Description :    1. Set Initial Clock Speed
+ *                  2. Card Detect if not eMMC
+ *                  3. Enable Controller Clock
+ *                  4. Send 80 ticks for card to power up
+ *                  5. Set LE mode and Bus Width as 1 bit.
+ ***************************************************************************/
+static int esdhc_init(struct mmc *mmc, bool card_detect)
+{
+	uint32_t val;
+	uint64_t start_time;
+
+	/* Reset the entire host controller */
+	val = esdhc_in32(&mmc->esdhc_regs->sysctl) | ESDHC_SYSCTL_RSTA;
+	esdhc_out32(&mmc->esdhc_regs->sysctl, val);
+
+	/* Wait until the controller is available */
+	start_time = get_timer_val(0);
+	while (get_timer_val(start_time) < SD_TIMEOUT_HIGH) {
+		val = esdhc_in32(&mmc->esdhc_regs->sysctl) & ESDHC_SYSCTL_RSTA;
+		if (val == 0U) {
+			break;
+		}
+	}
+
+	val = esdhc_in32(&mmc->esdhc_regs->sysctl) &
+		(ESDHC_SYSCTL_RSTA);
+	if (val != 0U) {
+		ERROR("SD Reset failed\n");
+		return ERROR_ESDHC_BUSY;
+	}
+
+	/* Set initial clock speed */
+	set_speed(mmc, CARD_IDENTIFICATION_FREQ);
+
+	if (card_detect) {
+		/* Check CINS in prsstat register */
+		val = esdhc_in32(&mmc->esdhc_regs->prsstat) &
+			ESDHC_PRSSTAT_CINS;
+		if (val == 0) {
+			ERROR("CINS not set in prsstat\n");
+			return ERROR_ESDHC_CARD_DETECT_FAIL;
+		}
+	}
+
+	/* Enable controller clock */
+	val = esdhc_in32(&mmc->esdhc_regs->sysctl) | ESDHC_SYSCTL_SDCLKEN;
+	esdhc_out32(&mmc->esdhc_regs->sysctl, val);
+
+	/* Send 80 clock ticks for the card to power up */
+	val = esdhc_in32(&mmc->esdhc_regs->sysctl) | ESDHC_SYSCTL_INITA;
+	esdhc_out32(&mmc->esdhc_regs->sysctl, val);
+
+	start_time = get_timer_val(0);
+	while (get_timer_val(start_time) < SD_TIMEOUT) {
+		val = esdhc_in32(&mmc->esdhc_regs->sysctl) & ESDHC_SYSCTL_INITA;
+		if (val != 0U) {
+			break;
+		}
+	}
+
+	val = esdhc_in32(&mmc->esdhc_regs->sysctl) & ESDHC_SYSCTL_INITA;
+	if (val == 0U) {
+		ERROR("Failed to power up the card\n");
+		return ERROR_ESDHC_CARD_DETECT_FAIL;
+	}
+
+	INFO("Card detected successfully\n");
+
+	val = esdhc_in32(&mmc->esdhc_regs->proctl);
+	val = val | (ESDHC_PROCTL_EMODE_LE | ESDHC_PROCTL_DTW_1BIT);
+
+	/* Set little endian mode, set bus width as 1-bit */
+	esdhc_out32(&mmc->esdhc_regs->proctl, val);
+
+	/* Enable cache snooping for DMA transactions */
+	val = esdhc_in32(&mmc->esdhc_regs->ctl) | ESDHC_DCR_SNOOP;
+	esdhc_out32(&mmc->esdhc_regs->ctl, val);
+
+	return 0;
+}
+
+/***************************************************************************
+ * Function    :    esdhc_send_cmd
+ * Arguments   :    mmc - Pointer to mmc struct
+ *                  cmd - Command Number
+ *                  args - Command Args
+ * Return      :    SUCCESS is 0, or Error Code ( < 0)
+ * Description :    Updates the eSDHC registers cmdargs and xfertype
+ ***************************************************************************/
+static int esdhc_send_cmd(struct mmc *mmc, uint32_t cmd, uint32_t args)
+{
+	uint32_t val;
+	uint64_t start_time;
+	uint32_t xfertyp = 0;
+
+	esdhc_out32(&mmc->esdhc_regs->irqstat, ESDHC_IRQSTAT_CLEAR_ALL);
+
+	/* Wait for the command line & data line to be free */
+	/* (poll the CIHB,CDIHB bit of the present state register) */
+	start_time = get_timer_val(0);
+	while (get_timer_val(start_time) < SD_TIMEOUT_HIGH) {
+		val = esdhc_in32(&mmc->esdhc_regs->prsstat) &
+			(ESDHC_PRSSTAT_CIHB | ESDHC_PRSSTAT_CDIHB);
+		if (val == 0U) {
+			break;
+		}
+	}
+
+	val = esdhc_in32(&mmc->esdhc_regs->prsstat) &
+		(ESDHC_PRSSTAT_CIHB | ESDHC_PRSSTAT_CDIHB);
+	if (val != 0U) {
+		ERROR("SD send cmd: Command Line or Data Line Busy cmd = %x\n",
+				cmd);
+		return ERROR_ESDHC_BUSY;
+	}
+
+	if (cmd == CMD2 || cmd == CMD9) {
+		xfertyp |= ESDHC_XFERTYP_RSPTYP_136;
+	} else if (cmd == CMD7 || (cmd == CMD6 && mmc->card.type == MMC_CARD)) {
+		xfertyp |= ESDHC_XFERTYP_RSPTYP_48_BUSY;
+	} else if (cmd != CMD0) {
+		xfertyp |= ESDHC_XFERTYP_RSPTYP_48;
+	}
+
+	if (cmd == CMD2 || cmd == CMD9) {
+		xfertyp |= ESDHC_XFERTYP_CCCEN; /* Command index check enable */
+	} else if ((cmd != CMD0) && (cmd != ACMD41) && (cmd != CMD1)) {
+		xfertyp = xfertyp | ESDHC_XFERTYP_CCCEN | ESDHC_XFERTYP_CICEN;
+	}
+
+	if ((cmd == CMD8 || cmd == CMD14 || cmd == CMD19) &&
+			mmc->card.type == MMC_CARD) {
+		xfertyp |=  ESDHC_XFERTYP_DPSEL;
+		if (cmd != CMD19) {
+			xfertyp |= ESDHC_XFERTYP_DTDSEL;
+		}
+	}
+
+	if (cmd == CMD6 || cmd == CMD17 || cmd == CMD18 || cmd == CMD24 ||
+	    cmd == ACMD51) {
+		if (!(mmc->card.type == MMC_CARD && cmd == CMD6)) {
+			if (cmd == CMD24) {
+				xfertyp |= ESDHC_XFERTYP_DPSEL;
+			} else {
+				xfertyp |= (ESDHC_XFERTYP_DPSEL |
+					    ESDHC_XFERTYP_DTDSEL);
+			}
+		}
+
+		if (cmd == CMD18) {
+			xfertyp |= ESDHC_XFERTYP_BCEN;
+			if (mmc->dma_support != 0) {
+				/* Set DMAEN of XFERTYP */
+				xfertyp |= ESDHC_XFERTYP_DMAEN;
+			}
+		}
+
+		if ((cmd == CMD17 || cmd == CMD24) && (mmc->dma_support != 0)) {
+			xfertyp |= ESDHC_XFERTYP_DMAEN;
+		}
+	}
+
+	xfertyp |= ((cmd & 0x3F) << 24);
+	esdhc_out32(&mmc->esdhc_regs->cmdarg, args);
+	esdhc_out32(&mmc->esdhc_regs->xfertyp, xfertyp);
+
+#ifdef NXP_SD_DEBUG
+	INFO("cmd = %d\n", cmd);
+	INFO("args = %x\n", args);
+	INFO("xfertyp: = %x\n", xfertyp);
+#endif
+	return 0;
+}
+
+/***************************************************************************
+ * Function    :    esdhc_wait_response
+ * Arguments   :    mmc - Pointer to mmc struct
+ *                  response - Value updated
+ * Return      :    SUCCESS - Response Received
+ *                  COMMUNICATION_ERROR - Command not Complete
+ *                  COMMAND_ERROR - CIE, CCE or CEBE  error
+ *                  RESP_TIMEOUT - CTOE error
+ * Description :    Checks for successful command completion.
+ *                  Clears the CC bit at the end.
+ ***************************************************************************/
+static int esdhc_wait_response(struct mmc *mmc, uint32_t *response)
+{
+	uint32_t val;
+	uint64_t start_time;
+	uint32_t status = 0U;
+
+	/* Wait for the command to complete */
+	start_time = get_timer_val(0);
+	while (get_timer_val(start_time) < SD_TIMEOUT_HIGH) {
+		val = esdhc_in32(&mmc->esdhc_regs->irqstat) & ESDHC_IRQSTAT_CC;
+		if (val != 0U) {
+			break;
+		}
+	}
+
+	val = esdhc_in32(&mmc->esdhc_regs->irqstat) & ESDHC_IRQSTAT_CC;
+	if (val == 0U) {
+		ERROR("%s:IRQSTAT Cmd not complete(CC not set)\n", __func__);
+		return ERROR_ESDHC_COMMUNICATION_ERROR;
+	}
+
+	status = esdhc_in32(&mmc->esdhc_regs->irqstat);
+
+	/* Check whether the interrupt is a CRC, CTOE or CIE error */
+	if ((status & (ESDHC_IRQSTAT_CIE | ESDHC_IRQSTAT_CEBE |
+				ESDHC_IRQSTAT_CCE)) != 0) {
+		ERROR("%s: IRQSTAT CRC, CEBE or CIE error = %x\n",
+							__func__, status);
+		return COMMAND_ERROR;
+	}
+
+	if ((status & ESDHC_IRQSTAT_CTOE) != 0) {
+		INFO("%s: IRQSTAT CTOE set = %x\n", __func__, status);
+		return RESP_TIMEOUT;
+	}
+
+	if ((status & ESDHC_IRQSTAT_DMAE) != 0) {
+		ERROR("%s: IRQSTAT DMAE set = %x\n", __func__, status);
+		return ERROR_ESDHC_DMA_ERROR;
+	}
+
+	if (response != NULL) {
+		/* Get response values from eSDHC CMDRSPx registers. */
+		response[0] = esdhc_in32(&mmc->esdhc_regs->cmdrsp[0]);
+		response[1] = esdhc_in32(&mmc->esdhc_regs->cmdrsp[1]);
+		response[2] = esdhc_in32(&mmc->esdhc_regs->cmdrsp[2]);
+		response[3] = esdhc_in32(&mmc->esdhc_regs->cmdrsp[3]);
+#ifdef NXP_SD_DEBUG
+		INFO("Resp R1 R2 R3 R4\n");
+		INFO("Resp R1 = %x\n", response[0]);
+		INFO("R2 = %x\n", response[1]);
+		INFO("R3 = %x\n", response[2]);
+		INFO("R4 = %x\n", response[3]);
+		INFO("\n");
+#endif
+	}
+
+	/* Clear the CC bit - w1c */
+	val = esdhc_in32(&mmc->esdhc_regs->irqstat) | ESDHC_IRQSTAT_CC;
+	esdhc_out32(&mmc->esdhc_regs->irqstat, val);
+
+	return 0;
+}
+
+/***************************************************************************
+ * Function    :    mmc_switch_to_high_frequency
+ * Arguments   :    mmc - Pointer to mmc struct
+ * Return      :    SUCCESS or Error Code
+ * Description :    MMC cards below ver 4.0 do not support high speed
+ *                  freq = 20 MHz
+ *                  Send CMD6 (CMD_SWITCH_FUNC) With args 0x03B90100
+ *                  Send CMD13 (CMD_SEND_STATUS)
+ *                  if SWITCH Error, freq = 26 MHz
+ *                  if no error, freq = 52 MHz
+ ***************************************************************************/
+static int mmc_switch_to_high_frequency(struct mmc *mmc)
+{
+	int error;
+	uint32_t response[4];
+	uint64_t start_time;
+
+	mmc->card.bus_freq = MMC_SS_20MHZ;
+	/* MMC cards below ver 4.0 do not support high speed */
+	if (mmc->card.version < MMC_CARD_VERSION_4_X) {
+		return 0;
+	}
+
+	/* send switch cmd to change the card to High speed */
+	error = esdhc_send_cmd(mmc, CMD_SWITCH_FUNC, SET_EXT_CSD_HS_TIMING);
+	if (error != 0) {
+		return error;
+	}
+	error = esdhc_wait_response(mmc, response);
+	if (error != 0) {
+		return error;
+	}
+
+	start_time = get_timer_val(0);
+	do {
+		/* check the status for which error */
+		error = esdhc_send_cmd(mmc,
+				CMD_SEND_STATUS, mmc->card.rca << 16);
+		if (error != 0) {
+			return error;
+		}
+
+		error = esdhc_wait_response(mmc, response);
+		if (error != 0) {
+			return error;
+		}
+	} while (((response[0] & SWITCH_ERROR) != 0) &&
+			(get_timer_val(start_time) < SD_TIMEOUT));
+
+	/* Check for the present state of card */
+	if ((response[0] & SWITCH_ERROR) != 0) {
+		mmc->card.bus_freq = MMC_HS_26MHZ;
+	} else {
+		mmc->card.bus_freq = MMC_HS_52MHZ;
+	}
+
+	return 0;
+}
+
+/***************************************************************************
+ * Function    :    esdhc_set_data_attributes
+ * Arguments   :    mmc - Pointer to mmc struct
+ *                  blkcnt
+ *                  blklen
+ * Return      :    SUCCESS or Error Code
+ * Description :    Set block attributes and watermark level register
+ ***************************************************************************/
+static int esdhc_set_data_attributes(struct mmc *mmc, uint32_t *dest_ptr,
+		uint32_t blkcnt, uint32_t blklen)
+{
+	uint32_t val;
+	uint64_t start_time;
+	uint32_t wml;
+	uint32_t wl;
+	uint32_t dst = (uint32_t)((uint64_t)(dest_ptr));
+
+	/* set blkattr when no transactions are executing */
+	start_time = get_timer_val(0);
+	while (get_timer_val(start_time) < SD_TIMEOUT_HIGH) {
+		val = esdhc_in32(&mmc->esdhc_regs->prsstat) & ESDHC_PRSSTAT_DLA;
+		if (val == 0U) {
+			break;
+		}
+	}
+
+	val = esdhc_in32(&mmc->esdhc_regs->prsstat) & ESDHC_PRSSTAT_DLA;
+	if (val != 0U) {
+		ERROR("%s: Data line active. Cannot set attributes\n", __func__);
+		return ERROR_ESDHC_COMMUNICATION_ERROR;
+	}
+
+	wml = esdhc_in32(&mmc->esdhc_regs->wml);
+	wml &= ~(ESDHC_WML_WR_BRST_MASK | ESDHC_WML_RD_BRST_MASK |
+			ESDHC_WML_RD_WML_MASK | ESDHC_WML_WR_WML_MASK);
+
+	if ((mmc->dma_support != 0) && (dest_ptr != NULL)) {
+		/* Set burst length to 128 bytes */
+		esdhc_out32(&mmc->esdhc_regs->wml,
+				wml | ESDHC_WML_WR_BRST(BURST_128_BYTES));
+		esdhc_out32(&mmc->esdhc_regs->wml,
+				wml | ESDHC_WML_RD_BRST(BURST_128_BYTES));
+
+		/* Set DMA System Destination Address */
+		esdhc_out32(&mmc->esdhc_regs->dsaddr, dst);
+	} else {
+		wl = (blklen >= BLOCK_LEN_512) ?
+			WML_512_BYTES : ((blklen + 3) / 4);
+		/* Set 'Read Water Mark Level' register */
+		esdhc_out32(&mmc->esdhc_regs->wml, wml | ESDHC_WML_RD_WML(wl));
+	}
+
+	/* Configure block Attributes register */
+	esdhc_out32(&mmc->esdhc_regs->blkattr,
+		ESDHC_BLKATTR_BLKCNT(blkcnt) | ESDHC_BLKATTR_BLKSZE(blklen));
+
+	mmc->block_len = blklen;
+
+	return 0;
+}
+
+/***************************************************************************
+ * Function    :    esdhc_read_data_nodma
+ * Arguments   :    mmc - Pointer to mmc struct
+ *                  dest_ptr - Buffer where read data is to be copied
+ *                  len - Length of Data to be read
+ * Return      :    SUCCESS or Error Code
+ * Description :    Read data from the sdhc buffer without using DMA
+ *                  and using polling mode
+ ***************************************************************************/
+static int esdhc_read_data_nodma(struct mmc *mmc, void *dest_ptr, uint32_t len)
+{
+	uint32_t i = 0U;
+	uint32_t status;
+	uint32_t num_blocks;
+	uint32_t *dst = (uint32_t *)dest_ptr;
+	uint32_t val;
+	uint64_t start_time;
+
+	num_blocks = len / mmc->block_len;
+
+	while ((num_blocks--) != 0U) {
+
+		start_time = get_timer_val(0);
+		while (get_timer_val(start_time) < SD_TIMEOUT_HIGH) {
+			val = esdhc_in32(&mmc->esdhc_regs->prsstat) &
+				ESDHC_PRSSTAT_BREN;
+			if (val != 0U) {
+				break;
+			}
+		}
+
+		val = esdhc_in32(&mmc->esdhc_regs->prsstat)
+			& ESDHC_PRSSTAT_BREN;
+		if (val == 0U) {
+			return ERROR_ESDHC_COMMUNICATION_ERROR;
+		}
+
+		for (i = 0U, status = esdhc_in32(&mmc->esdhc_regs->irqstat);
+				i < mmc->block_len / 4;    i++, dst++) {
+			/* get data from data port */
+			val = mmio_read_32(
+					(uintptr_t)&mmc->esdhc_regs->datport);
+			esdhc_out32(dst, val);
+			/* Increment destination pointer */
+			status = esdhc_in32(&mmc->esdhc_regs->irqstat);
+		}
+		/* Check whether the interrupt is a DTOE/DCE/DEBE error */
+		if ((status & (ESDHC_IRQSTAT_DTOE | ESDHC_IRQSTAT_DCE |
+					ESDHC_IRQSTAT_DEBE)) != 0) {
+			ERROR("SD read error - DTOE, DCE, DEBE bit set = %x\n",
+									status);
+			return ERROR_ESDHC_COMMUNICATION_ERROR;
+		}
+	}
+
+	/* Wait for TC */
+
+	start_time = get_timer_val(0);
+	while (get_timer_val(start_time) < SD_TIMEOUT_HIGH) {
+		val = esdhc_in32(&mmc->esdhc_regs->irqstat) & ESDHC_IRQSTAT_TC;
+		if (val != 0U) {
+			break;
+		}
+	}
+
+	val = esdhc_in32(&mmc->esdhc_regs->irqstat) & ESDHC_IRQSTAT_TC;
+	if (val == 0U) {
+		ERROR("SD read timeout: Transfer bit not set in IRQSTAT\n");
+		return ERROR_ESDHC_COMMUNICATION_ERROR;
+	}
+
+	return 0;
+}
+
+/***************************************************************************
+ * Function    :    esdhc_write_data_nodma
+ * Arguments   :    mmc - Pointer to mmc struct
+ *                  src_ptr - Buffer where data is copied from
+ *                  len - Length of Data to be written
+ * Return      :    SUCCESS or Error Code
+ * Description :    Write data to the sdhc buffer without using DMA
+ *                  and using polling mode
+ ***************************************************************************/
+static int esdhc_write_data_nodma(struct mmc *mmc, void *src_ptr, uint32_t len)
+{
+	uint32_t i = 0U;
+	uint32_t status;
+	uint32_t num_blocks;
+	uint32_t *src = (uint32_t *)src_ptr;
+	uint32_t val;
+	uint64_t start_time;
+
+	num_blocks = len / mmc->block_len;
+
+	while ((num_blocks--) != 0U) {
+		start_time = get_timer_val(0);
+		while (get_timer_val(start_time) < SD_TIMEOUT_HIGH) {
+			val = esdhc_in32(&mmc->esdhc_regs->prsstat) &
+					 ESDHC_PRSSTAT_BWEN;
+			if (val != 0U) {
+				break;
+			}
+		}
+
+		val = esdhc_in32(&mmc->esdhc_regs->prsstat) &
+				 ESDHC_PRSSTAT_BWEN;
+		if (val == 0U) {
+			return ERROR_ESDHC_COMMUNICATION_ERROR;
+		}
+
+		for (i = 0U, status = esdhc_in32(&mmc->esdhc_regs->irqstat);
+		     i < mmc->block_len / 4; i++, src++) {
+			val = esdhc_in32(src);
+			/* put data to data port */
+			mmio_write_32((uintptr_t)&mmc->esdhc_regs->datport,
+				      val);
+			/* Increment source pointer */
+			status = esdhc_in32(&mmc->esdhc_regs->irqstat);
+		}
+		/* Check whether the interrupt is a DTOE/DCE/DEBE error */
+		if ((status & (ESDHC_IRQSTAT_DTOE | ESDHC_IRQSTAT_DCE |
+					ESDHC_IRQSTAT_DEBE)) != 0) {
+			ERROR("SD write error - DTOE, DCE, DEBE bit set = %x\n",
+			      status);
+			return ERROR_ESDHC_COMMUNICATION_ERROR;
+		}
+	}
+
+	/* Wait for TC */
+	start_time = get_timer_val(0);
+	while (get_timer_val(start_time) < SD_TIMEOUT_HIGH) {
+		val = esdhc_in32(&mmc->esdhc_regs->irqstat) & ESDHC_IRQSTAT_TC;
+		if (val != 0U) {
+			break;
+		}
+	}
+
+	val = esdhc_in32(&mmc->esdhc_regs->irqstat) & ESDHC_IRQSTAT_TC;
+	if (val == 0U) {
+		ERROR("SD write timeout: Transfer bit not set in IRQSTAT\n");
+		return ERROR_ESDHC_COMMUNICATION_ERROR;
+	}
+
+	return 0;
+}
+
+/***************************************************************************
+ * Function    :    esdhc_read_data_dma
+ * Arguments   :    mmc - Pointer to mmc struct
+ *                  len - Length of Data to be read
+ * Return      :    SUCCESS or Error Code
+ * Description :    Read data from the sd card using DMA.
+ ***************************************************************************/
+static int esdhc_read_data_dma(struct mmc *mmc, uint32_t len)
+{
+	uint32_t status;
+	uint32_t tblk;
+	uint64_t start_time;
+
+	tblk = SD_BLOCK_TIMEOUT * (len / mmc->block_len);
+
+	start_time = get_timer_val(0);
+
+	/* poll till TC is set */
+	do {
+		status = esdhc_in32(&mmc->esdhc_regs->irqstat);
+
+		if ((status & (ESDHC_IRQSTAT_DEBE | ESDHC_IRQSTAT_DCE
+					| ESDHC_IRQSTAT_DTOE)) != 0) {
+			ERROR("SD read error - DTOE, DCE, DEBE bit set = %x\n",
+								 status);
+			return ERROR_ESDHC_COMMUNICATION_ERROR;
+		}
+
+		if ((status & ESDHC_IRQSTAT_DMAE) != 0) {
+			ERROR("SD read error - DMA error = %x\n", status);
+			return ERROR_ESDHC_DMA_ERROR;
+		}
+
+	} while (((status & ESDHC_IRQSTAT_TC) == 0) &&
+		((esdhc_in32(&mmc->esdhc_regs->prsstat) & ESDHC_PRSSTAT_DLA) != 0) &&
+		(get_timer_val(start_time) < SD_TIMEOUT_HIGH + tblk));
+
+	if (get_timer_val(start_time) > SD_TIMEOUT_HIGH + tblk) {
+		ERROR("SD read DMA timeout\n");
+		return ERROR_ESDHC_COMMUNICATION_ERROR;
+	}
+
+	return 0;
+}
+
+/***************************************************************************
+ * Function    :    esdhc_write_data_dma
+ * Arguments   :    mmc - Pointer to mmc struct
+ *                  len - Length of Data to be written
+ * Return      :    SUCCESS or Error Code
+ * Description :    Write data to the sd card using DMA.
+ ***************************************************************************/
+static int esdhc_write_data_dma(struct mmc *mmc, uint32_t len)
+{
+	uint32_t status;
+	uint32_t tblk;
+	uint64_t start_time;
+
+	tblk = SD_BLOCK_TIMEOUT * (len / mmc->block_len);
+
+	start_time = get_timer_val(0);
+
+	/* poll till TC is set */
+	do {
+		status = esdhc_in32(&mmc->esdhc_regs->irqstat);
+
+		if ((status & (ESDHC_IRQSTAT_DEBE | ESDHC_IRQSTAT_DCE
+					| ESDHC_IRQSTAT_DTOE)) != 0) {
+			ERROR("SD write error - DTOE, DCE, DEBE bit set = %x\n",
+			      status);
+			return ERROR_ESDHC_COMMUNICATION_ERROR;
+		}
+
+		if ((status & ESDHC_IRQSTAT_DMAE) != 0) {
+			ERROR("SD write error - DMA error = %x\n", status);
+			return ERROR_ESDHC_DMA_ERROR;
+		}
+	} while (((status & ESDHC_IRQSTAT_TC) == 0) &&
+		((esdhc_in32(&mmc->esdhc_regs->prsstat) & ESDHC_PRSSTAT_DLA) != 0) &&
+		(get_timer_val(start_time) < SD_TIMEOUT_HIGH + tblk));
+
+	if (get_timer_val(start_time) > SD_TIMEOUT_HIGH + tblk) {
+		ERROR("SD write DMA timeout\n");
+		return ERROR_ESDHC_COMMUNICATION_ERROR;
+	}
+
+	return 0;
+}
+
+/***************************************************************************
+ * Function    :    esdhc_read_data
+ * Arguments   :    mmc - Pointer to mmc struct
+ *                  dest_ptr - Buffer where read data is to be copied
+ *                  len - Length of Data to be read
+ * Return      :    SUCCESS or Error Code
+ * Description :    Calls esdhc_read_data_dma/_nodma and clears interrupt status
+ ***************************************************************************/
+int esdhc_read_data(struct mmc *mmc, void *dest_ptr, uint32_t len)
+{
+	int ret;
+
+	if (mmc->dma_support && len > 64) {
+		ret = esdhc_read_data_dma(mmc, len);
+	} else {
+		ret = esdhc_read_data_nodma(mmc, dest_ptr, len);
+	}
+
+	/* clear interrupt status */
+	esdhc_out32(&mmc->esdhc_regs->irqstat, ESDHC_IRQSTAT_CLEAR_ALL);
+
+	return ret;
+}
+
+/***************************************************************************
+ * Function    :    esdhc_write_data
+ * Arguments   :    mmc - Pointer to mmc struct
+ *                  src_ptr - Buffer where data is copied from
+ *                  len - Length of Data to be written
+ * Return      :    SUCCESS or Error Code
+ * Description :    Calls esdhc_write_data_dma/_nodma and clears interrupt status
+ ***************************************************************************/
+int esdhc_write_data(struct mmc *mmc, void *src_ptr, uint32_t len)
+{
+	int ret;
+
+	if (mmc->dma_support && len > 64) {
+		ret = esdhc_write_data_dma(mmc, len);
+	} else {
+		ret = esdhc_write_data_nodma(mmc, src_ptr, len);
+	}
+
+	/* clear interrupt status */
+	esdhc_out32(&mmc->esdhc_regs->irqstat, ESDHC_IRQSTAT_CLEAR_ALL);
+
+	return ret;
+}
+
+/***************************************************************************
+ * Function    :    sd_switch_to_high_freq
+ * Arguments   :    mmc - Pointer to mmc struct
+ * Return      :    SUCCESS or Error Code
+ * Description :    1. Send ACMD51 (CMD_SEND_SCR)
+ *                  2. Read the SCR to check if card supports higher freq
+ *                  3. check version from SCR
+ *                  4. If SD 1.0, return (no Switch) freq = 25 MHz.
+ *                  5. Send CMD6 (CMD_SWITCH_FUNC) with args 0x00FFFFF1 to
+ *                     check the status of switch func
+ *                  6. Send CMD6 (CMD_SWITCH_FUNC) With args 0x80FFFFF1 to
+ *                     switch to high frequency = 50 Mhz
+ ***************************************************************************/
+static int sd_switch_to_high_freq(struct mmc *mmc)
+{
+	int err;
+	uint8_t scr[8];
+	uint8_t status[64];
+	uint32_t response[4];
+	uint32_t version;
+	uint32_t count;
+	uint32_t sd_versions[] = {SD_CARD_VERSION_1_0, SD_CARD_VERSION_1_10,
+		SD_CARD_VERSION_2_0};
+
+	mmc->card.bus_freq = SD_SS_25MHZ;
+	/* Send Application command */
+	err = esdhc_send_cmd(mmc, CMD_APP_CMD, mmc->card.rca << 16);
+	if (err != 0) {
+		return err;
+	}
+
+	err = esdhc_wait_response(mmc, response);
+	if (err != 0) {
+		return err;
+	}
+
+	esdhc_set_data_attributes(mmc, NULL, 1, 8);
+	/* Read the SCR to find out if this card supports higher speeds */
+	err = esdhc_send_cmd(mmc, CMD_SEND_SCR,  mmc->card.rca << 16);
+	if (err != 0) {
+		return err;
+	}
+	err = esdhc_wait_response(mmc, response);
+	if (err != 0) {
+		return err;
+	}
+
+	/* read 8 bytes of scr data */
+	err = esdhc_read_data(mmc, scr, 8U);
+	if (err != 0) {
+		return ERROR_ESDHC_COMMUNICATION_ERROR;
+	}
+
+	/* check version from SCR */
+	version = scr[0] & U(0xF);
+	if (version <= 2U) {
+		mmc->card.version = sd_versions[version];
+	} else {
+		mmc->card.version = SD_CARD_VERSION_2_0;
+	}
+
+	/* does not support switch func */
+	if (mmc->card.version == SD_CARD_VERSION_1_0) {
+		return 0;
+	}
+
+	/* read 64 bytes of status */
+	esdhc_set_data_attributes(mmc, NULL, 1U, 64U);
+
+	/* check the status of switch func */
+	for (count = 0U; count < 4U; count++) {
+		err = esdhc_send_cmd(mmc, CMD_SWITCH_FUNC,
+				SD_SWITCH_FUNC_CHECK_MODE);
+		if (err != 0) {
+			return err;
+		}
+		err = esdhc_wait_response(mmc, response);
+		if (err != 0) {
+			return err;
+		}
+		/* read 64 bytes of scr data */
+		err = esdhc_read_data(mmc, status, 64U);
+		if (err != 0) {
+			return ERROR_ESDHC_COMMUNICATION_ERROR;
+		}
+
+		if ((status[29] & SD_SWITCH_FUNC_HIGH_SPEED) == 0) {
+			break;
+		}
+	}
+
+	if ((status[13] & SD_SWITCH_FUNC_HIGH_SPEED) == 0) {
+		return 0;
+	}
+
+	/* SWITCH */
+	esdhc_set_data_attributes(mmc, NULL, 1, 64);
+	err = esdhc_send_cmd(mmc, CMD_SWITCH_FUNC, SD_SWITCH_FUNC_SWITCH_MODE);
+	if (err != 0) {
+		return err;
+	}
+	err = esdhc_wait_response(mmc, response);
+	if (err != 0) {
+		return err;
+	}
+
+	err = esdhc_read_data(mmc, status, 64U);
+	if (err != 0) {
+		return ERROR_ESDHC_COMMUNICATION_ERROR;
+	}
+
+	if ((status[16]) == U(0x01)) {
+		mmc->card.bus_freq = SD_HS_50MHZ;
+	}
+
+	return 0;
+}
+
+/***************************************************************************
+ * Function    :    change_state_to_transfer_state
+ * Arguments   :    mmc - Pointer to mmc struct
+ * Return      :    SUCCESS or Error Code
+ * Description :    1. Send CMD7 (CMD_SELECT_CARD) to toggles the card
+ *                     between stand-by and transfer state
+ *                  2. Send CMD13 (CMD_SEND_STATUS) to check state as
+ *                     Transfer State
+ ***************************************************************************/
+static int change_state_to_transfer_state(struct mmc *mmc)
+{
+	int error = 0;
+	uint32_t response[4];
+	uint64_t start_time;
+
+	/* Command CMD_SELECT_CARD/CMD7 toggles the card between stand-by
+	 * and transfer states
+	 */
+	error = esdhc_send_cmd(mmc, CMD_SELECT_CARD, mmc->card.rca << 16);
+	if (error != 0) {
+		return error;
+	}
+	error = esdhc_wait_response(mmc, response);
+	if (error != 0) {
+		return error;
+	}
+
+	start_time = get_timer_val(0);
+	while (get_timer_val(start_time) < SD_TIMEOUT_HIGH) {
+		/* send CMD13 to check card status */
+		error = esdhc_send_cmd(mmc,
+					CMD_SEND_STATUS, mmc->card.rca << 16);
+		if (error != 0) {
+			return error;
+		}
+		error = esdhc_wait_response(mmc, response);
+		if ((error != 0) || ((response[0] & R1_ERROR) != 0)) {
+			return error;
+		}
+
+		/* Check for the present state of card */
+		if (((response[0] >> 9U) & U(0xF)) == STATE_TRAN) {
+			break;
+		}
+	}
+	if (((response[0] >> 9U) & U(0xF)) == STATE_TRAN) {
+		return 0;
+	} else {
+		return ERROR_ESDHC_COMMUNICATION_ERROR;
+	}
+}
+
+/***************************************************************************
+ * Function    :    get_cid_rca_csd
+ * Arguments   :    mmc - Pointer to mmc struct
+ * Return      :    SUCCESS or Error Code
+ * Description :    1. Send CMD2 (CMD_ALL_SEND_CID)
+ *                  2. get RCA for SD cards, set rca for mmc cards
+ *                     Send CMD3 (CMD_SEND_RELATIVE_ADDR)
+ *                  3. Send CMD9 (CMD_SEND_CSD)
+ *                  4. Get MMC Version from CSD
+ ***************************************************************************/
+static int get_cid_rca_csd(struct mmc *mmc)
+{
+	int err;
+	uint32_t version;
+	uint32_t response[4];
+	uint32_t mmc_version[] = {MMC_CARD_VERSION_1_2, MMC_CARD_VERSION_1_4,
+		MMC_CARD_VERSION_2_X, MMC_CARD_VERSION_3_X,
+		MMC_CARD_VERSION_4_X};
+
+	err = esdhc_send_cmd(mmc, CMD_ALL_SEND_CID, 0);
+	if (err != 0) {
+		return err;
+	}
+	err = esdhc_wait_response(mmc, response);
+	if (err != 0) {
+		return err;
+	}
+
+	/* get RCA for SD cards, set rca for mmc cards */
+	mmc->card.rca = SD_MMC_CARD_RCA;
+
+	/* send RCA cmd */
+	err = esdhc_send_cmd(mmc, CMD_SEND_RELATIVE_ADDR, mmc->card.rca << 16);
+	if (err != 0) {
+		return err;
+	}
+	err = esdhc_wait_response(mmc, response);
+	if (err != 0) {
+		return err;
+	}
+
+	/* for SD, get the RCA */
+	if (mmc->card.type == SD_CARD) {
+		mmc->card.rca = (response[0] >> 16) & 0xFFFF;
+	}
+
+	/* Get the CSD (card specific data) from card. */
+	err = esdhc_send_cmd(mmc, CMD_SEND_CSD, mmc->card.rca << 16);
+	if (err != 0) {
+		return err;
+	}
+	err = esdhc_wait_response(mmc, response);
+	if (err != 0) {
+		return err;
+	}
+
+	version = (response[3] >> 18U) & U(0xF);
+	if (mmc->card.type == MMC_CARD) {
+		if (version <= MMC_CARD_VERSION_4_X) {
+			mmc->card.version = mmc_version[version];
+		} else {
+			mmc->card.version = MMC_CARD_VERSION_4_X;
+		}
+	}
+
+	mmc->card.block_len = 1 << ((response[2] >> 8) & 0xF);
+
+	if (mmc->card.block_len > BLOCK_LEN_512) {
+		mmc->card.block_len = BLOCK_LEN_512;
+	}
+
+	return 0;
+}
+
+/***************************************************************************
+ * Function    :    identify_mmc_card
+ * Arguments   :    mmc - Pointer to mmc struct
+ * Return      :    SUCCESS or Error Code
+ * Description :    1. Send Reset Command
+ *                  2. Send CMD1 with args to set voltage range and Sector
+ *                     Mode. (Voltage Args = 0xFF8)
+ *                  3. Check the OCR Response
+ ***************************************************************************/
+static int identify_mmc_card(struct mmc *mmc)
+{
+	uint64_t start_time;
+	uint32_t resp[4];
+	int ret;
+	uint32_t args;
+
+	/* card reset */
+	ret = esdhc_send_cmd(mmc, CMD_GO_IDLE_STATE, 0U);
+	if (ret != 0) {
+		return ret;
+	}
+	ret = esdhc_wait_response(mmc, resp);
+	if (ret != 0) {
+		return ret;
+	}
+
+	/*
+	 * Send CMD1 repeatedly to get the OCR value, until the card busy bit
+	 * is clear. timeout = 20sec
+	 */
+
+	start_time = get_timer_val(0);
+	do {
+		/* set the bits for the voltage ranges supported by host */
+		args = mmc->voltages_caps | MMC_OCR_SECTOR_MODE;
+		ret = esdhc_send_cmd(mmc, CMD_MMC_SEND_OP_COND, args);
+		if (ret != 0) {
+			return ret;
+		}
+		ret = esdhc_wait_response(mmc, resp);
+		if (ret != 0) {
+			return ERROR_ESDHC_UNUSABLE_CARD;
+		}
+	} while (((resp[0] & MMC_OCR_BUSY) == 0U) &&
+			(get_timer_val(start_time) < SD_TIMEOUT_HIGH));
+
+	if (get_timer_val(start_time) > SD_TIMEOUT_HIGH) {
+		return ERROR_ESDHC_UNUSABLE_CARD;
+	}
+
+	if ((resp[0] & MMC_OCR_CCS) == MMC_OCR_CCS) {
+		mmc->card.is_high_capacity = 1;
+	}
+
+	return MMC_CARD;
+}
+
+/***************************************************************************
+ * Function    :    check_for_sd_card
+ * Arguments   :    mmc - Pointer to mmc struct
+ * Return      :    SUCCESS or Error Code
+ * Description :    1. Send Reset Command
+ *                  2. Send CMD8 with pattern 0xAA (to check for SD 2.0)
+ *                  3. Send ACMD41 with args to set voltage range and HCS
+ *                     HCS is set only for SD Card > 2.0
+ *                     Voltage Caps = 0xFF8
+ *                  4. Check the OCR Response
+ ***************************************************************************/
+static int check_for_sd_card(struct mmc *mmc)
+{
+	uint64_t start_time;
+	uint32_t args;
+	int  ret;
+	uint32_t resp[4];
+
+	/* Send reset command */
+	ret = esdhc_send_cmd(mmc, CMD_GO_IDLE_STATE, 0U);
+	if (ret != 0) {
+		return ret;
+	}
+	ret = esdhc_wait_response(mmc, resp);
+	if (ret != 0) {
+		return ret;
+	}
+
+	/* send CMD8 with  pattern 0xAA */
+	args = MMC_VDD_HIGH_VOLTAGE | 0xAA;
+	ret = esdhc_send_cmd(mmc, CMD_SEND_IF_COND, args);
+	if (ret != 0) {
+		return ret;
+	}
+	ret = esdhc_wait_response(mmc, resp);
+	if (ret == RESP_TIMEOUT) { /* sd ver 1.x or not sd */
+		mmc->card.is_high_capacity = 0;
+	} else if ((resp[0] & U(0xFF)) == U(0xAA)) { /* ver 2.0 or later */
+		mmc->card.version = SD_CARD_VERSION_2_0;
+	} else {
+		return  NOT_SD_CARD;
+	}
+	/* Send Application command-55 to get the ocr value repeatedly till
+	 * the card busy is clear. timeout = 20sec
+	 */
+
+	start_time = get_timer_val(0);
+	do {
+		ret = esdhc_send_cmd(mmc, CMD_APP_CMD, 0U);
+		if (ret != 0) {
+			return ret;
+		}
+		ret = esdhc_wait_response(mmc, resp);
+		if (ret == COMMAND_ERROR) {
+			return ERROR_ESDHC_UNUSABLE_CARD;
+		}
+
+		/* set the bits for the voltage ranges supported by host */
+		args = mmc->voltages_caps;
+		if (mmc->card.version == SD_CARD_VERSION_2_0) {
+			args |= SD_OCR_HCS;
+		}
+
+		/* Send ACMD41 to set voltage range */
+		ret = esdhc_send_cmd(mmc, CMD_SD_SEND_OP_COND, args);
+		if (ret != 0) {
+			return ret;
+		}
+		ret = esdhc_wait_response(mmc, resp);
+		if (ret == COMMAND_ERROR) {
+			return ERROR_ESDHC_UNUSABLE_CARD;
+		} else if (ret == RESP_TIMEOUT) {
+			return NOT_SD_CARD;
+		}
+	} while (((resp[0] & MMC_OCR_BUSY) == 0U) &&
+			(get_timer_val(start_time) < SD_TIMEOUT_HIGH));
+
+	if (get_timer_val(start_time) > SD_TIMEOUT_HIGH) {
+		INFO("SD_TIMEOUT_HIGH\n");
+		return ERROR_ESDHC_UNUSABLE_CARD;
+	}
+
+	/* bit set in card capacity status */
+	if ((resp[0] & MMC_OCR_CCS) == MMC_OCR_CCS) {
+		mmc->card.is_high_capacity = 1;
+	}
+
+	return SD_CARD;
+}
+
+/***************************************************************************
+ * Function    :    esdhc_emmc_init
+ * Arguments   :    mmc - Pointer to mmc struct
+ *                  card_detect - flag to indicate if card detection is required
+ * Return      :    SUCCESS or Error Code (< 0)
+ * Description :    Base Function called from sd_mmc_init or emmc_init
+ ***************************************************************************/
+int esdhc_emmc_init(struct mmc *mmc, bool card_detect)
+{
+	int error = 0;
+	int ret = 0;
+
+	error = esdhc_init(mmc, card_detect);
+	if (error != 0) {
+		return error;
+	}
+
+	mmc->card.bus_freq = CARD_IDENTIFICATION_FREQ;
+	mmc->card.rca = 0;
+	mmc->card.is_high_capacity = 0;
+	mmc->card.type = ERROR_ESDHC_UNUSABLE_CARD;
+
+	/* Set Voltage caps as FF8 i.e all supported */
+	/* high voltage bits 2.7 - 3.6 */
+	mmc->voltages_caps = MMC_OCR_VDD_FF8;
+
+#ifdef NXP_SD_DMA_CAPABILITY
+	/* Getting host DMA capabilities. */
+	mmc->dma_support = esdhc_in32(&mmc->esdhc_regs->hostcapblt) &
+					ESDHC_HOSTCAPBLT_DMAS;
+#else
+	mmc->dma_support = 0;
+#endif
+
+	/* Check whether the card is SD or MMC */
+	ret = check_for_sd_card(mmc);
+	switch (ret) {
+	case SD_CARD:
+		mmc->card.type = SD_CARD;
+		break;
+
+	case NOT_SD_CARD:
+		/* try for MMC card */
+		if (identify_mmc_card(mmc) == MMC_CARD) {
+			mmc->card.type = MMC_CARD;
+		} else {
+			return ERROR_ESDHC_UNUSABLE_CARD;
+		}
+		break;
+
+	default:
+		return ERROR_ESDHC_UNUSABLE_CARD;
+	}
+
+	/* get CID, RCA and CSD. For MMC, set the rca */
+	error = get_cid_rca_csd(mmc);
+	if (error != 0) {
+		return ERROR_ESDHC_COMMUNICATION_ERROR;
+	}
+
+	/* change state to Transfer mode */
+	error = change_state_to_transfer_state(mmc);
+	if (error != 0) {
+		return ERROR_ESDHC_COMMUNICATION_ERROR;
+	}
+
+	/* change to high frequency if supported */
+	if (mmc->card.type == SD_CARD) {
+		error = sd_switch_to_high_freq(mmc);
+	} else {
+		error = mmc_switch_to_high_frequency(mmc);
+	}
+	if (error != 0) {
+		return ERROR_ESDHC_COMMUNICATION_ERROR;
+	}
+
+	/* mmc: 20000000, 26000000, 52000000 */
+	/* sd: 25000000, 50000000 */
+	set_speed(mmc, mmc->card.bus_freq);
+
+	INFO("init done:\n");
+	return 0;
+}
+
+/***************************************************************************
+ * Function    :    sd_mmc_init
+ * Arguments   :    mmc - Pointer to mmc struct
+ * Return      :    SUCCESS or Error Code
+ * Description :    Base Function called via hal_init for SD/MMC
+ *                  initialization
+ ***************************************************************************/
+int sd_mmc_init(uintptr_t nxp_esdhc_addr, bool card_detect)
+{
+	struct mmc *mmc = NULL;
+	int ret;
+
+	mmc = &mmc_drv_data;
+	memset(mmc, 0, sizeof(struct mmc));
+	mmc->esdhc_regs = (struct esdhc_regs *)nxp_esdhc_addr;
+
+	INFO("esdhc_emmc_init\n");
+	ret = esdhc_emmc_init(mmc, card_detect);
+	return ret;
+}
+
+/***************************************************************************
+ * Function    :    esdhc_read_block
+ * Arguments   :    mmc - Pointer to mmc struct
+ *                  dst - Destination Pointer
+ *                  block - Block Number
+ * Return      :    SUCCESS or Error Code
+ * Description :    Read a Single block to Destination Pointer
+ *                  1. Send CMD16 (CMD_SET_BLOCKLEN) with args as blocklen
+ *                  2. Send CMD17 (CMD_READ_SINGLE_BLOCK) with args offset
+ ***************************************************************************/
+static int esdhc_read_block(struct mmc *mmc, void *dst, uint32_t block)
+{
+	uint32_t offset;
+	int err;
+
+	/* send cmd16 to set the block size. */
+	err = esdhc_send_cmd(mmc, CMD_SET_BLOCKLEN, mmc->card.block_len);
+	if (err != 0) {
+		return err;
+	}
+	err = esdhc_wait_response(mmc, NULL);
+	if (err != 0) {
+		return ERROR_ESDHC_COMMUNICATION_ERROR;
+	}
+
+	if (mmc->card.is_high_capacity != 0) {
+		offset = block;
+	} else {
+		offset = block * mmc->card.block_len;
+	}
+
+	esdhc_set_data_attributes(mmc, dst, 1, mmc->card.block_len);
+	err = esdhc_send_cmd(mmc, CMD_READ_SINGLE_BLOCK, offset);
+	if (err != 0) {
+		return err;
+	}
+	err = esdhc_wait_response(mmc, NULL);
+	if (err != 0) {
+		return err;
+	}
+
+	err = esdhc_read_data(mmc, dst, mmc->card.block_len);
+
+	return err;
+}
+
+/***************************************************************************
+ * Function    :    esdhc_write_block
+ * Arguments   :    mmc - Pointer to mmc struct
+ *                  src - Source Pointer
+ *                  block - Block Number
+ * Return      :    SUCCESS or Error Code
+ * Description :    Write a Single block from Source Pointer
+ *                  1. Send CMD16 (CMD_SET_BLOCKLEN) with args as blocklen
+ *                  2. Send CMD24 (CMD_WRITE_SINGLE_BLOCK) with args offset
+ ***************************************************************************/
+static int esdhc_write_block(struct mmc *mmc, void *src, uint32_t block)
+{
+	uint32_t offset;
+	int err;
+
+	/* send cmd16 to set the block size. */
+	err = esdhc_send_cmd(mmc, CMD_SET_BLOCKLEN, mmc->card.block_len);
+	if (err != 0) {
+		return err;
+	}
+	err = esdhc_wait_response(mmc, NULL);
+	if (err != 0) {
+		return ERROR_ESDHC_COMMUNICATION_ERROR;
+	}
+
+	if (mmc->card.is_high_capacity != 0) {
+		offset = block;
+	} else {
+		offset = block * mmc->card.block_len;
+	}
+
+	esdhc_set_data_attributes(mmc, src, 1, mmc->card.block_len);
+	err = esdhc_send_cmd(mmc, CMD_WRITE_SINGLE_BLOCK, offset);
+	if (err != 0) {
+		return err;
+	}
+	err = esdhc_wait_response(mmc, NULL);
+	if (err != 0) {
+		return err;
+	}
+
+	err = esdhc_write_data(mmc, src, mmc->card.block_len);
+
+	return err;
+}
+
+/***************************************************************************
+ * Function    :    esdhc_read
+ * Arguments   :    src_offset - offset on sd/mmc to read from. Should be block
+ *		    size aligned
+ *                  dst - Destination Pointer
+ *                  size - Length of Data ( Multiple of block size)
+ * Return      :    SUCCESS or Error Code
+ * Description :    Calls esdhc_read_block repeatedly for reading the
+ *                  data.
+ ***************************************************************************/
+int esdhc_read(struct mmc *mmc, uint32_t src_offset, uintptr_t dst, size_t size)
+{
+	int error = 0;
+	uint32_t blk, num_blocks;
+	uint8_t *buff = (uint8_t *)dst;
+
+#ifdef NXP_SD_DEBUG
+	INFO("sd mmc read\n");
+	INFO("src = %x, dst = %lx, size = %lu\n", src_offset, dst, size);
+#endif
+
+	/* check for size */
+	if (size == 0) {
+		return 0;
+	}
+
+	if ((size % mmc->card.block_len) != 0) {
+		ERROR("Size is not block aligned\n");
+		return -1;
+	}
+
+	if ((src_offset % mmc->card.block_len) != 0) {
+		ERROR("Offset is not block aligned\n");
+		return -1;
+	}
+
+	/* start block */
+	blk = src_offset / mmc->card.block_len;
+#ifdef NXP_SD_DEBUG
+	INFO("blk = %x\n", blk);
+#endif
+
+	/* Number of blocks to be read */
+	num_blocks = size / mmc->card.block_len;
+
+	while (num_blocks != 0U) {
+		error = esdhc_read_block(mmc, buff, blk);
+		if (error != 0) {
+			ERROR("Read error = %x\n", error);
+			return error;
+		}
+
+		buff = buff + mmc->card.block_len;
+		blk++;
+		num_blocks--;
+	}
+
+	INFO("sd-mmc read done.\n");
+	return error;
+}
+
+/***************************************************************************
+ * Function    :    esdhc_write
+ * Arguments   :    src - Source Pointer
+ *                  dst_offset - offset on sd/mmc to write to. Should be block
+ *		    size aligned
+ *                  size - Length of Data (Multiple of block size)
+ * Return      :    SUCCESS or Error Code
+ * Description :    Calls esdhc_write_block repeatedly for writing the
+ *                  data.
+ ***************************************************************************/
+int esdhc_write(struct mmc *mmc, uintptr_t src, uint32_t dst_offset,
+		size_t size)
+{
+	int error = 0;
+	uint32_t blk, num_blocks;
+	uint8_t *buff = (uint8_t *)src;
+
+#ifdef NXP_SD_DEBUG
+	INFO("sd mmc write\n");
+	INFO("src = %x, dst = %lxsize = %lu\n", src, dst_offset, size);
+#endif
+
+	/* check for size */
+	if (size == 0) {
+		return 0;
+	}
+
+	if ((size % mmc->card.block_len) != 0) {
+		ERROR("Size is not block aligned\n");
+		return -1;
+	}
+
+	if ((dst_offset % mmc->card.block_len) != 0) {
+		ERROR("Size is not block aligned\n");
+		return -1;
+	}
+
+	/* start block */
+	blk = dst_offset / mmc->card.block_len;
+#ifdef NXP_SD_DEBUG
+	INFO("blk = %x\n", blk);
+#endif
+
+	/* Number of blocks to be written */
+	num_blocks = size / mmc->card.block_len;
+
+	while (num_blocks != 0U) {
+		error = esdhc_write_block(mmc, buff, blk);
+		if (error != 0) {
+			ERROR("Write error = %x\n", error);
+			return error;
+		}
+
+		buff = buff + mmc->card.block_len;
+		blk++;
+		num_blocks--;
+	}
+
+	INFO("sd-mmc write done.\n");
+	return error;
+}
+
+static size_t ls_sd_emmc_read(int lba, uintptr_t buf, size_t size)
+{
+	struct mmc *mmc = NULL;
+	int ret;
+
+	mmc = &mmc_drv_data;
+	lba *= BLOCK_LEN_512;
+	ret = esdhc_read(mmc, lba, buf, size);
+	return ret ? 0 : size;
+}
+
+static struct io_block_dev_spec ls_emmc_dev_spec = {
+	.buffer = {
+		.offset = 0,
+		.length = 0,
+	},
+	.ops = {
+		.read = ls_sd_emmc_read,
+	},
+	.block_size = BLOCK_LEN_512,
+};
+
+int sd_emmc_init(uintptr_t *block_dev_spec,
+			uintptr_t nxp_esdhc_addr,
+			size_t nxp_sd_block_offset,
+			size_t nxp_sd_block_size,
+			bool card_detect)
+{
+	int ret;
+
+	ret = sd_mmc_init(nxp_esdhc_addr, card_detect);
+	if (ret != 0) {
+		return ret;
+	}
+
+	ls_emmc_dev_spec.buffer.offset = nxp_sd_block_offset;
+	ls_emmc_dev_spec.buffer.length = nxp_sd_block_size;
+	*block_dev_spec = (uintptr_t)&ls_emmc_dev_spec;
+
+	return 0;
+}
diff --git a/drivers/nxp/sd/sd_mmc.h b/drivers/nxp/sd/sd_mmc.h
new file mode 100644
index 0000000..29ad328
--- /dev/null
+++ b/drivers/nxp/sd/sd_mmc.h
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) 2015, 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef SD_MMC_H
+#define SD_MMC_H
+
+#include <lib/mmio.h>
+
+/* operating freq */
+#define CARD_IDENTIFICATION_FREQ	400000
+#define SD_SS_25MHZ	20000000
+#define SD_HS_50MHZ	40000000
+#define MMC_SS_20MHZ	15000000
+#define MMC_HS_26MHZ	20000000
+#define MMC_HS_52MHZ	40000000
+
+/* Need to check this value ? */
+#define MAX_PLATFORM_CLOCK	800000000
+
+/* eSDHC system control register defines */
+#define ESDHC_SYSCTL_DTOCV(t)		(((t) & 0xF) << 16)
+#define ESDHC_SYSCTL_SDCLKFS(f)		(((f) & 0xFF) << 8)
+#define ESDHC_SYSCTL_DVS(d)		(((d) & 0xF) << 4)
+#define ESDHC_SYSCTL_SDCLKEN		(0x00000008)
+#define ESDHC_SYSCTL_RSTA		(0x01000000)
+
+/* Data timeout counter value. SDHC_CLK x 2^27 */
+#define TIMEOUT_COUNTER_SDCLK_2_27	0xE
+#define ESDHC_SYSCTL_INITA	0x08000000
+
+/* eSDHC interrupt status enable register defines */
+#define ESDHC_IRQSTATEN_CINS	0x00000040
+#define ESDHC_IRQSTATEN_BWR	0x00000010
+
+/* eSDHC interrupt status register defines */
+#define ESDHC_IRQSTAT_DMAE	(0x10000000)
+#define ESDHC_IRQSTAT_AC12E	(0x01000000)
+#define ESDHC_IRQSTAT_DEBE	(0x00400000)
+#define ESDHC_IRQSTAT_DCE	(0x00200000)
+#define ESDHC_IRQSTAT_DTOE	(0x00100000)
+#define ESDHC_IRQSTAT_CIE	(0x00080000)
+#define ESDHC_IRQSTAT_CEBE	(0x00040000)
+#define ESDHC_IRQSTAT_CCE	(0x00020000)
+#define ESDHC_IRQSTAT_CTOE	(0x00010000)
+#define ESDHC_IRQSTAT_CINT	(0x00000100)
+#define ESDHC_IRQSTAT_CRM	(0x00000080)
+#define ESDHC_IRQSTAT_CINS	(0x00000040)
+#define ESDHC_IRQSTAT_BRR	(0x00000020)
+#define ESDHC_IRQSTAT_BWR	(0x00000010)
+#define ESDHC_IRQSTAT_DINT	(0x00000008)
+#define ESDHC_IRQSTAT_BGE	(0x00000004)
+#define ESDHC_IRQSTAT_TC	(0x00000002)
+#define ESDHC_IRQSTAT_CC	(0x00000001)
+#define ESDHC_IRQSTAT_CMD_ERR	(ESDHC_IRQSTAT_CIE |\
+			ESDHC_IRQSTAT_CEBE |\
+			ESDHC_IRQSTAT_CCE)
+#define ESDHC_IRQSTAT_DATA_ERR	(ESDHC_IRQSTAT_DEBE |\
+			ESDHC_IRQSTAT_DCE |\
+			ESDHC_IRQSTAT_DTOE)
+#define ESDHC_IRQSTAT_CLEAR_ALL	(0xFFFFFFFF)
+
+/* eSDHC present state register defines */
+#define ESDHC_PRSSTAT_CLSL	0x00800000
+#define ESDHC_PRSSTAT_WPSPL	0x00080000
+#define ESDHC_PRSSTAT_CDPL	0x00040000
+#define ESDHC_PRSSTAT_CINS	0x00010000
+#define ESDHC_PRSSTAT_BREN	0x00000800
+#define ESDHC_PRSSTAT_BWEN	0x00000400
+#define ESDHC_PRSSTAT_RTA	0x00000200
+#define ESDHC_PRSSTAT_WTA	0x00000100
+#define ESDHC_PRSSTAT_SDOFF	0x00000080
+#define ESDHC_PRSSTAT_PEROFF	0x00000040
+#define ESDHC_PRSSTAT_HCKOFF	0x00000020
+#define ESDHC_PRSSTAT_IPGOFF	0x00000010
+#define ESDHC_PRSSTAT_DLA	0x00000004
+#define ESDHC_PRSSTAT_CDIHB	0x00000002
+#define ESDHC_PRSSTAT_CIHB	0x00000001
+
+/* eSDHC protocol control register defines */
+#define ESDHC_PROCTL_EMODE_LE	0x00000020
+#define ESDHC_PROCTL_DTW_1BIT	0x00000000
+#define ESDHC_PROCTL_DTW_4BIT	0x00000002
+#define ESDHC_PROCTL_DTW_8BIT	0x00000004
+
+/* Watermark Level Register (WML) */
+#define ESDHC_WML_RD_WML(w)	((w) & 0x7F)
+#define ESDHC_WML_WR_WML(w)	(((w) & 0x7F) << 16)
+#define ESDHC_WML_RD_BRST(w)	(((w) & 0xF) << 8)
+#define ESDHC_WML_WR_BRST(w)	(((w) & 0xF) << 24)
+#define ESDHC_WML_WR_BRST_MASK	(0x0F000000)
+#define ESDHC_WML_RD_BRST_MASK	(0x00000F00)
+#define ESDHC_WML_RD_WML_MASK	(0x0000007F)
+#define ESDHC_WML_WR_WML_MASK	(0x007F0000)
+#define WML_512_BYTES		(0x0)
+#define BURST_128_BYTES	(0x0)
+
+/* eSDHC control register define */
+#define ESDHC_DCR_SNOOP		0x00000040
+
+/* ESDHC Block attributes register */
+#define ESDHC_BLKATTR_BLKCNT(c)	(((c) & 0xffff) << 16)
+#define ESDHC_BLKATTR_BLKSZE(s)	((s) & 0xfff)
+
+/* Transfer Type Register */
+#define ESDHC_XFERTYP_CMD(c)	(((c) & 0x3F) << 24)
+#define ESDHC_XFERTYP_CMDTYP_NORMAL	(0x0)
+#define ESDHC_XFERTYP_CMDTYP_SUSPEND	(0x00400000)
+#define ESDHC_XFERTYP_CMDTYP_RESUME	(0x00800000)
+#define ESDHC_XFERTYP_CMDTYP_ABORT	(0x00C00000)
+#define ESDHC_XFERTYP_DPSEL	(0x00200000)
+#define ESDHC_XFERTYP_CICEN	(0x00100000)
+#define ESDHC_XFERTYP_CCCEN	(0x00080000)
+#define ESDHC_XFERTYP_RSPTYP_NONE	(0x0)
+#define ESDHC_XFERTYP_RSPTYP_136	(0x00010000)
+#define ESDHC_XFERTYP_RSPTYP_48	(0x00020000)
+#define ESDHC_XFERTYP_RSPTYP_48_BUSY	(0x00030000)
+#define ESDHC_XFERTYP_MSBSEL	(0x00000020)
+#define ESDHC_XFERTYP_DTDSEL	(0x00000010)
+#define ESDHC_XFERTYP_AC12EN	(0x00000004)
+#define ESDHC_XFERTYP_BCEN	(0x00000002)
+#define ESDHC_XFERTYP_DMAEN	(0x00000001)
+
+#define MMC_VDD_HIGH_VOLTAGE	0x00000100
+
+/* command index */
+#define CMD0	0
+#define CMD1	1
+#define CMD2	2
+#define CMD3	3
+#define CMD4	4
+#define CMD5	5
+#define CMD6	6
+#define CMD7	7
+#define CMD8	8
+#define CMD9	9
+#define CMD10	10
+#define CMD12	12
+#define CMD13	13
+#define CMD14	14
+#define CMD15	15
+#define CMD16	16
+#define CMD17	17
+#define CMD18	18
+#define CMD19	19
+#define CMD24	24
+#define CMD41	41
+#define CMD42	42
+#define CMD51	51
+#define CMD55	55
+#define CMD56	56
+#define ACMD6	CMD6
+#define ACMD13	CMD13
+#define ACMD41	CMD41
+#define ACMD42	CMD42
+#define ACMD51	CMD51
+
+/* commands abbreviations */
+#define CMD_GO_IDLE_STATE	CMD0
+#define CMD_MMC_SEND_OP_COND	CMD1
+#define CMD_ALL_SEND_CID	CMD2
+#define CMD_SEND_RELATIVE_ADDR	CMD3
+#define CMD_SET_DSR	CMD4
+#define CMD_SWITCH_FUNC	CMD6
+#define CMD_SELECT_CARD	CMD7
+#define CMD_DESELECT_CARD	CMD7
+#define CMD_SEND_IF_COND	CMD8
+#define CMD_MMC_SEND_EXT_CSD	CMD8
+#define CMD_SEND_CSD	CMD9
+#define CMD_SEND_CID	CMD10
+#define CMD_STOP_TRANSMISSION	CMD12
+#define CMD_SEND_STATUS	CMD13
+#define CMD_BUS_TEST_R	CMD14
+#define CMD_GO_INACTIVE_STATE	CMD15
+#define CMD_SET_BLOCKLEN	CMD16
+#define CMD_READ_SINGLE_BLOCK	CMD17
+#define CMD_READ_MULTIPLE_BLOCK	CMD18
+#define CMD_WRITE_SINGLE_BLOCK	CMD24
+#define CMD_BUS_TEST_W	CMD19
+#define CMD_APP_CMD	CMD55
+#define CMD_GEN_CMD	CMD56
+#define CMD_SET_BUS_WIDTH	ACMD6
+#define CMD_SD_STATUS	ACMD13
+#define CMD_SD_SEND_OP_COND	ACMD41
+#define CMD_SET_CLR_CARD_DETECT	ACMD42
+#define CMD_SEND_SCR	ACMD51
+
+/* MMC card spec version */
+#define MMC_CARD_VERSION_1_2	0
+#define MMC_CARD_VERSION_1_4	1
+#define MMC_CARD_VERSION_2_X	2
+#define MMC_CARD_VERSION_3_X	3
+#define MMC_CARD_VERSION_4_X	4
+
+/* SD Card Spec Version */
+/* May need to add version 3 here? */
+#define SD_CARD_VERSION_1_0	0
+#define SD_CARD_VERSION_1_10	1
+#define SD_CARD_VERSION_2_0	2
+
+/* card types */
+#define MMC_CARD	0
+#define SD_CARD		1
+#define NOT_SD_CARD	MMC_CARD
+
+/* Card rca */
+#define SD_MMC_CARD_RCA	0x1
+#define BLOCK_LEN_512	512
+
+/* card state */
+#define STATE_IDLE	0
+#define STATE_READY	1
+#define STATE_IDENT	2
+#define STATE_STBY	3
+#define STATE_TRAN	4
+#define STATE_DATA	5
+#define STATE_RCV	6
+#define STATE_PRG	7
+#define STATE_DIS	8
+
+/* Card OCR register */
+/* VDD voltage window 1.65 to 1.95 */
+#define MMC_OCR_VDD_165_195	0x00000080
+/* VDD voltage window 2.7-2.8 */
+#define MMC_OCR_VDD_FF8	0x00FF8000
+#define MMC_OCR_CCS	0x40000000/* Card Capacity */
+#define MMC_OCR_BUSY	0x80000000/* busy bit */
+#define SD_OCR_HCS	0x40000000/* High capacity host */
+#define MMC_OCR_SECTOR_MODE	0x40000000/* Access Mode as Sector */
+
+/* mmc Switch function */
+#define SET_EXT_CSD_HS_TIMING	0x03B90100/* set High speed */
+
+/* check supports switching or not */
+#define SD_SWITCH_FUNC_CHECK_MODE	0x00FFFFF1
+#define SD_SWITCH_FUNC_SWITCH_MODE	0x80FFFFF1/* switch */
+#define SD_SWITCH_FUNC_HIGH_SPEED	0x02/* HIGH SPEED FUNC */
+#define SWITCH_ERROR		0x00000080
+
+/* errors in sending commands */
+#define RESP_TIMEOUT	0x1
+#define COMMAND_ERROR	0x2
+/* error in response */
+#define R1_ERROR	(1 << 19)
+#define R1_CURRENT_STATE(x)	(((x) & 0x00001E00) >> 9)
+
+/* Host Controller Capabilities */
+#define ESDHC_HOSTCAPBLT_DMAS           (0x00400000)
+
+
+/* SD/MMC memory map */
+struct esdhc_regs {
+	uint32_t dsaddr;	/* dma system address */
+	uint32_t blkattr;	/* Block attributes */
+	uint32_t cmdarg;	/* Command argument */
+	uint32_t xfertyp;	/* Command transfer type */
+	uint32_t cmdrsp[4];	/* Command response0,1,2,3 */
+	uint32_t datport;	/* Data buffer access port */
+	uint32_t prsstat;	/* Present state */
+	uint32_t proctl;	/* Protocol control */
+	uint32_t sysctl;	/* System control */
+	uint32_t irqstat;	/* Interrupt status */
+	uint32_t irqstaten;	/* Interrupt status enable */
+	uint32_t irqsigen;	/* Interrupt signal enable */
+	uint32_t autoc12err;	/* Auto CMD12 status */
+	uint32_t hostcapblt;	/* Host controller capabilities */
+	uint32_t wml;	/* Watermark level */
+	uint32_t res1[2];
+	uint32_t fevt;	/* Force event */
+	uint32_t res2;
+	uint32_t adsaddrl;
+	uint32_t adsaddrh;
+	uint32_t res3[39];
+	uint32_t hostver;	/* Host controller version */
+	uint32_t res4;
+	uint32_t dmaerr;	/* DMA error address */
+	uint32_t dmaerrh;	/* DMA error address high */
+	uint32_t dmaerrattr; /* DMA error attribute */
+	uint32_t res5;
+	uint32_t hostcapblt2;/* Host controller capabilities2 */
+	uint32_t res6[2];
+	uint32_t tcr;	/* Tuning control */
+	uint32_t res7[7];
+	uint32_t dirctrl;	/* Direction control */
+	uint32_t ccr;	/* Clock control */
+	uint32_t res8[177];
+	uint32_t ctl;	/* Control register */
+};
+
+/* SD/MMC card attributes */
+struct card_attributes {
+	uint32_t type;	/* sd or mmc card */
+	uint32_t version;	/* version */
+	uint32_t block_len;	/* block length */
+	uint32_t bus_freq;	/* sdhc bus frequency */
+	uint16_t rca;	/* relative card address */
+	uint8_t is_high_capacity;	/* high capacity */
+};
+
+struct mmc {
+	struct esdhc_regs *esdhc_regs;
+	struct card_attributes card;
+
+	uint32_t block_len;
+	uint32_t voltages_caps;	/* supported voltages */
+	uint32_t dma_support;	/* DMA support */
+};
+
+enum cntrl_num {
+	SDHC1 = 0,
+	SDHC2
+};
+
+int sd_emmc_init(uintptr_t *block_dev_spec,
+			uintptr_t nxp_esdhc_addr,
+			size_t nxp_sd_block_offset,
+			size_t nxp_sd_block_size,
+			bool card_detect);
+
+int esdhc_emmc_init(struct mmc *mmc, bool card_detect);
+int esdhc_read(struct mmc *mmc, uint32_t src_offset, uintptr_t dst,
+	       size_t size);
+int esdhc_write(struct mmc *mmc, uintptr_t src, uint32_t dst_offset,
+		size_t size);
+
+#ifdef NXP_ESDHC_BE
+#define esdhc_in32(a)           bswap32(mmio_read_32((uintptr_t)(a)))
+#define esdhc_out32(a, v)       mmio_write_32((uintptr_t)(a), bswap32(v))
+#elif defined(NXP_ESDHC_LE)
+#define esdhc_in32(a)           mmio_read_32((uintptr_t)(a))
+#define esdhc_out32(a, v)       mmio_write_32((uintptr_t)(a), (v))
+#else
+#error Please define CCSR ESDHC register endianness
+#endif
+
+#endif /*SD_MMC_H*/
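
To illustrate how the XFERTYP bit-field macros above are meant to be combined, a sketch follows; the real composition happens inside the driver's command path, and this helper is purely illustrative.

    #include <sd_mmc.h>

    /* Sketch: XFERTYP word for a single-block read (CMD17) using internal DMA. */
    static inline uint32_t esdhc_xfertyp_read_single_block(void)
    {
    	return ESDHC_XFERTYP_CMD(CMD17) |
    	       ESDHC_XFERTYP_CICEN |		/* check command index */
    	       ESDHC_XFERTYP_CCCEN |		/* check command CRC */
    	       ESDHC_XFERTYP_RSPTYP_48 |	/* R1 response */
    	       ESDHC_XFERTYP_DPSEL |		/* data present */
    	       ESDHC_XFERTYP_DTDSEL |		/* card-to-host direction */
    	       ESDHC_XFERTYP_DMAEN;		/* enable internal DMA */
    }
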
diff --git a/drivers/nxp/sd/sd_mmc.mk b/drivers/nxp/sd/sd_mmc.mk
new file mode 100644
index 0000000..af91b1f
--- /dev/null
+++ b/drivers/nxp/sd/sd_mmc.mk
@@ -0,0 +1,28 @@
+#
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${ADD_SD_MMC},)
+
+ADD_SD_MMC	:= 1
+
+SD_DRIVERS_PATH		:=  ${PLAT_DRIVERS_PATH}/sd
+
+SD_MMC_BOOT_SOURCES	+= ${SD_DRIVERS_PATH}/sd_mmc.c \
+			   drivers/io/io_block.c
+
+PLAT_INCLUDES		+= -I$(SD_DRIVERS_PATH)
+
+ifeq (${BL_COMM_SD_MMC_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${SD_MMC_BOOT_SOURCES}
+else
+ifeq (${BL2_SD_MMC_NEEDED},yes)
+BL2_SOURCES		+= ${SD_MMC_BOOT_SOURCES}
+endif
+ifeq (${BL3_SD_MMC_NEEDED},yes)
+BL31_SOURCES		+= ${SD_MMC_BOOT_SOURCES}
+endif
+endif
+endif
diff --git a/drivers/nxp/sec_mon/sec_mon.mk b/drivers/nxp/sec_mon/sec_mon.mk
new file mode 100644
index 0000000..51e3e86
--- /dev/null
+++ b/drivers/nxp/sec_mon/sec_mon.mk
@@ -0,0 +1,27 @@
+#
+# Copyright 2021 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${ADD_SNVS},)
+
+ADD_SNVS		:= 1
+
+SNVS_DRIVERS_PATH	:= ${PLAT_DRIVERS_PATH}/sec_mon
+
+PLAT_INCLUDES		+= -I$(SNVS_DRIVERS_PATH)
+
+SNVS_SOURCES		+= $(SNVS_DRIVERS_PATH)/snvs.c
+
+ifeq (${BL_COMM_SNVS_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${SNVS_SOURCES}
+else
+ifeq (${BL2_SNVS_NEEDED},yes)
+BL2_SOURCES		+= ${SNVS_SOURCES}
+endif
+ifeq (${BL31_SNVS_NEEDED},yes)
+BL31_SOURCES		+= ${SNVS_SOURCES}
+endif
+endif
+endif
diff --git a/drivers/nxp/sec_mon/snvs.c b/drivers/nxp/sec_mon/snvs.c
new file mode 100644
index 0000000..6208b67
--- /dev/null
+++ b/drivers/nxp/sec_mon/snvs.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <snvs.h>
+
+static uintptr_t g_nxp_snvs_addr;
+
+void snvs_init(uintptr_t nxp_snvs_addr)
+{
+	g_nxp_snvs_addr = nxp_snvs_addr;
+}
+
+uint32_t get_snvs_state(void)
+{
+	struct snvs_regs *snvs = (struct snvs_regs *) (g_nxp_snvs_addr);
+
+	return (snvs_read32(&snvs->hp_stat) & HPSTS_MASK_SSM_ST);
+}
+
+static uint32_t do_snvs_state_transition(uint32_t state_transition_bit,
+					 uint32_t target_state)
+{
+	struct snvs_regs *snvs = (struct snvs_regs *) (g_nxp_snvs_addr);
+	uint32_t sts = get_snvs_state();
+	uint32_t fetch_cnt = 16U;
+	uint32_t val = snvs_read32(&snvs->hp_com) | state_transition_bit;
+
+	snvs_write32(&snvs->hp_com, val);
+
+	/* polling loop till SNVS is in target state */
+	do {
+		sts = get_snvs_state();
+	} while ((sts != target_state) && ((--fetch_cnt) != 0));
+
+	return sts;
+}
+
+void transition_snvs_non_secure(void)
+{
+	struct snvs_regs *snvs = (struct snvs_regs *) (g_nxp_snvs_addr);
+	uint32_t sts = get_snvs_state();
+
+	switch (sts) {
+		/* If initial state is check or Non-Secure, then
+		 * set the Software Security Violation Bit and
+		 * transition to Non-Secure State.
+		 */
+	case HPSTS_CHECK_SSM_ST:
+		sts = do_snvs_state_transition(HPCOM_SW_SV, HPSTS_NON_SECURE_SSM_ST);
+		break;
+
+		/* If initial state is Trusted, Secure or Soft-Fail, then
+		 * first set the Software Security Violation Bit and
+		 * transition to Soft-Fail State.
+		 */
+	case HPSTS_TRUST_SSM_ST:
+	case HPSTS_SECURE_SSM_ST:
+	case HPSTS_SOFT_FAIL_SSM_ST:
+		sts = do_snvs_state_transition(HPCOM_SW_SV, HPSTS_NON_SECURE_SSM_ST);
+
+		/* If SSM Soft Fail to Non-Secure State Transition
+		 * Disable is not set, then set SSM_ST bit and
+		 * transition to Non-Secure State.
+		 */
+		if ((snvs_read32(&snvs->hp_com) & HPCOM_SSM_SFNS_DIS) == 0) {
+			sts = do_snvs_state_transition(HPCOM_SSM_ST, HPSTS_NON_SECURE_SSM_ST);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+void transition_snvs_soft_fail(void)
+{
+	do_snvs_state_transition(HPCOM_SW_FSV, HPSTS_SOFT_FAIL_SSM_ST);
+}
+
+uint32_t transition_snvs_trusted(void)
+{
+	struct snvs_regs *snvs = (struct snvs_regs *) (g_nxp_snvs_addr);
+	uint32_t sts = get_snvs_state();
+
+	switch (sts) {
+		/* If initial state is check, set the SSM_ST bit to
+		 * change the state to trusted.
+		 */
+	case HPSTS_CHECK_SSM_ST:
+		sts = do_snvs_state_transition(HPCOM_SSM_ST, HPSTS_TRUST_SSM_ST);
+		break;
+		/* If SSM Secure to Trusted State Transition Disable
+		 * is not set, then set SSM_ST bit and
+		 * transition to Trusted State.
+		 */
+	case HPSTS_SECURE_SSM_ST:
+		if ((snvs_read32(&snvs->hp_com) & HPCOM_SSM_ST_DIS) == 0) {
+			sts = do_snvs_state_transition(HPCOM_SSM_ST, HPSTS_TRUST_SSM_ST);
+		}
+		break;
+		/* If initial state is Soft-Fail or Non-Secure, then
+		 * transition to Trusted is not Possible.
+		 */
+	default:
+		break;
+	}
+
+	return sts;
+}
+
+uint32_t transition_snvs_secure(void)
+{
+	uint32_t sts = get_snvs_state();
+
+	if (sts == HPSTS_SECURE_SSM_ST) {
+		return sts;
+	}
+
+	if (sts != HPSTS_TRUST_SSM_ST) {
+		sts = transition_snvs_trusted();
+		if (sts != HPSTS_TRUST_SSM_ST) {
+			return sts;
+		}
+	}
+
+	sts = do_snvs_state_transition(HPCOM_SSM_ST, HPSTS_TRUST_SSM_ST);
+
+	return sts;
+}
+
+void snvs_write_lp_gpr_bit(uint32_t offset, uint32_t bit_pos, bool flag_val)
+{
+	if (flag_val) {
+		snvs_write32(g_nxp_snvs_addr + offset,
+			     (snvs_read32(g_nxp_snvs_addr + offset))
+			     | (1 << bit_pos));
+	} else {
+		snvs_write32(g_nxp_snvs_addr + offset,
+			     (snvs_read32(g_nxp_snvs_addr + offset))
+			     & ~(1 << bit_pos));
+	}
+}
+
+uint32_t snvs_read_lp_gpr_bit(uint32_t offset, uint32_t bit_pos)
+{
+	return (snvs_read32(g_nxp_snvs_addr + offset) & (1 << bit_pos));
+}
+
+void snvs_disable_zeroize_lp_gpr(void)
+{
+	snvs_write_lp_gpr_bit(NXP_LPCR_OFFSET,
+			  NXP_GPR_Z_DIS_BIT,
+			  true);
+}
+
+#if defined(NXP_NV_SW_MAINT_LAST_EXEC_DATA) && defined(NXP_COINED_BB)
+void snvs_write_app_data_bit(uint32_t bit_pos)
+{
+	snvs_write_lp_gpr_bit(NXP_APP_DATA_LP_GPR_OFFSET,
+			      bit_pos,
+			      true);
+}
+
+uint32_t snvs_read_app_data(void)
+{
+	return snvs_read32(g_nxp_snvs_addr + NXP_APP_DATA_LP_GPR_OFFSET);
+}
+
+uint32_t snvs_read_app_data_bit(uint32_t bit_pos)
+{
+	uint32_t ret = snvs_read_lp_gpr_bit(NXP_APP_DATA_LP_GPR_OFFSET, bit_pos);
+
+	return ((ret != 0U) ? 1U : 0U);
+}
+
+void snvs_clear_app_data(void)
+{
+	snvs_write32(g_nxp_snvs_addr + NXP_APP_DATA_LP_GPR_OFFSET, 0x0);
+}
+#endif
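
A minimal usage sketch for the SNVS state-machine helpers above; NXP_SNVS_ADDR and plat_snvs_example() are illustrative placeholders, and the real base address comes from the platform SoC header.

    #include <common/debug.h>
    #include <snvs.h>

    #define NXP_SNVS_ADDR	0x01E90000UL	/* placeholder base address */

    void plat_snvs_example(void)
    {
    	snvs_init(NXP_SNVS_ADDR);

    	/* Try Check -> Trusted -> Secure; report if the SSM refused. */
    	if (transition_snvs_secure() != HPSTS_SECURE_SSM_ST) {
    		WARN("SNVS stayed in state 0x%x\n", get_snvs_state());
    	}
    }
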
diff --git a/drivers/nxp/sec_mon/snvs.h b/drivers/nxp/sec_mon/snvs.h
new file mode 100644
index 0000000..4455383
--- /dev/null
+++ b/drivers/nxp/sec_mon/snvs.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef SNVS_H
+#define SNVS_H
+
+
+#ifndef __ASSEMBLER__
+
+#include <endian.h>
+#include <stdbool.h>
+
+#include <lib/mmio.h>
+
+struct snvs_regs {
+	uint32_t reserved1;
+	uint32_t hp_com;		/* 0x04 SNVS_HP Command Register */
+	uint32_t reserved2[3];
+	uint32_t hp_stat;		/* 0x14 SNVS_HP Status Register */
+};
+
+#ifdef NXP_SNVS_BE
+#define snvs_read32(a)           bswap32(mmio_read_32((uintptr_t)(a)))
+#define snvs_write32(a, v)       mmio_write_32((uintptr_t)(a), bswap32((v)))
+#elif defined(NXP_SNVS_LE)
+#define snvs_read32(a)           mmio_read_32((uintptr_t)(a))
+#define snvs_write32(a, v)       mmio_write_32((uintptr_t)(a), (v))
+#else
+#error Please define CCSR SNVS register endianness
+#endif
+
+void snvs_init(uintptr_t nxp_snvs_addr);
+uint32_t get_snvs_state(void);
+void transition_snvs_non_secure(void);
+void transition_snvs_soft_fail(void);
+uint32_t transition_snvs_trusted(void);
+uint32_t transition_snvs_secure(void);
+
+uint32_t snvs_read_lp_gpr_bit(uint32_t offset, uint32_t bit_pos);
+void snvs_write_lp_gpr_bit(uint32_t offset, uint32_t bit_pos, bool flag_val);
+
+void snvs_disable_zeroize_lp_gpr(void);
+
+#if defined(NXP_NV_SW_MAINT_LAST_EXEC_DATA) && defined(NXP_COINED_BB)
+uint32_t snvs_read_app_data(void);
+uint32_t snvs_read_app_data_bit(uint32_t bit_pos);
+void snvs_clear_app_data(void);
+void snvs_write_app_data_bit(uint32_t bit_pos);
+#endif
+
+#endif	/*  __ASSEMBLER__  */
+
+/* SSM_ST field in SNVS status reg */
+#define HPSTS_CHECK_SSM_ST	0x900	/* SNVS is in check state */
+#define HPSTS_NON_SECURE_SSM_ST	0xb00	/* SNVS is in non secure state */
+#define HPSTS_TRUST_SSM_ST	0xd00	/* SNVS is in trusted state */
+#define HPSTS_SECURE_SSM_ST	0xf00	/* SNVS is in secure state */
+#define HPSTS_SOFT_FAIL_SSM_ST	0x300	/* SNVS is in soft fail state */
+#define HPSTS_MASK_SSM_ST	0xf00	/* SSM_ST field mask in SNVS reg */
+
+/* SNVS register bits */
+#define HPCOM_SW_SV		0x100	/* Security Violation bit */
+#define HPCOM_SW_FSV		0x200	/* Fatal Security Violation bit */
+#define HPCOM_SSM_ST		0x1	/* SSM_ST field in SNVS command reg */
+#define HPCOM_SSM_ST_DIS	0x2	/* Disable Secure to Trusted State */
+#define HPCOM_SSM_SFNS_DIS	0x4	/* Disable Soft Fail to Non-Secure */
+
+#define NXP_LP_GPR0_OFFSET	0x90
+#define NXP_LPCR_OFFSET		0x38
+#define NXP_GPR_Z_DIS_BIT	24
+
+#ifdef NXP_COINED_BB
+
+#ifndef NXP_APP_DATA_LP_GPR_OFFSET
+#define NXP_APP_DATA_LP_GPR_OFFSET NXP_LP_GPR0_OFFSET
+#endif
+
+#define NXP_LPGPR_ZEROTH_BIT		0
+
+#endif	/* NXP_COINED_BB */
+
+#endif	/* SNVS_H  */
diff --git a/drivers/nxp/sfp/fuse_prov.c b/drivers/nxp/sfp/fuse_prov.c
new file mode 100644
index 0000000..4d30f5f
--- /dev/null
+++ b/drivers/nxp/sfp/fuse_prov.c
@@ -0,0 +1,462 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <caam.h>
+#include <common/debug.h>
+#include <dcfg.h>
+#include <drivers/delay_timer.h>
+#include <fuse_prov.h>
+#include <sfp.h>
+#include <sfp_error_codes.h>
+
+
+static int write_a_fuse(uint32_t *fuse_addr, uint32_t *fuse_hdr_val,
+			uint32_t mask)
+{
+	uint32_t last_stored_val = sfp_read32(fuse_addr);
+
+	 /* Check if fuse already blown or not */
+	if ((last_stored_val & mask) == mask) {
+		return ERROR_ALREADY_BLOWN;
+	}
+
+	 /* Write fuse in mirror registers */
+	sfp_write32(fuse_addr, last_stored_val | (*fuse_hdr_val & mask));
+
+	 /* Read back to check if write success */
+	if (sfp_read32(fuse_addr) != (last_stored_val | (*fuse_hdr_val & mask))) {
+		return ERROR_WRITE;
+	}
+
+	return 0;
+}
+
+static int write_fuses(uint32_t *fuse_addr, uint32_t *fuse_hdr_val, uint8_t len)
+{
+	int i;
+
+	 /* Check if fuse already blown or not */
+	for (i = 0; i < len; i++) {
+		if (sfp_read32(&fuse_addr[i]) != 0) {
+			return ERROR_ALREADY_BLOWN;
+		}
+	}
+
+	 /* Write fuse in mirror registers */
+	for (i = 0; i < len; i++) {
+		sfp_write32(&fuse_addr[i], fuse_hdr_val[i]);
+	}
+
+	 /* Read back to check if write success */
+	for (i = 0; i < len; i++) {
+		if (sfp_read32(&fuse_addr[i]) != fuse_hdr_val[i]) {
+			return ERROR_WRITE;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * This function programs the Super Root Key Hash (SRKH) in the fuse
+ * registers.
+ */
+static int prog_srkh(struct fuse_hdr_t *fuse_hdr,
+		     struct sfp_ccsr_regs_t *sfp_ccsr_regs)
+{
+	int ret = 0;
+
+	ret = write_fuses(sfp_ccsr_regs->srk_hash, fuse_hdr->srkh, 8);
+
+	if (ret != 0) {
+		ret = (ret == ERROR_ALREADY_BLOWN) ?
+			ERROR_SRKH_ALREADY_BLOWN : ERROR_SRKH_WRITE;
+	}
+
+	return ret;
+}
+
+/* This function programs OEMUID[0-4] in fuse registers. */
+static int prog_oemuid(struct fuse_hdr_t *fuse_hdr,
+		       struct sfp_ccsr_regs_t *sfp_ccsr_regs)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < 5; i++) {
+		 /* Check OEMUIDx to be blown or not */
+		if (((fuse_hdr->flags >> (FLAG_OUID0_SHIFT + i)) & 0x1) != 0) {
+			 /* Check if OEMUID[i] already blown or not */
+			ret = write_fuses(&sfp_ccsr_regs->oem_uid[i],
+					 &fuse_hdr->oem_uid[i], 1);
+
+			if (ret != 0) {
+				ret = (ret == ERROR_ALREADY_BLOWN) ?
+					ERROR_OEMUID_ALREADY_BLOWN
+					: ERROR_OEMUID_WRITE;
+			}
+		}
+	}
+	return ret;
+}
+
+/* This function programs DCV[0-1], DRV[0-1] in fuse registers. */
+static int prog_debug(struct fuse_hdr_t *fuse_hdr,
+		      struct sfp_ccsr_regs_t *sfp_ccsr_regs)
+{
+	int ret;
+
+	 /* Check DCV to be blown or not */
+	if (((fuse_hdr->flags >> (FLAG_DCV0_SHIFT)) & 0x3) != 0) {
+		 /* Check if DCV[i] already blown or not */
+		ret = write_fuses(sfp_ccsr_regs->dcv, fuse_hdr->dcv, 2);
+
+		if (ret != 0) {
+			return (ret == ERROR_ALREADY_BLOWN) ?
+				ERROR_DCV_ALREADY_BLOWN
+				: ERROR_DCV_WRITE;
+		}
+	}
+
+	 /* Check DRV to be blown or not */
+	if ((((fuse_hdr->flags >> (FLAG_DRV0_SHIFT)) & 0x3)) != 0) {
+		 /* Check if DRV[i] already blown or not */
+		ret = write_fuses(sfp_ccsr_regs->drv, fuse_hdr->drv, 2);
+
+		if (ret != 0) {
+			return (ret == ERROR_ALREADY_BLOWN) ?
+				ERROR_DRV_ALREADY_BLOWN
+				: ERROR_DRV_WRITE;
+		} else {
+			 /* Check for DRV hamming error */
+			if ((sfp_read32((void *)(get_sfp_addr()
+							+ SFP_SVHESR_OFFSET))
+				& SFP_SVHESR_DRV_MASK) != 0U) {
+				return ERROR_DRV_HAMMING_ERROR;
+			}
+		}
+	}
+
+	return 0;
+}
+
+ /*
+  * Turn a 256-bit random value (32 bytes) into an OTPMK code word
+  * modifying the input data array in place
+  */
+static void otpmk_make_code_word_256(uint8_t *otpmk, bool minimal_flag)
+{
+	int i;
+	uint8_t parity_bit;
+	uint8_t code_bit;
+
+	if (minimal_flag == true) {
+		 /*
+		  * Force bits 252, 253, 254 and 255 to 1
+		  * This is because these fuses may have already been blown
+		  * and the OTPMK cannot force them back to 0
+		  */
+		otpmk[252/8] |= (1 << (252%8));
+		otpmk[253/8] |= (1 << (253%8));
+		otpmk[254/8] |= (1 << (254%8));
+		otpmk[255/8] |= (1 << (255%8));
+	}
+
+	 /* Generate the hamming code for the code word */
+	parity_bit = 0;
+	code_bit = 0;
+	for (i = 0; i < 256; i += 1) {
+		if ((otpmk[i/8] & (1 << (i%8))) != 0) {
+			parity_bit ^= 1;
+			code_bit   ^= i;
+		}
+	}
+
+	 /* Inverting otpmk[code_bit] will cause the otpmk
+	  * to become a valid code word (except for overall parity)
+	  */
+	if (code_bit < 252) {
+		otpmk[code_bit/8] ^= (1 << (code_bit % 8));
+		parity_bit ^= 1; /* account for flipping a bit changing parity */
+	} else {
+		 /* Invert two bits:  (code_bit - 4) and 4
+		  * Because we invert two bits, no need to touch the parity bit
+		  */
+		otpmk[(code_bit - 4)/8] ^= (1 << ((code_bit - 4) % 8));
+		otpmk[4/8] ^= (1 << (4 % 8));
+	}
+
+	 /* Finally, adjust the overall parity of the otpmk
+	  * otpmk bit 0
+	  */
+	otpmk[0] ^= parity_bit;
+}
+
+/* This function programs the One Time Programmable Master Key (OTPMK)
+ *  in fuse registers.
+ */
+static int prog_otpmk(struct fuse_hdr_t *fuse_hdr,
+		      struct sfp_ccsr_regs_t *sfp_ccsr_regs)
+{
+	int ret = 0;
+	uint32_t otpmk_flags;
+	uint32_t otpmk_random[8] __aligned(CACHE_WRITEBACK_GRANULE);
+
+	otpmk_flags = (fuse_hdr->flags >> (FLAG_OTPMK_SHIFT)) & FLAG_OTPMK_MASK;
+
+	switch (otpmk_flags) {
+	case PROG_OTPMK_MIN:
+		memset(fuse_hdr->otpmk, 0, sizeof(fuse_hdr->otpmk));
+
+		 /* Minimal OTPMK value (252-255 bits set to 1) */
+		fuse_hdr->otpmk[0] |= OTPMK_MIM_BITS_MASK;
+		break;
+
+	case PROG_OTPMK_RANDOM:
+		if (is_sec_enabled() == false) {
+			ret = ERROR_OTPMK_SEC_DISABLED;
+			goto out;
+		}
+
+		 /* Generate Random number using CAAM for OTPMK */
+		memset(otpmk_random, 0, sizeof(otpmk_random));
+		if (get_rand_bytes_hw((uint8_t *)otpmk_random,
+				      sizeof(otpmk_random)) != 0) {
+			ret = ERROR_OTPMK_SEC_ERROR;
+			goto out;
+		}
+
+		 /* Run hamming over random no. to make OTPMK */
+		otpmk_make_code_word_256((uint8_t *)otpmk_random, false);
+
+		 /* Swap OTPMK */
+		fuse_hdr->otpmk[0] = otpmk_random[7];
+		fuse_hdr->otpmk[1] = otpmk_random[6];
+		fuse_hdr->otpmk[2] = otpmk_random[5];
+		fuse_hdr->otpmk[3] = otpmk_random[4];
+		fuse_hdr->otpmk[4] = otpmk_random[3];
+		fuse_hdr->otpmk[5] = otpmk_random[2];
+		fuse_hdr->otpmk[6] = otpmk_random[1];
+		fuse_hdr->otpmk[7] = otpmk_random[0];
+		break;
+
+	case PROG_OTPMK_USER:
+		break;
+
+	case PROG_OTPMK_RANDOM_MIN:
+		 /* The assumption here is that the user is aware that the
+		  * minimal OTPMK is already blown.
+		  */
+
+		 /* Generate Random number using CAAM for OTPMK */
+		if (is_sec_enabled() == false) {
+			ret = ERROR_OTPMK_SEC_DISABLED;
+			goto out;
+		}
+
+		memset(otpmk_random, 0, sizeof(otpmk_random));
+		if (get_rand_bytes_hw((uint8_t *)otpmk_random,
+				      sizeof(otpmk_random)) != 0) {
+			ret = ERROR_OTPMK_SEC_ERROR;
+			goto out;
+		}
+
+		 /* Run hamming over random no. to make OTPMK */
+		otpmk_make_code_word_256((uint8_t *)otpmk_random, true);
+
+		 /* Swap OTPMK */
+		fuse_hdr->otpmk[0] = otpmk_random[7];
+		fuse_hdr->otpmk[1] = otpmk_random[6];
+		fuse_hdr->otpmk[2] = otpmk_random[5];
+		fuse_hdr->otpmk[3] = otpmk_random[4];
+		fuse_hdr->otpmk[4] = otpmk_random[3];
+		fuse_hdr->otpmk[5] = otpmk_random[2];
+		fuse_hdr->otpmk[6] = otpmk_random[1];
+		fuse_hdr->otpmk[7] = otpmk_random[0];
+		break;
+
+	case PROG_OTPMK_USER_MIN:
+		 /*
+		  * The assumption here is that the user is aware that the
+		  * minimal OTPMK is already blown. Check that the minimal
+		  * bits are set in the user-supplied OTPMK.
+		  */
+		if ((fuse_hdr->otpmk[0] & OTPMK_MIM_BITS_MASK) !=
+							OTPMK_MIM_BITS_MASK) {
+			ret = ERROR_OTPMK_USER_MIN;
+			goto out;
+		}
+		break;
+
+	default:
+		ret = 0;
+		goto out;
+	}
+
+	ret = write_fuses(sfp_ccsr_regs->otpmk, fuse_hdr->otpmk, 8);
+
+	if (ret != 0) {
+		ret = (ret == ERROR_ALREADY_BLOWN) ?
+			ERROR_OTPMK_ALREADY_BLOWN
+			: ERROR_OTPMK_WRITE;
+	} else {
+		 /* Check for DRV hamming error */
+		if ((sfp_read32((void *)(get_sfp_addr() + SFP_SVHESR_OFFSET))
+			& SFP_SVHESR_OTPMK_MASK) != 0) {
+			ret = ERROR_OTPMK_HAMMING_ERROR;
+		}
+	}
+
+out:
+	return ret;
+}
+
+/* This function programs OSPR1 in fuse registers.
+ */
+static int prog_ospr1(struct fuse_hdr_t *fuse_hdr,
+		      struct sfp_ccsr_regs_t *sfp_ccsr_regs)
+{
+	int ret;
+	uint32_t mask = 0U;
+
+#ifdef NXP_SFP_VER_3_4
+	if (((fuse_hdr->flags >> FLAG_MC_SHIFT) & 0x1) != 0) {
+		mask = OSPR1_MC_MASK;
+	}
+#endif
+	if (((fuse_hdr->flags >> FLAG_DBG_LVL_SHIFT) & 0x1) != 0) {
+		mask = mask | OSPR1_DBG_LVL_MASK;
+	}
+
+	ret = write_a_fuse(&sfp_ccsr_regs->ospr1, &fuse_hdr->ospr1, mask);
+
+	if (ret != 0) {
+		ret = (ret == ERROR_ALREADY_BLOWN) ?
+				ERROR_OSPR1_ALREADY_BLOWN
+				: ERROR_OSPR1_WRITE;
+	}
+
+	return ret;
+}
+
+/* This function programs SYSCFG in fuse registers.
+ */
+static int prog_syscfg(struct fuse_hdr_t *fuse_hdr,
+		       struct sfp_ccsr_regs_t *sfp_ccsr_regs)
+{
+	int ret;
+
+	 /* Check if SYSCFG already blown or not */
+	ret = write_a_fuse(&sfp_ccsr_regs->ospr, &fuse_hdr->sc, OSPR0_SC_MASK);
+
+	if (ret != 0) {
+		ret = (ret == ERROR_ALREADY_BLOWN) ?
+				ERROR_SC_ALREADY_BLOWN
+				: ERROR_SC_WRITE;
+	}
+
+	return ret;
+}
+
+/* This function does fuse provisioning.
+ */
+int provision_fuses(unsigned long long fuse_scr_addr,
+		    bool en_povdd_status)
+{
+	struct fuse_hdr_t *fuse_hdr = NULL;
+	struct sfp_ccsr_regs_t *sfp_ccsr_regs = (void *)(get_sfp_addr()
+							+ SFP_FUSE_REGS_OFFSET);
+	int ret = 0;
+
+	fuse_hdr = (struct fuse_hdr_t *)fuse_scr_addr;
+
+	/*
+	 * Check the Write Protect (WP) fuse. If it is blown, skip
+	 * fuse provisioning.
+	 */
+	if ((sfp_read32(&sfp_ccsr_regs->ospr) & 0x1) != 0) {
+		goto out;
+	}
+
+	 /* Check if SRKH to be blown or not */
+	if (((fuse_hdr->flags >> FLAG_SRKH_SHIFT) & 0x1) != 0) {
+		INFO("Fuse: Program SRKH\n");
+		ret = prog_srkh(fuse_hdr, sfp_ccsr_regs);
+		if (ret != 0) {
+			error_handler(ret);
+			goto out;
+		}
+	}
+
+	 /* Check if OEMUID to be blown or not */
+	if (((fuse_hdr->flags >> FLAG_OUID0_SHIFT) & FLAG_OUID_MASK) != 0) {
+		INFO("Fuse: Program OEMUIDs\n");
+		ret = prog_oemuid(fuse_hdr, sfp_ccsr_regs);
+		if (ret != 0) {
+			error_handler(ret);
+			goto out;
+		}
+	}
+
+	 /* Check if Debug values to be blown or not */
+	if (((fuse_hdr->flags >> FLAG_DCV0_SHIFT) & FLAG_DEBUG_MASK) != 0) {
+		INFO("Fuse: Program Debug values\n");
+		ret = prog_debug(fuse_hdr, sfp_ccsr_regs);
+		if (ret != 0) {
+			error_handler(ret);
+			goto out;
+		}
+	}
+
+	 /* Check if OTPMK values to be blown or not */
+	if (((fuse_hdr->flags >> FLAG_OTPMK_SHIFT) & PROG_NO_OTPMK) !=
+		PROG_NO_OTPMK) {
+		INFO("Fuse: Program OTPMK\n");
+		ret = prog_otpmk(fuse_hdr, sfp_ccsr_regs);
+		if (ret != 0) {
+			error_handler(ret);
+			goto out;
+		}
+	}
+
+	 /* Check if MC or DBG LVL to be blown or not */
+	if ((((fuse_hdr->flags >> FLAG_MC_SHIFT) & 0x1) != 0) ||
+		(((fuse_hdr->flags >> FLAG_DBG_LVL_SHIFT) & 0x1) != 0)) {
+		INFO("Fuse: Program OSPR1\n");
+		ret = prog_ospr1(fuse_hdr, sfp_ccsr_regs);
+		if (ret != 0) {
+			error_handler(ret);
+			goto out;
+		}
+	}
+
+	 /* Check if SYSCFG to be blown or not */
+	if (((fuse_hdr->flags >> FLAG_SYSCFG_SHIFT) & 0x1) != 0) {
+		INFO("Fuse: Program SYSCFG\n");
+		ret = prog_syscfg(fuse_hdr, sfp_ccsr_regs);
+		if (ret != 0) {
+			error_handler(ret);
+			goto out;
+		}
+	}
+
+	if (en_povdd_status) {
+		ret = sfp_program_fuses();
+		if (ret != 0) {
+			error_handler(ret);
+			goto out;
+		}
+	}
+out:
+	return ret;
+}
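
The OTPMK code-word logic above is essentially a 256-bit Hamming scheme: flipping otpmk[code_bit] drives the XOR of all set bit positions to zero, and bit 0 is adjusted so the overall parity is even. The checker below mirrors that encoder; it is a sketch to clarify the math and is not part of this patch.

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: true if a 256-bit (32-byte) OTPMK buffer is a valid code word. */
    static bool otpmk_is_valid_code_word_256(const uint8_t *otpmk)
    {
    	uint8_t parity = 0U;
    	uint8_t syndrome = 0U;
    	int i;

    	for (i = 0; i < 256; i++) {
    		if ((otpmk[i / 8] & (1U << (i % 8))) != 0U) {
    			parity ^= 1U;		/* overall parity of set bits */
    			syndrome ^= (uint8_t)i;	/* XOR of set bit positions */
    		}
    	}

    	/* Valid word: zero syndrome and even overall parity. */
    	return (syndrome == 0U) && (parity == 0U);
    }
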
diff --git a/drivers/nxp/sfp/fuse_prov.h b/drivers/nxp/sfp/fuse_prov.h
new file mode 100644
index 0000000..e015318
--- /dev/null
+++ b/drivers/nxp/sfp/fuse_prov.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#if !defined(FUSE_PROV_H) && defined(POLICY_FUSE_PROVISION)
+#define FUSE_PROV_H
+
+#include <endian.h>
+#include <lib/mmio.h>
+
+#define MASK_NONE		U(0xFFFFFFFF)
+#define ERROR_WRITE		U(0xA)
+#define ERROR_ALREADY_BLOWN	U(0xB)
+
+/* Flag bit shifts */
+#define FLAG_POVDD_SHIFT	U(0)
+#define FLAG_SYSCFG_SHIFT	U(1)
+#define FLAG_SRKH_SHIFT		U(2)
+#define FLAG_MC_SHIFT		U(3)
+#define FLAG_DCV0_SHIFT		U(4)
+#define FLAG_DCV1_SHIFT		U(5)
+#define FLAG_DRV0_SHIFT		U(6)
+#define FLAG_DRV1_SHIFT		U(7)
+#define FLAG_OUID0_SHIFT	U(8)
+#define FLAG_OUID1_SHIFT	U(9)
+#define FLAG_OUID2_SHIFT	U(10)
+#define FLAG_OUID3_SHIFT	U(11)
+#define FLAG_OUID4_SHIFT	U(12)
+#define FLAG_DBG_LVL_SHIFT	U(13)
+#define FLAG_OTPMK_SHIFT	U(16)
+#define FLAG_OUID_MASK		U(0x1F)
+#define FLAG_DEBUG_MASK		U(0xF)
+#define FLAG_OTPMK_MASK		U(0xF)
+
+/* OTPMK flag values */
+#define PROG_OTPMK_MIN		U(0x0)
+#define PROG_OTPMK_RANDOM	U(0x1)
+#define PROG_OTPMK_USER		U(0x2)
+#define PROG_OTPMK_RANDOM_MIN	U(0x5)
+#define PROG_OTPMK_USER_MIN	U(0x6)
+#define PROG_NO_OTPMK		U(0x8)
+
+#define OTPMK_MIM_BITS_MASK	U(0xF0000000)
+
+/* System configuration bit shifts */
+#define SCB_WP_SHIFT		U(0)
+#define SCB_ITS_SHIFT		U(2)
+#define SCB_NSEC_SHIFT		U(4)
+#define SCB_ZD_SHIFT		U(5)
+#define SCB_K0_SHIFT		U(15)
+#define SCB_K1_SHIFT		U(14)
+#define SCB_K2_SHIFT		U(13)
+#define SCB_K3_SHIFT		U(12)
+#define SCB_K4_SHIFT		U(11)
+#define SCB_K5_SHIFT		U(10)
+#define SCB_K6_SHIFT		U(9)
+#define SCB_FR0_SHIFT		U(30)
+#define SCB_FR1_SHIFT		U(31)
+
+/* Fuse Header Structure */
+struct fuse_hdr_t {
+	uint8_t barker[4];          /* 0x00 Barker code */
+	uint32_t flags;             /* 0x04 Script flags */
+	uint32_t povdd_gpio;        /* 0x08 GPIO for POVDD */
+	uint32_t otpmk[8];          /* 0x0C-0x2B OTPMK */
+	uint32_t srkh[8];           /* 0x2C-0x4B SRKH */
+	uint32_t oem_uid[5];        /* 0x4C-0x5F OEM unique id's */
+	uint32_t dcv[2];            /* 0x60-0x67 Debug Challenge */
+	uint32_t drv[2];            /* 0x68-0x6F Debug Response */
+	uint32_t ospr1;             /* 0x70 OSPR1 */
+	uint32_t sc;                /* 0x74 OSPR0 (System Configuration) */
+	uint32_t reserved[2];       /* 0x78-0x7F Reserved */
+};
+
+/* Function to do fuse provisioning */
+int provision_fuses(unsigned long long fuse_scr_addr,
+		    bool en_povdd_status);
+
+#define EFUSE_POWERUP_DELAY_mSec	U(25)
+#endif	/* FUSE_PROV_H  */
diff --git a/drivers/nxp/sfp/sfp.c b/drivers/nxp/sfp/sfp.c
new file mode 100644
index 0000000..e06c6b9
--- /dev/null
+++ b/drivers/nxp/sfp/sfp.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <caam.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <sfp.h>
+#include <sfp_error_codes.h>
+
+static uintptr_t g_nxp_sfp_addr;
+static uint32_t srk_hash[SRK_HASH_SIZE/sizeof(uint32_t)]
+					__aligned(CACHE_WRITEBACK_GRANULE);
+
+void sfp_init(uintptr_t nxp_sfp_addr)
+{
+	g_nxp_sfp_addr = nxp_sfp_addr;
+}
+
+uintptr_t get_sfp_addr(void)
+{
+	return g_nxp_sfp_addr;
+}
+
+uint32_t *get_sfp_srk_hash(void)
+{
+	struct sfp_ccsr_regs_t *sfp_ccsr_regs =
+			(void *) (g_nxp_sfp_addr + SFP_FUSE_REGS_OFFSET);
+	int i = 0;
+
+	/* Add comparison of hash with SFP hash here */
+	for (i = 0; i < SRK_HASH_SIZE/sizeof(uint32_t); i++)
+		srk_hash[i] =
+			mmio_read_32((uintptr_t)&sfp_ccsr_regs->srk_hash[i]);
+
+	return srk_hash;
+}
+
+void set_sfp_wr_disable(void)
+{
+	/*
+	 * Mark SFP Write Disable and Write Disable Lock
+	 * Bit to prevent write to SFP fuses like
+	 * OUID's, Key Revocation fuse etc
+	 */
+	void *sfpcr = (void *)(g_nxp_sfp_addr + SFP_SFPCR_OFFSET);
+	uint32_t sfpcr_val;
+
+	sfpcr_val = sfp_read32(sfpcr);
+	sfpcr_val |= (SFP_SFPCR_WD | SFP_SFPCR_WDL);
+	sfp_write32(sfpcr, sfpcr_val);
+}
+
+int sfp_program_fuses(void)
+{
+	uint32_t ingr;
+	uint32_t sfp_cmd_status = 0U;
+	int ret = 0;
+
+	/* Program SFP fuses from mirror registers */
+	sfp_write32((void *)(g_nxp_sfp_addr + SFP_INGR_OFFSET),
+		    SFP_INGR_PROGFB_CMD);
+
+	/* Wait until fuse programming is successful */
+	do {
+		ingr = sfp_read32(g_nxp_sfp_addr + SFP_INGR_OFFSET);
+	} while ((ingr & SFP_INGR_PROGFB_CMD) != 0U);
+
+	/* Check for SFP fuse programming error */
+	sfp_cmd_status = sfp_read32(g_nxp_sfp_addr + SFP_INGR_OFFSET)
+			 & SFP_INGR_ERROR_MASK;
+
+	if (sfp_cmd_status != 0U) {
+		return ERROR_PROGFB_CMD;
+	}
+
+	return ret;
+}
+
+uint32_t sfp_read_oem_uid(uint8_t oem_uid)
+{
+	uint32_t val = 0U;
+	struct sfp_ccsr_regs_t *sfp_ccsr_regs = (void *)(g_nxp_sfp_addr
+							+ SFP_FUSE_REGS_OFFSET);
+
+	if (oem_uid > MAX_OEM_UID) {
+		ERROR("Invalid OEM UID received.\n");
+		return ERROR_OEMUID_WRITE;
+	}
+
+	val = sfp_read32(&sfp_ccsr_regs->oem_uid[oem_uid]);
+
+	return val;
+}
+
+/*
+ * return val:  0 - No update required.
+ *              1 - successful update done.
+ *              ERROR_OEMUID_WRITE - Invalid OEM UID
+ */
+uint32_t sfp_write_oem_uid(uint8_t oem_uid, uint32_t sfp_val)
+{
+	uint32_t val = 0U;
+	struct sfp_ccsr_regs_t *sfp_ccsr_regs = (void *)(g_nxp_sfp_addr
+							+ SFP_FUSE_REGS_OFFSET);
+
+	val = sfp_read_oem_uid(oem_uid);
+
+	if (val == ERROR_OEMUID_WRITE) {
+		return ERROR_OEMUID_WRITE;
+	}
+
+	/* Counter already set. No need to do anything */
+	if ((val & sfp_val) != 0U) {
+		return 0U;
+	}
+
+	val |= sfp_val;
+
+	INFO("SFP Value is %x for setting sfp_val = %d\n", val, sfp_val);
+
+	sfp_write32(&sfp_ccsr_regs->oem_uid[oem_uid], val);
+
+	return 1U;
+}
+
+int sfp_check_its(void)
+{
+	struct sfp_ccsr_regs_t *sfp_ccsr_regs = (void *)(g_nxp_sfp_addr
+							+ SFP_FUSE_REGS_OFFSET);
+
+	if ((sfp_read32(&sfp_ccsr_regs->ospr) & OSPR_ITS_MASK) != 0) {
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+int sfp_check_oem_wp(void)
+{
+	struct sfp_ccsr_regs_t *sfp_ccsr_regs = (void *)(g_nxp_sfp_addr
+							+ SFP_FUSE_REGS_OFFSET);
+
+	if ((sfp_read32(&sfp_ccsr_regs->ospr) & OSPR_WP_MASK) != 0) {
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+/* This function returns ospr's key_revoc values.*/
+uint32_t get_key_revoc(void)
+{
+	struct sfp_ccsr_regs_t *sfp_ccsr_regs = (void *)(g_nxp_sfp_addr
+							+ SFP_FUSE_REGS_OFFSET);
+
+	return (sfp_read32(&sfp_ccsr_regs->ospr) & OSPR_KEY_REVOC_MASK) >>
+						OSPR_KEY_REVOC_SHIFT;
+}
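
For reference, a sketch of how early boot code might use this SFP API; NXP_SFP_ADDR and plat_sfp_example() are placeholders, and the real base address comes from the platform SoC definitions.

    #include <common/debug.h>
    #include <sfp.h>

    #define NXP_SFP_ADDR	0x01E80000UL	/* placeholder base address */

    void plat_sfp_example(void)
    {
    	uint32_t *srkh;

    	sfp_init(NXP_SFP_ADDR);

    	/* The ITS fuse decides whether secure-boot failures are fatal. */
    	if (sfp_check_its() == 1) {
    		NOTICE("Intent To Secure (ITS) fuse is blown\n");
    	}

    	/* SRKH mirror registers: 256 bits read as eight 32-bit words. */
    	srkh = get_sfp_srk_hash();
    	NOTICE("SRKH[0] = 0x%x\n", srkh[0]);
    }
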
diff --git a/drivers/nxp/sfp/sfp.h b/drivers/nxp/sfp/sfp.h
new file mode 100644
index 0000000..2cb4c7d
--- /dev/null
+++ b/drivers/nxp/sfp/sfp.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef SFP_H
+#define SFP_H
+
+#include <endian.h>
+#include <lib/mmio.h>
+
+/* SFP Configuration Register Offsets */
+#define SFP_INGR_OFFSET		U(0x20)
+#define SFP_SVHESR_OFFSET	U(0x24)
+#define SFP_SFPCR_OFFSET	U(0x28)
+#define SFP_VER_OFFSET		U(0x38)
+
+/* SFP Hamming register masks for OTPMK and DRV */
+#define SFP_SVHESR_DRV_MASK	U(0x7F)
+#define SFP_SVHESR_OTPMK_MASK	U(0x7FC00)
+
+/* SFP commands */
+#define SFP_INGR_READFB_CMD	U(0x1)
+#define SFP_INGR_PROGFB_CMD	U(0x2)
+#define SFP_INGR_ERROR_MASK	U(0x100)
+
+/* SFPCR Masks */
+#define SFP_SFPCR_WD		U(0x80000000)
+#define SFP_SFPCR_WDL		U(0x40000000)
+
+#define SFP_FUSE_REGS_OFFSET	U(0x200)
+
+#ifdef NXP_SFP_VER_3_4
+#define OSPR0_SC_MASK		U(0xC000FE35)
+#elif defined(NXP_SFP_VER_3_2)
+#define OSPR0_SC_MASK		U(0x0000E035)
+#endif
+
+#if defined(NXP_SFP_VER_3_4)
+#define OSPR_KEY_REVOC_SHIFT	U(9)
+#define OSPR_KEY_REVOC_MASK	U(0x0000fe00)
+#elif defined(NXP_SFP_VER_3_2)
+#define OSPR_KEY_REVOC_SHIFT	U(13)
+#define OSPR_KEY_REVOC_MASK	U(0x0000e000)
+#endif /* NXP_SFP_VER_3_4 */
+
+#define OSPR1_MC_MASK		U(0xFFFF0000)
+#define OSPR1_DBG_LVL_MASK	U(0x00000007)
+
+#define OSPR_ITS_MASK		U(0x00000004)
+#define OSPR_WP_MASK		U(0x00000001)
+
+#define MAX_OEM_UID		U(5)
+#define SRK_HASH_SIZE		U(32)
+
+/* SFP CCSR Register Map */
+struct sfp_ccsr_regs_t {
+	uint32_t ospr;			/* 0x200 OSPR0 */
+	uint32_t ospr1;			/* 0x204 OSPR1 */
+	uint32_t dcv[2];		/* 0x208 Debug Challenge Value */
+	uint32_t drv[2];		/* 0x210 Debug Response Value */
+	uint32_t fswpr;			/* 0x218 FSL Section Write Protect */
+	uint32_t fsl_uid[2];		/* 0x21c FSL UID 0 */
+	uint32_t isbcr;			/* 0x224 ISBC Configuration */
+	uint32_t fsspr[3];		/* 0x228 FSL Scratch Pad */
+	uint32_t otpmk[8];		/* 0x234 OTPMK */
+	uint32_t srk_hash[SRK_HASH_SIZE/sizeof(uint32_t)];
+					/* 0x254 Super Root Key Hash */
+	uint32_t oem_uid[MAX_OEM_UID];	/* 0x274 OEM UID 0 */
+};
+
+uintptr_t get_sfp_addr(void);
+void sfp_init(uintptr_t nxp_sfp_addr);
+uint32_t *get_sfp_srk_hash(void);
+int sfp_check_its(void);
+int sfp_check_oem_wp(void);
+uint32_t get_key_revoc(void);
+void set_sfp_wr_disable(void);
+int sfp_program_fuses(void);
+
+uint32_t sfp_read_oem_uid(uint8_t oem_uid);
+uint32_t sfp_write_oem_uid(uint8_t oem_uid, uint32_t sfp_val);
+
+#ifdef NXP_SFP_BE
+#define sfp_read32(a)           bswap32(mmio_read_32((uintptr_t)(a)))
+#define sfp_write32(a, v)       mmio_write_32((uintptr_t)(a), bswap32(v))
+#elif defined(NXP_SFP_LE)
+#define sfp_read32(a)           mmio_read_32((uintptr_t)(a))
+#define sfp_write32(a, v)       mmio_write_32((uintptr_t)(a), (v))
+#else
+#error Please define CCSR SFP register endianness
+#endif
+
+#endif/* SFP_H */
diff --git a/drivers/nxp/sfp/sfp.mk b/drivers/nxp/sfp/sfp.mk
new file mode 100644
index 0000000..2546dc2
--- /dev/null
+++ b/drivers/nxp/sfp/sfp.mk
@@ -0,0 +1,35 @@
+#
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-----------------------------------------------------------------------------
+ifeq (${SFP_ADDED},)
+
+SFP_ADDED		:= 1
+$(eval $(call add_define,NXP_SFP_ENABLED))
+
+SFP_DRIVERS_PATH	:=  ${PLAT_DRIVERS_PATH}/sfp
+
+PLAT_INCLUDES		+= -I$(SFP_DRIVERS_PATH)
+
+SFP_SOURCES		+= $(SFP_DRIVERS_PATH)/sfp.c
+
+ifeq (${FUSE_PROG}, 1)
+SFP_BL2_SOURCES		+= $(SFP_DRIVERS_PATH)/fuse_prov.c
+endif
+
+ifeq (${BL_COMM_SFP_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${SFP_SOURCES}
+BL2_SOURCES		+= ${SFP_BL2_SOURCES}
+else
+ifeq (${BL2_SFP_NEEDED},yes)
+BL2_SOURCES		+= ${SFP_SOURCES}\
+			   ${SFP_BL2_SOURCES}
+endif
+ifeq (${BL31_SFP_NEEDED},yes)
+BL31_SOURCES		+= ${SFP_SOURCES}
+endif
+endif
+endif
+#------------------------------------------------
diff --git a/drivers/nxp/sfp/sfp_error_codes.h b/drivers/nxp/sfp/sfp_error_codes.h
new file mode 100644
index 0000000..7be7a27
--- /dev/null
+++ b/drivers/nxp/sfp/sfp_error_codes.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef SFP_ERROR_CODES_H
+#define SFP_ERROR_CODES_H
+
+ /* Error codes */
+#define ERROR_FUSE_BARKER		0x1
+#define ERROR_READFB_CMD		0x2
+#define ERROR_PROGFB_CMD		0x3
+#define ERROR_SRKH_ALREADY_BLOWN	0x4
+#define ERROR_SRKH_WRITE		0x5
+#define ERROR_OEMUID_ALREADY_BLOWN	0x6
+#define ERROR_OEMUID_WRITE		0x7
+#define ERROR_DCV_ALREADY_BLOWN		0x8
+#define ERROR_DCV_WRITE			0x9
+#define ERROR_DRV_ALREADY_BLOWN		0xa
+#define ERROR_DRV_HAMMING_ERROR		0xb
+#define ERROR_DRV_WRITE			0x18
+#define ERROR_OTPMK_ALREADY_BLOWN	0xc
+#define ERROR_OTPMK_HAMMING_ERROR	0xd
+#define ERROR_OTPMK_USER_MIN		0xe
+#define ERROR_OSPR1_ALREADY_BLOWN	0xf
+#define ERROR_OSPR1_WRITE		0x10
+#define ERROR_SC_ALREADY_BLOWN		0x11
+#define ERROR_SC_WRITE			0x12
+#define ERROR_POVDD_GPIO_FAIL		0x13
+#define ERROR_GPIO_SET_FAIL		0x14
+#define ERROR_GPIO_RESET_FAIL		0x15
+#define ERROR_OTPMK_SEC_DISABLED	0x16
+#define ERROR_OTPMK_SEC_ERROR		0x17
+#define ERROR_OTPMK_WRITE		0x19
+#define PLAT_ERROR_ENABLE_POVDD		0x20
+#define PLAT_ERROR_DISABLE_POVDD	0x21
+
+#endif /* SFP_ERROR_CODES_H */
diff --git a/drivers/nxp/timer/nxp_timer.c b/drivers/nxp/timer/nxp_timer.c
new file mode 100644
index 0000000..8eecd2e
--- /dev/null
+++ b/drivers/nxp/timer/nxp_timer.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/utils_def.h>
+#include <nxp_timer.h>
+#include <plat/common/platform.h>
+
+static uintptr_t g_nxp_timer_addr;
+static timer_ops_t ops;
+
+uint64_t get_timer_val(uint64_t start)
+{
+	uint64_t cntpct;
+
+	isb();
+	cntpct = read_cntpct_el0();
+	return (cntpct * 1000ULL / read_cntfrq_el0() - start);
+}
+
+static uint32_t timer_get_value(void)
+{
+	uint64_t cntpct;
+
+	isb();
+	cntpct = read_cntpct_el0();
+#ifdef ERRATA_SOC_A008585
+	uint8_t	max_fetch_count = 10U;
+	/* This erratum number needs to be confirmed to match ARM document */
+	uint64_t temp;
+
+	isb();
+	temp = read_cntpct_el0();
+
+	while (temp != cntpct && max_fetch_count) {
+		isb();
+		cntpct = read_cntpct_el0();
+		isb();
+		temp = read_cntpct_el0();
+		max_fetch_count--;
+	}
+#endif
+
+	/*
+	 * Generic delay timer implementation expects the timer to be a down
+	 * counter. We apply bitwise NOT operator to the tick values returned
+	 * by read_cntpct_el0() to simulate the down counter. The value is
+	 * clipped from 64 to 32 bits.
+	 */
+	return (uint32_t)(~cntpct);
+}
+
+static void delay_timer_init_args(uint32_t mult, uint32_t div)
+{
+	ops.get_timer_value	= timer_get_value;
+	ops.clk_mult		= mult;
+	ops.clk_div		= div;
+
+	timer_init(&ops);
+
+	VERBOSE("Generic delay timer configured with mult=%u and div=%u\n",
+		mult, div);
+}
+
+/*
+ * Initialise the NXP on-chip free-running microsecond counter as the
+ * delay timer.
+ */
+void delay_timer_init(uintptr_t nxp_timer_addr)
+{
+	/* Value in ticks */
+	unsigned int mult = MHZ_TICKS_PER_SEC;
+
+	unsigned int div;
+
+	unsigned int counter_base_frequency = plat_get_syscnt_freq2();
+
+	g_nxp_timer_addr = nxp_timer_addr;
+	/* Round the counter frequency down to a whole MHz (or kHz if below 1 MHz) */
+	if (counter_base_frequency > MHZ_TICKS_PER_SEC) {
+		counter_base_frequency = (counter_base_frequency
+					/ MHZ_TICKS_PER_SEC)
+					* MHZ_TICKS_PER_SEC;
+	} else {
+		counter_base_frequency = (counter_base_frequency
+					/ KHZ_TICKS_PER_SEC)
+					* KHZ_TICKS_PER_SEC;
+	}
+
+	/* Value in ticks per second (Hz) */
+	div = counter_base_frequency;
+
+	/* Reduce multiplier and divider by dividing them repeatedly by 10 */
+	while ((mult % 10U == 0U) && (div % 10U == 0U)) {
+		mult /= 10U;
+		div /= 10U;
+	}
+
+	/* Enable and initialize the System level generic timer */
+	mmio_write_32(g_nxp_timer_addr + CNTCR_OFF,
+			CNTCR_FCREQ(0) | CNTCR_EN);
+
+	delay_timer_init_args(mult, div);
+}
+
+#ifdef IMAGE_BL31
+/*******************************************************************************
+ * TBD: Configures access to the system counter timer module.
+ ******************************************************************************/
+void ls_configure_sys_timer(uintptr_t ls_sys_timctl_base,
+			    uint8_t ls_config_cntacr,
+			    uint8_t plat_ls_ns_timer_frame_id)
+{
+	unsigned int reg_val;
+
+	if (ls_config_cntacr == 1U) {
+		reg_val = (1U << CNTACR_RPCT_SHIFT) | (1U << CNTACR_RVCT_SHIFT);
+		reg_val |= (1U << CNTACR_RFRQ_SHIFT) | (1U << CNTACR_RVOFF_SHIFT);
+		reg_val |= (1U << CNTACR_RWVT_SHIFT) | (1U << CNTACR_RWPT_SHIFT);
+		mmio_write_32(ls_sys_timctl_base +
+		      CNTACR_BASE(plat_ls_ns_timer_frame_id), reg_val);
+		mmio_write_32(ls_sys_timctl_base, plat_get_syscnt_freq2());
+	}
+
+	reg_val = (1U << CNTNSAR_NS_SHIFT(plat_ls_ns_timer_frame_id));
+	mmio_write_32(ls_sys_timctl_base + CNTNSAR, reg_val);
+}
+
+void enable_init_timer(void)
+{
+	/* Enable and initialize the System level generic timer */
+	mmio_write_32(g_nxp_timer_addr + CNTCR_OFF,
+			CNTCR_FCREQ(0) | CNTCR_EN);
+}
+#endif
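
As a worked example of the mult/div reduction above: with a 25 MHz system counter, mult starts at 1000000 and div at 25000000, and repeated division by 10 leaves mult = 1 and div = 25, so elapsed microseconds = ticks * 1 / 25. The sketch below shows the intended call sequence; NXP_TIMER_ADDR and plat_timer_example() are placeholders.

    #include <drivers/delay_timer.h>
    #include <nxp_timer.h>

    #define NXP_TIMER_ADDR	0x023E0000UL	/* placeholder system-counter base */

    void plat_timer_example(void)
    {
    	/* Registers the generic delay timer with the reduced mult/div pair. */
    	delay_timer_init(NXP_TIMER_ADDR);

    	udelay(100U);	/* busy-wait roughly 100 microseconds */
    	mdelay(1U);	/* busy-wait roughly 1 millisecond */
    }
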
diff --git a/drivers/nxp/timer/nxp_timer.h b/drivers/nxp/timer/nxp_timer.h
new file mode 100644
index 0000000..280e5b2
--- /dev/null
+++ b/drivers/nxp/timer/nxp_timer.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef NXP_TIMER_H
+#define NXP_TIMER_H
+
+ /* System Counter Offset and Bit Mask */
+#define SYS_COUNTER_CNTCR_OFFSET	0x0
+#define SYS_COUNTER_CNTCR_EN		0x00000001
+#define CNTCR_EN_MASK			0x1
+
+#ifndef __ASSEMBLER__
+uint64_t get_timer_val(uint64_t start);
+
+#ifdef IMAGE_BL31
+void ls_configure_sys_timer(uintptr_t ls_sys_timctl_base,
+			    uint8_t ls_config_cntacr,
+			    uint8_t plat_ls_ns_timer_frame_id);
+void enable_init_timer(void);
+#endif
+
+/*
+ * Initialise the NXP on-chip free-running microsecond counter as the
+ * delay timer.
+ */
+void delay_timer_init(uintptr_t nxp_timer_addr);
+void ls_bl31_timer_init(uintptr_t nxp_timer_addr);
+#endif	/* __ASSEMBLER__ */
+
+#endif /* NXP_TIMER_H */
diff --git a/drivers/nxp/timer/timer.mk b/drivers/nxp/timer/timer.mk
new file mode 100644
index 0000000..b9e298f
--- /dev/null
+++ b/drivers/nxp/timer/timer.mk
@@ -0,0 +1,27 @@
+#
+# Copyright 2021 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${ADD_TIMER},)
+
+ADD_TIMER		:= 1
+
+TIMER_DRIVERS_PATH	:=  ${PLAT_DRIVERS_PATH}/timer
+
+PLAT_INCLUDES		+= -I$(TIMER_DRIVERS_PATH)
+TIMER_SOURCES	+= drivers/delay_timer/delay_timer.c	\
+			   $(PLAT_DRIVERS_PATH)/timer/nxp_timer.c
+
+ifeq (${BL_COMM_TIMER_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${TIMER_SOURCES}
+else
+ifeq (${BL2_TIMER_NEEDED},yes)
+BL2_SOURCES		+= ${TIMER_SOURCES}
+endif
+ifeq (${BL31_TIMER_NEEDED},yes)
+BL31_SOURCES		+= ${TIMER_SOURCES}
+endif
+endif
+endif
diff --git a/drivers/nxp/tzc/plat_tzc400.c b/drivers/nxp/tzc/plat_tzc400.c
new file mode 100644
index 0000000..4fe5221
--- /dev/null
+++ b/drivers/nxp/tzc/plat_tzc400.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <common/debug.h>
+
+#include <plat_tzc400.h>
+
+#pragma weak populate_tzc400_reg_list
+
+#ifdef DEFAULT_TZASC_CONFIG
+/*
+ * Typical Memory map of DRAM0
+ *    |-----------NXP_NS_DRAM_ADDR ( = NXP_DRAM0_ADDR)----------|
+ *    |								|
+ *    |								|
+ *    |			Non-SECURE REGION			|
+ *    |								|
+ *    |								|
+ *    |								|
+ *    |------- (NXP_NS_DRAM_ADDR + NXP_NS_DRAM_SIZE - 1) -------|
+ *    |-----------------NXP_SECURE_DRAM_ADDR--------------------|
+ *    |								|
+ *    |								|
+ *    |								|
+ *    |			SECURE REGION (= 64MB)			|
+ *    |								|
+ *    |								|
+ *    |								|
+ *    |--- (NXP_SECURE_DRAM_ADDR + NXP_SECURE_DRAM_SIZE - 1)----|
+ *    |-----------------NXP_SP_SHRD_DRAM_ADDR-------------------|
+ *    |								|
+ *    |	       Secure EL1 Payload SHARED REGION (= 2MB)         |
+ *    |								|
+ *    |-----------(NXP_DRAM0_ADDR + NXP_DRAM0_SIZE - 1)---------|
+ *
+ *
+ *
+ * Typical Memory map of DRAM1
+ *    |---------------------NXP_DRAM1_ADDR----------------------|
+ *    |								|
+ *    |								|
+ *    |			Non-SECURE REGION			|
+ *    |								|
+ *    |								|
+ *    |---(NXP_DRAM1_ADDR + Dynamically calculated Size - 1) ---|
+ *
+ *
+ * Typical Memory map of DRAM2
+ *    |---------------------NXP_DRAM2_ADDR----------------------|
+ *    |								|
+ *    |								|
+ *    |			Non-SECURE REGION			|
+ *    |								|
+ *    |								|
+ *    |---(NXP_DRAM2_ADDR + Dynamically calculated Size - 1) ---|
+ */
+
+/*****************************************************************************
+ * This function sets up access permissions on memory regions
+ *
+ * Input:
+ *	tzc400_reg_list	: TZC400 Region List
+ *	dram_idx	: DRAM index
+ *	list_idx	: TZC400 Region List Index
+ *	dram_start_addr	: Start address of DRAM at dram_idx.
+ *	dram_size	: Size of DRAM at dram_idx.
+ *	secure_dram_sz	: Secure DRAM Size
+ *	shrd_dram_sz	: Shared DRAM Size
+ *
+ * Out:
+ *	list_idx	: last populated index + 1
+ *
+ ****************************************************************************/
+int populate_tzc400_reg_list(struct tzc400_reg *tzc400_reg_list,
+			     int dram_idx, int list_idx,
+			     uint64_t dram_start_addr,
+			     uint64_t dram_size,
+			     uint32_t secure_dram_sz,
+			     uint32_t shrd_dram_sz)
+{
+	if (list_idx == 0) {
+		/* No need to configure TZC Region 0 in this list.
+		 */
+		list_idx++;
+	}
+	/* Continue with list entries for index > 0 */
+	if (dram_idx == 0) {
+		/* TZC Region 1 on DRAM0 for Secure Memory*/
+		tzc400_reg_list[list_idx].reg_filter_en = 1;
+		tzc400_reg_list[list_idx].start_addr = dram_start_addr + dram_size;
+		tzc400_reg_list[list_idx].end_addr = dram_start_addr + dram_size
+						+ secure_dram_sz - 1;
+		tzc400_reg_list[list_idx].sec_attr = TZC_REGION_S_RDWR;
+		tzc400_reg_list[list_idx].nsaid_permissions = TZC_REGION_NS_NONE;
+		list_idx++;
+
+		/* TZC Region 2 on DRAM0 for Shared Memory*/
+		tzc400_reg_list[list_idx].reg_filter_en = 1;
+		tzc400_reg_list[list_idx].start_addr = dram_start_addr + dram_size
+							+ secure_dram_sz;
+		tzc400_reg_list[list_idx].end_addr = dram_start_addr + dram_size
+							+ secure_dram_sz
+							+ shrd_dram_sz
+							- 1;
+		tzc400_reg_list[list_idx].sec_attr = TZC_REGION_S_RDWR;
+		tzc400_reg_list[list_idx].nsaid_permissions = TZC_NS_ACCESS_ID;
+		list_idx++;
+
+		/* TZC Region 3 on DRAM0 for Non-Secure Memory*/
+		tzc400_reg_list[list_idx].reg_filter_en = 1;
+		tzc400_reg_list[list_idx].start_addr = dram_start_addr;
+		tzc400_reg_list[list_idx].end_addr = dram_start_addr + dram_size
+							- 1;
+		tzc400_reg_list[list_idx].sec_attr = TZC_REGION_S_RDWR;
+		tzc400_reg_list[list_idx].nsaid_permissions = TZC_NS_ACCESS_ID;
+		list_idx++;
+	} else {
+		/* TZC Region 3+i on DRAM(> 0) for Non-Secure Memory*/
+		tzc400_reg_list[list_idx].reg_filter_en = 1;
+		tzc400_reg_list[list_idx].start_addr = dram_start_addr;
+		tzc400_reg_list[list_idx].end_addr = dram_start_addr + dram_size
+							- 1;
+		tzc400_reg_list[list_idx].sec_attr = TZC_REGION_S_RDWR;
+		tzc400_reg_list[list_idx].nsaid_permissions = TZC_NS_ACCESS_ID;
+		list_idx++;
+	}
+
+	return list_idx;
+}
+#else
+int populate_tzc400_reg_list(struct tzc400_reg *tzc400_reg_list,
+			     int dram_idx, int list_idx,
+			     uint64_t dram_start_addr,
+			     uint64_t dram_size,
+			     uint32_t secure_dram_sz,
+			     uint32_t shrd_dram_sz)
+{
+	ERROR("tzc400_reg_list used is not a default list\n");
+	ERROR("%s needs to be over-written.\n", __func__);
+	return 0;
+}
+#endif	/* DEFAULT_TZASC_CONFIG */
+
+/*******************************************************************************
+ * Configure memory access permissions
+ *   - Region 0 with no access;
+ *   - Region 1 to 4 as per the tzc400_reg_list populated by
+ *     function populate_tzc400_reg_list() with default for all the SoC.
+ ******************************************************************************/
+void mem_access_setup(uintptr_t base, uint32_t total_regions,
+		      struct tzc400_reg *tzc400_reg_list)
+{
+	uint32_t list_indx = 0U;
+
+	INFO("Configuring TrustZone Controller\n");
+
+	tzc400_init(base);
+
+	/* Disable filters. */
+	tzc400_disable_filters();
+
+	/* Region 0 set to no access by default */
+	tzc400_configure_region0(TZC_REGION_S_NONE, 0U);
+
+	for (list_indx = 1U; list_indx < total_regions; list_indx++) {
+		tzc400_configure_region(
+			tzc400_reg_list[list_indx].reg_filter_en,
+			list_indx,
+			tzc400_reg_list[list_indx].start_addr,
+			tzc400_reg_list[list_indx].end_addr,
+			tzc400_reg_list[list_indx].sec_attr,
+			tzc400_reg_list[list_indx].nsaid_permissions);
+	}
+
+	/*
+	 * Raise an exception if a NS device tries to access secure memory
+	 * TODO: Add interrupt handling support.
+	 */
+	tzc400_set_action(TZC_ACTION_ERR);
+
+	/* Enable filters. */
+	tzc400_enable_filters();
+}
diff --git a/drivers/nxp/tzc/plat_tzc400.h b/drivers/nxp/tzc/plat_tzc400.h
new file mode 100644
index 0000000..1b8e3a4
--- /dev/null
+++ b/drivers/nxp/tzc/plat_tzc400.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#if !defined(PLAT_TZC400_H) && defined(IMAGE_BL2)
+#define PLAT_TZC400_H
+
+#include <tzc400.h>
+
+/* Structure to configure TZC Regions' boundaries and attributes. */
+struct tzc400_reg {
+	uint8_t reg_filter_en;
+	unsigned long long start_addr;
+	unsigned long long end_addr;
+	unsigned int sec_attr;
+	unsigned int nsaid_permissions;
+};
+
+#define TZC_REGION_NS_NONE	0x00000000U
+
+/* NXP platforms do not support NS Access ID (NSAID) based non-secure access;
+ * non-secure access is only possible through the generic NS access ID.
+ */
+#define TZC_NS_ACCESS_ID	0xFFFFFFFFU
+
+/* The number of DRAM regions to be configured can be overridden by the
+ * platform.
+ *
+ * The tzc400_reg_list array also needs to be overridden if there are any
+ * changes to the default DRAM region configuration.
+ */
+#ifndef MAX_NUM_TZC_REGION
+/* Default layout:
+ *  Region 0 (background, no access),
+ *  Region 1 (DRAM0, Secure memory),
+ *  Region 2 (DRAM0, Shared memory),
+ *  plus one non-secure region per DRAM region.
+ */
+#define MAX_NUM_TZC_REGION	(NUM_DRAM_REGIONS + 3)
+#define DEFAULT_TZASC_CONFIG	1
+#endif
+
+void mem_access_setup(uintptr_t base, uint32_t total_regions,
+		      struct tzc400_reg *tzc400_reg_list);
+int populate_tzc400_reg_list(struct tzc400_reg *tzc400_reg_list,
+			     int dram_idx, int list_idx,
+			     uint64_t dram_start_addr,
+			     uint64_t dram_size,
+			     uint32_t secure_dram_sz,
+			     uint32_t shrd_dram_sz);
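+
+/*
+ * Usage sketch (illustrative only; the DRAM info variables and the TZC base
+ * address below are placeholders, not part of this interface):
+ *
+ *	struct tzc400_reg tzc400_reg_list[MAX_NUM_TZC_REGION];
+ *	int list_idx = 0;
+ *
+ *	for (int i = 0; i < num_dram_regions; i++) {
+ *		list_idx = populate_tzc400_reg_list(tzc400_reg_list, i,
+ *						    list_idx, dram[i].addr,
+ *						    dram[i].size,
+ *						    secure_dram_sz,
+ *						    shrd_dram_sz);
+ *	}
+ *	mem_access_setup(tzc400_base, list_idx, tzc400_reg_list);
+ */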
+
+#endif /* PLAT_TZC400_H */
diff --git a/drivers/nxp/tzc/tzc.mk b/drivers/nxp/tzc/tzc.mk
new file mode 100644
index 0000000..830d78e
--- /dev/null
+++ b/drivers/nxp/tzc/tzc.mk
@@ -0,0 +1,35 @@
+#
+# Copyright 2021 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
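+# Typical platform usage (illustrative; the values shown are examples the
+# platform makefile is expected to provide):
+#   TZC_ID		:= TZC400
+#   BL2_TZASC_NEEDED	:= yes
+#   include ${PLAT_DRIVERS_PATH}/tzc/tzc.mk
+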
+ifeq (${ADD_TZASC},)
+
+ADD_TZASC		:= 1
+
+TZASC_DRIVERS_PATH	:=  ${PLAT_DRIVERS_PATH}/tzc
+
+PLAT_INCLUDES		+= -I$(TZASC_DRIVERS_PATH)
+
+ifeq ($(TZC_ID), TZC400)
+TZASC_SOURCES		+= drivers/arm/tzc/tzc400.c \
+			   $(TZASC_DRIVERS_PATH)/plat_tzc400.c
+else ifeq ($(TZC_ID), NONE)
+    $(info -> No TZC present on platform)
+else
+    $(error -> TZC type not set!)
+endif
+
+ifeq (${BL_COMM_TZASC_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${TZASC_SOURCES}
+else
+ifeq (${BL2_TZASC_NEEDED},yes)
+BL2_SOURCES		+= ${TZASC_SOURCES}
+endif
+ifeq (${BL31_TZASC_NEEDED},yes)
+BL31_SOURCES		+= ${TZASC_SOURCES}
+endif
+endif
+
+endif
diff --git a/include/common/tbbr/cot_def.h b/include/common/tbbr/cot_def.h
index 6ce7f80..800ad07 100644
--- a/include/common/tbbr/cot_def.h
+++ b/include/common/tbbr/cot_def.h
@@ -7,6 +7,10 @@
 #ifndef COT_DEF_H
 #define COT_DEF_H
 
+#ifdef MBEDTLS_CONFIG_FILE
+#include MBEDTLS_CONFIG_FILE
+#endif
+
 /* TBBR CoT definitions */
 #if defined(SPD_spmd)
 #define COT_MAX_VERIFIED_PARAMS		8
diff --git a/include/common/tbbr/tbbr_img_def.h b/include/common/tbbr/tbbr_img_def.h
index bd125e6..e1c8c29 100644
--- a/include/common/tbbr/tbbr_img_def.h
+++ b/include/common/tbbr/tbbr_img_def.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -21,9 +21,17 @@
 #define SP_PKG7_ID			(MAX_IMAGE_IDS + 8)
 #define SP_PKG8_ID			(MAX_IMAGE_IDS + 9)
 #define MAX_SP_IDS			U(8)
-#define MAX_NUMBER_IDS			(MAX_IMAGE_IDS + MAX_SP_IDS + U(2))
+#define MAX_IMG_IDS_WITH_SPMDS		(MAX_IMAGE_IDS + MAX_SP_IDS + U(2))
 #else
-#define MAX_NUMBER_IDS			MAX_IMAGE_IDS
+#define MAX_IMG_IDS_WITH_SPMDS		MAX_IMAGE_IDS
+#endif
+
+#ifdef PLAT_TBBR_IMG_DEF
+#include <plat_tbbr_img_def.h>
+#endif
+
+#ifndef MAX_NUMBER_IDS
+#define MAX_NUMBER_IDS			MAX_IMG_IDS_WITH_SPMDS
 #endif
 
 #endif /* TBBR_IMG_DEF_H */
diff --git a/include/drivers/arm/tzc400.h b/include/drivers/arm/tzc400.h
index cf2e82b..aacd5df 100644
--- a/include/drivers/arm/tzc400.h
+++ b/include/drivers/arm/tzc400.h
@@ -65,8 +65,8 @@
 #define FAIL_CONTROL_NS_SECURE			U(0)
 #define FAIL_CONTROL_NS_NONSECURE		U(1)
 #define FAIL_CONTROL_PRIV_SHIFT			20
-#define FAIL_CONTROL_PRIV_PRIV			U(0)
-#define FAIL_CONTROL_PRIV_UNPRIV		U(1)
+#define FAIL_CONTROL_PRIV_UNPRIV		U(0)
+#define FAIL_CONTROL_PRIV_PRIV			U(1)
 
 /*
  * FAIL_ID_ID_MASK depends on AID_WIDTH which is platform specific.
diff --git a/include/drivers/nxp/flexspi/flash_info.h b/include/drivers/nxp/flexspi/flash_info.h
new file mode 100644
index 0000000..6df79c9
--- /dev/null
+++ b/include/drivers/nxp/flexspi/flash_info.h
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ *  Copyright 2020 NXP
+ */
+
+/*
+ * Flash device information: geometry and erase sizes for the supported
+ * FlexSPI NOR flash parts.
+ */
+#ifndef FLASH_INFO_H
+#define FLASH_INFO_H
+
+#define SZ_16M_BYTES			0x1000000U
+
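+/* Exactly one CONFIG_<flash part> macro below is expected to be defined by
+ * the platform build; it selects the page/sector geometry. When
+ * CONFIG_FSPI_4K_ERASE is also defined, 4KB sector erase is used instead of
+ * the part's default erase size.
+ */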
+#if defined(CONFIG_MT25QU512A)
+#define F_SECTOR_64K			0x10000U
+#define F_PAGE_256			0x100U
+#define F_SECTOR_4K			0x1000U
+#define F_FLASH_SIZE_BYTES		0x4000000U
+#ifdef CONFIG_FSPI_4K_ERASE
+#define F_SECTOR_ERASE_SZ		F_SECTOR_4K
+#else
+#define F_SECTOR_ERASE_SZ		F_SECTOR_64K
+#endif
+
+#elif defined(CONFIG_MX25U25645G)
+#define F_SECTOR_64K			0x10000U
+#define F_PAGE_256			0x100U
+#define F_SECTOR_4K			0x1000U
+#define F_FLASH_SIZE_BYTES		0x2000000U
+#ifdef CONFIG_FSPI_4K_ERASE
+#define F_SECTOR_ERASE_SZ		F_SECTOR_4K
+#else
+#define F_SECTOR_ERASE_SZ		F_SECTOR_64K
+#endif
+
+#elif defined(CONFIG_MX25U51245G)
+#define F_SECTOR_64K			0x10000U
+#define F_PAGE_256			0x100U
+#define F_SECTOR_4K			0x1000U
+#define F_FLASH_SIZE_BYTES		0x4000000U
+#ifdef CONFIG_FSPI_4K_ERASE
+#define F_SECTOR_ERASE_SZ		F_SECTOR_4K
+#else
+#define F_SECTOR_ERASE_SZ		F_SECTOR_64K
+#endif
+
+#elif defined(CONFIG_MT35XU512A)
+#define F_SECTOR_128K			0x20000U
+#define F_SECTOR_32K			0x8000U
+#define F_PAGE_256			0x100U
+#define F_SECTOR_4K			0x1000U
+#define F_FLASH_SIZE_BYTES		0x4000000U
+#ifdef CONFIG_FSPI_4K_ERASE
+#define F_SECTOR_ERASE_SZ		F_SECTOR_4K
+#else
+#define F_SECTOR_ERASE_SZ		F_SECTOR_128K
+#endif
+
+#ifdef NXP_WARM_BOOT
+#define FLASH_WR_COMP_WAIT_BY_NOP_COUNT	0x20000
+#endif
+
+#endif
+#endif /* FLASH_INFO_H */
diff --git a/include/drivers/nxp/flexspi/fspi_api.h b/include/drivers/nxp/flexspi/fspi_api.h
new file mode 100644
index 0000000..d0de543
--- /dev/null
+++ b/include/drivers/nxp/flexspi/fspi_api.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+
+/*!
+ * @file	fspi_api.h
+ * @brief	This file contains the FlexSPI/FSPI API used to communicate
+ *		with the attached slave device.
+ * @addtogroup	FSPI_API
+ * @{
+ */
+
+#ifndef FSPI_API_H
+#define FSPI_API_H
+
+#if DEBUG_FLEXSPI
+#define SZ_57M			0x3900000u
+#endif
+
+/*!
+ * Basic set of APIs.
+ */
+
+/*!
+ * @details AHB read or IP read; the choice is made internally by the API.
+ * Minimum read size = 1 byte.
+ * @param[in] src_off source offset in flash to read data from
+ * @param[out] des destination location where data needs to be copied
+ * @param[in] len length in bytes, where 1 word = 4 bytes (32 bits)
+ *
+ * @return XSPI_SUCCESS or error code
+ */
+int xspi_read(uint32_t src_off, uint32_t *des, uint32_t len);
+/*!
+ * @details Sector erase; the minimum erase size is
+ * 256KB (0x40000), 128KB (0x20000), 64KB (0x10000) or 4KB (0x1000),
+ * depending on the flash. Calls xspi_wren() internally.
+ * @param[in] erase_offset erase location on flash, needs to be a multiple
+ * of the sector size (0x40000/0x20000/0x10000)
+ * @param[in] erase_len length in bytes, e.g. 0x100000 for 1MB; the minimum
+ * erase size is one sector (0x40000/0x20000/0x10000)
+ *
+ * @return XSPI_SUCCESS or error code
+ */
+int xspi_sector_erase(uint32_t erase_offset, uint32_t erase_len);
+/*!
+ * @details IP write, for writing data to flash; calls xspi_wren() internally.
+ * A single or multiple page write can start at any offset, but performance
+ * will be low due to an erratum.
+ * @param[in] dst_off destination location on flash where data needs to
+ * be written
+ * @param[in] src source buffer from which data is read
+ * @param[in] len length in bytes, where 1 word = 4 bytes (32 bits)
+ *
+ * @return XSPI_SUCCESS or error code
+ */
+int xspi_write(uint32_t dst_off, void *src, uint32_t len);
+/*!
+ * @details fspi_init, initialization function.
+ * @param[in] base_reg_addr base address of the FlexSPI controller registers
+ * @param[in] flash_start_addr start (base) address of the flash
+ *
+ * @return XSPI_SUCCESS or error code
+ */
+int fspi_init(uint32_t base_reg_addr, uint32_t flash_start_addr);
+/*!
+ * @details is_flash_busy, check if any erase, write or lock operation is
+ * pending on the flash/slave
+ * @param[in] void
+ *
+ * @return TRUE/FALSE
+ */
+bool is_flash_busy(void);
+
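+/*
+ * Typical call flow (illustrative sketch; the register base, the offsets and
+ * the buffer below are assumptions, not fixed by this API):
+ *
+ *	uint32_t buf[64];
+ *
+ *	if (fspi_init(flexspi_reg_base, flash_base_addr) != XSPI_SUCCESS)
+ *		return -1;
+ *	xspi_sector_erase(0x0, F_SECTOR_ERASE_SZ);
+ *	while (is_flash_busy())
+ *		;
+ *	xspi_write(0x0, buf, sizeof(buf));
+ *	xspi_read(0x0, buf, sizeof(buf));
+ */
+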
+/*!
+ * Advanced set of APIs.
+ */
+
+/*!
+ * @details Write enable, to be used by advanced users only.
+ * Step 1 for sending write commands to flash.
+ * @param[in] dst_off destination offset where data will be written
+ *
+ * @return XSPI_SUCCESS or error code
+ */
+int xspi_wren(uint32_t dst_off);
+/*!
+ * @details AHB read, meaning direct memory-mapped access to flash.
+ * Minimum read size = 1 byte.
+ * @param[in] src_off source offset in flash to read data from,
+ * needs to be word aligned
+ * @param[out] des destination location where data needs to be copied
+ * @param[in] len length in bytes, where 1 word = 4 bytes (32 bits)
+ *
+ * @return XSPI_SUCCESS or error code
+ */
+int xspi_ahb_read(uint32_t src_off, uint32_t *des, uint32_t len);
+/*!
+ * @details IP read, read from flash via the RX buffer; minimum read size = 1 byte
+ * @param[in] src_off source offset in flash to read data from
+ * @param[out] des destination location where data needs to be copied
+ * @param[in] len length in bytes, where 1 word = 4 bytes (32 bits)
+ *
+ * @return XSPI_SUCCESS or error code
+ */
+int xspi_ip_read(uint32_t src_off, uint32_t *des, uint32_t len);
+/*!
+ * @details Chip erase, erase the complete chip in one go
+ *
+ * @return XSPI_SUCCESS or error code
+ */
+int xspi_bulk_erase(void);
+
+/*!
+ * Test hook to confirm flash read/erase/write functionality.
+ */
+void fspi_test(uint32_t fspi_test_addr, uint32_t size, int extra);
+#endif /* FSPI_API_H */
diff --git a/include/drivers/nxp/flexspi/xspi_error_codes.h b/include/drivers/nxp/flexspi/xspi_error_codes.h
new file mode 100644
index 0000000..18b31eb
--- /dev/null
+++ b/include/drivers/nxp/flexspi/xspi_error_codes.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+/* error codes */
+#ifndef XSPI_ERROR_CODES_H
+#define XSPI_ERROR_CODES_H
+
+#include <errno.h>
+
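+/* Driver status codes start just above ELAST so that they never overlap the
+ * standard errno values.
+ */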
+typedef enum {
+	XSPI_SUCCESS                     = 0,
+	XSPI_READ_FAIL			 = ELAST + 1,
+	XSPI_ERASE_FAIL,
+	XSPI_IP_READ_FAIL,
+	XSPI_AHB_READ_FAIL,
+	XSPI_IP_WRITE_FAIL,
+	XSPI_AHB_WRITE_FAIL,
+	XSPI_BLOCK_TIMEOUT,
+	XSPI_UNALIGN_ADDR,
+	XSPI_UNALIGN_SIZE,
+} XSPI_STATUS_CODES;
+#undef ELAST
+#define ELAST XSPI_UNALIGN_SIZE
+#endif
diff --git a/include/drivers/nxp/smmu/nxp_smmu.h b/include/drivers/nxp/smmu/nxp_smmu.h
new file mode 100644
index 0000000..d64c33b
--- /dev/null
+++ b/include/drivers/nxp/smmu/nxp_smmu.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef NXP_SMMU_H
+#define NXP_SMMU_H
+
+#define SMMU_SCR0		(0x0)
+#define SMMU_NSCR0		(0x400)
+
+#define SCR0_CLIENTPD_MASK	0x00000001
+#define SCR0_USFCFG_MASK	0x00000400
+
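+/*
+ * Put the SMMU into bypass: setting CLIENTPD disables client-port translation
+ * and clearing USFCFG lets unmatched stream IDs bypass instead of faulting.
+ * Both the secure (SCR0) and non-secure (NSCR0) global registers are updated.
+ */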
+static inline void bypass_smmu(uintptr_t smmu_base_addr)
+{
+	uint32_t val;
+
+	val = (mmio_read_32(smmu_base_addr + SMMU_SCR0) | SCR0_CLIENTPD_MASK) &
+		~(SCR0_USFCFG_MASK);
+	mmio_write_32((smmu_base_addr + SMMU_SCR0), val);
+
+	val = (mmio_read_32(smmu_base_addr + SMMU_NSCR0) | SCR0_CLIENTPD_MASK) &
+		~(SCR0_USFCFG_MASK);
+	mmio_write_32((smmu_base_addr + SMMU_NSCR0), val);
+}
+
+#endif
diff --git a/include/lib/cpus/aarch64/cortex_matterhorn_elp_arm.h b/include/lib/cpus/aarch64/cortex_matterhorn_elp_arm.h
new file mode 100644
index 0000000..309578e
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_matterhorn_elp_arm.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2021, ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_MATTERHORN_ELP_ARM_H
+#define CORTEX_MATTERHORN_ELP_ARM_H
+
+#define CORTEX_MATTERHORN_ELP_ARM_MIDR					U(0x410FD480)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_MATTERHORN_ELP_ARM_CPUECTLR_EL1				S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_MATTERHORN_ELP_ARM_CPUPWRCTLR_EL1			S3_0_C15_C2_7
+#define CORTEX_MATTERHORN_ELP_ARM_CPUPWRCTLR_EL1_CORE_PWRDN_BIT		U(1)
+
+#endif /* CORTEX_MATTERHORN_ELP_ARM_H */
diff --git a/include/lib/utils_def.h b/include/lib/utils_def.h
index 2d0e9c0..7a7012d 100644
--- a/include/lib/utils_def.h
+++ b/include/lib/utils_def.h
@@ -163,4 +163,9 @@
  */
 #define MHZ_TICKS_PER_SEC	U(1000000)
 
+/*
+ * Ticks elapsed in one second with a signal of 1 KHz
+ */
+#define KHZ_TICKS_PER_SEC U(1000)
+
 #endif /* UTILS_DEF_H */
diff --git a/include/services/ffa_svc.h b/include/services/ffa_svc.h
index 0513eab..ec75bc9 100644
--- a/include/services/ffa_svc.h
+++ b/include/services/ffa_svc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -22,7 +22,7 @@
 
 /* The macros below are used to identify FFA calls from the SMC function ID */
 #define FFA_FNUM_MIN_VALUE	U(0x60)
-#define FFA_FNUM_MAX_VALUE	U(0x7f)
+#define FFA_FNUM_MAX_VALUE	U(0x84)
 #define is_ffa_fid(fid) __extension__ ({		\
 	__typeof__(fid) _fid = (fid);			\
 	((GET_SMC_NUM(_fid) >= FFA_FNUM_MIN_VALUE) &&	\
@@ -85,6 +85,7 @@
 #define FFA_FNUM_MEM_RETRIEVE_RESP	U(0x75)
 #define FFA_FNUM_MEM_RELINQUISH	U(0x76)
 #define FFA_FNUM_MEM_RECLAIM		U(0x77)
+#define FFA_FNUM_SECONDARY_EP_REGISTER	U(0x84)
 
 /* FFA SMC32 FIDs */
 #define FFA_ERROR		FFA_FID(SMC_32, FFA_FNUM_ERROR)
@@ -116,6 +117,7 @@
 #define FFA_MEM_RECLAIM	FFA_FID(SMC_32, FFA_FNUM_MEM_RECLAIM)
 
 /* FFA SMC64 FIDs */
+#define FFA_ERROR_SMC64		FFA_FID(SMC_64, FFA_FNUM_ERROR)
 #define FFA_SUCCESS_SMC64	FFA_FID(SMC_64, FFA_FNUM_SUCCESS)
 #define FFA_RXTX_MAP_SMC64	FFA_FID(SMC_64, FFA_FNUM_RXTX_MAP)
 #define FFA_MSG_SEND_DIRECT_REQ_SMC64 \
@@ -127,6 +129,8 @@
 #define FFA_MEM_SHARE_SMC64	FFA_FID(SMC_64, FFA_FNUM_MEM_SHARE)
 #define FFA_MEM_RETRIEVE_REQ_SMC64 \
 	FFA_FID(SMC_64, FFA_FNUM_MEM_RETRIEVE_REQ)
+#define FFA_SECONDARY_EP_REGISTER_SMC64 \
+	FFA_FID(SMC_64, FFA_FNUM_SECONDARY_EP_REGISTER)
 
 /*
  * Reserve a special value for traffic targeted to the Hypervisor or SPM.
diff --git a/include/tools_share/firmware_image_package.h b/include/tools_share/firmware_image_package.h
index bcde04f..dc65cc6 100644
--- a/include/tools_share/firmware_image_package.h
+++ b/include/tools_share/firmware_image_package.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -82,6 +82,10 @@
 #define UUID_FW_CONFIG \
 	{{0x58,  0x07, 0xe1, 0x6a}, {0x84, 0x59}, {0x47, 0xbe}, 0x8e, 0xd5, {0x64, 0x8e, 0x8d, 0xdd, 0xab, 0x0e} }
 
+#ifdef PLAT_DEF_FIP_UUID
+#include <plat_def_fip_uuid.h>
+#endif
+
 typedef struct fip_toc_header {
 	uint32_t	name;
 	uint32_t	serial_number;
diff --git a/include/tools_share/tbbr_oid.h b/include/tools_share/tbbr_oid.h
index c789f79..52b43ab 100644
--- a/include/tools_share/tbbr_oid.h
+++ b/include/tools_share/tbbr_oid.h
@@ -160,4 +160,7 @@
 #define SP_PKG7_HASH_OID			"1.3.6.1.4.1.4128.2100.1307"
 #define SP_PKG8_HASH_OID			"1.3.6.1.4.1.4128.2100.1308"
 
+#ifdef PLAT_DEF_OID
+#include <platform_oid.h>
+#endif
 #endif /* TBBR_OID_H */
diff --git a/include/tools_share/uuid.h b/include/tools_share/uuid.h
index 36be9ed..a6891d1 100644
--- a/include/tools_share/uuid.h
+++ b/include/tools_share/uuid.h
@@ -56,8 +56,16 @@
 	uint8_t		node[_UUID_NODE_LEN];
 };
 
+struct efi_guid {
+	uint32_t time_low;
+	uint16_t time_mid;
+	uint16_t time_hi_and_version;
+	uint8_t clock_seq_and_node[8];
+};
+
 union uuid_helper_t {
 	struct uuid uuid_struct;
+	struct efi_guid efi_guid;
 	uint32_t word[4];
 };
 
diff --git a/lib/cpus/aarch64/cortex_matterhorn_elp_arm.S b/lib/cpus/aarch64/cortex_matterhorn_elp_arm.S
new file mode 100644
index 0000000..b0f81a2
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_matterhorn_elp_arm.S
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2021, ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_matterhorn_elp_arm.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex Matterhorn ELP ARM must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex Matterhorn ELP ARM supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+	/* ----------------------------------------------------
+	 * HW will do the cache maintenance while powering down
+	 * ----------------------------------------------------
+	 */
+func cortex_matterhorn_elp_arm_core_pwr_dwn
+	/* ---------------------------------------------------
+	 * Enable CPU power down bit in power control register
+	 * ---------------------------------------------------
+	 */
+	mrs	x0, CORTEX_MATTERHORN_ELP_ARM_CPUPWRCTLR_EL1
+	orr	x0, x0, #CORTEX_MATTERHORN_ELP_ARM_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+	msr	CORTEX_MATTERHORN_ELP_ARM_CPUPWRCTLR_EL1, x0
+	isb
+	ret
+endfunc cortex_matterhorn_elp_arm_core_pwr_dwn
+
+	/*
+	 * Errata printing function for Cortex Matterhorn_elp_arm. Must follow AAPCS.
+	 */
+#if REPORT_ERRATA
+func cortex_matterhorn_elp_arm_errata_report
+	ret
+endfunc cortex_matterhorn_elp_arm_errata_report
+#endif
+
+func cortex_matterhorn_elp_arm_reset_func
+	/* Disable speculative loads */
+	msr	SSBS, xzr
+	isb
+	ret
+endfunc cortex_matterhorn_elp_arm_reset_func
+
+	/* ---------------------------------------------
+	 * This function provides Cortex-Matterhorn_elp_arm specific
+	 * register information for crash reporting.
+	 * It needs to return with x6 pointing to
+	 * a list of register names in ascii and
+	 * x8 - x15 having values of registers to be
+	 * reported.
+	 * ---------------------------------------------
+	 */
+.section .rodata.cortex_matterhorn_elp_arm_regs, "aS"
+cortex_matterhorn_elp_arm_regs:  /* The ascii list of register names to be reported */
+	.asciz	"cpuectlr_el1", ""
+
+func cortex_matterhorn_elp_arm_cpu_reg_dump
+	adr	x6, cortex_matterhorn_elp_arm_regs
+	mrs	x8, CORTEX_MATTERHORN_ELP_ARM_CPUECTLR_EL1
+	ret
+endfunc cortex_matterhorn_elp_arm_cpu_reg_dump
+
+declare_cpu_ops cortex_matterhorn_elp_arm, CORTEX_MATTERHORN_ELP_ARM_MIDR, \
+	cortex_matterhorn_elp_arm_reset_func, \
+	cortex_matterhorn_elp_arm_core_pwr_dwn
diff --git a/make_helpers/tbbr/tbbr_tools.mk b/make_helpers/tbbr/tbbr_tools.mk
index 853ad11..f7cced4 100644
--- a/make_helpers/tbbr/tbbr_tools.mk
+++ b/make_helpers/tbbr/tbbr_tools.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -33,7 +33,7 @@
 #
 
 # Certificate generation tool default parameters
-TRUSTED_KEY_CERT	:=	${BUILD_PLAT}/trusted_key.crt
+TRUSTED_KEY_CERT	?=	${BUILD_PLAT}/trusted_key.crt
 FWU_CERT		:=	${BUILD_PLAT}/fwu_cert.crt
 
 # Default non-volatile counter values (overridable by the platform)
diff --git a/plat/allwinner/common/sunxi_cpu_ops.c b/plat/allwinner/common/sunxi_cpu_ops.c
index cbad720..43c03ac 100644
--- a/plat/allwinner/common/sunxi_cpu_ops.c
+++ b/plat/allwinner/common/sunxi_cpu_ops.c
@@ -15,11 +15,14 @@
 #include <lib/utils_def.h>
 #include <plat/common/platform.h>
 
-#include <core_off_arisc.h>
 #include <sunxi_cpucfg.h>
 #include <sunxi_mmap.h>
 #include <sunxi_private.h>
 
+#ifndef SUNXI_CPUIDLE_EN_REG
+#include <core_off_arisc.h>
+#endif
+
 static void sunxi_cpu_disable_power(unsigned int cluster, unsigned int core)
 {
 	if (mmio_read_32(SUNXI_CPU_POWER_CLAMP_REG(cluster, core)) == 0xff)
@@ -72,6 +75,14 @@
 	/* Simplifies assembly, all SoCs so far are single cluster anyway. */
 	assert(MPIDR_AFFLVL1_VAL(mpidr) == 0);
 
+#ifdef SUNXI_CPUIDLE_EN_REG
+	/* Enable the CPUIDLE hardware (only really needs to be done once). */
+	mmio_write_32(SUNXI_CPUIDLE_EN_REG, 0x16aa0000);
+	mmio_write_32(SUNXI_CPUIDLE_EN_REG, 0xaa160001);
+
+	/* Trigger power off for this core. */
+	mmio_write_32(SUNXI_CORE_CLOSE_REG, BIT_32(core));
+#else
 	/*
 	 * If we are supposed to turn ourself off, tell the arisc SCP
 	 * to do that work for us. The code expects the core mask to be
@@ -79,6 +90,7 @@
 	 */
 	sunxi_execute_arisc_code(arisc_core_off, sizeof(arisc_core_off),
 				 BIT_32(core));
+#endif
 }
 
 void sunxi_cpu_on(u_register_t mpidr)
diff --git a/plat/allwinner/sun50i_a64/platform.mk b/plat/allwinner/sun50i_a64/platform.mk
index f6d5aa9..5f41035 100644
--- a/plat/allwinner/sun50i_a64/platform.mk
+++ b/plat/allwinner/sun50i_a64/platform.mk
@@ -9,3 +9,6 @@
 
 BL31_SOURCES		+=	drivers/allwinner/axp/axp803.c		\
 				drivers/allwinner/sunxi_rsb.c
+
+FDT_ASSUME_MASK := "(ASSUME_LATEST | ASSUME_NO_ROLLBACK | ASSUME_LIBFDT_ORDER)"
+$(eval $(call add_define,FDT_ASSUME_MASK))
diff --git a/plat/allwinner/sun50i_h6/include/sunxi_cpucfg.h b/plat/allwinner/sun50i_h6/include/sunxi_cpucfg.h
index 556fb97..a2b94af 100644
--- a/plat/allwinner/sun50i_h6/include/sunxi_cpucfg.h
+++ b/plat/allwinner/sun50i_h6/include/sunxi_cpucfg.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -24,4 +24,9 @@
 #define SUNXI_CPU_POWER_CLAMP_REG(c, n)	(SUNXI_R_CPUCFG_BASE + 0x0050 + \
 					(c) * 0x10 + (n) * 4)
 
+#define SUNXI_CPUIDLE_EN_REG		(SUNXI_R_CPUCFG_BASE + 0x0100)
+#define SUNXI_CORE_CLOSE_REG		(SUNXI_R_CPUCFG_BASE + 0x0104)
+#define SUNXI_PWR_SW_DELAY_REG		(SUNXI_R_CPUCFG_BASE + 0x0140)
+#define SUNXI_CONFIG_DELAY_REG		(SUNXI_R_CPUCFG_BASE + 0x0144)
+
 #endif /* SUNXI_CPUCFG_H */
diff --git a/plat/arm/board/fvp/fdts/fvp_spmc_manifest.dts b/plat/arm/board/fvp/fdts/fvp_spmc_manifest.dts
index f4805db..4838396 100644
--- a/plat/arm/board/fvp/fdts/fvp_spmc_manifest.dts
+++ b/plat/arm/board/fvp/fdts/fvp_spmc_manifest.dts
@@ -47,7 +47,7 @@
 			is_ffa_partition;
 			debug_name = "cactus-tertiary";
 			load_address = <0x7200000>;
-			vcpu_count = <8>;
+			vcpu_count = <1>;
 			mem_size = <1048576>;
 		};
 	};
diff --git a/plat/arm/board/tc0/platform.mk b/plat/arm/board/tc0/platform.mk
index 393d09c..20ea6e3 100644
--- a/plat/arm/board/tc0/platform.mk
+++ b/plat/arm/board/tc0/platform.mk
@@ -1,4 +1,4 @@
-# Copyright (c) 2020, Arm Limited. All rights reserved.
+# Copyright (c) 2020-2021, Arm Limited. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -44,7 +44,8 @@
 PLAT_INCLUDES		+=	-I${TC0_BASE}/include/
 
 TC0_CPU_SOURCES	:=	lib/cpus/aarch64/cortex_klein.S         \
-			lib/cpus/aarch64/cortex_matterhorn.S
+			lib/cpus/aarch64/cortex_matterhorn.S \
+			lib/cpus/aarch64/cortex_matterhorn_elp_arm.S
 
 INTERCONNECT_SOURCES	:=	${TC0_BASE}/tc0_interconnect.c
 
diff --git a/plat/arm/common/arm_bl31_setup.c b/plat/arm/common/arm_bl31_setup.c
index 81ef6e7..6dd4587 100644
--- a/plat/arm/common/arm_bl31_setup.c
+++ b/plat/arm/common/arm_bl31_setup.c
@@ -156,19 +156,6 @@
 	bl33_image_ep_info.args.arg0 = (u_register_t)ARM_DRAM1_BASE;
 #endif
 
-# if ARM_LINUX_KERNEL_AS_BL33
-	/*
-	 * According to the file ``Documentation/arm64/booting.txt`` of the
-	 * Linux kernel tree, Linux expects the physical address of the device
-	 * tree blob (DTB) in x0, while x1-x3 are reserved for future use and
-	 * must be 0.
-	 */
-	bl33_image_ep_info.args.arg0 = (u_register_t)ARM_PRELOADED_DTB_BASE;
-	bl33_image_ep_info.args.arg1 = 0U;
-	bl33_image_ep_info.args.arg2 = 0U;
-	bl33_image_ep_info.args.arg3 = 0U;
-# endif
-
 #else /* RESET_TO_BL31 */
 
 	/*
@@ -206,6 +193,19 @@
 	if (bl33_image_ep_info.pc == 0U)
 		panic();
 #endif /* RESET_TO_BL31 */
+
+# if ARM_LINUX_KERNEL_AS_BL33
+	/*
+	 * According to the file ``Documentation/arm64/booting.txt`` of the
+	 * Linux kernel tree, Linux expects the physical address of the device
+	 * tree blob (DTB) in x0, while x1-x3 are reserved for future use and
+	 * must be 0.
+	 */
+	bl33_image_ep_info.args.arg0 = (u_register_t)ARM_PRELOADED_DTB_BASE;
+	bl33_image_ep_info.args.arg1 = 0U;
+	bl33_image_ep_info.args.arg2 = 0U;
+	bl33_image_ep_info.args.arg3 = 0U;
+# endif
 }
 
 void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk
index 74afc53..a225b40 100644
--- a/plat/arm/common/arm_common.mk
+++ b/plat/arm/common/arm_common.mk
@@ -86,11 +86,7 @@
 $(eval $(call add_define,ARM_LINUX_KERNEL_AS_BL33))
 
 ifeq (${ARM_LINUX_KERNEL_AS_BL33},1)
-  ifeq (${ARCH},aarch64)
-    ifneq (${RESET_TO_BL31},1)
-      $(error "ARM_LINUX_KERNEL_AS_BL33 is only available if RESET_TO_BL31=1.")
-    endif
-  else
+  ifneq (${ARCH},aarch64)
     ifneq (${RESET_TO_SP_MIN},1)
       $(error "ARM_LINUX_KERNEL_AS_BL33 is only available if RESET_TO_SP_MIN=1.")
     endif
diff --git a/plat/nxp/common/aarch64/bl31_data.S b/plat/nxp/common/aarch64/bl31_data.S
new file mode 100644
index 0000000..cc91540
--- /dev/null
+++ b/plat/nxp/common/aarch64/bl31_data.S
@@ -0,0 +1,558 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <asm_macros.S>
+
+#include "bl31_data.h"
+#include "plat_psci.h"
+#include "platform_def.h"
+
+.global _getCoreData
+.global _setCoreData
+.global _getCoreState
+.global _setCoreState
+.global _init_global_data
+.global _get_global_data
+.global _set_global_data
+.global _initialize_psci
+.global _init_task_flags
+.global _set_task1_start
+.global _set_task1_done
+
+
+/* Function returns the specified data field value from the specified cpu
+ * core data area
+ * in:  x0 = core mask lsb
+ *	x1 = data field name/offset
+ * out: x0 = data value
+ * uses x0, x1, x2, [x13, x14, x15]
+ */
+func _getCoreData
+
+	/* generate a 0-based core number from the input mask */
+	clz   x2, x0
+	mov   x0, #63
+	sub   x0, x0, x2
+
+	/* x0 = core number (0-based) */
+	/* x1 = field offset */
+
+	/* determine if this is bootcore or secondary core */
+	cbnz  x0, 1f
+
+	/* get base address for bootcore data */
+	ldr  x2, =BC_PSCI_BASE
+	add  x2, x2, x1
+	b	2f
+
+1:	/* get base address for secondary core data */
+
+	/* x0 = core number (0-based) */
+	/* x1 = field offset */
+
+	/* generate number of regions to offset */
+	mov   x2, #SEC_REGION_SIZE
+	mul   x2, x2, x0
+
+	/* x1 = field offset */
+	/* x2 = region offset */
+
+	/* generate the total offset to data element */
+	sub   x1, x2, x1
+
+	/* x1 = total offset to data element */
+
+	/* get the base address */
+	ldr   x2, =SECONDARY_TOP
+
+	/* apply offset to base addr */
+	sub   x2, x2, x1
+2:
+	/* x2 = data element address */
+
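+	/* Invalidate the line first so the load below observes the copy in
+	 * memory, e.g. an update made by a core running with its D-cache
+	 * disabled.
+	 */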
+	dc   ivac, x2
+	dsb  sy
+	isb
+	/* read data */
+	ldr  x0, [x2]
+
+	ret
+endfunc _getCoreData
+
+
+/* Function returns the SoC-specific state of the specified cpu
+ * in:  x0 = core mask lsb
+ * out: x0 = data value
+ * uses x0, x1, x2, [x13, x14, x15]
+ */
+func _getCoreState
+
+	mov   x1, #CORE_STATE_DATA
+
+	/* generate a 0-based core number from the input mask */
+	clz   x2, x0
+	mov   x0, #63
+	sub   x0, x0, x2
+
+	/* x0 = core number (0-based) */
+	/* x1 = field offset */
+
+	/* determine if this is bootcore or secondary core */
+	cbnz  x0, 1f
+
+	/* get base address for bootcore data */
+	ldr  x2, =BC_PSCI_BASE
+	add  x2, x2, x1
+	b	2f
+
+1:	/* get base address for secondary core data */
+
+	/* x0 = core number (0-based) */
+	/* x1 = field offset */
+
+	/* generate number of regions to offset */
+	mov   x2, #SEC_REGION_SIZE
+	mul   x2, x2, x0
+
+	/* x1 = field offset */
+	/* x2 = region offset */
+
+	/* generate the total offset to data element */
+	sub   x1, x2, x1
+
+	/* x1 = total offset to data element */
+
+	/* get the base address */
+	ldr   x2, =SECONDARY_TOP
+
+	/* apply offset to base addr */
+	sub   x2, x2, x1
+2:
+	/* x2 = data element address */
+
+	dc   ivac, x2
+	dsb  sy
+	isb
+
+	/* read data */
+	ldr  x0, [x2]
+
+	ret
+endfunc _getCoreState
+
+
+/* Function writes the specified data value into the specified cpu
+ * core data area
+ * in:  x0 = core mask lsb
+ *	  x1 = data field offset
+ *	  x2 = data value to write/store
+ * out: none
+ * uses x0, x1, x2, x3, [x13, x14, x15]
+ */
+func _setCoreData
+	/* x0 = core mask */
+	/* x1 = field offset */
+	/* x2 = data value */
+
+	clz   x3, x0
+	mov   x0, #63
+	sub   x0, x0, x3
+
+	/* x0 = core number (0-based) */
+	/* x1 = field offset */
+	/* x2 = data value */
+
+	/* determine if this is bootcore or secondary core */
+	cbnz  x0, 1f
+
+	/* get base address for bootcore data */
+	ldr  x3, =BC_PSCI_BASE
+	add  x3, x3, x1
+	b	2f
+
+1:	/* get base address for secondary core data */
+
+	/* x0 = core number (0-based) */
+	/* x1 = field offset */
+	/* x2 = data value */
+
+	/* generate number of regions to offset */
+	mov   x3, #SEC_REGION_SIZE
+	mul   x3, x3, x0
+
+	/* x1 = field offset */
+	/* x2 = data value */
+	/* x3 = region offset */
+
+	/* generate the total offset to data element */
+	sub   x1, x3, x1
+
+	/* x1 = total offset to data element */
+	/* x2 = data value */
+
+	ldr   x3, =SECONDARY_TOP
+
+	/* apply offset to base addr */
+	sub   x3, x3, x1
+
+2:
+	/* x2 = data value */
+	/* x3 = data element address */
+
+	str   x2, [x3]
+
+	dc	cvac, x3
+	dsb   sy
+	isb
+	ret
+endfunc _setCoreData
+
+
+/* Function stores the specified core state
+ * in:  x0 = core mask lsb
+ *	x1 = data value to write/store
+ * out: none
+ * uses x0, x1, x2, x3, [x13, x14, x15]
+ */
+func _setCoreState
+	mov  x2, #CORE_STATE_DATA
+
+	clz   x3, x0
+	mov   x0, #63
+	sub   x0, x0, x3
+
+	/* x0 = core number (0-based) */
+	/* x1 = data value */
+	/* x2 = field offset */
+
+	/* determine if this is bootcore or secondary core */
+	cbnz  x0, 1f
+
+	/* get base address for bootcore data */
+	ldr  x3, =BC_PSCI_BASE
+	add  x3, x3, x2
+	b	2f
+
+1:	/* get base address for secondary core data */
+
+	/* x0 = core number (0-based) */
+	/* x1 = data value */
+	/* x2 = field offset */
+
+	/* generate number of regions to offset */
+	mov   x3, #SEC_REGION_SIZE
+	mul   x3, x3, x0
+
+	/* x1 = data value */
+	/* x2 = field offset */
+	/* x3 = region offset */
+
+	/* generate the total offset to data element */
+	sub   x2, x3, x2
+
+	/* x1 = data value */
+	/* x2 = total offset to data element */
+
+	ldr   x3, =SECONDARY_TOP
+
+	/* apply offset to base addr */
+	sub   x3, x3, x2
+
+2:
+	/* x1 = data value */
+	/* x3 = data element address */
+
+	str   x1, [x3]
+
+	dc	civac, x3
+	dsb   sy
+	isb
+	ret
+endfunc _setCoreState
+
+
+/* Function sets the task1 start
+ * in:  w0 = value to set flag to
+ * out: none
+ * uses x0, x1
+ */
+func _set_task1_start
+
+	ldr  x1, =SMC_TASK1_BASE
+
+	add  x1, x1, #TSK_START_OFFSET
+	str  w0, [x1]
+	dc   cvac, x1
+	dsb  sy
+	isb
+	ret
+endfunc _set_task1_start
+
+
+/* Function sets the state of the task 1 done flag
+ * in:  w0 = value to set flag to
+ * out: none
+ * uses x0, x1
+ */
+func _set_task1_done
+
+	ldr  x1, =SMC_TASK1_BASE
+
+	add  x1, x1, #TSK_DONE_OFFSET
+	str  w0, [x1]
+	dc   cvac, x1
+	dsb  sy
+	isb
+	ret
+endfunc _set_task1_done
+
+
+/* Function initializes the smc global data entries
+ * Note: the constant LAST_SMC_GLBL_OFFSET must reference the last entry in the
+ *	   smc global region
+ * in:  none
+ * out: none
+ * uses x0, x1, x2
+ */
+func _init_global_data
+
+	ldr  x1, =SMC_GLBL_BASE
+
+	/* x1 = SMC_GLBL_BASE */
+
+	mov x2, #LAST_SMC_GLBL_OFFSET
+	add x2, x2, x1
+1:
+	str  xzr, [x1]
+	dc   cvac, x1
+	cmp  x2, x1
+	add  x1, x1, #8
+	b.hi 1b
+
+	dsb  sy
+	isb
+	ret
+endfunc _init_global_data
+
+
+/* Function gets the value of the specified global data element
+ * in:  x0 = offset of data element
+ * out: x0 = requested data element
+ * uses x0, x1
+ */
+func _get_global_data
+
+	ldr  x1, =SMC_GLBL_BASE
+	add  x1, x1, x0
+	dc   ivac, x1
+	isb
+
+	ldr  x0, [x1]
+	ret
+endfunc _get_global_data
+
+
+/* Function sets the value of the specified global data element
+ * in:  x0 = offset of data element
+ *	  x1 = value to write
+ * out: none
+ * uses x0, x1, x2
+ */
+func _set_global_data
+
+	ldr  x2, =SMC_GLBL_BASE
+	add  x0, x0, x2
+	str  x1, [x0]
+	dc   cvac, x0
+
+	dsb  sy
+	isb
+	ret
+endfunc _set_global_data
+
+
+/* Function initializes the core data areas
+ * only executed by the boot core
+ * in:   none
+ * out:  none
+ * uses: x0, x1, x2, x3, x4, x5, x6, x7, [x13, x14, x15]
+ */
+func _initialize_psci
+	mov   x7, x30
+
+	/* initialize the bootcore psci data */
+	ldr   x5, =BC_PSCI_BASE
+	mov   x6, #CORE_RELEASED
+
+	str   x6,  [x5], #8
+	dc cvac, x5
+	str   xzr, [x5], #8
+	dc cvac, x5
+	str   xzr, [x5], #8
+	dc cvac, x5
+	str   xzr, [x5], #8
+	dc cvac, x5
+	str   xzr, [x5], #8
+	dc cvac, x5
+	str   xzr, [x5], #8
+	dc cvac, x5
+	str   xzr, [x5], #8
+	dc cvac, x5
+	str   xzr, [x5], #8
+	dc cvac, x5
+	str   xzr, [x5], #8
+	dc cvac, x5
+	str   xzr, [x5], #8
+	dc cvac, x5
+	str   xzr, [x5], #8
+	dc cvac, x5
+	str   xzr, [x5], #8
+	dc cvac, x5
+	str   xzr, [x5], #8
+	dc cvac, x5
+	str   xzr, [x5], #8
+	dc cvac, x5
+	str   xzr, [x5], #8
+	dc cvac, x5
+	str   xzr, [x5]
+	dc cvac, x5
+	dsb sy
+	isb
+
+	/* see if we have any secondary cores */
+	mov   x4, #PLATFORM_CORE_COUNT
+	sub   x4, x4, #1
+	cbz   x4, 3f
+
+	/* initialize the secondary core's psci data */
+	ldr  x5, =SECONDARY_TOP
+	/* core mask lsb for core 1 */
+	mov  x3, #2
+	sub  x5, x5, #SEC_REGION_SIZE
+
+	/* x3 = core1 mask lsb */
+	/* x4 = number of secondary cores */
+	/* x5 = core1 psci data base address */
+2:
+	/* set core state in x6 */
+	mov  x0, x3
+	mov  x6, #CORE_IN_RESET
+	bl   _soc_ck_disabled
+	cbz  x0, 1f
+	mov  x6, #CORE_DISABLED
+1:
+	add   x2, x5, #CORE_STATE_DATA
+	str   x6,  [x2]
+	dc cvac, x2
+	add   x2, x5, #SPSR_EL3_DATA
+	str   xzr, [x2]
+	dc cvac, x2
+	add   x2, x5, #CNTXT_ID_DATA
+	str   xzr, [x2]
+	dc cvac, x2
+	add   x2, x5, #START_ADDR_DATA
+	str   xzr, [x2]
+	dc cvac, x2
+	add   x2, x5, #LINK_REG_DATA
+	str   xzr, [x2]
+	dc cvac, x2
+	add   x2, x5, #GICC_CTLR_DATA
+	str   xzr, [x2]
+	dc cvac, x2
+	add   x2, x5, #ABORT_FLAG_DATA
+	str   xzr, [x2]
+	dc cvac, x2
+	add   x2, x5, #SCTLR_DATA
+	str   xzr, [x2]
+	dc cvac, x2
+	add   x2, x5, #CPUECTLR_DATA
+	str   xzr, [x2]
+	dc cvac, x2
+	add   x2, x5, #AUX_01_DATA
+	str   xzr, [x2]
+	dc cvac, x2
+	add   x2, x5, #AUX_02_DATA
+	str   xzr, [x2]
+	dc cvac, x2
+	add   x2, x5, #AUX_03_DATA
+	str   xzr, [x2]
+	dc cvac, x2
+	add   x2, x5, #AUX_04_DATA
+	str   xzr, [x2]
+	dc cvac, x2
+	add   x2, x5, #AUX_05_DATA
+	str   xzr, [x2]
+	dc cvac, x2
+	add   x2, x5, #SCR_EL3_DATA
+	str   xzr, [x2]
+	dc cvac, x2
+	add   x2, x5, #HCR_EL2_DATA
+	str   xzr, [x2]
+	dc cvac, x2
+	dsb sy
+	isb
+
+	sub   x4, x4, #1
+	cbz   x4, 3f
+
+	/* generate next core mask */
+	lsl  x3, x3, #1
+
+	/* decrement base address to next data area */
+	sub  x5, x5, #SEC_REGION_SIZE
+	b	2b
+3:
+	mov   x30, x7
+	ret
+endfunc _initialize_psci
+
+
+/* Function initializes the soc init task flags
+ * in:  none
+ * out: none
+ * uses x0, x1, [x13, x14, x15]
+ */
+func _init_task_flags
+
+	/* get the base address of the first task structure */
+	ldr  x0, =SMC_TASK1_BASE
+
+	/* x0 = task1 base address */
+
+	str  wzr, [x0, #TSK_START_OFFSET]
+	str  wzr, [x0, #TSK_DONE_OFFSET]
+	str  wzr, [x0, #TSK_CORE_OFFSET]
+	dc   cvac, x0
+
+	/* move to task2 structure */
+	add  x0, x0, #SMC_TASK_OFFSET
+
+	str  wzr, [x0, #TSK_START_OFFSET]
+	str  wzr, [x0, #TSK_DONE_OFFSET]
+	str  wzr, [x0, #TSK_CORE_OFFSET]
+	dc   cvac, x0
+
+	/* move to task3 structure */
+	add  x0, x0, #SMC_TASK_OFFSET
+
+	str  wzr, [x0, #TSK_START_OFFSET]
+	str  wzr, [x0, #TSK_DONE_OFFSET]
+	str  wzr, [x0, #TSK_CORE_OFFSET]
+	dc   cvac, x0
+
+	/* move to task4 structure */
+	add  x0, x0, #SMC_TASK_OFFSET
+
+	str  wzr, [x0, #TSK_START_OFFSET]
+	str  wzr, [x0, #TSK_DONE_OFFSET]
+	str  wzr, [x0, #TSK_CORE_OFFSET]
+	dc   cvac, x0
+
+	dsb  sy
+	isb
+	ret
+endfunc _init_task_flags
diff --git a/plat/nxp/common/aarch64/ls_helpers.S b/plat/nxp/common/aarch64/ls_helpers.S
new file mode 100644
index 0000000..19ea9e5
--- /dev/null
+++ b/plat/nxp/common/aarch64/ls_helpers.S
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2018-2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <asm_macros.S>
+#include <drivers/console.h>
+#include <lib/cpus/aarch64/cortex_a72.h>
+
+#include <platform_def.h>
+
+
+	.globl	plat_crash_console_init
+	.globl	plat_crash_console_putc
+	.globl	plat_crash_console_flush
+	.globl  plat_core_pos
+	.globl  plat_my_core_pos
+	.globl  plat_core_mask
+	.globl  plat_my_core_mask
+	.globl  plat_core_pos_by_mpidr
+	.globl _disable_ldstr_pfetch_A53
+	.globl _disable_ldstr_pfetch_A72
+	.global	_set_smmu_pagesz_64
+
+	/* int plat_crash_console_init(void)
+	 * Function to initialize the crash console
+	 * without a C Runtime to print the crash report.
+	 * Clobber list : x0 - x4
+	 *
+	 * Use normal console by default. Switch it to crash
+	 * mode so serial consoles become active again.
+	 * NOTE: This default implementation will only work for
+	 * crashes that occur after a normal console (marked
+	 * valid for the crash state) has been registered with
+	 * the console framework. To debug crashes that occur
+	 * earlier, the platform has to override these functions
+	 * with an implementation that initializes a console
+	 * driver with hardcoded parameters. See
+	 * docs/porting-guide.rst for more information.
+	 */
+func plat_crash_console_init
+	mov	x3, x30
+	mov	x0, #CONSOLE_FLAG_CRASH
+	bl	console_switch_state
+	mov	x0, #1
+	ret	x3
+endfunc plat_crash_console_init
+
+	/* void plat_crash_console_putc(int character)
+	 * Output through the normal console by default.
+	 */
+func plat_crash_console_putc
+	b	console_putc
+endfunc plat_crash_console_putc
+
+	/* void plat_crash_console_flush(void)
+	 * Flush normal console by default.
+	 */
+func plat_crash_console_flush
+	b	console_flush
+endfunc plat_crash_console_flush
+
+/* This function implements a part of the critical interface between the psci
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is returned
+ * in case the MPIDR is invalid.
+ */
+func plat_core_pos_by_mpidr
+
+	b	plat_core_pos
+
+endfunc plat_core_pos_by_mpidr
+
+#if (SYMMETRICAL_CLUSTERS)
+/* unsigned int plat_my_core_mask(void)
+ *  generate a mask bit for this core
+ */
+func plat_my_core_mask
+	mrs	x0, MPIDR_EL1
+	b	plat_core_mask
+endfunc plat_my_core_mask
+
+/* unsigned int plat_core_mask(u_register_t mpidr)
+ * generate a lsb-based mask bit for the core specified by mpidr in x0.
+ *
+ * SoC core = ((cluster * cpu_per_cluster) + core)
+ * mask = (1 << SoC core)
+ */
+func plat_core_mask
+	mov	w1, wzr
+	mov	w2, wzr
+
+	/* extract cluster */
+	bfxil	w1, w0, #8, #8
+	/* extract cpu # */
+	bfxil	w2, w0, #0, #8
+
+	mov	w0, wzr
+
+	/* error checking */
+	cmp	w1, #NUMBER_OF_CLUSTERS
+	b.ge	1f
+	cmp	w2, #CORES_PER_CLUSTER
+	b.ge	1f
+
+	mov	w0, #CORES_PER_CLUSTER
+	mul	w1, w1, w0
+	add	w1, w1, w2
+	mov	w2, #0x1
+	lsl	w0, w2, w1
+1:
+	ret
+endfunc plat_core_mask
+
+/*
+ * unsigned int plat_my_core_pos(void)
+ *  generate a linear core number for this core
+ */
+func plat_my_core_pos
+	mrs	x0, MPIDR_EL1
+	b	plat_core_pos
+endfunc plat_my_core_pos
+
+/*
+ * unsigned int plat_core_pos(u_register_t mpidr)
+ * Generate a linear core number for the core specified by mpidr.
+ *
+ * SoC core = ((cluster * cpu_per_cluster) + core)
+ * Returns -1 if mpidr invalid
+ */
+func plat_core_pos
+	mov	w1, wzr
+	mov	w2, wzr
+	bfxil	w1, w0, #8, #8	/* extract cluster */
+	bfxil	w2, w0, #0, #8	/* extract cpu #   */
+
+	mov	w0, #-1
+
+	/* error checking */
+	cmp	w1, #NUMBER_OF_CLUSTERS
+	b.ge	1f
+	cmp	w2, #CORES_PER_CLUSTER
+	b.ge	1f
+
+	mov	w0, #CORES_PER_CLUSTER
+	mul	w1, w1, w0
+	add	w0, w1, w2
+1:
+	ret
+endfunc plat_core_pos
+
+#endif
+
+/* this function disables the load-store prefetch of the calling core
+ * Note: this function is for A72 cores ONLY
+ * in:  none
+ * out: none
+ * uses x0
+ */
+func _disable_ldstr_pfetch_A72
+
+	mrs	x0, CORTEX_A72_CPUACTLR_EL1
+	tst	x0, #CORTEX_A72_CPUACTLR_EL1_DISABLE_L1_DCACHE_HW_PFTCH
+	b.eq	1f
+	b	2f
+
+.align 6
+1:
+	dsb	sy
+	isb
+	orr	x0, x0, #CORTEX_A72_CPUACTLR_EL1_DISABLE_L1_DCACHE_HW_PFTCH
+	msr	CORTEX_A72_CPUACTLR_EL1, x0
+	isb
+
+2:
+	ret
+endfunc _disable_ldstr_pfetch_A72
+
+/*
+ * Function sets the SACR pagesize to 64k
+ */
+func _set_smmu_pagesz_64
+
+	ldr	x1, =NXP_SMMU_ADDR
+	ldr	w0, [x1, #0x10]
+	orr	w0, w0, #1 << 16	/* setting to 64K page */
+	str	w0, [x1, #0x10]
+
+	ret
+endfunc _set_smmu_pagesz_64
diff --git a/plat/nxp/common/fip_handler/common/plat_def_fip_uuid.h b/plat/nxp/common/fip_handler/common/plat_def_fip_uuid.h
new file mode 100644
index 0000000..65aef14
--- /dev/null
+++ b/plat/nxp/common/fip_handler/common/plat_def_fip_uuid.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PLAT_DEF_FIP_UUID_H
+#define PLAT_DEF_FIP_UUID_H
+
+/* PHy images configs */
+#define UUID_DDR_IMEM_UDIMM_1D \
+	{{0x5b, 0xdb, 0xe3, 0x83}, {0xd1, 0x9f}, {0xc7, 0x06}, 0xd4, 0x91, {0x76, 0x4f, 0x9d, 0x23, 0x2d, 0x2d} }
+
+#define UUID_DDR_IMEM_UDIMM_2D \
+	{{0xfa, 0x0e, 0xeb, 0x21}, {0xe0, 0x7f}, {0x8e, 0x65}, 0x95, 0xd8, {0x2b, 0x94, 0xf6, 0xb8, 0x28, 0x0a} }
+
+#define UUID_DDR_DMEM_UDIMM_1D \
+	{{0xba, 0xbb, 0xfd, 0x7e}, {0x5b, 0xf0}, {0xeb, 0xb8}, 0xeb, 0x71, {0xb1, 0x85, 0x07, 0xdd, 0xe1, 0x32} }
+
+#define UUID_DDR_DMEM_UDIMM_2D \
+	{{0xb6, 0x99, 0x61, 0xda}, {0xf9, 0x92}, {0x4b, 0x9e}, 0x0c, 0x49, {0x74, 0xa5, 0xe0, 0x5c, 0xbe, 0xc3} }
+
+#define UUID_DDR_IMEM_RDIMM_1D \
+	{{0x42, 0x33, 0x66, 0x52}, {0xd8, 0x94}, {0x4d, 0xc1}, 0x91, 0xcc, {0x26, 0x8f, 0x7a, 0x67, 0xf1, 0xa2} }
+
+#define UUID_DDR_IMEM_RDIMM_2D \
+	{{0x2e, 0x95, 0x73, 0xba}, {0xb5, 0xca}, {0x7c, 0xc7}, 0xef, 0xc9, {0x5e, 0xb0, 0x42, 0xec, 0x08, 0x7a} }
+
+#define UUID_DDR_DMEM_RDIMM_1D \
+	{{0x1c, 0x51, 0x17, 0xed}, {0x30, 0x0d}, {0xae, 0xba}, 0x87, 0x03, {0x1f, 0x37, 0x85, 0xec, 0xe1, 0x44} }
+
+#define UUID_DDR_DMEM_RDIMM_2D \
+	{{0xe9, 0x0a, 0x90, 0x78}, {0x11, 0xd6}, {0x8b, 0xba}, 0x24, 0x35, {0xec, 0x10, 0x75, 0x4f, 0x56, 0xa5} }
+
+#define UUID_DDR_FW_KEY_CERT \
+	{{0xac, 0x4b, 0xb8, 0x9c}, {0x8f, 0xb9}, {0x11, 0xea}, 0xbc, 0x55, {0x02, 0x42, 0xac, 0x12, 0x00, 0x03} }
+
+#define UUID_DDR_UDIMM_FW_CONTENT_CERT \
+	{{0x2c, 0x7f, 0x52, 0x54}, {0x70, 0x92}, {0x48, 0x40}, 0x8c, 0x34, {0x87, 0x4b, 0xbf, 0xbd, 0x9d, 0x89} }
+
+#define UUID_DDR_RDIMM_FW_CONTENT_CERT \
+	{{0x94, 0xc3, 0x63, 0x30}, {0x7c, 0xf7}, {0x4f, 0x1d}, 0xaa, 0xcd, {0xb5, 0x80, 0xb2, 0xc2, 0x40, 0xa5} }
+
+#define UUID_FUSE_PROV \
+	{{0xec, 0x45, 0x90, 0x42}, {0x30, 0x0d}, {0xae, 0xba}, 0x87, 0x03, {0x1f, 0x37, 0x85, 0xec, 0xe1, 0x44} }
+
+#define UUID_FUSE_UP \
+	{{0x89, 0x46, 0xef, 0x78}, {0x11, 0xd6}, {0x8b, 0xba}, 0x24, 0x35, {0xec, 0x10, 0x75, 0x4f, 0x56, 0xa5} }
+
+#endif	/*	PLAT_DEF_FIP_UUID_H	*/
diff --git a/plat/nxp/common/fip_handler/common/plat_tbbr_img_def.h b/plat/nxp/common/fip_handler/common/plat_tbbr_img_def.h
new file mode 100644
index 0000000..9856f70
--- /dev/null
+++ b/plat/nxp/common/fip_handler/common/plat_tbbr_img_def.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef NXP_IMG_DEF_H
+#define NXP_IMG_DEF_H
+
+#include <export/common/tbbr/tbbr_img_def_exp.h>
+
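+/*
+ * This header is pulled in through PLAT_TBBR_IMG_DEF (see tbbr_img_def.h).
+ * It appends the NXP DDR and fuse FIP image IDs after the generic image IDs
+ * and redefines MAX_NUMBER_IDS to cover them.
+ */
+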
+#ifdef CONFIG_DDR_FIP_IMAGE
+/* DDR FIP IMAGE ID */
+#define DDR_FIP_IMAGE_ID		MAX_IMG_IDS_WITH_SPMDS
+
+#define DDR_IMEM_UDIMM_1D_IMAGE_ID	(MAX_IMG_IDS_WITH_SPMDS + 1)
+#define DDR_IMEM_UDIMM_2D_IMAGE_ID	(MAX_IMG_IDS_WITH_SPMDS + 2)
+
+#define DDR_DMEM_UDIMM_1D_IMAGE_ID	(MAX_IMG_IDS_WITH_SPMDS + 3)
+#define DDR_DMEM_UDIMM_2D_IMAGE_ID	(MAX_IMG_IDS_WITH_SPMDS + 4)
+
+#define DDR_IMEM_RDIMM_1D_IMAGE_ID	(MAX_IMG_IDS_WITH_SPMDS + 5)
+#define DDR_IMEM_RDIMM_2D_IMAGE_ID	(MAX_IMG_IDS_WITH_SPMDS + 6)
+
+#define DDR_DMEM_RDIMM_1D_IMAGE_ID	(MAX_IMG_IDS_WITH_SPMDS + 7)
+#define DDR_DMEM_RDIMM_2D_IMAGE_ID	(MAX_IMG_IDS_WITH_SPMDS + 8)
+
+#define DDR_FW_KEY_CERT_ID		(MAX_IMG_IDS_WITH_SPMDS + 9)
+#define DDR_UDIMM_FW_CONTENT_CERT_ID	(MAX_IMG_IDS_WITH_SPMDS + 10)
+#define DDR_RDIMM_FW_CONTENT_CERT_ID	(MAX_IMG_IDS_WITH_SPMDS + 11)
+/* Max Images */
+#define MAX_IMG_WITH_DDR_IDS		(MAX_IMG_IDS_WITH_SPMDS + 12)
+#else
+#define MAX_IMG_WITH_DDR_IDS		MAX_IMG_IDS_WITH_SPMDS
+#endif
+
+#ifdef POLICY_FUSE_PROVISION
+/* FUSE FIP IMAGE ID */
+#define FUSE_FIP_IMAGE_ID		MAX_IMG_WITH_DDR_IDS
+
+#define FUSE_PROV_IMAGE_ID		(MAX_IMG_WITH_DDR_IDS + 1)
+
+#define FUSE_UP_IMAGE_ID		(MAX_IMG_WITH_DDR_IDS + 2)
+
+#define MAX_IMG_WITH_FIMG_IDS		(MAX_IMG_WITH_DDR_IDS + 3)
+#else
+#define MAX_IMG_WITH_FIMG_IDS		MAX_IMG_WITH_DDR_IDS
+#endif
+
+#define MAX_NUMBER_IDS			MAX_IMG_WITH_FIMG_IDS
+
+#endif	/* NXP_IMG_DEF_H */
diff --git a/plat/nxp/common/fip_handler/common/platform_oid.h b/plat/nxp/common/fip_handler/common/platform_oid.h
new file mode 100644
index 0000000..bbd6041
--- /dev/null
+++ b/plat/nxp/common/fip_handler/common/platform_oid.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#define DDR_FW_CONTENT_CERT_PK_OID		"1.3.6.1.4.1.4128.2200.1"
+#define DDR_IMEM_UDIMM_1D_HASH_OID		"1.3.6.1.4.1.4128.2200.2"
+#define DDR_IMEM_UDIMM_2D_HASH_OID		"1.3.6.1.4.1.4128.2200.3"
+#define DDR_DMEM_UDIMM_1D_HASH_OID		"1.3.6.1.4.1.4128.2200.4"
+#define DDR_DMEM_UDIMM_2D_HASH_OID		"1.3.6.1.4.1.4128.2200.5"
+#define DDR_IMEM_RDIMM_1D_HASH_OID		"1.3.6.1.4.1.4128.2200.6"
+#define DDR_IMEM_RDIMM_2D_HASH_OID		"1.3.6.1.4.1.4128.2200.7"
+#define DDR_DMEM_RDIMM_1D_HASH_OID		"1.3.6.1.4.1.4128.2200.8"
+#define DDR_DMEM_RDIMM_2D_HASH_OID		"1.3.6.1.4.1.4128.2200.9"
diff --git a/plat/nxp/common/fip_handler/ddr_fip/ddr_fip_io.mk b/plat/nxp/common/fip_handler/ddr_fip/ddr_fip_io.mk
new file mode 100644
index 0000000..7d673ba
--- /dev/null
+++ b/plat/nxp/common/fip_handler/ddr_fip/ddr_fip_io.mk
@@ -0,0 +1,38 @@
+#
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-----------------------------------------------------------------------------
+ifeq (${DDR_FIP_IO_STORAGE_ADDED},)
+
+$(eval $(call add_define, PLAT_DEF_FIP_UUID))
+$(eval $(call add_define, PLAT_TBBR_IMG_DEF))
+$(eval $(call SET_NXP_MAKE_FLAG,IMG_LOADR_NEEDED,BL2))
+
+DDR_FIP_IO_STORAGE_ADDED	:= 1
+$(eval $(call add_define,CONFIG_DDR_FIP_IMAGE))
+
+FIP_HANDLER_PATH	:=  ${PLAT_COMMON_PATH}/fip_handler
+FIP_HANDLER_COMMON_PATH	:=  ${FIP_HANDLER_PATH}/common
+DDR_FIP_IO_STORAGE_PATH	:=  ${FIP_HANDLER_PATH}/ddr_fip
+
+PLAT_INCLUDES		+= -I${FIP_HANDLER_COMMON_PATH}\
+			   -I$(DDR_FIP_IO_STORAGE_PATH)
+
+DDR_FIP_IO_SOURCES	+= $(DDR_FIP_IO_STORAGE_PATH)/ddr_io_storage.c
+
+$(shell cp tools/nxp/plat_fiptool/plat_fiptool.mk ${PLAT_DIR})
+
+ifeq (${BL_COMM_DDR_FIP_IO_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${DDR_FIP_IO_SOURCES}
+else
+ifeq (${BL2_DDR_FIP_IO_NEEDED},yes)
+BL2_SOURCES		+= ${DDR_FIP_IO_SOURCES}
+endif
+ifeq (${BL31_DDR_FIP_IO_NEEDED},yes)
+BL31_SOURCES		+= ${DDR_FIP_IO_SOURCES}
+endif
+endif
+endif
+#------------------------------------------------
diff --git a/plat/nxp/common/fip_handler/ddr_fip/ddr_io_storage.c b/plat/nxp/common/fip_handler/ddr_fip/ddr_io_storage.c
new file mode 100644
index 0000000..fc3c4a4
--- /dev/null
+++ b/plat/nxp/common/fip_handler/ddr_fip/ddr_io_storage.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <io_block.h>
+#include <io_driver.h>
+#include <io_fip.h>
+#include <io_memmap.h>
+#include <io_storage.h>
+#include <lib/utils.h>
+#include <tools_share/firmware_image_package.h>
+#include "ddr_io_storage.h"
+#include "plat_common.h"
+#include "platform_def.h"
+
+
+/* TBD - Move these defines to the platform_def.h file.
+ * Keeping them here for reference.
+ */
+extern uintptr_t backend_dev_handle;
+
+static uint32_t ddr_fip;
+
+static uintptr_t ddr_fip_dev_handle;
+
+static io_block_spec_t ddr_fip_block_spec = {
+	.offset = PLAT_DDR_FIP_OFFSET,
+	.length = PLAT_DDR_FIP_MAX_SIZE
+};
+
+static const io_uuid_spec_t ddr_imem_udimm_1d_uuid_spec = {
+	.uuid = UUID_DDR_IMEM_UDIMM_1D,
+};
+
+static const io_uuid_spec_t ddr_imem_udimm_2d_uuid_spec = {
+	.uuid = UUID_DDR_IMEM_UDIMM_2D,
+};
+
+static const io_uuid_spec_t ddr_dmem_udimm_1d_uuid_spec = {
+	.uuid = UUID_DDR_DMEM_UDIMM_1D,
+};
+
+static const io_uuid_spec_t ddr_dmem_udimm_2d_uuid_spec = {
+	.uuid = UUID_DDR_DMEM_UDIMM_2D,
+};
+
+static const io_uuid_spec_t ddr_imem_rdimm_1d_uuid_spec = {
+	.uuid = UUID_DDR_IMEM_RDIMM_1D,
+};
+
+static const io_uuid_spec_t ddr_imem_rdimm_2d_uuid_spec = {
+	.uuid = UUID_DDR_IMEM_RDIMM_2D,
+};
+
+static const io_uuid_spec_t ddr_dmem_rdimm_1d_uuid_spec = {
+	.uuid = UUID_DDR_DMEM_RDIMM_1D,
+};
+
+static const io_uuid_spec_t ddr_dmem_rdimm_2d_uuid_spec = {
+	.uuid = UUID_DDR_DMEM_RDIMM_2D,
+};
+
+#if TRUSTED_BOARD_BOOT
+static const io_uuid_spec_t ddr_fw_key_cert_uuid_spec = {
+	.uuid = UUID_DDR_FW_KEY_CERT,
+};
+static const io_uuid_spec_t ddr_udimm_fw_cert_uuid_spec = {
+	.uuid = UUID_DDR_UDIMM_FW_CONTENT_CERT,
+};
+static const io_uuid_spec_t ddr_rdimm_fw_cert_uuid_spec = {
+	.uuid = UUID_DDR_RDIMM_FW_CONTENT_CERT,
+};
+#endif
+
+static int open_ddr_fip(const uintptr_t spec);
+
+struct plat_io_policy {
+	uintptr_t *dev_handle;
+	uintptr_t image_spec;
+	int (*check)(const uintptr_t spec);
+};
+
+/* By default, the platform loads these images from the FIP */
+static const struct plat_io_policy ddr_policies[] = {
+	[DDR_FIP_IMAGE_ID - DDR_FIP_IMAGE_ID] = {
+		&backend_dev_handle,
+		(uintptr_t)&ddr_fip_block_spec,
+		NULL
+	},
+	[DDR_IMEM_UDIMM_1D_IMAGE_ID - DDR_FIP_IMAGE_ID] = {
+		&ddr_fip_dev_handle,
+		(uintptr_t)&ddr_imem_udimm_1d_uuid_spec,
+		open_ddr_fip
+	},
+	[DDR_IMEM_UDIMM_2D_IMAGE_ID - DDR_FIP_IMAGE_ID] = {
+		&ddr_fip_dev_handle,
+		(uintptr_t)&ddr_imem_udimm_2d_uuid_spec,
+		open_ddr_fip
+	},
+	[DDR_DMEM_UDIMM_1D_IMAGE_ID - DDR_FIP_IMAGE_ID] = {
+		&ddr_fip_dev_handle,
+		(uintptr_t)&ddr_dmem_udimm_1d_uuid_spec,
+		open_ddr_fip
+	},
+	[DDR_DMEM_UDIMM_2D_IMAGE_ID - DDR_FIP_IMAGE_ID] = {
+		&ddr_fip_dev_handle,
+		(uintptr_t)&ddr_dmem_udimm_2d_uuid_spec,
+		open_ddr_fip
+	},
+	[DDR_IMEM_RDIMM_1D_IMAGE_ID - DDR_FIP_IMAGE_ID] = {
+		&ddr_fip_dev_handle,
+		(uintptr_t)&ddr_imem_rdimm_1d_uuid_spec,
+		open_ddr_fip
+	},
+	[DDR_IMEM_RDIMM_2D_IMAGE_ID - DDR_FIP_IMAGE_ID] = {
+		&ddr_fip_dev_handle,
+		(uintptr_t)&ddr_imem_rdimm_2d_uuid_spec,
+		open_ddr_fip
+	},
+	[DDR_DMEM_RDIMM_1D_IMAGE_ID - DDR_FIP_IMAGE_ID] = {
+		&ddr_fip_dev_handle,
+		(uintptr_t)&ddr_dmem_rdimm_1d_uuid_spec,
+		open_ddr_fip
+	},
+	[DDR_DMEM_RDIMM_2D_IMAGE_ID - DDR_FIP_IMAGE_ID] = {
+		&ddr_fip_dev_handle,
+		(uintptr_t)&ddr_dmem_rdimm_2d_uuid_spec,
+		open_ddr_fip
+	},
+#if TRUSTED_BOARD_BOOT
+	[DDR_FW_KEY_CERT_ID - DDR_FIP_IMAGE_ID] = {
+		&ddr_fip_dev_handle,
+		(uintptr_t)&ddr_fw_key_cert_uuid_spec,
+		open_ddr_fip
+	},
+	[DDR_UDIMM_FW_CONTENT_CERT_ID - DDR_FIP_IMAGE_ID] = {
+		&ddr_fip_dev_handle,
+		(uintptr_t)&ddr_udimm_fw_cert_uuid_spec,
+		open_ddr_fip
+	},
+	[DDR_RDIMM_FW_CONTENT_CERT_ID - DDR_FIP_IMAGE_ID] = {
+		&ddr_fip_dev_handle,
+		(uintptr_t)&ddr_rdimm_fw_cert_uuid_spec,
+		open_ddr_fip
+	},
+#endif
+};
+
+static int open_ddr_fip(const uintptr_t spec)
+{
+	int result;
+	uintptr_t local_image_handle;
+
+	/* See if a Firmware Image Package is available */
+	result = io_dev_init(ddr_fip_dev_handle, (uintptr_t)DDR_FIP_IMAGE_ID);
+	if (result == 0) {
+		result = io_open(ddr_fip_dev_handle, spec, &local_image_handle);
+		if (result == 0) {
+			VERBOSE("Using FIP\n");
+			io_close(local_image_handle);
+		}
+	}
+	return result;
+}
+
+/* The image can be one of the DDR PHY images, which can be selected via the
+ * DDR policies
+ */
+int plat_get_ddr_fip_image_source(unsigned int image_id, uintptr_t *dev_handle,
+				  uintptr_t *image_spec,
+				  int (*check)(const uintptr_t spec))
+{
+	int result = -1;
+	const struct plat_io_policy *policy;
+
+	if (image_id >= (DDR_FIP_IMAGE_ID + ARRAY_SIZE(ddr_policies))) {
+		return result;
+	}
+
+	policy = &ddr_policies[image_id - DDR_FIP_IMAGE_ID];
+	if (image_id == DDR_FIP_IMAGE_ID) {
+		result = check(policy->image_spec);
+	} else {
+		result = policy->check(policy->image_spec);
+	}
+	if (result == 0) {
+		*image_spec = policy->image_spec;
+		*dev_handle = *(policy->dev_handle);
+	}
+	return result;
+}
+
+int ddr_fip_setup(const io_dev_connector_t *fip_dev_con, unsigned int boot_dev)
+{
+	int io_result;
+	size_t ddr_fip_offset = PLAT_DDR_FIP_OFFSET;
+
+	/* Open connections to ddr fip and cache the handles */
+	io_result = io_dev_open(fip_dev_con, (uintptr_t)&ddr_fip,
+				&ddr_fip_dev_handle);
+	assert(io_result == 0);
+
+	switch (boot_dev) {
+#if QSPI_BOOT
+	case BOOT_DEVICE_QSPI:
+		ddr_fip_offset += NXP_QSPI_FLASH_ADDR;
+		break;
+#endif
+#if NOR_BOOT
+	case BOOT_DEVICE_IFC_NOR:
+		ddr_fip_offset += NXP_NOR_FLASH_ADDR;
+		break;
+#endif
+#if FLEXSPI_NOR_BOOT
+	case BOOT_DEVICE_FLEXSPI_NOR:
+		ddr_fip_offset += NXP_FLEXSPI_FLASH_ADDR;
+		break;
+#endif
+	default:
+		break;
+	}
+
+	ddr_fip_block_spec.offset = ddr_fip_offset;
+
+	return io_result;
+}
diff --git a/plat/nxp/common/fip_handler/ddr_fip/ddr_io_storage.h b/plat/nxp/common/fip_handler/ddr_fip/ddr_io_storage.h
new file mode 100644
index 0000000..6df3902
--- /dev/null
+++ b/plat/nxp/common/fip_handler/ddr_fip/ddr_io_storage.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef DDR_IO_STORAGE_H
+#define DDR_IO_STORAGE_H
+
+#include <drivers/io/io_driver.h>
+
+#ifndef PLAT_DDR_FIP_OFFSET
+#define PLAT_DDR_FIP_OFFSET	0x800000
+#endif
+
+#ifndef PLAT_DDR_FIP_MAX_SIZE
+#define PLAT_DDR_FIP_MAX_SIZE	0x32000
+#endif
+
+int ddr_fip_setup(const io_dev_connector_t *fip_dev_con, unsigned int boot_dev);
+int plat_get_ddr_fip_image_source(unsigned int image_id, uintptr_t *dev_handle,
+				  uintptr_t *image_spec,
+				  int (*check)(const uintptr_t spec));
+
+#endif	/*	DDR_IO_STORAGE_H	*/
diff --git a/plat/nxp/common/fip_handler/fuse_fip/fuse.mk b/plat/nxp/common/fip_handler/fuse_fip/fuse.mk
new file mode 100644
index 0000000..d8f5ae6
--- /dev/null
+++ b/plat/nxp/common/fip_handler/fuse_fip/fuse.mk
@@ -0,0 +1,100 @@
+#
+# Copyright 2018-2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+
+NEED_FUSE	:= yes
+
+$(eval $(call add_define, PLAT_DEF_FIP_UUID))
+$(eval $(call add_define, POLICY_FUSE_PROVISION))
+$(eval $(call add_define, PLAT_TBBR_IMG_DEF))
+
+$(eval $(call SET_NXP_MAKE_FLAG,IMG_LOADR_NEEDED,BL2))
+$(eval $(call SET_NXP_MAKE_FLAG,SFP_NEEDED,BL2))
+$(eval $(call SET_NXP_MAKE_FLAG,GPIO_NEEDED,BL2))
+
+FIP_HANDLER_PATH	:=  ${PLAT_COMMON_PATH}/fip_handler
+FIP_HANDLER_COMMON_PATH	:=  ${FIP_HANDLER_PATH}/common
+
+FUSE_SOURCES		:=  ${FIP_HANDLER_PATH}/fuse_fip/fuse_io_storage.c
+
+PLAT_INCLUDES		+=  -I${FIP_HANDLER_COMMON_PATH}\
+			    -I${FIP_HANDLER_PATH}/fuse_fip
+
+FUSE_FIP_NAME		:=	fuse_fip.bin
+
+fip_fuse: ${BUILD_PLAT}/${FUSE_FIP_NAME}
+
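+# Example invocation (illustrative; the file names are placeholders):
+#   make ... fip_fuse FUSE_PROV_FILE=<prov_cmd.bin> FUSE_UP_FILE=<up_cmd.bin>
+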
+ifeq (${FUSE_PROV_FILE},)
+
+$(shell cp tools/nxp/plat_fiptool/plat_fiptool.mk ${PLAT_DIR})
+
+else
+ifeq (${TRUSTED_BOARD_BOOT},1)
+FUSE_PROV_FILE_SB = $(notdir ${FUSE_PROV_FILE})_prov.sb
+FUSE_FIP_ARGS += --fuse-prov ${BUILD_PLAT}/${FUSE_PROV_FILE_SB}
+FUSE_FIP_DEPS += ${BUILD_PLAT}/${FUSE_PROV_FILE_SB}
+else
+FUSE_FIP_ARGS += --fuse-prov ${FUSE_PROV_FILE}
+FUSE_FIP_DEPS += ${FUSE_PROV_FILE}
+endif
+endif
+
+ifeq (${FUSE_UP_FILE},)
+else
+ifeq (${TRUSTED_BOARD_BOOT},1)
+FUSE_UP_FILE_SB = $(notdir ${FUSE_UP_FILE})_up.sb
+FUSE_FIP_ARGS += --fuse-up ${BUILD_PLAT}/${FUSE_UP_FILE_SB}
+FUSE_FIP_DEPS += ${BUILD_PLAT}/${FUSE_UP_FILE_SB}
+else
+FUSE_FIP_ARGS += --fuse-up ${FUSE_UP_FILE}
+FUSE_FIP_DEPS += ${FUSE_UP_FILE}
+endif
+endif
+
+ifeq (${TRUSTED_BOARD_BOOT},1)
+
+ifeq (${MBEDTLS_DIR},)
+else
+  $(error Error: Trusted Board Boot with X509 certificates not supported with FUSE_PROG build option)
+endif
+
+# Path to CST directory is required to generate the CSF header
+# and prepend it to image before fip image gets generated
+ifeq (${CST_DIR},)
+  $(error Error: CST_DIR not set)
+endif
+
+ifeq (${FUSE_INPUT_FILE},)
+FUSE_INPUT_FILE := $(PLAT_DRIVERS_PATH)/auth/csf_hdr_parser/${CSF_FILE}
+endif
+
+ifeq (${FUSE_PROV_FILE},)
+else
+${BUILD_PLAT}/${FUSE_PROV_FILE_SB}: ${FUSE_PROV_FILE}
+	@echo " Generating CSF Header for $@ $<"
+	$(CST_DIR)/create_hdr_esbc --in $< --out $@ --app_off ${CSF_HDR_SZ} \
+					--app $< ${FUSE_INPUT_FILE}
+endif
+
+ifeq (${FUSE_UP_FILE},)
+else
+${BUILD_PLAT}/${FUSE_UP_FILE_SB}: ${FUSE_UP_FILE}
+	@echo " Generating CSF Header for $@ $<"
+	$(CST_DIR)/create_hdr_esbc --in $< --out $@ --app_off ${CSF_HDR_SZ} \
+					--app $< ${FUSE_INPUT_FILE}
+endif
+
+endif
+
+${BUILD_PLAT}/${FUSE_FIP_NAME}: fiptool ${FUSE_FIP_DEPS}
+ifeq (${FUSE_FIP_DEPS},)
+	$(error "Error: FUSE_PROV_FILE or/and FUSE_UP_FILE needs to point to the right file")
+endif
+	${FIPTOOL} create ${FUSE_FIP_ARGS} $@
+	${FIPTOOL} info $@
+	@${ECHO_BLANK_LINE}
+	@echo "Built $@ successfully"
+	@${ECHO_BLANK_LINE}
diff --git a/plat/nxp/common/fip_handler/fuse_fip/fuse_io.h b/plat/nxp/common/fip_handler/fuse_fip/fuse_io.h
new file mode 100644
index 0000000..e8775d0
--- /dev/null
+++ b/plat/nxp/common/fip_handler/fuse_fip/fuse_io.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#ifndef FUSE_IO_H
+#define FUSE_IO_H
+
+#include <drivers/io/io_driver.h>
+
+/* Can be overridden from platform_def.h file.
+ */
+#ifndef PLAT_FUSE_FIP_OFFSET
+#define PLAT_FUSE_FIP_OFFSET	0x880000
+#endif
+#ifndef PLAT_FUSE_FIP_MAX_SIZE
+#define PLAT_FUSE_FIP_MAX_SIZE	0x80000
+#endif
+
+int fip_fuse_provisioning(uintptr_t image_buf, uint32_t size);
+int fuse_fip_setup(const io_dev_connector_t *fip_dev_con, unsigned int boot_dev);
+int plat_get_fuse_image_source(unsigned int image_id,
+			       uintptr_t *dev_handle,
+			       uintptr_t *image_spec,
+			       int (*check)(const uintptr_t spec));
+#endif	/*	FUSE_IO_H	*/
diff --git a/plat/nxp/common/fip_handler/fuse_fip/fuse_io_storage.c b/plat/nxp/common/fip_handler/fuse_fip/fuse_io_storage.c
new file mode 100644
index 0000000..017ffcf
--- /dev/null
+++ b/plat/nxp/common/fip_handler/fuse_fip/fuse_io_storage.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <dcfg.h>
+#include <drivers/delay_timer.h>
+#include <fuse_prov.h>
+#include <io_block.h>
+#include <io_driver.h>
+#include <io_fip.h>
+#include <io_memmap.h>
+#include <io_storage.h>
+#include <lib/utils.h>
+#include <nxp_gpio.h>
+#include <sfp.h>
+#include <sfp_error_codes.h>
+#include <tools_share/firmware_image_package.h>
+
+#include "fuse_io.h"
+#include <load_img.h>
+#include <plat/common/platform.h>
+#include "plat_common.h"
+#include "platform_def.h"
+
+extern uintptr_t backend_dev_handle;
+
+static uint32_t fuse_fip;
+
+static uintptr_t fuse_fip_dev_handle;
+
+static io_block_spec_t fuse_fip_block_spec = {
+	.offset = PLAT_FUSE_FIP_OFFSET,
+	.length = PLAT_FUSE_FIP_MAX_SIZE
+};
+
+static const io_uuid_spec_t fuse_prov_uuid_spec = {
+	.uuid = UUID_FUSE_PROV,
+};
+
+static const io_uuid_spec_t fuse_up_uuid_spec = {
+	.uuid = UUID_FUSE_UP,
+};
+
+static int open_fuse_fip(const uintptr_t spec);
+
+struct plat_io_policy {
+	uintptr_t *dev_handle;
+	uintptr_t image_spec;
+	int (*check)(const uintptr_t spec);
+};
+
+/* The fuse images are loaded from the fuse FIP by default. */
+static const struct plat_io_policy fuse_policies[] = {
+	[FUSE_FIP_IMAGE_ID - FUSE_FIP_IMAGE_ID] = {
+		&backend_dev_handle,
+		(uintptr_t)&fuse_fip_block_spec,
+		NULL
+	},
+	[FUSE_PROV_IMAGE_ID - FUSE_FIP_IMAGE_ID] = {
+		&fuse_fip_dev_handle,
+		(uintptr_t)&fuse_prov_uuid_spec,
+		open_fuse_fip
+	},
+	[FUSE_UP_IMAGE_ID - FUSE_FIP_IMAGE_ID] = {
+		&fuse_fip_dev_handle,
+		(uintptr_t)&fuse_up_uuid_spec,
+		open_fuse_fip
+	}
+};
+
+static int open_fuse_fip(const uintptr_t spec)
+{
+	int result;
+	uintptr_t local_image_handle;
+
+	/* See if a Firmware Image Package is available */
+	result = io_dev_init(fuse_fip_dev_handle, (uintptr_t)FUSE_FIP_IMAGE_ID);
+	if (result == 0) {
+		result = io_open(fuse_fip_dev_handle,
+				 spec,
+				 &local_image_handle);
+		if (result == 0) {
+			VERBOSE("Using FIP\n");
+			io_close(local_image_handle);
+		}
+	}
+	return result;
+}
+
+/* The image can be one of the fuse FIP images, which is selected via the
+ * fuse policies defined above.
+ */
+int plat_get_fuse_image_source(unsigned int image_id,
+			       uintptr_t *dev_handle,
+			       uintptr_t *image_spec,
+			       int (*check)(const uintptr_t spec))
+{
+	int result;
+	const struct plat_io_policy *policy;
+
+	assert(image_id < (FUSE_FIP_IMAGE_ID + ARRAY_SIZE(fuse_policies)));
+
+	policy = &fuse_policies[image_id - FUSE_FIP_IMAGE_ID];
+
+	if (image_id == FUSE_FIP_IMAGE_ID) {
+		result = check(policy->image_spec);
+	} else {
+		result = policy->check(policy->image_spec);
+	}
+
+	if (result == 0) {
+		*image_spec = policy->image_spec;
+		*dev_handle = *(policy->dev_handle);
+	}
+	return result;
+}
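For context, a hedged sketch of how a platform's generic image-source hook might delegate to plat_get_fuse_image_source() for the fuse image IDs; open_backend() stands in for the platform's backend check callback and, like the wrapper itself, is not defined by this patch.

/* Hypothetical delegation from the platform's generic IO policy hook. */
static int plat_get_image_source_sketch(unsigned int image_id,
					uintptr_t *dev_handle,
					uintptr_t *image_spec)
{
	if ((image_id == FUSE_FIP_IMAGE_ID) ||
	    (image_id == FUSE_PROV_IMAGE_ID) ||
	    (image_id == FUSE_UP_IMAGE_ID)) {
		return plat_get_fuse_image_source(image_id, dev_handle,
						  image_spec, open_backend);
	}

	/* ...otherwise fall through to the regular FIP policies... */
	return -1;
}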
+
+int fuse_fip_setup(const io_dev_connector_t *fip_dev_con, unsigned int boot_dev)
+{
+	int io_result;
+	size_t fuse_fip_offset = PLAT_FUSE_FIP_OFFSET;
+
+	/* Open a connection to the fuse FIP and cache the handle */
+	io_result = io_dev_open(fip_dev_con, (uintptr_t)&fuse_fip,
+				&fuse_fip_dev_handle);
+
+	assert(io_result == 0);
+
+	switch (boot_dev) {
+#if QSPI_BOOT
+	case BOOT_DEVICE_QSPI:
+		fuse_fip_offset += NXP_QSPI_FLASH_ADDR;
+		break;
+#endif
+#if NOR_BOOT
+	case BOOT_DEVICE_IFC_NOR:
+		fuse_fip_offset += NXP_NOR_FLASH_ADDR;
+		break;
+#endif
+#if FLEXSPI_NOR_BOOT
+	case BOOT_DEVICE_FLEXSPI_NOR:
+		fuse_fip_offset += NXP_FLEXSPI_FLASH_ADDR;
+		break;
+#endif
+	default:
+		break;
+	}
+
+	fuse_fip_block_spec.offset = fuse_fip_offset;
+
+	return io_result;
+}
+
+int fip_fuse_provisioning(uintptr_t image_buf, uint32_t size)
+{
+	uint32_t bit_num;
+	uint32_t *gpio_base_addr = NULL;
+	struct fuse_hdr_t *fuse_hdr = NULL;
+	uint8_t barker[] = {0x68U, 0x39U, 0x27U, 0x81U};
+	int ret = -1;
+
+	if (sfp_check_oem_wp() == 0) {
+		ret = load_img(FUSE_PROV_IMAGE_ID, &image_buf, &size);
+		if (ret != 0) {
+			ERROR("Failed to load FUSE PRIV image\n");
+			assert(ret == 0);
+		}
+		fuse_hdr = (struct fuse_hdr_t *)image_buf;
+
+		/* Check barker code */
+		if (memcmp(fuse_hdr->barker, barker, sizeof(barker)) != 0) {
+			ERROR("FUSE Barker code mismatch.\n");
+			error_handler(ERROR_FUSE_BARKER);
+			return 1;
+		}
+
+		/* Check whether a GPIO pin must be set for POVDD */
+		if (((fuse_hdr->flags >> FLAG_POVDD_SHIFT) & 0x1) != 0) {
+			gpio_base_addr =
+				select_gpio_n_bitnum(fuse_hdr->povdd_gpio,
+						     &bit_num);
+			/*
+			 * Add delay so that Efuse gets the power
+			 * when GPIO is enabled.
+			 */
+			ret = set_gpio_bit(gpio_base_addr, bit_num);
+			mdelay(EFUSE_POWERUP_DELAY_mSec);
+		} else {
+			ret = (board_enable_povdd() == true) ? 0 : PLAT_ERROR_ENABLE_POVDD;
+		}
+		if (ret != 0) {
+			ERROR("Error enabling board POVDD: %d\n", ret);
+			ERROR("Only SFP mirror register will be set.\n");
+		}
+
+		provision_fuses(image_buf, ret == 0);
+
+		/* Check whether the GPIO pin must be cleared for POVDD */
+		if (((fuse_hdr->flags >> FLAG_POVDD_SHIFT) & 0x1) != 0) {
+			if (gpio_base_addr == NULL) {
+				gpio_base_addr =
+					select_gpio_n_bitnum(
+							fuse_hdr->povdd_gpio,
+							&bit_num);
+			}
+			ret = clr_gpio_bit(gpio_base_addr, bit_num);
+		} else {
+			ret = board_disable_povdd() ? 0 : PLAT_ERROR_DISABLE_POVDD;
+		}
+
+		if (ret != 0) {
+			ERROR("Error disabling board POVDD: %d\n", ret);
+		}
+	}
+	return 0;
+}
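The POVDD handling above tests the same header flag twice; a small helper along the following lines would make the intent explicit. FLAG_POVDD_SHIFT comes from fuse_prov.h, while the helper itself (and the stdbool.h it implies) is only an illustration, not part of this patch.

/* Illustrative helper for the flag test used in fip_fuse_provisioning(). */
static inline bool povdd_gpio_requested(const struct fuse_hdr_t *hdr)
{
	return ((hdr->flags >> FLAG_POVDD_SHIFT) & 0x1U) != 0U;
}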
diff --git a/plat/nxp/common/img_loadr/img_loadr.mk b/plat/nxp/common/img_loadr/img_loadr.mk
new file mode 100644
index 0000000..f64b1fa
--- /dev/null
+++ b/plat/nxp/common/img_loadr/img_loadr.mk
@@ -0,0 +1,21 @@
+#
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+IMG_LOADR_DRIVERS_PATH	:=  ${PLAT_COMMON_PATH}/img_loadr
+
+IMG_LOADR_SOURCES	:=  $(IMG_LOADR_DRIVERS_PATH)/load_img.c
+PLAT_INCLUDES		+= -I$(IMG_LOADR_DRIVERS_PATH)
+
+ifeq (${BL_COMM_IMG_LOADR_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${IMG_LOADR_SOURCES}
+else
+ifeq (${BL2_IMG_LOADR_NEEDED},yes)
+BL2_SOURCES		+= ${IMG_LOADR_SOURCES}
+endif
+ifeq (${BL31_IMG_LOADR_NEEDED},yes)
+BL31_SOURCES		+= ${IMG_LOADR_SOURCES}
+endif
+endif
diff --git a/plat/nxp/common/img_loadr/load_img.c b/plat/nxp/common/img_loadr/load_img.c
new file mode 100644
index 0000000..c185c36
--- /dev/null
+++ b/plat/nxp/common/img_loadr/load_img.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+
+#include <common/bl_common.h>
+#include <common/desc_image_load.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#include "load_img.h"
+
+/******************************************************************************
+ * This function can be used to load DDR PHY/FUSE Images
+ *
+ * @param [in] image_id		 Image ID to be loaded
+ *
+ * @param [in,out]  image_base   Location at which the image should be loaded.
+ *				 If the image is prepended by a CSF header,
+ *				 image_base points to the actual image after
+ *				 the header on return.
+ *
+ * @param [in,out]  image_size   On entry, the maximum image size that fits
+ *				 (buffer size starting from image_base).
+ *				 On return, the actual size of the loaded
+ *				 image.
+ *****************************************************************************/
+int load_img(unsigned int image_id, uintptr_t *image_base,
+		      uint32_t *image_size)
+{
+	int err = 0;
+
+	image_desc_t img_info = {
+		.image_id = image_id,
+		SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
+				VERSION_2, image_info_t, 0),
+#ifdef CSF_HEADER_PREPENDED
+		.image_info.image_base = *image_base - CSF_HDR_SZ,
+		.image_info.image_max_size = *image_size + CSF_HDR_SZ,
+#else
+		.image_info.image_base = *image_base,
+		.image_info.image_max_size = *image_size,
+#endif
+	};
+
+	/* Create MMU entry for the CSF header */
+#if PLAT_XLAT_TABLES_DYNAMIC
+#ifdef CSF_HEADER_PREPENDED
+	mmap_add_dynamic_region(img_info.image_info.image_base,
+			img_info.image_info.image_base,
+			CSF_HDR_SZ,
+			MT_MEMORY | MT_RW | MT_SECURE);
+#endif
+#endif
+
+	VERBOSE("BL2: Loading IMG %d\n", image_id);
+	err = load_auth_image(image_id, &img_info.image_info);
+	if (err != 0) {
+		VERBOSE("Failed to load IMG %d\n", image_id);
+		return err;
+	}
+
+#ifdef CSF_HEADER_PREPENDED
+	*image_base = img_info.image_info.image_base + CSF_HDR_SZ;
+	*image_size = img_info.image_info.image_size - CSF_HDR_SZ;
+#if PLAT_XLAT_TABLES_DYNAMIC
+	mmap_remove_dynamic_region(img_info.image_info.image_base,
+				   CSF_HDR_SZ);
+#endif
+#else
+	*image_base = img_info.image_info.image_base;
+	*image_size = img_info.image_info.image_size;
+#endif
+
+	return err;
+}
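A hedged usage sketch of load_img(): the caller passes the buffer base and its maximum size, and gets back the post-CSF-header base and the actual loaded size. FUSE_BUF and FUSE_SZ are the defaults from plat_default_def.h; the wrapper function itself is hypothetical.

/* Hypothetical caller: load the fuse provisioning image into a scratch buffer. */
static int load_fuse_prov_image(void)
{
	uintptr_t buf = FUSE_BUF;
	uint32_t size = FUSE_SZ;
	int ret;

	ret = load_img(FUSE_PROV_IMAGE_ID, &buf, &size);
	if (ret == 0) {
		/* buf now points past any CSF header; size is the loaded size */
		INFO("Fuse image at 0x%lx, %u bytes\n", buf, size);
	}
	return ret;
}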
diff --git a/plat/nxp/common/img_loadr/load_img.h b/plat/nxp/common/img_loadr/load_img.h
new file mode 100644
index 0000000..6f9de32
--- /dev/null
+++ b/plat/nxp/common/img_loadr/load_img.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef LOAD_IMAGE_H
+#define LOAD_IMAGE_H
+
+int load_img(unsigned int image_id, uintptr_t *image_base,
+		      uint32_t *image_size);
+
+#endif /* LOAD_IMAGE_H */
diff --git a/plat/nxp/common/include/default/ch_2/soc_default_base_addr.h b/plat/nxp/common/include/default/ch_2/soc_default_base_addr.h
new file mode 100644
index 0000000..175a796
--- /dev/null
+++ b/plat/nxp/common/include/default/ch_2/soc_default_base_addr.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef SOC_DEFAULT_BASE_ADDR_H
+#define SOC_DEFAULT_BASE_ADDR_H
+
+/* CCSR mmu_def.h */
+#define NXP_CCSR_ADDR			0x01000000
+#define NXP_CCSR_SIZE			0x0F000000
+
+#define NXP_DCSR_ADDR			0x20000000
+#define NXP_DCSR_SIZE			0x4000000
+
+/* Flex-SPI controller address */
+#define NXP_FLEXSPI_ADDR		0x020C0000
+/* QSPI Flash Start address */
+#define NXP_QSPI_FLASH_ADDR		0x40000000
+/* NOR Flash Start address */
+#define NXP_IFC_REGION_ADDR		0x60000000
+#define NXP_NOR_FLASH_ADDR		NXP_IFC_REGION_ADDR
+
+/* MMU 500 soc.c*/
+#define NXP_SMMU_ADDR			0x09000000
+
+#define NXP_SNVS_ADDR			0x01E90000
+
+#define NXP_DCFG_ADDR			0x01EE0000
+#define NXP_SFP_ADDR			0x01E80000
+#define NXP_RCPM_ADDR			0x01EE2000
+#define NXP_CSU_ADDR			0x01510000
+#define NXP_SCFG_ADDR			0x01570000
+#define NXP_DCSR_ADDR			0x20000000
+#define NXP_DCSR_DCFG_ADDR		(NXP_DCSR_ADDR + 0x00140000)
+#define NXP_I2C_ADDR			0x02180000
+#define NXP_ESDHC_ADDR			0x01560000
+#define NXP_UART_ADDR			0x021C0500
+#define NXP_UART1_ADDR			0x021C0600
+
+#define NXP_GPIO1_ADDR			0x02300000
+#define NXP_GPIO2_ADDR			0x02310000
+#define NXP_GPIO3_ADDR			0x02320000
+#define NXP_GPIO4_ADDR			0x02330000
+
+#define NXP_WDOG1_NS_ADDR		0x02390000
+#define NXP_WDOG2_NS_ADDR		0x023A0000
+#define NXP_WDOG1_TZ_ADDR		0x023B0000
+#define NXP_WDOG2_TZ_ADDR		0x023C0000
+
+#define NXP_TIMER_STATUS_ADDR		0x023F0000
+
+#define NXP_GICD_4K_ADDR		0x01401000
+#define NXP_GICC_4K_ADDR		0x01402000
+#define NXP_GICD_64K_ADDR		0x01410000
+#define NXP_GICC_64K_ADDR		0x01420000
+
+#define NXP_CAAM_ADDR			0x01700000
+
+#define NXP_TZC_ADDR			0x01500000
+#define NXP_DDR_ADDR			0x01080000
+
+#define NXP_TIMER_ADDR			0x02B00000
+#define NXP_CCI_ADDR			0x01180000
+#define NXP_RESET_ADDR			0x01E60000
+#define NXP_SEC_REGFILE_ADDR		0x01E88000
+#endif	/*	SOC_DEFAULT_BASE_ADDR_H		*/
diff --git a/plat/nxp/common/include/default/ch_2/soc_default_helper_macros.h b/plat/nxp/common/include/default/ch_2/soc_default_helper_macros.h
new file mode 100644
index 0000000..789b112
--- /dev/null
+++ b/plat/nxp/common/include/default/ch_2/soc_default_helper_macros.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef SOC_DEFAULT_HELPER_MACROS_H
+#define SOC_DEFAULT_HELPER_MACROS_H
+
+#ifdef NXP_OCRAM_TZPC_ADDR
+
+/* TZPC block size: 0x1000 corresponds to a 4KB block
+ * (register encoding: 0x1 means 4KB, 0x2 means 8KB).
+ */
+#define TZPC_BLOCK_SIZE			0x1000
+#endif
+
+/* DDR controller offsets and defines */
+#ifdef NXP_DDR_ADDR
+
+#define DDR_CFG_2_OFFSET                0x114
+#define CFG_2_FORCE_REFRESH             0x80000000
+
+#endif /* NXP_DDR_ADDR */
+
+/* Reset block register offsets */
+#ifdef NXP_RESET_ADDR
+
+/* Register Offset */
+#define RST_RSTCR_OFFSET		0x0
+#define RST_RSTRQMR1_OFFSET		0x10
+#define RST_RSTRQSR1_OFFSET		0x18
+#define BRR_OFFSET			0x60
+
+/* helper macros */
+#define RSTRQSR1_SWRR			0x800
+#define RSTRQMR_RPTOE_MASK		(1 << 19)
+
+#endif /* NXP_RESET_ADDR */
+
+/* Secure-Register-File register offsets and bit masks */
+#ifdef NXP_RST_ADDR
+/* Register Offset */
+#define CORE_HOLD_OFFSET		0x140
+#define RSTCNTL_OFFSET			0x180
+
+/* Helper macros */
+#define SW_RST_REQ_INIT			0x1
+#endif
+
+#ifdef NXP_RCPM_ADDR
+/* RCPM Register Offsets */
+#define RCPM_PCPH20SETR_OFFSET		0x0D4
+#define RCPM_PCPH20CLRR_OFFSET		0x0D8
+#define RCPM_POWMGTCSR_OFFSET		0x130
+#define RCPM_IPPDEXPCR0_OFFSET		0x140
+#define RCPM_POWMGTCSR_LPM20_REQ	0x00100000
+#endif
+
+#endif	/*	SOC_DEFAULT_HELPER_MACROS_H	*/
diff --git a/plat/nxp/common/include/default/ch_3/soc_default_base_addr.h b/plat/nxp/common/include/default/ch_3/soc_default_base_addr.h
new file mode 100644
index 0000000..e8a7645
--- /dev/null
+++ b/plat/nxp/common/include/default/ch_3/soc_default_base_addr.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef SOC_DEFAULT_BASE_ADDR_H
+#define SOC_DEFAULT_BASE_ADDR_H
+
+/* CCSR mmu_def.h */
+#define NXP_CCSR_ADDR			0x1000000
+#define NXP_CCSR_SIZE			0xF000000
+
+#define NXP_DCSR_ADDR			0x700000000
+#define NXP_DCSR_SIZE			0x40000000
+
+/* Flex-SPI controller address */
+#define NXP_FLEXSPI_ADDR		0x020C0000
+/* Flex-SPI Flash Start address */
+#define NXP_FLEXSPI_FLASH_ADDR		0x20000000
+
+/* MMU 500 soc.c*/
+#define NXP_SMMU_ADDR			0x05000000
+
+#define NXP_SNVS_ADDR			0x01E90000
+
+#define NXP_DCFG_ADDR			0x01E00000
+#define NXP_PMU_CCSR_ADDR		0x01E30000
+#define NXP_PMU_DCSR_ADDR		0x700123000
+#define NXP_PMU_ADDR                    NXP_PMU_CCSR_ADDR
+#define NXP_SFP_ADDR			0x01E80000
+#define NXP_SCFG_ADDR			0x01FC0000
+#define NXP_I2C_ADDR			0x02000000
+#define NXP_ESDHC_ADDR			0x02140000
+#define NXP_ESDHC2_ADDR			0x02150000
+#define NXP_UART_ADDR			0x021C0000
+#define NXP_UART1_ADDR			0x021D0000
+
+#define NXP_GPIO1_ADDR			0x02300000
+#define NXP_GPIO2_ADDR			0x02310000
+#define NXP_GPIO3_ADDR			0x02320000
+#define NXP_GPIO4_ADDR			0x02330000
+
+#define NXP_WDOG1_NS_ADDR		0x02390000
+#define NXP_WDOG2_NS_ADDR		0x023A0000
+#define NXP_WDOG1_TZ_ADDR		0x023B0000
+#define NXP_WDOG2_TZ_ADDR		0x023C0000
+
+#define NXP_TIMER_STATUS_ADDR		0x023F0000
+
+#define NXP_GICD_ADDR			0x06000000
+#define NXP_GICR_ADDR			0x06200000
+#define NXP_GICR_SGI_ADDR		0x06210000
+
+#define NXP_CAAM_ADDR			0x08000000
+
+#define NXP_TZC_ADDR			0x01100000
+#define NXP_TZC2_ADDR			0x01110000
+#define NXP_TZC3_ADDR			0x01120000
+
+#define NXP_RESET_ADDR			0x01E60000
+#define NXP_SEC_REGFILE_ADDR		0x01E88000
+#endif	/*	SOC_DEFAULT_BASE_ADDR_H		*/
diff --git a/plat/nxp/common/include/default/ch_3_2/soc_default_base_addr.h b/plat/nxp/common/include/default/ch_3_2/soc_default_base_addr.h
new file mode 100644
index 0000000..08300b0
--- /dev/null
+++ b/plat/nxp/common/include/default/ch_3_2/soc_default_base_addr.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef SOC_DEFAULT_BASE_ADDR_H
+#define SOC_DEFAULT_BASE_ADDR_H
+
+/* CCSR mmu_def.h */
+#define NXP_CCSR_ADDR			0x1000000
+#define NXP_CCSR_SIZE			0xF000000
+
+#define NXP_DCSR_ADDR			0x700000000
+#define NXP_DCSR_SIZE			0x40000000
+
+/* Flex-SPI controller address */
+#define NXP_FLEXSPI_ADDR		0x020C0000
+/* Flex-SPI Flash Start address */
+#define NXP_FLEXSPI_FLASH_ADDR		0x20000000
+
+/* MMU 500 soc.c*/
+#define NXP_SMMU_ADDR			0x05000000
+
+#define NXP_SNVS_ADDR			0x01E90000
+
+#define NXP_DCFG_ADDR			0x01E00000
+#define NXP_PMU_CCSR_ADDR		0x01E30000
+#define NXP_PMU_DCSR_ADDR		0x700123000
+#define NXP_PMU_ADDR                    NXP_PMU_CCSR_ADDR
+#define NXP_SFP_ADDR			0x01E80000
+#define NXP_SCFG_ADDR			0x01FC0000
+#define NXP_I2C_ADDR			0x02000000
+#define NXP_ESDHC_ADDR			0x02140000
+#define NXP_ESDHC2_ADDR			0x02150000
+#define NXP_UART_ADDR			0x021C0000
+#define NXP_UART1_ADDR			0x021D0000
+
+#define NXP_GPIO1_ADDR			0x02300000
+#define NXP_GPIO2_ADDR			0x02310000
+#define NXP_GPIO3_ADDR			0x02320000
+#define NXP_GPIO4_ADDR			0x02330000
+
+#define NXP_WDOG1_NS_ADDR		0x02390000
+#define NXP_WDOG2_NS_ADDR		0x023A0000
+#define NXP_WDOG1_TZ_ADDR		0x023B0000
+#define NXP_WDOG2_TZ_ADDR		0x023C0000
+
+#define NXP_TIMER_STATUS_ADDR		0x023F0000
+
+#define NXP_GICD_ADDR			0x06000000
+#define NXP_GICR_ADDR			0x06200000
+#define NXP_GICR_SGI_ADDR		0x06210000
+
+#define NXP_CAAM_ADDR			0x08000000
+
+#define NXP_TZC_ADDR			0x01100000
+#define NXP_TZC2_ADDR			0x01110000
+#define NXP_TZC3_ADDR			0x01120000
+
+#define NXP_TIMER_ADDR			0x023E0000
+
+#define NXP_RESET_ADDR			0x01E60000
+#define NXP_SEC_REGFILE_ADDR		0x01E88000
+#define NXP_RST_ADDR			0x01E88000
+
+#define TPMWAKEMR0_ADDR		0x700123c50
+#define TZPC_BLOCK_SIZE		0x1000
+
+#define NXP_TZC4_ADDR			0x01130000
+#define NXP_DDR_ADDR			0x01080000
+#define NXP_DDR2_ADDR			0x01090000
+
+#define NXP_OCRAM_TZPC_ADDR		0x02200000
+
+#define NXP_CCN_ADDR			0x04000000
+#define NXP_CCN_HNI_ADDR		0x04080000
+#define NXP_CCN_HN_F_0_ADDR		0x04200000
+
+#endif	/*	SOC_DEFAULT_BASE_ADDR_H		*/
diff --git a/plat/nxp/common/include/default/ch_3_2/soc_default_helper_macros.h b/plat/nxp/common/include/default/ch_3_2/soc_default_helper_macros.h
new file mode 100644
index 0000000..cdc823a
--- /dev/null
+++ b/plat/nxp/common/include/default/ch_3_2/soc_default_helper_macros.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef SOC_DEFAULT_HELPER_MACROS_H
+#define SOC_DEFAULT_HELPER_MACROS_H
+
+#ifdef NXP_OCRAM_TZPC_ADDR
+
+/* TZPC block size: 0x1000 corresponds to a 4KB block
+ * (register encoding: 0x1 means 4KB, 0x2 means 8KB).
+ */
+#define TZPC_BLOCK_SIZE		0x1000
+#endif
+
+/* DDR controller offsets and defines */
+#ifdef NXP_DDR_ADDR
+
+#define DDR_CFG_2_OFFSET                0x114
+#define CFG_2_FORCE_REFRESH             0x80000000
+
+#endif /* NXP_DDR_ADDR */
+
+/* Reset block register offsets */
+#ifdef NXP_RESET_ADDR
+
+/* Register Offset */
+#define RST_RSTCR_OFFSET		0x0
+#define RST_RSTRQMR1_OFFSET		0x10
+#define RST_RSTRQSR1_OFFSET		0x18
+#define BRR_OFFSET			0x60
+
+/* helper macros */
+#define RSTRQSR1_SWRR			0x800
+#define RSTRQMR_RPTOE_MASK		(1 << 19)
+
+#endif /* NXP_RESET_ADDR */
+
+/* Secure-Register-File register offsets and bit masks */
+#ifdef NXP_RST_ADDR
+/* Register Offset */
+#define CORE_HOLD_OFFSET		0x140
+#define RSTCNTL_OFFSET			0x180
+
+/* Helper macros */
+#define SW_RST_REQ_INIT			0x1
+#endif
+
+#ifdef NXP_CCN_ADDR
+#define NXP_CCN_HN_F_1_ADDR		0x04210000
+
+#define CCN_HN_F_SAM_NODEID_MASK	0x7f
+#define CCN_HN_F_SNP_DMN_CTL_OFFSET	0x200
+#define CCN_HN_F_SNP_DMN_CTL_SET_OFFSET	0x210
+#define CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET	0x220
+#define CCN_HN_F_SNP_DMN_CTL_MASK	0x80a00
+#define CCN_HNF_NODE_COUNT              8
+#define CCN_HNF_OFFSET                  0x10000
+
+#define SA_AUX_CTRL_REG_OFFSET		0x500
+#define NUM_HNI_NODE			2
+#define CCN_HNI_MEMORY_MAP_SIZE		0x10000
+
+#define PCIeRC_RN_I_NODE_ID_OFFSET	0x8
+#define PoS_CONTROL_REG_OFFSET		0x0
+#define POS_EARLY_WR_COMP_EN		0x20
+#define HNI_POS_EN			0x01
+#define POS_TERMINATE_BARRIERS		0x10
+#define SERIALIZE_DEV_nGnRnE_WRITES	0x200
+#define ENABLE_ERR_SIGNAL_TO_MN		0x4
+#define ENABLE_RESERVE_BIT53		0x400
+#define ENABLE_WUO			0x10
+#endif /* NXP_CCN_ADDR */
+
+#endif	/*	SOC_DEFAULT_HELPER_MACROS_H	*/
diff --git a/plat/nxp/common/include/default/plat_default_def.h b/plat/nxp/common/include/default/plat_default_def.h
new file mode 100644
index 0000000..dd5dfe0
--- /dev/null
+++ b/plat/nxp/common/include/default/plat_default_def.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PLAT_DEFAULT_DEF_H
+#define PLAT_DEFAULT_DEF_H
+
+/*
+ * Platform binary types for linking
+ */
+#ifdef __aarch64__
+#define PLATFORM_LINKER_FORMAT          "elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH            aarch64
+#else
+#define PLATFORM_LINKER_FORMAT          "elf32-littlearm"
+#define PLATFORM_LINKER_ARCH            arm
+#endif /* __aarch64__ */
+
+#define LS_BL31_PLAT_PARAM_VAL		0x0f1e2d3c4b5a6978ULL
+
+/* NXP platforms have DRAM divided into banks:
+ * DRAM0 bank:	maximum size of this bank is fixed at 2GB.
+ * DRAM1 bank:	memory above 2GB belongs to bank 1; its size varies from
+ *		platform to platform.
+ * DRAMn bank:	further banks, where present.
+ *
+ * With few exceptions, all platforms have a 2GB DRAM0 bank, so that default
+ * is defined here. Platforms whose DRAM0 size is smaller than 2GB define it
+ * in platform_def.h.
+ */
+#ifndef PLAT_DEF_DRAM0_SIZE
+#define PLAT_DEF_DRAM0_SIZE	0x80000000	/*  2G */
+#endif
+
+/* Common default for all platforms: NS DRAM starts at the DRAM0 base. */
+#ifndef NXP_NS_DRAM_ADDR
+#define NXP_NS_DRAM_ADDR	NXP_DRAM0_ADDR
+#endif
+
+/* 64M is reserved for Secure memory
+ */
+#ifndef NXP_SECURE_DRAM_SIZE
+#define NXP_SECURE_DRAM_SIZE	(64 * 1024 * 1024)
+#endif
+
+/* 2M Secure EL1 Payload Shared Memory */
+#ifndef NXP_SP_SHRD_DRAM_SIZE
+#define NXP_SP_SHRD_DRAM_SIZE	(2 * 1024 * 1024)
+#endif
+
+#ifndef NXP_NS_DRAM_SIZE
+/* Non secure memory */
+#define NXP_NS_DRAM_SIZE	(PLAT_DEF_DRAM0_SIZE - \
+				(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE))
+#endif
+
+#ifndef NXP_SECURE_DRAM_ADDR
+#ifdef TEST_BL31
+#define NXP_SECURE_DRAM_ADDR 0
+#else
+#define NXP_SECURE_DRAM_ADDR	(NXP_NS_DRAM_ADDR + PLAT_DEF_DRAM0_SIZE - \
+				(NXP_SECURE_DRAM_SIZE  + NXP_SP_SHRD_DRAM_SIZE))
+#endif
+#endif
+
+#ifndef NXP_SP_SHRD_DRAM_ADDR
+#define NXP_SP_SHRD_DRAM_ADDR	(NXP_NS_DRAM_ADDR + PLAT_DEF_DRAM0_SIZE \
+				- NXP_SP_SHRD_DRAM_SIZE)
+#endif
+
+#ifndef BL31_BASE
+/* 2 MB reserved in secure memory for DDR */
+#define BL31_BASE		NXP_SECURE_DRAM_ADDR
+#endif
+
+#ifndef BL31_SIZE
+#define BL31_SIZE		(0x200000)
+#endif
+
+#ifndef BL31_LIMIT
+#define BL31_LIMIT		(BL31_BASE + BL31_SIZE)
+#endif
+
+/* Put BL32 in secure memory */
+#ifndef BL32_BASE
+#define BL32_BASE		(NXP_SECURE_DRAM_ADDR + BL31_SIZE)
+#endif
+
+#ifndef BL32_LIMIT
+#define BL32_LIMIT		(NXP_SECURE_DRAM_ADDR + \
+				NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)
+#endif
+
+/* BL33 memory region */
+/* Hardcoded based on current address in u-boot */
+#ifndef BL33_BASE
+#define BL33_BASE		0x82000000
+#endif
+
+#ifndef BL33_LIMIT
+#define BL33_LIMIT		(NXP_NS_DRAM_ADDR + NXP_NS_DRAM_SIZE)
+#endif
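To make the defaults above concrete, here is the resulting layout assuming NXP_DRAM0_ADDR (and hence NXP_NS_DRAM_ADDR) is 0x80000000, which is typical for these SoCs but not mandated by this header:

/*
 * Worked example (64MB secure + 2MB shared carved from the top of a 2GB DRAM0):
 *   NXP_SECURE_DRAM_ADDR  = 0x80000000 + 0x80000000 - (0x4000000 + 0x200000)
 *                         = 0xFBE00000
 *   NXP_SP_SHRD_DRAM_ADDR = 0x80000000 + 0x80000000 - 0x200000 = 0xFFE00000
 *   BL31: 0xFBE00000 .. 0xFC000000 (2MB)
 *   BL32: 0xFC000000 .. BL32_LIMIT (rest of secure memory plus shared region)
 *   BL33: 0x82000000 .. NXP_NS_DRAM_ADDR + NXP_NS_DRAM_SIZE
 */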
+
+/*
+ * FIP image defines - offset at which the FIP image is present.
+ * The image includes BL31, BL33 and, optionally, BL32.
+ */
+#ifdef POLICY_FUSE_PROVISION
+#ifndef FUSE_BUF
+#define FUSE_BUF		ULL(0x81000000)
+#endif
+
+#ifndef FUSE_SZ
+#define FUSE_SZ			0x80000
+#endif
+#endif
+
+#ifndef MAX_FIP_DEVICES
+#define MAX_FIP_DEVICES		2
+#endif
+
+#ifndef PLAT_FIP_OFFSET
+#define PLAT_FIP_OFFSET		0x100000
+#endif
+
+#ifndef PLAT_FIP_MAX_SIZE
+#define PLAT_FIP_MAX_SIZE	0x400000
+#endif
+
+/* Check if this size can be determined from array size */
+#if defined(IMAGE_BL2)
+#ifndef MAX_MMAP_REGIONS
+#define MAX_MMAP_REGIONS	8
+#endif
+#ifndef MAX_XLAT_TABLES
+#define MAX_XLAT_TABLES		6
+#endif
+#elif defined(IMAGE_BL31)
+#ifndef MAX_MMAP_REGIONS
+#define MAX_MMAP_REGIONS	9
+#endif
+#ifndef MAX_XLAT_TABLES
+#define MAX_XLAT_TABLES		9
+#endif
+#elif defined(IMAGE_BL32)
+#ifndef MAX_MMAP_REGIONS
+#define MAX_MMAP_REGIONS	8
+#endif
+#ifndef MAX_XLAT_TABLES
+#define MAX_XLAT_TABLES		9
+#endif
+#endif
+
+/*
+ * ID of the secure physical generic timer interrupt used by the BL32.
+ */
+#ifndef BL32_IRQ_SEC_PHY_TIMER
+#define BL32_IRQ_SEC_PHY_TIMER	29
+#endif
+
+#endif	/*	PLAT_DEFAULT_DEF_H	*/
diff --git a/plat/nxp/common/nv_storage/nv_storage.mk b/plat/nxp/common/nv_storage/nv_storage.mk
new file mode 100644
index 0000000..dddba5f
--- /dev/null
+++ b/plat/nxp/common/nv_storage/nv_storage.mk
@@ -0,0 +1,29 @@
+#
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# NXP Non-Volatile data flag storage used and then cleared by SW on boot-up
+
+$(eval $(call add_define,NXP_NV_SW_MAINT_LAST_EXEC_DATA))
+
+ifeq ($(NXP_COINED_BB),yes)
+$(eval $(call add_define,NXP_COINED_BB))
+# BL2 : To read the reset cause from LP SECMON GPR register
+# BL31: To write the reset cause to LP SECMON GPR register
+$(eval $(call SET_NXP_MAKE_FLAG,SNVS_NEEDED,BL_COMM))
+
+# BL2: DDR training data is stored on Flexspi NOR.
+ifneq (${BOOT_MODE},flexspi_nor)
+$(eval $(call SET_NXP_MAKE_FLAG,XSPI_NEEDED,BL2))
+endif
+
+else
+$(eval $(call add_define_val,DEFAULT_NV_STORAGE_BASE_ADDR,'${BL2_BIN_XSPI_NOR_END_ADDRESS} - 2 * ${NXP_XSPI_NOR_UNIT_SIZE}'))
+$(eval $(call SET_NXP_MAKE_FLAG,XSPI_NEEDED,BL_COMM))
+endif
+
+NV_STORAGE_INCLUDES	+=  -I${PLAT_COMMON_PATH}/nv_storage
+
+NV_STORAGE_SOURCES	+=  ${PLAT_COMMON_PATH}/nv_storage/plat_nv_storage.c
diff --git a/plat/nxp/common/nv_storage/plat_nv_storage.c b/plat/nxp/common/nv_storage/plat_nv_storage.c
new file mode 100644
index 0000000..7ec4fdb
--- /dev/null
+++ b/plat/nxp/common/nv_storage/plat_nv_storage.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <common/debug.h>
+#ifndef NXP_COINED_BB
+#include <flash_info.h>
+#include <fspi.h>
+#include <fspi_api.h>
+#endif
+#include <lib/mmio.h>
+#ifdef NXP_COINED_BB
+#include <snvs.h>
+#else
+#include <xspi_error_codes.h>
+#endif
+
+#include <plat_nv_storage.h>
+
+/* This static structure is populated as the first step of BL2 boot-up,
+ * either from the SNVS LP GPR or from FlexSPI NOR storage.
+ */
+
+static nv_app_data_t nv_app_data;
+
+int read_nv_app_data(void)
+{
+	int ret = 0;
+
+#ifdef NXP_COINED_BB
+	uint8_t *nv_app_data_array = (uint8_t *) &nv_app_data;
+	uint8_t offset = 0U;
+
+	ret = snvs_read_app_data();
+	do {
+		nv_app_data_array[offset] = snvs_read_app_data_bit(offset);
+		offset++;
+	} while (offset < APP_DATA_MAX_OFFSET);
+	snvs_clear_app_data();
+#else
+	uintptr_t nv_base_addr = NV_STORAGE_BASE_ADDR;
+
+	ret = fspi_init(NXP_FLEXSPI_ADDR, NXP_FLEXSPI_FLASH_ADDR);
+
+	if (ret != XSPI_SUCCESS) {
+		ERROR("Failed to initialized driver flexspi-nor.\n");
+		ERROR("exiting warm-reset request.\n");
+		return -ENODEV;
+	}
+
+	xspi_read(nv_base_addr,
+		  (uint32_t *)&nv_app_data, sizeof(nv_app_data_t));
+	xspi_sector_erase((uint32_t) nv_base_addr,
+				F_SECTOR_ERASE_SZ);
+#endif
+	return ret;
+}
+
+int wr_nv_app_data(int data_offset,
+			uint8_t *data,
+			int data_size)
+{
+	int ret = 0;
+#ifdef NXP_COINED_BB
+#if !TRUSTED_BOARD_BOOT
+	snvs_disable_zeroize_lp_gpr();
+#endif
+	/* Only 1-bit flags can be saved in the LP SecMon
+	 * general-purpose register.
+	 */
+	if ((data_size > 1) || (*data != DEFAULT_SET_VALUE)) {
+		ERROR("Only binary value is allowed to be written.\n");
+		ERROR("Use flash instead of SNVS GPR as NV location.\n");
+		return -ENODEV;
+	}
+	snvs_write_app_data_bit(data_offset);
+#else
+	uint8_t read_val[sizeof(nv_app_data_t)];
+	uint8_t ready_to_write_val[sizeof(nv_app_data_t)];
+	uintptr_t nv_base_addr = NV_STORAGE_BASE_ADDR;
+
+	assert((nv_base_addr + data_offset + data_size) <= (nv_base_addr + F_SECTOR_ERASE_SZ));
+
+	ret = fspi_init(NXP_FLEXSPI_ADDR, NXP_FLEXSPI_FLASH_ADDR);
+
+	if (ret != XSPI_SUCCESS) {
+		ERROR("Failed to initialized driver flexspi-nor.\n");
+		ERROR("exiting warm-reset request.\n");
+		return -ENODEV;
+	}
+
+	ret = xspi_read(nv_base_addr + data_offset, (uint32_t *)read_val, data_size);
+
+	memset(ready_to_write_val, READY_TO_WRITE_VALUE, ARRAY_SIZE(ready_to_write_val));
+
+	if (memcmp(read_val, ready_to_write_val, data_size) == 0) {
+		xspi_write(nv_base_addr + data_offset, data, data_size);
+	}
+#endif
+
+	return ret;
+}
+
+const nv_app_data_t *get_nv_data(void)
+{
+	return (const nv_app_data_t *) &nv_app_data;
+}
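A hedged usage sketch of the API above: a flag is written before a warm reset and read back, via the cache populated by read_nv_app_data(), on the next boot. The wrapper names are illustrative only; the write value follows the DEFAULT_SET_VALUE convention required by wr_nv_app_data().

/* Hypothetical wrappers around the NV flag API (stdbool.h assumed). */
static void mark_warm_reset(void)
{
	uint8_t flag = DEFAULT_SET_VALUE;

	(void)wr_nv_app_data(WARM_RESET_FLAG_OFFSET, &flag, sizeof(flag));
}

static bool was_warm_reset(void)
{
	/* read_nv_app_data() must have run earlier in BL2 */
	return get_nv_data()->warm_rst_flag != 0U;
}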
diff --git a/plat/nxp/common/nv_storage/plat_nv_storage.h b/plat/nxp/common/nv_storage/plat_nv_storage.h
new file mode 100644
index 0000000..1f5264a
--- /dev/null
+++ b/plat/nxp/common/nv_storage/plat_nv_storage.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PLAT_NV_STRG_H
+#define PLAT_NV_STRG_H
+
+#define DEFAULT_SET_VALUE 0xA1
+#define READY_TO_WRITE_VALUE 0xFF
+
+#ifndef NV_STORAGE_BASE_ADDR
+#define NV_STORAGE_BASE_ADDR DEFAULT_NV_STORAGE_BASE_ADDR
+#endif
+
+typedef struct {
+	uint8_t warm_rst_flag;
+	uint8_t wdt_rst_flag;
+	uint8_t dummy[2];
+} nv_app_data_t;
+
+
+/* The enum below and the structure above must be kept in sync. */
+enum app_data_offset {
+	WARM_RESET_FLAG_OFFSET,
+	WDT_RESET_FLAG_OFFSET,
+	APP_DATA_MAX_OFFSET,
+};
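Since the structure and the enum must stay in sync, a compile-time guard along the following lines could be added. CASSERT is TF-A's static assertion from lib/cassert.h (offsetof needs stddef.h); the guard itself is only a suggestion, not part of this patch.

/* Suggested guard: each flag byte must sit at its matching enum offset. */
CASSERT(offsetof(nv_app_data_t, warm_rst_flag) == WARM_RESET_FLAG_OFFSET,
	assert_warm_rst_flag_offset);
CASSERT(offsetof(nv_app_data_t, wdt_rst_flag) == WDT_RESET_FLAG_OFFSET,
	assert_wdt_rst_flag_offset);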
+
+int read_nv_app_data(void);
+
+int wr_nv_app_data(int data_offset,
+			uint8_t *data,
+			int data_size);
+
+const nv_app_data_t *get_nv_data(void);
+
+#endif /* PLAT_NV_STRG_H */
diff --git a/plat/nxp/common/psci/aarch64/psci_utils.S b/plat/nxp/common/psci/aarch64/psci_utils.S
new file mode 100644
index 0000000..ea2abbf
--- /dev/null
+++ b/plat/nxp/common/psci/aarch64/psci_utils.S
@@ -0,0 +1,1155 @@
+
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+
+#include <lib/psci/psci.h>
+
+#include <bl31_data.h>
+#include <plat_psci.h>
+
+
+#define RESET_RETRY_CNT   800
+#define PSCI_ABORT_CNT	100
+
+#if (SOC_CORE_RELEASE)
+
+.global _psci_cpu_on
+
+/*
+ * int _psci_cpu_on(u_register_t core_mask)
+ * x0   = target cpu core mask
+ *
+ * Called from C, so save the non-volatile regs
+ * save these as pairs of registers to maintain the
+ * required 16-byte alignment on the stack
+ *
+ */
+
+func _psci_cpu_on
+	stp  x4,  x5,  [sp, #-16]!
+	stp  x6,  x7,  [sp, #-16]!
+	stp  x8,  x9,  [sp, #-16]!
+	stp  x10, x11, [sp, #-16]!
+	stp  x12, x13, [sp, #-16]!
+	stp  x14, x15, [sp, #-16]!
+	stp  x16, x17, [sp, #-16]!
+	stp  x18, x30, [sp, #-16]!
+
+	mov  x6, x0
+
+	/* x0   = core mask (lsb)
+	 * x6   = core mask (lsb)
+	 */
+
+	/* check if core disabled */
+	bl   _soc_ck_disabled		/* 0-2 */
+	cbnz w0, psci_disabled
+
+	/* check core data area to see if core cannot be turned on
+	 * read the core state
+	 */
+	mov  x0, x6
+	bl   _getCoreState		/* 0-5 */
+	mov  x9, x0
+
+	/* x6   = core mask (lsb)
+	 * x9   = core state (from data area)
+	 */
+
+	cmp  x9, #CORE_DISABLED
+	mov  x0, #PSCI_E_DISABLED
+	b.eq cpu_on_done
+
+	cmp  x9, #CORE_PENDING
+	mov  x0, #PSCI_E_ON_PENDING
+	b.eq cpu_on_done
+
+	cmp  x9, #CORE_RELEASED
+	mov  x0, #PSCI_E_ALREADY_ON
+	b.eq cpu_on_done
+
+8:
+	/* x6   = core mask (lsb)
+	 * x9   = core state (from data area)
+	 */
+
+	cmp  x9, #CORE_WFE
+	b.eq core_in_wfe
+	cmp  x9, #CORE_IN_RESET
+	b.eq core_in_reset
+	cmp  x9, #CORE_OFF
+	b.eq core_is_off
+	cmp  x9, #CORE_OFF_PENDING
+
+	/* if state == CORE_OFF_PENDING, set abort */
+	mov  x0, x6
+	mov  x1, #ABORT_FLAG_DATA
+	mov  x2, #CORE_ABORT_OP
+	bl   _setCoreData		/* 0-3, [13-15] */
+
+	ldr  x3, =PSCI_ABORT_CNT
+7:
+	/* watch for abort to take effect */
+	mov  x0, x6
+	bl   _getCoreState		/* 0-5 */
+	cmp  x0, #CORE_OFF
+	b.eq core_is_off
+	cmp  x0, #CORE_PENDING
+	mov  x0, #PSCI_E_SUCCESS
+	b.eq cpu_on_done
+
+	/* loop until finished */
+	sub  x3, x3, #1
+	cbnz x3, 7b
+
+	/* if we didn't see either CORE_OFF or CORE_PENDING, then this
+	 * core is in CORE_OFF_PENDING - exit with success, as the core will
+	 * respond to the abort request
+	 */
+	mov  x0, #PSCI_E_SUCCESS
+	b    cpu_on_done
+
+/* this is where we start up a core out of reset */
+core_in_reset:
+	/* see if the soc-specific module supports this op */
+	ldr  x7, =SOC_CORE_RELEASE
+	cbnz  x7, 3f
+
+	mov  x0, #PSCI_E_NOT_SUPPORTED
+	b    cpu_on_done
+
+	/* x6   = core mask (lsb) */
+3:
+	/* set core state in data area */
+	mov  x0, x6
+	mov  x1, #CORE_PENDING
+	bl   _setCoreState   			/* 0-3, [13-15] */
+
+	/* release the core from reset */
+	mov   x0, x6
+	bl    _soc_core_release 		/* 0-3 */
+	mov   x0, #PSCI_E_SUCCESS
+	b     cpu_on_done
+
+	/* Start up the core that has been powered-down via CPU_OFF
+	 */
+core_is_off:
+	/* see if the soc-specific module supports this op
+	 */
+	ldr  x7, =SOC_CORE_RESTART
+	cbnz x7, 2f
+
+	mov  x0, #PSCI_E_NOT_SUPPORTED
+	b    cpu_on_done
+
+	/* x6   = core mask (lsb) */
+2:
+	/* set core state in data area */
+	mov  x0, x6
+	mov  x1, #CORE_WAKEUP
+	bl   _setCoreState			/* 0-3, [13-15] */
+
+	/* put the core back into service */
+	mov  x0, x6
+#if (SOC_CORE_RESTART)
+	bl   _soc_core_restart			/* 0-5 */
+#endif
+	mov  x0, #PSCI_E_SUCCESS
+	b    cpu_on_done
+
+/* this is where we release a core that is being held in wfe */
+core_in_wfe:
+	/* x6   = core mask (lsb) */
+
+	/* set core state in data area */
+	mov  x0, x6
+	mov  x1, #CORE_PENDING
+	bl   _setCoreState			/* 0-3, [13-15] */
+	dsb  sy
+	isb
+
+	/* put the core back into service */
+	sev
+	sev
+	isb
+	mov  x0, #PSCI_E_SUCCESS
+
+cpu_on_done:
+	/* restore the aarch32/64 non-volatile registers */
+	ldp  x18, x30, [sp], #16
+	ldp  x16, x17, [sp], #16
+	ldp  x14, x15, [sp], #16
+	ldp  x12, x13, [sp], #16
+	ldp  x10, x11, [sp], #16
+	ldp  x8,  x9,  [sp], #16
+	ldp  x6,  x7,  [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	b    psci_completed
+endfunc _psci_cpu_on
+
+#endif
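For readability, the state handling implemented by _psci_cpu_on above corresponds roughly to the C sketch below. The helper names (get_core_state, set_core_state, soc_core_release, soc_core_restart, request_off_abort) are stand-ins for the assembly routines and nothing here is part of the patch.

/* Rough C rendering of the _psci_cpu_on state machine (sketch only). */
static int psci_cpu_on_sketch(u_register_t core_mask)
{
	unsigned int state = get_core_state(core_mask);

	switch (state) {
	case CORE_DISABLED:
		return PSCI_E_DISABLED;
	case CORE_PENDING:
		return PSCI_E_ON_PENDING;
	case CORE_RELEASED:
		return PSCI_E_ALREADY_ON;
	case CORE_IN_RESET:			/* first release from reset */
		set_core_state(core_mask, CORE_PENDING);
		soc_core_release(core_mask);
		return PSCI_E_SUCCESS;
	case CORE_OFF:				/* restart after CPU_OFF */
		set_core_state(core_mask, CORE_WAKEUP);
		soc_core_restart(core_mask);
		return PSCI_E_SUCCESS;
	case CORE_WFE:				/* parked in wfe: wake it */
		set_core_state(core_mask, CORE_PENDING);
		sev();
		return PSCI_E_SUCCESS;
	default:				/* CORE_OFF_PENDING: abort */
		return request_off_abort(core_mask);
	}
}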
+
+
+#if (SOC_CORE_OFF)
+
+.global _psci_cpu_prep_off
+.global _psci_cpu_off_wfi
+
+/*
+ * void _psci_cpu_prep_off(u_register_t core_mask)
+ * this function performs the SoC-specific programming prior
+ * to shutting the core down
+ * x0 = core_mask
+ *
+ * called from C, so save the non-volatile regs
+ * save these as pairs of registers to maintain the
+ * required 16-byte alignment on the stack
+ */
+
+func _psci_cpu_prep_off
+
+	stp  x4,  x5,  [sp, #-16]!
+	stp  x6,  x7,  [sp, #-16]!
+	stp  x8,  x9,  [sp, #-16]!
+	stp  x10, x11, [sp, #-16]!
+	stp  x12, x13, [sp, #-16]!
+	stp  x14, x15, [sp, #-16]!
+	stp  x16, x17, [sp, #-16]!
+	stp  x18, x30, [sp, #-16]!
+
+	mov  x10, x0			/* x10 = core_mask */
+
+	/* the core does not return from cpu_off, so no need
+	 * to save/restore non-volatile registers
+	 */
+
+	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
+	msr DAIFSet, #0xF
+
+	/* read cpuectlr and save current value */
+	mrs   x4, CORTEX_A72_ECTLR_EL1
+	mov   x1, #CPUECTLR_DATA
+	mov   x2, x4
+	mov   x0, x10
+	bl    _setCoreData
+
+	/* remove the core from coherency */
+	bic   x4, x4, #CPUECTLR_SMPEN_MASK
+	msr   CORTEX_A72_ECTLR_EL1, x4
+
+	/* save scr_el3 */
+	mov  x0, x10
+	mrs  x4, SCR_EL3
+	mov  x2, x4
+	mov  x1, #SCR_EL3_DATA
+	bl    _setCoreData
+
+	/* x4 = scr_el3 */
+
+	/* secure SGI (FIQ) taken to EL3, set SCR_EL3[FIQ] */
+	orr   x4, x4, #SCR_FIQ_MASK
+	msr   scr_el3, x4
+
+	/* x10 = core_mask */
+
+	/* prep the core for shutdown */
+	mov  x0, x10
+	bl   _soc_core_prep_off
+
+	/* restore the aarch32/64 non-volatile registers */
+	ldp  x18, x30, [sp], #16
+	ldp  x16, x17, [sp], #16
+	ldp  x14, x15, [sp], #16
+	ldp  x12, x13, [sp], #16
+	ldp  x10, x11, [sp], #16
+	ldp  x8,  x9,  [sp], #16
+	ldp  x6,  x7,  [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	b    psci_completed
+endfunc _psci_cpu_prep_off
+
+/*
+ * void _psci_cpu_off_wfi(u_register_t core_mask, u_register_t resume_addr)
+ *   - this function shuts down the core
+ *   - this function does not return!!
+ */
+
+func _psci_cpu_off_wfi
+	/* save the wakeup address */
+	mov  x29, x1
+
+	/* x0 = core_mask */
+
+	/* shutdown the core */
+	bl   _soc_core_entr_off
+
+	/* branch to resume execution */
+	br   x29
+endfunc _psci_cpu_off_wfi
+
+#endif
+
+
+#if (SOC_CORE_RESTART)
+
+.global _psci_wakeup
+
+/*
+ * void _psci_wakeup(u_register_t core_mask)
+ * this function performs the SoC-specific programming
+ * after a core wakes up from OFF
+ * x0 = core mask
+ *
+ * called from C, so save the non-volatile regs
+ * save these as pairs of registers to maintain the
+ * required 16-byte alignment on the stack
+ */
+
+func _psci_wakeup
+
+	stp  x4,  x5,  [sp, #-16]!
+	stp  x6,  x7,  [sp, #-16]!
+	stp  x8,  x9,  [sp, #-16]!
+	stp  x10, x11, [sp, #-16]!
+	stp  x12, x13, [sp, #-16]!
+	stp  x14, x15, [sp, #-16]!
+	stp  x16, x17, [sp, #-16]!
+	stp  x18, x30, [sp, #-16]!
+
+	mov  x4, x0			/* x4 = core mask */
+
+	/* restore scr_el3 */
+	mov  x0, x4
+	mov  x1, #SCR_EL3_DATA
+	bl   _getCoreData
+	/* x0 = saved scr_el3 */
+	msr  SCR_EL3, x0
+
+	/* x4 = core mask */
+
+	/* restore CPUECTLR */
+	mov   x0, x4
+	mov   x1, #CPUECTLR_DATA
+	bl    _getCoreData
+	orr   x0, x0, #CPUECTLR_SMPEN_MASK
+	msr   CORTEX_A72_ECTLR_EL1, x0
+
+	/* x4 = core mask */
+
+	/* start the core back up */
+	mov   x0, x4
+	bl   _soc_core_exit_off
+
+	/* restore the aarch32/64 non-volatile registers
+	 */
+	ldp  x18, x30, [sp], #16
+	ldp  x16, x17, [sp], #16
+	ldp  x14, x15, [sp], #16
+	ldp  x12, x13, [sp], #16
+	ldp  x10, x11, [sp], #16
+	ldp  x8,  x9,  [sp], #16
+	ldp  x6,  x7,  [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	b    psci_completed
+endfunc _psci_wakeup
+
+#endif
+
+
+#if (SOC_SYSTEM_RESET)
+
+.global _psci_system_reset
+
+func _psci_system_reset
+
+	/* system reset is mandatory
+	 * system reset is soc-specific
+	 * Note: under no circumstances do we return from this call
+	 */
+	bl   _soc_sys_reset
+endfunc _psci_system_reset
+
+#endif
+
+
+#if (SOC_SYSTEM_OFF)
+
+.global _psci_system_off
+
+func _psci_system_off
+
+	/* system off is mandatory
+	 * system off is soc-specific
+	 * Note: under no circumstances do we return from this call */
+	b    _soc_sys_off
+endfunc _psci_system_off
+
+#endif
+
+
+#if (SOC_CORE_STANDBY)
+
+.global _psci_core_entr_stdby
+.global _psci_core_prep_stdby
+.global _psci_core_exit_stdby
+
+/*
+ * void _psci_core_entr_stdby(u_register_t core_mask) - this
+ * is the fast-path for simple core standby
+ */
+
+func _psci_core_entr_stdby
+	stp  x4,  x5, [sp, #-16]!
+	stp  x6, x30, [sp, #-16]!
+
+	mov  x5, x0		/* x5 = core mask */
+
+	/* save scr_el3 */
+	mov  x0, x5
+	mrs  x4, SCR_EL3
+	mov  x2, x4
+	mov  x1, #SCR_EL3_DATA
+	bl    _setCoreData
+
+	/* x4 = SCR_EL3
+	 * x5 = core mask
+	 */
+
+	/* allow interrupts @ EL3 */
+	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
+	msr  SCR_EL3, x4
+
+	/* x5 = core mask */
+
+	/* put the core into standby */
+	mov  x0, x5
+	bl   _soc_core_entr_stdby
+
+	/* restore scr_el3 */
+	mov  x0, x5
+	mov  x1, #SCR_EL3_DATA
+	bl   _getCoreData
+	/* x0 = saved scr_el3 */
+	msr  SCR_EL3, x0
+
+	ldp  x6,  x30, [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	isb
+	ret
+endfunc _psci_core_entr_stdby
+
+/*
+ * void _psci_core_prep_stdby(u_register_t core_mask) - this
+ * sets up the core to enter standby state thru the normal path
+ */
+
+func _psci_core_prep_stdby
+	stp  x4,  x5, [sp, #-16]!
+	stp  x6, x30, [sp, #-16]!
+
+	mov  x5, x0
+
+	/* x5 = core mask */
+
+	/* save scr_el3 */
+	mov  x0, x5
+	mrs  x4, SCR_EL3
+	mov  x2, x4
+	mov  x1, #SCR_EL3_DATA
+	bl    _setCoreData
+
+	/* allow interrupts @ EL3 */
+	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
+	msr  SCR_EL3, x4
+
+	/* x5 = core mask */
+
+	/* call for any SoC-specific programming */
+	mov  x0, x5
+	bl   _soc_core_prep_stdby
+
+	ldp  x6,  x30, [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	isb
+	ret
+endfunc _psci_core_prep_stdby
+
+/*
+ * void _psci_core_exit_stdby(u_register_t core_mask) - this
+ * exits the core from standby state thru the normal path
+ */
+
+func _psci_core_exit_stdby
+	stp  x4,  x5, [sp, #-16]!
+	stp  x6, x30, [sp, #-16]!
+
+	mov  x5, x0
+
+	/* x5 = core mask */
+
+	/* restore scr_el3 */
+	mov  x0, x5
+	mov  x1, #SCR_EL3_DATA
+	bl   _getCoreData
+	/* x0 = saved scr_el3 */
+	msr  SCR_EL3, x0
+
+	/* x5 = core mask */
+
+	/* perform any SoC-specific programming after standby state */
+	mov  x0, x5
+	bl   _soc_core_exit_stdby
+
+	ldp  x6,  x30, [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	isb
+	ret
+endfunc _psci_core_exit_stdby
+
+#endif
+
+
+#if (SOC_CORE_PWR_DWN)
+
+.global _psci_core_prep_pwrdn
+.global _psci_cpu_pwrdn_wfi
+.global _psci_core_exit_pwrdn
+
+/*
+ * void _psci_core_prep_pwrdn_(u_register_t core_mask)
+ * this function prepares the core for power-down
+ * x0 = core mask
+ *
+ * called from C, so save the non-volatile regs
+ * save these as pairs of registers to maintain the
+ * required 16-byte alignment on the stack
+ */
+
+func _psci_core_prep_pwrdn
+	stp  x4,  x5,  [sp, #-16]!
+	stp  x6,  x7,  [sp, #-16]!
+	stp  x8,  x9,  [sp, #-16]!
+	stp  x10, x11, [sp, #-16]!
+	stp  x12, x13, [sp, #-16]!
+	stp  x14, x15, [sp, #-16]!
+	stp  x16, x17, [sp, #-16]!
+	stp  x18, x30, [sp, #-16]!
+
+	mov  x6, x0
+
+	/* x6 = core mask */
+
+	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
+	msr DAIFSet, #0xF
+
+	/* save scr_el3 */
+	mov  x0, x6
+	mrs  x4, SCR_EL3
+	mov  x2, x4
+	mov  x1, #SCR_EL3_DATA
+	bl    _setCoreData
+
+	/* allow interrupts @ EL3 */
+	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
+	msr  SCR_EL3, x4
+
+	/* save cpuectlr */
+	mov  x0, x6
+	mov  x1, #CPUECTLR_DATA
+	mrs  x2, CORTEX_A72_ECTLR_EL1
+	bl   _setCoreData
+
+	/* x6 = core mask */
+
+	/* SoC-specific programming for power-down */
+	mov  x0, x6
+	bl  _soc_core_prep_pwrdn
+
+	/* restore the aarch32/64 non-volatile registers
+	 */
+	ldp  x18, x30, [sp], #16
+	ldp  x16, x17, [sp], #16
+	ldp  x14, x15, [sp], #16
+	ldp  x12, x13, [sp], #16
+	ldp  x10, x11, [sp], #16
+	ldp  x8,  x9,  [sp], #16
+	ldp  x6,  x7,  [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	b    psci_completed
+endfunc _psci_core_prep_pwrdn
+
+/*
+ * void _psci_cpu_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr)
+ * this function powers down the core
+ */
+
+func _psci_cpu_pwrdn_wfi
+	/* save the wakeup address */
+	mov  x29, x1
+
+	/* x0 = core mask */
+
+	/* shutdown the core */
+	bl   _soc_core_entr_pwrdn
+
+	/* branch to resume execution */
+	br   x29
+endfunc _psci_cpu_pwrdn_wfi
+
+/*
+ * void _psci_core_exit_pwrdn_(u_register_t core_mask)
+ * this function cleans up after a core power-down
+ * x0 = core mask
+ *
+ * called from C, so save the non-volatile regs
+ * save these as pairs of registers to maintain the
+ * required 16-byte alignment on the stack
+ */
+
+func _psci_core_exit_pwrdn
+	stp  x4,  x5,  [sp, #-16]!
+	stp  x6,  x7,  [sp, #-16]!
+	stp  x8,  x9,  [sp, #-16]!
+	stp  x10, x11, [sp, #-16]!
+	stp  x12, x13, [sp, #-16]!
+	stp  x14, x15, [sp, #-16]!
+	stp  x16, x17, [sp, #-16]!
+	stp  x18, x30, [sp, #-16]!
+
+	mov  x5, x0			/* x5 = core mask */
+
+	/* restore scr_el3 */
+	mov  x0, x5
+	mov  x1, #SCR_EL3_DATA
+	bl   _getCoreData
+	/* x0 = saved scr_el3 */
+	msr  SCR_EL3, x0
+
+	/* x5 = core mask */
+
+	/* restore cpuectlr */
+	mov  x0, x5
+	mov  x1, #CPUECTLR_DATA
+	bl   _getCoreData
+	/* make sure smp is set */
+	orr  x0, x0, #CPUECTLR_SMPEN_MASK
+	msr  CORTEX_A72_ECTLR_EL1, x0
+
+	/* x5 = core mask */
+
+	/* SoC-specific cleanup */
+	mov  x0, x5
+	bl   _soc_core_exit_pwrdn
+
+	/* restore the aarch32/64 non-volatile registers
+	 */
+	ldp  x18, x30, [sp], #16
+	ldp  x16, x17, [sp], #16
+	ldp  x14, x15, [sp], #16
+	ldp  x12, x13, [sp], #16
+	ldp  x10, x11, [sp], #16
+	ldp  x8,  x9,  [sp], #16
+	ldp  x6,  x7,  [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	b    psci_completed
+endfunc _psci_core_exit_pwrdn
+
+#endif
+
+#if (SOC_CLUSTER_STANDBY)
+
+.global _psci_clstr_prep_stdby
+.global _psci_clstr_exit_stdby
+
+/*
+ * void _psci_clstr_prep_stdby(u_register_t core_mask) - this
+ * sets up the clstr to enter standby state thru the normal path
+ */
+
+func _psci_clstr_prep_stdby
+	stp  x4,  x5, [sp, #-16]!
+	stp  x6, x30, [sp, #-16]!
+
+	mov  x5, x0
+
+	/* x5 = core mask */
+
+	/* save scr_el3 */
+	mov  x0, x5
+	mrs  x4, SCR_EL3
+	mov  x2, x4
+	mov  x1, #SCR_EL3_DATA
+	bl    _setCoreData
+
+	/* allow interrupts @ EL3 */
+	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
+	msr  SCR_EL3, x4
+
+	/* x5 = core mask */
+
+	/* call for any SoC-specific programming */
+	mov  x0, x5
+	bl   _soc_clstr_prep_stdby
+
+	ldp  x6,  x30, [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	isb
+	ret
+endfunc _psci_clstr_prep_stdby
+
+/*
+ * void _psci_clstr_exit_stdby(u_register_t core_mask) - this
+ * exits the clstr from standby state thru the normal path
+ */
+
+func _psci_clstr_exit_stdby
+	stp  x4,  x5, [sp, #-16]!
+	stp  x6, x30, [sp, #-16]!
+
+	mov  x5, x0			/* x5 = core mask */
+
+	/* restore scr_el3 */
+	mov  x0, x5
+	mov  x1, #SCR_EL3_DATA
+	bl   _getCoreData
+	/* x0 = saved scr_el3 */
+	msr  SCR_EL3, x0
+
+	/* x5 = core mask */
+
+	/* perform any SoC-specific programming after standby state */
+	mov  x0, x5
+	bl   _soc_clstr_exit_stdby
+
+	ldp  x6,  x30, [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	isb
+	ret
+endfunc _psci_clstr_exit_stdby
+
+#endif
+
+#if (SOC_CLUSTER_PWR_DWN)
+
+.global _psci_clstr_prep_pwrdn
+.global _psci_clstr_exit_pwrdn
+
+/*
+ * void _psci_clstr_prep_pwrdn_(u_register_t core_mask)
+ * this function prepares the cluster+core for power-down
+ * x0 = core mask
+ *
+ * called from C, so save the non-volatile regs
+ * save these as pairs of registers to maintain the
+ * required 16-byte alignment on the stack
+ */
+
+func _psci_clstr_prep_pwrdn
+	stp  x4,  x5,  [sp, #-16]!
+	stp  x6,  x7,  [sp, #-16]!
+	stp  x8,  x9,  [sp, #-16]!
+	stp  x10, x11, [sp, #-16]!
+	stp  x12, x13, [sp, #-16]!
+	stp  x14, x15, [sp, #-16]!
+	stp  x16, x17, [sp, #-16]!
+	stp  x18, x30, [sp, #-16]!
+
+	mov  x6, x0			/* x6 = core mask */
+
+	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
+	msr DAIFSet, #0xF
+
+	/* save scr_el3 */
+	mov  x0, x6
+	mrs  x4, SCR_EL3
+	mov  x2, x4
+	mov  x1, #SCR_EL3_DATA
+	bl    _setCoreData
+
+	/* allow interrupts @ EL3 */
+	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
+	msr  SCR_EL3, x4
+
+	/* save cpuectlr */
+	mov  x0, x6
+	mov  x1, #CPUECTLR_DATA
+	mrs  x2, CORTEX_A72_ECTLR_EL1
+	mov  x4, x2
+	bl   _setCoreData
+
+	/* remove core from coherency */
+	bic   x4, x4, #CPUECTLR_SMPEN_MASK
+	msr   CORTEX_A72_ECTLR_EL1, x4
+
+	/* x6 = core mask */
+
+	/* SoC-specific programming for power-down */
+	mov  x0, x6
+	bl  _soc_clstr_prep_pwrdn
+
+	/* restore the aarch32/64 non-volatile registers
+	 */
+	ldp  x18, x30, [sp], #16
+	ldp  x16, x17, [sp], #16
+	ldp  x14, x15, [sp], #16
+	ldp  x12, x13, [sp], #16
+	ldp  x10, x11, [sp], #16
+	ldp  x8,  x9,  [sp], #16
+	ldp  x6,  x7,  [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	b    psci_completed
+endfunc _psci_clstr_prep_pwrdn
+
+/*
+ * void _psci_clstr_exit_pwrdn_(u_register_t core_mask)
+ * this function cleans up after a cluster power-down
+ * x0 = core mask
+ *
+ * called from C, so save the non-volatile regs
+ * save these as pairs of registers to maintain the
+ * required 16-byte alignment on the stack
+ */
+
+func _psci_clstr_exit_pwrdn
+	stp  x4,  x5,  [sp, #-16]!
+	stp  x6,  x7,  [sp, #-16]!
+	stp  x8,  x9,  [sp, #-16]!
+	stp  x10, x11, [sp, #-16]!
+	stp  x12, x13, [sp, #-16]!
+	stp  x14, x15, [sp, #-16]!
+	stp  x16, x17, [sp, #-16]!
+	stp  x18, x30, [sp, #-16]!
+
+	mov  x4, x0			/* x4 = core mask */
+
+	/* restore scr_el3 */
+	mov  x0, x4
+	mov  x1, #SCR_EL3_DATA
+	bl   _getCoreData
+	/* x0 = saved scr_el3 */
+	msr  SCR_EL3, x0
+
+	/* x4 = core mask */
+
+	/* restore cpuectlr */
+	mov  x0, x4
+	mov  x1, #CPUECTLR_DATA
+	bl   _getCoreData
+	/* make sure smp is set */
+	orr  x0, x0, #CPUECTLR_SMPEN_MASK
+	msr  CORTEX_A72_ECTLR_EL1, x0
+
+	/* x4 = core mask */
+
+	/* SoC-specific cleanup */
+	mov  x0, x4
+	bl   _soc_clstr_exit_pwrdn
+
+	/* restore the aarch32/64 non-volatile registers
+	 */
+	ldp  x18, x30, [sp], #16
+	ldp  x16, x17, [sp], #16
+	ldp  x14, x15, [sp], #16
+	ldp  x12, x13, [sp], #16
+	ldp  x10, x11, [sp], #16
+	ldp  x8,  x9,  [sp], #16
+	ldp  x6,  x7,  [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	b    psci_completed
+endfunc _psci_clstr_exit_pwrdn
+
+#endif
+
+#if (SOC_SYSTEM_STANDBY)
+
+.global _psci_sys_prep_stdby
+.global _psci_sys_exit_stdby
+
+/*
+ * void _psci_sys_prep_stdby(u_register_t core_mask) - this
+ * sets up the system to enter standby state thru the normal path
+ */
+
+func _psci_sys_prep_stdby
+	stp  x4,  x5, [sp, #-16]!
+	stp  x6, x30, [sp, #-16]!
+
+	mov  x5, x0			/* x5 = core mask */
+
+	/* save scr_el3 */
+	mov  x0, x5
+	mrs  x4, SCR_EL3
+	mov  x2, x4
+	mov  x1, #SCR_EL3_DATA
+	bl    _setCoreData
+
+	/* allow interrupts @ EL3 */
+	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
+	msr  SCR_EL3, x4
+
+	/* x5 = core mask */
+
+	/* call for any SoC-specific programming */
+	mov  x0, x5
+	bl   _soc_sys_prep_stdby
+
+	ldp  x6,  x30, [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	isb
+	ret
+endfunc _psci_sys_prep_stdby
+
+/*
+ * void _psci_sys_exit_stdby(u_register_t core_mask) - this
+ * exits the system from standby state thru the normal path
+ */
+
+func _psci_sys_exit_stdby
+	stp  x4,  x5, [sp, #-16]!
+	stp  x6, x30, [sp, #-16]!
+
+	mov  x5, x0
+
+	/* x5 = core mask */
+
+	/* restore scr_el3 */
+	mov  x0, x5
+	mov  x1, #SCR_EL3_DATA
+	bl   _getCoreData
+	/* x0 = saved scr_el3 */
+	msr  SCR_EL3, x0
+
+	/* x5 = core mask */
+
+	/* perform any SoC-specific programming after standby state */
+	mov  x0, x5
+	bl   _soc_sys_exit_stdby
+
+	ldp  x6,  x30, [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	isb
+	ret
+endfunc _psci_sys_exit_stdby
+
+#endif
+
+#if (SOC_SYSTEM_PWR_DWN)
+
+.global _psci_sys_prep_pwrdn
+.global _psci_sys_pwrdn_wfi
+.global _psci_sys_exit_pwrdn
+
+/*
+ * void _psci_sys_prep_pwrdn_(u_register_t core_mask)
+ * this function prepares the system+core for power-down
+ * x0 = core mask
+ *
+ * called from C, so save the non-volatile regs
+ * save these as pairs of registers to maintain the
+ * required 16-byte alignment on the stack
+ */
+
+func _psci_sys_prep_pwrdn
+	stp  x4,  x5,  [sp, #-16]!
+	stp  x6,  x7,  [sp, #-16]!
+	stp  x8,  x9,  [sp, #-16]!
+	stp  x10, x11, [sp, #-16]!
+	stp  x12, x13, [sp, #-16]!
+	stp  x14, x15, [sp, #-16]!
+	stp  x16, x17, [sp, #-16]!
+	stp  x18, x30, [sp, #-16]!
+
+	mov  x6, x0			/* x6 = core mask */
+
+	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
+	msr DAIFSet, #0xF
+
+	/* save scr_el3 */
+	mov  x0, x6
+	mrs  x4, SCR_EL3
+	mov  x2, x4
+	mov  x1, #SCR_EL3_DATA
+	bl    _setCoreData
+
+	/* allow interrupts @ EL3 */
+	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
+	msr  SCR_EL3, x4
+
+	/* save cpuectlr */
+	mov  x0, x6
+	mov  x1, #CPUECTLR_DATA
+	mrs  x2, CORTEX_A72_ECTLR_EL1
+	mov  x4, x2
+	bl   _setCoreData
+
+	/* remove core from coherency */
+	bic   x4, x4, #CPUECTLR_SMPEN_MASK
+	msr   CORTEX_A72_ECTLR_EL1, x4
+
+	/* x6 = core mask */
+
+	/* SoC-specific programming for power-down */
+	mov  x0, x6
+	bl  _soc_sys_prep_pwrdn
+
+	/* restore the aarch32/64 non-volatile registers
+	 */
+	ldp  x18, x30, [sp], #16
+	ldp  x16, x17, [sp], #16
+	ldp  x14, x15, [sp], #16
+	ldp  x12, x13, [sp], #16
+	ldp  x10, x11, [sp], #16
+	ldp  x8,  x9,  [sp], #16
+	ldp  x6,  x7,  [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	b    psci_completed
+endfunc _psci_sys_prep_pwrdn
+
+
+/*
+ * void _psci_sys_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr)
+ * this function powers down the system
+ */
+
+func _psci_sys_pwrdn_wfi
+	/* save the wakeup address */
+	mov  x29, x1
+
+	/* x0 = core mask */
+
+	/* shutdown the system */
+	bl   _soc_sys_pwrdn_wfi
+
+	/* branch to resume execution */
+	br   x29
+endfunc _psci_sys_pwrdn_wfi
+
+/*
+ * void _psci_sys_exit_pwrdn_(u_register_t core_mask)
+ * this function cleans up after a system power-down
+ * x0 = core mask
+ *
+ * Called from C, so save the non-volatile regs
+ * save these as pairs of registers to maintain the
+ * required 16-byte alignment on the stack
+ */
+
+func _psci_sys_exit_pwrdn
+
+	stp  x4,  x5,  [sp, #-16]!
+	stp  x6,  x7,  [sp, #-16]!
+	stp  x8,  x9,  [sp, #-16]!
+	stp  x10, x11, [sp, #-16]!
+	stp  x12, x13, [sp, #-16]!
+	stp  x14, x15, [sp, #-16]!
+	stp  x16, x17, [sp, #-16]!
+	stp  x18, x30, [sp, #-16]!
+
+	mov  x4, x0			/* x4 = core mask */
+
+	/* restore scr_el3 */
+	mov  x0, x4
+	mov  x1, #SCR_EL3_DATA
+	bl   _getCoreData
+
+	/* x0 = saved scr_el3 */
+	msr  SCR_EL3, x0
+
+	/* x4 = core mask */
+
+	/* restore cpuectlr */
+	mov  x0, x4
+	mov  x1, #CPUECTLR_DATA
+	bl   _getCoreData
+
+	/* make sure smp is set */
+	orr  x0, x0, #CPUECTLR_SMPEN_MASK
+	msr  CORTEX_A72_ECTLR_EL1, x0
+
+	/* x4 = core mask */
+
+	/* SoC-specific cleanup */
+	mov  x0, x4
+	bl   _soc_sys_exit_pwrdn
+
+	/* restore the aarch32/64 non-volatile registers
+	 */
+	ldp  x18, x30, [sp], #16
+	ldp  x16, x17, [sp], #16
+	ldp  x14, x15, [sp], #16
+	ldp  x12, x13, [sp], #16
+	ldp  x10, x11, [sp], #16
+	ldp  x8,  x9,  [sp], #16
+	ldp  x6,  x7,  [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	b    psci_completed
+endfunc _psci_sys_exit_pwrdn
+
+#endif
+
+
+/* psci std returns */
+func psci_disabled
+	ldr  w0, =PSCI_E_DISABLED
+	b    psci_completed
+endfunc psci_disabled
+
+
+func psci_not_present
+	ldr  w0, =PSCI_E_NOT_PRESENT
+	b    psci_completed
+endfunc psci_not_present
+
+
+func psci_on_pending
+	ldr  w0, =PSCI_E_ON_PENDING
+	b    psci_completed
+endfunc psci_on_pending
+
+
+func psci_already_on
+	ldr  w0, =PSCI_E_ALREADY_ON
+	b    psci_completed
+endfunc psci_already_on
+
+
+func psci_failure
+	ldr  w0, =PSCI_E_INTERN_FAIL
+	b    psci_completed
+endfunc psci_failure
+
+
+func psci_unimplemented
+	ldr  w0, =PSCI_E_NOT_SUPPORTED
+	b    psci_completed
+endfunc psci_unimplemented
+
+
+func psci_denied
+	ldr  w0, =PSCI_E_DENIED
+	b    psci_completed
+endfunc psci_denied
+
+
+func psci_invalid
+	ldr  w0, =PSCI_E_INVALID_PARAMS
+	b    psci_completed
+endfunc psci_invalid
+
+
+func psci_success
+	mov  x0, #PSCI_E_SUCCESS
+endfunc psci_success
+
+
+func psci_completed
+	/* x0 = status code */
+	ret
+endfunc psci_completed
diff --git a/plat/nxp/common/psci/include/plat_psci.h b/plat/nxp/common/psci/include/plat_psci.h
new file mode 100644
index 0000000..97d4c97
--- /dev/null
+++ b/plat/nxp/common/psci/include/plat_psci.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PLAT_PSCI_H
+#define PLAT_PSCI_H
+
+ /* core abort current op */
+#define CORE_ABORT_OP     0x1
+
+ /* psci power levels - these are actually affinity levels
+  * in the psci_power_state_t array
+  */
+#define PLAT_CORE_LVL  PSCI_CPU_PWR_LVL
+#define PLAT_CLSTR_LVL U(1)
+#define PLAT_SYS_LVL   U(2)
+#define PLAT_MAX_LVL   PLAT_SYS_LVL
+
+ /* core state */
+ /* OFF states 0x0 - 0xF */
+#define CORE_IN_RESET     0x0
+#define CORE_DISABLED     0x1
+#define CORE_OFF          0x2
+#define CORE_STANDBY      0x3
+#define CORE_PWR_DOWN     0x4
+#define CORE_WFE          0x6
+#define CORE_WFI          0x7
+#define CORE_LAST	  0x8
+#define CORE_OFF_PENDING  0x9
+#define CORE_WORKING_INIT 0xA
+#define SYS_OFF_PENDING   0xB
+#define SYS_OFF           0xC
+
+ /* ON states 0x10 - 0x1F */
+#define CORE_PENDING      0x10
+#define CORE_RELEASED     0x11
+#define CORE_WAKEUP       0x12
+ /* highest off state */
+#define CORE_OFF_MAX	  0xF
+ /* lowest on state */
+#define CORE_ON_MIN       CORE_PENDING
+
+#define  DAIF_SET_MASK          0x3C0
+#define  SCTLR_I_C_M_MASK       0x00001005
+#define  SCTLR_C_MASK           0x00000004
+#define  SCTLR_I_MASK           0x00001000
+#define  CPUACTLR_L1PCTL_MASK   0x0000E000
+#define  DCSR_RCPM2_BASE        0x20170000
+#define  CPUECTLR_SMPEN_MASK    0x40
+#define  CPUECTLR_SMPEN_EN      0x40
+#define  CPUECTLR_RET_MASK      0x7
+#define  CPUECTLR_RET_SET       0x2
+#define  CPUECTLR_TIMER_MASK    0x7
+#define  CPUECTLR_TIMER_8TICKS  0x2
+#define  SCR_IRQ_MASK           0x2
+#define  SCR_FIQ_MASK           0x4
+
+/* Power-management features supported by the SoC-specific code:
+ *   value == 0x0: the SoC code does not support this feature
+ *   value != 0x0: the SoC code supports this feature
+ */
+#define SOC_CORE_RELEASE      0x1
+#define SOC_CORE_RESTART      0x1
+#define SOC_CORE_OFF          0x1
+#define SOC_CORE_STANDBY      0x1
+#define SOC_CORE_PWR_DWN      0x1
+#define SOC_CLUSTER_STANDBY   0x1
+#define SOC_CLUSTER_PWR_DWN   0x1
+#define SOC_SYSTEM_STANDBY    0x1
+#define SOC_SYSTEM_PWR_DWN    0x1
+#define SOC_SYSTEM_OFF        0x1
+#define SOC_SYSTEM_RESET      0x1
+#define SOC_SYSTEM_RESET2     0x1
+
+#ifndef __ASSEMBLER__
+
+void __dead2 _psci_system_reset(void);
+void __dead2 _psci_system_off(void);
+int _psci_cpu_on(u_register_t core_mask);
+void _psci_cpu_prep_off(u_register_t core_mask);
+void __dead2 _psci_cpu_off_wfi(u_register_t core_mask,
+				u_register_t wakeup_address);
+void __dead2 _psci_cpu_pwrdn_wfi(u_register_t core_mask,
+				u_register_t wakeup_address);
+void __dead2 _psci_sys_pwrdn_wfi(u_register_t core_mask,
+				u_register_t wakeup_address);
+void _psci_wakeup(u_register_t core_mask);
+void _psci_core_entr_stdby(u_register_t core_mask);
+void _psci_core_prep_stdby(u_register_t core_mask);
+void _psci_core_exit_stdby(u_register_t core_mask);
+void _psci_core_prep_pwrdn(u_register_t core_mask);
+void _psci_core_exit_pwrdn(u_register_t core_mask);
+void _psci_clstr_prep_stdby(u_register_t core_mask);
+void _psci_clstr_exit_stdby(u_register_t core_mask);
+void _psci_clstr_prep_pwrdn(u_register_t core_mask);
+void _psci_clstr_exit_pwrdn(u_register_t core_mask);
+void _psci_sys_prep_stdby(u_register_t core_mask);
+void _psci_sys_exit_stdby(u_register_t core_mask);
+void _psci_sys_prep_pwrdn(u_register_t core_mask);
+void _psci_sys_exit_pwrdn(u_register_t core_mask);
+
+#endif
+
+#endif /* PLAT_PSCI_H */
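
The core-state constants and per-core accessors declared above are used together by the platform PSCI hooks in plat_psci.c: a hook derives the calling core's mask, records the new state, and restores CORE_RELEASED once the core is back in service. A minimal sketch of that pattern, assuming the accessors behave as declared (the wrapper name demo_enter_standby is hypothetical):

    #include <plat_common.h>
    #include <plat_psci.h>

    /* Sketch only: record a standby transition for the calling core. */
    static void demo_enter_standby(void)
    {
        u_register_t core_mask = plat_my_core_mask();

        _setCoreState(core_mask, CORE_STANDBY);   /* about to enter standby */
        _psci_core_entr_stdby(core_mask);         /* returns when the core wakes */
        _setCoreState(core_mask, CORE_RELEASED);  /* back in service */
    }
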
diff --git a/plat/nxp/common/psci/plat_psci.c b/plat/nxp/common/psci/plat_psci.c
new file mode 100644
index 0000000..9281e97
--- /dev/null
+++ b/plat/nxp/common/psci/plat_psci.c
@@ -0,0 +1,475 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <common/debug.h>
+
+#include <plat_gic.h>
+#include <plat_common.h>
+#include <plat_psci.h>
+#ifdef NXP_WARM_BOOT
+#include <plat_warm_rst.h>
+#endif
+
+#include <platform_def.h>
+
+#if (SOC_CORE_OFF || SOC_CORE_PWR_DWN)
+static void __dead2 _no_return_wfi(void)
+{
+_bl31_dead_wfi:
+	wfi();
+	goto _bl31_dead_wfi;
+}
+#endif
+
+#if (SOC_CORE_RELEASE || SOC_CORE_PWR_DWN)
+ /* the entry for core warm boot */
+static uintptr_t warmboot_entry = (uintptr_t) NULL;
+#endif
+
+#if (SOC_CORE_RELEASE)
+static int _pwr_domain_on(u_register_t mpidr)
+{
+	int core_pos = plat_core_pos(mpidr);
+	int rc = PSCI_E_INVALID_PARAMS;
+	u_register_t core_mask;
+
+	if (core_pos >= 0 && core_pos < PLATFORM_CORE_COUNT) {
+
+		_soc_set_start_addr(warmboot_entry);
+
+		dsb();
+		isb();
+
+		core_mask = (1 << core_pos);
+		rc = _psci_cpu_on(core_mask);
+	}
+
+	return (rc);
+}
+#endif
+
+#if (SOC_CORE_OFF)
+static void _pwr_domain_off(const psci_power_state_t *target_state)
+{
+	u_register_t core_mask  = plat_my_core_mask();
+	u_register_t core_state = _getCoreState(core_mask);
+
+	 /* set core state in internal data */
+	core_state = CORE_OFF_PENDING;
+	_setCoreState(core_mask, core_state);
+
+	_psci_cpu_prep_off(core_mask);
+}
+#endif
+
+#if (SOC_CORE_OFF || SOC_CORE_PWR_DWN)
+static void __dead2 _pwr_down_wfi(const psci_power_state_t *target_state)
+{
+	u_register_t core_mask  = plat_my_core_mask();
+	u_register_t core_state = _getCoreState(core_mask);
+
+	switch (core_state) {
+#if (SOC_CORE_OFF)
+	case CORE_OFF_PENDING:
+		/* set core state in internal data */
+		core_state = CORE_OFF;
+		_setCoreState(core_mask, core_state);
+
+		 /* turn the core off */
+		_psci_cpu_off_wfi(core_mask, warmboot_entry);
+		break;
+#endif
+#if (SOC_CORE_PWR_DWN)
+	case CORE_PWR_DOWN:
+		 /* power-down the core */
+		_psci_cpu_pwrdn_wfi(core_mask, warmboot_entry);
+		break;
+#endif
+#if (SOC_SYSTEM_PWR_DWN)
+	case SYS_OFF_PENDING:
+		/* set core state in internal data */
+		core_state = SYS_OFF;
+		_setCoreState(core_mask, core_state);
+
+		/* power-down the system */
+		_psci_sys_pwrdn_wfi(core_mask, warmboot_entry);
+		break;
+#endif
+	default:
+		_no_return_wfi();
+		break;
+	}
+}
+#endif
+
+#if (SOC_CORE_RELEASE || SOC_CORE_RESTART)
+static void _pwr_domain_wakeup(const psci_power_state_t *target_state)
+{
+	u_register_t core_mask  = plat_my_core_mask();
+	u_register_t core_state = _getCoreState(core_mask);
+
+	switch (core_state) {
+	case CORE_PENDING: /* this core is coming out of reset */
+
+		 /* soc per cpu setup */
+		soc_init_percpu();
+
+		 /* gic per cpu setup */
+		plat_gic_pcpu_init();
+
+		 /* set core state in internal data */
+		core_state = CORE_RELEASED;
+		_setCoreState(core_mask, core_state);
+		break;
+
+#if (SOC_CORE_RESTART)
+	case CORE_WAKEUP:
+
+		 /* this core is waking up from OFF */
+		_psci_wakeup(core_mask);
+
+		 /* set core state in internal data */
+		core_state = CORE_RELEASED;
+		_setCoreState(core_mask, core_state);
+
+		break;
+#endif
+	}
+}
+#endif
+
+#if (SOC_CORE_STANDBY)
+static void _pwr_cpu_standby(plat_local_state_t  cpu_state)
+{
+	u_register_t core_mask  = plat_my_core_mask();
+	u_register_t core_state;
+
+	if (cpu_state == PLAT_MAX_RET_STATE) {
+
+		/* set core state to standby */
+		core_state = CORE_STANDBY;
+		_setCoreState(core_mask, core_state);
+
+		_psci_core_entr_stdby(core_mask);
+
+		/* when we are here, the core is waking up
+		 * set core state to released
+		 */
+		core_state = CORE_RELEASED;
+		_setCoreState(core_mask, core_state);
+	}
+}
+#endif
+
+#if (SOC_CORE_PWR_DWN)
+static void _pwr_suspend(const psci_power_state_t *state)
+{
+
+	u_register_t core_mask  = plat_my_core_mask();
+	u_register_t core_state;
+
+	if (state->pwr_domain_state[PLAT_MAX_LVL] == PLAT_MAX_OFF_STATE) {
+#if (SOC_SYSTEM_PWR_DWN)
+		_psci_sys_prep_pwrdn(core_mask);
+
+		 /* set core state */
+		core_state = SYS_OFF_PENDING;
+		_setCoreState(core_mask, core_state);
+#endif
+	} else if (state->pwr_domain_state[PLAT_MAX_LVL]
+				== PLAT_MAX_RET_STATE) {
+#if (SOC_SYSTEM_STANDBY)
+		_psci_sys_prep_stdby(core_mask);
+
+		 /* set core state */
+		core_state = CORE_STANDBY;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+	else if (state->pwr_domain_state[PLAT_CLSTR_LVL] ==
+					PLAT_MAX_OFF_STATE) {
+#if (SOC_CLUSTER_PWR_DWN)
+		_psci_clstr_prep_pwrdn(core_mask);
+
+		 /* set core state */
+		core_state = CORE_PWR_DOWN;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+	else if (state->pwr_domain_state[PLAT_CLSTR_LVL] ==
+					PLAT_MAX_RET_STATE) {
+#if (SOC_CLUSTER_STANDBY)
+		_psci_clstr_prep_stdby(core_mask);
+
+		 /* set core state */
+		core_state = CORE_STANDBY;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+	else if (state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_OFF_STATE) {
+#if (SOC_CORE_PWR_DWN)
+		 /* prep the core for power-down */
+		_psci_core_prep_pwrdn(core_mask);
+
+		 /* set core state */
+		core_state = CORE_PWR_DOWN;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+	else if (state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_RET_STATE) {
+#if (SOC_CORE_STANDBY)
+		_psci_core_prep_stdby(core_mask);
+
+		 /* set core state */
+		core_state = CORE_STANDBY;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+}
+#endif
+
+#if (SOC_CORE_PWR_DWN)
+static void _pwr_suspend_finish(const psci_power_state_t *state)
+{
+
+	u_register_t core_mask  = plat_my_core_mask();
+	u_register_t core_state;
+
+
+	if (state->pwr_domain_state[PLAT_MAX_LVL] == PLAT_MAX_OFF_STATE) {
+#if (SOC_SYSTEM_PWR_DWN)
+		_psci_sys_exit_pwrdn(core_mask);
+
+		/* when we are here, the core is back up
+		 * set core state to released
+		 */
+		core_state = CORE_RELEASED;
+		_setCoreState(core_mask, core_state);
+#endif
+	} else if (state->pwr_domain_state[PLAT_MAX_LVL]
+				== PLAT_MAX_RET_STATE) {
+#if (SOC_SYSTEM_STANDBY)
+		_psci_sys_exit_stdby(core_mask);
+
+		/* when we are here, the core is waking up
+		 * set core state to released
+		 */
+		core_state = CORE_RELEASED;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+	else if (state->pwr_domain_state[PLAT_CLSTR_LVL] ==
+						PLAT_MAX_OFF_STATE) {
+#if (SOC_CLUSTER_PWR_DWN)
+		_psci_clstr_exit_pwrdn(core_mask);
+
+		/* when we are here, the core is waking up
+		 * set core state to released
+		 */
+		core_state = CORE_RELEASED;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+	else if (state->pwr_domain_state[PLAT_CLSTR_LVL] ==
+						PLAT_MAX_RET_STATE) {
+#if (SOC_CLUSTER_STANDBY)
+		_psci_clstr_exit_stdby(core_mask);
+
+		/* when we are here, the core is waking up
+		 * set core state to released
+		 */
+		core_state = CORE_RELEASED;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+	else if (state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_OFF_STATE) {
+#if (SOC_CORE_PWR_DWN)
+		_psci_core_exit_pwrdn(core_mask);
+
+		/* when we are here, the core is back up
+		 * set core state to released
+		 */
+		core_state = CORE_RELEASED;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+	else if (state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_RET_STATE) {
+#if (SOC_CORE_STANDBY)
+		_psci_core_exit_stdby(core_mask);
+
+		/* when we are here, the core is waking up
+		 * set core state to released
+		 */
+		core_state = CORE_RELEASED;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+}
+#endif
+
+#if (SOC_CORE_STANDBY || SOC_CORE_PWR_DWN)
+
+#define PWR_STATE_TYPE_MASK    0x00010000
+#define PWR_STATE_TYPE_STNDBY  0x0
+#define PWR_STATE_TYPE_PWRDWN  0x00010000
+#define PWR_STATE_LVL_MASK     0x03000000
+#define PWR_STATE_LVL_CORE     0x0
+#define PWR_STATE_LVL_CLSTR    0x01000000
+#define PWR_STATE_LVL_SYS      0x02000000
+#define PWR_STATE_LVL_MAX      0x03000000
+
+ /* turns a requested power state into a target power state
+  * based on SoC capabilities
+  */
+static int _pwr_state_validate(uint32_t pwr_state,
+				    psci_power_state_t *state)
+{
+	int stat   = PSCI_E_INVALID_PARAMS;
+	int pwrdn  = (pwr_state & PWR_STATE_TYPE_MASK);
+	int lvl    = (pwr_state & PWR_STATE_LVL_MASK);
+
+	switch (lvl) {
+	case PWR_STATE_LVL_MAX:
+		if (pwrdn && SOC_SYSTEM_PWR_DWN)
+			state->pwr_domain_state[PLAT_MAX_LVL] =
+				PLAT_MAX_OFF_STATE;
+		else if (SOC_SYSTEM_STANDBY)
+			state->pwr_domain_state[PLAT_MAX_LVL] =
+				PLAT_MAX_RET_STATE;
+		 /* intentional fall-thru condition */
+	case PWR_STATE_LVL_SYS:
+		if (pwrdn && SOC_SYSTEM_PWR_DWN)
+			state->pwr_domain_state[PLAT_SYS_LVL] =
+				PLAT_MAX_OFF_STATE;
+		else if (SOC_SYSTEM_STANDBY)
+			state->pwr_domain_state[PLAT_SYS_LVL] =
+				PLAT_MAX_RET_STATE;
+		 /* intentional fall-thru condition */
+	case PWR_STATE_LVL_CLSTR:
+		if (pwrdn && SOC_CLUSTER_PWR_DWN)
+			state->pwr_domain_state[PLAT_CLSTR_LVL] =
+				PLAT_MAX_OFF_STATE;
+		else if (SOC_CLUSTER_STANDBY)
+			state->pwr_domain_state[PLAT_CLSTR_LVL] =
+				PLAT_MAX_RET_STATE;
+		 /* intentional fall-thru condition */
+	case PWR_STATE_LVL_CORE:
+		stat = PSCI_E_SUCCESS;
+
+		if (pwrdn && SOC_CORE_PWR_DWN)
+			state->pwr_domain_state[PLAT_CORE_LVL] =
+				PLAT_MAX_OFF_STATE;
+		else if (SOC_CORE_STANDBY)
+			state->pwr_domain_state[PLAT_CORE_LVL] =
+				PLAT_MAX_RET_STATE;
+		break;
+	}
+	return (stat);
+}
+
+#endif
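
A worked decode of a composite power-state value against the masks above (standalone and illustrative only; the value 0x02010000 is hypothetical):

    #include <stdio.h>

    #define PWR_STATE_TYPE_MASK 0x00010000
    #define PWR_STATE_LVL_MASK  0x03000000

    int main(void)
    {
        unsigned int pwr_state = 0x02010000;  /* hypothetical request */

        /* 0x00010000 -> power-down rather than standby */
        printf("type  = 0x%08x\n", pwr_state & PWR_STATE_TYPE_MASK);
        /* 0x02000000 -> PWR_STATE_LVL_SYS: _pwr_state_validate() falls
         * through from the system level down to the core level, marking
         * the system, cluster and core entries of pwr_domain_state[]
         * (capabilities permitting).
         */
        printf("level = 0x%08x\n", pwr_state & PWR_STATE_LVL_MASK);
        return 0;
    }
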
+
+#if (SOC_SYSTEM_PWR_DWN)
+static void _pwr_state_sys_suspend(psci_power_state_t *req_state)
+{
+
+	/* If per-SoC settings are needed, extend this by calling
+	 * into psci_utils.S and from there down to the SoC-specific
+	 * .S files.
+	 */
+
+	req_state->pwr_domain_state[PLAT_MAX_LVL]   = PLAT_MAX_OFF_STATE;
+	req_state->pwr_domain_state[PLAT_SYS_LVL]   = PLAT_MAX_OFF_STATE;
+	req_state->pwr_domain_state[PLAT_CLSTR_LVL] = PLAT_MAX_OFF_STATE;
+	req_state->pwr_domain_state[PLAT_CORE_LVL]  = PLAT_MAX_OFF_STATE;
+
+}
+#endif
+
+#if defined(NXP_WARM_BOOT) && (SOC_SYSTEM_RESET2)
+static int psci_system_reset2(int is_vendor,
+			      int reset_type,
+			      u_register_t cookie)
+{
+	int ret = 0;
+
+	INFO("Executing the warm reset sequence.\n");
+	ret = prep_n_execute_warm_reset();
+
+	return ret;
+}
+#endif
+
+static plat_psci_ops_t _psci_pm_ops = {
+#if (SOC_SYSTEM_OFF)
+	.system_off = _psci_system_off,
+#endif
+#if (SOC_SYSTEM_RESET)
+	.system_reset = _psci_system_reset,
+#endif
+#if defined(NXP_WARM_BOOT) && (SOC_SYSTEM_RESET2)
+	.system_reset2 = psci_system_reset2,
+#endif
+#if (SOC_CORE_RELEASE || SOC_CORE_RESTART)
+	 /* core released or restarted */
+	.pwr_domain_on_finish = _pwr_domain_wakeup,
+#endif
+#if (SOC_CORE_OFF)
+	 /* core shutting down */
+	.pwr_domain_off	= _pwr_domain_off,
+#endif
+#if (SOC_CORE_OFF || SOC_CORE_PWR_DWN)
+	.pwr_domain_pwr_down_wfi = _pwr_down_wfi,
+#endif
+#if (SOC_CORE_STANDBY || SOC_CORE_PWR_DWN)
+	 /* cpu_suspend */
+	.validate_power_state = _pwr_state_validate,
+#if (SOC_CORE_STANDBY)
+	.cpu_standby = _pwr_cpu_standby,
+#endif
+#if (SOC_CORE_PWR_DWN)
+	.pwr_domain_suspend        = _pwr_suspend,
+	.pwr_domain_suspend_finish = _pwr_suspend_finish,
+#endif
+#endif
+#if (SOC_SYSTEM_PWR_DWN)
+	.get_sys_suspend_power_state = _pwr_state_sys_suspend,
+#endif
+#if (SOC_CORE_RELEASE)
+	 /* core executing psci_cpu_on */
+	.pwr_domain_on	= _pwr_domain_on
+#endif
+};
+
+#if (SOC_CORE_RELEASE  || SOC_CORE_PWR_DWN)
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const plat_psci_ops_t **psci_ops)
+{
+	warmboot_entry = sec_entrypoint;
+	*psci_ops = &_psci_pm_ops;
+	return 0;
+}
+
+#else
+
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const plat_psci_ops_t **psci_ops)
+{
+	*psci_ops = &_psci_pm_ops;
+	return 0;
+}
+#endif
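
The ops table above reaches the generic PSCI layer through plat_setup_psci_ops(). A sketch of how a caller obtains it (illustrative only; in TF-A the generic PSCI setup code performs this step, and the symbol demo_warm_entry is hypothetical):

    #include <stdint.h>
    #include <lib/psci/psci.h>

    extern void demo_warm_entry(void);  /* hypothetical warm-boot entry point */

    static const plat_psci_ops_t *demo_get_ops(void)
    {
        const plat_psci_ops_t *ops = NULL;

        (void)plat_setup_psci_ops((uintptr_t)demo_warm_entry, &ops);
        /* ops->validate_power_state, ops->pwr_domain_suspend, etc. now point
         * at the hooks defined above, gated by the SOC_* capability switches.
         */
        return ops;
    }
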
diff --git a/plat/nxp/common/psci/psci.mk b/plat/nxp/common/psci/psci.mk
new file mode 100644
index 0000000..a2791c2
--- /dev/null
+++ b/plat/nxp/common/psci/psci.mk
@@ -0,0 +1,35 @@
+#
+# Copyright 2018-2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+#------------------------------------------------------------------------------
+#
+# Select the PSCI files
+#
+# -----------------------------------------------------------------------------
+
+ifeq (${ADD_PSCI},)
+
+ADD_PSCI		:= 1
+PLAT_PSCI_PATH		:= $(PLAT_COMMON_PATH)/psci
+
+PSCI_SOURCES		:= ${PLAT_PSCI_PATH}/plat_psci.c	\
+			   ${PLAT_PSCI_PATH}/$(ARCH)/psci_utils.S	\
+			   plat/common/plat_psci_common.c
+
+PLAT_INCLUDES		+= -I${PLAT_PSCI_PATH}/include
+
+ifeq (${BL_COMM_PSCI_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${PSCI_SOURCES}
+else
+ifeq (${BL2_PSCI_NEEDED},yes)
+BL2_SOURCES		+= ${PSCI_SOURCES}
+endif
+ifeq (${BL31_PSCI_NEEDED},yes)
+BL31_SOURCES		+= ${PSCI_SOURCES}
+endif
+endif
+endif
+# -----------------------------------------------------------------------------
diff --git a/plat/nxp/common/setup/aarch64/ls_bl2_mem_params_desc.c b/plat/nxp/common/setup/aarch64/ls_bl2_mem_params_desc.c
new file mode 100644
index 0000000..7463d47
--- /dev/null
+++ b/plat/nxp/common/setup/aarch64/ls_bl2_mem_params_desc.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <common/bl_common.h>
+#include <common/desc_image_load.h>
+#ifdef CSF_HEADER_PREPENDED
+#include <csf_hdr.h>
+#endif
+#include <plat/common/platform.h>
+#include <platform_def.h>
+
+/*******************************************************************************
+ * The following descriptor provides the BL image/ep information that is
+ * used by BL2 to load the images; a subset of this information is also
+ * passed to the next BL image. The image loading sequence is managed by
+ * populating the images in the required loading order. The image execution
+ * sequence is managed by populating the `next_handoff_image_id` with the
+ * next executable image id.
+ ******************************************************************************/
+static bl_mem_params_node_t bl2_mem_params_descs[] = {
+	/* Fill BL31 related information */
+	{
+		.image_id = BL31_IMAGE_ID,
+
+		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+				VERSION_2, entry_point_info_t,
+				SECURE | EXECUTABLE | EP_FIRST_EXE),
+		.ep_info.pc = BL31_BASE,
+		.ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+				DISABLE_ALL_EXCEPTIONS),
+#if DEBUG
+		.ep_info.args.arg1 = LS_BL31_PLAT_PARAM_VAL,
+#endif
+
+		SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+			VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP),
+#ifdef CSF_HEADER_PREPENDED
+		.image_info.image_base = BL31_BASE - CSF_HDR_SZ,
+		.image_info.image_max_size = (BL31_LIMIT - BL31_BASE) +
+								CSF_HDR_SZ,
+#else
+		.image_info.image_base = BL31_BASE,
+		.image_info.image_max_size = (BL31_LIMIT - BL31_BASE),
+#endif
+
+# ifdef NXP_LOAD_BL32
+		.next_handoff_image_id = BL32_IMAGE_ID,
+# else
+		.next_handoff_image_id = BL33_IMAGE_ID,
+# endif
+	},
+# ifdef NXP_LOAD_BL32
+	/* Fill BL32 related information */
+	{
+		.image_id = BL32_IMAGE_ID,
+
+		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+			VERSION_2, entry_point_info_t, SECURE | EXECUTABLE),
+		.ep_info.pc = BL32_BASE,
+
+		SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+				VERSION_2, image_info_t, 0),
+#ifdef CSF_HEADER_PREPENDED
+		.image_info.image_base = BL32_BASE - CSF_HDR_SZ,
+		.image_info.image_max_size = (BL32_LIMIT - BL32_BASE) +
+								CSF_HDR_SZ,
+#else
+		.image_info.image_base = BL32_BASE,
+		.image_info.image_max_size = (BL32_LIMIT - BL32_BASE),
+#endif
+		.next_handoff_image_id = BL33_IMAGE_ID,
+	},
+# endif /* NXP_LOAD_BL32 */
+
+	/* Fill BL33 related information */
+	{
+		.image_id = BL33_IMAGE_ID,
+		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+			VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE),
+		.ep_info.pc = BL33_BASE,
+
+		SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+				VERSION_2, image_info_t, 0),
+#ifdef CSF_HEADER_PREPENDED
+		.image_info.image_base = BL33_BASE - CSF_HDR_SZ,
+		.image_info.image_max_size = (BL33_LIMIT - BL33_BASE) +
+								 CSF_HDR_SZ,
+#else
+		.image_info.image_base = BL33_BASE,
+		.image_info.image_max_size = BL33_LIMIT - BL33_BASE,
+#endif
+		.ep_info.spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
+					DISABLE_ALL_EXCEPTIONS),
+
+		.next_handoff_image_id = INVALID_IMAGE_ID,
+	}
+};
+
+REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs)
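
The hand-off order encoded in next_handoff_image_id above can be traversed with the desc_image_load helpers used elsewhere in this series; a small sketch (illustrative only) that prints the chain BL31 -> BL32 (if built) -> BL33:

    #include <common/debug.h>
    #include <common/desc_image_load.h>

    static void demo_print_handoff_chain(void)
    {
        unsigned int id = BL31_IMAGE_ID;

        while (id != INVALID_IMAGE_ID) {
            bl_mem_params_node_t *node = get_bl_mem_params_node(id);

            if (node == NULL) {
                break;
            }
            INFO("image %u hands off to image %u\n",
                 id, node->next_handoff_image_id);
            id = node->next_handoff_image_id;
        }
    }
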
diff --git a/plat/nxp/common/setup/common.mk b/plat/nxp/common/setup/common.mk
new file mode 100644
index 0000000..1fcf1d0
--- /dev/null
+++ b/plat/nxp/common/setup/common.mk
@@ -0,0 +1,105 @@
+#
+# Copyright 2018-2021 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+
+###############################################################################
+# Flow begins in BL2 at EL3 mode
+BL2_AT_EL3			:= 1
+
+# Though one core is powered up by default, there are
+# platform specific ways to release more than one core
+COLD_BOOT_SINGLE_CPU		:= 0
+
+PROGRAMMABLE_RESET_ADDRESS	:= 1
+
+USE_COHERENT_MEM		:= 0
+
+# Use generic OID definition (tbbr_oid.h)
+USE_TBBR_DEFS			:= 1
+
+PLAT_XLAT_TABLES_DYNAMIC	:= 0
+
+ENABLE_SVE_FOR_NS		:= 0
+
+ENABLE_STACK_PROTECTOR		:= 0
+
+ERROR_DEPRECATED		:= 0
+
+LS_DISABLE_TRUSTED_WDOG		:= 1
+
+# On ARM platforms, separate the code and read-only data sections to allow
+# mapping the former as executable and the latter as execute-never.
+SEPARATE_CODE_AND_RODATA	:= 1
+
+# Enable new version of image loading on ARM platforms
+LOAD_IMAGE_V2			:= 1
+
+RCW				:= ""
+
+ifneq (${SPD},none)
+$(eval $(call add_define, NXP_LOAD_BL32))
+endif
+
+###############################################################################
+
+PLAT_TOOL_PATH		:=	tools/nxp
+CREATE_PBL_TOOL_PATH	:=	${PLAT_TOOL_PATH}/create_pbl
+PLAT_SETUP_PATH		:=	${PLAT_PATH}/common/setup
+
+PLAT_INCLUDES		+=	-I${PLAT_SETUP_PATH}/include			\
+				-Iinclude/plat/arm/common			\
+				-Iinclude/drivers/arm   			\
+				-Iinclude/lib					\
+				-Iinclude/drivers/io			\
+				-Ilib/psci
+
+# Required even without TBBR,
+# to include the defines for the DDR PHY images.
+PLAT_INCLUDES		+=	-Iinclude/common/tbbr
+
+include ${PLAT_SETUP_PATH}/core.mk
+PLAT_BL_COMMON_SOURCES	+= 	${CPU_LIBS} \
+				plat/nxp/common/setup/ls_err.c		\
+				plat/nxp/common/setup/ls_common.c
+
+ifneq (${ENABLE_STACK_PROTECTOR},0)
+PLAT_BL_COMMON_SOURCES	+=	${PLAT_SETUP_PATH}/ls_stack_protector.c
+endif
+
+include lib/xlat_tables_v2/xlat_tables.mk
+
+PLAT_BL_COMMON_SOURCES	+=	${XLAT_TABLES_LIB_SRCS}
+
+BL2_SOURCES		+=	drivers/io/io_fip.c			\
+				drivers/io/io_memmap.c			\
+				drivers/io/io_storage.c			\
+				common/desc_image_load.c 		\
+				plat/nxp/common/setup/ls_image_load.c		\
+				plat/nxp/common/setup/ls_io_storage.c		\
+				plat/nxp/common/setup/ls_bl2_el3_setup.c	\
+				plat/nxp/common/setup/${ARCH}/ls_bl2_mem_params_desc.c
+
+BL31_SOURCES		+=	plat/nxp/common/setup/ls_bl31_setup.c
+
+ifeq (${LS_EL3_INTERRUPT_HANDLER}, yes)
+$(eval $(call add_define, LS_EL3_INTERRUPT_HANDLER))
+BL31_SOURCES		+=	plat/nxp/common/setup/ls_interrupt_mgmt.c
+endif
+
+ifeq (${TEST_BL31}, 1)
+BL31_SOURCES		+=	${TEST_SOURCES}
+endif
+
+# Verify build config
+# -------------------
+
+ifneq (${LOAD_IMAGE_V2}, 1)
+  $(error Error: Layerscape needs LOAD_IMAGE_V2=1)
+else
+$(eval $(call add_define,LOAD_IMAGE_V2))
+endif
+
+include $(CREATE_PBL_TOOL_PATH)/create_pbl.mk
diff --git a/plat/nxp/common/setup/core.mk b/plat/nxp/common/setup/core.mk
new file mode 100644
index 0000000..9b81f2d
--- /dev/null
+++ b/plat/nxp/common/setup/core.mk
@@ -0,0 +1,20 @@
+# Copyright 2018-2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+#------------------------------------------------------------------------------
+#
+# Select the CORE files
+#
+# -----------------------------------------------------------------------------
+
+CPU_LIBS		:=	lib/cpus/${ARCH}/aem_generic.S
+
+ifeq (,$(filter $(CORE_TYPE),a53 a55 a57 a72 a75))
+$(error "CORE_TYPE not specified or incorrect")
+else
+CPU_LIBS		+=	lib/cpus/${ARCH}/cortex_$(CORE_TYPE).S
+endif
+
+# -----------------------------------------------------------------------------
diff --git a/plat/nxp/common/setup/include/bl31_data.h b/plat/nxp/common/setup/include/bl31_data.h
new file mode 100644
index 0000000..dd20d43
--- /dev/null
+++ b/plat/nxp/common/setup/include/bl31_data.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef BL31_DATA_H
+#define	BL31_DATA_H
+
+#define SECURE_DATA_BASE     NXP_OCRAM_ADDR
+#define SECURE_DATA_SIZE     NXP_OCRAM_SIZE
+#define SECURE_DATA_TOP      (SECURE_DATA_BASE + SECURE_DATA_SIZE)
+#define SMC_REGION_SIZE      0x80
+#define SMC_GLBL_BASE        (SECURE_DATA_TOP - SMC_REGION_SIZE)
+#define BC_PSCI_DATA_SIZE    0xC0
+#define BC_PSCI_BASE         (SMC_GLBL_BASE - BC_PSCI_DATA_SIZE)
+#define SECONDARY_TOP        BC_PSCI_BASE
+
+#define SEC_PSCI_DATA_SIZE   0xC0
+#define SEC_REGION_SIZE      SEC_PSCI_DATA_SIZE
+
+/* SMC global data */
+#define BOOTLOC_OFFSET       0x0
+#define BOOT_SVCS_OSET       0x8
+
+/* offset to prefetch disable mask */
+#define PREFETCH_DIS_OFFSET  0x10
+/* must reference last smc global entry */
+#define LAST_SMC_GLBL_OFFSET 0x18
+
+#define SMC_TASK_OFFSET      0xC
+#define TSK_START_OFFSET     0x0
+#define TSK_DONE_OFFSET      0x4
+#define TSK_CORE_OFFSET      0x8
+#define SMC_TASK1_BASE       (SMC_GLBL_BASE + 32)
+#define SMC_TASK2_BASE       (SMC_TASK1_BASE + SMC_TASK_OFFSET)
+#define SMC_TASK3_BASE       (SMC_TASK2_BASE + SMC_TASK_OFFSET)
+#define SMC_TASK4_BASE       (SMC_TASK3_BASE + SMC_TASK_OFFSET)
+
+/* psci data area offsets */
+#define CORE_STATE_DATA    0x0
+#define SPSR_EL3_DATA      0x8
+#define CNTXT_ID_DATA      0x10
+#define START_ADDR_DATA    0x18
+#define LINK_REG_DATA      0x20
+#define GICC_CTLR_DATA     0x28
+#define ABORT_FLAG_DATA    0x30
+#define SCTLR_DATA         0x38
+#define CPUECTLR_DATA      0x40
+#define AUX_01_DATA        0x48  /* usage defined per SoC */
+#define AUX_02_DATA        0x50  /* usage defined per SoC */
+#define AUX_03_DATA        0x58  /* usage defined per SoC */
+#define AUX_04_DATA        0x60  /* usage defined per SoC */
+#define AUX_05_DATA        0x68  /* usage defined per SoC */
+#define AUX_06_DATA        0x70  /* usage defined per SoC */
+#define AUX_07_DATA        0x78  /* usage defined per SoC */
+#define SCR_EL3_DATA       0x80
+#define HCR_EL2_DATA       0x88
+
+#endif /* BL31_DATA_H */
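
All of the per-core data offsets above must fit within the 0xC0-byte block reserved per core; a compile-time check of that assumption (how the per-core blocks are indexed is left to the assembly accessors such as _getCoreData and is not modelled here):

    #include <bl31_data.h>

    /* HCR_EL2_DATA (0x88) is the last 8-byte field in the per-core area. */
    _Static_assert(HCR_EL2_DATA + 8 <= SEC_PSCI_DATA_SIZE,
                   "per-core PSCI data exceeds SEC_PSCI_DATA_SIZE");
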
diff --git a/plat/nxp/common/setup/include/ls_interrupt_mgmt.h b/plat/nxp/common/setup/include/ls_interrupt_mgmt.h
new file mode 100644
index 0000000..7dbddfb
--- /dev/null
+++ b/plat/nxp/common/setup/include/ls_interrupt_mgmt.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef LS_EL3_INTRPT_MGMT_H
+#define LS_EL3_INTRPT_MGMT_H
+
+#include <bl31/interrupt_mgmt.h>
+
+#define MAX_INTR_EL3		128
+
+/*
+ * Register a handler for a specific interrupt ID
+ * of the INTR_TYPE_EL3 interrupt type
+ */
+int request_intr_type_el3(uint32_t id, interrupt_type_handler_t handler);
+
+void ls_el3_interrupt_config(void);
+
+#endif	/*	LS_EL3_INTRPT_MGMT_H	*/
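
A usage sketch for the API above (the interrupt ID 90 and the handler body are hypothetical; the handler signature follows interrupt_type_handler_t from interrupt_mgmt.h):

    #include <ls_interrupt_mgmt.h>

    static uint64_t demo_el3_handler(uint32_t id, uint32_t flags,
                                     void *handle, void *cookie)
    {
        /* service the hypothetical interrupt here */
        return 0U;
    }

    static void demo_register_el3_interrupt(void)
    {
        /* returns -EINVAL or -EALREADY on bad arguments or a duplicate ID */
        if (request_intr_type_el3(90, demo_el3_handler) != 0) {
            return;
        }
        /* routing to the EL3 demultiplexer is enabled separately via
         * ls_el3_interrupt_config(), called from bl31_plat_runtime_setup()
         */
    }
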
diff --git a/plat/nxp/common/setup/include/mmu_def.h b/plat/nxp/common/setup/include/mmu_def.h
new file mode 100644
index 0000000..2a7771b
--- /dev/null
+++ b/plat/nxp/common/setup/include/mmu_def.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef MMU_MAP_DEF_H
+#define MMU_MAP_DEF_H
+
+#include <lib/xlat_tables/xlat_tables_defs.h>
+
+#include <platform_def.h>
+
+
+#define LS_MAP_CCSR		MAP_REGION_FLAT(NXP_CCSR_ADDR, \
+					NXP_CCSR_SIZE, \
+					MT_DEVICE | MT_RW | MT_SECURE)
+
+#ifdef NXP_DCSR_ADDR
+#define LS_MAP_DCSR		MAP_REGION_FLAT(NXP_DCSR_ADDR, \
+					NXP_DCSR_SIZE, \
+					MT_DEVICE | MT_RW | MT_SECURE)
+#endif
+
+#define LS_MAP_CONSOLE		MAP_REGION_FLAT(NXP_DUART1_ADDR, \
+					NXP_DUART_SIZE, \
+					MT_DEVICE | MT_RW | MT_NS)
+
+#define LS_MAP_OCRAM		MAP_REGION_FLAT(NXP_OCRAM_ADDR, \
+					NXP_OCRAM_SIZE, \
+					MT_DEVICE | MT_RW | MT_SECURE)
+
+#endif /* MMU_MAP_DEF_H */
diff --git a/plat/nxp/common/setup/include/plat_common.h b/plat/nxp/common/setup/include/plat_common.h
new file mode 100644
index 0000000..18d36ca
--- /dev/null
+++ b/plat/nxp/common/setup/include/plat_common.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PLAT_COMMON_H
+#define PLAT_COMMON_H
+
+#include <stdbool.h>
+
+#include <lib/el3_runtime/cpu_data.h>
+#include <platform_def.h>
+
+#ifdef IMAGE_BL31
+
+#define BL31_END (uintptr_t)(&__BL31_END__)
+
+/*******************************************************************************
+ * This structure represents the superset of information that can be passed to
+ * BL31 e.g. while passing control to it from BL2. The BL32 parameters will be
+ * populated only if BL2 detects its presence. A pointer to a structure of this
+ * type should be passed in X0 to BL31's cold boot entrypoint.
+ *
+ * Use of this structure and the X0 parameter is not mandatory: the BL31
+ * platform code can use other mechanisms to provide the necessary information
+ * about BL32 and BL33 to the common and SPD code.
+ *
+ * BL31 image information is mandatory if this structure is used. If either of
+ * the optional BL32 and BL33 image information is not provided, this is
+ * indicated by the respective image_info pointers being zero.
+ ******************************************************************************/
+typedef struct bl31_params {
+	param_header_t h;
+	image_info_t *bl31_image_info;
+	entry_point_info_t *bl32_ep_info;
+	image_info_t *bl32_image_info;
+	entry_point_info_t *bl33_ep_info;
+	image_info_t *bl33_image_info;
+} bl31_params_t;
+
+/* BL3 utility functions */
+void ls_bl31_early_platform_setup(void *from_bl2,
+				void *plat_params_from_bl2);
+/* LS Helper functions	*/
+unsigned int plat_my_core_mask(void);
+unsigned int plat_core_mask(u_register_t mpidr);
+unsigned int plat_core_pos(u_register_t mpidr);
+//unsigned int plat_my_core_pos(void);
+
+/* BL31 Data API(s) */
+void _init_global_data(void);
+void _initialize_psci(void);
+uint32_t _getCoreState(u_register_t core_mask);
+void _setCoreState(u_register_t core_mask, u_register_t core_state);
+
+/* SoC defined structure and API(s) */
+void soc_runtime_setup(void);
+void soc_init(void);
+void soc_platform_setup(void);
+void soc_early_platform_setup2(void);
+#endif /* IMAGE_BL31 */
+
+#ifdef IMAGE_BL2
+void soc_early_init(void);
+void soc_mem_access(void);
+void soc_preload_setup(void);
+void soc_bl2_prepare_exit(void);
+
+/* IO storage utility functions */
+int plat_io_setup(void);
+int open_backend(const uintptr_t spec);
+
+void ls_bl2_plat_arch_setup(void);
+void ls_bl2_el3_plat_arch_setup(void);
+
+enum boot_device {
+	BOOT_DEVICE_IFC_NOR,
+	BOOT_DEVICE_IFC_NAND,
+	BOOT_DEVICE_QSPI,
+	BOOT_DEVICE_EMMC,
+	BOOT_DEVICE_SDHC2_EMMC,
+	BOOT_DEVICE_FLEXSPI_NOR,
+	BOOT_DEVICE_FLEXSPI_NAND,
+	BOOT_DEVICE_NONE
+};
+
+enum boot_device get_boot_dev(void);
+
+/* DDR Related functions */
+#if DDR_INIT
+#ifdef NXP_WARM_BOOT
+long long init_ddr(uint32_t wrm_bt_flg);
+#else
+long long init_ddr(void);
+#endif
+#endif
+
+/* Board specific weak functions */
+bool board_enable_povdd(void);
+bool board_disable_povdd(void);
+
+void mmap_add_ddr_region_dynamically(void);
+#endif /* IMAGE_BL2 */
+
+typedef struct {
+	uint64_t addr;
+	uint64_t size;
+} region_info_t;
+
+typedef struct {
+	uint64_t num_dram_regions;
+	uint64_t total_dram_size;
+	region_info_t region[NUM_DRAM_REGIONS];
+} dram_regions_info_t;
+
+dram_regions_info_t *get_dram_regions_info(void);
+
+void ls_setup_page_tables(uintptr_t total_base,
+			size_t total_size,
+			uintptr_t code_start,
+			uintptr_t code_limit,
+			uintptr_t rodata_start,
+			uintptr_t rodata_limit
+#if USE_COHERENT_MEM
+			, uintptr_t coh_start,
+			uintptr_t coh_limit
+#endif
+);
+
+
+/* Structure to define SoC personality */
+struct soc_type {
+	char name[10];
+	uint32_t personality;
+	uint32_t num_clusters;
+	uint32_t cores_per_cluster;
+};
+
+#define SOC_ENTRY(n, v, ncl, nc) {	\
+		.name = #n,		\
+		.personality = SVR_##v,	\
+		.num_clusters = (ncl),	\
+		.cores_per_cluster = (nc)}
+
+#endif /* PLAT_COMMON_H */
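
The DRAM layout exposed through get_dram_regions_info() is what the MMU-mapping helpers in ls_common.c iterate; a minimal sketch of walking it (illustrative only):

    #include <stdint.h>

    #include <common/debug.h>
    #include <plat_common.h>

    static void demo_dump_dram_layout(void)
    {
        dram_regions_info_t *info = get_dram_regions_info();

        for (uint64_t i = 0U; i < info->num_dram_regions; i++) {
            INFO("DRAM region %llu: base 0x%llx, size 0x%llx\n",
                 (unsigned long long)i,
                 (unsigned long long)info->region[i].addr,
                 (unsigned long long)info->region[i].size);
        }
    }
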
diff --git a/plat/nxp/common/setup/include/plat_macros.S b/plat/nxp/common/setup/include/plat_macros.S
new file mode 100644
index 0000000..69a3b08
--- /dev/null
+++ b/plat/nxp/common/setup/include/plat_macros.S
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PLAT_MACROS_S
+#define PLAT_MACROS_S
+
+	/* ---------------------------------------------
+	 * The below required platform porting macro
+	 * prints out relevant GIC and CCI registers
+	 * whenever an unhandled exception is taken in
+	 * BL31.
+	 * Clobbers: x0 - x10, x16, x17, sp
+	 * ---------------------------------------------
+	 */
+	.macro plat_crash_print_regs
+	.endm
+
+#endif /* PLAT_MACROS_S */
diff --git a/plat/nxp/common/setup/ls_bl2_el3_setup.c b/plat/nxp/common/setup/ls_bl2_el3_setup.c
new file mode 100644
index 0000000..6428eb9
--- /dev/null
+++ b/plat/nxp/common/setup/ls_bl2_el3_setup.c
@@ -0,0 +1,300 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+
+#include <common/desc_image_load.h>
+#include <dcfg.h>
+#ifdef POLICY_FUSE_PROVISION
+#include <fuse_io.h>
+#endif
+#include <mmu_def.h>
+#include <plat_common.h>
+#ifdef NXP_NV_SW_MAINT_LAST_EXEC_DATA
+#include <plat_nv_storage.h>
+#endif
+
+#pragma weak bl2_el3_early_platform_setup
+#pragma weak bl2_el3_plat_arch_setup
+#pragma weak bl2_el3_plat_prepare_exit
+
+static dram_regions_info_t dram_regions_info  = {0};
+
+/*******************************************************************************
+ * Return the pointer to the dram_regions_info structure of the DRAM.
+ * This structure is populated after init_ddr().
+ ******************************************************************************/
+dram_regions_info_t *get_dram_regions_info(void)
+{
+	return &dram_regions_info;
+}
+
+#ifdef DDR_INIT
+static void populate_dram_regions_info(void)
+{
+	long long dram_remain_size = dram_regions_info.total_dram_size;
+	uint8_t reg_id = 0U;
+
+	dram_regions_info.region[reg_id].addr = NXP_DRAM0_ADDR;
+	dram_regions_info.region[reg_id].size =
+			dram_remain_size > NXP_DRAM0_MAX_SIZE ?
+				NXP_DRAM0_MAX_SIZE : dram_remain_size;
+
+	if (dram_regions_info.region[reg_id].size != NXP_DRAM0_SIZE) {
+		ERROR("Incorrect DRAM0 size is defined in platform_def.h\n");
+	}
+
+	dram_remain_size -= dram_regions_info.region[reg_id].size;
+	dram_regions_info.region[reg_id].size -= (NXP_SECURE_DRAM_SIZE
+						+ NXP_SP_SHRD_DRAM_SIZE);
+
+	assert(dram_regions_info.region[reg_id].size > 0);
+
+	/* Exclude the secure and shared DRAM regions from the total DRAM size */
+	dram_regions_info.total_dram_size -= (NXP_SECURE_DRAM_SIZE
+						+ NXP_SP_SHRD_DRAM_SIZE);
+
+#if defined(NXP_DRAM1_ADDR) && defined(NXP_DRAM1_MAX_SIZE)
+	if (dram_remain_size > 0) {
+		reg_id++;
+		dram_regions_info.region[reg_id].addr = NXP_DRAM1_ADDR;
+		dram_regions_info.region[reg_id].size =
+				dram_remain_size > NXP_DRAM1_MAX_SIZE ?
+					NXP_DRAM1_MAX_SIZE : dram_remain_size;
+		dram_remain_size -= dram_regions_info.region[reg_id].size;
+	}
+#endif
+#if defined(NXP_DRAM2_ADDR) && defined(NXP_DRAM2_MAX_SIZE)
+	if (dram_remain_size > 0) {
+		reg_id++;
+		dram_regions_info.region[reg_id].addr = NXP_DRAM2_ADDR;
+		dram_regions_info.region[reg_id].size =
+				dram_remain_size > NXP_DRAM2_MAX_SIZE ?
+					NXP_DRAM2_MAX_SIZE : dram_remain_size;
+		dram_remain_size -= dram_regions_info.region[reg_id].size;
+	}
+#endif
+	reg_id++;
+	dram_regions_info.num_dram_regions = reg_id;
+}
+#endif
+
+#ifdef IMAGE_BL32
+/*******************************************************************************
+ * Gets SPSR for BL32 entry
+ ******************************************************************************/
+static uint32_t ls_get_spsr_for_bl32_entry(void)
+{
+	/*
+	 * The Secure Payload Dispatcher service is responsible for
+	 * setting the SPSR prior to entry into the BL32 image.
+	 */
+	return 0U;
+}
+#endif
+
+/*******************************************************************************
+ * Gets SPSR for BL33 entry
+ ******************************************************************************/
+#ifndef AARCH32
+static uint32_t ls_get_spsr_for_bl33_entry(void)
+{
+	unsigned int mode;
+	uint32_t spsr;
+
+	/* Figure out what mode we enter the non-secure world in */
+	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;
+
+	/*
+	 * TODO: Consider the possibility of specifying the SPSR in
+	 * the FIP ToC and allowing the platform to have a say as
+	 * well.
+	 */
+	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+	return spsr;
+}
+#else
+/*******************************************************************************
+ * Gets SPSR for BL33 entry
+ ******************************************************************************/
+static uint32_t ls_get_spsr_for_bl33_entry(void)
+{
+	unsigned int hyp_status, mode, spsr;
+
+	hyp_status = GET_VIRT_EXT(read_id_pfr1());
+
+	mode = (hyp_status) ? MODE32_hyp : MODE32_svc;
+
+	/*
+	 * TODO: Consider the possibility of specifying the SPSR in
+	 * the FIP ToC and allowing the platform to have a say as
+	 * well.
+	 */
+	spsr = SPSR_MODE32(mode, plat_get_ns_image_entrypoint() & 0x1,
+			SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
+	return spsr;
+}
+#endif /* AARCH32 */
+
+void bl2_el3_early_platform_setup(u_register_t arg0 __unused,
+				  u_register_t arg1 __unused,
+				  u_register_t arg2 __unused,
+				  u_register_t arg3 __unused)
+{
+	/*
+	 * SoC specific early init
+	 * Any errata handling or SoC specific early initialization can
+	 * be done here
+	 * Set Counter Base Frequency in CNTFID0 and in cntfrq_el0.
+	 * Initialize the interconnect.
+	 * Enable coherency for primary CPU cluster
+	 */
+	soc_early_init();
+
+	/* Initialise the IO layer and register platform IO devices */
+	plat_io_setup();
+
+	if (dram_regions_info.total_dram_size > 0) {
+		populate_dram_regions_info();
+	}
+
+#ifdef NXP_NV_SW_MAINT_LAST_EXEC_DATA
+	read_nv_app_data();
+#if DEBUG
+	const nv_app_data_t *nv_app_data = get_nv_data();
+
+	INFO("Value of warm_reset flag = 0x%x\n", nv_app_data->warm_rst_flag);
+	INFO("Value of WDT flag = 0x%x\n", nv_app_data->wdt_rst_flag);
+#endif
+#endif
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU in a quick and dirty way.
+ ******************************************************************************/
+void ls_bl2_el3_plat_arch_setup(void)
+{
+	unsigned int flags = 0U;
+	/* Set up the page tables for this BL image's memory regions */
+	ls_setup_page_tables(
+#if SEPARATE_RW_AND_NOLOAD
+			      BL2_START,
+			      BL2_LIMIT - BL2_START,
+#else
+			      BL2_BASE,
+			      (unsigned long)(&__BL2_END__) - BL2_BASE,
+#endif
+			      BL_CODE_BASE,
+			      BL_CODE_END,
+			      BL_RO_DATA_BASE,
+			      BL_RO_DATA_END
+#if USE_COHERENT_MEM
+			      , BL_COHERENT_RAM_BASE,
+			      BL_COHERENT_RAM_END
+#endif
+			      );
+
+	if ((dram_regions_info.region[0].addr == 0)
+		&& (dram_regions_info.total_dram_size == 0)) {
+		flags = XLAT_TABLE_NC;
+	}
+
+#ifdef AARCH32
+	enable_mmu_secure(0);
+#else
+	enable_mmu_el3(flags);
+#endif
+}
+
+void bl2_el3_plat_arch_setup(void)
+{
+	ls_bl2_el3_plat_arch_setup();
+}
+
+void bl2_platform_setup(void)
+{
+	/*
+	 * Perform platform setup before loading the image.
+	 */
+}
+
+/* Handling image information by platform. */
+int ls_bl2_handle_post_image_load(unsigned int image_id)
+{
+	int err = 0;
+	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
+
+	assert(bl_mem_params);
+
+	switch (image_id) {
+	case BL31_IMAGE_ID:
+		bl_mem_params->ep_info.args.arg3 =
+					(u_register_t) &dram_regions_info;
+
+		/* Pass the value of PORSR1 register in Argument 4 */
+		bl_mem_params->ep_info.args.arg4 =
+					(u_register_t)read_reg_porsr1();
+		flush_dcache_range((uintptr_t)&dram_regions_info,
+				sizeof(dram_regions_info));
+		break;
+#if defined(AARCH64) && defined(IMAGE_BL32)
+	case BL32_IMAGE_ID:
+		bl_mem_params->ep_info.spsr = ls_get_spsr_for_bl32_entry();
+		break;
+#endif
+	case BL33_IMAGE_ID:
+		/* BL33 expects to receive the primary CPU MPID (through r0) */
+		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
+		bl_mem_params->ep_info.spsr = ls_get_spsr_for_bl33_entry();
+		break;
+	}
+
+	return err;
+}
+
+/*******************************************************************************
+ * This function can be used by the platforms to update/use image
+ * information for given `image_id`.
+ ******************************************************************************/
+int bl2_plat_handle_post_image_load(unsigned int image_id)
+{
+	return ls_bl2_handle_post_image_load(image_id);
+}
+
+void bl2_el3_plat_prepare_exit(void)
+{
+	return soc_bl2_prepare_exit();
+}
+
+/* Called to do the dynamic initialization required
+ * before loading the next image.
+ */
+void bl2_plat_preload_setup(void)
+{
+
+	soc_preload_setup();
+
+	if (dram_regions_info.total_dram_size < NXP_DRAM0_SIZE) {
+		NOTICE("ERROR: DRAM0 size is not correctly configured.\n");
+		assert(false);
+	}
+
+	if ((dram_regions_info.region[0].addr == 0)
+		&& (dram_regions_info.total_dram_size > 0)) {
+		populate_dram_regions_info();
+
+		mmap_add_ddr_region_dynamically();
+	}
+
+	/* setup the memory region access permissions */
+	soc_mem_access();
+
+#ifdef POLICY_FUSE_PROVISION
+	fip_fuse_provisioning((uintptr_t)FUSE_BUF, FUSE_SZ);
+#endif
+}
diff --git a/plat/nxp/common/setup/ls_bl31_setup.c b/plat/nxp/common/setup/ls_bl31_setup.c
new file mode 100644
index 0000000..6cf6ae3
--- /dev/null
+++ b/plat/nxp/common/setup/ls_bl31_setup.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+
+#ifdef LS_EL3_INTERRUPT_HANDLER
+#include <ls_interrupt_mgmt.h>
+#endif
+#include <mmu_def.h>
+#include <plat_common.h>
+
+/*
+ * Placeholder variables for copying the arguments that have been passed to
+ * BL31 from BL2.
+ */
+#ifdef TEST_BL31
+#define  SPSR_FOR_EL2H   0x3C9
+#define  SPSR_FOR_EL1H   0x3C5
+#else
+static entry_point_info_t bl31_image_ep_info;
+#endif
+
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+static dram_regions_info_t dram_regions_info = {0};
+static uint64_t rcw_porsr1;
+
+/* Return the pointer to the dram_regions_info structure of the DRAM.
+ * This structure is populated after init_ddr().
+ */
+dram_regions_info_t *get_dram_regions_info(void)
+{
+	return &dram_regions_info;
+}
+
+/* Return the RCW.PORSR1 value which was passed in from BL2
+ */
+uint64_t bl31_get_porsr1(void)
+{
+	return rcw_porsr1;
+}
+
+/*
+ * Return pointer to the 'entry_point_info' structure of the next image for the
+ * security state specified:
+ * - BL33 corresponds to the non-secure image type; while
+ * - BL32 corresponds to the secure image type.
+ * - A NULL pointer is returned, if the image does not exist.
+ */
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+	entry_point_info_t *next_image_info;
+
+	assert(sec_state_is_valid(type));
+	next_image_info = (type == NON_SECURE)
+			? &bl33_image_ep_info : &bl32_image_ep_info;
+
+#ifdef TEST_BL31
+	next_image_info->pc     = _get_test_entry();
+	next_image_info->spsr   = SPSR_FOR_EL2H;
+	next_image_info->h.attr = NON_SECURE;
+#endif
+
+	if (next_image_info->pc != 0U) {
+		return next_image_info;
+	} else {
+		return NULL;
+	}
+}
+
+/*
+ * Perform any BL31 early platform setup common to NXP platforms.
+ * - Here is an opportunity to copy parameters passed by the calling EL (S-EL1
+ * in BL2 & S-EL3 in BL1) before they are lost (potentially).
+ * - This needs to be done before the MMU is initialized so that the
+ *   memory layout can be used while creating page tables.
+ * - BL2 has flushed this information to memory, so the latest data is read here.
+ */
+
+void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
+				u_register_t arg2, u_register_t arg3)
+{
+#ifndef TEST_BL31
+	int i = 0;
+	void *from_bl2 = (void *)arg0;
+#endif
+	soc_early_platform_setup2();
+
+#ifdef TEST_BL31
+	dram_regions_info.num_dram_regions  = 2;
+	dram_regions_info.total_dram_size   = 0x100000000;
+	dram_regions_info.region[0].addr    = 0x80000000;
+	dram_regions_info.region[0].size    = 0x80000000;
+	dram_regions_info.region[1].addr    = 0x880000000;
+	dram_regions_info.region[1].size    = 0x80000000;
+
+	bl33_image_ep_info.pc = _get_test_entry();
+#else
+	/*
+	 * Check that the params passed from BL2 are not NULL.
+	 */
+	bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
+
+	assert(params_from_bl2 != NULL);
+	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
+	assert(params_from_bl2->h.version >= VERSION_2);
+
+	bl_params_node_t *bl_params = params_from_bl2->head;
+
+	/*
+	 * Copy BL33 and BL32 (if present), entry point information.
+	 * They are stored in Secure RAM, in BL2's address space.
+	 */
+	while (bl_params != NULL) {
+		if (bl_params->image_id == BL31_IMAGE_ID) {
+			bl31_image_ep_info = *bl_params->ep_info;
+			dram_regions_info_t *loc_dram_regions_info =
+			(dram_regions_info_t *) bl31_image_ep_info.args.arg3;
+
+			dram_regions_info.num_dram_regions =
+					loc_dram_regions_info->num_dram_regions;
+			dram_regions_info.total_dram_size =
+					loc_dram_regions_info->total_dram_size;
+			VERBOSE("Number of DRAM Regions = %llx\n",
+					dram_regions_info.num_dram_regions);
+
+			for (i = 0; i < dram_regions_info.num_dram_regions;
+									i++) {
+				dram_regions_info.region[i].addr =
+					loc_dram_regions_info->region[i].addr;
+				dram_regions_info.region[i].size =
+					loc_dram_regions_info->region[i].size;
+				VERBOSE("DRAM%d Size = %llx\n", i,
+					dram_regions_info.region[i].size);
+			}
+			rcw_porsr1 = bl31_image_ep_info.args.arg4;
+		}
+
+		if (bl_params->image_id == BL32_IMAGE_ID) {
+			bl32_image_ep_info = *bl_params->ep_info;
+		}
+
+		if (bl_params->image_id == BL33_IMAGE_ID) {
+			bl33_image_ep_info = *bl_params->ep_info;
+		}
+
+		bl_params = bl_params->next_params_info;
+	}
+#endif /* TEST_BL31 */
+
+	if (bl33_image_ep_info.pc == 0) {
+		panic();
+	}
+
+	/*
+	 * perform basic initialization on the soc
+	 */
+	soc_init();
+}
+
+/*******************************************************************************
+ * Perform any BL31 platform setup common to NXP platforms
+ ******************************************************************************/
+void bl31_platform_setup(void)
+{
+	NOTICE("Welcome to %s BL31 Phase\n", BOARD);
+	soc_platform_setup();
+
+	/* Console logs can go missing when execution drops to
+	 * EL1 to initialize BL32, if present.
+	 * A console flush is necessary to avoid losing them.
+	 */
+	(void)console_flush();
+}
+
+void bl31_plat_runtime_setup(void)
+{
+#ifdef LS_EL3_INTERRUPT_HANDLER
+	ls_el3_interrupt_config();
+#endif
+	soc_runtime_setup();
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup shared between
+ * NXP platforms. This only does basic initialization. Later
+ * architectural setup (bl31_arch_setup()) does not do anything platform
+ * specific.
+ ******************************************************************************/
+void bl31_plat_arch_setup(void)
+{
+
+	ls_setup_page_tables(BL31_BASE,
+			      BL31_END - BL31_BASE,
+			      BL_CODE_BASE,
+			      BL_CODE_END,
+			      BL_RO_DATA_BASE,
+			      BL_RO_DATA_END
+#if USE_COHERENT_MEM
+			      , BL_COHERENT_RAM_BASE,
+			      BL_COHERENT_RAM_END
+#endif
+			      );
+	enable_mmu_el3(0);
+}
diff --git a/plat/nxp/common/setup/ls_common.c b/plat/nxp/common/setup/ls_common.c
new file mode 100644
index 0000000..a6946e1
--- /dev/null
+++ b/plat/nxp/common/setup/ls_common.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <lib/mmio.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <mmu_def.h>
+#include <plat/common/platform.h>
+
+#include "plat_common.h"
+#include "platform_def.h"
+
+const mmap_region_t *plat_ls_get_mmap(void);
+
+/*
+ * Table of memory regions for various BL stages to map using the MMU.
+ * This doesn't include Trusted SRAM as ls_setup_page_tables() already
+ * takes care of mapping it.
+ *
+ * The flash needs to be mapped as writable in order to erase the FIP's Table of
+ * Contents in case of unrecoverable error (see plat_error_handler()).
+ */
+#ifdef IMAGE_BL2
+const mmap_region_t plat_ls_mmap[] = {
+	LS_MAP_CCSR,
+	{0}
+};
+#endif
+
+#ifdef IMAGE_BL31
+const mmap_region_t plat_ls_mmap[] = {
+	LS_MAP_CCSR,
+#ifdef NXP_DCSR_ADDR
+	LS_MAP_DCSR,
+#endif
+	LS_MAP_OCRAM,
+	{0}
+};
+#endif
+#ifdef IMAGE_BL32
+const mmap_region_t plat_ls_mmap[] = {
+	LS_MAP_CCSR,
+	LS_MAP_BL32_SEC_MEM,
+	{0}
+};
+#endif
+
+/* Weak definitions may be overridden in specific NXP SoC */
+#pragma weak plat_get_ns_image_entrypoint
+#pragma weak plat_ls_get_mmap
+
+#if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
+static void mmap_add_ddr_regions_statically(void)
+{
+	int i = 0;
+	dram_regions_info_t *info_dram_regions = get_dram_regions_info();
+	/* MMU map for Non-Secure DRAM Regions */
+	VERBOSE("DRAM Region %d: %p - %p\n", i,
+			(void *) info_dram_regions->region[i].addr,
+			(void *) (info_dram_regions->region[i].addr
+				+ info_dram_regions->region[i].size
+				- 1));
+	mmap_add_region(info_dram_regions->region[i].addr,
+			info_dram_regions->region[i].addr,
+			info_dram_regions->region[i].size,
+			MT_MEMORY | MT_RW | MT_NS);
+
+	/* MMU map for Secure DDR Region on DRAM-0 */
+	if (info_dram_regions->region[i].size >
+		(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
+		VERBOSE("Secure DRAM Region %d: %p - %p\n", i,
+			(void *) (info_dram_regions->region[i].addr
+				+ info_dram_regions->region[i].size),
+			(void *) (info_dram_regions->region[i].addr
+				+ info_dram_regions->region[i].size
+				+ NXP_SECURE_DRAM_SIZE
+				+ NXP_SP_SHRD_DRAM_SIZE
+				- 1));
+		mmap_add_region((info_dram_regions->region[i].addr
+				+ info_dram_regions->region[i].size),
+				(info_dram_regions->region[i].addr
+				+ info_dram_regions->region[i].size),
+				(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE),
+				MT_MEMORY | MT_RW | MT_SECURE);
+	}
+
+#ifdef IMAGE_BL31
+	for (i = 1; i < info_dram_regions->num_dram_regions; i++) {
+		if (info_dram_regions->region[i].size == 0)
+			break;
+		VERBOSE("DRAM Region %d: %p - %p\n", i,
+			(void *) info_dram_regions->region[i].addr,
+			(void *) (info_dram_regions->region[i].addr
+				+ info_dram_regions->region[i].size
+				- 1));
+		mmap_add_region(info_dram_regions->region[i].addr,
+				info_dram_regions->region[i].addr,
+				info_dram_regions->region[i].size,
+				MT_MEMORY | MT_RW | MT_NS);
+	}
+#endif
+}
+#endif
+
+#if defined(PLAT_XLAT_TABLES_DYNAMIC)
+void mmap_add_ddr_region_dynamically(void)
+{
+	int i = 0;
+	dram_regions_info_t *info_dram_regions = get_dram_regions_info();
+	/* MMU map for Non-Secure DRAM Regions */
+	VERBOSE("DRAM Region %d: %p - %p\n", i,
+			(void *) info_dram_regions->region[i].addr,
+			(void *) (info_dram_regions->region[i].addr
+				+ info_dram_regions->region[i].size
+				- 1));
+	mmap_add_dynamic_region(info_dram_regions->region[i].addr,
+			info_dram_regions->region[i].addr,
+			info_dram_regions->region[i].size,
+			MT_MEMORY | MT_RW | MT_NS);
+
+	/* MMU map for Secure DDR Region on DRAM-0 */
+	if (info_dram_regions->region[i].size >
+		(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
+		VERBOSE("Secure DRAM Region %d: %p - %p\n", i,
+			(void *) (info_dram_regions->region[i].addr
+				+ info_dram_regions->region[i].size),
+			(void *) (info_dram_regions->region[i].addr
+				+ info_dram_regions->region[i].size
+				+ NXP_SECURE_DRAM_SIZE
+				+ NXP_SP_SHRD_DRAM_SIZE
+				- 1));
+		mmap_add_dynamic_region((info_dram_regions->region[i].addr
+				+ info_dram_regions->region[i].size),
+				(info_dram_regions->region[i].addr
+				+ info_dram_regions->region[i].size),
+				(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE),
+				MT_MEMORY | MT_RW | MT_SECURE);
+	}
+
+#ifdef IMAGE_BL31
+	for (i = 1; i < info_dram_regions->num_dram_regions; i++) {
+		if (info_dram_regions->region[i].size == 0) {
+			break;
+		}
+		VERBOSE("DRAM Region %d: %p - %p\n", i,
+			(void *) info_dram_regions->region[i].addr,
+			(void *) (info_dram_regions->region[i].addr
+				+ info_dram_regions->region[i].size
+				- 1));
+		mmap_add_dynamic_region(info_dram_regions->region[i].addr,
+				info_dram_regions->region[i].addr,
+				info_dram_regions->region[i].size,
+				MT_MEMORY | MT_RW | MT_NS);
+	}
+#endif
+}
+#endif
+
+/*
+ * Set up the page tables for the generic and platform-specific memory regions.
+ * The extents of the generic memory regions are specified by the function
+ * arguments and consist of:
+ * - Trusted SRAM seen by the BL image;
+ * - Code section;
+ * - Read-only data section;
+ * - Coherent memory region, if applicable.
+ */
+void ls_setup_page_tables(uintptr_t total_base,
+			   size_t total_size,
+			   uintptr_t code_start,
+			   uintptr_t code_limit,
+			   uintptr_t rodata_start,
+			   uintptr_t rodata_limit
+#if USE_COHERENT_MEM
+			   ,
+			   uintptr_t coh_start,
+			   uintptr_t coh_limit
+#endif
+			   )
+{
+	/*
+	 * Map the Trusted SRAM with appropriate memory attributes.
+	 * Subsequent mappings will adjust the attributes for specific regions.
+	 */
+	VERBOSE("Memory seen by this BL image: %p - %p\n",
+		(void *) total_base, (void *) (total_base + total_size));
+	mmap_add_region(total_base, total_base,
+			total_size,
+			MT_MEMORY | MT_RW | MT_SECURE);
+
+	/* Re-map the code section */
+	VERBOSE("Code region: %p - %p\n",
+		(void *) code_start, (void *) code_limit);
+	mmap_add_region(code_start, code_start,
+			code_limit - code_start,
+			MT_CODE | MT_SECURE);
+
+	/* Re-map the read-only data section */
+	VERBOSE("Read-only data region: %p - %p\n",
+		(void *) rodata_start, (void *) rodata_limit);
+	mmap_add_region(rodata_start, rodata_start,
+			rodata_limit - rodata_start,
+			MT_RO_DATA | MT_SECURE);
+
+#if USE_COHERENT_MEM
+	/* Re-map the coherent memory region */
+	VERBOSE("Coherent region: %p - %p\n",
+		(void *) coh_start, (void *) coh_limit);
+	mmap_add_region(coh_start, coh_start,
+			coh_limit - coh_start,
+			MT_DEVICE | MT_RW | MT_SECURE);
+#endif
+
+	/* Now (re-)map the platform-specific memory regions */
+	mmap_add(plat_ls_get_mmap());
+
+
+#if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
+	mmap_add_ddr_regions_statically();
+#endif
+
+	/* Create the page tables to reflect the above mappings */
+	init_xlat_tables();
+}
+
+/*******************************************************************************
+ * Returns NXP platform specific memory map regions.
+ ******************************************************************************/
+const mmap_region_t *plat_ls_get_mmap(void)
+{
+	return plat_ls_mmap;
+}
diff --git a/plat/nxp/common/setup/ls_err.c b/plat/nxp/common/setup/ls_err.c
new file mode 100644
index 0000000..845cd15
--- /dev/null
+++ b/plat/nxp/common/setup/ls_err.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+
+#if TRUSTED_BOARD_BOOT
+#include <dcfg.h>
+#include <snvs.h>
+#endif
+
+#include "plat_common.h"
+
+/*
+ * Error handler
+ */
+void plat_error_handler(int err)
+{
+#if TRUSTED_BOARD_BOOT
+	uint32_t mode;
+	bool sb = check_boot_mode_secure(&mode);
+#endif
+
+	switch (err) {
+	case -ENOENT:
+	case -EAUTH:
+		printf("Authentication failure\n");
+#if TRUSTED_BOARD_BOOT
+		/* For SB production mode, i.e. ITS = 1 */
+		if (sb == true) {
+			if (mode == 1U) {
+				transition_snvs_soft_fail();
+			} else {
+				transition_snvs_non_secure();
+			}
+		}
+#endif
+		break;
+	default:
+		/* Unexpected error */
+		break;
+	}
+
+	/* Loop until the watchdog resets the system */
+	for (;;)
+		wfi();
+}
diff --git a/plat/nxp/common/setup/ls_image_load.c b/plat/nxp/common/setup/ls_image_load.c
new file mode 100644
index 0000000..259ab31
--- /dev/null
+++ b/plat/nxp/common/setup/ls_image_load.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <common/desc_image_load.h>
+
+/*******************************************************************************
+ * This function flushes the data structures so that they are visible
+ * in memory for the next BL image.
+ ******************************************************************************/
+void plat_flush_next_bl_params(void)
+{
+	flush_bl_params_desc();
+}
+
+/*******************************************************************************
+ * This function returns the list of loadable images.
+ ******************************************************************************/
+bl_load_info_t *plat_get_bl_image_load_info(void)
+{
+	return get_bl_load_info_from_mem_params_desc();
+}
+
+/*******************************************************************************
+ * This function returns the list of executable images.
+ ******************************************************************************/
+bl_params_t *plat_get_next_bl_params(void)
+{
+	return get_next_bl_params_from_mem_params_desc();
+}
diff --git a/plat/nxp/common/setup/ls_interrupt_mgmt.c b/plat/nxp/common/setup/ls_interrupt_mgmt.c
new file mode 100644
index 0000000..a81cb2b
--- /dev/null
+++ b/plat/nxp/common/setup/ls_interrupt_mgmt.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <bl31/interrupt_mgmt.h>
+#include <common/debug.h>
+#include <ls_interrupt_mgmt.h>
+#include <plat/common/platform.h>
+
+static interrupt_type_handler_t type_el3_interrupt_table[MAX_INTR_EL3];
+
+int request_intr_type_el3(uint32_t id, interrupt_type_handler_t handler)
+{
+	/* Validate 'handler' and 'id' parameters */
+	if (!handler || id >= MAX_INTR_EL3) {
+		return -EINVAL;
+	}
+
+	/* Check if a handler has already been registered */
+	if (type_el3_interrupt_table[id] != NULL) {
+		return -EALREADY;
+	}
+
+	type_el3_interrupt_table[id] = handler;
+
+	return 0;
+}
+
+static uint64_t ls_el3_interrupt_handler(uint32_t id, uint32_t flags,
+					  void *handle, void *cookie)
+{
+	uint32_t intr_id;
+	interrupt_type_handler_t handler;
+
+	intr_id = plat_ic_get_pending_interrupt_id();
+
+	INFO("Interrupt received: %u\n", intr_id);
+
+	handler = type_el3_interrupt_table[intr_id];
+	if (handler != NULL) {
+		handler(intr_id, flags, handle, cookie);
+	}
+
+	/*
+	 * Mark this interrupt as complete to avoid an interrupt storm.
+	 */
+	plat_ic_end_of_interrupt(intr_id);
+
+	return 0U;
+}
+
+void ls_el3_interrupt_config(void)
+{
+	uint64_t flags = 0U;
+	uint64_t rc;
+
+	set_interrupt_rm_flag(flags, NON_SECURE);
+	rc = register_interrupt_type_handler(INTR_TYPE_EL3,
+					     ls_el3_interrupt_handler, flags);
+	if (rc != 0U) {
+		panic();
+	}
+}
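
Editor's note: the table-driven dispatch above leaves a platform driver with two steps: call ls_el3_interrupt_config() once during BL31 setup so EL3 interrupts are routed to ls_el3_interrupt_handler(), then register a per-ID handler. A minimal sketch follows, assuming a hypothetical interrupt ID and handler name that are not part of this patch:

/* Hypothetical example: route EL3 interrupt ID 32 to a platform handler. */
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <ls_interrupt_mgmt.h>

#define PLAT_EXAMPLE_INTR_ID	32U	/* assumed ID, not defined in this patch */

static uint64_t plat_example_handler(uint32_t id, uint32_t flags,
				     void *handle, void *cookie)
{
	/* Acknowledge and service the device here. */
	return 0U;
}

void plat_example_intr_register(void)
{
	/* ls_el3_interrupt_config() must already have run in BL31 setup. */
	if (request_intr_type_el3(PLAT_EXAMPLE_INTR_ID,
				  plat_example_handler) != 0) {
		panic();
	}
}

Note that request_intr_type_el3() rejects a second registration for the same ID with -EALREADY, so re-registering a handler has to be an explicit design decision.
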
diff --git a/plat/nxp/common/setup/ls_io_storage.c b/plat/nxp/common/setup/ls_io_storage.c
new file mode 100644
index 0000000..0c01765
--- /dev/null
+++ b/plat/nxp/common/setup/ls_io_storage.c
@@ -0,0 +1,519 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+#include <endian.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <common/tbbr/tbbr_img_def.h>
+#include <drivers/io/io_block.h>
+#include <drivers/io/io_driver.h>
+#include <drivers/io/io_fip.h>
+#include <drivers/io/io_memmap.h>
+#include <drivers/io/io_storage.h>
+#ifdef FLEXSPI_NOR_BOOT
+#include <flexspi_nor.h>
+#endif
+#if defined(QSPI_BOOT)
+#include <qspi.h>
+#endif
+#if defined(SD_BOOT) || defined(EMMC_BOOT)
+#include <sd_mmc.h>
+#endif
+#include <tools_share/firmware_image_package.h>
+
+#ifdef CONFIG_DDR_FIP_IMAGE
+#include <ddr_io_storage.h>
+#endif
+#ifdef POLICY_FUSE_PROVISION
+#include <fuse_io.h>
+#endif
+#include "plat_common.h"
+#include "platform_def.h"
+
+uint32_t fip_device;
+/* IO devices */
+uintptr_t backend_dev_handle;
+
+static const io_dev_connector_t *fip_dev_con;
+static uintptr_t fip_dev_handle;
+static const io_dev_connector_t *backend_dev_con;
+
+static io_block_spec_t fip_block_spec = {
+	.offset = PLAT_FIP_OFFSET,
+	.length = PLAT_FIP_MAX_SIZE
+};
+
+static const io_uuid_spec_t bl2_uuid_spec = {
+	.uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2,
+};
+
+static const io_uuid_spec_t fuse_bl2_uuid_spec = {
+	.uuid = UUID_SCP_FIRMWARE_SCP_BL2,
+};
+
+static const io_uuid_spec_t bl31_uuid_spec = {
+	.uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31,
+};
+
+static const io_uuid_spec_t bl32_uuid_spec = {
+	.uuid = UUID_SECURE_PAYLOAD_BL32,
+};
+
+static const io_uuid_spec_t bl33_uuid_spec = {
+	.uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
+};
+
+static const io_uuid_spec_t tb_fw_config_uuid_spec = {
+	.uuid = UUID_TB_FW_CONFIG,
+};
+
+static const io_uuid_spec_t hw_config_uuid_spec = {
+	.uuid = UUID_HW_CONFIG,
+};
+
+#if TRUSTED_BOARD_BOOT
+static const io_uuid_spec_t tb_fw_cert_uuid_spec = {
+	.uuid = UUID_TRUSTED_BOOT_FW_CERT,
+};
+
+static const io_uuid_spec_t trusted_key_cert_uuid_spec = {
+	.uuid = UUID_TRUSTED_KEY_CERT,
+};
+
+static const io_uuid_spec_t fuse_key_cert_uuid_spec = {
+	.uuid = UUID_SCP_FW_KEY_CERT,
+};
+
+static const io_uuid_spec_t soc_fw_key_cert_uuid_spec = {
+	.uuid = UUID_SOC_FW_KEY_CERT,
+};
+
+static const io_uuid_spec_t tos_fw_key_cert_uuid_spec = {
+	.uuid = UUID_TRUSTED_OS_FW_KEY_CERT,
+};
+
+static const io_uuid_spec_t nt_fw_key_cert_uuid_spec = {
+	.uuid = UUID_NON_TRUSTED_FW_KEY_CERT,
+};
+
+static const io_uuid_spec_t fuse_cert_uuid_spec = {
+	.uuid = UUID_SCP_FW_CONTENT_CERT,
+};
+
+static const io_uuid_spec_t soc_fw_cert_uuid_spec = {
+	.uuid = UUID_SOC_FW_CONTENT_CERT,
+};
+
+static const io_uuid_spec_t tos_fw_cert_uuid_spec = {
+	.uuid = UUID_TRUSTED_OS_FW_CONTENT_CERT,
+};
+
+static const io_uuid_spec_t nt_fw_cert_uuid_spec = {
+	.uuid = UUID_NON_TRUSTED_FW_CONTENT_CERT,
+};
+#endif /* TRUSTED_BOARD_BOOT */
+
+static int open_fip(const uintptr_t spec);
+
+struct plat_io_policy {
+	uintptr_t *dev_handle;
+	uintptr_t image_spec;
+	int (*check)(const uintptr_t spec);
+};
+
+/* By default, NXP platforms load images from the FIP */
+static const struct plat_io_policy policies[] = {
+	[FIP_IMAGE_ID] = {
+		&backend_dev_handle,
+		(uintptr_t)&fip_block_spec,
+		open_backend
+	},
+	[BL2_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl2_uuid_spec,
+		open_fip
+	},
+	[SCP_BL2_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&fuse_bl2_uuid_spec,
+		open_fip
+	},
+	[BL31_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl31_uuid_spec,
+		open_fip
+	},
+	[BL32_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl32_uuid_spec,
+		open_fip
+	},
+	[BL33_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl33_uuid_spec,
+		open_fip
+	},
+	[TB_FW_CONFIG_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&tb_fw_config_uuid_spec,
+		open_fip
+	},
+	[HW_CONFIG_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&hw_config_uuid_spec,
+		open_fip
+	},
+#if TRUSTED_BOARD_BOOT
+	[TRUSTED_BOOT_FW_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&tb_fw_cert_uuid_spec,
+		open_fip
+	},
+	[TRUSTED_KEY_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&trusted_key_cert_uuid_spec,
+		open_fip
+	},
+	[SCP_FW_KEY_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&fuse_key_cert_uuid_spec,
+		open_fip
+	},
+	[SOC_FW_KEY_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&soc_fw_key_cert_uuid_spec,
+		open_fip
+	},
+	[TRUSTED_OS_FW_KEY_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&tos_fw_key_cert_uuid_spec,
+		open_fip
+	},
+	[NON_TRUSTED_FW_KEY_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&nt_fw_key_cert_uuid_spec,
+		open_fip
+	},
+	[SCP_FW_CONTENT_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&fuse_cert_uuid_spec,
+		open_fip
+	},
+	[SOC_FW_CONTENT_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&soc_fw_cert_uuid_spec,
+		open_fip
+	},
+	[TRUSTED_OS_FW_CONTENT_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&tos_fw_cert_uuid_spec,
+		open_fip
+	},
+	[NON_TRUSTED_FW_CONTENT_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&nt_fw_cert_uuid_spec,
+		open_fip
+	},
+#endif /* TRUSTED_BOARD_BOOT */
+};
+
+
+/* Weak definitions may be overridden by the specific platform */
+#pragma weak plat_io_setup
+
+/*
+ * Return an IO device handle and specification which can be used to access
+ * an image.
+ */
+static int open_fip(const uintptr_t spec)
+{
+	int result;
+	uintptr_t local_image_handle;
+
+	/* See if a Firmware Image Package is available */
+	result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
+	if (result == 0) {
+		result = io_open(fip_dev_handle, spec, &local_image_handle);
+		if (result == 0) {
+			VERBOSE("Using FIP\n");
+			io_close(local_image_handle);
+		}
+	}
+	return result;
+}
+
+
+int open_backend(const uintptr_t spec)
+{
+	int result;
+	uintptr_t local_image_handle;
+
+	result = io_dev_init(backend_dev_handle, (uintptr_t)NULL);
+	if (result == 0) {
+		result = io_open(backend_dev_handle, spec, &local_image_handle);
+		if (result == 0) {
+			io_close(local_image_handle);
+		}
+	}
+	return result;
+}
+
+#if defined(SD_BOOT) || defined(EMMC_BOOT)
+static int plat_io_block_setup(size_t fip_offset, uintptr_t block_dev_spec)
+{
+	int io_result;
+
+	fip_block_spec.offset = fip_offset;
+
+	io_result = register_io_dev_block(&backend_dev_con);
+	assert(io_result == 0);
+
+	/* Open connections to devices and cache the handles */
+	io_result = io_dev_open(backend_dev_con, block_dev_spec,
+				&backend_dev_handle);
+	assert(io_result == 0);
+
+	return io_result;
+}
+#endif
+
+#if defined(FLEXSPI_NOR_BOOT) || defined(QSPI_BOOT)
+static int plat_io_memmap_setup(size_t fip_offset)
+{
+	int io_result;
+
+	fip_block_spec.offset = fip_offset;
+
+	io_result = register_io_dev_memmap(&backend_dev_con);
+	assert(io_result == 0);
+
+	/* Open connections to devices and cache the handles */
+	io_result = io_dev_open(backend_dev_con, (uintptr_t)NULL,
+				&backend_dev_handle);
+	assert(io_result == 0);
+
+	return io_result;
+}
+#endif
+
+static int ls_io_fip_setup(unsigned int boot_dev)
+{
+	int io_result;
+
+	io_result = register_io_dev_fip(&fip_dev_con);
+	assert(io_result == 0);
+
+	/* Open connections to devices and cache the handles */
+	io_result = io_dev_open(fip_dev_con, (uintptr_t)&fip_device,
+				&fip_dev_handle);
+	assert(io_result == 0);
+
+#ifdef CONFIG_DDR_FIP_IMAGE
+	/* Open connection to DDR FIP image if available */
+	io_result = ddr_fip_setup(fip_dev_con, boot_dev);
+
+	assert(io_result == 0);
+#endif
+
+#ifdef POLICY_FUSE_PROVISION
+	/* Open connection to FUSE FIP image if available */
+	io_result = fuse_fip_setup(fip_dev_con, boot_dev);
+
+	assert(io_result == 0);
+#endif
+
+	return io_result;
+}
+
+int ls_qspi_io_setup(void)
+{
+#ifdef QSPI_BOOT
+	qspi_io_setup(NXP_QSPI_FLASH_ADDR,
+			NXP_QSPI_FLASH_SIZE,
+			PLAT_FIP_OFFSET);
+	return plat_io_memmap_setup(NXP_QSPI_FLASH_ADDR + PLAT_FIP_OFFSET);
+#else
+	ERROR("QSPI driver not present. Check your BUILD\n");
+
+	/* Should never reach here */
+	assert(false);
+	return -1;
+#endif
+}
+
+int emmc_sdhc2_io_setup(void)
+{
+#if defined(EMMC_BOOT) && defined(NXP_ESDHC2_ADDR)
+	uintptr_t block_dev_spec;
+	int ret;
+
+	ret = sd_emmc_init(&block_dev_spec,
+			NXP_ESDHC2_ADDR,
+			NXP_SD_BLOCK_BUF_ADDR,
+			NXP_SD_BLOCK_BUF_SIZE,
+			false);
+	if (ret != 0) {
+		return ret;
+	}
+
+	return plat_io_block_setup(PLAT_FIP_OFFSET, block_dev_spec);
+#else
+	ERROR("EMMC driver not present. Check your BUILD\n");
+
+	/* Should never reach here */
+	assert(false);
+	return -1;
+#endif
+}
+
+int emmc_io_setup(void)
+{
+/* On platforms which have only one ESDHC controller,
+ * eMMC boot uses that first ESDHC controller.
+ */
+#if defined(SD_BOOT) || defined(EMMC_BOOT)
+	uintptr_t block_dev_spec;
+	int ret;
+
+	ret = sd_emmc_init(&block_dev_spec,
+			NXP_ESDHC_ADDR,
+			NXP_SD_BLOCK_BUF_ADDR,
+			NXP_SD_BLOCK_BUF_SIZE,
+			true);
+	if (ret != 0) {
+		return ret;
+	}
+
+	return plat_io_block_setup(PLAT_FIP_OFFSET, block_dev_spec);
+#else
+	ERROR("SD driver not present. Check your BUILD\n");
+
+	/* Should never reach here */
+	assert(false);
+	return -1;
+#endif
+}
+
+int ifc_nor_io_setup(void)
+{
+	ERROR("NOR driver not present. Check your BUILD\n");
+
+	/* Should never reach here */
+	assert(false);
+	return -1;
+}
+
+int ifc_nand_io_setup(void)
+{
+	ERROR("NAND driver not present. Check your BUILD\n");
+
+	/* Should never reach here */
+	assert(false);
+	return -1;
+}
+
+int ls_flexspi_nor_io_setup(void)
+{
+#ifdef FLEXSPI_NOR_BOOT
+	int ret = 0;
+
+	ret = flexspi_nor_io_setup(NXP_FLEXSPI_FLASH_ADDR,
+				   NXP_FLEXSPI_FLASH_SIZE,
+				   NXP_FLEXSPI_ADDR);
+
+	if (ret != 0) {
+		ERROR("FlexSPI NOR driver initialization error.\n");
+		/* Should never reach here */
+		assert(0);
+		panic();
+		return -1;
+	}
+
+	return plat_io_memmap_setup(NXP_FLEXSPI_FLASH_ADDR + PLAT_FIP_OFFSET);
+#else
+	ERROR("FlexSPI NOR driver not present. Check your BUILD\n");
+
+	/* Should never reach here */
+	assert(false);
+	return -1;
+#endif
+}
+
+static int (* const ls_io_setup_table[])(void) = {
+	[BOOT_DEVICE_IFC_NOR] = ifc_nor_io_setup,
+	[BOOT_DEVICE_IFC_NAND] = ifc_nand_io_setup,
+	[BOOT_DEVICE_QSPI] = ls_qspi_io_setup,
+	[BOOT_DEVICE_EMMC] = emmc_io_setup,
+	[BOOT_DEVICE_SDHC2_EMMC] = emmc_sdhc2_io_setup,
+	[BOOT_DEVICE_FLEXSPI_NOR] = ls_flexspi_nor_io_setup,
+	[BOOT_DEVICE_FLEXSPI_NAND] = ls_flexspi_nor_io_setup,
+};
+
+
+int plat_io_setup(void)
+{
+	int (*io_setup)(void);
+	unsigned int boot_dev = BOOT_DEVICE_NONE;
+	int ret;
+
+	boot_dev = get_boot_dev();
+	if (boot_dev == BOOT_DEVICE_NONE) {
+		ERROR("Boot device detection failed; check RCW_SRC\n");
+		return -EINVAL;
+	}
+
+	io_setup = ls_io_setup_table[boot_dev];
+	ret = io_setup();
+	if (ret != 0) {
+		return ret;
+	}
+
+	ret = ls_io_fip_setup(boot_dev);
+	if (ret != 0) {
+		return ret;
+	}
+
+	return 0;
+}
+
+
+/* Return an IO device handle and specification which can be used to access
+ * an image. Use this to enforce platform load policy
+ */
+int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
+			  uintptr_t *image_spec)
+{
+	int result = -1;
+	const struct plat_io_policy *policy;
+
+	if (image_id < ARRAY_SIZE(policies)) {
+
+		policy = &policies[image_id];
+		result = policy->check(policy->image_spec);
+		if (result == 0) {
+			*image_spec = policy->image_spec;
+			*dev_handle = *(policy->dev_handle);
+		}
+	}
+#ifdef CONFIG_DDR_FIP_IMAGE
+	else {
+		VERBOSE("Trying alternative IO\n");
+		result = plat_get_ddr_fip_image_source(image_id, dev_handle,
+						image_spec, open_backend);
+	}
+#endif
+#ifdef POLICY_FUSE_PROVISION
+	if (result != 0) {
+		VERBOSE("Trying FUSE IO\n");
+		result = plat_get_fuse_image_source(image_id, dev_handle,
+						image_spec, open_backend);
+	}
+#endif
+
+	return result;
+}
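
Editor's note: plat_get_image_source() only resolves a device handle and image spec from the policy table; the actual transfer is done by the generic IO layer, which is what the common load_image() path already does. A rough sketch of a caller consuming the result, assuming TF-A's io_storage API and a caller-managed buffer:

/* Sketch: read the BL31 image through the policy table resolved above. */
#include <stddef.h>

#include <common/tbbr/tbbr_img_def.h>
#include <drivers/io/io_storage.h>
#include <plat/common/platform.h>

static int example_read_bl31(uintptr_t buf, size_t buf_size)
{
	uintptr_t dev_handle, image_spec, image_handle;
	size_t bytes_read;
	int ret;

	ret = plat_get_image_source(BL31_IMAGE_ID, &dev_handle, &image_spec);
	if (ret != 0) {
		return ret;
	}

	ret = io_open(dev_handle, image_spec, &image_handle);
	if (ret != 0) {
		return ret;
	}

	ret = io_read(image_handle, buf, buf_size, &bytes_read);
	io_close(image_handle);

	return ret;
}
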
diff --git a/plat/nxp/common/setup/ls_stack_protector.c b/plat/nxp/common/setup/ls_stack_protector.c
new file mode 100644
index 0000000..ab78f88
--- /dev/null
+++ b/plat/nxp/common/setup/ls_stack_protector.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <stdint.h>
+
+#include <arch_helpers.h>
+
+#include <plat/common/platform.h>
+
+#define RANDOM_CANARY_VALUE ((u_register_t) 3288484550995823360ULL)
+
+u_register_t plat_get_stack_protector_canary(void)
+{
+	/*
+	 * TBD: Generate Random Number from NXP CAAM Block.
+	 */
+	return RANDOM_CANARY_VALUE ^ read_cntpct_el0();
+}
diff --git a/plat/nxp/common/sip_svc/aarch64/sipsvc.S b/plat/nxp/common/sip_svc/aarch64/sipsvc.S
new file mode 100644
index 0000000..6a47cbf
--- /dev/null
+++ b/plat/nxp/common/sip_svc/aarch64/sipsvc.S
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <asm_macros.S>
+#include <bl31_data.h>
+
+.global el2_2_aarch32
+.global prefetch_disable
+
+#define  SPSR_EL3_M4     0x10
+#define  SPSR_EL_MASK    0xC
+#define  SPSR_EL2        0x8
+#define  SCR_EL3_4_EL2_AARCH32  0x131
+#define  SPSR32_EL2_LE          0x1DA
+
+#define  MIDR_PARTNUM_START      4
+#define  MIDR_PARTNUM_WIDTH      12
+#define  MIDR_PARTNUM_A53        0xD03
+#define  MIDR_PARTNUM_A57        0xD07
+#define  MIDR_PARTNUM_A72        0xD08
+
+/*
+ * uint64_t el2_2_aarch32(u_register_t smc_id,
+ *                   u_register_t start_addr,
+ *                   u_register_t parm1,
+ *                   u_register_t parm2)
+ * This function changes the execution width of EL2 from AArch64
+ * to AArch32.
+ * Note: MUST be called from EL2 in AArch64 state
+ * in:  x0 = smc function id
+ *      x1 = start address for EL2 in AArch32 state
+ *      x2 = first parameter to pass to EL2 in AArch32 state
+ *      x3 = second parameter to pass to EL2 in AArch32 state
+ * out: x0 = 0,  on success
+ *      x0 = -1, on failure
+ * uses x0, x1, x2, x3
+ */
+func el2_2_aarch32
+
+	/* check that caller is EL2 @ Aarch64 - err return if not */
+	mrs  x0, spsr_el3
+	/* see if we were called from Aarch32 */
+	tst  x0, #SPSR_EL3_M4
+	b.ne 2f
+
+	/* see if we were called from EL2 */
+	and   x0, x0, SPSR_EL_MASK
+	cmp   x0, SPSR_EL2
+	b.ne  2f
+
+	/* set ELR_EL3 */
+	msr  elr_el3, x1
+
+	/* set scr_el3 */
+	mov  x0, #SCR_EL3_4_EL2_AARCH32
+	msr  scr_el3, x0
+
+	/* set sctlr_el2 */
+	ldr   x1, =SCTLR_EL2_RES1
+	msr  sctlr_el2, x1
+
+	/* set spsr_el3 */
+	ldr  x0, =SPSR32_EL2_LE
+	msr  spsr_el3, x0
+
+	/* x2 = parm 1
+	 * x3 = parm2
+	 */
+
+	/* set the parameters to be passed-thru to EL2 @ Aarch32 */
+	mov  x1, x2
+	mov  x2, x3
+
+	/* x1 = parm 1
+	 * x2 = parm2
+	 */
+
+	mov  x0, xzr
+	/* invalidate the icache */
+	ic iallu
+	dsb sy
+	isb
+	b  1f
+2:
+	/* error return */
+	mvn  x0, xzr
+	ret
+1:
+	eret
+endfunc el2_2_aarch32
+
+/*
+ * int prefetch_disable(u_register_t smc_id, u_register_t mask)
+ * This function marks cores which need to have prefetch disabled.
+ * Secondary cores have prefetch disabled when they are released from reset;
+ * the boot core has prefetch disabled when this call is made.
+ * in:  x0 = function id
+ *      x1 = core mask, where bit[0]=core0, bit[1]=core1, etc
+ *           if a bit in the mask is set, then prefetch is disabled for that
+ *           core
+ * out: x0 = SMC_SUCCESS
+ */
+func prefetch_disable
+	stp  x4, x30, [sp, #-16]!
+
+	mov   x3, x1
+
+	/* x1 = core prefetch disable mask */
+	/* x3 = core prefetch disable mask */
+
+	/* store the mask */
+	mov   x0, #PREFETCH_DIS_OFFSET
+	bl   _set_global_data
+
+	/* x3 = core prefetch disable mask */
+
+	/* see if we need to disable prefetch on THIS core */
+	bl   plat_my_core_mask
+
+	/* x0 = core mask lsb */
+	/* x3 = core prefetch disable mask */
+
+	tst   x3, x0
+	b.eq  1f
+
+	/* read midr_el1 */
+	mrs   x1, midr_el1
+
+	/* x1 = midr_el1 */
+
+	mov   x0, xzr
+	bfxil x0, x1, #MIDR_PARTNUM_START, #MIDR_PARTNUM_WIDTH
+
+	/* x0 = part number (a53, a57, a72, etc) */
+
+	/* branch on cpu-specific */
+	cmp   x0, #MIDR_PARTNUM_A57
+	b.eq  1f
+	cmp   x0, #MIDR_PARTNUM_A72
+	b.ne  1f
+
+	bl    _disable_ldstr_pfetch_A72
+	b     1f
+1:
+	ldp   x4, x30, [sp], #16
+	mov   x0, xzr
+	ret
+endfunc prefetch_disable
diff --git a/plat/nxp/common/sip_svc/include/sipsvc.h b/plat/nxp/common/sip_svc/include/sipsvc.h
new file mode 100644
index 0000000..d9e61e9
--- /dev/null
+++ b/plat/nxp/common/sip_svc/include/sipsvc.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef SIPSVC_H
+#define SIPSVC_H
+
+#include <stdint.h>
+
+#define SMC_FUNC_MASK			0x0000ffff
+#define SMC32_PARAM_MASK		0xffffffff
+
+/* SMC function IDs for SiP Service queries */
+#define SIP_SVC_CALL_COUNT		0xff00
+#define SIP_SVC_UID			0xff01
+#define SIP_SVC_VERSION			0xff03
+#define SIP_SVC_PRNG			0xff10
+#define SIP_SVC_RNG			0xff11
+#define SIP_SVC_MEM_BANK		0xff12
+#define SIP_SVC_PREFETCH_DIS		0xff13
+#define SIP_SVC_HUK			0xff14
+#define SIP_SVC_ALLOW_L1L2_ERR		0xff15
+#define SIP_SVC_ALLOW_L2_CLR		0xff16
+#define SIP_SVC_2_AARCH32		0xff17
+#define SIP_SVC_PORSR1			0xff18
+
+/* Layerscape SiP Service Calls version numbers */
+#define LS_SIP_SVC_VERSION_MAJOR	0x0
+#define LS_SIP_SVC_VERSION_MINOR	0x1
+
+/* Number of Layerscape SiP Calls implemented */
+#define LS_COMMON_SIP_NUM_CALLS		10
+
+/* Parameter Type Constants */
+#define SIP_PARAM_TYPE_NONE		0x0
+#define SIP_PARAM_TYPE_VALUE_INPUT	0x1
+#define SIP_PARAM_TYPE_VALUE_OUTPUT	0x2
+#define SIP_PARAM_TYPE_VALUE_INOUT	0x3
+#define SIP_PARAM_TYPE_MEMREF_INPUT	0x5
+#define SIP_PARAM_TYPE_MEMREF_OUTPUT	0x6
+#define SIP_PARAM_TYPE_MEMREF_INOUT	0x7
+
+#define SIP_PARAM_TYPE_MASK		0xF
+
+/*
+ * The macro SIP_PARAM_TYPES can be used to construct a value that you can
+ * compare against an incoming paramTypes to check the type of all the
+ * parameters in one comparison.
+ */
+#define SIP_PARAM_TYPES(t0, t1, t2, t3) \
+		((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
+
+/*
+ * The macro SIP_PARAM_TYPE_GET can be used to extract the type of a given
+ * parameter from paramTypes if you need more fine-grained type checking.
+ */
+#define SIP_PARAM_TYPE_GET(t, i)	((((uint32_t)(t)) >> ((i) * 4)) & 0xF)
+
+/*
+ * The macro SIP_PARAM_TYPE_SET can be used to set the type of a given
+ * parameter in paramTypes without specifying all types (SIP_PARAM_TYPES).
+ */
+#define SIP_PARAM_TYPE_SET(t, i)	(((uint32_t)(t) & 0xF) << ((i) * 4))
+
+#define SIP_SVC_RNG_PARAMS		(SIP_PARAM_TYPE_VALUE_INPUT, \
+					 SIP_PARAM_TYPE_MEMREF_OUTPUT, \
+					 SIP_PARAM_TYPE_NONE, \
+					 SIP_PARAM_TYPE_NONE)
+
+/* Layerscape SiP Calls error code */
+enum {
+	LS_SIP_SUCCESS = 0,
+	LS_SIP_INVALID_PARAM = -1,
+	LS_SIP_NOT_SUPPORTED = -2,
+};
+
+#endif /* SIPSVC_H */
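
Editor's note: the paramTypes helpers above pack four 4-bit type fields into a single word, so a service can validate all parameters with one comparison or pick out an individual field. A small, self-contained illustration using only the macros and error codes defined in this header:

/* Sketch: validate a packed paramTypes word for an RNG-style call. */
#include <stdint.h>

#include <sipsvc.h>

static int check_rng_param_types(uint32_t param_types)
{
	const uint32_t expected =
		SIP_PARAM_TYPES(SIP_PARAM_TYPE_VALUE_INPUT,
				SIP_PARAM_TYPE_MEMREF_OUTPUT,
				SIP_PARAM_TYPE_NONE,
				SIP_PARAM_TYPE_NONE);

	/* All four parameter types checked with a single comparison. */
	if (param_types != expected) {
		return LS_SIP_INVALID_PARAM;
	}

	/* Equivalent fine-grained check for parameter 1 only. */
	if (SIP_PARAM_TYPE_GET(param_types, 1) != SIP_PARAM_TYPE_MEMREF_OUTPUT) {
		return LS_SIP_INVALID_PARAM;
	}

	return LS_SIP_SUCCESS;
}
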
diff --git a/plat/nxp/common/sip_svc/sip_svc.c b/plat/nxp/common/sip_svc/sip_svc.c
new file mode 100644
index 0000000..1c8668e
--- /dev/null
+++ b/plat/nxp/common/sip_svc/sip_svc.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2018-2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <caam.h>
+#include <common/runtime_svc.h>
+#include <dcfg.h>
+#include <lib/mmio.h>
+#include <tools_share/uuid.h>
+
+#include <plat_common.h>
+#include <sipsvc.h>
+
+/* Layerscape SiP Service UUID */
+DEFINE_SVC_UUID2(nxp_sip_svc_uid,
+		 0x871de4ef, 0xedfc, 0x4209, 0xa4, 0x23,
+		 0x8d, 0x23, 0x75, 0x9d, 0x3b, 0x9f);
+
+#pragma weak nxp_plat_sip_handler
+static uintptr_t nxp_plat_sip_handler(unsigned int smc_fid,
+				      u_register_t x1,
+				      u_register_t x2,
+				      u_register_t x3,
+				      u_register_t x4,
+				      void *cookie,
+				      void *handle,
+				      u_register_t flags)
+{
+	ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
+	SMC_RET1(handle, SMC_UNK);
+}
+
+uint64_t el2_2_aarch32(u_register_t smc_id, u_register_t start_addr,
+		       u_register_t parm1, u_register_t parm2);
+
+uint64_t prefetch_disable(u_register_t smc_id, u_register_t mask);
+uint64_t bl31_get_porsr1(void);
+
+static void clean_top_32b_of_param(uint32_t smc_fid,
+				   u_register_t *px1,
+				   u_register_t *px2,
+				   u_register_t *px3,
+				   u_register_t *px4)
+{
+	/* If the parameters came from an SMC32 call, clean the top 32 bits */
+	if (GET_SMC_CC(smc_fid) == SMC_32) {
+		*px1 = *px1 & SMC32_PARAM_MASK;
+		*px2 = *px2 & SMC32_PARAM_MASK;
+		*px3 = *px3 & SMC32_PARAM_MASK;
+		*px4 = *px4 & SMC32_PARAM_MASK;
+	}
+}
+
+/* This function handles Layerscape defined SiP Calls */
+static uintptr_t nxp_sip_handler(unsigned int smc_fid,
+				 u_register_t x1,
+				 u_register_t x2,
+				 u_register_t x3,
+				 u_register_t x4,
+				 void *cookie,
+				 void *handle,
+				 u_register_t flags)
+{
+	uint32_t ns;
+	uint64_t ret;
+	dram_regions_info_t *info_dram_regions;
+
+	/* If the parameters were sent from SMC32, clean the top 32 bits */
+	clean_top_32b_of_param(smc_fid, &x1, &x2, &x3, &x4);
+
+	/* Determine which security state this SMC originated from */
+	ns = is_caller_non_secure(flags);
+	if (ns == 0) {
+		/* SiP SMC service secure world's call */
+		;
+	} else {
+		/* SiP SMC service normal world's call */
+		;
+	}
+
+	switch (smc_fid & SMC_FUNC_MASK) {
+	case SIP_SVC_RNG:
+		if (is_sec_enabled() == false) {
+			NOTICE("SEC is disabled.\n");
+			SMC_RET1(handle, SMC_UNK);
+		}
+
+		/* Return zero on failure */
+		ret = get_random((int)x1);
+		if (ret != 0) {
+			SMC_RET2(handle, SMC_OK, ret);
+		} else {
+			SMC_RET1(handle, SMC_UNK);
+		}
+		/* break is not required as SMC_RETx returns */
+	case SIP_SVC_HUK:
+		if (is_sec_enabled() == false) {
+			NOTICE("SEC is disabled.\n");
+			SMC_RET1(handle, SMC_UNK);
+		}
+		ret = get_hw_unq_key_blob_hw((uint8_t *) x1, (uint32_t) x2);
+
+		if (ret == SMC_OK) {
+			SMC_RET1(handle, SMC_OK);
+		} else {
+			SMC_RET1(handle, SMC_UNK);
+		}
+		/* break is not required as SMC_RETx returns */
+	case SIP_SVC_MEM_BANK:
+		VERBOSE("Handling SMC SIP_SVC_MEM_BANK.\n");
+		info_dram_regions = get_dram_regions_info();
+
+		if (x1 == -1) {
+			SMC_RET2(handle, SMC_OK,
+					info_dram_regions->total_dram_size);
+		} else if (x1 >= info_dram_regions->num_dram_regions) {
+			SMC_RET1(handle, SMC_UNK);
+		} else {
+			SMC_RET3(handle, SMC_OK,
+				info_dram_regions->region[x1].addr,
+				info_dram_regions->region[x1].size);
+		}
+		/* break is not required as SMC_RETx returns */
+	case SIP_SVC_PREFETCH_DIS:
+		VERBOSE("In SIP_SVC_PREFETCH_DIS call\n");
+		ret = prefetch_disable(smc_fid, x1);
+		if (ret == SMC_OK) {
+			SMC_RET1(handle, SMC_OK);
+		} else {
+			SMC_RET1(handle, SMC_UNK);
+		}
+		/* break is not required as SMC_RETx returns */
+	case SIP_SVC_2_AARCH32:
+		ret = el2_2_aarch32(smc_fid, x1, x2, x3);
+
+		/* In success case, control should not reach here. */
+		NOTICE("SMC: SIP_SVC_2_AARCH32 Failed.\n");
+		SMC_RET1(handle, SMC_UNK);
+		/* break is not required as SMC_RETx returns */
+	case SIP_SVC_PORSR1:
+		ret = bl31_get_porsr1();
+		SMC_RET2(handle, SMC_OK, ret);
+		/* break is not required as SMC_RETx returns */
+	default:
+		return nxp_plat_sip_handler(smc_fid, x1, x2, x3, x4,
+				cookie, handle, flags);
+	}
+}
+
+/* This function is responsible for handling all SiP calls */
+static uintptr_t sip_smc_handler(unsigned int smc_fid,
+				 u_register_t x1,
+				 u_register_t x2,
+				 u_register_t x3,
+				 u_register_t x4,
+				 void *cookie,
+				 void *handle,
+				 u_register_t flags)
+{
+	switch (smc_fid & SMC_FUNC_MASK) {
+	case SIP_SVC_CALL_COUNT:
+		/* Return the number of Layerscape SiP Service Calls. */
+		SMC_RET1(handle, LS_COMMON_SIP_NUM_CALLS);
+		break;
+	case SIP_SVC_UID:
+		/* Return UID to the caller */
+		SMC_UUID_RET(handle, nxp_sip_svc_uid);
+		break;
+	case SIP_SVC_VERSION:
+		/* Return the version of current implementation */
+		SMC_RET2(handle, LS_SIP_SVC_VERSION_MAJOR,
+			 LS_SIP_SVC_VERSION_MINOR);
+		break;
+	default:
+		return nxp_sip_handler(smc_fid, x1, x2, x3, x4,
+				       cookie, handle, flags);
+	}
+}
+
+/* Define a runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+	nxp_sip_svc,
+	OEN_SIP_START,
+	OEN_SIP_END,
+	SMC_TYPE_FAST,
+	NULL,
+	sip_smc_handler
+);
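
Editor's note: from the normal world these services are reached as fast SiP SMCs whose low 16 bits carry the function numbers from sipsvc.h; for SIP_SVC_MEM_BANK, x1 == -1 returns the total DRAM size and a valid region index returns that region's base and size. A rough Linux-side sketch follows, assuming the SMCCC helper from <linux/arm-smccc.h>; the full SMC64 function ID shown is an assumption built from the standard SMCCC encoding, not a value taken from this patch:

/* Linux-side sketch: query the total DRAM size via SIP_SVC_MEM_BANK. */
#include <linux/arm-smccc.h>

/* Assumed SMC64 fast-call SiP FID: owner SiP (0x02), function 0xff12. */
#define LS_SIP_SVC_MEM_BANK	0xC200FF12UL

static unsigned long ls_query_total_dram(void)
{
	struct arm_smccc_res res;

	/* x1 == -1 asks for the total size across all DRAM regions. */
	arm_smccc_smc(LS_SIP_SVC_MEM_BANK, -1UL, 0, 0, 0, 0, 0, 0, &res);

	/* a0 carries SMC_OK (0) on success, a1 the total size in bytes. */
	return (res.a0 == 0) ? res.a1 : 0;
}
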
diff --git a/plat/nxp/common/sip_svc/sipsvc.mk b/plat/nxp/common/sip_svc/sipsvc.mk
new file mode 100644
index 0000000..c3a57de
--- /dev/null
+++ b/plat/nxp/common/sip_svc/sipsvc.mk
@@ -0,0 +1,35 @@
+#
+# Copyright 2018-2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+#------------------------------------------------------------------------------
+#
+# Select the SIP SVC files
+#
+# -----------------------------------------------------------------------------
+
+ifeq (${ADD_SIPSVC},)
+
+ADD_SIPSVC		:= 1
+
+PLAT_SIPSVC_PATH	:= $(PLAT_COMMON_PATH)/sip_svc
+
+SIPSVC_SOURCES		:= ${PLAT_SIPSVC_PATH}/sip_svc.c \
+			   ${PLAT_SIPSVC_PATH}/$(ARCH)/sipsvc.S
+
+PLAT_INCLUDES		+=	-I${PLAT_SIPSVC_PATH}/include
+
+ifeq (${BL_COMM_SIPSVC_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${SIPSVC_SOURCES}
+else
+ifeq (${BL2_SIPSVC_NEEDED},yes)
+BL2_SOURCES		+= ${SIPSVC_SOURCES}
+endif
+ifeq (${BL31_SIPSVC_NEEDED},yes)
+BL31_SOURCES		+= ${SIPSVC_SOURCES}
+endif
+endif
+endif
+# -----------------------------------------------------------------------------
diff --git a/plat/nxp/common/tbbr/csf_tbbr.c b/plat/nxp/common/tbbr/csf_tbbr.c
new file mode 100644
index 0000000..8f38f3e
--- /dev/null
+++ b/plat/nxp/common/tbbr/csf_tbbr.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2018-2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ *
+ */
+
+#include <errno.h>
+
+#include <common/debug.h>
+#include <csf_hdr.h>
+#include <dcfg.h>
+#include <drivers/auth/crypto_mod.h>
+#include <snvs.h>
+
+#include <plat/common/platform.h>
+#include "plat_common.h"
+
+extern bool rotpk_not_dpld;
+extern uint8_t rotpk_hash_table[MAX_KEY_ENTRIES][SHA256_BYTES];
+extern uint32_t num_rotpk_hash_entries;
+
+/*
+ * For secure boot, return a pointer to the rotpk_hash table in key_ptr and
+ * the number of hashes in key_len.
+ */
+int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len,
+			unsigned int *flags)
+{
+	uint32_t mode = 0U;
+	*flags = ROTPK_NOT_DEPLOYED;
+
+	/* ROTPK hash table must be available for secure boot */
+	if (rotpk_not_dpld == true) {
+		if (check_boot_mode_secure(&mode) == true) {
+			/* Production mode, don't continue further */
+			if (mode == 1U) {
+				return -EAUTH;
+			}
+
+			/* For development mode, a false rotpk flag
+			 * indicates that the SRK hash comparison might
+			 * have failed. This is not a fatal error.
+			 * Continue in this case but transition SNVS
+			 * to the non-secure state.
+			 */
+			transition_snvs_non_secure();
+			return 0;
+		} else {
+			return 0;
+		}
+	}
+
+	/*
+	 * We return the complete hash table and the number of entries in
+	 * the table for the NXP platform specific implementation.
+	 * Here the hash is always assumed to be SHA-256.
+	 */
+	*key_ptr = rotpk_hash_table;
+	*key_len = num_rotpk_hash_entries;
+	*flags = ROTPK_IS_HASH;
+
+	return 0;
+}
+
+int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr)
+{
+	/*
+	 * No support for non-volatile counter. Update the ROT key to protect
+	 * the system against rollback.
+	 */
+	*nv_ctr = 0U;
+
+	return 0;
+}
+
+int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr)
+{
+	return 0;
+}
diff --git a/plat/nxp/common/tbbr/nxp_rotpk.S b/plat/nxp/common/tbbr/nxp_rotpk.S
new file mode 100644
index 0000000..8e084d1
--- /dev/null
+++ b/plat/nxp/common/tbbr/nxp_rotpk.S
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ *
+ */
+
+#ifndef _CSF_HDR_H_
+
+	.global nxp_rotpk_hash
+	.global nxp_rotpk_hash_end
+	.section .rodata.nxp_rotpk_hash, "a"
+nxp_rotpk_hash:
+	/* DER header */
+	.byte 0x30, 0x31, 0x30, 0x0D, 0x06, 0x09, 0x60, 0x86, 0x48
+	.byte 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20
+	/* SHA256 */
+	.incbin ROTPK_HASH
+nxp_rotpk_hash_end:
+#endif
diff --git a/plat/nxp/common/tbbr/tbbr.mk b/plat/nxp/common/tbbr/tbbr.mk
new file mode 100644
index 0000000..25852ba
--- /dev/null
+++ b/plat/nxp/common/tbbr/tbbr.mk
@@ -0,0 +1,155 @@
+#
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Platforms that enable TRUSTED_BOARD_BOOT need to include this makefile.
+# The following definitions are to be provided by the platform.mk file or
+# by the user - BL33_INPUT_FILE, BL32_INPUT_FILE, BL31_INPUT_FILE
+
+ifeq ($(CHASSIS), 2)
+include $(PLAT_DRIVERS_PATH)/csu/csu.mk
+CSF_FILE		:=	input_blx_ch${CHASSIS}
+BL2_CSF_FILE		:=	input_bl2_ch${CHASSIS}
+else
+ifeq ($(CHASSIS), 3_2)
+CSF_FILE		:=	input_blx_ch3
+BL2_CSF_FILE		:=	input_bl2_ch${CHASSIS}
+PBI_CSF_FILE		:=	input_pbi_ch${CHASSIS}
+$(eval $(call add_define, CSF_HDR_CH3))
+else
+    $(error -> CHASSIS not set!)
+endif
+endif
+
+PLAT_AUTH_PATH		:=  $(PLAT_DRIVERS_PATH)/auth
+
+
+ifeq (${BL2_INPUT_FILE},)
+    BL2_INPUT_FILE	:= $(PLAT_AUTH_PATH)/csf_hdr_parser/${BL2_CSF_FILE}
+endif
+
+ifeq (${PBI_INPUT_FILE},)
+    PBI_INPUT_FILE	:= $(PLAT_AUTH_PATH)/csf_hdr_parser/${PBI_CSF_FILE}
+endif
+
+# If MBEDTLS_DIR is not specified, use CSF Header option
+ifeq (${MBEDTLS_DIR},)
+    # Generic image processing filters to prepend CSF header
+    ifeq (${BL33_INPUT_FILE},)
+    BL33_INPUT_FILE	:= $(PLAT_AUTH_PATH)/csf_hdr_parser/${CSF_FILE}
+    endif
+
+    ifeq (${BL31_INPUT_FILE},)
+    BL31_INPUT_FILE	:= $(PLAT_AUTH_PATH)/csf_hdr_parser/${CSF_FILE}
+    endif
+
+    ifeq (${BL32_INPUT_FILE},)
+    BL32_INPUT_FILE	:= $(PLAT_AUTH_PATH)/csf_hdr_parser/${CSF_FILE}
+    endif
+
+    ifeq (${FUSE_INPUT_FILE},)
+    FUSE_INPUT_FILE	:= $(PLAT_AUTH_PATH)/csf_hdr_parser/${CSF_FILE}
+    endif
+
+    PLAT_INCLUDES	+= -I$(PLAT_DRIVERS_PATH)/sfp
+    PLAT_TBBR_SOURCES	+= $(PLAT_AUTH_PATH)/csf_hdr_parser/cot.c	\
+			   $(PLAT_COMMON_PATH)/tbbr/csf_tbbr.c
+    # IMG PARSER here is CSF header parser
+    include $(PLAT_DRIVERS_PATH)/auth/csf_hdr_parser/csf_hdr.mk
+    PLAT_TBBR_SOURCES 	+=	$(CSF_HDR_SOURCES)
+
+    SCP_BL2_PRE_TOOL_FILTER	:= CST_SCP_BL2
+    BL31_PRE_TOOL_FILTER	:= CST_BL31
+    BL32_PRE_TOOL_FILTER	:= CST_BL32
+    BL33_PRE_TOOL_FILTER	:= CST_BL33
+else
+
+    ifeq (${DISABLE_FUSE_WRITE}, 1)
+        $(eval $(call add_define,DISABLE_FUSE_WRITE))
+    endif
+
+    # For mbed TLS, crypto via CAAM is currently not supported;
+    # enable it when that support is available.
+    CAAM_INTEG		:= 0
+    KEY_ALG		:= rsa
+    KEY_SIZE		:= 2048
+
+    $(eval $(call add_define,MBEDTLS_X509))
+    ifeq (${PLAT_DDR_PHY},PHY_GEN2)
+        $(eval $(call add_define,PLAT_DEF_OID))
+    endif
+    include drivers/auth/mbedtls/mbedtls_x509.mk
+
+
+    PLAT_TBBR_SOURCES	+= $(PLAT_AUTH_PATH)/tbbr/tbbr_cot.c \
+			   $(PLAT_COMMON_PATH)/tbbr/nxp_rotpk.S \
+			   $(PLAT_COMMON_PATH)/tbbr/x509_tbbr.c
+
+    # The ROTPK is embedded in the BL2 image
+    ifeq (${ROT_KEY},)
+	ROT_KEY		= $(BUILD_PLAT)/rot_key.pem
+    endif
+
+    ifeq (${SAVE_KEYS},1)
+
+        ifeq (${TRUSTED_WORLD_KEY},)
+            TRUSTED_WORLD_KEY = ${BUILD_PLAT}/trusted.pem
+        endif
+
+        ifeq (${NON_TRUSTED_WORLD_KEY},)
+            NON_TRUSTED_WORLD_KEY = ${BUILD_PLAT}/non-trusted.pem
+        endif
+
+        ifeq (${BL31_KEY},)
+            BL31_KEY = ${BUILD_PLAT}/soc.pem
+        endif
+
+        ifeq (${BL32_KEY},)
+            BL32_KEY = ${BUILD_PLAT}/trusted_os.pem
+        endif
+
+        ifeq (${BL33_KEY},)
+            BL33_KEY = ${BUILD_PLAT}/non-trusted_os.pem
+        endif
+
+    endif
+
+    ROTPK_HASH		= $(BUILD_PLAT)/rotpk_sha256.bin
+
+    $(eval $(call add_define_val,ROTPK_HASH,'"$(ROTPK_HASH)"'))
+
+    $(BUILD_PLAT)/bl2/nxp_rotpk.o: $(ROTPK_HASH)
+
+    certificates: $(ROT_KEY)
+    $(ROT_KEY): | $(BUILD_PLAT)
+	@echo "  OPENSSL $@"
+	@if [ ! -f $(ROT_KEY) ]; then \
+		openssl genrsa 2048 > $@ 2>/dev/null; \
+	fi
+
+    $(ROTPK_HASH): $(ROT_KEY)
+	@echo "  OPENSSL $@"
+	$(Q)openssl rsa -in $< -pubout -outform DER 2>/dev/null |\
+	openssl dgst -sha256 -binary > $@ 2>/dev/null
+
+endif #MBEDTLS_DIR
+
+PLAT_INCLUDES		+=	-Iinclude/common/tbbr
+
+# Generic files for authentication framework
+TBBR_SOURCES		+=	drivers/auth/auth_mod.c		\
+				drivers/auth/crypto_mod.c	\
+				drivers/auth/img_parser_mod.c	\
+				plat/common/tbbr/plat_tbbr.c	\
+				${PLAT_TBBR_SOURCES}
+
+# If CAAM_INTEG is 0 (the case when mbed TLS is used),
+# include mbedtls_crypto
+ifeq (${CAAM_INTEG},0)
+    include drivers/auth/mbedtls/mbedtls_crypto.mk
+else
+    include $(PLAT_DRIVERS_PATH)/crypto/caam/src/auth/auth.mk
+    TBBR_SOURCES	+= ${AUTH_SOURCES}
+endif
diff --git a/plat/nxp/common/tbbr/x509_tbbr.c b/plat/nxp/common/tbbr/x509_tbbr.c
new file mode 100644
index 0000000..ec87674
--- /dev/null
+++ b/plat/nxp/common/tbbr/x509_tbbr.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2018-2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <lib/cassert.h>
+#include <sfp.h>
+#include <tools_share/tbbr_oid.h>
+
+#include <plat/common/platform.h>
+#include "plat_common.h"
+
+extern char nxp_rotpk_hash[], nxp_rotpk_hash_end[];
+
+int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len,
+			unsigned int *flags)
+{
+	*key_ptr = nxp_rotpk_hash;
+	*key_len = nxp_rotpk_hash_end - nxp_rotpk_hash;
+	*flags = ROTPK_IS_HASH;
+
+	return 0;
+}
+
+int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr)
+{
+	const char *oid;
+	uint32_t uid_num;
+	uint32_t val = 0U;
+
+	assert(cookie != NULL);
+	assert(nv_ctr != NULL);
+
+	oid = (const char *)cookie;
+	if (strcmp(oid, TRUSTED_FW_NVCOUNTER_OID) == 0) {
+		uid_num = 3U;
+	} else if (strcmp(oid, NON_TRUSTED_FW_NVCOUNTER_OID) == 0) {
+		uid_num = 4U;
+	} else {
+		return 1;
+	}
+
+	val = sfp_read_oem_uid(uid_num);
+
+	INFO("SFP Value read is %x from UID %d\n", val, uid_num);
+	if (val == 0U) {
+		*nv_ctr = 0U;
+	} else {
+		*nv_ctr = (32U - __builtin_clz(val));
+	}
+
+	INFO("NV Counter value for UID %d is %d\n", uid_num, *nv_ctr);
+	return 0;
+}
+
+int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr)
+{
+	const char *oid;
+	uint32_t uid_num, sfp_val;
+
+	assert(cookie != NULL);
+
+	/* Counter values up to 32 are supported */
+	if (nv_ctr > 32U) {
+		return 1;
+	}
+
+	oid = (const char *)cookie;
+	if (strcmp(oid, TRUSTED_FW_NVCOUNTER_OID) == 0) {
+		uid_num = 3U;
+	} else if (strcmp(oid, NON_TRUSTED_FW_NVCOUNTER_OID) == 0) {
+		uid_num = 4U;
+	} else {
+		return 1;
+	}
+	sfp_val = (1U << (nv_ctr - 1));
+
+	if (sfp_write_oem_uid(uid_num, sfp_val) == 1) {
+		/* Enable POVDD on board */
+		if (board_enable_povdd()) {
+			sfp_program_fuses();
+		}
+
+		/* Disable POVDD on board */
+		board_disable_povdd();
+	} else {
+		ERROR("Invalid OEM UID sent.\n");
+		return 1;
+	}
+
+	return 0;
+}
+
+int plat_get_mbedtls_heap(void **heap_addr, size_t *heap_size)
+{
+	return get_mbedtls_heap_helper(heap_addr, heap_size);
+}
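
Editor's note: the SFP OEM UID words above store the counter in a form where only new bits ever need to be set: the position of the highest set bit encodes the counter value, which suits write-once fuses. A standalone sketch of the same encode/decode arithmetic, using the compiler builtin already relied on above:

/* Sketch of the fuse encoding used by plat_get_nv_ctr()/plat_set_nv_ctr(). */
#include <stdint.h>

/* Decode: 0 means counter 0; otherwise the counter is the index of the
 * highest set bit plus one (e.g. 0x4 decodes to 3).
 */
static unsigned int nv_ctr_from_fuse(uint32_t fuse_val)
{
	return (fuse_val == 0U) ?
		0U : (32U - (unsigned int)__builtin_clz(fuse_val));
}

/* Encode: counter n (1..32) is stored by setting bit (n - 1). */
static uint32_t fuse_from_nv_ctr(unsigned int nv_ctr)
{
	return (nv_ctr == 0U) ? 0U : (1U << (nv_ctr - 1U));
}

For example, a fuse word of 0x4 decodes to counter 3; advancing to 4 programs bit 3, and even though the fuses accumulate to 0xc, the highest set bit still decodes to 4.
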
diff --git a/plat/nxp/common/warm_reset/plat_warm_reset.c b/plat/nxp/common/warm_reset/plat_warm_reset.c
new file mode 100644
index 0000000..966a73c
--- /dev/null
+++ b/plat/nxp/common/warm_reset/plat_warm_reset.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <errno.h>
+
+#include <common/debug.h>
+#include <ddr.h>
+#ifndef NXP_COINED_BB
+#include <flash_info.h>
+#include <fspi.h>
+#include <fspi_api.h>
+#endif
+#include <lib/mmio.h>
+#include <lib/psci/psci.h>
+#ifdef NXP_COINED_BB
+#include <snvs.h>
+#endif
+
+#include <plat_nv_storage.h>
+#include "plat_warm_rst.h"
+#include "platform_def.h"
+
+#if defined(IMAGE_BL2)
+
+uint32_t is_warm_boot(void)
+{
+	uint32_t ret = mmio_read_32(NXP_RESET_ADDR + RST_RSTRQSR1_OFFSET)
+				& ~(RSTRQSR1_SWRR);
+
+	const nv_app_data_t *nv_app_data = get_nv_data();
+
+	if (ret == 0U) {
+		INFO("Not a SW(Warm) triggered reset.\n");
+		return 0U;
+	}
+
+	ret = (nv_app_data->warm_rst_flag == WARM_BOOT_SUCCESS) ? 1 : 0;
+
+	if (ret != 0U) {
+		INFO("Warm reset was triggered.\n");
+	} else {
+		INFO("Warm reset was not triggered.\n");
+	}
+
+	return ret;
+}
+
+#endif
+
+#if defined(IMAGE_BL31)
+int prep_n_execute_warm_reset(void)
+{
+#ifdef NXP_COINED_BB
+#if !TRUSTED_BOARD_BOOT
+	snvs_disable_zeroize_lp_gpr();
+#endif
+#else
+	int ret;
+	uint8_t warm_reset = WARM_BOOT_SUCCESS;
+
+	ret = fspi_init(NXP_FLEXSPI_ADDR, NXP_FLEXSPI_FLASH_ADDR);
+
+	if (ret != 0) {
+		ERROR("Failed to initialize the flexspi-nor driver.\n");
+		ERROR("Exiting warm-reset request.\n");
+		return PSCI_E_INTERN_FAIL;
+	}
+
+	/* The sector starting at NV_STORAGE_BASE_ADDR has already been
+	 * erased for writing.
+	 */
+
+#if (ERLY_WRM_RST_FLG_FLSH_UPDT)
+	ret = xspi_write((uint32_t)NV_STORAGE_BASE_ADDR,
+			 &warm_reset,
+			 sizeof(warm_reset));
+#else
+	/* Preparation for writing the Warm reset flag. */
+	ret = xspi_wren((uint32_t)NV_STORAGE_BASE_ADDR);
+
+	/* IP Control Register0 - SF Address to be read */
+	fspi_out32((NXP_FLEXSPI_ADDR + FSPI_IPCR0),
+		   (uint32_t) NV_STORAGE_BASE_ADDR);
+
+	while ((fspi_in32(NXP_FLEXSPI_ADDR + FSPI_INTR) &
+		FSPI_INTR_IPTXWE_MASK) == 0) {
+		;
+	}
+	/* Write TX FIFO Data Register */
+	fspi_out32(NXP_FLEXSPI_ADDR + FSPI_TFDR, (uint32_t) warm_reset);
+
+	fspi_out32(NXP_FLEXSPI_ADDR + FSPI_INTR, FSPI_INTR_IPTXWE);
+
+	/* IP Control Register1 - SEQID_WRITE operation, Size = 1 Byte */
+	fspi_out32(NXP_FLEXSPI_ADDR + FSPI_IPCR1,
+		   (uint32_t)(FSPI_WRITE_SEQ_ID << FSPI_IPCR1_ISEQID_SHIFT) |
+		   (uint16_t) sizeof(warm_reset));
+
+	/* Trigger the XSPI IP write command only after DDR has been
+	 * put into self-refresh mode successfully, so that the write
+	 * of the warm-reset flag to flash can complete.
+	 *
+	 * That step is done as part of the assembly code.
+	 */
+#endif
+#endif
+	INFO("Doing DDR Self refresh.\n");
+	_soc_sys_warm_reset();
+
+	/* Expected behaviour is to do the power cycle */
+	while (1 != 0)
+		;
+
+	return -1;
+}
+#endif
diff --git a/plat/nxp/common/warm_reset/plat_warm_rst.h b/plat/nxp/common/warm_reset/plat_warm_rst.h
new file mode 100644
index 0000000..e0c39c5
--- /dev/null
+++ b/plat/nxp/common/warm_reset/plat_warm_rst.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PLAT_WARM_RST_H
+#define PLAT_WARM_RST_H
+
+#ifndef NXP_COINED_BB
+#define ERLY_WRM_RST_FLG_FLSH_UPDT	0
+#endif
+
+#ifndef __ASSEMBLER__
+
+#if defined(IMAGE_BL2)
+uint32_t is_warm_boot(void);
+#endif
+
+#if defined(IMAGE_BL31)
+int prep_n_execute_warm_reset(void);
+int _soc_sys_warm_reset(void);
+#endif
+
+#endif	/* __ASSEMBLER__ */
+
+#endif	/* PLAT_WARM_RST_H */
diff --git a/plat/nxp/common/warm_reset/warm_reset.mk b/plat/nxp/common/warm_reset/warm_reset.mk
new file mode 100644
index 0000000..236004f
--- /dev/null
+++ b/plat/nxp/common/warm_reset/warm_reset.mk
@@ -0,0 +1,20 @@
+#
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-----------------------------------------------------------------------------
+ifeq (${WARM_RST_ADDED},)
+
+WARM_RST_ADDED	:=	1
+NXP_NV_SW_MAINT_LAST_EXEC_DATA := yes
+
+$(eval $(call add_define,NXP_WARM_BOOT))
+
+
+WARM_RST_INCLUDES	+=	-I${PLAT_COMMON_PATH}/warm_reset
+WARM_RST_BL31_SOURCES	+=	${PLAT_SOC_PATH}/$(ARCH)/${SOC}_warm_rst.S
+
+WARM_RST_BL_COMM_SOURCES	+=	${PLAT_COMMON_PATH}/warm_reset/plat_warm_reset.c
+
+endif
diff --git a/plat/nxp/soc-lx2160a/aarch64/lx2160a.S b/plat/nxp/soc-lx2160a/aarch64/lx2160a.S
new file mode 100644
index 0000000..4679fc2
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/aarch64/lx2160a.S
@@ -0,0 +1,1824 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+.section .text, "ax"
+
+#include <asm_macros.S>
+
+#include <lib/psci/psci.h>
+#include <nxp_timer.h>
+#include <plat_gic.h>
+#include <pmu.h>
+
+#include <bl31_data.h>
+#include <plat_psci.h>
+#include <platform_def.h>
+
+.global soc_init_start
+.global soc_init_percpu
+.global soc_init_finish
+.global _set_platform_security
+.global _soc_set_start_addr
+
+.global _soc_core_release
+.global _soc_ck_disabled
+.global _soc_core_restart
+.global _soc_core_prep_off
+.global _soc_core_entr_off
+.global _soc_core_exit_off
+.global _soc_sys_reset
+.global _soc_sys_off
+.global _soc_core_prep_stdby
+.global _soc_core_entr_stdby
+.global _soc_core_exit_stdby
+.global _soc_core_prep_pwrdn
+.global _soc_core_entr_pwrdn
+.global _soc_core_exit_pwrdn
+.global _soc_clstr_prep_stdby
+.global _soc_clstr_exit_stdby
+.global _soc_clstr_prep_pwrdn
+.global _soc_clstr_exit_pwrdn
+.global _soc_sys_prep_stdby
+.global _soc_sys_exit_stdby
+.global _soc_sys_prep_pwrdn
+.global _soc_sys_pwrdn_wfi
+.global _soc_sys_exit_pwrdn
+
+.equ TZPC_BASE,			  0x02200000
+.equ TZPCDECPROT_0_SET_BASE, 0x02200804
+.equ TZPCDECPROT_1_SET_BASE, 0x02200810
+.equ TZPCDECPROT_2_SET_BASE, 0x0220081C
+
+#define CLUSTER_3_CORES_MASK 0xC0
+#define CLUSTER_3_IN_RESET  1
+#define CLUSTER_3_NORMAL	0
+
+/* Cluster 3 handling is no longer based on frequency, but rather on RCW[850],
+ * which is bit 18 of RCWSR27.
+ */
+#define CLUSTER_3_RCW_BIT  0x40000
+
+/* retry count for clock-stop acks */
+.equ CLOCK_RETRY_CNT,  800
+
+/* disable prefetching in the A72 core */
+#define  CPUACTLR_DIS_LS_HW_PRE	0x100000000000000
+#define  CPUACTLR_DIS_L2_TLB_PRE   0x200000
+
+/* Function starts the initialization tasks of the soc,
+ * using secondary cores if they are available.
+ *
+ * Called from C, so the non-volatile regs are saved; they are
+ * saved as register pairs to maintain the required 16-byte
+ * alignment on the stack.
+ *
+ * in:
+ * out:
+ * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11
+ */
+func soc_init_start
+	stp  x4,  x5,  [sp, #-16]!
+	stp  x6,  x7,  [sp, #-16]!
+	stp  x8,  x9,  [sp, #-16]!
+	stp  x10, x11, [sp, #-16]!
+	stp  x12, x13, [sp, #-16]!
+	stp  x18, x30, [sp, #-16]!
+
+	/* make sure the personality has been
+	 * established by releasing cores that
+	 * are marked "to-be-disabled" from reset
+	 */
+	bl  release_disabled  		/* 0-9 */
+
+	/* init the task flags */
+	bl  _init_task_flags   		/* 0-1 */
+
+	/* set SCRATCHRW7 to 0x0 */
+	ldr  x0, =DCFG_SCRATCHRW7_OFFSET
+	mov  x1, xzr
+	bl   _write_reg_dcfg
+
+1:
+	/* restore the aarch32/64 non-volatile registers */
+	ldp  x18, x30, [sp], #16
+	ldp  x12, x13, [sp], #16
+	ldp  x10, x11, [sp], #16
+	ldp  x8,  x9,  [sp], #16
+	ldp  x6,  x7,  [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	ret
+endfunc soc_init_start
+
+
+/* Function performs any soc-specific initialization that is needed on
+ * a per-core basis.
+ * in:  none
+ * out: none
+ * uses x0, x1, x2, x3
+ */
+func soc_init_percpu
+	stp  x4,  x30,  [sp, #-16]!
+
+	bl   plat_my_core_mask
+	mov  x2, x0				/* x2 = core mask */
+
+	/* Check if this core is marked for prefetch disable
+	 */
+	mov   x0, #PREFETCH_DIS_OFFSET
+	bl	_get_global_data		/* 0-1 */
+	tst   x0, x2
+	b.eq  1f
+	bl	_disable_ldstr_pfetch_A72	/* 0 */
+1:
+	mov  x0, #NXP_PMU_ADDR
+	bl enable_timer_base_to_cluster
+	ldp  x4,  x30,  [sp], #16
+	ret
+endfunc soc_init_percpu
+
+
+/* Function completes the initialization tasks of the soc
+ * in:
+ * out:
+ * uses x0, x1, x2, x3, x4
+ */
+func soc_init_finish
+	stp  x4,  x30,  [sp, #-16]!
+
+	ldp   x4,  x30,  [sp], #16
+	ret
+endfunc soc_init_finish
+
+
+/* Function sets the security mechanisms in the SoC to implement the
+ * Platform Security Policy
+ */
+func _set_platform_security
+	mov  x8, x30
+
+#if (!SUPPRESS_TZC)
+	/* initialize the tzpc */
+	bl   init_tzpc
+#endif
+
+#if (!SUPPRESS_SEC)
+	/* initialize secmon */
+#ifdef NXP_SNVS_ENABLED
+	mov x0, #NXP_SNVS_ADDR
+	bl  init_sec_mon
+#endif
+#endif
+
+	mov  x30, x8
+	ret
+endfunc _set_platform_security
+
+
+/* Function writes a 64-bit address to bootlocptrh/l
+ * in:  x0, 64-bit address to write to BOOTLOCPTRL/H
+ * uses x0, x1, x2
+ */
+func _soc_set_start_addr
+	/* Get the 64-bit base address of the dcfg block */
+	ldr  x2, =NXP_DCFG_ADDR
+
+	/* write the 32-bit BOOTLOCPTRL register */
+	mov  x1, x0
+	str  w1, [x2, #DCFG_BOOTLOCPTRL_OFFSET]
+
+	/* write the 32-bit BOOTLOCPTRH register */
+	lsr  x1, x0, #32
+	str  w1, [x2, #DCFG_BOOTLOCPTRH_OFFSET]
+	ret
+endfunc _soc_set_start_addr
+
+/* Function releases a secondary core from reset
+ * in:   x0 = core_mask_lsb
+ * out:  none
+ * uses: x0, x1, x2, x3
+ */
+func _soc_core_release
+	mov   x3, x30
+
+	ldr  x1, =NXP_SEC_REGFILE_ADDR
+	/* write to CORE_HOLD to tell
+	 * the bootrom that this core is
+	 * expected to run.
+	 */
+	str  w0, [x1, #CORE_HOLD_OFFSET]
+
+	/* read-modify-write BRRL to release core */
+	mov  x1, #NXP_RESET_ADDR
+	ldr  w2, [x1, #BRR_OFFSET]
+
+	/* x0 = core mask */
+	orr  w2, w2, w0
+	str  w2, [x1, #BRR_OFFSET]
+	dsb  sy
+	isb
+
+	/* send event */
+	sev
+	isb
+
+	mov   x30, x3
+	ret
+endfunc _soc_core_release
+
+
+/* Function determines if a core is disabled via COREDISABLEDSR
+ * in:  w0  = core_mask_lsb
+ * out: w0  = 0, core not disabled
+ *	  w0 != 0, core disabled
+ * uses x0, x1
+ */
+func _soc_ck_disabled
+
+	/* get base addr of dcfg block */
+	ldr  x1, =NXP_DCFG_ADDR
+
+	/* read COREDISABLEDSR */
+	ldr  w1, [x1, #DCFG_COREDISABLEDSR_OFFSET]
+
+	/* test core bit */
+	and  w0, w1, w0
+
+	ret
+endfunc _soc_ck_disabled
+
+
+/* Part of CPU_ON
+ * Function restarts a core shutdown via _soc_core_entr_off
+ * in:  x0 = core mask lsb (of the target cpu)
+ * out: x0 == 0, on success
+ *	  x0 != 0, on failure
+ * uses x0, x1, x2, x3, x4, x5, x6
+ */
+func _soc_core_restart
+	mov  x6, x30
+	mov  x4, x0
+
+	/* pgm GICD_CTLR - enable secure grp0  */
+	mov  x5, #NXP_GICD_ADDR
+	ldr  w2, [x5, #GICD_CTLR_OFFSET]
+	orr  w2, w2, #GICD_CTLR_EN_GRP_0
+	str  w2, [x5, #GICD_CTLR_OFFSET]
+	dsb sy
+	isb
+
+	/* poll on RWP until the write completes */
+4:
+	ldr  w2, [x5, #GICD_CTLR_OFFSET]
+	tst  w2, #GICD_CTLR_RWP
+	b.ne 4b
+
+	/* x4 = core mask lsb
+	* x5 = gicd base addr
+	*/
+	mov  x0, x4
+	bl   get_mpidr_value
+
+	/* x0 = mpidr of target core
+	* x4 = core mask lsb of target core
+	* x5 = gicd base addr
+	*/
+
+	/* generate target list bit */
+	and  x1, x0, #MPIDR_AFFINITY0_MASK
+	mov  x2, #1
+	lsl  x2, x2, x1
+
+	/* get the affinity1 field */
+	and  x1, x0, #MPIDR_AFFINITY1_MASK
+	lsl  x1, x1, #8
+	orr  x2, x2, x1
+
+	/* insert the INTID for SGI15 */
+	orr  x2, x2, #ICC_SGI0R_EL1_INTID
+
+	/* fire the SGI */
+	msr  ICC_SGI0R_EL1, x2
+	dsb  sy
+	isb
+
+	/* load '0' on success */
+	mov  x0, xzr
+
+	mov  x30, x6
+	ret
+endfunc _soc_core_restart
+
+
+/* Part of CPU_OFF
+ * Function programs SoC & GIC registers in preparation for shutting down
+ * the core
+ * in:  x0 = core mask lsb
+ * out: none
+ * uses x0, x1, x2, x3, x4, x5, x6, x7
+ */
+func _soc_core_prep_off
+	mov  x8, x30
+	mov  x7, x0		/* x7 = core mask lsb */
+
+	mrs  x1, CORTEX_A72_ECTLR_EL1
+
+	/* set smp and disable L2 snoops in cpuectlr */
+	orr  x1, x1, #CPUECTLR_SMPEN_EN
+	orr  x1, x1, #CPUECTLR_DISABLE_TWALK_PREFETCH
+	bic  x1, x1, #CPUECTLR_INS_PREFETCH_MASK
+	bic  x1, x1, #CPUECTLR_DAT_PREFETCH_MASK
+
+	/* set retention control in cpuectlr */
+	bic  x1, x1, #CPUECTLR_TIMER_MASK
+	orr  x1, x1, #CPUECTLR_TIMER_8TICKS
+	msr  CORTEX_A72_ECTLR_EL1, x1
+
+	/* get redistributor rd base addr for this core */
+	mov  x0, x7
+	bl   get_gic_rd_base
+	mov  x6, x0
+
+	/* get redistributor sgi base addr for this core */
+	mov  x0, x7
+	bl   get_gic_sgi_base
+	mov  x5, x0
+
+	/* x5 = gicr sgi base addr
+ 	 * x6 = gicr rd  base addr
+	 * x7 = core mask lsb
+	 */
+
+	/* disable SGI 15 at redistributor - GICR_ICENABLER0 */
+	mov  w3, #GICR_ICENABLER0_SGI15
+	str  w3, [x5, #GICR_ICENABLER0_OFFSET]
+2:
+	/* poll on rwp bit in GICR_CTLR */
+	ldr  w4, [x6, #GICR_CTLR_OFFSET]
+	tst  w4, #GICR_CTLR_RWP
+	b.ne 2b
+
+	/* disable GRP1 interrupts at cpu interface */
+	msr  ICC_IGRPEN1_EL3, xzr
+
+	/* disable GRP0 ints at cpu interface */
+	msr  ICC_IGRPEN0_EL1, xzr
+
+	/* program the redistributor - poll on GICR_CTLR.RWP as needed */
+
+	/* define SGI 15 as Grp0 - GICR_IGROUPR0 */
+	ldr  w4, [x5, #GICR_IGROUPR0_OFFSET]
+	bic  w4, w4, #GICR_IGROUPR0_SGI15
+	str  w4, [x5, #GICR_IGROUPR0_OFFSET]
+
+	/* define SGI 15 as Grp0 - GICR_IGRPMODR0 */
+	ldr  w3, [x5, #GICR_IGRPMODR0_OFFSET]
+	bic  w3, w3, #GICR_IGRPMODR0_SGI15
+	str  w3, [x5, #GICR_IGRPMODR0_OFFSET]
+
+	/* set priority of SGI 15 to highest (0x0) - GICR_IPRIORITYR3 */
+	ldr  w4, [x5, #GICR_IPRIORITYR3_OFFSET]
+	bic  w4, w4, #GICR_IPRIORITYR3_SGI15_MASK
+	str  w4, [x5, #GICR_IPRIORITYR3_OFFSET]
+
+	/* enable SGI 15 at redistributor - GICR_ISENABLER0 */
+	mov  w3, #GICR_ISENABLER0_SGI15
+	str  w3, [x5, #GICR_ISENABLER0_OFFSET]
+	dsb  sy
+	isb
+3:
+	/* poll on rwp bit in GICR_CTLR */
+	ldr  w4, [x6, #GICR_CTLR_OFFSET]
+	tst  w4, #GICR_CTLR_RWP
+	b.ne 3b
+
+	/* quiesce the debug interfaces */
+	mrs  x3, osdlr_el1
+	orr  x3, x3, #OSDLR_EL1_DLK_LOCK
+	msr  osdlr_el1, x3
+	isb
+
+	/* enable grp0 ints */
+	mov  x3, #ICC_IGRPEN0_EL1_EN
+	msr  ICC_IGRPEN0_EL1, x3
+
+	/* x5 = gicr sgi base addr
+	 * x6 = gicr rd  base addr
+	 * x7 = core mask lsb
+	 */
+
+	/* clear any pending interrupts */
+	mvn  w1, wzr
+	str  w1, [x5, #GICR_ICPENDR0_OFFSET]
+
+	/* make sure system counter is enabled */
+	ldr  x3, =NXP_TIMER_ADDR
+	ldr  w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
+	tst  w0, #SYS_COUNTER_CNTCR_EN
+	b.ne 4f
+	orr  w0, w0, #SYS_COUNTER_CNTCR_EN
+	str  w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
+4:
+	/* enable the core timer and mask timer interrupt */
+	mov  x1, #CNTP_CTL_EL0_EN
+	orr  x1, x1, #CNTP_CTL_EL0_IMASK
+	msr  cntp_ctl_el0, x1
+
+	isb
+	mov  x30, x8
+	ret
+endfunc _soc_core_prep_off
+
+
+/* Part of CPU_OFF:
+ * Function performs the final steps to shutdown the core
+ * in:  x0 = core mask lsb
+ * out: none
+ * uses x0, x1, x2, x3, x4, x5
+ */
+func _soc_core_entr_off
+	mov  x5, x30
+	mov  x4, x0
+
+1:
+	/* enter low-power state by executing wfi */
+	wfi
+
+	/* see if SGI15 woke us up */
+	mrs  x2, ICC_IAR0_EL1
+	mov  x3, #ICC_IAR0_EL1_SGI15
+	cmp  x2, x3
+	b.ne 2f
+
+	/* deactivate the interrupt */
+	msr ICC_EOIR0_EL1, x2
+
+2:
+	/* check if core is turned ON */
+	mov  x0, x4
+	/* Fetched the core state in x0 */
+	bl   _getCoreState
+
+	cmp  x0, #CORE_WAKEUP
+	b.ne 1b
+
+	/* Reached here, exited the wfi */
+
+	mov  x30, x5
+	ret
+endfunc _soc_core_entr_off
+
+
+/* Part of CPU_OFF:
+ * Function starts the process of starting a core back up
+ * in:  x0 = core mask lsb
+ * out: none
+ * uses x0, x1, x2, x3, x4, x5, x6
+ */
+func _soc_core_exit_off
+	mov  x6, x30
+	mov  x5, x0
+
+	/* disable forwarding of GRP0 ints at cpu interface */
+	msr  ICC_IGRPEN0_EL1, xzr
+
+	/* get redistributor sgi base addr for this core */
+	mov  x0, x5
+	bl   get_gic_sgi_base
+	mov  x4, x0
+
+	/* x4 = gicr sgi base addr
+	 * x5 = core mask
+	 */
+
+	/* disable SGI 15 at redistributor - GICR_ICENABLER0 */
+	mov  w1, #GICR_ICENABLER0_SGI15
+	str  w1, [x4, #GICR_ICENABLER0_OFFSET]
+
+	/* get redistributor rd base addr for this core */
+	mov  x0, x5
+	bl   get_gic_rd_base
+	mov  x4, x0
+
+2:
+	/* poll on rwp bit in GICR_CTLR */
+	ldr  w2, [x4, #GICR_CTLR_OFFSET]
+	tst  w2, #GICR_CTLR_RWP
+	b.ne 2b
+
+	/* unlock the debug interfaces */
+	mrs  x3, osdlr_el1
+	bic  x3, x3, #OSDLR_EL1_DLK_LOCK
+	msr  osdlr_el1, x3
+	isb
+
+	dsb sy
+	isb
+	mov  x30, x6
+	ret
+endfunc _soc_core_exit_off
+
+
+/* Function requests a reset of the entire SOC
+ * in:  none
+ * out: none
+ * uses: x0, x1, x2, x3, x4, x5, x6
+ */
+func _soc_sys_reset
+	mov  x6, x30
+
+	ldr  x2, =NXP_RST_ADDR
+	/* clear the RST_REQ_MSK and SW_RST_REQ */
+
+	mov  w0, #0x00000000
+	str  w0, [x2, #RSTCNTL_OFFSET]
+
+	/* initiate the sw reset request */
+	mov  w0, #SW_RST_REQ_INIT
+	str  w0, [x2, #RSTCNTL_OFFSET]
+
+	/* In case this address range is mapped as cacheable,
+	 * flush the write out of the dcaches.
+	 */
+	add  x2, x2, #RSTCNTL_OFFSET
+	dc   cvac, x2
+	dsb  st
+	isb
+
+	/* Function does not return */
+	b  .
+endfunc _soc_sys_reset
+
+
+/* Part of SYSTEM_OFF:
+ * Function turns off the SoC clocks
+ * Note: Function is not intended to return, and the only allowable
+ *	   recovery is POR
+ * in:  none
+ * out: none
+ * uses x0, x1, x2, x3
+ */
+func _soc_sys_off
+
+	/* A-009810: LPM20 entry sequence might cause
+	 * spurious timeout reset request
+	 * workaround: MASK RESET REQ RPTOE
+	 */
+	ldr  x0, =NXP_RESET_ADDR
+	ldr  w1, =RSTRQMR_RPTOE_MASK
+	str  w1, [x0, #RST_RSTRQMR1_OFFSET]
+
+	/* disable sec, QBman, spi and qspi */
+	ldr  x2, =NXP_DCFG_ADDR
+	ldr  x0, =DCFG_DEVDISR1_OFFSET
+	ldr  w1, =DCFG_DEVDISR1_SEC
+	str  w1, [x2, x0]
+	ldr  x0, =DCFG_DEVDISR3_OFFSET
+	ldr  w1, =DCFG_DEVDISR3_QBMAIN
+	str  w1, [x2, x0]
+	ldr  x0, =DCFG_DEVDISR4_OFFSET
+	ldr  w1, =DCFG_DEVDISR4_SPI_QSPI
+	str  w1, [x2, x0]
+
+	/* set TPMWAKEMR0 */
+	ldr  x0, =TPMWAKEMR0_ADDR
+	mov  w1, #0x1
+	str  w1, [x0]
+
+	/* disable icache, dcache, mmu @ EL1 */
+	mov  x1, #SCTLR_I_C_M_MASK
+	mrs  x0, sctlr_el1
+	bic  x0, x0, x1
+	msr  sctlr_el1, x0
+
+	/* disable L2 prefetches */
+	mrs  x0, CORTEX_A72_ECTLR_EL1
+	bic  x1, x1, #CPUECTLR_TIMER_MASK
+	orr  x0, x0, #CPUECTLR_SMPEN_EN
+	orr  x0, x0, #CPUECTLR_TIMER_8TICKS
+	msr  CORTEX_A72_ECTLR_EL1, x0
+	isb
+
+	/* disable CCN snoop domain */
+	mov  x1, #NXP_CCN_HN_F_0_ADDR
+	ldr  x0, =CCN_HN_F_SNP_DMN_CTL_MASK
+	str  x0, [x1, #CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET]
+3:
+	ldr  w2, [x1, #CCN_HN_F_SNP_DMN_CTL_OFFSET]
+	cmp  w2, #0x2
+	b.ne 3b
+
+	mov  x3, #NXP_PMU_ADDR
+
+4:
+	ldr  w1, [x3, #PMU_PCPW20SR_OFFSET]
+	cmp  w1, #PMU_IDLE_CORE_MASK
+	b.ne 4b
+
+	mov  w1, #PMU_IDLE_CLUSTER_MASK
+	str  w1, [x3, #PMU_CLAINACTSETR_OFFSET]
+
+1:
+	ldr  w1, [x3, #PMU_PCPW20SR_OFFSET]
+	cmp  w1, #PMU_IDLE_CORE_MASK
+	b.ne 1b
+
+	mov  w1, #PMU_FLUSH_CLUSTER_MASK
+	str  w1, [x3, #PMU_CLL2FLUSHSETR_OFFSET]
+
+2:
+	ldr  w1, [x3, #PMU_CLL2FLUSHSR_OFFSET]
+	cmp  w1, #PMU_FLUSH_CLUSTER_MASK
+	b.ne 2b
+
+	mov  w1, #PMU_FLUSH_CLUSTER_MASK
+	str  w1, [x3, #PMU_CLSL2FLUSHCLRR_OFFSET]
+
+	mov  w1, #PMU_FLUSH_CLUSTER_MASK
+	str  w1, [x3, #PMU_CLSINACTSETR_OFFSET]
+
+	mov  x2, #DAIF_SET_MASK
+	mrs  x1, spsr_el1
+	orr  x1, x1, x2
+	msr  spsr_el1, x1
+
+	mrs  x1, spsr_el2
+	orr  x1, x1, x2
+	msr  spsr_el2, x1
+
+	/* force the debug interface to be quiescent */
+	mrs  x0, osdlr_el1
+	orr  x0, x0, #0x1
+	msr  osdlr_el1, x0
+
+	/* invalidate all TLB entries at all 3 exception levels */
+	tlbi alle1
+	tlbi alle2
+	tlbi alle3
+
+	/* x3 = pmu base addr */
+
+	/* request lpm20 */
+	ldr  x0, =PMU_POWMGTCSR_OFFSET
+	ldr  w1, =PMU_POWMGTCSR_VAL
+	str  w1, [x3, x0]
+
+5:
+	wfe
+	b.eq  5b
+endfunc _soc_sys_off
+
+
+/* Part of CPU_SUSPEND
+ * Function puts the calling core into standby state
+ * in:  x0 = core mask lsb
+ * out: none
+ * uses x0
+ */
+func _soc_core_entr_stdby
+
+	dsb  sy
+	isb
+	wfi
+
+	ret
+endfunc _soc_core_entr_stdby
+
+
+/* Part of CPU_SUSPEND
+ * Function performs SoC-specific programming prior to standby
+ * in:  x0 = core mask lsb
+ * out: none
+ * uses x0, x1
+ */
+func _soc_core_prep_stdby
+
+	/* clear CORTEX_A72_ECTLR_EL1[2:0] */
+	mrs  x1, CORTEX_A72_ECTLR_EL1
+	bic  x1, x1, #CPUECTLR_TIMER_MASK
+	msr  CORTEX_A72_ECTLR_EL1, x1
+
+	ret
+endfunc _soc_core_prep_stdby
+
+
+/* Part of CPU_SUSPEND
+ * Function performs any SoC-specific cleanup after standby state
+ * in:  x0 = core mask lsb
+ * out: none
+ * uses none
+ */
+func _soc_core_exit_stdby
+
+	ret
+endfunc _soc_core_exit_stdby
+
+
+/* Part of CPU_SUSPEND
+ * Function performs SoC-specific programming prior to power-down
+ * in:  x0 = core mask lsb
+ * out: none
+ * uses none
+ */
+func _soc_core_prep_pwrdn
+
+	/* make sure system counter is enabled */
+	ldr  x2, =NXP_TIMER_ADDR
+	ldr  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
+	tst  w0, #SYS_COUNTER_CNTCR_EN
+	b.ne 1f
+	orr  w0, w0, #SYS_COUNTER_CNTCR_EN
+	str  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
+1:
+
+	/* enable dynamic retention control (CPUECTLR[2:0])
+	 * set the SMPEN bit (CPUECTLR[6])
+	 */
+	mrs  x1, CORTEX_A72_ECTLR_EL1
+	bic  x1, x1, #CPUECTLR_RET_MASK
+	orr  x1, x1, #CPUECTLR_TIMER_8TICKS
+	orr  x1, x1, #CPUECTLR_SMPEN_EN
+	msr  CORTEX_A72_ECTLR_EL1, x1
+
+	isb
+	ret
+endfunc _soc_core_prep_pwrdn
+
+
+/* Part of CPU_SUSPEND
+ * Function puts the calling core into a power-down state
+ * in:  x0 = core mask lsb
+ * out: none
+ * uses x0
+ */
+func _soc_core_entr_pwrdn
+
+	/* X0 = core mask lsb */
+
+	dsb  sy
+	isb
+	wfi
+
+	ret
+endfunc _soc_core_entr_pwrdn
+
+
+/* Part of CPU_SUSPEND
+ * Function performs any SoC-specific cleanup after power-down state
+ * in:  x0 = core mask lsb
+ * out: none
+ * uses none
+ */
+func _soc_core_exit_pwrdn
+
+	ret
+endfunc _soc_core_exit_pwrdn
+
+
+/* Part of CPU_SUSPEND
+ * Function performs SoC-specific programming prior to standby
+ * in:  x0 = core mask lsb
+ * out: none
+ * uses x0, x1
+ */
+func _soc_clstr_prep_stdby
+
+	/* clear CORTEX_A72_ECTLR_EL1[2:0] */
+	mrs  x1, CORTEX_A72_ECTLR_EL1
+	bic  x1, x1, #CPUECTLR_TIMER_MASK
+	msr  CORTEX_A72_ECTLR_EL1, x1
+
+	ret
+endfunc _soc_clstr_prep_stdby
+
+
+/* Part of CPU_SUSPEND
+ * Function performs any SoC-specific cleanup after standby state
+ * in:  x0 = core mask lsb
+ * out: none
+ * uses none
+ */
+func _soc_clstr_exit_stdby
+
+	ret
+endfunc _soc_clstr_exit_stdby
+
+
+/* Part of CPU_SUSPEND
+ * Function performs SoC-specific programming prior to power-down
+ * in:  x0 = core mask lsb
+ * out: none
+ * uses none
+ */
+func _soc_clstr_prep_pwrdn
+
+	/* make sure system counter is enabled */
+	ldr  x2, =NXP_TIMER_ADDR
+	ldr  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
+	tst  w0, #SYS_COUNTER_CNTCR_EN
+	b.ne 1f
+	orr  w0, w0, #SYS_COUNTER_CNTCR_EN
+	str  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
+1:
+
+	/* enable dynamic retention control (CPUECTLR[2:0])
+	 * set the SMPEN bit (CPUECTLR[6])
+	 */
+	mrs  x1, CORTEX_A72_ECTLR_EL1
+	bic  x1, x1, #CPUECTLR_RET_MASK
+	orr  x1, x1, #CPUECTLR_TIMER_8TICKS
+	orr  x1, x1, #CPUECTLR_SMPEN_EN
+	msr  CORTEX_A72_ECTLR_EL1, x1
+
+	isb
+	ret
+endfunc _soc_clstr_prep_pwrdn
+
+
+/* Part of CPU_SUSPEND
+ * Function performs any SoC-specific cleanup after power-down state
+ * in:  x0 = core mask lsb
+ * out: none
+ * uses none
+ */
+func _soc_clstr_exit_pwrdn
+
+	ret
+endfunc _soc_clstr_exit_pwrdn
+
+
+/* Part of CPU_SUSPEND
+ * Function performs SoC-specific programming prior to standby
+ * in:  x0 = core mask lsb
+ * out: none
+ * uses x0, x1
+ */
+func _soc_sys_prep_stdby
+
+	/* clear CORTEX_A72_ECTLR_EL1[2:0] */
+	mrs  x1, CORTEX_A72_ECTLR_EL1
+	bic  x1, x1, #CPUECTLR_TIMER_MASK
+	msr  CORTEX_A72_ECTLR_EL1, x1
+	ret
+endfunc _soc_sys_prep_stdby
+
+
+/* Part of CPU_SUSPEND
+ * Function performs any SoC-specific cleanup after standby state
+ * in:  x0 = core mask lsb
+ * out: none
+ * uses none
+ */
+func _soc_sys_exit_stdby
+
+	ret
+endfunc _soc_sys_exit_stdby
+
+
+/* Part of CPU_SUSPEND
+ * Function performs SoC-specific programming prior to
+ * suspend-to-power-down
+ * in:  x0 = core mask lsb
+ * out: none
+ * uses x0, x1
+ */
+func _soc_sys_prep_pwrdn
+
+	mrs   x1, CORTEX_A72_ECTLR_EL1
+	/* make sure the smp bit is set */
+	orr   x1, x1, #CPUECTLR_SMPEN_MASK
+	/* set the retention control */
+	orr   x1, x1, #CPUECTLR_RET_8CLK
+	/* disable tablewalk prefetch */
+	orr   x1, x1, #CPUECTLR_DISABLE_TWALK_PREFETCH
+	msr   CORTEX_A72_ECTLR_EL1, x1
+	isb
+
+	ret
+endfunc _soc_sys_prep_pwrdn
+
+
+/* Part of CPU_SUSPEND
+ * Function puts the calling core, and potentially the soc, into a
+ * low-power state
+ * in:  x0 = core mask lsb
+ * out: x0 = 0, success
+ *	  x0 < 0, failure
+ * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14,
+ *	  x15, x16, x17, x18, x19, x20, x21, x28
+ */
+func _soc_sys_pwrdn_wfi
+	mov  x28, x30
+
+	/* disable cluster snooping in the CCN-508 */
+	ldr  x1, =NXP_CCN_HN_F_0_ADDR
+	ldr  x7, [x1, #CCN_HN_F_SNP_DMN_CTL_OFFSET]
+	mov  x6, #CCN_HNF_NODE_COUNT
+1:
+	str  x7, [x1, #CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET]
+	sub  x6, x6, #1
+	add  x1, x1, #CCN_HNF_OFFSET
+	cbnz x6, 1b
+
+	/* x0  = core mask
+	 * x7  = hnf sdcr
+	 */
+
+	ldr  x1, =NXP_PMU_CCSR_ADDR
+	ldr  x2, =NXP_PMU_DCSR_ADDR
+
+	/* enable the stop-request-override */
+	mov  x3, #PMU_POWMGTDCR0_OFFSET
+	mov  x4, #POWMGTDCR_STP_OV_EN
+	str  w4, [x2, x3]
+
+	/* x0  = core mask
+	 * x1  = NXP_PMU_CCSR_ADDR
+	 * x2  = NXP_PMU_DCSR_ADDR
+	 * x7  = hnf sdcr
+	 */
+
+	/* disable prefetching in the A72 core */
+	mrs  x8, CORTEX_A72_CPUACTLR_EL1
+	tst  x8, #CPUACTLR_DIS_LS_HW_PRE
+	b.ne 2f
+	dsb  sy
+	isb
+	/* disable data prefetch */
+	orr  x16, x8, #CPUACTLR_DIS_LS_HW_PRE
+	/* disable tlb prefetch */
+	orr  x16, x16, #CPUACTLR_DIS_L2_TLB_PRE
+	msr  CORTEX_A72_CPUACTLR_EL1, x16
+	isb
+
+	/* x0  = core mask
+	 * x1  = NXP_PMU_CCSR_ADDR
+	 * x2  = NXP_PMU_DCSR_ADDR
+	 * x7  = hnf sdcr
+	 * x8  = cpuactlr
+	 */
+
+2:
+	/* save hnf-sdcr and cpuactlr to stack */
+	stp  x7,  x8,  [sp, #-16]!
+
+	/* x0  = core mask
+	 * x1  = NXP_PMU_CCSR_ADDR
+	 * x2  = NXP_PMU_DCSR_ADDR
+	 */
+
+	/* save the IPSTPCRn registers to stack */
+	mov  x15, #PMU_IPSTPCR0_OFFSET
+	ldr  w9,  [x1, x15]
+	mov  x16, #PMU_IPSTPCR1_OFFSET
+	ldr  w10, [x1, x16]
+	mov  x17, #PMU_IPSTPCR2_OFFSET
+	ldr  w11, [x1, x17]
+	mov  x18, #PMU_IPSTPCR3_OFFSET
+	ldr  w12, [x1, x18]
+	mov  x19, #PMU_IPSTPCR4_OFFSET
+	ldr  w13, [x1, x19]
+	mov  x20, #PMU_IPSTPCR5_OFFSET
+	ldr  w14, [x1, x20]
+
+	stp  x9,  x10,  [sp, #-16]!
+	stp  x11, x12,  [sp, #-16]!
+	stp  x13, x14,  [sp, #-16]!
+
+	/* x0  = core mask
+	 * x1  = NXP_PMU_CCSR_ADDR
+	 * x2  = NXP_PMU_DCSR_ADDR
+	 * x15 = PMU_IPSTPCR0_OFFSET
+	 * x16 = PMU_IPSTPCR1_OFFSET
+	 * x17 = PMU_IPSTPCR2_OFFSET
+	 * x18 = PMU_IPSTPCR3_OFFSET
+	 * x19 = PMU_IPSTPCR4_OFFSET
+	 * x20 = PMU_IPSTPCR5_OFFSET
+	 */
+
+	/* load the full clock mask for IPSTPCR0 */
+	ldr  x3, =DEVDISR1_MASK
+	/* get the exclusions */
+	mov  x21, #PMU_IPPDEXPCR0_OFFSET
+	ldr  w4, [x1, x21]
+	/* apply the exclusions to the mask */
+	bic  w7, w3, w4
+	/* stop the clocks in IPSTPCR0 */
+	str  w7, [x1, x15]
+
+	/* use same procedure for IPSTPCR1-IPSTPCR5 */
+
+	/* stop the clocks in IPSTPCR1 */
+	ldr  x5, =DEVDISR2_MASK
+	mov  x21, #PMU_IPPDEXPCR1_OFFSET
+	ldr  w6, [x1, x21]
+	bic  w8, w5, w6
+	str  w8, [x1, x16]
+
+	/* stop the clocks in IPSTPCR2 */
+	ldr  x3, =DEVDISR3_MASK
+	mov  x21, #PMU_IPPDEXPCR2_OFFSET
+	ldr  w4, [x1, x21]
+	bic  w9, w3, w4
+	str  w9, [x1, x17]
+
+	/* stop the clocks in IPSTPCR3 */
+	ldr  x5,  =DEVDISR4_MASK
+	mov  x21, #PMU_IPPDEXPCR3_OFFSET
+	ldr  w6,  [x1, x21]
+	bic  w10, w5, w6
+	str  w10, [x1, x18]
+
+	/* stop the clocks in IPSTPCR4
+	 *   - exclude the ddr clocks as we are currently executing
+	 *	 out of *some* memory, might be ddr
+	 *   - exclude the OCRAM clk so that we retain any code/data in
+	 *	 OCRAM
+	 *   - may need to exclude the debug clock if we are testing
+	 */
+	ldr  x3, =DEVDISR5_MASK
+	mov  w6, #DEVDISR5_MASK_ALL_MEM
+	bic  w3, w3, w6
+
+	mov  w5, #POLICY_DEBUG_ENABLE
+	cbz  w5, 3f
+	mov  w6, #DEVDISR5_MASK_DBG
+	bic  w3, w3, w6
+3:
+	mov  x21, #PMU_IPPDEXPCR4_OFFSET
+	ldr  w4,  [x1, x21]
+	bic  w11, w3, w4
+	str  w11, [x1, x19]
+
+	/* stop the clocks in IPSTPCR5 */
+	ldr  x5,  =DEVDISR6_MASK
+	mov  x21, #PMU_IPPDEXPCR5_OFFSET
+	ldr  w6,  [x1, x21]
+	bic  w12, w5, w6
+	str  w12, [x1, x20]
+
+	/* x0  = core mask
+	 * x1  = NXP_PMU_CCSR_ADDR
+	 * x2  = NXP_PMU_DCSR_ADDR
+	 * x7  = IPSTPCR0
+	 * x8  = IPSTPCR1
+	 * x9  = IPSTPCR2
+	 * x10 = IPSTPCR3
+	 * x11 = IPSTPCR4
+	 * x12 = IPSTPCR5
+	 */
+
+	/* poll until the clocks are stopped in IPSTPACKSR0 */
+	mov  w4,  #CLOCK_RETRY_CNT
+	mov  x21, #PMU_IPSTPACKSR0_OFFSET
+4:
+	ldr  w5, [x1, x21]
+	cmp  w5, w7
+	b.eq 5f
+	sub  w4, w4, #1
+	cbnz w4, 4b
+
+	/* poll until the clocks are stopped in IPSTPACKSR1 */
+5:
+	mov  w4,  #CLOCK_RETRY_CNT
+	mov  x21, #PMU_IPSTPACKSR1_OFFSET
+6:
+	ldr  w5, [x1, x21]
+	cmp  w5, w8
+	b.eq 7f
+	sub  w4, w4, #1
+	cbnz w4, 6b
+
+	/* poll until the clocks are stopped in IPSTPACKSR2 */
+7:
+	mov  w4,  #CLOCK_RETRY_CNT
+	mov  x21, #PMU_IPSTPACKSR2_OFFSET
+8:
+	ldr  w5, [x1, x21]
+	cmp  w5, w9
+	b.eq 9f
+	sub  w4, w4, #1
+	cbnz w4, 8b
+
+	/* poll until the clocks are stopped in IPSTPACKSR3 */
+9:
+	mov  w4,  #CLOCK_RETRY_CNT
+	mov  x21, #PMU_IPSTPACKSR3_OFFSET
+10:
+	ldr  w5, [x1, x21]
+	cmp  w5, w10
+	b.eq 11f
+	sub  w4, w4, #1
+	cbnz w4, 10b
+
+	/* poll until the clocks are stopped in IPSTPACKSR4 */
+11:
+	mov  w4,  #CLOCK_RETRY_CNT
+	mov  x21, #PMU_IPSTPACKSR4_OFFSET
+12:
+	ldr  w5, [x1, x21]
+	cmp  w5, w11
+	b.eq 13f
+	sub  w4, w4, #1
+	cbnz w4, 12b
+
+	/* poll until the clocks are stopped in IPSTPACKSR5 */
+13:
+	mov  w4,  #CLOCK_RETRY_CNT
+	mov  x21, #PMU_IPSTPACKSR5_OFFSET
+14:
+	ldr  w5, [x1, x21]
+	cmp  w5, w12
+	b.eq 15f
+	sub  w4, w4, #1
+	cbnz w4, 14b
+
+	/* x0  = core mask
+	 * x1  = NXP_PMU_CCSR_ADDR
+	 * x2  = NXP_PMU_DCSR_ADDR
+	 * x7  = IPSTPCR0
+	 * x8  = IPSTPCR1
+	 * x9  = IPSTPCR2
+	 * x10 = IPSTPCR3
+	 * x11 = IPSTPCR4
+	 * x12 = IPSTPCR5
+	 */
+
+15:
+	mov  x3, #NXP_DCFG_ADDR
+
+	/* save the devdisr registers to stack */
+	ldr  w13, [x3, #DCFG_DEVDISR1_OFFSET]
+	ldr  w14, [x3, #DCFG_DEVDISR2_OFFSET]
+	ldr  w15, [x3, #DCFG_DEVDISR3_OFFSET]
+	ldr  w16, [x3, #DCFG_DEVDISR4_OFFSET]
+	ldr  w17, [x3, #DCFG_DEVDISR5_OFFSET]
+	ldr  w18, [x3, #DCFG_DEVDISR6_OFFSET]
+
+	stp  x13, x14,  [sp, #-16]!
+	stp  x15, x16,  [sp, #-16]!
+	stp  x17, x18,  [sp, #-16]!
+
+	/* power down the IP in DEVDISR1 - corresponds to IPSTPCR0 */
+	str  w7,  [x3, #DCFG_DEVDISR1_OFFSET]
+
+	/* power down the IP in DEVDISR2 - corresponds to IPSTPCR1 */
+	str  w8, [x3, #DCFG_DEVDISR2_OFFSET]
+
+	/* power down the IP in DEVDISR3 - corresponds to IPSTPCR2 */
+	str  w9,  [x3, #DCFG_DEVDISR3_OFFSET]
+
+	/* power down the IP in DEVDISR4 - corresponds to IPSTPCR3 */
+	str  w10, [x3, #DCFG_DEVDISR4_OFFSET]
+
+	/* power down the IP in DEVDISR5 - corresponds to IPSTPCR4 */
+	str  w11, [x3, #DCFG_DEVDISR5_OFFSET]
+
+	/* power down the IP in DEVDISR6 - corresponds to IPSTPCR5 */
+	str  w12, [x3, #DCFG_DEVDISR6_OFFSET]
+
+	/* setup register values for the cache-only sequence */
+	mov  x4, #NXP_DDR_ADDR
+	mov  x5, #NXP_DDR2_ADDR
+	mov  x6, x11
+	mov  x7, x17
+	ldr  x12, =PMU_CLAINACTSETR_OFFSET
+	ldr  x13, =PMU_CLSINACTSETR_OFFSET
+	ldr  x14, =PMU_CLAINACTCLRR_OFFSET
+	ldr  x15, =PMU_CLSINACTCLRR_OFFSET
+
+	/* x0  = core mask
+	 * x1  = NXP_PMU_CCSR_ADDR
+	 * x2  = NXP_PMU_DCSR_ADDR
+	 * x3  = NXP_DCFG_ADDR
+	 * x4  = NXP_DDR_ADDR
+	 * x5  = NXP_DDR2_ADDR
+	 * w6  = IPSTPCR4
+	 * w7  = DEVDISR5
+	 * x12 = PMU_CLAINACTSETR_OFFSET
+	 * x13 = PMU_CLSINACTSETR_OFFSET
+	 * x14 = PMU_CLAINACTCLRR_OFFSET
+	 * x15 = PMU_CLSINACTCLRR_OFFSET
+	 */
+
+	mov  x8, #POLICY_DEBUG_ENABLE
+	cbnz x8, 29f
+	/* force the debug interface to be quiescent */
+	mrs  x9, OSDLR_EL1
+	orr  x9, x9, #0x1
+	msr  OSDLR_EL1, x9
+
+	/* enter the cache-only sequence */
+29:
+	bl   final_pwrdown
+
+	/* when we are here, the core has come out of wfi and the
+	 * ddr is back up
+	 */
+
+	mov  x8, #POLICY_DEBUG_ENABLE
+	cbnz x8, 30f
+	/* restart the debug interface */
+	mrs  x9, OSDLR_EL1
+	mov  x10, #1
+	bic  x9, x9, x10
+	msr  OSDLR_EL1, x9
+
+	/* get saved DEVDISR regs off stack */
+30:
+	ldp  x17, x18, [sp], #16
+	ldp  x15, x16, [sp], #16
+	ldp  x13, x14, [sp], #16
+	/* restore DEVDISR regs */
+	str  w18, [x3, #DCFG_DEVDISR6_OFFSET]
+	str  w17, [x3, #DCFG_DEVDISR5_OFFSET]
+	str  w16, [x3, #DCFG_DEVDISR4_OFFSET]
+	str  w15, [x3, #DCFG_DEVDISR3_OFFSET]
+	str  w14, [x3, #DCFG_DEVDISR2_OFFSET]
+	str  w13, [x3, #DCFG_DEVDISR1_OFFSET]
+	isb
+
+	/* get saved IPSTPCRn regs off stack */
+	ldp  x13, x14, [sp], #16
+	ldp  x11, x12, [sp], #16
+	ldp  x9,  x10, [sp], #16
+
+	/* restore IPSTPCRn regs */
+	mov  x15, #PMU_IPSTPCR5_OFFSET
+	str  w14, [x1, x15]
+	mov  x16, #PMU_IPSTPCR4_OFFSET
+	str  w13, [x1, x16]
+	mov  x17, #PMU_IPSTPCR3_OFFSET
+	str  w12, [x1, x17]
+	mov  x18, #PMU_IPSTPCR2_OFFSET
+	str  w11, [x1, x18]
+	mov  x19, #PMU_IPSTPCR1_OFFSET
+	str  w10, [x1, x19]
+	mov  x20, #PMU_IPSTPCR0_OFFSET
+	str  w9,  [x1, x20]
+	isb
+
+	/* poll the IPSTPACKSRn regs until the IP clocks are restarted */
+	mov  w4,  #CLOCK_RETRY_CNT
+	mov  x15, #PMU_IPSTPACKSR5_OFFSET
+16:
+	ldr  w5, [x1, x15]
+	and  w5, w5, w14
+	cbz  w5, 17f
+	sub  w4, w4, #1
+	cbnz w4, 16b
+
+17:
+	mov  w4,  #CLOCK_RETRY_CNT
+	mov  x15, #PMU_IPSTPACKSR4_OFFSET
+18:
+	ldr  w5, [x1, x15]
+	and  w5, w5, w13
+	cbz  w5, 19f
+	sub  w4, w4, #1
+	cbnz w4, 18b
+
+19:
+	mov  w4,  #CLOCK_RETRY_CNT
+	mov  x15, #PMU_IPSTPACKSR3_OFFSET
+20:
+	ldr  w5, [x1, x15]
+	and  w5, w5, w12
+	cbz  w5, 21f
+	sub  w4, w4, #1
+	cbnz w4, 20b
+
+21:
+	mov  w4,  #CLOCK_RETRY_CNT
+	mov  x15, #PMU_IPSTPACKSR2_OFFSET
+22:
+	ldr  w5, [x1, x15]
+	and  w5, w5, w11
+	cbz  w5, 23f
+	sub  w4, w4, #1
+	cbnz w4, 22b
+
+23:
+	mov  w4,  #CLOCK_RETRY_CNT
+	mov  x15, #PMU_IPSTPACKSR1_OFFSET
+24:
+	ldr  w5, [x1, x15]
+	and  w5, w5, w10
+	cbz  w5, 25f
+	sub  w4, w4, #1
+	cbnz w4, 24b
+
+25:
+	mov  w4,  #CLOCK_RETRY_CNT
+	mov  x15, #PMU_IPSTPACKSR0_OFFSET
+26:
+	ldr  w5, [x1, x15]
+	and  w5, w5, w9
+	cbz  w5, 27f
+	sub  w4, w4, #1
+	cbnz w4, 26b
+
+27:
+	/* disable the stop-request-override */
+	mov  x8, #PMU_POWMGTDCR0_OFFSET
+	mov  w9, #POWMGTDCR_STP_OV_EN
+	str  w9, [x2, x8]
+	isb
+
+	/* get hnf-sdcr and cpuactlr off stack */
+	ldp  x7, x8, [sp], #16
+
+	/* restore cpuactlr */
+	msr  CORTEX_A72_CPUACTLR_EL1, x8
+	isb
+
+	/* restore snooping in the hnf nodes */
+	ldr  x9, =NXP_CCN_HN_F_0_ADDR
+	mov  x6, #CCN_HNF_NODE_COUNT
+28:
+	str  x7, [x9, #CCN_HN_F_SNP_DMN_CTL_SET_OFFSET]
+	sub  x6, x6, #1
+	add  x9, x9, #CCN_HNF_OFFSET
+	cbnz x6, 28b
+	isb
+
+	mov  x30, x28
+	ret
+endfunc _soc_sys_pwrdn_wfi
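
Each IPSTPCRn step above has the same shape: take the full DEVDISRn clock mask, clear the bits listed in the matching IPPDEXPCRn exclusion register, write the result to IPSTPCRn, and later poll IPSTPACKSRn with a bounded retry count. A minimal C sketch of that per-register pattern, assuming the PMU register offsets and the CLOCK_RETRY_CNT macro used by the assembly; the helper name is hypothetical:

#include <mmio.h>

/* Sketch only: stop one group of IP clocks and wait for the acknowledgement. */
static void stop_ip_clocks(uintptr_t pmu_base, uint32_t full_mask,
			   uint32_t exp_offset, uint32_t stp_offset,
			   uint32_t ack_offset)
{
	uint32_t excl = mmio_read_32(pmu_base + exp_offset);	/* IPPDEXPCRn */
	uint32_t req = full_mask & ~excl;			/* apply exclusions */
	uint32_t i;

	mmio_write_32(pmu_base + stp_offset, req);		/* IPSTPCRn */

	/* IPSTPACKSRn: wait for the stop request to take effect,
	 * but give up after a bounded number of polls.
	 */
	for (i = 0U; i < CLOCK_RETRY_CNT; i++) {
		if (mmio_read_32(pmu_base + ack_offset) == req) {
			break;
		}
	}
}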
+
+
+/* Part of CPU_SUSPEND
+ * Function performs any SoC-specific cleanup after power-down
+ * in:  x0 = core mask lsb
+ * out: none
+ * uses x1, x2
+ */
+func _soc_sys_exit_pwrdn
+
+	mrs   x1, CORTEX_A72_ECTLR_EL1
+	/* make sure the smp bit is set */
+	orr   x1, x1, #CPUECTLR_SMPEN_MASK
+	/* clr the retention control */
+	mov   x2, #CPUECTLR_RET_8CLK
+	bic   x1, x1, x2
+	/* enable tablewalk prefetch */
+	mov   x2, #CPUECTLR_DISABLE_TWALK_PREFETCH
+	bic   x1, x1, x2
+	msr   CORTEX_A72_ECTLR_EL1, x1
+	isb
+
+	ret
+endfunc _soc_sys_exit_pwrdn
+
+
+/* Function powers down the DDR and the final core. It does this by
+ * loading itself into the I-cache and then executing from there.
+ * in:
+ *   x0  = core mask
+ *   x1  = NXP_PMU_CCSR_ADDR
+ *   x2  = NXP_PMU_DCSR_ADDR
+ *   x3  = NXP_DCFG_ADDR
+ *   x4  = NXP_DDR_ADDR
+ *   x5  = NXP_DDR2_ADDR
+ *   w6  = IPSTPCR4
+ *   w7  = DEVDISR5
+ *   x12 = PMU_CLAINACTSETR_OFFSET
+ *   x13 = PMU_CLSINACTSETR_OFFSET
+ *   x14 = PMU_CLAINACTCLRR_OFFSET
+ *   x15 = PMU_CLSINACTCLRR_OFFSET
+ * out: none
+ * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x13, x14, x15, x16,
+ *	  x17, x18
+ */
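+
+/* Layout note: this routine spans a few consecutive I-cache lines, and each
+ * touch_line_N label sits in a different one of those lines. On the first
+ * pass (x0 == 0) execution only hops from touch_line label to touch_line
+ * label, which pulls every line of the routine into the I-cache; on the
+ * second pass (x0 == 1) the work itself runs entirely from the I-cache, so
+ * no instruction has to be fetched from DDR while the DDR controllers are
+ * in self-refresh.
+ */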
+
+/* 4Kb aligned */
+.align 12
+func final_pwrdown
+
+	mov  x0, xzr
+	b	touch_line_0
+start_line_0:
+	mov  x0, #1
+	/* put ddr controller 1 into self-refresh */
+	ldr  w8, [x4, #DDR_CFG_2_OFFSET]
+	orr  w8, w8, #CFG_2_FORCE_REFRESH
+	str  w8, [x4, #DDR_CFG_2_OFFSET]
+
+	/* put ddr controller 2 into self-refresh */
+	ldr  w8, [x5, #DDR_CFG_2_OFFSET]
+	orr  w8, w8, #CFG_2_FORCE_REFRESH
+	str  w8, [x5, #DDR_CFG_2_OFFSET]
+
+	/* stop the clocks in both ddr controllers */
+	mov  w10, #DEVDISR5_MASK_DDR
+	mov  x16, #PMU_IPSTPCR4_OFFSET
+	orr  w9,  w6, w10
+	str  w9,  [x1, x16]
+	isb
+
+	mov  x17, #PMU_IPSTPACKSR4_OFFSET
+touch_line_0:
+	cbz  x0, touch_line_1
+
+start_line_1:
+	/* poll IPSTPACKSR4 until
+	 * ddr controller clocks are stopped.
+	 */
+1:
+	ldr  w8, [x1, x17]
+	and  w8, w8, w10
+	cmp  w8, w10
+	b.ne 1b
+
+	/* shut down power to the ddr controllers */
+	orr w9, w7, #DEVDISR5_MASK_DDR
+	str w9, [x3, #DCFG_DEVDISR5_OFFSET]
+
+	/* disable cluster acp ports */
+	mov  w8, #CLAINACT_DISABLE_ACP
+	str  w8, [x1, x12]
+
+	/* disable skyros ports */
+	mov  w9, #CLSINACT_DISABLE_SKY
+	str  w9, [x1, x13]
+	isb
+
+touch_line_1:
+	cbz  x0, touch_line_2
+
+start_line_2:
+	isb
+3:
+	wfi
+
+	/* if we are here then we are awake
+	 * - bring this device back up
+	 */
+
+	/* enable skyros ports */
+	mov  w9, #CLSINACT_DISABLE_SKY
+	str  w9, [x1, x15]
+
+	/* enable acp ports */
+	mov  w8, #CLAINACT_DISABLE_ACP
+	str  w8, [x1, x14]
+	isb
+
+	/* bring up the ddr controllers */
+	str w7, [x3, #DCFG_DEVDISR5_OFFSET]
+	isb
+	str w6,  [x1, x16]
+	isb
+
+	nop
+touch_line_2:
+	cbz  x0, touch_line_3
+
+start_line_3:
+	/* poll IPSTPACKSR4 until
+	 * ddr controller clocks are running
+	 */
+	mov w10, #DEVDISR5_MASK_DDR
+2:
+	ldr  w8, [x1, x17]
+	and  w8, w8, w10
+	cbnz w8, 2b
+
+	/* take ddr controller 2 out of self-refresh */
+	mov w8, #CFG_2_FORCE_REFRESH
+	ldr w9, [x5, #DDR_CFG_2_OFFSET]
+	bic w9, w9, w8
+	str w9, [x5, #DDR_CFG_2_OFFSET]
+
+	/* take ddr controller 1 out of self-refresh */
+	ldr w9, [x4, #DDR_CFG_2_OFFSET]
+	bic w9, w9, w8
+	str w9, [x4, #DDR_CFG_2_OFFSET]
+	isb
+
+	nop
+	nop
+	nop
+touch_line_3:
+	cbz  x0, start_line_0
+
+	/* execute here after ddr is back up */
+
+	ret
+endfunc final_pwrdown
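
The DDR controller handling inside final_pwrdown reduces to setting the force-self-refresh bit of DDR_CFG_2 on each controller before the clocks are stopped, and clearing it again after wake-up. A minimal C sketch of that single step, assuming the DDR_CFG_2_OFFSET and CFG_2_FORCE_REFRESH definitions used by the assembly; the helper name is hypothetical:

#include <stdbool.h>

#include <mmio.h>

/* Sketch only: enter or exit forced self-refresh on one DDR controller. */
static void ddr_force_self_refresh(uintptr_t ddrc_base, bool enter)
{
	uint32_t cfg2 = mmio_read_32(ddrc_base + DDR_CFG_2_OFFSET);

	if (enter) {
		cfg2 |= CFG_2_FORCE_REFRESH;
	} else {
		cfg2 &= ~CFG_2_FORCE_REFRESH;
	}

	mmio_write_32(ddrc_base + DDR_CFG_2_OFFSET, cfg2);
}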
+
+/* Function returns CLUSTER_3_NORMAL if the cores of cluster 3 are
+ * to be handled normally, and it returns CLUSTER_3_IN_RESET if the cores
+ * are to be held in reset
+ * in:  none
+ * out: x0 = #CLUSTER_3_NORMAL,   cluster 3 treated normal
+ *	  x0 = #CLUSTER_3_IN_RESET, cluster 3 cores held in reset
+ * uses x0, x1, x2
+ */
+func cluster3InReset
+
+	/* default return is treat cores normal */
+	mov  x0, #CLUSTER_3_NORMAL
+
+	/* read RCW_SR27 register */
+	mov  x1, #NXP_DCFG_ADDR
+	ldr  w2, [x1, #RCW_SR27_OFFSET]
+
+	/* test the cluster 3 bit */
+	tst  w2, #CLUSTER_3_RCW_BIT
+	b.eq 1f
+
+	/* if we are here, then the bit was set */
+	mov  x0, #CLUSTER_3_IN_RESET
+1:
+	ret
+endfunc cluster3InReset
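
cluster3InReset is a single RCW status-bit test; a rough C equivalent, using the macro names from the assembly and a hypothetical function name, would be:

#include <mmio.h>

/* Sketch only: decide whether cluster 3 cores must be held in reset. */
static int cluster3_in_reset(void)
{
	uint32_t rcw_sr27 = mmio_read_32(NXP_DCFG_ADDR + RCW_SR27_OFFSET);

	return ((rcw_sr27 & CLUSTER_3_RCW_BIT) != 0U) ?
		CLUSTER_3_IN_RESET : CLUSTER_3_NORMAL;
}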
+
+
+/* Function checks to see if cores which are to be disabled have been
+ * released from reset - if not, it releases them
+ * Note: there may be special handling of cluster 3 cores depending upon the
+ *	   sys clk frequency
+ * in:  none
+ * out: none
+ * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
+ */
+func release_disabled
+	mov  x9, x30
+
+	/* check if we need to keep cluster 3 cores in reset */
+	bl   cluster3InReset		/*  0-2  */
+	mov  x8, x0
+
+	/* x8 = cluster 3 handling */
+
+	/* read COREDISABLESR */
+	mov  x0, #NXP_DCFG_ADDR
+	ldr  w4, [x0, #DCFG_COREDISABLEDSR_OFFSET]
+	cmp  x8, #CLUSTER_3_IN_RESET
+	b.ne 4f
+
+	/* the cluster 3 cores are to be held in reset, so remove
+	 * them from the disable mask
+	 */
+	bic  x4, x4, #CLUSTER_3_CORES_MASK
+4:
+	/* get the number of cpus on this device */
+	mov   x6, #PLATFORM_CORE_COUNT
+
+	mov  x0, #NXP_RESET_ADDR
+	ldr  w5, [x0, #BRR_OFFSET]
+
+	/* load the core mask for the first core */
+	mov  x7, #1
+
+	/* x4 = COREDISABLESR
+	 * x5 = BRR
+	 * x6 = loop count
+	 * x7 = core mask bit
+	 */
+2:
+	/* check if the core is to be disabled */
+	tst  x4, x7
+	b.eq 1f
+
+	/* see if disabled cores have already been released from reset */
+	tst  x5, x7
+	b.ne 5f
+
+	/* if core has not been released, then release it (0-3) */
+	mov  x0, x7
+	bl   _soc_core_release
+
+	/* record the core state in the data area (0-3) */
+	mov  x0, x7
+	mov  x1, #CORE_STATE_DATA
+	mov  x2, #CORE_DISABLED
+	bl   _setCoreData
+
+1:
+	/* see if this is a cluster 3 core */
+	mov   x3, #CLUSTER_3_CORES_MASK
+	tst   x3, x7
+	b.eq  5f
+
+	/* this is a cluster 3 core - see if it needs to be held in reset */
+	cmp  x8, #CLUSTER_3_IN_RESET
+	b.ne 5f
+
+	/* record the core state as disabled in the data area (0-3) */
+	mov  x0, x7
+	mov  x1, #CORE_STATE_DATA
+	mov  x2, #CORE_DISABLED
+	bl   _setCoreData
+
+5:
+	/* decrement the counter */
+	subs  x6, x6, #1
+	b.le  3f
+
+	/* shift the core mask to the next core */
+	lsl   x7, x7, #1
+	/* continue */
+	b	 2b
+3:
+	cmp  x8, #CLUSTER_3_IN_RESET
+	b.ne 6f
+
+	/* we need to hold the cluster 3 cores in reset,
+	 * so mark them in the COREDISR and COREDISABLEDSR registers as
+	 * "disabled", and the rest of the sw stack will leave them alone
+	 * thinking that they have been disabled
+	 */
+	mov  x0, #NXP_DCFG_ADDR
+	ldr  w1, [x0, #DCFG_COREDISR_OFFSET]
+	orr  w1, w1, #CLUSTER_3_CORES_MASK
+	str  w1, [x0, #DCFG_COREDISR_OFFSET]
+
+	ldr  w2, [x0, #DCFG_COREDISABLEDSR_OFFSET]
+	orr  w2, w2, #CLUSTER_3_CORES_MASK
+	str  w2, [x0, #DCFG_COREDISABLEDSR_OFFSET]
+	dsb  sy
+	isb
+
+#if (PSCI_TEST)
+	/* x0 = NXP_DCFG_ADDR : read COREDISABLESR */
+	ldr  w4, [x0, #DCFG_COREDISABLEDSR_OFFSET]
+	/* read COREDISR */
+	ldr  w3, [x0, #DCFG_COREDISR_OFFSET]
+#endif
+
+6:
+	mov  x30, x9
+	ret
+
+endfunc release_disabled
+
+
+/* Function sets up the TrustZone Protection Controller (TZPC)
+ * in:  none
+ * out: none
+ * uses x0, x1
+ */
+func init_tzpc
+
+	/* set Non Secure access for all devices protected via TZPC */
+
+	/* decode Protection-0 Set Reg */
+	ldr	x1, =TZPCDECPROT_0_SET_BASE
+	/* set decode region to NS, Bits[7:0] */
+	mov	w0, #0xFF
+	str	w0, [x1]
+
+	/* decode Protection-1 Set Reg */
+	ldr	x1, =TZPCDECPROT_1_SET_BASE
+	/* set decode region to NS, Bits[7:0] */
+	mov	w0, #0xFF
+	str	w0, [x1]
+
+	/* decode Protection-2 Set Reg */
+	ldr	x1, =TZPCDECPROT_2_SET_BASE
+	/* set decode region to NS, Bits[7:0] */
+	mov	w0, #0xFF
+	str	w0, [x1]
+
+	/* entire SRAM as NS */
+	/* secure RAM region size Reg */
+	ldr	x1, =TZPC_BASE
+	/* 0x00000000 = no secure region */
+	mov	w0, #0x00000000
+	str	w0, [x1]
+
+	ret
+endfunc init_tzpc
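
init_tzpc programs four memory-mapped TZPC registers with fixed values. A minimal C sketch of the same sequence, reusing the macro names above (function name hypothetical):

#include <mmio.h>

/* Sketch only: open all TZPC-protected decode regions to Non-secure
 * and leave no secure SRAM region.
 */
static void init_tzpc_sketch(void)
{
	mmio_write_32(TZPCDECPROT_0_SET_BASE, 0xFF);	/* decode region 0 -> NS */
	mmio_write_32(TZPCDECPROT_1_SET_BASE, 0xFF);	/* decode region 1 -> NS */
	mmio_write_32(TZPCDECPROT_2_SET_BASE, 0xFF);	/* decode region 2 -> NS */
	mmio_write_32(TZPC_BASE, 0x0);			/* secure RAM size = 0 */
}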
+
+/* write a register in the DCFG block
+ * in:  x0 = offset
+ * in:  w1 = value to write
+ * uses x0, x1, x2
+ */
+func _write_reg_dcfg
+	ldr  x2, =NXP_DCFG_ADDR
+	str  w1, [x2, x0]
+	ret
+endfunc _write_reg_dcfg
+
+
+/* read a register in the DCFG block
+ * in:  x0 = offset
+ * out: w0 = value read
+ * uses x0, x1, x2
+ */
+func _read_reg_dcfg
+	ldr  x2, =NXP_DCFG_ADDR
+	ldr  w1, [x2, x0]
+	mov  w0, w1
+	ret
+endfunc _read_reg_dcfg
+
+
+/* Function returns an mpidr value for a core, given a core_mask_lsb
+ * in:  x0 = core mask lsb
+ * out: x0 = affinity2:affinity1:affinity0, where affinity is 8-bits
+ * uses x0, x1
+ */
+func get_mpidr_value
+
+	/* convert a core mask to an SoC core number */
+	clz  w0, w0
+	mov  w1, #31
+	sub  w0, w1, w0
+
+	/* get the mpidr core number from the SoC core number */
+	mov  w1, wzr
+	tst  x0, #1
+	b.eq 1f
+	orr  w1, w1, #1
+
+1:
+	/* extract the cluster number */
+	lsr  w0, w0, #1
+	orr  w0, w1, w0, lsl #8
+
+	ret
+endfunc get_mpidr_value
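
get_mpidr_value assumes two cores per cluster: the bit position of the core mask is the SoC core number, its low bit becomes Aff0 and the remaining bits become the cluster number in Aff1. A C sketch of the same mapping, using the compiler's count-leading-zeros builtin (function name hypothetical):

#include <stdint.h>

/* Sketch only: core mask lsb -> MPIDR (Aff1 = cluster, Aff0 = core in cluster). */
static uint32_t core_mask_to_mpidr(uint32_t core_mask_lsb)
{
	uint32_t core = 31U - (uint32_t)__builtin_clz(core_mask_lsb);

	return ((core >> 1) << 8) | (core & 1U);
}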
+
+
+/* Function returns the redistributor base address for the core specified
+ * in x0
+ * in:  x0 - core mask lsb of specified core
+ * out: x0 = redistributor rd base address for specified core
+ * uses x0, x1, x2
+ */
+func get_gic_rd_base
+	clz  w1, w0
+	mov  w2, #0x20
+	sub  w2, w2, w1
+	sub  w2, w2, #1
+
+	ldr  x0, =NXP_GICR_ADDR
+	mov  x1, #GIC_RD_OFFSET
+
+	/* x2 = core number
+	 * loop counter
+	 */
+2:
+	cbz  x2, 1f
+	add  x0, x0, x1
+	sub  x2, x2, #1
+	b	2b
+1:
+	ret
+endfunc get_gic_rd_base
+
+
+/* Function returns the redistributor base address for the core specified
+ * in x0
+ * in:  x0 - core mask lsb of specified core
+ * out: x0 = redistributor sgi base address for specified core
+ * uses x0, x1, x2
+ */
+func get_gic_sgi_base
+	clz  w1, w0
+	mov  w2, #0x20
+	sub  w2, w2, w1
+	sub  w2, w2, #1
+
+	ldr  x0, =NXP_GICR_SGI_ADDR
+	mov  x1, #GIC_SGI_OFFSET
+
+	/* loop counter */
+2:
+	cbz  x2, 1f		/* x2 = core number */
+	add  x0, x0, x1
+	sub  x2, x2, #1
+	b	2b
+1:
+	ret
+endfunc get_gic_sgi_base
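
Both GIC helpers above compute the same core index (31 minus the leading-zero count of the core mask) and then advance one frame stride per core; only the base address and stride differ. A combined C sketch (function name hypothetical):

#include <stdint.h>

/* Sketch only: per-core GICR frame = base + core_number * frame_stride. */
static uintptr_t gicr_frame_for_core(uintptr_t base, uintptr_t stride,
				     uint32_t core_mask_lsb)
{
	uint32_t core = 31U - (uint32_t)__builtin_clz(core_mask_lsb);

	return base + ((uintptr_t)core * stride);
}

In these terms, get_gic_rd_base corresponds to gicr_frame_for_core(NXP_GICR_ADDR, GIC_RD_OFFSET, mask) and get_gic_sgi_base to gicr_frame_for_core(NXP_GICR_SGI_ADDR, GIC_SGI_OFFSET, mask).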
+
+/* Function writes a register in the RESET block
+ * in:  x0 = offset
+ * in:  w1 = value to write
+ * uses x0, x1, x2
+ */
+func _write_reg_reset
+	ldr  x2, =NXP_RESET_ADDR
+	str  w1, [x2, x0]
+	ret
+endfunc _write_reg_reset
+
+
+/* Function reads a register in the RESET block
+ * in:  x0 = offset
+ * out: w0 = value read
+ * uses x0, x1
+ */
+func _read_reg_reset
+	ldr  x1, =NXP_RESET_ADDR
+	ldr  w0, [x1, x0]
+	ret
+endfunc _read_reg_reset
diff --git a/plat/nxp/soc-lx2160a/aarch64/lx2160a_helpers.S b/plat/nxp/soc-lx2160a/aarch64/lx2160a_helpers.S
new file mode 100644
index 0000000..c364dec
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/aarch64/lx2160a_helpers.S
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+#include <platform_def.h>
+
+.globl	plat_secondary_cold_boot_setup
+.globl	plat_is_my_cpu_primary
+.globl	plat_reset_handler
+.globl  platform_mem_init
+
+
+func platform_mem1_init
+	ret
+endfunc platform_mem1_init
+
+
+func platform_mem_init
+	ret
+endfunc	platform_mem_init
+
+
+func apply_platform_errata
+
+	ret
+endfunc apply_platform_errata
+
+
+func plat_reset_handler
+	mov x29, x30
+	bl  apply_platform_errata
+
+#if defined(IMAGE_BL31)
+	ldr x0, =POLICY_SMMU_PAGESZ_64K
+	cbz x0, 1f
+	/* Set the SMMU page size in the sACR register */
+	bl _set_smmu_pagesz_64
+#endif
+1:
+	mov x30, x29
+
+	ret
+endfunc plat_reset_handler
+
+
+/* void plat_secondary_cold_boot_setup (void);
+ *
+ * This function performs any platform-specific actions
+ * needed for a secondary CPU after a cold reset, e.g.
+ * marking the CPU's presence or placing it in a
+ * holding pen.
+ */
+func plat_secondary_cold_boot_setup
+	/* lx2160a does not do cold boot for secondary CPU */
+cb_panic:
+	b	cb_panic
+endfunc plat_secondary_cold_boot_setup
+
+
+/* unsigned int plat_is_my_cpu_primary (void);
+ *
+ * Find out whether the current cpu is the primary
+ * cpu.
+ */
+func plat_is_my_cpu_primary
+	mrs	x0, mpidr_el1
+	and	x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
+	cmp	x0, 0x0
+	cset	w0, eq
+	ret
+endfunc plat_is_my_cpu_primary
diff --git a/plat/nxp/soc-lx2160a/aarch64/lx2160a_warm_rst.S b/plat/nxp/soc-lx2160a/aarch64/lx2160a_warm_rst.S
new file mode 100644
index 0000000..9dec3f2
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/aarch64/lx2160a_warm_rst.S
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+.section .text, "ax"
+
+#include <asm_macros.S>
+
+#ifndef NXP_COINED_BB
+#include <flash_info.h>
+#include <fspi.h>
+#endif
+#include <regs.h>
+#ifdef NXP_COINED_BB
+#include <snvs.h>
+#endif
+
+#include <plat_warm_rst.h>
+#include <platform_def.h>
+
+#define SDRAM_CFG	0x110
+#define SDRAM_CFG_2	0x114
+#define SDRAM_MD_CNTL	0x120
+#define SDRAM_INTERVAL	0x124
+#define TIMING_CFG_10	0x258
+#define DEBUG_2		0xF04
+#define DEBUG_26	0xF64
+#define DDR_DSR2	0xB24
+
+#define DDR_CNTRLR_2	0x2
+#define COUNT_100	1000
+
+	.globl	_soc_sys_warm_reset
+	.align 12
+
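+/* Function initiates a warm reset of the SoC: it forces both DDR
+ * controllers into self-refresh, records the warm-reset flag (in SNVS
+ * LP GPR0, or by completing a FlexSPI flash update, depending on the
+ * build options below), and then triggers a software reset request.
+ * The layout uses the same two-pass pattern as final_pwrdown: the first
+ * pass (x3 == 0) only branches through the touch_lineN labels to pull
+ * the routine into the I-cache, the second pass (x3 == 1) performs the
+ * work.
+ * in:  none
+ * out: function does not return
+ * uses x0, x1, x2, x3, x5
+ */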
+func _soc_sys_warm_reset
+	mov  x3, xzr
+	b    touch_line0
+start_line0:
+	mov  x3, #1
+	mov  x2, #NUM_OF_DDRC
+	ldr x1, =NXP_DDR_ADDR
+1:
+	ldr w0, [x1, #SDRAM_CFG]
+	orr w0, w0, #SDRAM_CFG_MEM_HLT
+	str w0, [x1, #SDRAM_CFG]
+2:
+	ldr w0, [x1, #DEBUG_2]
+	and w0, w0, #DDR_DBG_2_MEM_IDLE
+	cbz w0, 2b
+
+	ldr w0, [x1, #DEBUG_26]
+	orr w0, w0, #DDR_DEBUG_26_BIT_12
+	orr w0, w0, #DDR_DEBUG_26_BIT_13
+	orr w0, w0, #DDR_DEBUG_26_BIT_14
+touch_line0:
+	cbz x3, touch_line1
+
+	orr w0, w0, #DDR_DEBUG_26_BIT_15
+	orr w0, w0, #DDR_DEBUG_26_BIT_16
+	str w0, [x1, #DEBUG_26]
+
+	ldr w0, [x1, #SDRAM_CFG_2]
+	orr w0, w0, #SDRAM_CFG2_FRC_SR
+	str w0,  [x1, #SDRAM_CFG_2]
+
+3:
+	ldr w0, [x1, #DDR_DSR2]
+	orr w0, w0, #DDR_DSR_2_PHY_INIT_CMPLT
+	str w0, [x1, #DDR_DSR2]
+	ldr w0, [x1, #DDR_DSR2]
+	and w0, w0, #DDR_DSR_2_PHY_INIT_CMPLT
+	cbnz w0, 3b
+
+	ldr w0, [x1, #SDRAM_INTERVAL]
+	and w0, w0, #SDRAM_INTERVAL_REFINT_CLEAR
+	str w0, [x1, #SDRAM_INTERVAL]
+touch_line1:
+	cbz x3, touch_line2
+
+	ldr w0, [x1, #SDRAM_MD_CNTL]
+	orr w0, w0, #MD_CNTL_CKE(1)
+	orr w0, w0, #MD_CNTL_MD_EN
+	str w0, [x1, #SDRAM_MD_CNTL]
+
+	ldr w0, [x1, #TIMING_CFG_10]
+	orr w0, w0, #DDR_TIMING_CFG_10_T_STAB
+	str w0, [x1, #TIMING_CFG_10]
+
+	ldr w0, [x1, #SDRAM_CFG_2]
+	and w0, w0, #SDRAM_CFG2_FRC_SR_CLEAR
+	str w0, [x1, #SDRAM_CFG_2]
+
+4:
+	ldr w0, [x1, #DDR_DSR2]
+	and w0, w0, #DDR_DSR_2_PHY_INIT_CMPLT
+	cbz w0, 4b
+	nop
+touch_line2:
+	cbz x3, touch_line3
+
+	ldr w0, [x1, #DEBUG_26]
+	orr w0, w0, #DDR_DEBUG_26_BIT_25
+	and w0, w0, #DDR_DEBUG_26_BIT_24_CLEAR
+	str w0, [x1, #DEBUG_26]
+
+	cmp x2, #DDR_CNTRLR_2
+	b.ne 5f
+	ldr x1, =NXP_DDR2_ADDR
+	mov x2, xzr
+	b 1b
+
+5:
+	mov x5, xzr
+6:
+	add x5, x5, #1
+	cmp x5, #COUNT_100
+	b.ne 6b
+	nop
+touch_line3:
+	cbz x3, touch_line4
+#ifdef NXP_COINED_BB
+	ldr  x1, =NXP_SNVS_ADDR
+	ldr  w0, [x1, #NXP_APP_DATA_LP_GPR_OFFSET]
+
+	/* When warm boot is enabled, the zeroth bit of SNVS LP GPR
+	 * register 0 is used to record warm reset as the boot cause.
+	 */
+	orr  w0, w0, #(1 << NXP_LPGPR_ZEROTH_BIT)
+
+	/* write back */
+	str  w0, [x1, #NXP_APP_DATA_LP_GPR_OFFSET]
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+touch_line4:
+	cbz x3, touch_line6
+#elif !(ERLY_WRM_RST_FLG_FLSH_UPDT)
+	ldr  x1, =NXP_FLEXSPI_ADDR
+	ldr  w0, [x1, #FSPI_IPCMD]
+	orr  w0, w0, #FSPI_IPCMD_TRG_MASK
+	str  w0, [x1, #FSPI_IPCMD]
+7:
+	ldr  w0, [x1, #FSPI_INTR]
+	and  w0, w0, #FSPI_INTR_IPCMDDONE_MASK
+	cmp  w0, #0
+	b.eq 7b
+
+	ldr  w0, [x1, #FSPI_IPTXFCR]
+	orr  w0, w0, #FSPI_IPTXFCR_CLR
+	str  w0, [x1, #FSPI_IPTXFCR]
+
+	ldr  w0, [x1, #FSPI_INTR]
+	orr  w0, w0, #FSPI_INTR_IPCMDDONE_MASK
+	str  w0, [x1, #FSPI_INTR]
+	nop
+touch_line4:
+	cbz  x3, touch_line5
+	/* The flexspi driver provides an is_flash_busy() API, but its
+	 * implementation does not fit within one cache line. Instead,
+	 * nop cycles are used to approximate the wait time for the
+	 * flash write to complete.
+	 *
+	 * Note: this wait time varies from flash to flash.
+	 */
+
+	mov  x0, #FLASH_WR_COMP_WAIT_BY_NOP_COUNT
+8:
+	sub  x0, x0, #1
+	nop
+	cmp  x0, #0
+	b.ne 8b
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+touch_line5:
+	cbz  x3, touch_line6
+#endif
+	ldr  x2, =NXP_RST_ADDR
+	/* clear the RST_REQ_MSK and SW_RST_REQ */
+	mov  w0, #0x00000000
+	str  w0, [x2, #RSTCNTL_OFFSET]
+
+	/* initiate the sw reset request */
+	mov  w0, #SW_RST_REQ_INIT
+	str  w0, [x2, #RSTCNTL_OFFSET]
+
+	/* In case this address range is mapped as cacheable,
+	 * flush the write out of the dcaches.
+	 */
+	add  x2, x2, #RSTCNTL_OFFSET
+	dc   cvac, x2
+	dsb  st
+	isb
+
+	/* Function does not return */
+	b  .
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+touch_line6:
+	cbz x3, start_line0
+
+endfunc _soc_sys_warm_reset
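
For the NXP_COINED_BB case, the flag update above amounts to setting one bit in SNVS LP GPR0. A minimal C sketch of that update, using the macro names from the assembly (function name hypothetical):

#include <mmio.h>

/* Sketch only: mark "warm reset" as the boot cause in SNVS LP GPR0. */
static void snvs_mark_warm_reset(void)
{
	uint32_t gpr0 = mmio_read_32(NXP_SNVS_ADDR + NXP_APP_DATA_LP_GPR_OFFSET);

	mmio_write_32(NXP_SNVS_ADDR + NXP_APP_DATA_LP_GPR_OFFSET,
		      gpr0 | (1U << NXP_LPGPR_ZEROTH_BIT));
}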
diff --git a/plat/nxp/soc-lx2160a/ddr_fip.mk b/plat/nxp/soc-lx2160a/ddr_fip.mk
new file mode 100644
index 0000000..f14a9e8
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/ddr_fip.mk
@@ -0,0 +1,97 @@
+#
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+DDR_PHY_BIN_PATH	?=	./ddr-phy-binary/lx2160a
+
+ifeq (${DDR_IMEM_UDIMM_1D},)
+    DDR_IMEM_UDIMM_1D	:=	${DDR_PHY_BIN_PATH}/ddr4_pmu_train_imem.bin
+endif
+
+ifeq (${DDR_IMEM_UDIMM_2D},)
+    DDR_IMEM_UDIMM_2D	:=	${DDR_PHY_BIN_PATH}/ddr4_2d_pmu_train_imem.bin
+endif
+
+ifeq (${DDR_DMEM_UDIMM_1D},)
+    DDR_DMEM_UDIMM_1D	:=	${DDR_PHY_BIN_PATH}/ddr4_pmu_train_dmem.bin
+endif
+
+ifeq (${DDR_DMEM_UDIMM_2D},)
+    DDR_DMEM_UDIMM_2D	:=	${DDR_PHY_BIN_PATH}/ddr4_2d_pmu_train_dmem.bin
+endif
+
+ifeq (${DDR_IMEM_RDIMM_1D},)
+    DDR_IMEM_RDIMM_1D	:=	${DDR_PHY_BIN_PATH}/ddr4_rdimm_pmu_train_imem.bin
+endif
+
+ifeq (${DDR_IMEM_RDIMM_2D},)
+    DDR_IMEM_RDIMM_2D	:=	${DDR_PHY_BIN_PATH}/ddr4_rdimm2d_pmu_train_imem.bin
+endif
+
+ifeq (${DDR_DMEM_RDIMM_1D},)
+    DDR_DMEM_RDIMM_1D	:=	${DDR_PHY_BIN_PATH}/ddr4_rdimm_pmu_train_dmem.bin
+endif
+
+ifeq (${DDR_DMEM_RDIMM_2D},)
+    DDR_DMEM_RDIMM_2D	:=	${DDR_PHY_BIN_PATH}/ddr4_rdimm2d_pmu_train_dmem.bin
+endif
+
+$(shell mkdir -p '${BUILD_PLAT}')
+
+ifeq (${DDR_FIP_NAME},)
+ifeq (${TRUSTED_BOARD_BOOT},1)
+	DDR_FIP_NAME	:= ddr_fip_sec.bin
+else
+	DDR_FIP_NAME	:= ddr_fip.bin
+endif
+endif
+
+ifneq (${TRUSTED_BOARD_BOOT},1)
+
+DDR_FIP_ARGS += --ddr-immem-udimm-1d ${DDR_IMEM_UDIMM_1D} \
+		--ddr-immem-udimm-2d ${DDR_IMEM_UDIMM_2D} \
+		--ddr-dmmem-udimm-1d ${DDR_DMEM_UDIMM_1D} \
+		--ddr-dmmem-udimm-2d ${DDR_DMEM_UDIMM_2D} \
+		--ddr-immem-rdimm-1d ${DDR_IMEM_RDIMM_1D} \
+		--ddr-immem-rdimm-2d ${DDR_IMEM_RDIMM_2D} \
+		--ddr-dmmem-rdimm-1d ${DDR_DMEM_RDIMM_1D} \
+		--ddr-dmmem-rdimm-2d ${DDR_DMEM_RDIMM_2D}
+endif
+
+
+ifeq (${TRUSTED_BOARD_BOOT},1)
+ifeq (${MBEDTLS_DIR},)
+include plat/nxp/soc-lx2160a/ddr_sb.mk
+else
+include plat/nxp/soc-lx2160a/ddr_tbbr.mk
+
+# Variables for use with Certificate Generation Tool
+CRTTOOLPATH	?=	tools/cert_create
+CRTTOOL		?=	${CRTTOOLPATH}/cert_create${BIN_EXT}
+
+ifneq (${GENERATE_COT},0)
+ddr_certificates: ${DDR_CRT_DEPS} ${CRTTOOL}
+	${Q}${CRTTOOL} ${DDR_CRT_ARGS}
+	@${ECHO_BLANK_LINE}
+	@echo "Built $@ successfully"
+	@echo "DDR certificates can be found in ${BUILD_PLAT}"
+	@${ECHO_BLANK_LINE}
+endif
+endif
+endif
+
+# Variables for use with Firmware Image Package
+FIPTOOLPATH	?=	tools/fiptool
+FIPTOOL		?=	${FIPTOOLPATH}/fiptool${BIN_EXT}
+
+${BUILD_PLAT}/${DDR_FIP_NAME}: ${DDR_FIP_DEPS} ${FIPTOOL}
+	$(eval ${CHECK_DDR_FIP_CMD})
+	${Q}${FIPTOOL} create ${DDR_FIP_ARGS} $@
+	${Q}${FIPTOOL} info $@
+	@${ECHO_BLANK_LINE}
+	@echo "Built $@ successfully"
+	@${ECHO_BLANK_LINE}
+
+fip_ddr: ${BUILD_PLAT}/${DDR_FIP_NAME}
diff --git a/plat/nxp/soc-lx2160a/ddr_sb.mk b/plat/nxp/soc-lx2160a/ddr_sb.mk
new file mode 100644
index 0000000..c11651e
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/ddr_sb.mk
@@ -0,0 +1,43 @@
+#
+# Copyright 2021 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${TRUSTED_BOARD_BOOT},0)
+
+ifeq (${GENERATE_COT},0)
+
+DDR_FIP_ARGS += --ddr-immem-udimm-1d ${DDR_IMEM_UDIMM_1D}.sb \
+		--ddr-immem-udimm-2d ${DDR_IMEM_UDIMM_2D}.sb \
+		--ddr-dmmem-udimm-1d ${DDR_DMEM_UDIMM_1D}.sb \
+		--ddr-dmmem-udimm-2d ${DDR_DMEM_UDIMM_2D}.sb \
+		--ddr-immem-rdimm-1d ${DDR_IMEM_RDIMM_1D}.sb \
+		--ddr-immem-rdimm-2d ${DDR_IMEM_RDIMM_2D}.sb \
+		--ddr-dmmem-rdimm-1d ${DDR_DMEM_RDIMM_1D}.sb \
+		--ddr-dmmem-rdimm-2d ${DDR_DMEM_RDIMM_2D}.sb
+endif
+
+UDIMM_DEPS = ${DDR_IMEM_UDIMM_1D}.sb ${DDR_IMEM_UDIMM_2D}.sb ${DDR_DMEM_UDIMM_1D}.sb ${DDR_DMEM_UDIMM_2D}.sb
+RDIMM_DEPS = ${DDR_IMEM_RDIMM_1D}.sb ${DDR_IMEM_RDIMM_2D}.sb ${DDR_DMEM_RDIMM_1D}.sb ${DDR_DMEM_RDIMM_2D}.sb
+DDR_FIP_DEPS += ${UDIMM_DEPS}
+DDR_FIP_DEPS += ${RDIMM_DEPS}
+
+# Maximum size of the CSF header (CSF_HDR_SZ = 0x3000).
+# The image is appended at this offset after the start of the header.
+# The path to the CST directory is required to generate the CSF header
+# and prepend it to the image before the FIP image is generated.
+ifeq (${CST_DIR},)
+  $(error Error: CST_DIR not set)
+endif
+
+ifeq (${DDR_INPUT_FILE},)
+DDR_INPUT_FILE:= drivers/nxp/auth/csf_hdr_parser/${CSF_FILE}
+endif
+
+%.sb: %
+	@echo " Generating CSF Header for $@ $<"
+	$(CST_DIR)/create_hdr_esbc --in $< --out $@ --app_off ${CSF_HDR_SZ} \
+					--app $< ${DDR_INPUT_FILE}
+
+endif
diff --git a/plat/nxp/soc-lx2160a/ddr_tbbr.mk b/plat/nxp/soc-lx2160a/ddr_tbbr.mk
new file mode 100644
index 0000000..deb475b
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/ddr_tbbr.mk
@@ -0,0 +1,95 @@
+#
+# Copyright 2021 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# This file defines the keys and certificates that must be created to establish
+# a Chain of Trust for the DDR FW. These definitions include the
+# command line options passed to the cert_create and fiptool commands for DDR FW.
+# A DDR FW key is used to sign the DDR firmware. The DDR FW key is authenticated
+# by the Trusted World key. Two content certificates are created:
+# - for the DDR RDIMM images [signed by the DDR FW key]
+# - for the DDR UDIMM images [signed by the DDR FW key]
+#
+# Expected environment:
+#
+#   BUILD_PLAT: output directory
+#
+# Build options added by this file:
+#
+#   KEY_ALG
+#   KEY_SIZE
+#   TRUSTED_WORLD_KEY
+#   NON_TRUSTED_WORLD_KEY
+#
+
+# Copy cert_create_tbbr.mk from PLAT_TOOL_PATH/cert_create_helper
+# to ${PLAT_DIR} so that cert_create can generate
+# the DDR certificates.
+$(shell cp ${PLAT_TOOL_PATH}/cert_create_helper/cert_create_tbbr.mk ${PLAT_DIR})
+
+# Certificate generation tool default parameters
+DDR_FW_CERT		:=	${BUILD_PLAT}/ddr_fw_key_cert.crt
+
+# Default non-volatile counter values (overridable by the platform)
+TFW_NVCTR_VAL		?=	0
+NTFW_NVCTR_VAL		?=	0
+
+# Pass the non-volatile counters to the cert_create tool
+$(eval $(call CERT_ADD_CMD_OPT,${TFW_NVCTR_VAL},--tfw-nvctr,DDR_))
+
+$(shell mkdir -p '${BUILD_PLAT}')
+
+ifeq (${DDR_KEY},)
+DDR_KEY=${BUILD_PLAT}/ddr.pem
+endif
+
+ifeq (${TRUSTED_KEY_CERT},)
+$(info Generating: Trusted key certificate as part of DDR cert creation)
+TRUSTED_KEY_CERT	:=	${BUILD_PLAT}/trusted_key.crt
+$(eval $(call TOOL_ADD_PAYLOAD,${TRUSTED_KEY_CERT},--trusted-key-cert,))
+$(eval $(call TOOL_ADD_PAYLOAD,${TRUSTED_KEY_CERT},--trusted-key-cert,,DDR_))
+else
+$(info Using: Trusted key certificate as part of DDR cert creation)
+DDR_FIP_ARGS += --trusted-key-cert ${TRUSTED_KEY_CERT}
+endif
+
+# Add the keys to the cert_create command line options (private keys are NOT
+# packed in the FIP). Developers can use their own keys by specifying the proper
+# build option in the command line when building the Trusted Firmware
+$(if ${KEY_ALG},$(eval $(call CERT_ADD_CMD_OPT,${KEY_ALG},--key-alg,DDR_)))
+$(if ${KEY_SIZE},$(eval $(call CERT_ADD_CMD_OPT,${KEY_SIZE},--key-size,DDR_)))
+$(if ${HASH_ALG},$(eval $(call CERT_ADD_CMD_OPT,${HASH_ALG},--hash-alg,DDR_)))
+$(if ${ROT_KEY},$(eval $(call CERT_ADD_CMD_OPT,${ROT_KEY},--rot-key,DDR_)))
+$(if ${TRUSTED_WORLD_KEY},$(eval $(call CERT_ADD_CMD_OPT,${TRUSTED_WORLD_KEY},--trusted-world-key,DDR_)))
+$(if ${NON_TRUSTED_WORLD_KEY},$(eval $(call CERT_ADD_CMD_OPT,${NON_TRUSTED_WORLD_KEY},--non-trusted-world-key, DDR_)))
+
+# Add the DDR CoT (key cert + img cert)
+$(if ${DDR_KEY},$(eval $(call CERT_ADD_CMD_OPT,${DDR_KEY},--ddr-fw-key,DDR_)))
+$(eval $(call TOOL_ADD_PAYLOAD,${BUILD_PLAT}/ddr_fw_key.crt,--ddr-fw-key-cert,,DDR_))
+$(eval $(call TOOL_ADD_PAYLOAD,${BUILD_PLAT}/ddr_udimm_fw_content.crt,--ddr-udimm-fw-cert,,DDR_))
+$(eval $(call TOOL_ADD_PAYLOAD,${BUILD_PLAT}/ddr_rdimm_fw_content.crt,--ddr-rdimm-fw-cert,,DDR_))
+
+$(eval $(call TOOL_ADD_IMG,DDR_IMEM_UDIMM_1D,--ddr-immem-udimm-1d,DDR_))
+$(eval $(call TOOL_ADD_IMG,DDR_IMEM_UDIMM_2D,--ddr-immem-udimm-2d,DDR_))
+$(eval $(call TOOL_ADD_IMG,DDR_DMEM_UDIMM_1D,--ddr-dmmem-udimm-1d,DDR_))
+$(eval $(call TOOL_ADD_IMG,DDR_DMEM_UDIMM_2D,--ddr-dmmem-udimm-2d,DDR_))
+
+$(eval $(call TOOL_ADD_IMG,DDR_IMEM_RDIMM_1D,--ddr-immem-rdimm-1d,DDR_))
+$(eval $(call TOOL_ADD_IMG,DDR_IMEM_RDIMM_2D,--ddr-immem-rdimm-2d,DDR_))
+$(eval $(call TOOL_ADD_IMG,DDR_DMEM_RDIMM_1D,--ddr-dmmem-rdimm-1d,DDR_))
+$(eval $(call TOOL_ADD_IMG,DDR_DMEM_RDIMM_2D,--ddr-dmmem-rdimm-2d,DDR_))
+
+DDR_FIP_DEPS += ddr_certificates
+
+# Process TBB related flags
+ifneq (${GENERATE_COT},0)
+        # Common cert_create options
+        ifneq (${CREATE_KEYS},0)
+                $(eval DDR_CRT_ARGS += -n)
+                ifneq (${SAVE_KEYS},0)
+                       $(eval DDR_CRT_ARGS += -k)
+                endif
+        endif
+endif
diff --git a/plat/nxp/soc-lx2160a/erratas_soc.c b/plat/nxp/soc-lx2160a/erratas_soc.c
new file mode 100644
index 0000000..8f3aa9f
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/erratas_soc.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <mmio.h>
+
+#ifdef ERRATA_SOC_A050426
+void erratum_a050426(void)
+{
+	uint32_t i, val3, val4;
+
+	/* Enable BIST to access Internal memory locations */
+	val3 = mmio_read_32(0x700117E60);
+	mmio_write_32(0x700117E60, (val3 | 0x80000001));
+	val4 = mmio_read_32(0x700117E90);
+	mmio_write_32(0x700117E90, (val4 & 0xFFDFFFFF));
+
+	/* wriop Internal Memory.*/
+	for (i = 0U; i < 4U; i++) {
+		mmio_write_32(0x706312000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706312400 + (i * 4), 0x55555555);
+		mmio_write_32(0x706312800 + (i * 4), 0x55555555);
+		mmio_write_32(0x706314000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706314400 + (i * 4), 0x55555555);
+		mmio_write_32(0x706314800 + (i * 4), 0x55555555);
+		mmio_write_32(0x706314c00 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x706316000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706320000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706320400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 2U; i++) {
+		mmio_write_32(0x70640a000 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x706518000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706519000 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 4U; i++) {
+		mmio_write_32(0x706522000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706522800 + (i * 4), 0x55555555);
+		mmio_write_32(0x706523000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706523800 + (i * 4), 0x55555555);
+		mmio_write_32(0x706524000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706524800 + (i * 4), 0x55555555);
+		mmio_write_32(0x706608000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706608800 + (i * 4), 0x55555555);
+		mmio_write_32(0x706609000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706609800 + (i * 4), 0x55555555);
+		mmio_write_32(0x70660a000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70660a800 + (i * 4), 0x55555555);
+		mmio_write_32(0x70660b000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70660b800 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x70660c000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70660c800 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 2U; i++) {
+		mmio_write_32(0x706718000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706718800 + (i * 4), 0x55555555);
+	}
+	mmio_write_32(0x706b0a000 + (i * 4), 0x55555555);
+
+	for (i = 0U; i < 4U; i++) {
+		mmio_write_32(0x706b0e000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706b0e800 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 2U; i++) {
+		mmio_write_32(0x706b10000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706b10400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 4U; i++) {
+		mmio_write_32(0x706b14000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706b14800 + (i * 4), 0x55555555);
+		mmio_write_32(0x706b15000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706b15800 + (i * 4), 0x55555555);
+	}
+	mmio_write_32(0x706e12000 + (i * 4), 0x55555555);
+
+	for (i = 0U; i < 4U; i++) {
+		mmio_write_32(0x706e14000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706e14800 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 2U; i++) {
+		mmio_write_32(0x706e16000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706e16400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x706e1a000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706e1a800 + (i * 4), 0x55555555);
+		mmio_write_32(0x706e1b000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706e1b800 + (i * 4), 0x55555555);
+		mmio_write_32(0x706e1c000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706e1c800 + (i * 4), 0x55555555);
+		mmio_write_32(0x706e1e000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706e1e800 + (i * 4), 0x55555555);
+		mmio_write_32(0x706e1f000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706e1f800 + (i * 4), 0x55555555);
+		mmio_write_32(0x706e20000 + (i * 4), 0x55555555);
+		mmio_write_32(0x706e20800 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 4U; i++) {
+		mmio_write_32(0x707108000 + (i * 4), 0x55555555);
+		mmio_write_32(0x707109000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70710a000 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 2U; i++) {
+		mmio_write_32(0x70711c000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70711c800 + (i * 4), 0x55555555);
+		mmio_write_32(0x70711d000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70711d800 + (i * 4), 0x55555555);
+		mmio_write_32(0x70711e000 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 4U; i++) {
+		mmio_write_32(0x707120000 + (i * 4), 0x55555555);
+		mmio_write_32(0x707121000 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x707122000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70725a000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70725b000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70725c000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70725e000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70725e400 + (i * 4), 0x55555555);
+		mmio_write_32(0x70725e800 + (i * 4), 0x55555555);
+		mmio_write_32(0x70725ec00 + (i * 4), 0x55555555);
+		mmio_write_32(0x70725f000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70725f400 + (i * 4), 0x55555555);
+		mmio_write_32(0x707340000 + (i * 4), 0x55555555);
+		mmio_write_32(0x707346000 + (i * 4), 0x55555555);
+		mmio_write_32(0x707484000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70748a000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70748b000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70748c000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70748d000 + (i * 4), 0x55555555);
+	}
+
+	/* EDMA Internal Memory.*/
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70a208000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70a208800 + (i * 4), 0x55555555);
+		mmio_write_32(0x70a209000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70a209800 + (i * 4), 0x55555555);
+	}
+
+	/* PEX1 Internal Memory.*/
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x70a508000 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70a520000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70a528000 + (i * 4), 0x55555555);
+	}
+
+	/* PEX2 Internal Memory.*/
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x70a608000 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70a620000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70a628000 + (i * 4), 0x55555555);
+	}
+
+	/* PEX3 Internal Memory.*/
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70a708000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70a728000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70a730000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70a738000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70a748000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70a758000 + (i * 4), 0x55555555);
+	}
+
+	/* PEX4 Internal Memory.*/
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x70a808000 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70a820000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70a828000 + (i * 4), 0x55555555);
+	}
+
+	/* PEX5 Internal Memory.*/
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70aa08000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70aa28000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70aa30000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70aa38000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70aa48000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70aa58000 + (i * 4), 0x55555555);
+	}
+
+	/* PEX6 Internal Memory.*/
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x70ab08000 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70ab20000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70ab28000 + (i * 4), 0x55555555);
+	}
+
+	/* QDMA Internal Memory.*/
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70b008000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b00c000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b010000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b014000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b018000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b018400 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b01a000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b01a400 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b01c000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b01d000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b01e000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b01e800 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b01f000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b01f800 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b020000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b020400 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b020800 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b020c00 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b022000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b022400 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b024000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b024800 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b025000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b025800 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 4U; i++) {
+		mmio_write_32(0x70b026000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b026200 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70b028000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b028800 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b029000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70b029800 + (i * 4), 0x55555555);
+	}
+
+	/* lnx1_e1000#0 Internal Memory.*/
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x70c00a000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00a200 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00a400 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00a600 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00a800 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00aa00 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00ac00 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00ae00 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00b000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00b200 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00b400 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00b600 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00b800 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00ba00 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00bc00 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00be00 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70c00c000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00c400 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00c800 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00cc00 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00d000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00d400 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00d800 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00dc00 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x70c00e000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c00f000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c012000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c012200 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c012400 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c012600 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c012800 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c012a00 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c012c00 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c012e00 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c013000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c013200 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c013400 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c013600 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c013800 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c013a00 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c013c00 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c013e00 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70c014000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c014400 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c014800 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c014c00 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c015000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c015400 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c015800 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c015c00 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x70c016000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c017000 + (i * 4), 0x55555555);
+	}
+
+	/* lnx1_xfi Internal Memory.*/
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x70c108000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c108200 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c10a000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c10a400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70c10c000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c10c400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x70c10e000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c10e200 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c110000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c110400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70c112000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c112400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x70c114000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c114200 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c116000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c116400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70c118000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c118400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x70c11a000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c11a200 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c11c000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c11c400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70c11e000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c11e400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x70c120000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c120200 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c122000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c122400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70c124000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c124400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x70c126000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c126200 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c128000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c128400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70c12a000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c12a400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x70c12c000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c12c200 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c12e000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c12e400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70c130000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c130400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x70c132000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c132200 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c134000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c134400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70c136000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c136400 + (i * 4), 0x55555555);
+	}
+
+	/* lnx2_xfi Internal Memory.*/
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x70c308000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c308200 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c30a000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c30a400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70c30c000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c30c400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 3U; i++) {
+		mmio_write_32(0x70c30e000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c30e200 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c310000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c310400 + (i * 4), 0x55555555);
+	}
+	for (i = 0U; i < 5U; i++) {
+		mmio_write_32(0x70c312000 + (i * 4), 0x55555555);
+		mmio_write_32(0x70c312400 + (i * 4), 0x55555555);
+	}
+
+	/* Disable BIST */
+
+	mmio_write_32(0x700117E60, val3);
+	mmio_write_32(0x700117E90, val4);
+}
+#endif
diff --git a/plat/nxp/soc-lx2160a/erratas_soc.mk b/plat/nxp/soc-lx2160a/erratas_soc.mk
new file mode 100644
index 0000000..07bed03
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/erratas_soc.mk
@@ -0,0 +1,21 @@
+#
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Platform Errata Build flags.
+# These should be enabled by the platform if the erratum workaround needs to be
+# applied.
+
+# Flag to apply the erratum A-050426 workaround during reset.
+ERRATA_SOC_A050426	?= 0
+
+# Process ERRATA_SOC_A050426 flag
+ifeq (${ERRATA_SOC_A050426}, 1)
+INCL_SOC_ERRATA_SOURCES	:= yes
+$(eval $(call add_define,ERRATA_SOC_A050426))
+endif
+
+ifeq (${INCL_SOC_ERRATA_SOURCES},yes)
+BL2_SOURCES	+= 	${PLAT_SOC_PATH}/erratas_soc.c
+endif
diff --git a/plat/nxp/soc-lx2160a/include/errata.h b/plat/nxp/soc-lx2160a/include/errata.h
new file mode 100644
index 0000000..937824a
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/include/errata.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ERRATA_H
+#define ERRATA_H
+
+#ifdef ERRATA_SOC_A050426
+void erratum_a050426(void);
+#endif
+
+#endif /* ERRATA_H */
diff --git a/plat/nxp/soc-lx2160a/include/soc.h b/plat/nxp/soc-lx2160a/include/soc.h
new file mode 100644
index 0000000..bd23620
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/include/soc.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef _SOC_H
+#define	_SOC_H
+
+/* Chassis specific defines - common across SoC's of a particular platform */
+#include <dcfg_lsch3.h>
+#include <soc_default_base_addr.h>
+#include <soc_default_helper_macros.h>
+
+
+#define NUM_DRAM_REGIONS		3
+#define	NXP_DRAM0_ADDR			0x80000000
+#define NXP_DRAM0_MAX_SIZE		0x80000000	/*  2 GB  */
+
+#define NXP_DRAM1_ADDR			0x2080000000
+#define NXP_DRAM1_MAX_SIZE		0x1F80000000	/* 126 G */
+
+#define NXP_DRAM2_ADDR			0x6000000000
+#define NXP_DRAM2_MAX_SIZE		0x2000000000	/* 128 GB */
+
+/* DRAM0 size is defined in platform_def.h */
+#define	NXP_DRAM0_SIZE			PLAT_DEF_DRAM0_SIZE
+
+#define DDR_PLL_FIX
+#define NXP_DDR_PHY1_ADDR		0x01400000
+#define NXP_DDR_PHY2_ADDR		0x01600000
+
+#if defined(IMAGE_BL31)
+#define LS_SYS_TIMCTL_BASE		0x2890000
+
+#ifdef LS_SYS_TIMCTL_BASE
+#define PLAT_LS_NSTIMER_FRAME_ID	0
+#define LS_CONFIG_CNTACR		1
+#endif
+#endif
+
+/* Start: Macros used by soc.c: get_boot_dev */
+#define PORSR1_RCW_MASK		0x07800000
+#define PORSR1_RCW_SHIFT	23
+
+#define SDHC1_VAL		0x8
+#define SDHC2_VAL		0x9
+#define I2C1_VAL		0xa
+#define FLEXSPI_NAND2K_VAL	0xc
+#define FLEXSPI_NAND4K_VAL	0xd
+#define FLEXSPI_NOR		0xf
+/* End: Macros used by soc.c: get_boot_dev */
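+/*
+ * Illustrative decode only (the real logic lives in soc.c):
+ *   rcw_src = (porsr1 & PORSR1_RCW_MASK) >> PORSR1_RCW_SHIFT;
+ * rcw_src then matches one of the *_VAL boot-source codes above.
+ */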
+
+/* bits */
+/* SVR Definition */
+#define SVR_LX2160A		0x04
+#define SVR_LX2120A		0x14
+#define SVR_LX2080A		0x05
+
+/* Number of cores in platform */
+/* Used by common code for array initialization */
+#define NUMBER_OF_CLUSTERS		8
+#define CORES_PER_CLUSTER		2
+#define PLATFORM_CORE_COUNT		(NUMBER_OF_CLUSTERS * CORES_PER_CLUSTER)
+
+/*
+ * Required LS standard platform porting definitions
+ * for CCN-508
+ */
+#define PLAT_CLUSTER_TO_CCN_ID_MAP 11, 15, 27, 31, 12, 28, 16, 0
+#define PLAT_6CLUSTER_TO_CCN_ID_MAP 11, 15, 27, 31, 12, 28
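+/*
+ * These maps feed the CCN driver tables in soc.c, e.g. (sketch):
+ *   static const unsigned char master_to_rn_id_map[] = {
+ *           PLAT_CLUSTER_TO_CCN_ID_MAP
+ *   };
+ */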
+
+
+/* Defines required for using XLAT tables from ARM common code */
+#define PLAT_PHY_ADDR_SPACE_SIZE	(1ull << 40)
+#define PLAT_VIRT_ADDR_SPACE_SIZE	(1ull << 40)
+
+/* Clock Divisors */
+#define NXP_PLATFORM_CLK_DIVIDER	2
+#define NXP_UART_CLK_DIVIDER		4
+
+/* Start: Macros used by lx2160a.S */
+#define MPIDR_AFFINITY0_MASK			0x00FF
+#define MPIDR_AFFINITY1_MASK			0xFF00
+#define CPUECTLR_DISABLE_TWALK_PREFETCH		0x4000000000
+#define CPUECTLR_INS_PREFETCH_MASK		0x1800000000
+#define CPUECTLR_DAT_PREFETCH_MASK		0x0300000000
+#define CPUECTLR_RET_8CLK			0x2
+#define OSDLR_EL1_DLK_LOCK			0x1
+#define CNTP_CTL_EL0_EN				0x1
+#define CNTP_CTL_EL0_IMASK			0x2
+/* set to 0 if the clusters are not symmetrical */
+#define SYMMETRICAL_CLUSTERS			1
+/* End: Macros used by lx2160a.S */
+
+/* Start: Macros used by lib/psci files */
+#define SYSTEM_PWR_DOMAINS 1
+#define PLAT_NUM_PWR_DOMAINS   (PLATFORM_CORE_COUNT + \
+				NUMBER_OF_CLUSTERS  + \
+				SYSTEM_PWR_DOMAINS)
+
+/* Power state coordination occurs at the system level */
+#define PLAT_MAX_PWR_LVL  MPIDR_AFFLVL2
+
+/* define retention state */
+#define PLAT_MAX_RET_STATE  (PSCI_LOCAL_STATE_RUN + 1)
+
+/* define power-down state */
+#define PLAT_MAX_OFF_STATE  (PLAT_MAX_RET_STATE + 1)
+/* End: Macros used by lib/psci files */
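+/*
+ * For the full LX2160A personality this evaluates to
+ * 16 cores + 8 clusters + 1 system domain = 25 power domains.
+ */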
+
+/* Some data must be aligned on the biggest cache line size in the platform.
+ * This is known only to the platform as it might have a combination of
+ * integrated and external caches.
+ *
+ * CACHE_WRITEBACK_GRANULE is defined in soc.def
+ *
+ * One cache line needed for bakery locks on ARM platforms
+ */
+#define PLAT_PERCPU_BAKERY_LOCK_SIZE (1 * CACHE_WRITEBACK_GRANULE)
+
+#ifndef WDOG_RESET_FLAG
+#define WDOG_RESET_FLAG DEFAULT_SET_VALUE
+#endif
+
+#ifndef WARM_BOOT_SUCCESS
+#define WARM_BOOT_SUCCESS DEFAULT_SET_VALUE
+#endif
+
+#ifndef __ASSEMBLER__
+
+void set_base_freq_CNTFID0(void);
+void soc_init_start(void);
+void soc_init_finish(void);
+void soc_init_percpu(void);
+void _soc_set_start_addr(unsigned long addr);
+void _set_platform_security(void);
+
+#endif
+
+#endif /* _SOC_H */
diff --git a/plat/nxp/soc-lx2160a/lx2160aqds/ddr_init.c b/plat/nxp/soc-lx2160a/lx2160aqds/ddr_init.c
new file mode 100644
index 0000000..d44733c
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/lx2160aqds/ddr_init.c
@@ -0,0 +1,355 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <ddr.h>
+#include <lib/utils.h>
+#include <load_img.h>
+
+#include "plat_common.h"
+#include <platform_def.h>
+
+#ifdef CONFIG_STATIC_DDR
+
+const struct ddr_cfg_regs static_3200 = {
+	.cs[0].bnds = U(0x03FF),
+	.cs[1].bnds = U(0x03FF),
+	.cs[0].config = U(0x80050422),
+	.cs[1].config = U(0x80000422),
+	.cs[2].bnds = U(0x00),
+	.cs[3].bnds = U(0x00),
+	.cs[2].config = U(0x00),
+	.cs[3].config = U(0x00),
+	.timing_cfg[0] = U(0xFFAA0018),
+	.timing_cfg[1] = U(0x646A8844),
+	.timing_cfg[2] = U(0x00058022),
+	.timing_cfg[3] = U(0x13622100),
+	.timing_cfg[4] = U(0x02),
+	.timing_cfg[5] = U(0x07401400),
+	.timing_cfg[7] = U(0x3BB00000),
+	.timing_cfg[8] = U(0x0944AC00),
+	.sdram_cfg[0] = U(0x65044008),
+	.sdram_cfg[1] = U(0x00401011),
+	.sdram_cfg[2] = U(0x00),
+	.sdram_mode[0] = U(0x06010C50),
+	.sdram_mode[1] = U(0x00280400),
+	.sdram_mode[2] = U(0x00),
+	.sdram_mode[3] = U(0x00),
+	.sdram_mode[4] = U(0x00),
+	.sdram_mode[5] = U(0x00),
+	.sdram_mode[6] = U(0x00),
+	.sdram_mode[7] = U(0x00),
+	.sdram_mode[8] = U(0x0500),
+	.sdram_mode[9] = U(0x10240000),
+	.sdram_mode[10] = U(0x00),
+	.sdram_mode[11] = U(0x00),
+	.sdram_mode[12] = U(0x00),
+	.sdram_mode[13] = U(0x00),
+	.sdram_mode[14] = U(0x00),
+	.sdram_mode[15] = U(0x00),
+	.md_cntl = U(0x00),
+	.interval = U(0x30C00000),
+	.data_init = U(0xDEADBEEF),
+	.init_addr = U(0x00),
+	.zq_cntl = U(0x8A090705),
+	.sdram_rcw[0] = U(0x00),
+	.sdram_rcw[1] = U(0x00),
+	.sdram_rcw[2] = U(0x00),
+	.sdram_rcw[3] = U(0x00),
+	.sdram_rcw[4] = U(0x00),
+	.sdram_rcw[5] = U(0x00),
+	.err_disable = U(0x00),
+	.err_int_en = U(0x00),
+};
+
+const struct ddr_cfg_regs static_2900 = {
+	.cs[0].bnds = U(0x03FF),
+	.cs[1].bnds = U(0x03FF),
+	.cs[0].config = U(0x80050422),
+	.cs[1].config = U(0x80000422),
+	.cs[2].bnds = U(0x00),
+	.cs[3].bnds = U(0x00),
+	.cs[2].config = U(0x00),
+	.cs[3].config = U(0x00),
+	.timing_cfg[0] = U(0xFF990018),
+	.timing_cfg[1] = U(0x4F4A4844),
+	.timing_cfg[2] = U(0x0005601F),
+	.timing_cfg[3] = U(0x125F2100),
+	.timing_cfg[4] = U(0x02),
+	.timing_cfg[5] = U(0x07401400),
+	.timing_cfg[7] = U(0x3AA00000),
+	.timing_cfg[8] = U(0x09449B00),
+	.sdram_cfg[0] = U(0x65044008),
+	.sdram_cfg[1] = U(0x00401011),
+	.sdram_cfg[2] = U(0x00),
+	.sdram_mode[0] = U(0x06010C50),
+	.sdram_mode[1] = U(0x00280400),
+	.sdram_mode[2] = U(0x00),
+	.sdram_mode[3] = U(0x00),
+	.sdram_mode[4] = U(0x00),
+	.sdram_mode[5] = U(0x00),
+	.sdram_mode[6] = U(0x00),
+	.sdram_mode[7] = U(0x00),
+	.sdram_mode[8] = U(0x0500),
+	.sdram_mode[9] = U(0x10240000),
+	.sdram_mode[10] = U(0x00),
+	.sdram_mode[11] = U(0x00),
+	.sdram_mode[12] = U(0x00),
+	.sdram_mode[13] = U(0x00),
+	.sdram_mode[14] = U(0x00),
+	.sdram_mode[15] = U(0x00),
+	.md_cntl = U(0x00),
+	.interval = U(0x2C2E0000),
+	.data_init = U(0xDEADBEEF),
+	.init_addr = U(0x00),
+	.zq_cntl = U(0x8A090705),
+	.sdram_rcw[0] = U(0x00),
+	.sdram_rcw[1] = U(0x00),
+	.sdram_rcw[2] = U(0x00),
+	.sdram_rcw[3] = U(0x00),
+	.sdram_rcw[4] = U(0x00),
+	.sdram_rcw[5] = U(0x00),
+	.err_disable = U(0x00),
+	.err_int_en = U(0x00),
+};
+
+const struct ddr_cfg_regs static_2600 = {
+	.cs[0].bnds = U(0x03FF),
+	.cs[1].bnds = U(0x03FF),
+	.cs[0].config = U(0x80050422),
+	.cs[1].config = U(0x80000422),
+	.cs[2].bnds = U(0x00),
+	.cs[3].bnds = U(0x00),
+	.cs[2].config = U(0x00),
+	.cs[3].config = U(0x00),
+	.timing_cfg[0] = U(0xFF880018),
+	.timing_cfg[1] = U(0x2A24F444),
+	.timing_cfg[2] = U(0x007141DC),
+	.timing_cfg[3] = U(0x125B2100),
+	.timing_cfg[4] = U(0x02),
+	.timing_cfg[5] = U(0x06401400),
+	.timing_cfg[7] = U(0x28800000),
+	.timing_cfg[8] = U(0x07338A00),
+	.sdram_cfg[0] = U(0x65044008),
+	.sdram_cfg[1] = U(0x00401011),
+	.sdram_cfg[2] = U(0x00),
+	.sdram_mode[0] = U(0x06010A70),
+	.sdram_mode[1] = U(0x00200400),
+	.sdram_mode[2] = U(0x00),
+	.sdram_mode[3] = U(0x00),
+	.sdram_mode[4] = U(0x00),
+	.sdram_mode[5] = U(0x00),
+	.sdram_mode[6] = U(0x00),
+	.sdram_mode[7] = U(0x00),
+	.sdram_mode[8] = U(0x0500),
+	.sdram_mode[9] = U(0x0C240000),
+	.sdram_mode[10] = U(0x00),
+	.sdram_mode[11] = U(0x00),
+	.sdram_mode[12] = U(0x00),
+	.sdram_mode[13] = U(0x00),
+	.sdram_mode[14] = U(0x00),
+	.sdram_mode[15] = U(0x00),
+	.md_cntl = U(0x00),
+	.interval = U(0x279C0000),
+	.data_init = U(0xDEADBEEF),
+	.init_addr = U(0x00),
+	.zq_cntl = U(0x8A090705),
+	.sdram_rcw[0] = U(0x00),
+	.sdram_rcw[1] = U(0x00),
+	.sdram_rcw[2] = U(0x00),
+	.sdram_rcw[3] = U(0x00),
+	.sdram_rcw[4] = U(0x00),
+	.sdram_rcw[5] = U(0x00),
+	.err_disable = U(0x00),
+	.err_int_en = U(0x00),
+};
+
+const struct dimm_params static_dimm = {
+	.rdimm = U(0),
+	.primary_sdram_width = U(64),
+	.ec_sdram_width = U(8),
+	.n_ranks = U(2),
+	.device_width = U(8),
+	.mirrored_dimm = U(1),
+};
+
+/* Sample code using two UDIMM MT18ASF1G72AZ-2G6B1, on each DDR controller */
+unsigned long long board_static_ddr(struct ddr_info *priv)
+{
+	(void)memcpy(&priv->ddr_reg, &static_2900, sizeof(static_2900));
+	(void)memcpy(&priv->dimm, &static_dimm, sizeof(static_dimm));
+	priv->conf.cs_on_dimm[0] = 0x3;
+	ddr_board_options(priv);
+	compute_ddr_phy(priv);
+
+	return ULL(0x400000000);
+}
+
+#elif defined(CONFIG_DDR_NODIMM)
+/*
+ * Sample code to bypass reading SPD. This is a sample, not recommended
+ * for boards with slots. DDR model number: UDIMM MT18ASF1G72AZ-2G6B1.
+ */
+
+const struct dimm_params ddr_raw_timing = {
+	.n_ranks = U(2),
+	.rank_density = U(4294967296u),
+	.capacity = U(8589934592u),
+	.primary_sdram_width = U(64),
+	.ec_sdram_width = U(8),
+	.device_width = U(8),
+	.die_density = U(0x4),
+	.rdimm = U(0),
+	.mirrored_dimm = U(1),
+	.n_row_addr = U(15),
+	.n_col_addr = U(10),
+	.bank_addr_bits = U(0),
+	.bank_group_bits = U(2),
+	.edc_config = U(2),
+	.burst_lengths_bitmask = U(0x0c),
+	.tckmin_x_ps = 750,
+	.tckmax_ps = 1600,
+	.caslat_x = U(0x00FFFC00),
+	.taa_ps = 13750,
+	.trcd_ps = 13750,
+	.trp_ps = 13750,
+	.tras_ps = 32000,
+	.trc_ps = 457500,
+	.twr_ps = 15000,
+	.trfc1_ps = 260000,
+	.trfc2_ps = 160000,
+	.trfc4_ps = 110000,
+	.tfaw_ps = 21000,
+	.trrds_ps = 3000,
+	.trrdl_ps = 4900,
+	.tccdl_ps = 5000,
+	.refresh_rate_ps = U(7800000),
+};
+
+int ddr_get_ddr_params(struct dimm_params *pdimm,
+			    struct ddr_conf *conf)
+{
+	static const char dimm_model[] = "Fixed DDR on board";
+
+	conf->dimm_in_use[0] = 1;	/* Modify accordingly */
+	memcpy(pdimm, &ddr_raw_timing, sizeof(struct dimm_params));
+	memcpy(pdimm->mpart, dimm_model, sizeof(dimm_model) - 1);
+
+	/* valid DIMM mask, change accordingly, together with dimm_on_ctlr. */
+	return 0x5;
+}
+#endif	/* CONFIG_DDR_NODIMM */
+
+int ddr_board_options(struct ddr_info *priv)
+{
+	struct memctl_opt *popts = &priv->opt;
+	const struct ddr_conf *conf = &priv->conf;
+
+	popts->vref_dimm = U(0x24);		/* range 1, 83.4% */
+	popts->rtt_override = 0;
+	popts->rtt_park = U(240);
+	popts->otf_burst_chop_en = 0;
+	popts->burst_length = U(DDR_BL8);
+	popts->trwt_override = U(1);
+	popts->bstopre = U(0);			/* auto precharge */
+	popts->addr_hash = 1;
+
+	/* Set ODT impedance on PHY side */
+	switch (conf->cs_on_dimm[1]) {
+	case 0xc:	/* Two slots dual rank */
+	case 0x4:	/* Two slots single rank, not valid for interleaving */
+		popts->trwt = U(0xf);
+		popts->twrt = U(0x7);
+		popts->trrt = U(0x7);
+		popts->twwt = U(0x7);
+		popts->vref_phy = U(0x6B);	/* 83.6% */
+		popts->odt = U(60);
+		popts->phy_tx_impedance = U(28);
+		break;
+	case 0:		/* One slot used */
+	default:
+		popts->trwt = U(0x3);
+		popts->twrt = U(0x3);
+		popts->trrt = U(0x3);
+		popts->twwt = U(0x3);
+		popts->vref_phy = U(0x60);	/* 75% */
+		popts->odt = U(48);
+		popts->phy_tx_impedance = U(28);
+		break;
+	}
+
+	return 0;
+}
+
+#ifdef NXP_WARM_BOOT
+long long init_ddr(uint32_t wrm_bt_flg)
+#else
+long long init_ddr(void)
+#endif
+{
+	int spd_addr[] = {0x51U, 0x52U, 0x53U, 0x54U};
+	struct ddr_info info;
+	struct sysinfo sys;
+	long long dram_size;
+
+	zeromem(&sys, sizeof(sys));
+	if (get_clocks(&sys) == 1) {
+		ERROR("System clocks are not set.\n");
+		panic();
+	}
+	debug("platform clock %lu\n", sys.freq_platform);
+	debug("DDR PLL1 %lu\n", sys.freq_ddr_pll0);
+	debug("DDR PLL2 %lu\n", sys.freq_ddr_pll1);
+
+	zeromem(&info, sizeof(info));
+
+	/* Set two DDRC. Unused DDRC will be removed automatically. */
+	info.num_ctlrs = NUM_OF_DDRC;
+	info.spd_addr = spd_addr;
+	info.ddr[0] = (void *)NXP_DDR_ADDR;
+	info.ddr[1] = (void *)NXP_DDR2_ADDR;
+	info.phy[0] = (void *)NXP_DDR_PHY1_ADDR;
+	info.phy[1] = (void *)NXP_DDR_PHY2_ADDR;
+	info.clk = get_ddr_freq(&sys, 0);
+	info.img_loadr = load_img;
+	info.phy_gen2_fw_img_buf = PHY_GEN2_FW_IMAGE_BUFFER;
+	if (info.clk == 0) {
+		info.clk = get_ddr_freq(&sys, 1);
+	}
+	info.dimm_on_ctlr = DDRC_NUM_DIMM;
+
+	info.warm_boot_flag = DDR_WRM_BOOT_NT_SUPPORTED;
+#ifdef NXP_WARM_BOOT
+	info.warm_boot_flag = DDR_COLD_BOOT;
+	if (wrm_bt_flg != 0U) {
+		info.warm_boot_flag = DDR_WARM_BOOT;
+	} else {
+		info.warm_boot_flag = DDR_COLD_BOOT;
+	}
+#endif
+
+	dram_size = dram_init(&info
+#if defined(NXP_HAS_CCN504) || defined(NXP_HAS_CCN508)
+		    , NXP_CCN_HN_F_0_ADDR
+#endif
+		    );
+
+
+	if (dram_size < 0) {
+		ERROR("DDR init failed.\n");
+	}
+
+	return dram_size;
+}
diff --git a/plat/nxp/soc-lx2160a/lx2160aqds/plat_def.h b/plat/nxp/soc-lx2160a/lx2160aqds/plat_def.h
new file mode 100644
index 0000000..f480f92
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/lx2160aqds/plat_def.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PLAT_DEF_H
+#define PLAT_DEF_H
+
+#include <arch.h>
+#include <cortex_a72.h>
+/* Required even without TBBR,
+ * to pull in the image ID defines
+ * used for the DDR PHY images.
+ */
+#include <tbbr_img_def.h>
+
+#include <policy.h>
+#include <soc.h>
+
+#if defined(IMAGE_BL31)
+#define LS_SYS_TIMCTL_BASE		0x2890000
+#define PLAT_LS_NSTIMER_FRAME_ID	0
+#define LS_CONFIG_CNTACR		1
+#endif
+
+#define NXP_SYSCLK_FREQ		100000000
+#define NXP_DDRCLK_FREQ		100000000
+
+/* UART related definition */
+#define NXP_CONSOLE_ADDR	NXP_UART_ADDR
+#define NXP_CONSOLE_BAUDRATE	115200
+
+/* Size of cacheable stacks */
+#if defined(IMAGE_BL2)
+#if defined(TRUSTED_BOARD_BOOT)
+#define PLATFORM_STACK_SIZE	0x2000
+#else
+#define PLATFORM_STACK_SIZE	0x1000
+#endif
+#elif defined(IMAGE_BL31)
+#define PLATFORM_STACK_SIZE	0x1000
+#endif
+
+/* SD block buffer */
+#define NXP_SD_BLOCK_BUF_SIZE	(0x8000)
+#define NXP_SD_BLOCK_BUF_ADDR	(NXP_OCRAM_ADDR + NXP_OCRAM_SIZE \
+				- NXP_SD_BLOCK_BUF_SIZE)
+
+#ifdef SD_BOOT
+#define BL2_LIMIT		(NXP_OCRAM_ADDR + NXP_OCRAM_SIZE \
+				- NXP_SD_BLOCK_BUF_SIZE)
+#else
+#define BL2_LIMIT		(NXP_OCRAM_ADDR + NXP_OCRAM_SIZE)
+#endif
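+/*
+ * Net effect (for reference): the last 32 KiB (NXP_SD_BLOCK_BUF_SIZE) of
+ * OCRAM serve as the SD/MMC DMA bounce buffer, and BL2_LIMIT above is
+ * pulled in by the same amount when booting from SD.
+ */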
+
+/* IO defines as needed by IO driver framework */
+#define MAX_IO_DEVICES		4
+#define MAX_IO_BLOCK_DEVICES	1
+#define MAX_IO_HANDLES		4
+
+#define PHY_GEN2_FW_IMAGE_BUFFER	(NXP_OCRAM_ADDR + CSF_HDR_SZ)
+
+/*
+ * FIP image defines - offset at which the FIP image is present.
+ * The image includes BL31, BL33 and, optionally, BL32.
+ */
+#ifdef POLICY_FUSE_PROVISION
+#define MAX_FIP_DEVICES		3
+#endif
+
+#ifndef MAX_FIP_DEVICES
+#define MAX_FIP_DEVICES		2
+#endif
+
+/*
+ * ID of the secure physical generic timer interrupt used by the BL32.
+ */
+#define BL32_IRQ_SEC_PHY_TIMER	29
+
+#define BL31_WDOG_SEC		89
+
+#define BL31_NS_WDOG_WS1	108
+
+/*
+ * Define properties of Group 1 Secure and Group 0 interrupts as per GICv3
+ * terminology. On a GICv2 system or mode, the lists will be merged and treated
+ * as Group 0 interrupts.
+ */
+#define PLAT_LS_G1S_IRQ_PROPS(grp) \
+	INTR_PROP_DESC(BL32_IRQ_SEC_PHY_TIMER, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE)
+
+/* SGI 15 and Secure watchdog interrupts assigned to Group 0 */
+#define NXP_IRQ_SEC_SGI_7		15
+
+#define PLAT_LS_G0_IRQ_PROPS(grp)	\
+	INTR_PROP_DESC(BL31_WDOG_SEC, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(BL31_NS_WDOG_WS1, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(NXP_IRQ_SEC_SGI_7, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL)
+#endif
diff --git a/plat/nxp/soc-lx2160a/lx2160aqds/platform.c b/plat/nxp/soc-lx2160a/lx2160aqds/platform.c
new file mode 100644
index 0000000..b00adb5
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/lx2160aqds/platform.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <plat_common.h>
+
+#pragma weak board_enable_povdd
+#pragma weak board_disable_povdd
+
+bool board_enable_povdd(void)
+{
+#ifdef CONFIG_POVDD_ENABLE
+	return true;
+#else
+	return false;
+#endif
+}
+
+bool board_disable_povdd(void)
+{
+#ifdef CONFIG_POVDD_ENABLE
+	return true;
+#else
+	return false;
+#endif
+}
diff --git a/plat/nxp/soc-lx2160a/lx2160aqds/platform.mk b/plat/nxp/soc-lx2160a/lx2160aqds/platform.mk
new file mode 100644
index 0000000..5b95222
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/lx2160aqds/platform.mk
@@ -0,0 +1,91 @@
+#
+# Copyright 2018-2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# board-specific build parameters
+
+BOOT_MODE	?= 	flexspi_nor
+BOARD		?=	lx2160aqds
+POVDD_ENABLE	:=	no
+NXP_COINED_BB	:=	no
+
+ # DDR Compilation Configs
+NUM_OF_DDRC	:=	1
+DDRC_NUM_DIMM	:=	1
+DDRC_NUM_CS	:=	2
+DDR_ECC_EN	:=	yes
+ #enable address decoding feature
+DDR_ADDR_DEC	:=	yes
+APPLY_MAX_CDD	:=	yes
+
+# DDR Errata
+ERRATA_DDR_A011396	:= 1
+ERRATA_DDR_A050450	:= 1
+
+ # On-Board Flash Details
+FLASH_TYPE	:=	MT35XU512A
+XSPI_FLASH_SZ	:=	0x10000000
+NXP_XSPI_NOR_UNIT_SIZE		:=	0x20000
+BL2_BIN_XSPI_NOR_END_ADDRESS	:=	0x100000
+# CONFIG_FSPI_ERASE_4K is required for 4K sector-size erases. The
+# config is retained for future use cases.
+FSPI_ERASE_4K	:= 0
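+# Illustrative override (not a default shipped here): pass FSPI_ERASE_4K=1
+# on the make command line, e.g. "make PLAT=lx2160aqds ... FSPI_ERASE_4K=1",
+# to build with 4K erase support.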
+
+ # Platform specific features.
+WARM_BOOT	:=	yes
+
+ # Adding platform specific defines
+
+$(eval $(call add_define_val,BOARD,'"${BOARD}"'))
+
+ifeq (${POVDD_ENABLE},yes)
+$(eval $(call add_define,CONFIG_POVDD_ENABLE))
+endif
+
+ifneq (${FLASH_TYPE},)
+$(eval $(call add_define,CONFIG_${FLASH_TYPE}))
+endif
+
+ifneq (${XSPI_FLASH_SZ},)
+$(eval $(call add_define_val,NXP_FLEXSPI_FLASH_SIZE,${XSPI_FLASH_SZ}))
+endif
+
+ifneq (${FSPI_ERASE_4K},)
+$(eval $(call add_define_val,CONFIG_FSPI_ERASE_4K,${FSPI_ERASE_4K}))
+endif
+
+ifneq (${NUM_OF_DDRC},)
+$(eval $(call add_define_val,NUM_OF_DDRC,${NUM_OF_DDRC}))
+endif
+
+ifneq (${DDRC_NUM_DIMM},)
+$(eval $(call add_define_val,DDRC_NUM_DIMM,${DDRC_NUM_DIMM}))
+endif
+
+ifneq (${DDRC_NUM_CS},)
+$(eval $(call add_define_val,DDRC_NUM_CS,${DDRC_NUM_CS}))
+endif
+
+ifeq (${DDR_ADDR_DEC},yes)
+$(eval $(call add_define,CONFIG_DDR_ADDR_DEC))
+endif
+
+ifeq (${DDR_ECC_EN},yes)
+$(eval $(call add_define,CONFIG_DDR_ECC_EN))
+endif
+
+# Platform can control the base address for non-volatile storage.
+#$(eval $(call add_define_val,NV_STORAGE_BASE_ADDR,'${BL2_BIN_XSPI_NOR_END_ADDRESS} - 2 * ${NXP_XSPI_NOR_UNIT_SIZE}'))
+
+ifeq (${WARM_BOOT},yes)
+$(eval $(call add_define_val,PHY_TRAINING_REGS_ON_FLASH,'${BL2_BIN_XSPI_NOR_END_ADDRESS} - ${NXP_XSPI_NOR_UNIT_SIZE}'))
+endif
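+# With the defaults above, PHY_TRAINING_REGS_ON_FLASH evaluates to
+# 0x100000 - 0x20000 = 0xE0000 in the FlexSPI NOR flash.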
+
+ # Adding Platform files build files
+BL2_SOURCES	+=	${BOARD_PATH}/ddr_init.c\
+			${BOARD_PATH}/platform.c
+
+ # Adding SoC build info
+include plat/nxp/soc-lx2160a/soc.mk
diff --git a/plat/nxp/soc-lx2160a/lx2160aqds/platform_def.h b/plat/nxp/soc-lx2160a/lx2160aqds/platform_def.h
new file mode 100644
index 0000000..5fa774e
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/lx2160aqds/platform_def.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PLATFORM_DEF_H
+#define PLATFORM_DEF_H
+
+#include "plat_def.h"
+#include "plat_default_def.h"
+
+#endif
diff --git a/plat/nxp/soc-lx2160a/lx2160aqds/policy.h b/plat/nxp/soc-lx2160a/lx2160aqds/policy.h
new file mode 100644
index 0000000..05d23e2
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/lx2160aqds/policy.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2018-2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef	POLICY_H
+#define	POLICY_H
+
+/* Following defines affect the PLATFORM SECURITY POLICY */
+
+/* set this to 0x0 if the platform is not using/responding to ECC errors
+ * set this to 0x1 if ECC is being used (we have to do some init)
+ */
+#define	POLICY_USING_ECC	0x0
+
+/* Set this to 0x0 to leave the default SMMU page size in sACR
+ * Set this to 0x1 to change the SMMU page size to 64K
+ */
+#define	POLICY_SMMU_PAGESZ_64K	0x1
+
+/*
+ * POLICY_PERF_WRIOP = 0 : No Performance enhancement for WRIOP RN-I
+ * POLICY_PERF_WRIOP = 1 : Performance enhancement for WRIOP RN-I = 7
+ * POLICY_PERF_WRIOP = 2 : Performance enhancement for WRIOP RN-I = 23
+ */
+#define	POLICY_PERF_WRIOP	0
+
+/*
+ * set this to '1' if the debug clocks need to remain enabled during
+ * system entry to low-power (LPM20) - this should only be necessary
+ * for testing and NEVER set for normal production
+ */
+#define	POLICY_DEBUG_ENABLE	0
+
+
+#endif /* POLICY_H */
diff --git a/plat/nxp/soc-lx2160a/lx2160ardb/ddr_init.c b/plat/nxp/soc-lx2160a/lx2160ardb/ddr_init.c
new file mode 100644
index 0000000..8669b1d
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/lx2160ardb/ddr_init.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <ddr.h>
+#include <lib/utils.h>
+#include <load_img.h>
+
+#include "plat_common.h"
+#include <platform_def.h>
+
+#ifdef CONFIG_STATIC_DDR
+const struct ddr_cfg_regs static_1600 = {
+	.cs[0].config = U(0xA8050322),
+	.cs[1].config = U(0x80000322),
+	.cs[0].bnds = U(0x3FF),
+	.cs[1].bnds = U(0x3FF),
+	.sdram_cfg[0] = U(0xE5044000),
+	.sdram_cfg[1] = U(0x401011),
+	.timing_cfg[0] = U(0xFF550018),
+	.timing_cfg[1] = U(0xBAB48C42),
+	.timing_cfg[2] = U(0x48C111),
+	.timing_cfg[3] = U(0x10C1000),
+	.timing_cfg[4] = U(0x2),
+	.timing_cfg[5] = U(0x3401400),
+	.timing_cfg[7] = U(0x13300000),
+	.timing_cfg[8] = U(0x2114600),
+	.sdram_mode[0] = U(0x6010210),
+	.sdram_mode[8] = U(0x500),
+	.sdram_mode[9] = U(0x4240000),
+	.interval = U(0x18600000),
+	.data_init = U(0xDEADBEEF),
+	.zq_cntl = U(0x8A090705),
+};
+
+const struct dimm_params static_dimm = {
+	.rdimm = U(0),
+	.primary_sdram_width = U(64),
+	.ec_sdram_width = U(8),
+	.n_ranks = U(2),
+	.device_width = U(8),
+	.mirrored_dimm = U(1),
+};
+
+/* Sample code using two UDIMM MT18ASF1G72AZ-2G6B1, on each DDR controller */
+unsigned long long board_static_ddr(struct ddr_info *priv)
+{
+	memcpy(&priv->ddr_reg, &static_1600, sizeof(static_1600));
+	memcpy(&priv->dimm, &static_dimm, sizeof(static_dimm));
+	priv->conf.cs_on_dimm[0] = 0x3;
+	ddr_board_options(priv);
+	compute_ddr_phy(priv);
+
+	return ULL(0x400000000);
+}
+
+#elif defined(CONFIG_DDR_NODIMM)
+/*
+ * Sample code to bypass reading SPD. This is a sample, not recommended
+ * for boards with slots. DDR model number: UDIMM MT18ASF1G72AZ-2G6B1.
+ */
+
+const struct dimm_params ddr_raw_timing = {
+	.n_ranks = U(2),
+	.rank_density = U(4294967296u),
+	.capacity = U(8589934592u),
+	.primary_sdram_width = U(64),
+	.ec_sdram_width = U(8),
+	.device_width = U(8),
+	.die_density = U(0x4),
+	.rdimm = U(0),
+	.mirrored_dimm = U(1),
+	.n_row_addr = U(15),
+	.n_col_addr = U(10),
+	.bank_addr_bits = U(0),
+	.bank_group_bits = U(2),
+	.edc_config = U(2),
+	.burst_lengths_bitmask = U(0x0c),
+	.tckmin_x_ps = 750,
+	.tckmax_ps = 1600,
+	.caslat_x = U(0x00FFFC00),
+	.taa_ps = 13750,
+	.trcd_ps = 13750,
+	.trp_ps = 13750,
+	.tras_ps = 32000,
+	.trc_ps = 457500,
+	.twr_ps = 15000,
+	.trfc1_ps = 260000,
+	.trfc2_ps = 160000,
+	.trfc4_ps = 110000,
+	.tfaw_ps = 21000,
+	.trrds_ps = 3000,
+	.trrdl_ps = 4900,
+	.tccdl_ps = 5000,
+	.refresh_rate_ps = U(7800000),
+};
+
+int ddr_get_ddr_params(struct dimm_params *pdimm,
+			    struct ddr_conf *conf)
+{
+	static const char dimm_model[] = "Fixed DDR on board";
+
+	conf->dimm_in_use[0] = 1;	/* Modify accordingly */
+	memcpy(pdimm, &ddr_raw_timing, sizeof(struct dimm_params));
+	memcpy(pdimm->mpart, dimm_model, sizeof(dimm_model) - 1);
+
+	/* valid DIMM mask, change accordingly, together with dimm_on_ctlr. */
+	return 0x5;
+}
+#endif	/* CONFIG_DDR_NODIMM */
+
+int ddr_board_options(struct ddr_info *priv)
+{
+	struct memctl_opt *popts = &priv->opt;
+	const struct ddr_conf *conf = &priv->conf;
+
+	popts->vref_dimm = U(0x24);		/* range 1, 83.4% */
+	popts->rtt_override = 0;
+	popts->rtt_park = U(240);
+	popts->otf_burst_chop_en = 0;
+	popts->burst_length = U(DDR_BL8);
+	popts->trwt_override = U(1);
+	popts->bstopre = U(0);			/* auto precharge */
+	popts->addr_hash = 1;
+
+	/* Set ODT impedance on PHY side */
+	switch (conf->cs_on_dimm[1]) {
+	case 0xc:	/* Two slots dual rank */
+	case 0x4:	/* Two slots single rank, not valid for interleaving */
+		popts->trwt = U(0xf);
+		popts->twrt = U(0x7);
+		popts->trrt = U(0x7);
+		popts->twwt = U(0x7);
+		popts->vref_phy = U(0x6B);	/* 83.6% */
+		popts->odt = U(60);
+		popts->phy_tx_impedance = U(28);
+		break;
+	case 0:		/* One slot used */
+	default:
+		popts->trwt = U(0x3);
+		popts->twrt = U(0x3);
+		popts->trrt = U(0x3);
+		popts->twwt = U(0x3);
+		popts->vref_phy = U(0x60);	/* 75% */
+		popts->odt = U(48);
+		popts->phy_tx_impedance = U(28);
+		break;
+	}
+
+	return 0;
+}
+
+long long init_ddr(void)
+{
+	int spd_addr[] = { 0x51, 0x52, 0x53, 0x54 };
+	struct ddr_info info;
+	struct sysinfo sys;
+	long long dram_size;
+
+	zeromem(&sys, sizeof(sys));
+	if (get_clocks(&sys) != 0) {
+		ERROR("System clocks are not set\n");
+		panic();
+	}
+	debug("platform clock %lu\n", sys.freq_platform);
+	debug("DDR PLL1 %lu\n", sys.freq_ddr_pll0);
+	debug("DDR PLL2 %lu\n", sys.freq_ddr_pll1);
+
+	zeromem(&info, sizeof(info));
+
+	/* Set two DDRC. Unused DDRC will be removed automatically. */
+	info.num_ctlrs = NUM_OF_DDRC;
+	info.spd_addr = spd_addr;
+	info.ddr[0] = (void *)NXP_DDR_ADDR;
+	info.ddr[1] = (void *)NXP_DDR2_ADDR;
+	info.phy[0] = (void *)NXP_DDR_PHY1_ADDR;
+	info.phy[1] = (void *)NXP_DDR_PHY2_ADDR;
+	info.clk = get_ddr_freq(&sys, 0);
+	info.img_loadr = load_img;
+	info.phy_gen2_fw_img_buf = PHY_GEN2_FW_IMAGE_BUFFER;
+	if (info.clk == 0) {
+		info.clk = get_ddr_freq(&sys, 1);
+	}
+	info.dimm_on_ctlr = DDRC_NUM_DIMM;
+
+	info.warm_boot_flag = DDR_WRM_BOOT_NT_SUPPORTED;
+
+	dram_size = dram_init(&info
+#if defined(NXP_HAS_CCN504) || defined(NXP_HAS_CCN508)
+		    , NXP_CCN_HN_F_0_ADDR
+#endif
+		    );
+
+
+	if (dram_size < 0) {
+		ERROR("DDR init failed.\n");
+	}
+
+	return dram_size;
+}
diff --git a/plat/nxp/soc-lx2160a/lx2160ardb/plat_def.h b/plat/nxp/soc-lx2160a/lx2160ardb/plat_def.h
new file mode 100644
index 0000000..02f51e7
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/lx2160ardb/plat_def.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PLAT_DEF_H
+#define PLAT_DEF_H
+
+#include <arch.h>
+#include <cortex_a72.h>
+/* Required even without TBBR,
+ * to pull in the image ID defines
+ * used for the DDR PHY images.
+ */
+#include <tbbr_img_def.h>
+
+#include <policy.h>
+#include <soc.h>
+
+#if defined(IMAGE_BL31)
+#define LS_SYS_TIMCTL_BASE		0x2890000
+#define PLAT_LS_NSTIMER_FRAME_ID	0
+#define LS_CONFIG_CNTACR		1
+#endif
+
+#define NXP_SYSCLK_FREQ		100000000
+#define NXP_DDRCLK_FREQ		100000000
+
+/* UART related definition */
+#define NXP_CONSOLE_ADDR	NXP_UART_ADDR
+#define NXP_CONSOLE_BAUDRATE	115200
+
+/* Size of cacheable stacks */
+#if defined(IMAGE_BL2)
+#if defined(TRUSTED_BOARD_BOOT)
+#define PLATFORM_STACK_SIZE	0x2000
+#else
+#define PLATFORM_STACK_SIZE	0x1000
+#endif
+#elif defined(IMAGE_BL31)
+#define PLATFORM_STACK_SIZE	0x1000
+#endif
+
+/* SD block buffer */
+#define NXP_SD_BLOCK_BUF_SIZE	(0x8000)
+#define NXP_SD_BLOCK_BUF_ADDR	(NXP_OCRAM_ADDR + NXP_OCRAM_SIZE \
+				- NXP_SD_BLOCK_BUF_SIZE)
+
+#ifdef SD_BOOT
+#define BL2_LIMIT		(NXP_OCRAM_ADDR + NXP_OCRAM_SIZE \
+				- NXP_SD_BLOCK_BUF_SIZE)
+#else
+#define BL2_LIMIT		(NXP_OCRAM_ADDR + NXP_OCRAM_SIZE)
+#endif
+
+/* IO defines as needed by IO driver framework */
+#define MAX_IO_DEVICES		4
+#define MAX_IO_BLOCK_DEVICES	1
+#define MAX_IO_HANDLES		4
+
+#define PHY_GEN2_FW_IMAGE_BUFFER	(NXP_OCRAM_ADDR + CSF_HDR_SZ)
+
+/*
+ * FIP image defines - offset at which the FIP image is present.
+ * The image includes BL31, BL33 and, optionally, BL32.
+ */
+#ifdef POLICY_FUSE_PROVISION
+#define MAX_FIP_DEVICES		3
+#endif
+
+#ifndef MAX_FIP_DEVICES
+#define MAX_FIP_DEVICES		2
+#endif
+
+/*
+ * ID of the secure physical generic timer interrupt used by the BL32.
+ */
+#define BL32_IRQ_SEC_PHY_TIMER	29
+
+#define BL31_WDOG_SEC		89
+
+#define BL31_NS_WDOG_WS1	108
+
+/*
+ * Define properties of Group 1 Secure and Group 0 interrupts as per GICv3
+ * terminology. On a GICv2 system or mode, the lists will be merged and treated
+ * as Group 0 interrupts.
+ */
+#define PLAT_LS_G1S_IRQ_PROPS(grp) \
+	INTR_PROP_DESC(BL32_IRQ_SEC_PHY_TIMER, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE)
+
+/* SGI 15 and Secure watchdog interrupts assigned to Group 0 */
+#define NXP_IRQ_SEC_SGI_7		15
+
+#define PLAT_LS_G0_IRQ_PROPS(grp)	\
+	INTR_PROP_DESC(BL31_WDOG_SEC, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(BL31_NS_WDOG_WS1, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(NXP_IRQ_SEC_SGI_7, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL)
+#endif
diff --git a/plat/nxp/soc-lx2160a/lx2160ardb/platform.c b/plat/nxp/soc-lx2160a/lx2160ardb/platform.c
new file mode 100644
index 0000000..b00adb5
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/lx2160ardb/platform.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <plat_common.h>
+
+#pragma weak board_enable_povdd
+#pragma weak board_disable_povdd
+
+bool board_enable_povdd(void)
+{
+#ifdef CONFIG_POVDD_ENABLE
+	return true;
+#else
+	return false;
+#endif
+}
+
+bool board_disable_povdd(void)
+{
+#ifdef CONFIG_POVDD_ENABLE
+	return true;
+#else
+	return false;
+#endif
+}
diff --git a/plat/nxp/soc-lx2160a/lx2160ardb/platform.mk b/plat/nxp/soc-lx2160a/lx2160ardb/platform.mk
new file mode 100644
index 0000000..e56fbf1
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/lx2160ardb/platform.mk
@@ -0,0 +1,91 @@
+#
+# Copyright 2021 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# board-specific build parameters
+
+BOOT_MODE	?= 	flexspi_nor
+BOARD		?=	lx2160ardb
+POVDD_ENABLE	:=	no
+NXP_COINED_BB	:=	no
+
+ # DDR Compilation Configs
+NUM_OF_DDRC	:=	2
+DDRC_NUM_DIMM	:=	2
+DDRC_NUM_CS	:=	4
+DDR_ECC_EN	:=	yes
+ #enable address decoding feature
+DDR_ADDR_DEC	:=	yes
+APPLY_MAX_CDD	:=	yes
+
+# DDR Errata
+ERRATA_DDR_A011396	:= 1
+ERRATA_DDR_A050450	:= 1
+
+ # On-Board Flash Details
+FLASH_TYPE	:=	MT35XU512A
+XSPI_FLASH_SZ	:=	0x10000000
+NXP_XSPI_NOR_UNIT_SIZE		:=	0x20000
+BL2_BIN_XSPI_NOR_END_ADDRESS	:=	0x100000
+# CONFIG_FSPI_ERASE_4K is required for 4K sector-size erases. The
+# config is retained for future use cases.
+FSPI_ERASE_4K	:= 0
+
+ # Platform specific features.
+WARM_BOOT	:=	no
+
+ # Adding platform specific defines
+
+$(eval $(call add_define_val,BOARD,'"${BOARD}"'))
+
+ifeq (${POVDD_ENABLE},yes)
+$(eval $(call add_define,CONFIG_POVDD_ENABLE))
+endif
+
+ifneq (${FLASH_TYPE},)
+$(eval $(call add_define,CONFIG_${FLASH_TYPE}))
+endif
+
+ifneq (${XSPI_FLASH_SZ},)
+$(eval $(call add_define_val,NXP_FLEXSPI_FLASH_SIZE,${XSPI_FLASH_SZ}))
+endif
+
+ifneq (${FSPI_ERASE_4K},)
+$(eval $(call add_define_val,CONFIG_FSPI_ERASE_4K,${FSPI_ERASE_4K}))
+endif
+
+ifneq (${NUM_OF_DDRC},)
+$(eval $(call add_define_val,NUM_OF_DDRC,${NUM_OF_DDRC}))
+endif
+
+ifneq (${DDRC_NUM_DIMM},)
+$(eval $(call add_define_val,DDRC_NUM_DIMM,${DDRC_NUM_DIMM}))
+endif
+
+ifneq (${DDRC_NUM_CS},)
+$(eval $(call add_define_val,DDRC_NUM_CS,${DDRC_NUM_CS}))
+endif
+
+ifeq (${DDR_ADDR_DEC},yes)
+$(eval $(call add_define,CONFIG_DDR_ADDR_DEC))
+endif
+
+ifeq (${DDR_ECC_EN},yes)
+$(eval $(call add_define,CONFIG_DDR_ECC_EN))
+endif
+
+# Platform can control the base address for non-volatile storage.
+#$(eval $(call add_define_val,NV_STORAGE_BASE_ADDR,'${BL2_BIN_XSPI_NOR_END_ADDRESS} - 2 * ${NXP_XSPI_NOR_UNIT_SIZE}'))
+
+ifeq (${WARM_BOOT},yes)
+$(eval $(call add_define_val,PHY_TRAINING_REGS_ON_FLASH,'${BL2_BIN_XSPI_NOR_END_ADDRESS} - ${NXP_XSPI_NOR_UNIT_SIZE}'))
+endif
+
+ # Adding Platform files build files
+BL2_SOURCES	+=	${BOARD_PATH}/ddr_init.c\
+			${BOARD_PATH}/platform.c
+
+ # Adding SoC build info
+include plat/nxp/soc-lx2160a/soc.mk
diff --git a/plat/nxp/soc-lx2160a/lx2160ardb/platform_def.h b/plat/nxp/soc-lx2160a/lx2160ardb/platform_def.h
new file mode 100644
index 0000000..6660998
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/lx2160ardb/platform_def.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PLATFORM_DEF_H
+#define PLATFORM_DEF_H
+
+#include "plat_def.h"
+#include "plat_default_def.h"
+
+#endif
diff --git a/plat/nxp/soc-lx2160a/lx2160ardb/policy.h b/plat/nxp/soc-lx2160a/lx2160ardb/policy.h
new file mode 100644
index 0000000..19ad6db
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/lx2160ardb/policy.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef POLICY_H
+#define	POLICY_H
+
+/* Following defines affect the PLATFORM SECURITY POLICY */
+
+/* set this to 0x0 if the platform is not using/responding to ECC errors
+ * set this to 0x1 if ECC is being used (we have to do some init)
+ */
+#define  POLICY_USING_ECC 0x0
+
+/* Set this to 0x0 to leave the default SMMU page size in sACR
+ * Set this to 0x1 to change the SMMU page size to 64K
+ */
+#define POLICY_SMMU_PAGESZ_64K 0x1
+
+/*
+ * POLICY_PERF_WRIOP = 0 : No Performance enhancement for WRIOP RN-I
+ * POLICY_PERF_WRIOP = 1 : Performance enhancement for WRIOP RN-I = 7
+ * POLICY_PERF_WRIOP = 2 : Performance enhancement for WRIOP RN-I = 23
+ */
+#define POLICY_PERF_WRIOP 0
+
+/*
+ * set this to '1' if the debug clocks need to remain enabled during
+ * system entry to low-power (LPM20) - this should only be necessary
+ * for testing and NEVER set for normal production
+ */
+#define POLICY_DEBUG_ENABLE 0
+
+
+#endif /* POLICY_H */
diff --git a/plat/nxp/soc-lx2160a/lx2162aqds/ddr_init.c b/plat/nxp/soc-lx2160a/lx2162aqds/ddr_init.c
new file mode 100644
index 0000000..73bcc93
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/lx2162aqds/ddr_init.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright 2018-2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <ddr.h>
+#include <lib/utils.h>
+#include <load_img.h>
+
+#include "plat_common.h"
+#include <platform_def.h>
+
+#ifdef CONFIG_STATIC_DDR
+
+const struct ddr_cfg_regs static_3200 = {
+	.cs[0].bnds = U(0x03FF),
+	.cs[1].bnds = U(0x03FF),
+	.cs[0].config = U(0x80050422),
+	.cs[1].config = U(0x80000422),
+	.cs[2].bnds = U(0x00),
+	.cs[3].bnds = U(0x00),
+	.cs[2].config = U(0x00),
+	.cs[3].config = U(0x00),
+	.timing_cfg[0] = U(0xFFAA0018),
+	.timing_cfg[1] = U(0x646A8844),
+	.timing_cfg[2] = U(0x00058022),
+	.timing_cfg[3] = U(0x13622100),
+	.timing_cfg[4] = U(0x02),
+	.timing_cfg[5] = U(0x07401400),
+	.timing_cfg[7] = U(0x3BB00000),
+	.timing_cfg[8] = U(0x0944AC00),
+	.sdram_cfg[0] = U(0x65044008),
+	.sdram_cfg[1] = U(0x00401011),
+	.sdram_cfg[2] = U(0x00),
+	.sdram_mode[0] = U(0x06010C50),
+	.sdram_mode[1] = U(0x00280400),
+	.sdram_mode[2] = U(0x00),
+	.sdram_mode[3] = U(0x00),
+	.sdram_mode[4] = U(0x00),
+	.sdram_mode[5] = U(0x00),
+	.sdram_mode[6] = U(0x00),
+	.sdram_mode[7] = U(0x00),
+	.sdram_mode[8] = U(0x0500),
+	.sdram_mode[9] = U(0x10240000),
+	.sdram_mode[10] = U(0x00),
+	.sdram_mode[11] = U(0x00),
+	.sdram_mode[12] = U(0x00),
+	.sdram_mode[13] = U(0x00),
+	.sdram_mode[14] = U(0x00),
+	.sdram_mode[15] = U(0x00),
+	.md_cntl = U(0x00),
+	.interval = U(0x30C00000),
+	.data_init = U(0xDEADBEEF),
+	.init_addr = U(0x00),
+	.zq_cntl = U(0x8A090705),
+	.sdram_rcw[0] = U(0x00),
+	.sdram_rcw[1] = U(0x00),
+	.sdram_rcw[2] = U(0x00),
+	.sdram_rcw[3] = U(0x00),
+	.sdram_rcw[4] = U(0x00),
+	.sdram_rcw[5] = U(0x00),
+	.err_disable = U(0x00),
+	.err_int_en = U(0x00),
+};
+
+const struct ddr_cfg_regs static_2900 = {
+	.cs[0].bnds = U(0x03FF),
+	.cs[1].bnds = U(0x03FF),
+	.cs[0].config = U(0x80050422),
+	.cs[1].config = U(0x80000422),
+	.cs[2].bnds = U(0x00),
+	.cs[3].bnds = U(0x00),
+	.cs[2].config = U(0x00),
+	.cs[3].config = U(0x00),
+	.timing_cfg[0] = U(0xFF990018),
+	.timing_cfg[1] = U(0x4F4A4844),
+	.timing_cfg[2] = U(0x0005601F),
+	.timing_cfg[3] = U(0x125F2100),
+	.timing_cfg[4] = U(0x02),
+	.timing_cfg[5] = U(0x07401400),
+	.timing_cfg[7] = U(0x3AA00000),
+	.timing_cfg[8] = U(0x09449B00),
+	.sdram_cfg[0] = U(0x65044008),
+	.sdram_cfg[1] = U(0x00401011),
+	.sdram_cfg[2] = U(0x00),
+	.sdram_mode[0] = U(0x06010C50),
+	.sdram_mode[1] = U(0x00280400),
+	.sdram_mode[2] = U(0x00),
+	.sdram_mode[3] = U(0x00),
+	.sdram_mode[4] = U(0x00),
+	.sdram_mode[5] = U(0x00),
+	.sdram_mode[6] = U(0x00),
+	.sdram_mode[7] = U(0x00),
+	.sdram_mode[8] = U(0x0500),
+	.sdram_mode[9] = U(0x10240000),
+	.sdram_mode[10] = U(0x00),
+	.sdram_mode[11] = U(0x00),
+	.sdram_mode[12] = U(0x00),
+	.sdram_mode[13] = U(0x00),
+	.sdram_mode[14] = U(0x00),
+	.sdram_mode[15] = U(0x00),
+	.md_cntl = U(0x00),
+	.interval = U(0x2C2E0000),
+	.data_init = U(0xDEADBEEF),
+	.init_addr = U(0x00),
+	.zq_cntl = U(0x8A090705),
+	.sdram_rcw[0] = U(0x00),
+	.sdram_rcw[1] = U(0x00),
+	.sdram_rcw[2] = U(0x00),
+	.sdram_rcw[3] = U(0x00),
+	.sdram_rcw[4] = U(0x00),
+	.sdram_rcw[5] = U(0x00),
+	.err_disable = U(0x00),
+	.err_int_en = U(0x00),
+};
+
+const struct ddr_cfg_regs static_2600 = {
+	.cs[0].bnds = U(0x03FF),
+	.cs[1].bnds = U(0x03FF),
+	.cs[0].config = U(0x80050422),
+	.cs[1].config = U(0x80000422),
+	.cs[2].bnds = U(0x00),
+	.cs[3].bnds = U(0x00),
+	.cs[2].config = U(0x00),
+	.cs[3].config = U(0x00),
+	.timing_cfg[0] = U(0xFF880018),
+	.timing_cfg[1] = U(0x2A24F444),
+	.timing_cfg[2] = U(0x007141DC),
+	.timing_cfg[3] = U(0x125B2100),
+	.timing_cfg[4] = U(0x02),
+	.timing_cfg[5] = U(0x06401400),
+	.timing_cfg[7] = U(0x28800000),
+	.timing_cfg[8] = U(0x07338A00),
+	.sdram_cfg[0] = U(0x65044008),
+	.sdram_cfg[1] = U(0x00401011),
+	.sdram_cfg[2] = U(0x00),
+	.sdram_mode[0] = U(0x06010A70),
+	.sdram_mode[1] = U(0x00200400),
+	.sdram_mode[2] = U(0x00),
+	.sdram_mode[3] = U(0x00),
+	.sdram_mode[4] = U(0x00),
+	.sdram_mode[5] = U(0x00),
+	.sdram_mode[6] = U(0x00),
+	.sdram_mode[7] = U(0x00),
+	.sdram_mode[8] = U(0x0500),
+	.sdram_mode[9] = U(0x0C240000),
+	.sdram_mode[10] = U(0x00),
+	.sdram_mode[11] = U(0x00),
+	.sdram_mode[12] = U(0x00),
+	.sdram_mode[13] = U(0x00),
+	.sdram_mode[14] = U(0x00),
+	.sdram_mode[15] = U(0x00),
+	.md_cntl = U(0x00),
+	.interval = U(0x279C0000),
+	.data_init = U(0xDEADBEEF),
+	.init_addr = U(0x00),
+	.zq_cntl = U(0x8A090705),
+	.sdram_rcw[0] = U(0x00),
+	.sdram_rcw[1] = U(0x00),
+	.sdram_rcw[2] = U(0x00),
+	.sdram_rcw[3] = U(0x00),
+	.sdram_rcw[4] = U(0x00),
+	.sdram_rcw[5] = U(0x00),
+	.err_disable = U(0x00),
+	.err_int_en = U(0x00),
+};
+
+const struct dimm_params static_dimm = {
+	.rdimm = 0U,
+	.primary_sdram_width = 64U,
+	.ec_sdram_width = 8U,
+	.n_ranks = 2U,
+	.device_width = 8U,
+	.mirrored_dimm = 1U,
+};
+
+/* Sample code using two UDIMM MT18ASF1G72AZ-2G6B1, on each DDR controller */
+unsigned long long board_static_ddr(struct ddr_info *priv)
+{
+	memcpy(&priv->ddr_reg, &static_2900, sizeof(static_2900));
+	memcpy(&priv->dimm, &static_dimm, sizeof(static_dimm));
+	priv->conf.cs_on_dimm[0] = 0x3;
+	ddr_board_options(priv);
+	compute_ddr_phy(priv);
+
+	return ULL(0x400000000);
+}
+
+#elif defined(CONFIG_DDR_NODIMM)
+/*
+ * Sample code to bypass reading SPD. This is a sample, not recommended
+ * for boards with slots. DDR model number: UDIMM MT18ASF1G72AZ-2G6B1.
+ */
+const struct dimm_params ddr_raw_timing = {
+	.n_ranks = 2U,
+	.rank_density = U(0x200000000),
+	.capacity = U(0x400000000),
+	.primary_sdram_width = 64U,
+	.ec_sdram_width = 8U,
+	.device_width = 8U,
+	.die_density = U(0x5),
+	.rdimm = 0U,
+	.mirrored_dimm = 1U,
+	.n_row_addr = 16U,
+	.n_col_addr = 10U,
+	.bank_addr_bits = 0U,
+	.bank_group_bits = 2U,
+	.edc_config = 2U,
+	.burst_lengths_bitmask = U(0x0c),
+	.tckmin_x_ps = 625,
+	.tckmax_ps = 1600,
+	.caslat_x = U(0x15FFFC00),
+	.taa_ps = 13750,
+	.trcd_ps = 13750,
+	.trp_ps = 13750,
+	.tras_ps = 32000,
+	.trc_ps = 457500,
+	.twr_ps = 15000,
+	.trfc1_ps = 350000,
+	.trfc2_ps = 260000,
+	.trfc4_ps = 160000,
+	.tfaw_ps = 21000,
+	.trrds_ps = 2500,
+	.trrdl_ps = 4900,
+	.tccdl_ps = 5000,
+	.refresh_rate_ps = 7800000U,
+};
+
+int ddr_get_ddr_params(struct dimm_params *pdimm,
+		       struct ddr_conf *conf)
+{
+	static const char dimm_model[] = "Fixed DDR on board";
+
+	conf->dimm_in_use[0] = 1;	/* Modify accordingly */
+	memcpy(pdimm, &ddr_raw_timing, sizeof(struct dimm_params));
+	memcpy(pdimm->mpart, dimm_model, sizeof(dimm_model) - 1);
+
+	/* valid DIMM mask, change accordingly, together with dimm_on_ctlr. */
+	return 0x5;
+}
+#endif	/* CONFIG_DDR_NODIMM */
+
+int ddr_board_options(struct ddr_info *priv)
+{
+	struct memctl_opt *popts = &priv->opt;
+	const struct ddr_conf *conf = &priv->conf;
+
+	popts->vref_dimm = U(0x19);		/* range 1, 83.4% */
+	popts->rtt_override = 1U;
+	popts->rtt_override_value = 0x5U;	/* RTT being used as 60 ohm */
+	popts->rtt_park = 120U;
+	popts->otf_burst_chop_en = 0;
+	popts->burst_length = DDR_BL8;
+	popts->trwt_override = 1U;
+	popts->bstopre = 0U;			/* auto precharge */
+	popts->addr_hash = 1;
+
+	/* Set ODT impedance on PHY side */
+	switch (conf->cs_on_dimm[1]) {
+	case 0xc:	/* Two slots dual rank */
+	case 0x4:	/* Two slots single rank, not valid for interleaving */
+		popts->trwt = U(0xf);
+		popts->twrt = U(0x7);
+		popts->trrt = U(0x7);
+		popts->twwt = U(0x7);
+		popts->vref_phy = U(0x6B);	/* 83.6% */
+		popts->odt = 60U;
+		popts->phy_tx_impedance = 28U;
+		break;
+	case 0:		/* One slot used */
+	default:
+		popts->trwt = U(0x3);
+		popts->twrt = U(0x3);
+		popts->trrt = U(0x3);
+		popts->twwt = U(0x3);
+		popts->vref_phy = U(0x5D);		/* 72% */
+		popts->odt = 60U;
+		popts->phy_tx_impedance = 28U;
+		break;
+	}
+
+	return 0;
+}
+
+#ifdef NXP_WARM_BOOT
+long long init_ddr(uint32_t wrm_bt_flg)
+#else
+long long init_ddr(void)
+#endif
+{
+	int spd_addr[] = { 0x51, 0x52, 0x53, 0x54 };
+	struct ddr_info info;
+	struct sysinfo sys;
+	long long dram_size;
+
+	zeromem(&sys, sizeof(sys));
+	if (get_clocks(&sys) != 0) {
+		ERROR("System clocks are not set\n");
+		panic();
+	}
+	debug("platform clock %lu\n", sys.freq_platform);
+	debug("DDR PLL1 %lu\n", sys.freq_ddr_pll0);
+	debug("DDR PLL2 %lu\n", sys.freq_ddr_pll1);
+
+	zeromem(&info, sizeof(info));
+
+	/* Set two DDRC. Unused DDRC will be removed automatically. */
+	info.num_ctlrs = NUM_OF_DDRC;
+	info.spd_addr = spd_addr;
+	info.ddr[0] = (void *)NXP_DDR_ADDR;
+	info.ddr[1] = (void *)NXP_DDR2_ADDR;
+	info.phy[0] = (void *)NXP_DDR_PHY1_ADDR;
+	info.phy[1] = (void *)NXP_DDR_PHY2_ADDR;
+	info.clk = get_ddr_freq(&sys, 0);
+	info.img_loadr = load_img;
+	info.phy_gen2_fw_img_buf = PHY_GEN2_FW_IMAGE_BUFFER;
+	if (info.clk == 0) {
+		info.clk = get_ddr_freq(&sys, 1);
+	}
+	info.dimm_on_ctlr = DDRC_NUM_DIMM;
+
+	info.warm_boot_flag = DDR_WRM_BOOT_NT_SUPPORTED;
+#ifdef NXP_WARM_BOOT
+	if (wrm_bt_flg != 0) {
+		info.warm_boot_flag = DDR_WARM_BOOT;
+	} else {
+		info.warm_boot_flag = DDR_COLD_BOOT;
+	}
+#endif
+
+	dram_size = dram_init(&info
+#if defined(NXP_HAS_CCN504) || defined(NXP_HAS_CCN508)
+				    , NXP_CCN_HN_F_0_ADDR
+#endif
+			);
+
+
+	if (dram_size < 0) {
+		ERROR("DDR init failed.\n");
+	}
+
+	return dram_size;
+}
diff --git a/plat/nxp/soc-lx2160a/lx2162aqds/plat_def.h b/plat/nxp/soc-lx2160a/lx2162aqds/plat_def.h
new file mode 100644
index 0000000..de2d244
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/lx2162aqds/plat_def.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2018-2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PLAT_DEF_H
+#define PLAT_DEF_H
+
+#include <arch.h>
+#include <cortex_a72.h>
+/* Required even without TBBR,
+ * to pull in the image ID defines
+ * used for the DDR PHY images.
+ */
+#include <tbbr_img_def.h>
+
+#include <policy.h>
+#include <soc.h>
+
+#if defined(IMAGE_BL31)
+#define LS_SYS_TIMCTL_BASE		0x2890000
+#define PLAT_LS_NSTIMER_FRAME_ID	0
+#define LS_CONFIG_CNTACR		1
+#endif
+
+#define NXP_SYSCLK_FREQ		100000000
+#define NXP_DDRCLK_FREQ		100000000
+
+/* UART related definition */
+#define NXP_CONSOLE_ADDR	NXP_UART_ADDR
+#define NXP_CONSOLE_BAUDRATE	115200
+
+/* Size of cacheable stacks */
+#if defined(IMAGE_BL2)
+#if defined(TRUSTED_BOARD_BOOT)
+#define PLATFORM_STACK_SIZE	0x2000
+#else
+#define PLATFORM_STACK_SIZE	0x1000
+#endif
+#elif defined(IMAGE_BL31)
+#define PLATFORM_STACK_SIZE	0x1000
+#endif
+
+/* SD block buffer */
+#define NXP_SD_BLOCK_BUF_SIZE	(0x8000)
+#define NXP_SD_BLOCK_BUF_ADDR	(NXP_OCRAM_ADDR + NXP_OCRAM_SIZE \
+				- NXP_SD_BLOCK_BUF_SIZE)
+
+#ifdef SD_BOOT
+#define BL2_LIMIT		(NXP_OCRAM_ADDR + NXP_OCRAM_SIZE \
+				- NXP_SD_BLOCK_BUF_SIZE)
+#else
+#define BL2_LIMIT		(NXP_OCRAM_ADDR + NXP_OCRAM_SIZE)
+#endif
+
+/* IO defines as needed by IO driver framework */
+#define MAX_IO_DEVICES		4
+#define MAX_IO_BLOCK_DEVICES	1
+#define MAX_IO_HANDLES		4
+
+#define PHY_GEN2_FW_IMAGE_BUFFER	(NXP_OCRAM_ADDR + CSF_HDR_SZ)
+
+/*
+ * FIP image defines - offset at which the FIP image is present.
+ * The image includes BL31, BL33 and, optionally, BL32.
+ */
+#ifdef POLICY_FUSE_PROVISION
+#define MAX_FIP_DEVICES		3
+#endif
+
+#ifndef MAX_FIP_DEVICES
+#define MAX_FIP_DEVICES		2
+#endif
+
+/*
+ * ID of the secure physical generic timer interrupt used by the BL32.
+ */
+#define BL32_IRQ_SEC_PHY_TIMER	29
+
+#define BL31_WDOG_SEC		89
+
+#define BL31_NS_WDOG_WS1	108
+
+/*
+ * Define properties of Group 1 Secure and Group 0 interrupts as per GICv3
+ * terminology. On a GICv2 system or mode, the lists will be merged and treated
+ * as Group 0 interrupts.
+ */
+#define PLAT_LS_G1S_IRQ_PROPS(grp) \
+	INTR_PROP_DESC(BL32_IRQ_SEC_PHY_TIMER, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE)
+
+/* SGI 15 and Secure watchdog interrupts assigned to Group 0 */
+#define NXP_IRQ_SEC_SGI_7		15
+
+#define PLAT_LS_G0_IRQ_PROPS(grp)	\
+	INTR_PROP_DESC(BL31_WDOG_SEC, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(BL31_NS_WDOG_WS1, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(NXP_IRQ_SEC_SGI_7, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL)
+#endif
diff --git a/plat/nxp/soc-lx2160a/lx2162aqds/platform.c b/plat/nxp/soc-lx2160a/lx2162aqds/platform.c
new file mode 100644
index 0000000..7622cf0
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/lx2162aqds/platform.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <plat_common.h>
+
+#pragma weak board_enable_povdd
+#pragma weak board_disable_povdd
+
+bool board_enable_povdd(void)
+{
+#ifdef CONFIG_POVDD_ENABLE
+	return true;
+#else
+	return false;
+#endif
+}
+
+bool board_disable_povdd(void)
+{
+#ifdef CONFIG_POVDD_ENABLE
+	return true;
+#else
+	return false;
+#endif
+}
diff --git a/plat/nxp/soc-lx2160a/lx2162aqds/platform.mk b/plat/nxp/soc-lx2160a/lx2162aqds/platform.mk
new file mode 100644
index 0000000..fbdcd83
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/lx2162aqds/platform.mk
@@ -0,0 +1,92 @@
+#
+# Copyright 2018-2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# board-specific build parameters
+
+BOOT_MODE	?= 	flexspi_nor
+BOARD		?=	lx2162aqds
+POVDD_ENABLE	:=	no
+NXP_COINED_BB	:=	no
+
+ # DDR Compilation Configs
+NUM_OF_DDRC	:=	1
+DDRC_NUM_DIMM	:=	1
+DDRC_NUM_CS	:=	2
+DDR_ECC_EN	:=	yes
+ #enable address decoding feature
+DDR_ADDR_DEC	:=	yes
+APPLY_MAX_CDD	:=	yes
+
+# DDR Errata
+ERRATA_DDR_A011396	:= 1
+ERRATA_DDR_A050450	:= 1
+
+
+ # On-Board Flash Details
+FLASH_TYPE	:=	MT35XU512A
+XSPI_FLASH_SZ	:=	0x10000000
+NXP_XSPI_NOR_UNIT_SIZE		:=	0x20000
+BL2_BIN_XSPI_NOR_END_ADDRESS	:=	0x100000
+# CONFIG_FSPI_ERASE_4K is required for 4K sector-size erases. The
+# config is retained for future use cases.
+FSPI_ERASE_4K	:= 0
+
+ # Platform specific features.
+WARM_BOOT	:=	yes
+
+ # Adding platform specific defines
+
+$(eval $(call add_define_val,BOARD,'"${BOARD}"'))
+
+ifeq (${POVDD_ENABLE},yes)
+$(eval $(call add_define,CONFIG_POVDD_ENABLE))
+endif
+
+ifneq (${FLASH_TYPE},)
+$(eval $(call add_define,CONFIG_${FLASH_TYPE}))
+endif
+
+ifneq (${XSPI_FLASH_SZ},)
+$(eval $(call add_define_val,NXP_FLEXSPI_FLASH_SIZE,${XSPI_FLASH_SZ}))
+endif
+
+ifneq (${FSPI_ERASE_4K},)
+$(eval $(call add_define_val,CONFIG_FSPI_ERASE_4K,${FSPI_ERASE_4K}))
+endif
+
+ifneq (${NUM_OF_DDRC},)
+$(eval $(call add_define_val,NUM_OF_DDRC,${NUM_OF_DDRC}))
+endif
+
+ifneq (${DDRC_NUM_DIMM},)
+$(eval $(call add_define_val,DDRC_NUM_DIMM,${DDRC_NUM_DIMM}))
+endif
+
+ifneq (${DDRC_NUM_CS},)
+$(eval $(call add_define_val,DDRC_NUM_CS,${DDRC_NUM_CS}))
+endif
+
+ifeq (${DDR_ADDR_DEC},yes)
+$(eval $(call add_define,CONFIG_DDR_ADDR_DEC))
+endif
+
+ifeq (${DDR_ECC_EN},yes)
+$(eval $(call add_define,CONFIG_DDR_ECC_EN))
+endif
+
+# Platform can control the base address for non-volatile storage.
+#$(eval $(call add_define_val,NV_STORAGE_BASE_ADDR,'${BL2_BIN_XSPI_NOR_END_ADDRESS} - 2 * ${NXP_XSPI_NOR_UNIT_SIZE}'))
+
+ifeq (${WARM_BOOT},yes)
+$(eval $(call add_define_val,PHY_TRAINING_REGS_ON_FLASH,'${BL2_BIN_XSPI_NOR_END_ADDRESS} - ${NXP_XSPI_NOR_UNIT_SIZE}'))
+endif
+
+ # Adding Platform files build files
+BL2_SOURCES	+=	${BOARD_PATH}/ddr_init.c\
+			${BOARD_PATH}/platform.c
+
+ # Adding SoC build info
+include plat/nxp/soc-lx2160a/soc.mk
diff --git a/plat/nxp/soc-lx2160a/lx2162aqds/platform_def.h b/plat/nxp/soc-lx2160a/lx2162aqds/platform_def.h
new file mode 100644
index 0000000..5fa774e
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/lx2162aqds/platform_def.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PLATFORM_DEF_H
+#define PLATFORM_DEF_H
+
+#include "plat_def.h"
+#include "plat_default_def.h"
+
+#endif
diff --git a/plat/nxp/soc-lx2160a/lx2162aqds/policy.h b/plat/nxp/soc-lx2160a/lx2162aqds/policy.h
new file mode 100644
index 0000000..1095f38
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/lx2162aqds/policy.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef POLICY_H
+#define	POLICY_H
+
+/* Following defines affect the PLATFORM SECURITY POLICY */
+
+/* set this to 0x0 if the platform is not using/responding to ECC errors
+ * set this to 0x1 if ECC is being used (we have to do some init)
+ */
+#define  POLICY_USING_ECC 0x0
+
+/* Set this to 0x0 to leave the default SMMU page size in sACR
+ * Set this to 0x1 to change the SMMU page size to 64K
+ */
+#define POLICY_SMMU_PAGESZ_64K 0x1
+
+/*
+ * POLICY_PERF_WRIOP = 0 : No Performance enhancement for WRIOP RN-I
+ * POLICY_PERF_WRIOP = 1 : Performance enhancement for WRIOP RN-I = 7
+ * POLICY_PERF_WRIOP = 2 : Performance enhancement for WRIOP RN-I = 23
+ */
+#define POLICY_PERF_WRIOP 0
+
+/*
+ * set this to '1' if the debug clocks need to remain enabled during
+ * system entry to low-power (LPM20) - this should only be necessary
+ * for testing and NEVER set for normal production
+ */
+#define POLICY_DEBUG_ENABLE 0
+
+
+#endif /* POLICY_H */
diff --git a/plat/nxp/soc-lx2160a/soc.c b/plat/nxp/soc-lx2160a/soc.c
new file mode 100644
index 0000000..e0a2fe9
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/soc.c
@@ -0,0 +1,528 @@
+/*
+ * Copyright 2018-2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+
+#include <arch.h>
+#include <bl31/interrupt_mgmt.h>
+#include <caam.h>
+#include <cassert.h>
+#include <ccn.h>
+#include <common/debug.h>
+#include <dcfg.h>
+#ifdef I2C_INIT
+#include <i2c.h>
+#endif
+#include <lib/mmio.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <ls_interconnect.h>
+#ifdef POLICY_FUSE_PROVISION
+#include <nxp_gpio.h>
+#endif
+#if TRUSTED_BOARD_BOOT
+#include <nxp_smmu.h>
+#endif
+#include <nxp_timer.h>
+#include <plat_console.h>
+#include <plat_gic.h>
+#include <plat_tzc400.h>
+#include <pmu.h>
+#if defined(NXP_SFP_ENABLED)
+#include <sfp.h>
+#endif
+
+#include <errata.h>
+#include <ls_interrupt_mgmt.h>
+#include "plat_common.h"
+#ifdef NXP_NV_SW_MAINT_LAST_EXEC_DATA
+#include <plat_nv_storage.h>
+#endif
+#ifdef NXP_WARM_BOOT
+#include <plat_warm_rst.h>
+#endif
+#include "platform_def.h"
+#include "soc.h"
+
+static struct soc_type soc_list[] =  {
+	SOC_ENTRY(LX2160A, LX2160A, 8, 2),
+	SOC_ENTRY(LX2080A, LX2080A, 8, 1),
+	SOC_ENTRY(LX2120A, LX2120A, 6, 2),
+};
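+/*
+ * Each entry pairs an SoC personality with its topology; for example,
+ * LX2120A is the 6-cluster, 2-cores-per-cluster (12-core) personality,
+ * which is what get_num_cluster() below relies on.
+ */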
+
+static dcfg_init_info_t dcfg_init_data = {
+			.g_nxp_dcfg_addr = NXP_DCFG_ADDR,
+			.nxp_sysclk_freq = NXP_SYSCLK_FREQ,
+			.nxp_ddrclk_freq = NXP_DDRCLK_FREQ,
+			.nxp_plat_clk_divider = NXP_PLATFORM_CLK_DIVIDER,
+		};
+static const unsigned char master_to_6rn_id_map[] = {
+	PLAT_6CLUSTER_TO_CCN_ID_MAP
+};
+
+static const unsigned char master_to_rn_id_map[] = {
+	PLAT_CLUSTER_TO_CCN_ID_MAP
+};
+
+CASSERT(ARRAY_SIZE(master_to_rn_id_map) == NUMBER_OF_CLUSTERS,
+		assert_invalid_cluster_count_for_ccn_variant);
+
+static const ccn_desc_t plat_six_cluster_ccn_desc = {
+	.periphbase = NXP_CCN_ADDR,
+	.num_masters = ARRAY_SIZE(master_to_6rn_id_map),
+	.master_to_rn_id_map = master_to_6rn_id_map
+};
+
+static const ccn_desc_t plat_ccn_desc = {
+	.periphbase = NXP_CCN_ADDR,
+	.num_masters = ARRAY_SIZE(master_to_rn_id_map),
+	.master_to_rn_id_map = master_to_rn_id_map
+};
+
+/*******************************************************************************
+ * This function returns the number of clusters in the SoC
+ ******************************************************************************/
+static unsigned int get_num_cluster(void)
+{
+	const soc_info_t *soc_info = get_soc_info();
+	uint32_t num_clusters = NUMBER_OF_CLUSTERS;
+	unsigned int i;
+
+	for (i = 0U; i < ARRAY_SIZE(soc_list); i++) {
+		if (soc_list[i].personality == soc_info->personality) {
+			num_clusters = soc_list[i].num_clusters;
+			break;
+		}
+	}
+
+	VERBOSE("NUM of cluster = 0x%x\n", num_clusters);
+
+	return num_clusters;
+}
+
+
+/******************************************************************************
+ * Function returns the base counter frequency
+ * after reading the first entry at CNTFID0 (0x20 offset).
+ *
+ * Function is used by:
+ *   1. ARM common code for PSCI management.
+ *   2. ARM Generic Timer init.
+ *
+ *****************************************************************************/
+unsigned int plat_get_syscnt_freq2(void)
+{
+	unsigned int counter_base_frequency;
+	/*
+	 * The register below holds the base frequency of the system counter.
+	 * Per the NXP board manuals, the system counter always runs at
+	 * SYS_REF_CLK/4.
+	 */
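+	/*
+	 * For example, assuming SYS_REF_CLK here is the 100 MHz
+	 * NXP_SYSCLK_FREQ from plat_def.h, CNTFID0 is expected to
+	 * report a 25 MHz base frequency.
+	 */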
+	counter_base_frequency = mmio_read_32(NXP_TIMER_ADDR + CNTFID_OFF);
+
+	return counter_base_frequency;
+}
+
+#ifdef IMAGE_BL2
+
+#ifdef POLICY_FUSE_PROVISION
+static gpio_init_info_t gpio_init_data = {
+	.gpio1_base_addr = NXP_GPIO1_ADDR,
+	.gpio2_base_addr = NXP_GPIO2_ADDR,
+	.gpio3_base_addr = NXP_GPIO3_ADDR,
+	.gpio4_base_addr = NXP_GPIO4_ADDR,
+};
+#endif
+
+static void soc_interconnect_config(void)
+{
+	unsigned long long val = 0x0U;
+
+	uint32_t num_clusters = get_num_cluster();
+
+	if (num_clusters == 6U) {
+		ccn_init(&plat_six_cluster_ccn_desc);
+	} else {
+		ccn_init(&plat_ccn_desc);
+	}
+
+	/*
+	 * Enable Interconnect coherency for the primary CPU's cluster.
+	 */
+	plat_ls_interconnect_enter_coherency(num_clusters);
+
+	val = ccn_read_node_reg(NODE_TYPE_HNI, 13, PCIeRC_RN_I_NODE_ID_OFFSET);
+	val |= (1 << 17);
+	ccn_write_node_reg(NODE_TYPE_HNI, 13, PCIeRC_RN_I_NODE_ID_OFFSET, val);
+
+	/* PCIe is connected to RN-I 17, which is connected to HN-I 13. */
+	val = ccn_read_node_reg(NODE_TYPE_HNI, 30, PCIeRC_RN_I_NODE_ID_OFFSET);
+	val |= (1 << 17);
+	ccn_write_node_reg(NODE_TYPE_HNI, 30, PCIeRC_RN_I_NODE_ID_OFFSET, val);
+
+	val = ccn_read_node_reg(NODE_TYPE_HNI, 13, SA_AUX_CTRL_REG_OFFSET);
+	val |= SERIALIZE_DEV_nGnRnE_WRITES;
+	ccn_write_node_reg(NODE_TYPE_HNI, 13, SA_AUX_CTRL_REG_OFFSET, val);
+
+	val = ccn_read_node_reg(NODE_TYPE_HNI, 30, SA_AUX_CTRL_REG_OFFSET);
+	val &= ~(ENABLE_RESERVE_BIT53);
+	val |= SERIALIZE_DEV_nGnRnE_WRITES;
+	ccn_write_node_reg(NODE_TYPE_HNI, 30, SA_AUX_CTRL_REG_OFFSET, val);
+
+	val = ccn_read_node_reg(NODE_TYPE_HNI, 13, PoS_CONTROL_REG_OFFSET);
+	val &= ~(HNI_POS_EN);
+	ccn_write_node_reg(NODE_TYPE_HNI, 13, PoS_CONTROL_REG_OFFSET, val);
+
+	val = ccn_read_node_reg(NODE_TYPE_HNI, 30, PoS_CONTROL_REG_OFFSET);
+	val &= ~(HNI_POS_EN);
+	ccn_write_node_reg(NODE_TYPE_HNI, 30, PoS_CONTROL_REG_OFFSET, val);
+
+	val = ccn_read_node_reg(NODE_TYPE_HNI, 13, SA_AUX_CTRL_REG_OFFSET);
+	val &= ~(POS_EARLY_WR_COMP_EN);
+	ccn_write_node_reg(NODE_TYPE_HNI, 13, SA_AUX_CTRL_REG_OFFSET, val);
+
+	val = ccn_read_node_reg(NODE_TYPE_HNI, 30, SA_AUX_CTRL_REG_OFFSET);
+	val &= ~(POS_EARLY_WR_COMP_EN);
+	ccn_write_node_reg(NODE_TYPE_HNI, 30, SA_AUX_CTRL_REG_OFFSET, val);
+
+#if POLICY_PERF_WRIOP
+	uint16_t wriop_rni = 0U;
+
+	if (POLICY_PERF_WRIOP == 1) {
+		wriop_rni = 7U;
+	} else if (POLICY_PERF_WRIOP == 2) {
+		wriop_rni = 23U;
+	} else {
+		ERROR("Incorrect WRIOP selected.\n");
+		panic();
+	}
+
+	val = ccn_read_node_reg(NODE_TYPE_RNI, wriop_rni,
+				SA_AUX_CTRL_REG_OFFSET);
+	val |= ENABLE_WUO;
+	ccn_write_node_reg(NODE_TYPE_RNI, wriop_rni, SA_AUX_CTRL_REG_OFFSET,
+			   val);
+#else
+	val = ccn_read_node_reg(NODE_TYPE_RNI, 17, SA_AUX_CTRL_REG_OFFSET);
+	val |= ENABLE_WUO;
+	ccn_write_node_reg(NODE_TYPE_RNI, 17, SA_AUX_CTRL_REG_OFFSET, val);
+#endif
+}
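Every update in soc_interconnect_config() above has the same read-modify-write shape: read a CCN node register, flip a few bits, write it back. For illustration only, a minimal sketch of that pattern as a helper, assuming nothing beyond the ccn_read_node_reg()/ccn_write_node_reg() accessors already used here (the helper name ccn_clrsetbits() is hypothetical):

static void ccn_clrsetbits(unsigned int node_type, unsigned int node_id,
			   unsigned int offset, unsigned long long clr,
			   unsigned long long set)
{
	/* Read the current register value from the given CCN node. */
	unsigned long long val = ccn_read_node_reg(node_type, node_id, offset);

	/* Clear the requested bits, set the new ones, then write back. */
	val &= ~clr;
	val |= set;
	ccn_write_node_reg(node_type, node_id, offset, val);
}

With such a helper, the two HN-I 13 SA_AUX_CTRL updates above would, for example, collapse into a single ccn_clrsetbits(NODE_TYPE_HNI, 13, SA_AUX_CTRL_REG_OFFSET, POS_EARLY_WR_COMP_EN, SERIALIZE_DEV_nGnRnE_WRITES) call.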
+
+
+void soc_preload_setup(void)
+{
+	dram_regions_info_t *info_dram_regions = get_dram_regions_info();
+#if defined(NXP_WARM_BOOT)
+	bool warm_reset = is_warm_boot();
+#endif
+	info_dram_regions->total_dram_size =
+#if defined(NXP_WARM_BOOT)
+						init_ddr(warm_reset);
+#else
+						init_ddr();
+#endif
+}
+
+/*******************************************************************************
+ * This function applies SoC-specific errata workarounds.
+ * It is called before DDR is initialized or the MMU is enabled.
+ ******************************************************************************/
+void soc_early_init(void)
+{
+	dcfg_init(&dcfg_init_data);
+#ifdef POLICY_FUSE_PROVISION
+	gpio_init(&gpio_init_data);
+	sec_init(NXP_CAAM_ADDR);
+#endif
+#if LOG_LEVEL > 0
+	/* Initialize the console to provide early debug support */
+	plat_console_init(NXP_CONSOLE_ADDR,
+				NXP_UART_CLK_DIVIDER, NXP_CONSOLE_BAUDRATE);
+#endif
+
+	enable_timer_base_to_cluster(NXP_PMU_ADDR);
+	soc_interconnect_config();
+
+	enum boot_device dev = get_boot_dev();
+	/* Mark the buffer for SD in OCRAM as non-secure.
+	 * The buffer is assumed to be at the end of OCRAM so that
+	 * the logic below can calculate the TZPC programming.
+	 */
+	if (dev == BOOT_DEVICE_EMMC || dev == BOOT_DEVICE_SDHC2_EMMC) {
+		/* Calculate the region of OCRAM that remains secure.
+		 * The buffer for SD needs to be marked non-secure
+		 * to allow the SD controller to do DMA operations on it.
+		 */
+		uint32_t secure_region = (NXP_OCRAM_SIZE
+						- NXP_SD_BLOCK_BUF_SIZE);
+		uint32_t mask = secure_region/TZPC_BLOCK_SIZE;
+
+		mmio_write_32(NXP_OCRAM_TZPC_ADDR, mask);
+
+		/* Add the entry for buffer in MMU Table */
+		mmap_add_region(NXP_SD_BLOCK_BUF_ADDR, NXP_SD_BLOCK_BUF_ADDR,
+				NXP_SD_BLOCK_BUF_SIZE,
+				MT_DEVICE | MT_RW | MT_NS);
+	}
+
+#ifdef ERRATA_SOC_A050426
+	erratum_a050426();
+#endif
+
+#if (TRUSTED_BOARD_BOOT) || defined(POLICY_FUSE_PROVISION)
+	sfp_init(NXP_SFP_ADDR);
+#endif
+
+#if TRUSTED_BOARD_BOOT
+	uint32_t mode;
+
+	/* For secure boot, disable the SMMU.
+	 * Later, when the platform security policy comes into the picture,
+	 * this might be modified based on that policy.
+	 */
+	if (check_boot_mode_secure(&mode) == true) {
+		bypass_smmu(NXP_SMMU_ADDR);
+	}
+
+	/* Crypto via CAAM is currently not supported with mbed TLS;
+	 * enable it once that support is available. In tbbr.mk,
+	 * CAAM_INTEG is set to 0.
+	 */
+
+#ifndef MBEDTLS_X509
+	/* Initialize the crypto accelerator if enabled */
+	if (is_sec_enabled() == false)
+		INFO("SEC is disabled.\n");
+	else
+		sec_init(NXP_CAAM_ADDR);
+#endif
+#endif
+
+	/*
+	 * Initialize the system-level generic timer for Layerscape SoCs.
+	 */
+	delay_timer_init(NXP_TIMER_ADDR);
+	i2c_init(NXP_I2C_ADDR);
+}
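The TZPC programming in soc_early_init() reduces to a division only because the SD buffer sits at the very end of OCRAM. For illustration, a standalone sketch of that arithmetic, using the OCRAM size from soc.def below and the 64 KB SD buffer implied by its OCRAM map comment; TZPC_BLOCK_SIZE is an assumed 4 KB granule and may differ on the real platform:

#include <stdio.h>

#define OCRAM_SIZE		0x40000U	/* 256 KB, matches soc.def */
#define SD_BLOCK_BUF_SIZE	0x10000U	/* 64 KB buffer at the end of OCRAM */
#define TZPC_BLOCK_SIZE		0x1000U		/* assumed 4 KB TZPC granule */

int main(void)
{
	/* Everything below the SD buffer stays secure. */
	unsigned int secure_region = OCRAM_SIZE - SD_BLOCK_BUF_SIZE;
	/* Number of TZPC granules that remain secure. */
	unsigned int mask = secure_region / TZPC_BLOCK_SIZE;

	printf("secure_region = 0x%x, mask = 0x%x\n", secure_region, mask);
	/* Prints: secure_region = 0x30000, mask = 0x30 */
	return 0;
}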
+
+void soc_bl2_prepare_exit(void)
+{
+#if defined(NXP_SFP_ENABLED) && defined(DISABLE_FUSE_WRITE)
+	set_sfp_wr_disable();
+#endif
+}
+
+/*****************************************************************************
+ * This function returns the boot device based on RCW_SRC
+ ****************************************************************************/
+enum boot_device get_boot_dev(void)
+{
+	enum boot_device src = BOOT_DEVICE_NONE;
+	uint32_t porsr1;
+	uint32_t rcw_src;
+
+	porsr1 = read_reg_porsr1();
+
+	rcw_src = (porsr1 & PORSR1_RCW_MASK) >> PORSR1_RCW_SHIFT;
+
+	switch (rcw_src) {
+	case FLEXSPI_NOR:
+		src = BOOT_DEVICE_FLEXSPI_NOR;
+		INFO("RCW BOOT SRC is FLEXSPI NOR\n");
+		break;
+	case FLEXSPI_NAND2K_VAL:
+	case FLEXSPI_NAND4K_VAL:
+		INFO("RCW BOOT SRC is FLEXSPI NAND\n");
+		src = BOOT_DEVICE_FLEXSPI_NAND;
+		break;
+	case SDHC1_VAL:
+		src = BOOT_DEVICE_EMMC;
+		INFO("RCW BOOT SRC is SD\n");
+		break;
+	case SDHC2_VAL:
+		src = BOOT_DEVICE_SDHC2_EMMC;
+		INFO("RCW BOOT SRC is EMMC\n");
+		break;
+	default:
+		break;
+	}
+
+	return src;
+}
+
+
+void soc_mem_access(void)
+{
+	const devdisr5_info_t *devdisr5_info = get_devdisr5_info();
+	dram_regions_info_t *info_dram_regions = get_dram_regions_info();
+	struct tzc400_reg tzc400_reg_list[MAX_NUM_TZC_REGION];
+	int dram_idx, index = 0U;
+
+	for (dram_idx = 0U; dram_idx < info_dram_regions->num_dram_regions;
+	     dram_idx++) {
+		if (info_dram_regions->region[dram_idx].size == 0) {
+			ERROR("DDR init failure, or");
+			ERROR("DRAM regions not populated correctly.\n");
+			break;
+		}
+
+		index = populate_tzc400_reg_list(tzc400_reg_list,
+				dram_idx, index,
+				info_dram_regions->region[dram_idx].addr,
+				info_dram_regions->region[dram_idx].size,
+				NXP_SECURE_DRAM_SIZE, NXP_SP_SHRD_DRAM_SIZE);
+	}
+
+	if (devdisr5_info->ddrc1_present != 0) {
+		INFO("DDR Controller 1.\n");
+		mem_access_setup(NXP_TZC_ADDR, index,
+				tzc400_reg_list);
+		mem_access_setup(NXP_TZC3_ADDR, index,
+				tzc400_reg_list);
+	}
+	if (devdisr5_info->ddrc2_present != 0) {
+		INFO("DDR Controller 2.\n");
+		mem_access_setup(NXP_TZC2_ADDR, index,
+				tzc400_reg_list);
+		mem_access_setup(NXP_TZC4_ADDR, index,
+				tzc400_reg_list);
+	}
+}
+
+#else
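+/* Power domain tree: one system node, 8 clusters, 2 cores per cluster. */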
+const unsigned char _power_domain_tree_desc[] = {1, 8, 2, 2, 2, 2, 2, 2, 2, 2};
+
+CASSERT(NUMBER_OF_CLUSTERS && NUMBER_OF_CLUSTERS <= 256,
+		assert_invalid_lx2160a_cluster_count);
+
+/******************************************************************************
+ * This function returns the SoC topology
+ ****************************************************************************/
+
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+
+	return _power_domain_tree_desc;
+}
+
+/*******************************************************************************
+ * This function returns the core count within the cluster corresponding to
+ * `mpidr`.
+ ******************************************************************************/
+unsigned int plat_ls_get_cluster_core_count(u_register_t mpidr)
+{
+	return CORES_PER_CLUSTER;
+}
+
+
+void soc_early_platform_setup2(void)
+{
+	dcfg_init(&dcfg_init_data);
+	/*
+	 * Initialize the system-level generic timer for the SoC.
+	 */
+	delay_timer_init(NXP_TIMER_ADDR);
+
+#if LOG_LEVEL > 0
+	/* Initialize the console to provide early debug support */
+	plat_console_init(NXP_CONSOLE_ADDR,
+			  NXP_UART_CLK_DIVIDER, NXP_CONSOLE_BAUDRATE);
+#endif
+}
+
+void soc_platform_setup(void)
+{
+	/* Initialize the GIC driver, cpu and distributor interfaces */
+	static uintptr_t target_mask_array[PLATFORM_CORE_COUNT];
+	static interrupt_prop_t ls_interrupt_props[] = {
+		PLAT_LS_G1S_IRQ_PROPS(INTR_GROUP1S),
+		PLAT_LS_G0_IRQ_PROPS(INTR_GROUP0)
+	};
+
+	plat_ls_gic_driver_init(NXP_GICD_ADDR, NXP_GICR_ADDR,
+				PLATFORM_CORE_COUNT,
+				ls_interrupt_props,
+				ARRAY_SIZE(ls_interrupt_props),
+				target_mask_array,
+				plat_core_pos);
+
+	plat_ls_gic_init();
+	enable_init_timer();
+#ifdef LS_SYS_TIMCTL_BASE
+	ls_configure_sys_timer(LS_SYS_TIMCTL_BASE,
+			       LS_CONFIG_CNTACR,
+			       PLAT_LS_NSTIMER_FRAME_ID);
+#endif
+}
+
+/*******************************************************************************
+ * This function initializes the soc from the BL31 module
+ ******************************************************************************/
+void soc_init(void)
+{
+	 /* low-level init of the soc */
+	soc_init_start();
+	soc_init_percpu();
+	_init_global_data();
+	_initialize_psci();
+
+	if (ccn_get_part0_id(NXP_CCN_ADDR) != CCN_508_PART0_ID) {
+		ERROR("Unrecognized CCN variant detected.");
+		ERROR("Only CCN-508 is supported\n");
+		panic();
+	}
+
+	uint32_t num_clusters = get_num_cluster();
+
+	if (num_clusters == 6U) {
+		ccn_init(&plat_six_cluster_ccn_desc);
+	} else {
+		ccn_init(&plat_ccn_desc);
+	}
+
+	plat_ls_interconnect_enter_coherency(num_clusters);
+
+	/* Set platform security policies */
+	_set_platform_security();
+
+	 /* make sure any parallel init tasks are finished */
+	soc_init_finish();
+
+	/* Initialize the crypto accelerator if enabled */
+	if (is_sec_enabled() == false) {
+		INFO("SEC is disabled.\n");
+	} else {
+		sec_init(NXP_CAAM_ADDR);
+	}
+
+}
+
+#ifdef NXP_WDOG_RESTART
+static uint64_t wdog_interrupt_handler(uint32_t id, uint32_t flags,
+					  void *handle, void *cookie)
+{
+	uint8_t data = WDOG_RESET_FLAG;
+
+	wr_nv_app_data(WDT_RESET_FLAG_OFFSET,
+		       (uint8_t *)&data, sizeof(data));
+
+	mmio_write_32(NXP_RST_ADDR + RSTCNTL_OFFSET, SW_RST_REQ_INIT);
+
+	return 0;
+}
+#endif
+
+void soc_runtime_setup(void)
+{
+
+#ifdef NXP_WDOG_RESTART
+	request_intr_type_el3(BL31_NS_WDOG_WS1, wdog_interrupt_handler);
+#endif
+}
+#endif
diff --git a/plat/nxp/soc-lx2160a/soc.def b/plat/nxp/soc-lx2160a/soc.def
new file mode 100644
index 0000000..bd0dd15
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/soc.def
@@ -0,0 +1,201 @@
+#
+# Copyright (c) 2015, 2016 Freescale Semiconductor, Inc.
+# Copyright 2017-2020 NXP Semiconductors
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+#------------------------------------------------------------------------------
+#
+# This file contains the basic architecture definitions that drive the build
+#
+# -----------------------------------------------------------------------------
+
+CORE_TYPE	:=	a72
+
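+# CACHE_LINE is the log2 of the cache line size: 1 << 6 = 64 bytes on Cortex-A72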
+CACHE_LINE	:=	6
+
+# set to GIC400 or GIC500
+GIC		:=	GIC500
+
+# set to CCI400 or CCN504 or CCN508
+INTERCONNECT	:=	CCN508
+
+# Indicate the Layerscape chassis level - set to 3=LSCH3, 2=LSCH2 or 3_2=LSCH3.2
+CHASSIS		:=	3_2
+
+# TZC IP details: the TZC used is either TZC380 or TZC400
+TZC_ID		:=	TZC400
+
+# Console details: the available options are NS16550 or PL011
+CONSOLE		:=	PL011
+
+# Select the DDR PHY generation to be used
+PLAT_DDR_PHY	:=	PHY_GEN2
+
+PHYS_SYS	:=	64
+
+# Area of OCRAM reserved by ROM code
+NXP_ROM_RSVD	:= 0xa000
+
+# Max Size of CSF header. Required to define BL2 TEXT LIMIT in soc.def
+# Input to CST create_hdr_esbc tool
+CSF_HDR_SZ	:= 0x3000
+
+NXP_SFP_VER	:= 3_4
+
+# In IMAGE_BL2, compile-time flag for handling cache coherency
+# with the CAAM for BL2 running from OCRAM
+SEC_MEM_NON_COHERENT	:= yes
+
+# Defining the endianness for NXP ESDHC
+NXP_ESDHC_ENDIANNESS	:= LE
+
+# Defining the endianness for NXP SFP
+NXP_SFP_ENDIANNESS	:= LE
+
+# Defining the endianness for NXP GPIO
+NXP_GPIO_ENDIANNESS	:= LE
+
+# Defining the endianness for NXP SNVS
+NXP_SNVS_ENDIANNESS	:= LE
+
+# Defining the endianness for NXP CCSR GUR register
+NXP_GUR_ENDIANNESS	:= LE
+
+# Defining the endianness for NXP FSPI register
+NXP_FSPI_ENDIANNESS	:= LE
+
+# Defining the endianness for NXP SEC
+NXP_SEC_ENDIANNESS	:= LE
+
+# Defining the endianness for NXP DDR
+NXP_DDR_ENDIANNESS	:= LE
+
+NXP_DDR_INTLV_256B	:= 1
+
+# OCRAM MAP for BL2
+# Before BL2
+# 0x18000000 - 0x18009fff -> Used by ROM code
+# 0x1800a000 - 0x1800dfff -> CSF header for BL2
+# (The above area, i.e. 0x18000000 - 0x1800dfff, is available
+#  as a scratch-pad region for the DDR PHY images during BL2 run time)
+# For FlexSPI boot
+# 0x1800e000 - 0x18040000 -> Reserved for BL2 binary
+# For SD boot
+# 0x1800e000 - 0x18030000 -> Reserved for BL2 binary
+# 0x18030000 - 0x18040000 -> Reserved for SD buffer
+OCRAM_START_ADDR := 0x18000000
+OCRAM_SIZE := 0x40000
+
+# Location of BL2 on OCRAM
+BL2_BASE_ADDR	:=	$(shell echo $$(( $(OCRAM_START_ADDR) + $(NXP_ROM_RSVD) + $(CSF_HDR_SZ) )))
+# Convert to hex, to be used by create_pbl.mk
+BL2_BASE	:=	$$(echo "obase=16; ${BL2_BASE_ADDR}" | bc)
+
+# BL2_HDR_LOC is at (OCRAM_START_ADDR + NXP_ROM_RSVD)
+# The value BL2_HDR_LOC + CSF_HDR_SZ must not overlap with BL2_BASE
+BL2_HDR_LOC_HDR	?=	$(shell echo $$(( $(OCRAM_START_ADDR) + $(NXP_ROM_RSVD) )))
+# Convert to hex, to be used by create_pbl.mk
+BL2_HDR_LOC	:=	$$(echo "obase=16; ${BL2_HDR_LOC_HDR}" | bc)
+
+# SoC errata to be enabled
+#
+# Core Errata
+ERRATA_A72_859971	:= 1
+
+# SoC Errata
+ERRATA_SOC_A050426	:= 1
+
+ifneq (${CACHE_LINE},)
+$(eval $(call add_define_val,PLATFORM_CACHE_LINE_SHIFT,${CACHE_LINE}))
+$(eval CACHE_WRITEBACK_GRANULE=$(shell echo $$((1 << $(CACHE_LINE)))))
+$(eval $(call add_define_val,CACHE_WRITEBACK_GRANULE,$(CACHE_WRITEBACK_GRANULE)))
+endif
+
+ifneq (${INTERCONNECT},)
+$(eval $(call add_define,NXP_HAS_CCN508))
+endif
+
+ifneq (${CHASSIS},)
+$(eval $(call add_define,CONFIG_CHASSIS_${CHASSIS}))
+endif
+
+ifneq (${PLAT_DDR_PHY},)
+$(eval $(call add_define,NXP_DDR_${PLAT_DDR_PHY}))
+endif
+
+ifneq (${PHYS_SYS},)
+$(eval $(call add_define,CONFIG_PHYS_64BIT))
+endif
+
+ifneq (${CSF_HDR_SZ},)
+$(eval $(call add_define_val,CSF_HDR_SZ,${CSF_HDR_SZ}))
+endif
+
+ifneq (${OCRAM_START_ADDR},)
+$(eval $(call add_define_val,NXP_OCRAM_ADDR,${OCRAM_START_ADDR}))
+endif
+
+ifneq (${OCRAM_SIZE},)
+$(eval $(call add_define_val,NXP_OCRAM_SIZE,${OCRAM_SIZE}))
+endif
+
+ifneq (${NXP_ROM_RSVD},)
+$(eval $(call add_define_val,NXP_ROM_RSVD,${NXP_ROM_RSVD}))
+endif
+
+ifneq (${BL2_BASE_ADDR},)
+$(eval $(call add_define_val,BL2_BASE,${BL2_BASE_ADDR}))
+endif
+
+ifeq (${SEC_MEM_NON_COHERENT},yes)
+$(eval $(call add_define,SEC_MEM_NON_COHERENT))
+endif
+
+ifneq (${NXP_ESDHC_ENDIANNESS},)
+$(eval $(call add_define,NXP_ESDHC_${NXP_ESDHC_ENDIANNESS}))
+endif
+
+ifneq (${NXP_SFP_VER},)
+$(eval $(call add_define,NXP_SFP_VER_${NXP_SFP_VER}))
+endif
+
+ifneq (${NXP_SFP_ENDIANNESS},)
+$(eval $(call add_define,NXP_SFP_${NXP_SFP_ENDIANNESS}))
+endif
+
+ifneq (${NXP_GPIO_ENDIANNESS},)
+$(eval $(call add_define,NXP_GPIO_${NXP_GPIO_ENDIANNESS}))
+endif
+
+ifneq (${NXP_SNVS_ENDIANNESS},)
+$(eval $(call add_define,NXP_SNVS_${NXP_SNVS_ENDIANNESS}))
+endif
+
+ifneq (${NXP_GUR_ENDIANNESS},)
+$(eval $(call add_define,NXP_GUR_${NXP_GUR_ENDIANNESS}))
+endif
+
+ifneq (${NXP_FSPI_ENDIANNESS},)
+$(eval $(call add_define,NXP_FSPI_${NXP_FSPI_ENDIANNESS}))
+endif
+
+# enable dynamic memory mapping
+PLAT_XLAT_TABLES_DYNAMIC :=	1
+
+ifneq (${NXP_SEC_ENDIANNESS},)
+$(eval $(call add_define,NXP_SEC_${NXP_SEC_ENDIANNESS}))
+endif
+
+ifneq (${NXP_DDR_ENDIANNESS},)
+$(eval $(call add_define,NXP_DDR_${NXP_DDR_ENDIANNESS}))
+endif
+
+ifneq (${NXP_DDR_INTLV_256B},)
+$(eval $(call add_define,NXP_DDR_INTLV_256B))
+endif
+
+ifneq (${PLAT_XLAT_TABLES_DYNAMIC},)
+$(eval $(call add_define,PLAT_XLAT_TABLES_DYNAMIC))
+endif
diff --git a/plat/nxp/soc-lx2160a/soc.mk b/plat/nxp/soc-lx2160a/soc.mk
new file mode 100644
index 0000000..b9649b4
--- /dev/null
+++ b/plat/nxp/soc-lx2160a/soc.mk
@@ -0,0 +1,173 @@
+#
+# Copyright 2018-2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+
+ # SoC-specific build parameters
+SOC		:=	lx2160a
+PLAT_PATH	:=	plat/nxp
+PLAT_COMMON_PATH:=	plat/nxp/common
+PLAT_DRIVERS_PATH:=	drivers/nxp
+PLAT_SOC_PATH	:=	${PLAT_PATH}/soc-${SOC}
+BOARD_PATH	:=	${PLAT_SOC_PATH}/${BOARD}
+
 # Get SoC-specific definitions
+include ${PLAT_SOC_PATH}/soc.def
+
+include ${PLAT_COMMON_PATH}/plat_make_helper/plat_build_macros.mk
+
+ # SoC-specific
+NXP_WDOG_RESTART	:= yes
+
+
+ # Select dependent modules and drivers, and add defines,
+ # for the features enabled above.
+ifeq (${NXP_WDOG_RESTART}, yes)
+NXP_NV_SW_MAINT_LAST_EXEC_DATA := yes
+LS_EL3_INTERRUPT_HANDLER := yes
+$(eval $(call add_define, NXP_WDOG_RESTART))
+endif
+
+
+ # For Security Features
+DISABLE_FUSE_WRITE	:= 1
+ifeq (${TRUSTED_BOARD_BOOT}, 1)
+ifeq (${GENERATE_COT},1)
+# Save Keys to be used by DDR FIP image
+SAVE_KEYS=1
+endif
+$(eval $(call SET_NXP_MAKE_FLAG,SMMU_NEEDED,BL2))
+$(eval $(call SET_NXP_MAKE_FLAG,SFP_NEEDED,BL2))
+$(eval $(call SET_NXP_MAKE_FLAG,SNVS_NEEDED,BL2))
+# Used by create_pbl tool to
+# create bl2_<boot_mode>_sec.pbl image
+SECURE_BOOT	:= yes
+endif
+$(eval $(call SET_NXP_MAKE_FLAG,CRYPTO_NEEDED,BL_COMM))
+
+
+ # Selecting Drivers for SoC
+$(eval $(call SET_NXP_MAKE_FLAG,DCFG_NEEDED,BL_COMM))
+$(eval $(call SET_NXP_MAKE_FLAG,TIMER_NEEDED,BL_COMM))
+$(eval $(call SET_NXP_MAKE_FLAG,INTERCONNECT_NEEDED,BL_COMM))
+$(eval $(call SET_NXP_MAKE_FLAG,GIC_NEEDED,BL31))
+$(eval $(call SET_NXP_MAKE_FLAG,CONSOLE_NEEDED,BL_COMM))
+$(eval $(call SET_NXP_MAKE_FLAG,PMU_NEEDED,BL_COMM))
+
+$(eval $(call SET_NXP_MAKE_FLAG,DDR_DRIVER_NEEDED,BL2))
+$(eval $(call SET_NXP_MAKE_FLAG,TZASC_NEEDED,BL2))
+$(eval $(call SET_NXP_MAKE_FLAG,I2C_NEEDED,BL2))
+$(eval $(call SET_NXP_MAKE_FLAG,IMG_LOADR_NEEDED,BL2))
+
+
+ # Selecting PSCI & SIP_SVC support
+$(eval $(call SET_NXP_MAKE_FLAG,PSCI_NEEDED,BL31))
+$(eval $(call SET_NXP_MAKE_FLAG,SIPSVC_NEEDED,BL31))
+
+
+ # Selecting the boot source for the TF-A images.
+ifeq (${BOOT_MODE}, flexspi_nor)
+$(eval $(call SET_NXP_MAKE_FLAG,XSPI_NEEDED,BL2))
+$(eval $(call add_define,FLEXSPI_NOR_BOOT))
+else
+ifeq (${BOOT_MODE}, sd)
+$(eval $(call SET_NXP_MAKE_FLAG,SD_MMC_NEEDED,BL2))
+$(eval $(call add_define,SD_BOOT))
+else
+ifeq (${BOOT_MODE}, emmc)
+$(eval $(call SET_NXP_MAKE_FLAG,SD_MMC_NEEDED,BL2))
+$(eval $(call add_define,EMMC_BOOT))
+else
+$(error Unsupported Boot Mode = ${BOOT_MODE})
+endif
+endif
+endif
+
+
+ # Separate DDR-FIP image to be loaded.
+$(eval $(call SET_NXP_MAKE_FLAG,DDR_FIP_IO_NEEDED,BL2))
+
+
+# Source File Addition
+# #####################
+
+PLAT_INCLUDES		+=	-I${PLAT_COMMON_PATH}/include/default\
+				-I${BOARD_PATH}\
+				-I${PLAT_COMMON_PATH}/include/default/ch_${CHASSIS}\
+				-I${PLAT_SOC_PATH}/include
+
+ifeq (${SECURE_BOOT},yes)
+include ${PLAT_COMMON_PATH}/tbbr/tbbr.mk
+endif
+
+ifeq ($(WARM_BOOT),yes)
+include ${PLAT_COMMON_PATH}/warm_reset/warm_reset.mk
+endif
+
+ifeq (${NXP_NV_SW_MAINT_LAST_EXEC_DATA}, yes)
+include ${PLAT_COMMON_PATH}/nv_storage/nv_storage.mk
+endif
+
+ifeq (${PSCI_NEEDED}, yes)
+include ${PLAT_COMMON_PATH}/psci/psci.mk
+endif
+
+ifeq (${SIPSVC_NEEDED}, yes)
+include ${PLAT_COMMON_PATH}/sip_svc/sipsvc.mk
+endif
+
+ifeq (${DDR_FIP_IO_NEEDED}, yes)
+include ${PLAT_COMMON_PATH}/fip_handler/ddr_fip/ddr_fip_io.mk
+endif
+
+ # for fuse-fip & fuse-programming
+ifeq (${FUSE_PROG}, 1)
+include ${PLAT_COMMON_PATH}/fip_handler/fuse_fip/fuse.mk
+endif
+
+ifeq (${IMG_LOADR_NEEDED},yes)
+include $(PLAT_COMMON_PATH)/img_loadr/img_loadr.mk
+endif
+
+ # Adding source files for the above selected drivers.
+include ${PLAT_DRIVERS_PATH}/drivers.mk
+
 # Adding SoC-specific files
+include ${PLAT_SOC_PATH}/erratas_soc.mk
+
+PLAT_INCLUDES		+=	${NV_STORAGE_INCLUDES}\
+				${WARM_RST_INCLUDES}
+
+BL31_SOURCES		+=	${PLAT_SOC_PATH}/$(ARCH)/${SOC}.S\
+				${WARM_RST_BL31_SOURCES}\
+				${PSCI_SOURCES}\
+				${SIPSVC_SOURCES}\
+				${PLAT_COMMON_PATH}/$(ARCH)/bl31_data.S
+
+PLAT_BL_COMMON_SOURCES	+=	${PLAT_COMMON_PATH}/$(ARCH)/ls_helpers.S\
+				${PLAT_SOC_PATH}/aarch64/${SOC}_helpers.S\
+				${NV_STORAGE_SOURCES}\
+				${WARM_RST_BL_COMM_SOURCES}\
+				${PLAT_SOC_PATH}/soc.c
+
+ifeq (${TEST_BL31}, 1)
+BL31_SOURCES		+=	${PLAT_SOC_PATH}/$(ARCH)/bootmain64.S\
+				${PLAT_SOC_PATH}/$(ARCH)/nonboot64.S
+endif
+
+BL2_SOURCES		+=	${DDR_CNTLR_SOURCES}\
+				${TBBR_SOURCES}\
+				${FUSE_SOURCES}
+
+
 # Adding TF-A setup files
+include ${PLAT_PATH}/common/setup/common.mk
+
+
+ # Adding source files to generate separate DDR FIP image
+include ${PLAT_SOC_PATH}/ddr_fip.mk
diff --git a/plat/qemu/common/qemu_spm.c b/plat/qemu/common/qemu_spm.c
index 93dd2b3..c66f47e 100644
--- a/plat/qemu/common/qemu_spm.c
+++ b/plat/qemu/common/qemu_spm.c
@@ -29,20 +29,8 @@
 	{0}
 };
 
-/*
- * Boot information passed to a secure partition during initialisation.
- * Linear indices in MP information will be filled at runtime.
- */
-static spm_mm_mp_info_t sp_mp_info[] = {
-	[0] = {0x80000000, 0},
-	[1] = {0x80000001, 0},
-	[2] = {0x80000002, 0},
-	[3] = {0x80000003, 0},
-	[4] = {0x80000004, 0},
-	[5] = {0x80000005, 0},
-	[6] = {0x80000006, 0},
-	[7] = {0x80000007, 0}
-};
+/* Boot information passed to a secure partition during initialisation. */
+static spm_mm_mp_info_t sp_mp_info[PLATFORM_CORE_COUNT];
 
 spm_mm_boot_info_t plat_qemu_secure_partition_boot_info = {
 	.h.type              = PARAM_SP_IMAGE_BOOT_INFO,
@@ -71,6 +59,25 @@
 	EHF_PRI_DESC(QEMU_PRI_BITS, PLAT_SP_PRI)
 };
 
+static void qemu_initialize_mp_info(spm_mm_mp_info_t *mp_info)
+{
+	unsigned int i, j;
+	spm_mm_mp_info_t *tmp = mp_info;
+
+	for (i = 0; i < PLATFORM_CLUSTER_COUNT; i++) {
+		for (j = 0; j < PLATFORM_MAX_CPUS_PER_CLUSTER; j++) {
+			tmp->mpidr = (0x80000000 | (i << MPIDR_AFF1_SHIFT)) + j;
+			/*
+			 * Linear indices and flags will be filled
+			 * in the spm_mm service.
+			 */
+			tmp->linear_id = 0;
+			tmp->flags = 0;
+			tmp++;
+		}
+	}
+}
+
 int dt_add_ns_buf_node(uintptr_t *base)
 {
 	uintptr_t addr;
@@ -134,5 +141,7 @@
 const spm_mm_boot_info_t *
 plat_get_secure_partition_boot_info(void *cookie)
 {
+	qemu_initialize_mp_info(sp_mp_info);
+
 	return &plat_qemu_secure_partition_boot_info;
 }
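qemu_initialize_mp_info() above derives each entry's MPIDR from its cluster and core indices. A standalone sketch of the same expression, assuming a 2-cluster, 2-CPU-per-cluster layout (the real counts come from platform_def.h) and the architectural MPIDR_AFF1_SHIFT of 8:

#include <stdio.h>

#define CLUSTER_COUNT		2U	/* assumed for the example */
#define CPUS_PER_CLUSTER	2U	/* assumed for the example */
#define MPIDR_AFF1_SHIFT	8U	/* cluster ID lives in Aff1 */

int main(void)
{
	unsigned int i, j;

	for (i = 0U; i < CLUSTER_COUNT; i++) {
		for (j = 0U; j < CPUS_PER_CLUSTER; j++) {
			/* Same expression as in qemu_initialize_mp_info(). */
			unsigned long long mpidr =
				(0x80000000U | (i << MPIDR_AFF1_SHIFT)) + j;

			printf("cluster %u cpu %u -> mpidr 0x%llx\n", i, j, mpidr);
		}
	}
	/* Prints 0x80000000, 0x80000001, 0x80000100, 0x80000101. */
	return 0;
}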
diff --git a/plat/qemu/qemu_sbsa/include/platform_def.h b/plat/qemu/qemu_sbsa/include/platform_def.h
index b69c2eb..d971ebe 100644
--- a/plat/qemu/qemu_sbsa/include/platform_def.h
+++ b/plat/qemu/qemu_sbsa/include/platform_def.h
@@ -171,7 +171,7 @@
 
 #if SPM_MM && defined(IMAGE_BL31)
 # define PLAT_SP_IMAGE_MMAP_REGIONS	30
-# define PLAT_SP_IMAGE_MAX_XLAT_TABLES	20
+# define PLAT_SP_IMAGE_MAX_XLAT_TABLES	50
 #endif
 
 /*
@@ -353,7 +353,7 @@
 #define MAP_SECURE_VARSTORE		MAP_REGION_FLAT( \
 					QEMU_SECURE_VARSTORE_BASE, \
 					QEMU_SECURE_VARSTORE_SIZE, \
-					MT_MEMORY | MT_RW | \
+					MT_DEVICE | MT_RW | \
 					MT_SECURE | MT_USER)
 #endif
 
diff --git a/plat/rpi/rpi4/platform.mk b/plat/rpi/rpi4/platform.mk
index 0744bce..99d51fb 100644
--- a/plat/rpi/rpi4/platform.mk
+++ b/plat/rpi/rpi4/platform.mk
@@ -7,6 +7,8 @@
 include lib/libfdt/libfdt.mk
 include lib/xlat_tables_v2/xlat_tables.mk
 
+include drivers/arm/gic/v2/gicv2.mk
+
 PLAT_INCLUDES		:=	-Iplat/rpi/common/include		\
 				-Iplat/rpi/rpi4/include
 
@@ -18,9 +20,6 @@
 BL31_SOURCES		+=	lib/cpus/aarch64/cortex_a72.S		\
 				plat/rpi/common/aarch64/plat_helpers.S	\
 				plat/rpi/rpi4/aarch64/armstub8_header.S	\
-				drivers/arm/gic/common/gic_common.c     \
-				drivers/arm/gic/v2/gicv2_helpers.c      \
-				drivers/arm/gic/v2/gicv2_main.c         \
 				drivers/delay_timer/delay_timer.c	\
 				drivers/gpio/gpio.c			\
 				drivers/rpi3/gpio/rpi3_gpio.c		\
@@ -30,7 +29,8 @@
 				plat/common/plat_psci_common.c		\
 				plat/rpi/common/rpi3_topology.c		\
 				common/fdt_fixup.c			\
-				${LIBFDT_SRCS}
+				${LIBFDT_SRCS}				\
+				${GICV2_SOURCES}
 
 # For now we only support BL31, using the kernel loaded by the GPU firmware.
 RESET_TO_BL31		:=	1
diff --git a/plat/xilinx/common/include/ipi.h b/plat/xilinx/common/include/ipi.h
index 483902e..9c1d0f2 100644
--- a/plat/xilinx/common/include/ipi.h
+++ b/plat/xilinx/common/include/ipi.h
@@ -63,7 +63,7 @@
 int ipi_mb_enquire_status(uint32_t local, uint32_t remote);
 
 /* Trigger notification on the IPI mailbox */
-void ipi_mb_notify(uint32_t local, uint32_t remote, uint32_t is_blocking);
+int ipi_mb_notify(uint32_t local, uint32_t remote, uint32_t is_blocking);
 
 /* Ack IPI mailbox notification */
 void ipi_mb_ack(uint32_t local, uint32_t remote);
diff --git a/plat/xilinx/common/ipi.c b/plat/xilinx/common/ipi.c
index 0b8020b..ca4146e 100644
--- a/plat/xilinx/common/ipi.c
+++ b/plat/xilinx/common/ipi.c
@@ -13,6 +13,7 @@
 
 #include <common/debug.h>
 #include <common/runtime_svc.h>
+#include <drivers/delay_timer.h>
 #include <lib/bakery_lock.h>
 #include <lib/mmio.h>
 
@@ -38,6 +39,9 @@
 /* IPI register bit mask */
 #define IPI_BIT_MASK(I) (ipi_table[(I)].ipi_bit_mask)
 
+/* IPI Timeout */
+#define TIMEOUT_COUNT_US	U(0x4000)
+
 /* IPI configuration table */
 const static struct ipi_config *ipi_table;
 
@@ -156,21 +160,30 @@
  * @remote - remote IPI ID
  * @is_blocking - if to trigger the notification in blocking mode or not.
  *
+ * Return: 0 on success, or an error code in case of timeout.
  * It sets the remote bit in the IPI agent trigger register.
  *
  */
-void ipi_mb_notify(uint32_t local, uint32_t remote, uint32_t is_blocking)
+int ipi_mb_notify(uint32_t local, uint32_t remote, uint32_t is_blocking)
 {
 	uint32_t status;
+	const unsigned int timeout_count = TIMEOUT_COUNT_US;
+	uint64_t timeout;
 
 	mmio_write_32(IPI_REG_BASE(local) + IPI_TRIG_OFFSET,
 		      IPI_BIT_MASK(remote));
 	if (is_blocking) {
+		timeout = timeout_init_us(timeout_count);
 		do {
 			status = mmio_read_32(IPI_REG_BASE(local) +
 					      IPI_OBR_OFFSET);
+			if (timeout_elapsed(timeout)) {
+				return -ETIMEDOUT;
+			}
 		} while (status & IPI_BIT_MASK(remote));
 	}
+
+	return 0;
 }
 
 /* ipi_mb_ack() - Ack IPI mailbox notification from the other end
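The blocking path added to ipi_mb_notify() above relies on the generic TF-A delay-timer helpers. A minimal sketch of the same poll-with-timeout idiom as a standalone helper; the helper name, and the register address, bit mask and microsecond budget passed by a caller, are illustrative only:

#include <errno.h>
#include <stdint.h>

#include <drivers/delay_timer.h>
#include <lib/mmio.h>

/* Poll 'addr' until all bits in 'mask' clear, or give up after 'budget_us'. */
static int poll_clear_with_timeout(uintptr_t addr, uint32_t mask,
				   uint32_t budget_us)
{
	uint64_t timeout = timeout_init_us(budget_us);

	while ((mmio_read_32(addr) & mask) != 0U) {
		if (timeout_elapsed(timeout)) {
			return -ETIMEDOUT;
		}
	}

	return 0;
}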
diff --git a/plat/xilinx/common/ipi_mailbox_service/ipi_mailbox_svc.c b/plat/xilinx/common/ipi_mailbox_service/ipi_mailbox_svc.c
index f531158..cd5d830 100644
--- a/plat/xilinx/common/ipi_mailbox_service/ipi_mailbox_svc.c
+++ b/plat/xilinx/common/ipi_mailbox_service/ipi_mailbox_svc.c
@@ -107,8 +107,8 @@
 		uint32_t is_blocking;
 
 		is_blocking = (x3 & IPI_SMC_NOTIFY_BLOCK_MASK) ? 1 : 0;
-		ipi_mb_notify(ipi_local_id, ipi_remote_id, is_blocking);
-		SMC_RET1(handle, 0);
+		ret = ipi_mb_notify(ipi_local_id, ipi_remote_id, is_blocking);
+		SMC_RET1(handle, ret);
 	}
 	case IPI_MAILBOX_ACK:
 	{
diff --git a/plat/xilinx/common/pm_service/pm_ipi.c b/plat/xilinx/common/pm_service/pm_ipi.c
index 5dcceae..ab8088d 100644
--- a/plat/xilinx/common/pm_service/pm_ipi.c
+++ b/plat/xilinx/common/pm_service/pm_ipi.c
@@ -55,6 +55,7 @@
 					     uint32_t payload[PAYLOAD_ARG_CNT],
 					     uint32_t is_blocking)
 {
+	int status;
 	unsigned int offset = 0;
 	uintptr_t buffer_base = proc->ipi->buffer_base +
 					IPI_BUFFER_TARGET_REMOTE_OFFSET +
@@ -70,10 +71,13 @@
 	}
 
 	/* Generate IPI to remote processor */
-	ipi_mb_notify(proc->ipi->local_ipi_id, proc->ipi->remote_ipi_id,
+	status = ipi_mb_notify(proc->ipi->local_ipi_id, proc->ipi->remote_ipi_id,
 		      is_blocking);
+	if (status == 0) {
+		return PM_RET_SUCCESS;
+	}
 
-	return PM_RET_SUCCESS;
+	return PM_RET_ERROR_TIMEOUT;
 }
 
 /**
diff --git a/plat/xilinx/versal/include/plat_pm_common.h b/plat/xilinx/versal/include/plat_pm_common.h
index 2d00801..fac5096 100644
--- a/plat/xilinx/versal/include/plat_pm_common.h
+++ b/plat/xilinx/versal/include/plat_pm_common.h
@@ -19,6 +19,9 @@
 #define PAYLOAD_ARG_CNT		6U
 #define PAYLOAD_ARG_SIZE	4U	/* size in bytes */
 
+#define NON_SECURE_FLAG		1U
+#define SECURE_FLAG		0U
+
 #define VERSAL_TZ_VERSION_MAJOR		1
 #define VERSAL_TZ_VERSION_MINOR		0
 #define VERSAL_TZ_VERSION		((VERSAL_TZ_VERSION_MAJOR << 16) | \
diff --git a/plat/xilinx/versal/plat_psci.c b/plat/xilinx/versal/plat_psci.c
index fda42df..fa0284c 100644
--- a/plat/xilinx/versal/plat_psci.c
+++ b/plat/xilinx/versal/plat_psci.c
@@ -33,7 +33,7 @@
 
 	/* Send request to PMC to wake up selected ACPU core */
 	pm_req_wakeup(proc->node_id, (versal_sec_entry & 0xFFFFFFFF) | 0x1,
-		      versal_sec_entry >> 32, 0);
+		      versal_sec_entry >> 32, 0, SECURE_FLAG);
 
 	/* Clear power down request */
 	pm_client_wakeup(proc);
@@ -67,7 +67,8 @@
 		PM_STATE_SUSPEND_TO_RAM : PM_STATE_CPU_IDLE;
 
 	/* Send request to PMC to suspend this core */
-	pm_self_suspend(proc->node_id, MAX_LATENCY, state, versal_sec_entry);
+	pm_self_suspend(proc->node_id, MAX_LATENCY, state, versal_sec_entry,
+			SECURE_FLAG);
 
 	/* APU is to be turned off */
 	if (target_state->pwr_domain_state[1] > PLAT_MAX_RET_STATE) {
@@ -123,7 +124,7 @@
 {
 	/* Send the power down request to the PMC */
 	pm_system_shutdown(XPM_SHUTDOWN_TYPE_SHUTDOWN,
-			  pm_get_shutdown_scope());
+			  pm_get_shutdown_scope(), SECURE_FLAG);
 
 	while (1)
 		wfi();
@@ -137,7 +138,7 @@
 {
 	/* Send the system reset request to the PMC */
 	pm_system_shutdown(XPM_SHUTDOWN_TYPE_RESET,
-			  pm_get_shutdown_scope());
+			  pm_get_shutdown_scope(), SECURE_FLAG);
 
 	while (1)
 		wfi();
@@ -168,7 +169,8 @@
 	 * invoking CPU_on function, during which resume address will
 	 * be set.
 	 */
-	pm_self_suspend(proc->node_id, MAX_LATENCY, PM_STATE_CPU_IDLE, 0);
+	pm_self_suspend(proc->node_id, MAX_LATENCY, PM_STATE_CPU_IDLE, 0,
+			SECURE_FLAG);
 }
 
 /**
diff --git a/plat/xilinx/versal/platform.mk b/plat/xilinx/versal/platform.mk
index 16396dc..1007e55 100644
--- a/plat/xilinx/versal/platform.mk
+++ b/plat/xilinx/versal/platform.mk
@@ -60,7 +60,6 @@
 				plat/xilinx/versal/aarch64/versal_common.c
 
 BL31_SOURCES		+=	drivers/arm/cci/cci.c				\
-				lib/cpus/aarch64/cortex_a53.S			\
 				lib/cpus/aarch64/cortex_a72.S			\
 				plat/common/plat_psci_common.c			\
 				plat/xilinx/common/ipi.c			\
diff --git a/plat/xilinx/versal/pm_service/pm_api_sys.c b/plat/xilinx/versal/pm_service/pm_api_sys.c
index 3cdd9d0..a578543 100644
--- a/plat/xilinx/versal/pm_service/pm_api_sys.c
+++ b/plat/xilinx/versal/pm_service/pm_api_sys.c
@@ -38,33 +38,33 @@
 /**
  * Assigning of argument values into array elements.
  */
-#define PM_PACK_PAYLOAD1(pl, mid, arg0) {	\
-	pl[0] = (uint32_t)((uint32_t)((arg0) & 0xFF) | (mid << 8)); \
+#define PM_PACK_PAYLOAD1(pl, mid, flag, arg0) {	\
+	pl[0] = (uint32_t)((uint32_t)((arg0) & 0xFF) | (mid << 8) | ((flag) << 24)); \
 }
 
-#define PM_PACK_PAYLOAD2(pl, mid, arg0, arg1) {		\
-	pl[1] = (uint32_t)(arg1);			\
-	PM_PACK_PAYLOAD1(pl, mid, arg0);		\
+#define PM_PACK_PAYLOAD2(pl, mid, flag, arg0, arg1) {		\
+	pl[1] = (uint32_t)(arg1);				\
+	PM_PACK_PAYLOAD1(pl, mid, flag, arg0);			\
 }
 
-#define PM_PACK_PAYLOAD3(pl, mid, arg0, arg1, arg2) {	\
-	pl[2] = (uint32_t)(arg2);			\
-	PM_PACK_PAYLOAD2(pl, mid, arg0, arg1);		\
+#define PM_PACK_PAYLOAD3(pl, mid, flag, arg0, arg1, arg2) {	\
+	pl[2] = (uint32_t)(arg2);				\
+	PM_PACK_PAYLOAD2(pl, mid, flag, arg0, arg1);		\
 }
 
-#define PM_PACK_PAYLOAD4(pl, mid, arg0, arg1, arg2, arg3) {	\
-	pl[3] = (uint32_t)(arg3);				\
-	PM_PACK_PAYLOAD3(pl, mid, arg0, arg1, arg2);		\
+#define PM_PACK_PAYLOAD4(pl, mid, flag, arg0, arg1, arg2, arg3) {	\
+	pl[3] = (uint32_t)(arg3);					\
+	PM_PACK_PAYLOAD3(pl, mid, flag, arg0, arg1, arg2);		\
 }
 
-#define PM_PACK_PAYLOAD5(pl, mid, arg0, arg1, arg2, arg3, arg4) {	\
+#define PM_PACK_PAYLOAD5(pl, mid, flag, arg0, arg1, arg2, arg3, arg4) {	\
 	pl[4] = (uint32_t)(arg4);					\
-	PM_PACK_PAYLOAD4(pl, mid, arg0, arg1, arg2, arg3);		\
+	PM_PACK_PAYLOAD4(pl, mid, flag, arg0, arg1, arg2, arg3);	\
 }
 
-#define PM_PACK_PAYLOAD6(pl, mid, arg0, arg1, arg2, arg3, arg4, arg5) {	\
-	pl[5] = (uint32_t)(arg5);					\
-	PM_PACK_PAYLOAD5(pl, mid, arg0, arg1, arg2, arg3, arg4);	\
+#define PM_PACK_PAYLOAD6(pl, mid, flag, arg0, arg1, arg2, arg3, arg4, arg5) {	\
+	pl[5] = (uint32_t)(arg5);						\
+	PM_PACK_PAYLOAD5(pl, mid, flag, arg0, arg1, arg2, arg3, arg4);		\
 }
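With the new flag argument, the first payload word carries three fields: bits [7:0] hold the API ID, bits [15:8] the module ID and bits [31:24] the security flag, exactly as PM_PACK_PAYLOAD1() packs them. A standalone sketch of that layout; the module and API ID values below are placeholders, only NON_SECURE_FLAG matches plat_pm_common.h:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_MODULE_ID	0x02U	/* placeholder for LIBPM_MODULE_ID */
#define EXAMPLE_API_ID		0x01U	/* placeholder for an API ID */
#define NON_SECURE_FLAG		1U	/* as defined in plat_pm_common.h */

int main(void)
{
	/* Same packing as PM_PACK_PAYLOAD1(pl, mid, flag, arg0). */
	uint32_t pl0 = (EXAMPLE_API_ID & 0xFFU) |
		       (EXAMPLE_MODULE_ID << 8) |
		       (NON_SECURE_FLAG << 24);

	printf("pl[0] = 0x%08" PRIx32 "\n", pl0);	/* 0x01000201 */
	return 0;
}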
 
 /* PM API functions */
@@ -72,15 +72,17 @@
 /**
  * pm_get_api_version() - Get version number of PMC PM firmware
  * @version	Returns 32-bit version number of PMC Power Management Firmware
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_get_api_version(unsigned int *version)
+enum pm_ret_status pm_get_api_version(unsigned int *version, uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD1(payload, LIBPM_MODULE_ID, PM_GET_API_VERSION);
+	PM_PACK_PAYLOAD1(payload, LIBPM_MODULE_ID, flag, PM_GET_API_VERSION);
 	return pm_ipi_send_sync(primary_proc, payload, version, 1);
 }
 
@@ -88,15 +90,17 @@
  * pm_init_finalize() - Call to notify PMC PM firmware that master has power
  *			management enabled and that it has finished its
  *			initialization
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Status returned by the PMU firmware
  */
-enum pm_ret_status pm_init_finalize(void)
+enum pm_ret_status pm_init_finalize(uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMU */
-	PM_PACK_PAYLOAD1(payload, LIBPM_MODULE_ID, PM_INIT_FINALIZE);
+	PM_PACK_PAYLOAD1(payload, LIBPM_MODULE_ID, flag, PM_INIT_FINALIZE);
 	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 }
 
@@ -106,6 +110,8 @@
  * @latency	Requested maximum wakeup latency (not supported)
  * @state	Requested state
  * @address	Resume address
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * This is a blocking call, it will return only once PMU has responded.
  * On a wakeup, resume address will be automatically set by PMU.
@@ -115,7 +121,7 @@
 enum pm_ret_status pm_self_suspend(uint32_t nid,
 				   unsigned int latency,
 				   unsigned int state,
-				   uintptr_t address)
+				   uintptr_t address, uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 	unsigned int cpuid = plat_my_core_pos();
@@ -133,7 +139,7 @@
 	pm_client_suspend(proc, state);
 
 	/* Send request to the PLM */
-	PM_PACK_PAYLOAD6(payload, LIBPM_MODULE_ID, PM_SELF_SUSPEND,
+	PM_PACK_PAYLOAD6(payload, LIBPM_MODULE_ID, flag, PM_SELF_SUSPEND,
 			 proc->node_id, latency, state, address,
 			 (address >> 32));
 	return pm_ipi_send_sync(proc, payload, NULL, 0);
@@ -143,13 +149,15 @@
  * pm_abort_suspend() - PM call to announce that a prior suspend request
  *			is to be aborted.
  * @reason	Reason for the abort
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * Calling PU expects the PMU to abort the initiated suspend procedure.
  * This is a non-blocking call without any acknowledge.
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_abort_suspend(enum pm_abort_reason reason)
+enum pm_ret_status pm_abort_suspend(enum pm_abort_reason reason, uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
@@ -160,8 +168,8 @@
 	pm_client_abort_suspend();
 
 	/* Send request to the PLM */
-	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, PM_ABORT_SUSPEND, reason,
-			 primary_proc->node_id);
+	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, flag, PM_ABORT_SUSPEND,
+			 reason, primary_proc->node_id);
 	return pm_ipi_send(primary_proc, payload);
 }
 
@@ -172,16 +180,19 @@
  * @ack		Flag to specify whether acknowledge is requested
  * @latency	Requested wakeup latency (not supported)
  * @state	Requested state (not supported)
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
 enum pm_ret_status pm_req_suspend(uint32_t target, uint8_t ack,
-				  unsigned int latency, unsigned int state)
+				  unsigned int latency, unsigned int state,
+				  uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMU */
-	PM_PACK_PAYLOAD4(payload, LIBPM_MODULE_ID, PM_REQ_SUSPEND, target,
+	PM_PACK_PAYLOAD4(payload, LIBPM_MODULE_ID, flag, PM_REQ_SUSPEND, target,
 			 latency, state);
 	if (ack == IPI_BLOCKING)
 		return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
@@ -197,6 +208,8 @@
  *		1 - resume address specified, 0 - otherwise
  * @address	Resume address
  * @ack		Flag to specify whether acknowledge requested
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * This API function is either used to power up another APU core for SMP
  * (by PSCI) or to power up an entirely different PU or subsystem, such
@@ -206,12 +219,12 @@
  * @return	Returns status, either success or error+reason
  */
 enum pm_ret_status pm_req_wakeup(uint32_t target, uint32_t set_address,
-				 uintptr_t address, uint8_t ack)
+				 uintptr_t address, uint8_t ack, uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC to perform the wake of the PU */
-	PM_PACK_PAYLOAD5(payload, LIBPM_MODULE_ID, PM_REQ_WAKEUP, target,
+	PM_PACK_PAYLOAD5(payload, LIBPM_MODULE_ID, flag, PM_REQ_WAKEUP, target,
 			 set_address, address, ack);
 
 	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
@@ -223,16 +236,18 @@
  * @capabilities	Requested capabilities for the device
  * @qos			Required Quality of Service
  * @ack			Flag to specify whether acknowledge requested
+ * @flag		0 - Call from secure source
+ *			1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
 enum pm_ret_status pm_request_device(uint32_t device_id, uint32_t capabilities,
-				     uint32_t qos, uint32_t ack)
+				     uint32_t qos, uint32_t ack, uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD5(payload, LIBPM_MODULE_ID, PM_REQUEST_DEVICE,
+	PM_PACK_PAYLOAD5(payload, LIBPM_MODULE_ID, flag, PM_REQUEST_DEVICE,
 			 device_id, capabilities, qos, ack);
 
 	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
@@ -241,15 +256,17 @@
 /**
  * pm_release_device() - Release a device
  * @device_id		Device ID
+ * @flag		0 - Call from secure source
+ *			1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_release_device(uint32_t device_id)
+enum pm_ret_status pm_release_device(uint32_t device_id, uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, PM_RELEASE_DEVICE,
+	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, flag, PM_RELEASE_DEVICE,
 			 device_id);
 
 	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
@@ -261,16 +278,19 @@
  * @capabilities	Requested capabilities for the device
  * @latency		Requested maximum latency
  * @qos			Required Quality of Service
+ * @flag		0 - Call from secure source
+ *			1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
 enum pm_ret_status pm_set_requirement(uint32_t device_id, uint32_t capabilities,
-				      uint32_t latency, uint32_t qos)
+				      uint32_t latency, uint32_t qos,
+				      uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD5(payload, LIBPM_MODULE_ID, PM_SET_REQUIREMENT,
+	PM_PACK_PAYLOAD5(payload, LIBPM_MODULE_ID, flag, PM_SET_REQUIREMENT,
 			 device_id, capabilities, latency, qos);
 
 	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
@@ -280,15 +300,18 @@
  * pm_get_device_status() - Get device's status
  * @device_id		Device ID
  * @response		Buffer to store device status response
+ * @flag		0 - Call from secure source
+ *			1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_get_device_status(uint32_t device_id, uint32_t *response)
+enum pm_ret_status pm_get_device_status(uint32_t device_id, uint32_t *response,
+					uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, PM_GET_DEVICE_STATUS,
+	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, flag, PM_GET_DEVICE_STATUS,
 			 device_id);
 
 	return pm_ipi_send_sync(primary_proc, payload, response, 3);
@@ -298,15 +321,17 @@
  * pm_reset_assert() - Assert/De-assert reset
  * @reset	Reset ID
  * @assert	Assert (1) or de-assert (0)
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_reset_assert(uint32_t reset, bool assert)
+enum pm_ret_status pm_reset_assert(uint32_t reset, bool assert, uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, PM_RESET_ASSERT, reset,
+	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, flag, PM_RESET_ASSERT, reset,
 			 assert);
 
 	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
@@ -316,15 +341,19 @@
  * pm_reset_get_status() - Get current status of a reset line
  * @reset	Reset ID
  * @status	Returns current status of selected reset ID
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_reset_get_status(uint32_t reset, uint32_t *status)
+enum pm_ret_status pm_reset_get_status(uint32_t reset, uint32_t *status,
+				       uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, PM_RESET_ASSERT, reset);
+	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, flag, PM_RESET_ASSERT,
+			 reset);
 
 	return pm_ipi_send_sync(primary_proc, payload, status, 1);
 }
@@ -332,10 +361,12 @@
 /**
  * pm_get_callbackdata() - Read from IPI response buffer
  * @data - array of PAYLOAD_ARG_CNT elements
+ * @flag - 0 - Call from secure source
+ *	   1 - Call from non-secure source
  *
  * Read value from ipi buffer response buffer.
  */
-void pm_get_callbackdata(uint32_t *data, size_t count)
+void pm_get_callbackdata(uint32_t *data, size_t count, uint32_t flag)
 {
 	/* Return if interrupt is not from PMU */
 	if (!pm_ipi_irq_status(primary_proc))
@@ -348,15 +379,18 @@
 /**
  * pm_pinctrl_request() - Request a pin
  * @pin		Pin ID
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_pinctrl_request(uint32_t pin)
+enum pm_ret_status pm_pinctrl_request(uint32_t pin, uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, PM_PINCTRL_REQUEST, pin);
+	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, flag, PM_PINCTRL_REQUEST,
+			 pin);
 
 	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 }
@@ -364,15 +398,18 @@
 /**
  * pm_pinctrl_release() - Release a pin
  * @pin		Pin ID
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_pinctrl_release(uint32_t pin)
+enum pm_ret_status pm_pinctrl_release(uint32_t pin, uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, PM_PINCTRL_RELEASE, pin);
+	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, flag, PM_PINCTRL_RELEASE,
+			 pin);
 
 	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 }
@@ -381,16 +418,19 @@
  * pm_pinctrl_set_function() - Set pin function
  * @pin		Pin ID
  * @function	Function ID
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_pinctrl_set_function(uint32_t pin, uint32_t function)
+enum pm_ret_status pm_pinctrl_set_function(uint32_t pin, uint32_t function,
+					   uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, PM_PINCTRL_SET_FUNCTION, pin,
-			 function)
+	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, flag,
+			 PM_PINCTRL_SET_FUNCTION, pin, function)
 
 	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 }
@@ -399,16 +439,19 @@
  * pm_pinctrl_get_function() - Get function set on the pin
  * @pin		Pin ID
  * @function	Function set on the pin
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_pinctrl_get_function(uint32_t pin, uint32_t *function)
+enum pm_ret_status pm_pinctrl_get_function(uint32_t pin, uint32_t *function,
+					   uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, PM_PINCTRL_SET_FUNCTION,
-			 pin);
+	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, flag,
+			 PM_PINCTRL_SET_FUNCTION, pin);
 
 	return pm_ipi_send_sync(primary_proc, payload, function, 1);
 }
@@ -418,17 +461,19 @@
  * @pin		Pin ID
  * @param	Parameter ID
  * @value	Parameter value
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
 enum pm_ret_status pm_pinctrl_set_pin_param(uint32_t pin, uint32_t param,
-					    uint32_t value)
+					    uint32_t value, uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD4(payload, LIBPM_MODULE_ID, PM_PINCTRL_CONFIG_PARAM_SET,
-			 pin, param, value);
+	PM_PACK_PAYLOAD4(payload, LIBPM_MODULE_ID, flag,
+			 PM_PINCTRL_CONFIG_PARAM_SET, pin, param, value);
 
 	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 }
@@ -438,17 +483,19 @@
  * @pin		Pin ID
  * @param	Parameter ID
  * @value	Buffer to store parameter value
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
 enum pm_ret_status pm_pinctrl_get_pin_param(uint32_t pin, uint32_t param,
-					    uint32_t *value)
+					    uint32_t *value, uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, PM_PINCTRL_CONFIG_PARAM_GET,
-			 pin, param);
+	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, flag,
+			 PM_PINCTRL_CONFIG_PARAM_GET, pin, param);
 
 	return pm_ipi_send_sync(primary_proc, payload, value, 1);
 }
@@ -456,15 +503,18 @@
 /**
  * pm_clock_enable() - Enable the clock
  * @clk_id	Clock ID
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_clock_enable(uint32_t clk_id)
+enum pm_ret_status pm_clock_enable(uint32_t clk_id, uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, PM_CLOCK_ENABLE, clk_id);
+	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, flag, PM_CLOCK_ENABLE,
+			 clk_id);
 
 	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 }
@@ -472,15 +522,18 @@
 /**
  * pm_clock_disable() - Disable the clock
  * @clk_id	Clock ID
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_clock_disable(uint32_t clk_id)
+enum pm_ret_status pm_clock_disable(uint32_t clk_id, uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, PM_CLOCK_DISABLE, clk_id);
+	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, flag, PM_CLOCK_DISABLE,
+			 clk_id);
 
 	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 }
@@ -489,15 +542,19 @@
  * pm_clock_get_state() - Get clock status
  * @clk_id	Clock ID
  * @state:	Buffer to store clock status (1: Enabled, 0:Disabled)
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_clock_get_state(uint32_t clk_id, uint32_t *state)
+enum pm_ret_status pm_clock_get_state(uint32_t clk_id, uint32_t *state,
+				      uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, PM_CLOCK_GETSTATE, clk_id);
+	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, flag, PM_CLOCK_GETSTATE,
+			 clk_id);
 
 	return pm_ipi_send_sync(primary_proc, payload, state, 1);
 }
@@ -506,16 +563,19 @@
  * pm_clock_set_divider() - Set divider for the clock
  * @clk_id	Clock ID
  * @divider	Divider value
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_clock_set_divider(uint32_t clk_id, uint32_t divider)
+enum pm_ret_status pm_clock_set_divider(uint32_t clk_id, uint32_t divider,
+					uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, PM_CLOCK_SETDIVIDER, clk_id,
-			 divider);
+	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, flag, PM_CLOCK_SETDIVIDER,
+			 clk_id, divider);
 
 	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 }
@@ -524,15 +584,19 @@
  * pm_clock_get_divider() - Get divider value for the clock
  * @clk_id	Clock ID
  * @divider:	Buffer to store clock divider value
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_clock_get_divider(uint32_t clk_id, uint32_t *divider)
+enum pm_ret_status pm_clock_get_divider(uint32_t clk_id, uint32_t *divider,
+					uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, PM_CLOCK_GETDIVIDER, clk_id);
+	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, flag, PM_CLOCK_GETDIVIDER,
+			 clk_id);
 
 	return pm_ipi_send_sync(primary_proc, payload, divider, 1);
 }
@@ -541,16 +605,19 @@
  * pm_clock_set_parent() - Set parent for the clock
  * @clk_id	Clock ID
  * @parent	Parent ID
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_clock_set_parent(uint32_t clk_id, uint32_t parent)
+enum pm_ret_status pm_clock_set_parent(uint32_t clk_id, uint32_t parent,
+				       uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, PM_CLOCK_SETPARENT, clk_id,
-			 parent);
+	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, flag, PM_CLOCK_SETPARENT,
+			 clk_id, parent);
 
 	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 }
@@ -559,15 +626,19 @@
  * pm_clock_get_parent() - Get parent value for the clock
  * @clk_id	Clock ID
  * @parent:	Buffer to store clock parent value
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_clock_get_parent(uint32_t clk_id, uint32_t *parent)
+enum pm_ret_status pm_clock_get_parent(uint32_t clk_id, uint32_t *parent,
+				       uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, PM_CLOCK_GETPARENT, clk_id);
+	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, flag, PM_CLOCK_GETPARENT,
+			 clk_id);
 
 	return pm_ipi_send_sync(primary_proc, payload, parent, 1);
 }
@@ -575,15 +646,19 @@
  * pm_clock_get_rate() - Get the rate value for the clock
  * @clk_id	Clock ID
  * @rate:	Buffer to store clock rate value
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_clock_get_rate(uint32_t clk_id, uint32_t *clk_rate)
+enum pm_ret_status pm_clock_get_rate(uint32_t clk_id, uint32_t *clk_rate,
+				     uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, PM_CLOCK_GETRATE, clk_id);
+	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, flag, PM_CLOCK_GETRATE,
+			 clk_id);
 
 	return pm_ipi_send_sync(primary_proc, payload, clk_rate, 2);
 }
@@ -593,17 +668,19 @@
  * @clk_id	PLL clock ID
  * @param	PLL parameter ID
  * @value	Value to set for PLL parameter
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
 enum pm_ret_status pm_pll_set_param(uint32_t clk_id, uint32_t param,
-				    uint32_t value)
+				    uint32_t value, uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD4(payload, LIBPM_MODULE_ID, PM_PLL_SET_PARAMETER, clk_id,
-			 param, value);
+	PM_PACK_PAYLOAD4(payload, LIBPM_MODULE_ID, flag, PM_PLL_SET_PARAMETER,
+			 clk_id, param, value);
 
 	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 }
@@ -613,17 +690,19 @@
  * @clk_id	PLL clock ID
  * @param	PLL parameter ID
  * @value:	Buffer to store PLL parameter value
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
 enum pm_ret_status pm_pll_get_param(uint32_t clk_id, uint32_t param,
-				    uint32_t *value)
+				    uint32_t *value, uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, PM_PLL_GET_PARAMETER, clk_id,
-			 param);
+	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, flag, PM_PLL_GET_PARAMETER,
+			 clk_id, param);
 
 	return pm_ipi_send_sync(primary_proc, payload, value, 1);
 }
@@ -632,16 +711,19 @@
  * pm_pll_set_mode() - Set PLL mode
  * @clk_id	PLL clock ID
  * @mode	PLL mode
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_pll_set_mode(uint32_t clk_id, uint32_t mode)
+enum pm_ret_status pm_pll_set_mode(uint32_t clk_id, uint32_t mode,
+				   uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, PM_PLL_SET_MODE, clk_id,
-			 mode);
+	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, flag, PM_PLL_SET_MODE,
+			 clk_id, mode);
 
 	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 }
@@ -650,15 +732,19 @@
  * pm_pll_get_mode() - Get PLL mode
  * @clk_id	PLL clock ID
  * @mode:	Buffer to store PLL mode
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_pll_get_mode(uint32_t clk_id, uint32_t *mode)
+enum pm_ret_status pm_pll_get_mode(uint32_t clk_id, uint32_t *mode,
+				   uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, PM_PLL_GET_MODE, clk_id);
+	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, flag, PM_PLL_GET_MODE,
+			 clk_id);
 
 	return pm_ipi_send_sync(primary_proc, payload, mode, 1);
 }
@@ -668,16 +754,19 @@
  *			  be powered down forcefully
  * @target	Device ID of the PU node to be forced powered down.
  * @ack		Flag to specify whether acknowledge is requested
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_force_powerdown(uint32_t target, uint8_t ack)
+enum pm_ret_status pm_force_powerdown(uint32_t target, uint8_t ack,
+				      uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, PM_FORCE_POWERDOWN, target,
-			 ack);
+	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, flag, PM_FORCE_POWERDOWN,
+			 target, ack);
 
 	if (ack == IPI_BLOCKING)
 		return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
@@ -689,10 +778,13 @@
  * pm_system_shutdown() - PM call to request a system shutdown or restart
  * @type	Shutdown or restart? 0=shutdown, 1=restart, 2=setscope
  * @subtype	Scope: 0=APU-subsystem, 1=PS, 2=system
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_system_shutdown(uint32_t type, uint32_t subtype)
+enum pm_ret_status pm_system_shutdown(uint32_t type, uint32_t subtype,
+				      uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
@@ -703,8 +795,8 @@
 	}
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, PM_SYSTEM_SHUTDOWN, type,
-			 subtype);
+	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, flag, PM_SYSTEM_SHUTDOWN,
+			 type, subtype);
 
 	return pm_ipi_send_non_blocking(primary_proc, payload);
 }
@@ -716,11 +808,13 @@
 * @arg2	Argument 2 to requested query data call
 * @arg3	Argument 3 to requested query data call
 * @data	Returned output data
+* @flag 0 - Call from secure source
+*	1 - Call from non-secure source
 *
 * This function returns requested data.
 */
 enum pm_ret_status pm_query_data(uint32_t qid, uint32_t arg1, uint32_t arg2,
-				 uint32_t arg3, uint32_t *data)
+				 uint32_t arg3, uint32_t *data, uint32_t flag)
 {
 	uint32_t ret;
 	uint32_t version;
@@ -728,10 +822,10 @@
 	uint32_t fw_api_version;
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD5(payload, LIBPM_MODULE_ID, PM_QUERY_DATA, qid, arg1,
-			 arg2, arg3);
+	PM_PACK_PAYLOAD5(payload, LIBPM_MODULE_ID, flag, PM_QUERY_DATA, qid,
+			 arg1, arg2, arg3);
 
-	ret = pm_feature_check(PM_QUERY_DATA, &version);
+	ret = pm_feature_check(PM_QUERY_DATA, &version, flag);
 	if (PM_RET_SUCCESS == ret) {
 		fw_api_version = version & 0xFFFF ;
 		if ((2U == fw_api_version) &&
@@ -755,29 +849,32 @@
  * @arg1	Argument 1 to requested IOCTL call
  * @arg2	Argument 2 to requested IOCTL call
  * @value	Returned output value
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * This function calls IOCTL to firmware for device control and configuration.
  *
  * @return	Returns status, either success or error+reason
  */
 enum pm_ret_status pm_api_ioctl(uint32_t device_id, uint32_t ioctl_id,
-				uint32_t arg1, uint32_t arg2, uint32_t *value)
+				uint32_t arg1, uint32_t arg2, uint32_t *value,
+				uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	switch (ioctl_id) {
 	case IOCTL_SET_PLL_FRAC_MODE:
-		return pm_pll_set_mode(arg1, arg2);
+		return pm_pll_set_mode(arg1, arg2, flag);
 	case IOCTL_GET_PLL_FRAC_MODE:
-		return pm_pll_get_mode(arg1, value);
+		return pm_pll_get_mode(arg1, value, flag);
 	case IOCTL_SET_PLL_FRAC_DATA:
-		return pm_pll_set_param(arg1, PM_PLL_PARAM_DATA, arg2);
+		return pm_pll_set_param(arg1, PM_PLL_PARAM_DATA, arg2, flag);
 	case IOCTL_GET_PLL_FRAC_DATA:
-		return pm_pll_get_param(arg1, PM_PLL_PARAM_DATA, value);
+		return pm_pll_get_param(arg1, PM_PLL_PARAM_DATA, value, flag);
 	default:
 		/* Send request to the PMC */
-		PM_PACK_PAYLOAD5(payload, LIBPM_MODULE_ID, PM_IOCTL, device_id,
-				 ioctl_id, arg1, arg2);
+		PM_PACK_PAYLOAD5(payload, LIBPM_MODULE_ID, flag, PM_IOCTL,
+				 device_id, ioctl_id, arg1, arg2);
 		return pm_ipi_send_sync(primary_proc, payload, value, 1);
 	}
 }
@@ -787,16 +884,18 @@
  * @target	Device id of the targeted PU or subsystem
  * @wkup_node	Device id of the wakeup peripheral
  * @enable	Enable or disable the specified peripheral as wake source
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
 enum pm_ret_status pm_set_wakeup_source(uint32_t target, uint32_t wkup_device,
-					uint8_t enable)
+					uint8_t enable, uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
-	PM_PACK_PAYLOAD4(payload, LIBPM_MODULE_ID, PM_SET_WAKEUP_SOURCE, target,
-			 wkup_device, enable);
+	PM_PACK_PAYLOAD4(payload, LIBPM_MODULE_ID, flag, PM_SET_WAKEUP_SOURCE,
+			 target, wkup_device, enable);
 	return pm_ipi_send(primary_proc, payload);
 }
 
@@ -804,15 +903,17 @@
  * pm_get_chipid() - Read silicon ID registers
  * @value       Buffer for return values. Must be large enough
  *		to hold 8 bytes.
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return      Returns silicon ID registers
  */
-enum pm_ret_status pm_get_chipid(uint32_t *value)
+enum pm_ret_status pm_get_chipid(uint32_t *value, uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD1(payload, LIBPM_MODULE_ID, PM_GET_CHIPID);
+	PM_PACK_PAYLOAD1(payload, LIBPM_MODULE_ID, flag, PM_GET_CHIPID);
 
 	return pm_ipi_send_sync(primary_proc, payload, value, 2);
 }
@@ -821,10 +922,13 @@
  * pm_feature_check() - Returns the supported API version if supported
  * @api_id	API ID to check
  * @value	Returned supported API version
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_feature_check(uint32_t api_id, unsigned int *version)
+enum pm_ret_status pm_feature_check(uint32_t api_id, unsigned int *version,
+				    uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT], fw_api_version;
 	uint32_t status;
@@ -884,7 +988,8 @@
 		return PM_RET_ERROR_NOFEATURE;
 	}
 
-	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, PM_FEATURE_CHECK, api_id);
+	PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, flag,
+			 PM_FEATURE_CHECK, api_id);
 
 	status = pm_ipi_send_sync(primary_proc, payload, &fw_api_version, 1);
 	if (status != PM_RET_SUCCESS)
@@ -903,16 +1008,18 @@
  * src:        Source device of pdi(DDR, OCM, SD etc)
  * address_low: lower 32-bit Linear memory space address
  * address_high: higher 32-bit Linear memory space address
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return      Returns status, either success or error+reason
  */
-enum pm_ret_status pm_load_pdi(uint32_t src,
-			       uint32_t address_low, uint32_t address_high)
+enum pm_ret_status pm_load_pdi(uint32_t src, uint32_t address_low,
+			       uint32_t address_high, uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMU */
-	PM_PACK_PAYLOAD4(payload, LOADER_MODULE_ID, PM_LOAD_PDI, src,
+	PM_PACK_PAYLOAD4(payload, LOADER_MODULE_ID, flag, PM_LOAD_PDI, src,
 			 address_high, address_low);
 	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 }
@@ -925,18 +1032,20 @@
  *              (power, temperature and latency)
  * @result      Returns the operating characteristic for the requested device,
  *              specified by the type
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return      Returns status, either success or error+reason
  */
 enum pm_ret_status pm_get_op_characteristic(uint32_t device_id,
 					    enum pm_opchar_type type,
-					    uint32_t *result)
+					    uint32_t *result, uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, PM_GET_OP_CHARACTERISTIC,
-			 device_id, type);
+	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, flag,
+			 PM_GET_OP_CHARACTERISTIC, device_id, type);
 	return pm_ipi_send_sync(primary_proc, payload, result, 1);
 }
 
@@ -946,15 +1055,18 @@
  *			  used by that CPU.
  * @device_id	Device ID
  * @latency	Latency value
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_set_max_latency(uint32_t device_id, uint32_t latency)
+enum pm_ret_status pm_set_max_latency(uint32_t device_id, uint32_t latency,
+				      uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, PM_SET_MAX_LATENCY,
+	PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, flag, PM_SET_MAX_LATENCY,
 			 device_id, latency);
 
 	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
@@ -967,16 +1079,19 @@
  * @event	Event in question
  * @wake	Wake subsystem upon capturing the event if value 1
  * @enable	Enable the registration for value 1, disable for value 0
+ * @flag	0 - Call from secure source
+ *		1 - Call from non-secure source
  *
  * @return	Returns status, either success or error+reason
  */
 enum pm_ret_status pm_register_notifier(uint32_t device_id, uint32_t event,
-					uint32_t wake, uint32_t enable)
+					uint32_t wake, uint32_t enable,
+					uint32_t flag)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
 	/* Send request to the PMC */
-	PM_PACK_PAYLOAD5(payload, LIBPM_MODULE_ID, PM_REGISTER_NOTIFIER,
+	PM_PACK_PAYLOAD5(payload, LIBPM_MODULE_ID, flag, PM_REGISTER_NOTIFIER,
 			 device_id, event, wake, enable);
 
 	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
diff --git a/plat/xilinx/versal/pm_service/pm_api_sys.h b/plat/xilinx/versal/pm_service/pm_api_sys.h
index 84867b6..5a92704 100644
--- a/plat/xilinx/versal/pm_service/pm_api_sys.h
+++ b/plat/xilinx/versal/pm_service/pm_api_sys.h
@@ -14,67 +14,86 @@
  * PM API function declarations
  **********************************************************/
 
-enum pm_ret_status pm_get_api_version(unsigned int *version);
-enum pm_ret_status pm_init_finalize(void);
+enum pm_ret_status pm_get_api_version(unsigned int *version, uint32_t flag);
+enum pm_ret_status pm_init_finalize(uint32_t flag);
 enum pm_ret_status pm_self_suspend(uint32_t nid,
 				   unsigned int latency,
 				   unsigned int state,
-				   uintptr_t address);
-enum pm_ret_status pm_abort_suspend(enum pm_abort_reason reason);
+				   uintptr_t address, uint32_t flag);
+enum pm_ret_status pm_abort_suspend(enum pm_abort_reason reason, uint32_t flag);
 enum pm_ret_status pm_req_suspend(uint32_t target,
 				  uint8_t ack,
 				  unsigned int latency,
-				  unsigned int state);
+				  unsigned int state, uint32_t flag);
 enum pm_ret_status pm_req_wakeup(uint32_t target, uint32_t set_address,
-				 uintptr_t address, uint8_t ack);
+				 uintptr_t address, uint8_t ack, uint32_t flag);
 enum pm_ret_status pm_set_wakeup_source(uint32_t target, uint32_t device_id,
-					uint8_t enable);
+					uint8_t enable, uint32_t flag);
 enum pm_ret_status pm_request_device(uint32_t device_id, uint32_t capabilities,
-				     uint32_t qos, uint32_t ack);
-enum pm_ret_status pm_release_device(uint32_t device_id);
+				     uint32_t qos, uint32_t ack, uint32_t flag);
+enum pm_ret_status pm_release_device(uint32_t device_id, uint32_t flag);
 enum pm_ret_status pm_set_requirement(uint32_t device_id, uint32_t capabilities,
-				      uint32_t latency, uint32_t qos);
-enum pm_ret_status pm_get_device_status(uint32_t device_id, uint32_t *response);
-enum pm_ret_status pm_reset_assert(uint32_t reset, bool assert);
-enum pm_ret_status pm_reset_get_status(uint32_t reset, uint32_t *status);
-void pm_get_callbackdata(uint32_t *data, size_t count);
-enum pm_ret_status pm_pinctrl_request(uint32_t pin);
-enum pm_ret_status pm_pinctrl_release(uint32_t pin);
-enum pm_ret_status pm_pinctrl_set_function(uint32_t pin, uint32_t function);
-enum pm_ret_status pm_pinctrl_get_function(uint32_t pin, uint32_t *function);
+				      uint32_t latency, uint32_t qos,
+				      uint32_t flag);
+enum pm_ret_status pm_get_device_status(uint32_t device_id, uint32_t *response,
+					uint32_t flag);
+enum pm_ret_status pm_reset_assert(uint32_t reset, bool assert, uint32_t flag);
+enum pm_ret_status pm_reset_get_status(uint32_t reset, uint32_t *status,
+				       uint32_t flag);
+void pm_get_callbackdata(uint32_t *data, size_t count, uint32_t flag);
+enum pm_ret_status pm_pinctrl_request(uint32_t pin, uint32_t flag);
+enum pm_ret_status pm_pinctrl_release(uint32_t pin, uint32_t flag);
+enum pm_ret_status pm_pinctrl_set_function(uint32_t pin, uint32_t function,
+					   uint32_t flag);
+enum pm_ret_status pm_pinctrl_get_function(uint32_t pin, uint32_t *function,
+					   uint32_t flag);
 enum pm_ret_status pm_pinctrl_set_pin_param(uint32_t pin, uint32_t param,
-					    uint32_t value);
+					    uint32_t value, uint32_t flag);
 enum pm_ret_status pm_pinctrl_get_pin_param(uint32_t pin, uint32_t param,
-					    uint32_t *value);
-enum pm_ret_status pm_clock_enable(uint32_t clk_id);
-enum pm_ret_status pm_clock_disable(uint32_t clk_id);
-enum pm_ret_status pm_clock_get_state(uint32_t clk_id, uint32_t *state);
-enum pm_ret_status pm_clock_set_divider(uint32_t clk_id, uint32_t divider);
-enum pm_ret_status pm_clock_get_divider(uint32_t clk_id, uint32_t *divider);
-enum pm_ret_status pm_clock_set_parent(uint32_t clk_id, uint32_t parent);
-enum pm_ret_status pm_clock_get_parent(uint32_t clk_id, uint32_t *parent);
-enum pm_ret_status pm_clock_get_rate(uint32_t clk_id, uint32_t *clk_rate);
+					    uint32_t *value, uint32_t flag);
+enum pm_ret_status pm_clock_enable(uint32_t clk_id, uint32_t flag);
+enum pm_ret_status pm_clock_disable(uint32_t clk_id, uint32_t flag);
+enum pm_ret_status pm_clock_get_state(uint32_t clk_id, uint32_t *state,
+				      uint32_t flag);
+enum pm_ret_status pm_clock_set_divider(uint32_t clk_id, uint32_t divider,
+					uint32_t flag);
+enum pm_ret_status pm_clock_get_divider(uint32_t clk_id, uint32_t *divider,
+					uint32_t flag);
+enum pm_ret_status pm_clock_set_parent(uint32_t clk_id, uint32_t parent,
+				       uint32_t flag);
+enum pm_ret_status pm_clock_get_parent(uint32_t clk_id, uint32_t *parent,
+				       uint32_t flag);
+enum pm_ret_status pm_clock_get_rate(uint32_t clk_id, uint32_t *clk_rate,
+				     uint32_t flag);
 enum pm_ret_status pm_pll_set_param(uint32_t clk_id, uint32_t param,
-				    uint32_t value);
+				    uint32_t value, uint32_t flag);
 enum pm_ret_status pm_pll_get_param(uint32_t clk_id, uint32_t param,
-				    uint32_t *value);
-enum pm_ret_status pm_pll_set_mode(uint32_t clk_id, uint32_t mode);
-enum pm_ret_status pm_pll_get_mode(uint32_t clk_id, uint32_t *mode);
-enum pm_ret_status pm_force_powerdown(uint32_t target, uint8_t ack);
-enum pm_ret_status pm_system_shutdown(uint32_t type, uint32_t subtype);
+				    uint32_t *value, uint32_t flag);
+enum pm_ret_status pm_pll_set_mode(uint32_t clk_id, uint32_t mode,
+				   uint32_t flag);
+enum pm_ret_status pm_pll_get_mode(uint32_t clk_id, uint32_t *mode,
+				   uint32_t flag);
+enum pm_ret_status pm_force_powerdown(uint32_t target, uint8_t ack,
+				      uint32_t flag);
+enum pm_ret_status pm_system_shutdown(uint32_t type, uint32_t subtype,
+				      uint32_t flag);
 enum pm_ret_status pm_api_ioctl(uint32_t device_id, uint32_t ioctl_id,
-				uint32_t arg1, uint32_t arg2, uint32_t *value);
+				uint32_t arg1, uint32_t arg2, uint32_t *value,
+				uint32_t flag);
 enum pm_ret_status pm_query_data(uint32_t qid, uint32_t arg1, uint32_t arg2,
-				 uint32_t arg3, uint32_t *data);
+				 uint32_t arg3, uint32_t *data, uint32_t flag);
 unsigned int pm_get_shutdown_scope(void);
-enum pm_ret_status pm_get_chipid(uint32_t *value);
-enum pm_ret_status pm_feature_check(uint32_t api_id, unsigned int *version);
+enum pm_ret_status pm_get_chipid(uint32_t *value, uint32_t flag);
+enum pm_ret_status pm_feature_check(uint32_t api_id, unsigned int *version,
+				    uint32_t flag);
 enum pm_ret_status pm_load_pdi(uint32_t src, uint32_t address_low,
-			       uint32_t address_high);
+			       uint32_t address_high, uint32_t flag);
 enum pm_ret_status pm_get_op_characteristic(uint32_t device_id,
 					    enum pm_opchar_type type,
-					    uint32_t *result);
-enum pm_ret_status pm_set_max_latency(uint32_t device_id, uint32_t latency);
+					    uint32_t *result, uint32_t flag);
+enum pm_ret_status pm_set_max_latency(uint32_t device_id, uint32_t latency,
+				      uint32_t flag);
 enum pm_ret_status pm_register_notifier(uint32_t device_id, uint32_t event,
-					uint32_t wake, uint32_t enable);
+					uint32_t wake, uint32_t enable,
+					uint32_t flag);
 #endif /* PM_API_SYS_H */
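
Each of these APIs now takes a trailing flag argument (0 for a secure caller, 1 for a non-secure caller) which the PM_PACK_PAYLOAD* macros fold into the IPI payload header instead of consuming a separate payload word. The sketch below is an illustration only: the bit positions of the module and API identifiers are assumptions, and only the bit-24 position of the flag is taken from the comment added to the Versal SMC handler further below.

    /* Illustrative sketch only; not the real PM_PACK_PAYLOAD macros. */
    #include <stdint.h>

    #define SECURE_FLAG        0U   /* 0 - call from a secure source */
    #define NON_SECURE_FLAG    1U   /* 1 - call from a non-secure source */
    #define PM_HDR_FLAG_SHIFT  24U  /* "BIT24" noted in the SMC handler */

    static inline uint32_t pm_pack_header(uint32_t module_id, uint32_t flag,
                                          uint32_t api_id)
    {
        /* Assumed layout: API id in [7:0], module id above it, flag at bit 24. */
        return (api_id & 0xFFU) | (module_id << 8U) |
               ((flag & 0x1U) << PM_HDR_FLAG_SHIFT);
    }

With such a layout, a call like PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, flag, PM_PLL_GET_MODE, clk_id) tags the request with the caller's security state without using up one of the limited payload arguments.
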
diff --git a/plat/xilinx/versal/pm_service/pm_client.c b/plat/xilinx/versal/pm_service/pm_client.c
index 9ab921e..f6c3148 100644
--- a/plat/xilinx/versal/pm_service/pm_client.c
+++ b/plat/xilinx/versal/pm_service/pm_client.c
@@ -149,7 +149,8 @@
 				/* Get device ID from node index */
 				device_id = PERIPH_DEVID(node_idx);
 				ret = pm_set_wakeup_source(node_id,
-							   device_id, 1);
+							   device_id, 1,
+							   SECURE_FLAG);
 				pm_wakeup_nodes_set[node_idx] = !ret;
 			}
 		}
diff --git a/plat/xilinx/versal/pm_service/pm_svc_main.c b/plat/xilinx/versal/pm_service/pm_svc_main.c
index 2ed6d27..55a0956 100644
--- a/plat/xilinx/versal/pm_service/pm_svc_main.c
+++ b/plat/xilinx/versal/pm_service/pm_svc_main.c
@@ -71,6 +71,7 @@
 	enum pm_ret_status ret;
 
 	uint32_t pm_arg[4];
+	uint32_t security_flag = SECURE_FLAG;
 
 	/* Handle case where PM wasn't initialized properly */
 	if (!pm_up)
@@ -81,57 +82,67 @@
 	pm_arg[2] = (uint32_t)x2;
 	pm_arg[3] = (uint32_t)(x2 >> 32);
 
+	/*
+	 * Mark BIT24 of the payload (i.e. the 1st bit of pm_arg[3]) as
+	 * non-secure (1) if the SMC call comes from a non-secure source.
+	 */
+	if (is_caller_non_secure(flags)) {
+		security_flag = NON_SECURE_FLAG;
+	}
+
 	switch (smc_fid & FUNCID_NUM_MASK) {
 	/* PM API Functions */
 	case PM_SELF_SUSPEND:
 		ret = pm_self_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
-				      pm_arg[3]);
+				      pm_arg[3], security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_FORCE_POWERDOWN:
-		ret = pm_force_powerdown(pm_arg[0], pm_arg[1]);
+		ret = pm_force_powerdown(pm_arg[0], pm_arg[1], security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_REQ_SUSPEND:
 		ret = pm_req_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
-				     pm_arg[3]);
+				     pm_arg[3], security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_ABORT_SUSPEND:
-		ret = pm_abort_suspend(pm_arg[0]);
+		ret = pm_abort_suspend(pm_arg[0], security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_SYSTEM_SHUTDOWN:
-		ret = pm_system_shutdown(pm_arg[0], pm_arg[1]);
+		ret = pm_system_shutdown(pm_arg[0], pm_arg[1], security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_REQ_WAKEUP:
-		ret = pm_req_wakeup(pm_arg[0], pm_arg[1], pm_arg[2], pm_arg[3]);
+		ret = pm_req_wakeup(pm_arg[0], pm_arg[1], pm_arg[2], pm_arg[3],
+				    security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_SET_WAKEUP_SOURCE:
-		ret = pm_set_wakeup_source(pm_arg[0], pm_arg[1], pm_arg[2]);
+		ret = pm_set_wakeup_source(pm_arg[0], pm_arg[1], pm_arg[2],
+					   security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_REQUEST_DEVICE:
 		ret = pm_request_device(pm_arg[0], pm_arg[1], pm_arg[2],
-					pm_arg[3]);
+					pm_arg[3], security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_RELEASE_DEVICE:
-		ret = pm_release_device(pm_arg[0]);
+		ret = pm_release_device(pm_arg[0], security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_SET_REQUIREMENT:
 		ret = pm_set_requirement(pm_arg[0], pm_arg[1], pm_arg[2],
-					 pm_arg[3]);
+					 pm_arg[3], security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_GET_API_VERSION:
 	{
 		uint32_t api_version;
 
-		ret = pm_get_api_version(&api_version);
+		ret = pm_get_api_version(&api_version, security_flag);
 		SMC_RET1(handle, (uint64_t)PM_RET_SUCCESS |
 				 ((uint64_t)api_version << 32));
 	}
@@ -140,68 +151,72 @@
 	{
 		uint32_t buff[3];
 
-		ret = pm_get_device_status(pm_arg[0], buff);
+		ret = pm_get_device_status(pm_arg[0], buff, security_flag);
 		SMC_RET2(handle, (uint64_t)ret | ((uint64_t)buff[0] << 32),
 			 (uint64_t)buff[1] | ((uint64_t)buff[2] << 32));
 	}
 
 	case PM_RESET_ASSERT:
-		ret = pm_reset_assert(pm_arg[0], pm_arg[1]);
+		ret = pm_reset_assert(pm_arg[0], pm_arg[1], security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_RESET_GET_STATUS:
 	{
 		uint32_t reset_status;
 
-		ret = pm_reset_get_status(pm_arg[0], &reset_status);
+		ret = pm_reset_get_status(pm_arg[0], &reset_status,
+					  security_flag);
 		SMC_RET1(handle, (uint64_t)ret |
 			 ((uint64_t)reset_status << 32));
 	}
 
 	case PM_INIT_FINALIZE:
-		ret = pm_init_finalize();
+		ret = pm_init_finalize(security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_GET_CALLBACK_DATA:
 	{
 		uint32_t result[4] = {0};
 
-		pm_get_callbackdata(result, ARRAY_SIZE(result));
+		pm_get_callbackdata(result, ARRAY_SIZE(result), security_flag);
 		SMC_RET2(handle,
 			 (uint64_t)result[0] | ((uint64_t)result[1] << 32),
 			 (uint64_t)result[2] | ((uint64_t)result[3] << 32));
 	}
 
 	case PM_PINCTRL_REQUEST:
-		ret = pm_pinctrl_request(pm_arg[0]);
+		ret = pm_pinctrl_request(pm_arg[0], security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_PINCTRL_RELEASE:
-		ret = pm_pinctrl_release(pm_arg[0]);
+		ret = pm_pinctrl_release(pm_arg[0], security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_PINCTRL_GET_FUNCTION:
 	{
 		uint32_t value = 0;
 
-		ret = pm_pinctrl_get_function(pm_arg[0], &value);
+		ret = pm_pinctrl_get_function(pm_arg[0], &value, security_flag);
 		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)value) << 32);
 	}
 
 	case PM_PINCTRL_SET_FUNCTION:
-		ret = pm_pinctrl_set_function(pm_arg[0], pm_arg[1]);
+		ret = pm_pinctrl_set_function(pm_arg[0], pm_arg[1],
+					      security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_PINCTRL_CONFIG_PARAM_GET:
 	{
 		uint32_t value;
 
-		ret = pm_pinctrl_get_pin_param(pm_arg[0], pm_arg[1], &value);
+		ret = pm_pinctrl_get_pin_param(pm_arg[0], pm_arg[1], &value,
+					       security_flag);
 		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)value) << 32);
 	}
 
 	case PM_PINCTRL_CONFIG_PARAM_SET:
-		ret = pm_pinctrl_set_pin_param(pm_arg[0], pm_arg[1], pm_arg[2]);
+		ret = pm_pinctrl_set_pin_param(pm_arg[0], pm_arg[1], pm_arg[2],
+					       security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_IOCTL:
@@ -209,7 +224,7 @@
 		uint32_t value;
 
 		ret = pm_api_ioctl(pm_arg[0], pm_arg[1], pm_arg[2],
-				   pm_arg[3], &value);
+				   pm_arg[3], &value, security_flag);
 		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)value) << 32);
 	}
 
@@ -218,49 +233,49 @@
 		uint32_t data[8] = { 0 };
 
 		ret = pm_query_data(pm_arg[0], pm_arg[1], pm_arg[2],
-				      pm_arg[3], data);
+				      pm_arg[3], data, security_flag);
 
 		SMC_RET2(handle, (uint64_t)ret  | ((uint64_t)data[0] << 32),
 				 (uint64_t)data[1] | ((uint64_t)data[2] << 32));
 
 	}
 	case PM_CLOCK_ENABLE:
-		ret = pm_clock_enable(pm_arg[0]);
+		ret = pm_clock_enable(pm_arg[0], security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_CLOCK_DISABLE:
-		ret = pm_clock_disable(pm_arg[0]);
+		ret = pm_clock_disable(pm_arg[0], security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_CLOCK_GETSTATE:
 	{
 		uint32_t value;
 
-		ret = pm_clock_get_state(pm_arg[0], &value);
+		ret = pm_clock_get_state(pm_arg[0], &value, security_flag);
 		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)value) << 32);
 	}
 
 	case PM_CLOCK_SETDIVIDER:
-		ret = pm_clock_set_divider(pm_arg[0], pm_arg[1]);
+		ret = pm_clock_set_divider(pm_arg[0], pm_arg[1], security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_CLOCK_GETDIVIDER:
 	{
 		uint32_t value;
 
-		ret = pm_clock_get_divider(pm_arg[0], &value);
+		ret = pm_clock_get_divider(pm_arg[0], &value, security_flag);
 		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)value) << 32);
 	}
 
 	case PM_CLOCK_SETPARENT:
-		ret = pm_clock_set_parent(pm_arg[0], pm_arg[1]);
+		ret = pm_clock_set_parent(pm_arg[0], pm_arg[1], security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_CLOCK_GETPARENT:
 	{
 		uint32_t value;
 
-		ret = pm_clock_get_parent(pm_arg[0], &value);
+		ret = pm_clock_get_parent(pm_arg[0], &value, security_flag);
 		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)value) << 32);
 	}
 
@@ -268,32 +283,34 @@
 	{
 		uint32_t rate[2] = { 0 };
 
-		ret = pm_clock_get_rate(pm_arg[0], rate);
+		ret = pm_clock_get_rate(pm_arg[0], rate, security_flag);
 		SMC_RET2(handle, (uint64_t)ret | ((uint64_t)rate[0] << 32),
 			 rate[1]);
 	}
 
 	case PM_PLL_SET_PARAMETER:
-		ret = pm_pll_set_param(pm_arg[0], pm_arg[1], pm_arg[2]);
+		ret = pm_pll_set_param(pm_arg[0], pm_arg[1], pm_arg[2],
+				       security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_PLL_GET_PARAMETER:
 	{
 		uint32_t value;
 
-		ret = pm_pll_get_param(pm_arg[0], pm_arg[1], &value);
+		ret = pm_pll_get_param(pm_arg[0], pm_arg[1], &value,
+				       security_flag);
 		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)value << 32));
 	}
 
 	case PM_PLL_SET_MODE:
-		ret = pm_pll_set_mode(pm_arg[0], pm_arg[1]);
+		ret = pm_pll_set_mode(pm_arg[0], pm_arg[1], security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_PLL_GET_MODE:
 	{
 		uint32_t mode;
 
-		ret = pm_pll_get_mode(pm_arg[0], &mode);
+		ret = pm_pll_get_mode(pm_arg[0], &mode, security_flag);
 		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)mode << 32));
 	}
 
@@ -305,7 +322,7 @@
 	{
 		uint32_t result[2];
 
-		ret = pm_get_chipid(result);
+		ret = pm_get_chipid(result, security_flag);
 		SMC_RET2(handle, (uint64_t)ret | ((uint64_t)result[0] << 32),
 			 result[1]);
 	}
@@ -314,13 +331,14 @@
 	{
 		uint32_t version;
 
-		ret = pm_feature_check(pm_arg[0], &version);
+		ret = pm_feature_check(pm_arg[0], &version, security_flag);
 		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)version << 32));
 	}
 
 	case PM_LOAD_PDI:
 	{
-		ret = pm_load_pdi(pm_arg[0], pm_arg[1], pm_arg[2]);
+		ret = pm_load_pdi(pm_arg[0], pm_arg[1], pm_arg[2],
+				  security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 	}
 
@@ -328,19 +346,21 @@
 	{
 		uint32_t result;
 
-		ret = pm_get_op_characteristic(pm_arg[0], pm_arg[1], &result);
+		ret = pm_get_op_characteristic(pm_arg[0], pm_arg[1], &result,
+					       security_flag);
 		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)result << 32));
 	}
 
 	case PM_SET_MAX_LATENCY:
 	{
-		ret = pm_set_max_latency(pm_arg[0], pm_arg[1]);
+		ret = pm_set_max_latency(pm_arg[0], pm_arg[1], security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 	}
 
 	case PM_REGISTER_NOTIFIER:
 	{
-		ret = pm_register_notifier(pm_arg[0], pm_arg[1], pm_arg[2], pm_arg[3]);
+		ret = pm_register_notifier(pm_arg[0], pm_arg[1], pm_arg[2],
+					   pm_arg[3], security_flag);
 		SMC_RET1(handle, (uint64_t)ret);
 	}
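
Most cases in the handler above pack the PM status into the lower 32 bits of x0 and the API-specific value into the upper 32 bits (larger results spill into x1). A minimal caller-side sketch of unpacking that convention follows; the struct and helper names are hypothetical, not a TF-A API.

    #include <stdint.h>

    /* Hypothetical caller-side view of the packed x0 return value. */
    struct pm_smc_result {
        uint32_t status;    /* PM_RET_* code, bits [31:0] of x0 */
        uint32_t value;     /* API-specific value, bits [63:32] of x0 */
    };

    static struct pm_smc_result pm_unpack_x0(uint64_t x0)
    {
        struct pm_smc_result r = {
            .status = (uint32_t)x0,
            .value = (uint32_t)(x0 >> 32),
        };

        return r;
    }
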
 
diff --git a/plat/xilinx/zynqmp/aarch64/zynqmp_common.c b/plat/xilinx/zynqmp/aarch64/zynqmp_common.c
index d6313a6..339967c 100644
--- a/plat/xilinx/zynqmp/aarch64/zynqmp_common.c
+++ b/plat/xilinx/zynqmp/aarch64/zynqmp_common.c
@@ -194,6 +194,18 @@
 		.name = "39DR",
 	},
 	{
+		.id = 0x7d,
+		.name = "43DR",
+	},
+	{
+		.id = 0x78,
+		.name = "46DR",
+	},
+	{
+		.id = 0x7f,
+		.name = "47DR",
+	},
+	{
 		.id = 0x7b,
 		.name = "48DR",
 	},
diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c
index b0cbf62..29fc238 100644
--- a/services/spd/tspd/tspd_main.c
+++ b/services/spd/tspd/tspd_main.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -92,6 +92,18 @@
  * This function is the handler registered for S-EL1 interrupts by the TSPD. It
  * validates the interrupt and upon success arranges entry into the TSP at
  * 'tsp_sel1_intr_entry()' for handling the interrupt.
+ * Typically, interrupts for a given security state get handled at the same
+ * security exception level when execution is already in that security state.
+ * For example, if a non-secure interrupt fires while the CPU is executing in
+ * NS-EL2, it gets handled in the non-secure world.
+ * However, interrupts belonging to the opposite security state typically
+ * demand a world (context) switch. This is in line with the security principle
+ * which states that a secure interrupt has to be handled in the secure world.
+ * Hence, the TSPD in EL3 expects the context (handle) for a secure interrupt
+ * to be non-secure and vice versa.
+ * However, a race condition between non-secure and secure interrupts can lead
+ * to a scenario where the above assumptions do not hold true. This is
+ * demonstrated below in Note 1.
  ******************************************************************************/
 static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
 					    uint32_t flags,
@@ -101,6 +113,60 @@
 	uint32_t linear_id;
 	tsp_context_t *tsp_ctx;
 
+	/* Get a reference to this cpu's TSP context */
+	linear_id = plat_my_core_pos();
+	tsp_ctx = &tspd_sp_context[linear_id];
+
+#if TSP_NS_INTR_ASYNC_PREEMPT
+
+	/*
+	 * Note 1:
+	 * Under the current interrupt routing model, interrupts from the other
+	 * world are routed to EL3 when TSP_NS_INTR_ASYNC_PREEMPT is enabled.
+	 * Consider the following scenario:
+	 * 1/ A non-secure payload (like TFTF) requests a secure service from
+	 *    the TSP by invoking a yielding SMC call.
+	 * 2/ Later, execution jumps to the TSP in S-EL1 with the help of the
+	 *    TSP Dispatcher in the Secure Monitor (EL3).
+	 * 3/ While the CPU is executing the TSP, a non-secure interrupt fires.
+	 *    This demands a context switch to the non-secure world through the
+	 *    secure monitor.
+	 * 4/ Consequently, the TSP in S-EL1 gets asynchronously pre-empted and
+	 *    execution switches to the secure monitor (EL3).
+	 * 5/ EL3 tries to triage the (non-secure) interrupt based on the
+	 *    highest pending interrupt.
+	 * 6/ However, while the NS interrupt was pending, the secure timer
+	 *    fires, which leaves an S-EL1 interrupt pending as well.
+	 * 7/ Hence, execution jumps to this companion handler of the S-EL1
+	 *    interrupt (i.e., tspd_sel1_interrupt_handler) even though the TSP
+	 *    was pre-empted by a non-secure interrupt.
+	 * 8/ This sequence shows how the TSP can be pre-empted by an S-EL1
+	 *    interrupt indirectly, in an asynchronous way.
+	 * 9/ Hence, the TSP pre-emption by an S-EL1 interrupt is tracked using
+	 *    a per-core boolean variable.
+	 * 10/ This is used to indicate that the SMC call for the TSP service
+	 *    was pre-empted when execution resumes in the non-secure world.
+	 */
+
+	/* Check the security state when the exception was generated */
+	if (get_interrupt_src_ss(flags) == NON_SECURE) {
+		/* Sanity check the pointer to this cpu's context */
+		assert(handle == cm_get_context(NON_SECURE));
+
+		/* Save the non-secure context before entering the TSP */
+		cm_el1_sysregs_context_save(NON_SECURE);
+		tsp_ctx->preempted_by_sel1_intr = false;
+	} else {
+		/* Sanity check the pointer to this cpu's context */
+		assert(handle == cm_get_context(SECURE));
+
+		/* Save the secure context before entering the TSP for S-EL1
+		 * interrupt handling
+		 */
+		cm_el1_sysregs_context_save(SECURE);
+		tsp_ctx->preempted_by_sel1_intr = true;
+	}
+#else
 	/* Check the security state when the exception was generated */
 	assert(get_interrupt_src_ss(flags) == NON_SECURE);
 
@@ -109,10 +175,8 @@
 
 	/* Save the non-secure context before entering the TSP */
 	cm_el1_sysregs_context_save(NON_SECURE);
+#endif
 
-	/* Get a reference to this cpu's TSP context */
-	linear_id = plat_my_core_pos();
-	tsp_ctx = &tspd_sp_context[linear_id];
 	assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));
 
 	/*
@@ -131,7 +195,6 @@
 		tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
 						     CTX_ELR_EL3);
 #if TSP_NS_INTR_ASYNC_PREEMPT
-		/*Need to save the previously interrupted secure context */
 		memcpy(&tsp_ctx->sp_ctx, &tsp_ctx->cpu_ctx, TSPD_SP_CTX_SIZE);
 #endif
 	}
@@ -353,7 +416,20 @@
 		cm_el1_sysregs_context_restore(NON_SECURE);
 		cm_set_next_eret_context(NON_SECURE);
 
+		/* Refer to Note 1 in function tspd_sel1_interrupt_handler(). */
+#if TSP_NS_INTR_ASYNC_PREEMPT
+		if (tsp_ctx->preempted_by_sel1_intr) {
+			/* Reset the flag */
+			tsp_ctx->preempted_by_sel1_intr = false;
+
+			SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
+		} else {
+			SMC_RET0((uint64_t) ns_cpu_context);
+		}
+#else
 		SMC_RET0((uint64_t) ns_cpu_context);
+#endif
+
 
 	/*
 	 * This function ID is used only by the SP to indicate it has
diff --git a/services/spd/tspd/tspd_private.h b/services/spd/tspd/tspd_private.h
index a81eb21..d6c03c9 100644
--- a/services/spd/tspd/tspd_private.h
+++ b/services/spd/tspd/tspd_private.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -188,6 +188,7 @@
 	uint64_t saved_tsp_args[TSP_NUM_ARGS];
 #if TSP_NS_INTR_ASYNC_PREEMPT
 	sp_ctx_regs_t sp_ctx;
+	bool preempted_by_sel1_intr;
 #endif
 } tsp_context_t;
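
A compact model of how the new preempted_by_sel1_intr field is intended to be used is sketched below; the names are simplified stand-ins for the TSPD code above, not the actual control flow.

    #include <stdbool.h>

    /* Per-core stand-in for the tsp_context_t field added above. */
    struct core_state {
        bool preempted_by_sel1_intr;
    };

    /* Entry: the S-EL1 interrupt handler records whether the secure world
     * (the TSP) was the interrupted context. */
    static void note_sel1_interrupt(struct core_state *s, bool secure_interrupted)
    {
        s->preempted_by_sel1_intr = secure_interrupted;
    }

    /* Exit: consumed when resuming the normal world; if set, the in-flight
     * yielding SMC is reported back as SMC_PREEMPTED. */
    static bool consume_preemption_flag(struct core_state *s)
    {
        bool was_preempted = s->preempted_by_sel1_intr;

        s->preempted_by_sel1_intr = false;
        return was_preempted;
    }
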
 
diff --git a/services/std_svc/spmd/spmd_main.c b/services/std_svc/spmd/spmd_main.c
index 6aab558..7b20bf1 100644
--- a/services/std_svc/spmd/spmd_main.c
+++ b/services/std_svc/spmd/spmd_main.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -164,7 +164,6 @@
 	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
 		if (core_id != linear_id) {
 			spm_core_context[core_id].state = SPMC_STATE_OFF;
-			spm_core_context[core_id].secondary_ep.entry_point = 0UL;
 		}
 	}
 
@@ -370,8 +369,8 @@
  ******************************************************************************/
 static uint64_t spmd_ffa_error_return(void *handle, int error_code)
 {
-	SMC_RET8(handle, FFA_ERROR,
-		 FFA_TARGET_INFO_MBZ, error_code,
+	SMC_RET8(handle, (uint32_t) FFA_ERROR,
+		 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
 }
@@ -406,13 +405,6 @@
 	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
 		msg, parm1, parm2, parm3, parm4);
 
-	switch (msg) {
-	case SPMD_DIRECT_MSG_SET_ENTRY_POINT:
-		return spmd_pm_secondary_core_set_ep(parm1, parm2, parm3);
-	default:
-		break;
-	}
-
 	return -EINVAL;
 }
 
@@ -429,6 +421,7 @@
 			  void *handle,
 			  uint64_t flags)
 {
+	unsigned int linear_id = plat_my_core_pos();
 	spmd_spm_core_context_t *ctx = spmd_get_context();
 	bool secure_origin;
 	int32_t ret;
@@ -437,10 +430,12 @@
 	/* Determine which security state this SMC originated from */
 	secure_origin = is_caller_secure(flags);
 
-	INFO("SPM: 0x%x 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
-	     smc_fid, x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5),
-	     SMC_GET_GP(handle, CTX_GPREG_X6),
-	     SMC_GET_GP(handle, CTX_GPREG_X7));
+	VERBOSE("SPM(%u): 0x%x 0x%llx 0x%llx 0x%llx 0x%llx "
+		"0x%llx 0x%llx 0x%llx\n",
+		linear_id, smc_fid, x1, x2, x3, x4,
+		SMC_GET_GP(handle, CTX_GPREG_X5),
+		SMC_GET_GP(handle, CTX_GPREG_X6),
+		SMC_GET_GP(handle, CTX_GPREG_X7));
 
 	switch (smc_fid) {
 	case FFA_ERROR:
@@ -470,14 +465,16 @@
 			(ctx->state == SPMC_STATE_RESET)) {
 			ret = FFA_ERROR_NOT_SUPPORTED;
 		} else if (!secure_origin) {
-			ret = MAKE_FFA_VERSION(spmc_attrs.major_version, spmc_attrs.minor_version);
+			ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
+					       spmc_attrs.minor_version);
 		} else {
-			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
+			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
+					       FFA_VERSION_MINOR);
 		}
 
-		SMC_RET8(handle, ret, FFA_TARGET_INFO_MBZ, FFA_TARGET_INFO_MBZ,
-			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
-			 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
+		SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
+			 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
 		break; /* not reached */
 
 	case FFA_FEATURES:
@@ -492,7 +489,7 @@
 		 */
 		if (!is_ffa_fid(x1)) {
 			return spmd_ffa_error_return(handle,
-						      FFA_ERROR_NOT_SUPPORTED);
+						     FFA_ERROR_NOT_SUPPORTED);
 		}
 
 		/* Forward SMC from Normal world to the SPM Core */
@@ -533,6 +530,28 @@
 
 		break; /* not reached */
 
+	case FFA_SECONDARY_EP_REGISTER_SMC64:
+		if (secure_origin) {
+			ret = spmd_pm_secondary_ep_register(x1);
+
+			if (ret < 0) {
+				SMC_RET8(handle, FFA_ERROR_SMC64,
+					FFA_TARGET_INFO_MBZ, ret,
+					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+					FFA_PARAM_MBZ);
+			} else {
+				SMC_RET8(handle, FFA_SUCCESS_SMC64,
+					FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
+					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+					FFA_PARAM_MBZ);
+			}
+		}
+
+		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+		break; /* Not reached */
+
 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
 		if (secure_origin && spmd_is_spmc_message(x1)) {
 			ret = spmd_handle_spmc_message(x3, x4,
diff --git a/services/std_svc/spmd/spmd_pm.c b/services/std_svc/spmd/spmd_pm.c
index 5433e5d..074609c 100644
--- a/services/std_svc/spmd/spmd_pm.c
+++ b/services/std_svc/spmd/spmd_pm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -7,8 +7,15 @@
 #include <assert.h>
 #include <errno.h>
 #include <lib/el3_runtime/context_mgmt.h>
+#include <lib/spinlock.h>
 #include "spmd_private.h"
 
+static struct {
+	bool secondary_ep_locked;
+	uintptr_t secondary_ep;
+	spinlock_t lock;
+} g_spmd_pm;
+
 /*******************************************************************************
  * spmd_build_spmc_message
  *
@@ -25,16 +32,16 @@
 }
 
 /*******************************************************************************
- * spmd_pm_secondary_core_set_ep
+ * spmd_pm_secondary_ep_register
  ******************************************************************************/
-int spmd_pm_secondary_core_set_ep(unsigned long long mpidr,
-		uintptr_t entry_point, unsigned long long context)
+int spmd_pm_secondary_ep_register(uintptr_t entry_point)
 {
-	int id = plat_core_pos_by_mpidr(mpidr);
+	int ret = FFA_ERROR_INVALID_PARAMETER;
 
-	if ((id < 0) || ((unsigned int)id >= PLATFORM_CORE_COUNT)) {
-		ERROR("%s inconsistent MPIDR (%llx)\n", __func__, mpidr);
-		return -EINVAL;
+	spin_lock(&g_spmd_pm.lock);
+
+	if (g_spmd_pm.secondary_ep_locked == true) {
+		goto out;
 	}
 
 	/*
@@ -42,27 +49,22 @@
 	 * load_address <= entry_point < load_address + binary_size
 	 */
 	if (!spmd_check_address_in_binary_image(entry_point)) {
-		ERROR("%s entry point is not within image boundaries (%llx)\n",
-		      __func__, mpidr);
-		return -EINVAL;
+		ERROR("%s entry point is not within image boundaries\n",
+			__func__);
+		goto out;
 	}
 
-	spmd_spm_core_context_t *ctx = spmd_get_context_by_mpidr(mpidr);
-	spmd_pm_secondary_ep_t *secondary_ep = &ctx->secondary_ep;
-	if (secondary_ep->locked) {
-		ERROR("%s entry locked (%llx)\n", __func__, mpidr);
-		return -EINVAL;
-	}
+	g_spmd_pm.secondary_ep = entry_point;
+	g_spmd_pm.secondary_ep_locked = true;
 
-	/* Fill new entry to corresponding secondary core id and lock it */
-	secondary_ep->entry_point = entry_point;
-	secondary_ep->context = context;
-	secondary_ep->locked = true;
+	VERBOSE("%s %lx\n", __func__, entry_point);
 
-	VERBOSE("%s %d %llx %lx %llx\n",
-		__func__, id, mpidr, entry_point, context);
+	ret = 0;
 
-	return 0;
+out:
+	spin_unlock(&g_spmd_pm.lock);
+
+	return ret;
 }
 
 /*******************************************************************************
@@ -82,18 +84,20 @@
 	assert(ctx->state != SPMC_STATE_ON);
 	assert(spmc_ep_info != NULL);
 
+	spin_lock(&g_spmd_pm.lock);
+
 	/*
-	 * TODO: this might require locking the spmc_ep_info structure,
-	 * or provisioning one structure per cpu
+	 * Leave open the possibility that the SPMC did not call
+	 * FFA_SECONDARY_EP_REGISTER, in which case the primary core
+	 * entry point address is re-used for booting secondary cores.
 	 */
-	if (ctx->secondary_ep.entry_point == 0UL) {
-		goto exit;
+	if (g_spmd_pm.secondary_ep_locked == true) {
+		spmc_ep_info->pc = g_spmd_pm.secondary_ep;
 	}
 
-	spmc_ep_info->pc = ctx->secondary_ep.entry_point;
+	spin_unlock(&g_spmd_pm.lock);
+
 	cm_setup_context(&ctx->cpu_ctx, spmc_ep_info);
-	write_ctx_reg(get_gpregs_ctx(&ctx->cpu_ctx), CTX_GPREG_X0,
-		      ctx->secondary_ep.context);
 
 	/* Mark CPU as initiating ON operation */
 	ctx->state = SPMC_STATE_ON_PENDING;
@@ -106,7 +110,6 @@
 		return;
 	}
 
-exit:
 	ctx->state = SPMC_STATE_ON;
 
 	VERBOSE("CPU %u on!\n", linear_id);
@@ -124,10 +127,6 @@
 	assert(ctx != NULL);
 	assert(ctx->state != SPMC_STATE_OFF);
 
-	if (ctx->secondary_ep.entry_point == 0UL) {
-		goto exit;
-	}
-
 	/* Build an SPMD to SPMC direct message request. */
 	spmd_build_spmc_message(get_gpregs_ctx(&ctx->cpu_ctx), PSCI_CPU_OFF);
 
@@ -136,9 +135,15 @@
 		ERROR("%s failed (%llu) on CPU%u\n", __func__, rc, linear_id);
 	}
 
-	/* TODO expect FFA_DIRECT_MSG_RESP returned from SPMC */
+	/* Expect a direct message response from the SPMC. */
+	u_register_t ffa_resp_func = read_ctx_reg(get_gpregs_ctx(&ctx->cpu_ctx),
+						  CTX_GPREG_X0);
+	if (ffa_resp_func != FFA_MSG_SEND_DIRECT_RESP_SMC32) {
+		ERROR("%s invalid SPMC response (%lx).\n",
+			__func__, ffa_resp_func);
+		return -EINVAL;
+	}
 
-exit:
 	ctx->state = SPMC_STATE_OFF;
 
 	VERBOSE("CPU %u off!\n", linear_id);
diff --git a/services/std_svc/spmd/spmd_private.h b/services/std_svc/spmd/spmd_private.h
index eff0dd9..6d51a58 100644
--- a/services/std_svc/spmd/spmd_private.h
+++ b/services/std_svc/spmd/spmd_private.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -42,12 +42,6 @@
 	SPMC_STATE_ON
 } spmc_state_t;
 
-typedef struct spmd_pm_secondary_ep {
-	uintptr_t entry_point;
-	uintptr_t context;
-	bool locked;
-} spmd_pm_secondary_ep_t;
-
 /*
  * Data structure used by the SPM dispatcher (SPMD) in EL3 to track context of
  * the SPM core (SPMC) at the next lower EL.
@@ -56,7 +50,6 @@
 	uint64_t c_rt_ctx;
 	cpu_context_t cpu_ctx;
 	spmc_state_t state;
-	spmd_pm_secondary_ep_t secondary_ep;
 } spmd_spm_core_context_t;
 
 /*
@@ -69,7 +62,6 @@
 #define SPMC_SECURE_ID_SHIFT			U(15)
 
 #define SPMD_DIRECT_MSG_ENDPOINT_ID		U(FFA_ENDPOINT_ID_MAX - 1)
-#define SPMD_DIRECT_MSG_SET_ENTRY_POINT		U(1)
 
 /* Functions used to enter/exit SPMC synchronously */
 uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *ctx);
@@ -94,8 +86,7 @@
 /* SPMC context on current CPU get helper */
 spmd_spm_core_context_t *spmd_get_context(void);
 
-int spmd_pm_secondary_core_set_ep(unsigned long long mpidr,
-		uintptr_t entry_point, unsigned long long context);
+int spmd_pm_secondary_ep_register(uintptr_t entry_point);
 bool spmd_check_address_in_binary_image(uint64_t address);
 
 #endif /* __ASSEMBLER__ */
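
For context, the FFA_SECONDARY_EP_REGISTER_SMC64 handling added in spmd_main.c expects the SPMC to register a single secondary entry point once during boot, and spmd_cpu_on_finish_handler falls back to the primary entry point when no registration happened. A hypothetical SPMC-side invocation is sketched below; ffa_call() and its prototype are assumptions, and the FFA_* identifiers are taken only by name from the code above.

    #include <stdint.h>

    /* FFA_* identifiers are assumed to come from the SPMC's FF-A definitions. */

    /* Assumed SMC wrapper provided by the SPMC runtime; returns x0. */
    uint64_t ffa_call(uint64_t fid, uint64_t x1, uint64_t x2, uint64_t x3,
                      uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7);

    void secondary_cold_entry(void);    /* SPMC secondary boot entry point */

    static int register_secondary_entry_point(void)
    {
        /* x1 carries the 64-bit entry point; remaining arguments are MBZ. */
        uint64_t ret = ffa_call(FFA_SECONDARY_EP_REGISTER_SMC64,
                                (uint64_t)(uintptr_t)&secondary_cold_entry,
                                0, 0, 0, 0, 0, 0);

        /* The SPMD answers with FFA_SUCCESS_SMC64 on success (see above). */
        return (ret == FFA_SUCCESS_SMC64) ? 0 : -1;
    }
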
diff --git a/tools/cert_create/Makefile b/tools/cert_create/Makefile
index c3c8bcf..77d2007 100644
--- a/tools/cert_create/Makefile
+++ b/tools/cert_create/Makefile
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -16,6 +16,12 @@
 include ${MAKE_HELPERS_DIRECTORY}build_macros.mk
 include ${MAKE_HELPERS_DIRECTORY}build_env.mk
 
+ifneq (${PLAT},none)
+TF_PLATFORM_ROOT	:=	../../plat/
+include ${MAKE_HELPERS_DIRECTORY}plat_helpers.mk
+PLAT_CERT_CREATE_HELPER_MK := ${PLAT_DIR}/cert_create_tbbr.mk
+endif
+
 # Common source files.
 OBJECTS := src/cert.o \
            src/cmd_opt.o \
@@ -33,6 +39,10 @@
   $(error Unknown chain of trust ${COT})
 endif
 
+ifneq (,$(wildcard ${PLAT_CERT_CREATE_HELPER_MK}))
+include ${PLAT_CERT_CREATE_HELPER_MK}
+endif
+
 HOSTCCFLAGS := -Wall -std=c99
 
 ifeq (${DEBUG},1)
@@ -51,7 +61,7 @@
 
 # Make soft links and include from local directory otherwise wrong headers
 # could get pulled in from firmware tree.
-INC_DIR := -I ./include -I ${PLAT_INCLUDE} -I ${OPENSSL_DIR}/include
+INC_DIR += -I ./include -I ${PLAT_INCLUDE} -I ${OPENSSL_DIR}/include
 LIB_DIR := -L ${OPENSSL_DIR}/lib
 LIB := -lssl -lcrypto
 
diff --git a/tools/cert_create/include/cert.h b/tools/cert_create/include/cert.h
index daf27a7..e63b474 100644
--- a/tools/cert_create/include/cert.h
+++ b/tools/cert_create/include/cert.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -57,11 +57,20 @@
 
 /* Macro to register the certificates used in the CoT */
 #define REGISTER_COT(_certs) \
-	cert_t *certs = &_certs[0]; \
-	const unsigned int num_certs = sizeof(_certs)/sizeof(_certs[0])
+	cert_t *def_certs = &_certs[0]; \
+	const unsigned int num_def_certs = sizeof(_certs)/sizeof(_certs[0])
+
+/* Macro to register the platform defined certificates used in the CoT */
+#define PLAT_REGISTER_COT(_pdef_certs) \
+	cert_t *pdef_certs = &_pdef_certs[0]; \
+	const unsigned int num_pdef_certs = sizeof(_pdef_certs)/sizeof(_pdef_certs[0])
 
 /* Exported variables */
-extern cert_t *certs;
-extern const unsigned int num_certs;
+extern cert_t *def_certs;
+extern const unsigned int num_def_certs;
+extern cert_t *pdef_certs;
+extern const unsigned int num_pdef_certs;
 
+extern cert_t *certs;
+extern unsigned int num_certs;
 #endif /* CERT_H */
diff --git a/tools/cert_create/include/ext.h b/tools/cert_create/include/ext.h
index 9c0b5c3..e900a6d 100644
--- a/tools/cert_create/include/ext.h
+++ b/tools/cert_create/include/ext.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -75,11 +75,20 @@
 
 /* Macro to register the extensions used in the CoT */
 #define REGISTER_EXTENSIONS(_ext) \
-	ext_t *extensions = &_ext[0]; \
-	const unsigned int num_extensions = sizeof(_ext)/sizeof(_ext[0])
+	ext_t *def_extensions = &_ext[0]; \
+	const unsigned int num_def_extensions = sizeof(_ext)/sizeof(_ext[0])
+
+/* Macro to register the platform defined extensions used in the CoT */
+#define PLAT_REGISTER_EXTENSIONS(_pdef_ext) \
+	ext_t *pdef_extensions = &_pdef_ext[0]; \
+	const unsigned int num_pdef_extensions = sizeof(_pdef_ext)/sizeof(_pdef_ext[0])
 
 /* Exported variables */
-extern ext_t *extensions;
-extern const unsigned int num_extensions;
+extern ext_t *def_extensions;
+extern const unsigned int num_def_extensions;
+extern ext_t *pdef_extensions;
+extern const unsigned int num_pdef_extensions;
 
+extern ext_t *extensions;
+extern unsigned int num_extensions;
 #endif /* EXT_H */
diff --git a/tools/cert_create/include/key.h b/tools/cert_create/include/key.h
index d96d983..128e7f7 100644
--- a/tools/cert_create/include/key.h
+++ b/tools/cert_create/include/key.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -73,11 +73,20 @@
 
 /* Macro to register the keys used in the CoT */
 #define REGISTER_KEYS(_keys) \
-	key_t *keys = &_keys[0]; \
-	const unsigned int num_keys = sizeof(_keys)/sizeof(_keys[0])
+	key_t *def_keys = &_keys[0]; \
+	const unsigned int num_def_keys = sizeof(_keys)/sizeof(_keys[0])
+
+/* Macro to register the platform defined keys used in the CoT */
+#define PLAT_REGISTER_KEYS(_pdef_keys) \
+	key_t *pdef_keys = &_pdef_keys[0]; \
+	const unsigned int num_pdef_keys = sizeof(_pdef_keys)/sizeof(_pdef_keys[0])
 
 /* Exported variables */
-extern key_t *keys;
-extern const unsigned int num_keys;
+extern key_t *def_keys;
+extern const unsigned int num_def_keys;
+extern key_t *pdef_keys;
+extern const unsigned int num_pdef_keys;
 
+extern key_t *keys;
+extern unsigned int num_keys;
 #endif /* KEY_H */
diff --git a/tools/cert_create/src/cert.c b/tools/cert_create/src/cert.c
index 153f555..4b35d73 100644
--- a/tools/cert_create/src/cert.c
+++ b/tools/cert_create/src/cert.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -24,6 +24,9 @@
 #define SERIAL_RAND_BITS	64
 #define RSA_SALT_LEN		32
 
+cert_t *certs;
+unsigned int num_certs;
+
 int rand_serial(BIGNUM *b, ASN1_INTEGER *ai)
 {
 	BIGNUM *btmp;
@@ -220,6 +223,28 @@
 	cert_t *cert;
 	unsigned int i;
 
+	certs = malloc((num_def_certs * sizeof(def_certs[0]))
+#ifdef PDEF_CERTS
+		       + (num_pdef_certs * sizeof(pdef_certs[0]))
+#endif
+		       );
+	if (certs == NULL) {
+		ERROR("%s:%d Failed to allocate memory.\n", __func__, __LINE__);
+		return 1;
+	}
+
+	memcpy(&certs[0], &def_certs[0],
+	       (num_def_certs * sizeof(def_certs[0])));
+
+#ifdef PDEF_CERTS
+	memcpy(&certs[num_def_certs], &pdef_certs[0],
+	       (num_pdef_certs * sizeof(pdef_certs[0])));
+
+	num_certs = num_def_certs + num_pdef_certs;
+#else
+	num_certs = num_def_certs;
+#endif
+
 	for (i = 0; i < num_certs; i++) {
 		cert = &certs[i];
 		cmd_opt.long_opt.name = cert->opt;
diff --git a/tools/cert_create/src/ext.c b/tools/cert_create/src/ext.c
index 65dd3e5..2882123 100644
--- a/tools/cert_create/src/ext.c
+++ b/tools/cert_create/src/ext.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -13,8 +13,12 @@
 #include <openssl/x509v3.h>
 
 #include "cmd_opt.h"
+#include "debug.h"
 #include "ext.h"
 
+ext_t *extensions;
+unsigned int num_extensions;
+
 DECLARE_ASN1_ITEM(ASN1_INTEGER)
 DECLARE_ASN1_ITEM(X509_ALGOR)
 DECLARE_ASN1_ITEM(ASN1_OCTET_STRING)
@@ -51,6 +55,26 @@
 	int nid, ret;
 	unsigned int i;
 
+	extensions = malloc((num_def_extensions * sizeof(def_extensions[0]))
+#ifdef PDEF_EXTS
+			    + (num_pdef_extensions * sizeof(pdef_extensions[0]))
+#endif
+			    );
+	if (extensions == NULL) {
+		ERROR("%s:%d Failed to allocate memory.\n", __func__, __LINE__);
+		return 1;
+	}
+
+	memcpy(&extensions[0], &def_extensions[0],
+	       (num_def_extensions * sizeof(def_extensions[0])));
+#ifdef PDEF_EXTS
+	memcpy(&extensions[num_def_extensions], &pdef_extensions[0],
+		(num_pdef_extensions * sizeof(pdef_extensions[0])));
+	num_extensions = num_def_extensions + num_pdef_extensions;
+#else
+	num_extensions = num_def_extensions;
+#endif
+
 	for (i = 0; i < num_extensions; i++) {
 		ext = &extensions[i];
 		/* Register command line option */
diff --git a/tools/cert_create/src/key.c b/tools/cert_create/src/key.c
index fcc9d53..6435975 100644
--- a/tools/cert_create/src/key.c
+++ b/tools/cert_create/src/key.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -21,6 +21,9 @@
 
 #define MAX_FILENAME_LEN		1024
 
+key_t *keys;
+unsigned int num_keys;
+
 /*
  * Create a new key container
  */
@@ -182,6 +185,28 @@
 	key_t *key;
 	unsigned int i;
 
+	keys = malloc((num_def_keys * sizeof(def_keys[0]))
+#ifdef PDEF_KEYS
+		      + (num_pdef_keys * sizeof(pdef_keys[0]))
+#endif
+		      );
+
+	if (keys == NULL) {
+		ERROR("%s:%d Failed to allocate memory.\n", __func__, __LINE__);
+		return 1;
+	}
+
+	memcpy(&keys[0], &def_keys[0], (num_def_keys * sizeof(def_keys[0])));
+#ifdef PDEF_KEYS
+	memcpy(&keys[num_def_keys], &pdef_keys[0],
+		(num_pdef_keys * sizeof(pdef_keys[0])));
+
+	num_keys = num_def_keys + num_pdef_keys;
+#else
+	num_keys = num_def_keys;
+#endif
+
 	for (i = 0; i < num_keys; i++) {
 		key = &keys[i];
 		if (key->opt != NULL) {
diff --git a/tools/fiptool/Makefile b/tools/fiptool/Makefile
index df8ab5c..b75907d 100644
--- a/tools/fiptool/Makefile
+++ b/tools/fiptool/Makefile
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2014-2021, ARM Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -32,9 +32,23 @@
 
 HOSTCC ?= gcc
 
+ifneq (${PLAT},)
+TF_PLATFORM_ROOT	:=	../../plat/
+include ${MAKE_HELPERS_DIRECTORY}plat_helpers.mk
+PLAT_FIPTOOL_HELPER_MK := ${PLAT_DIR}/plat_fiptool.mk
+endif
+
+ifneq (,$(wildcard ${PLAT_FIPTOOL_HELPER_MK}))
+include ${PLAT_FIPTOOL_HELPER_MK}
+endif
+
 .PHONY: all clean distclean
 
-all: ${PROJECT}
+# Clean before building, as an existing fiptool binary may have been built
+# with a different PLAT_FIPTOOL_HELPER_MK included.
+all:
+	${MAKE}	clean
+	${MAKE}	${PROJECT}
 
 ${PROJECT}: ${OBJECTS} Makefile
 	@echo "  HOSTLD  $@"
@@ -43,7 +57,7 @@
 	@echo "Built $@ successfully"
 	@${ECHO_BLANK_LINE}
 
-%.o: %.c %.h Makefile
+%.o: %.c Makefile
 	@echo "  HOSTCC  $<"
 	${Q}${HOSTCC} -c ${CPPFLAGS} ${HOSTCCFLAGS} ${INCLUDE_PATHS} $< -o $@
 
diff --git a/tools/fiptool/fiptool.c b/tools/fiptool/fiptool.c
index 8c5b04a..d92c31d 100644
--- a/tools/fiptool/fiptool.c
+++ b/tools/fiptool/fiptool.c
@@ -215,6 +215,18 @@
 		    toc_entry->cmdline_name);
 		add_image_desc(desc);
 	}
+#ifdef PLAT_DEF_FIP_UUID
+	for (toc_entry = plat_def_toc_entries;
+	     toc_entry->cmdline_name != NULL;
+	     toc_entry++) {
+		image_desc_t *desc;
+
+		desc = new_image_desc(&toc_entry->uuid,
+		    toc_entry->name,
+		    toc_entry->cmdline_name);
+		add_image_desc(desc);
+	}
+#endif
 }
 
 static image_desc_t *lookup_image_desc_from_uuid(const uuid_t *uuid)
@@ -753,6 +765,12 @@
 	for (; toc_entry->cmdline_name != NULL; toc_entry++)
 		printf("  --%-16s FILENAME\t%s\n", toc_entry->cmdline_name,
 		    toc_entry->name);
+#ifdef PLAT_DEF_FIP_UUID
+	toc_entry = plat_def_toc_entries;
+	for (; toc_entry->cmdline_name != NULL; toc_entry++)
+		printf("  --%-16s FILENAME\t%s\n", toc_entry->cmdline_name,
+		    toc_entry->name);
+#endif
 	exit(exit_status);
 }
 
@@ -867,6 +885,12 @@
 	for (; toc_entry->cmdline_name != NULL; toc_entry++)
 		printf("  --%-16s FILENAME\t%s\n", toc_entry->cmdline_name,
 		    toc_entry->name);
+#ifdef PLAT_DEF_FIP_UUID
+	toc_entry = plat_def_toc_entries;
+	for (; toc_entry->cmdline_name != NULL; toc_entry++)
+		printf("  --%-16s FILENAME\t%s\n", toc_entry->cmdline_name,
+		    toc_entry->name);
+#endif
 	exit(exit_status);
 }
 
@@ -1001,6 +1025,12 @@
 	for (; toc_entry->cmdline_name != NULL; toc_entry++)
 		printf("  --%-16s FILENAME\t%s\n", toc_entry->cmdline_name,
 		    toc_entry->name);
+#ifdef PLAT_DEF_FIP_UUID
+	toc_entry = plat_def_toc_entries;
+	for (; toc_entry->cmdline_name != NULL; toc_entry++)
+		printf("  --%-16s FILENAME\t%s\n", toc_entry->cmdline_name,
+		    toc_entry->name);
+#endif
 	printf("\n");
 	printf("If no options are provided, all images will be unpacked.\n");
 	exit(exit_status);
@@ -1126,6 +1156,12 @@
 	for (; toc_entry->cmdline_name != NULL; toc_entry++)
 		printf("  --%-16s\t%s\n", toc_entry->cmdline_name,
 		    toc_entry->name);
+#ifdef PLAT_DEF_FIP_UUID
+	toc_entry = plat_def_toc_entries;
+	for (; toc_entry->cmdline_name != NULL; toc_entry++)
+		printf("  --%-16s\t%s\n", toc_entry->cmdline_name,
+		    toc_entry->name);
+#endif
 	exit(exit_status);
 }
 
diff --git a/tools/fiptool/tbbr_config.h b/tools/fiptool/tbbr_config.h
index 1fc6cad..b926ff0 100644
--- a/tools/fiptool/tbbr_config.h
+++ b/tools/fiptool/tbbr_config.h
@@ -21,4 +21,8 @@
 
 extern toc_entry_t toc_entries[];
 
+#ifdef PLAT_DEF_FIP_UUID
+extern toc_entry_t plat_def_toc_entries[];
+#endif
+
 #endif /* TBBR_CONFIG_H */
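
The PLAT_DEF_FIP_UUID paths added to fiptool.c iterate plat_def_toc_entries[] until an entry with a NULL cmdline_name, so a platform table must end with a terminator entry. A minimal, hypothetical example is shown below; the image name and option are placeholders and the UUID initialiser is omitted.

    #include <stddef.h>

    #include "tbbr_config.h"

    /* Hypothetical platform-defined FIP image table. */
    toc_entry_t plat_def_toc_entries[] = {
        {
            /* .uuid would carry the platform image UUID here. */
            .name = "DDR UDIMM Firmware",
            .cmdline_name = "ddr-udimm-fw"
        },
        {
            .name = NULL,
            .cmdline_name = NULL    /* terminator expected by the loops */
        }
    };
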
diff --git a/tools/nxp/cert_create_helper/cert_create_tbbr.mk b/tools/nxp/cert_create_helper/cert_create_tbbr.mk
new file mode 100644
index 0000000..e3b2e91
--- /dev/null
+++ b/tools/nxp/cert_create_helper/cert_create_tbbr.mk
@@ -0,0 +1,31 @@
+#
+# Copyright 2021 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Compile time defines used by NXP platforms
+
+PLAT_DEF_OID := yes
+
+ifeq (${PLAT_DEF_OID},yes)
+
+$(eval $(call add_define, PLAT_DEF_OID))
+$(eval $(call add_define, PDEF_KEYS))
+$(eval $(call add_define, PDEF_CERTS))
+$(eval $(call add_define, PDEF_EXTS))
+
+
+INC_DIR += -I../../plat/nxp/common/fip_handler/common/
+
+PDEF_CERT_TOOL_PATH		:=	../nxp/cert_create_helper
+PLAT_INCLUDE			+=	-I${PDEF_CERT_TOOL_PATH}/include
+
+PLAT_OBJECTS			+=	${PDEF_CERT_TOOL_PATH}/src/pdef_tbb_cert.o \
+					${PDEF_CERT_TOOL_PATH}/src/pdef_tbb_ext.o \
+					${PDEF_CERT_TOOL_PATH}/src/pdef_tbb_key.o
+
+$(shell rm ${PLAT_OBJECTS})
+
+OBJECTS				+= ${PLAT_OBJECTS}
+endif
diff --git a/tools/nxp/cert_create_helper/include/pdef_tbb_cert.h b/tools/nxp/cert_create_helper/include/pdef_tbb_cert.h
new file mode 100644
index 0000000..f185619
--- /dev/null
+++ b/tools/nxp/cert_create_helper/include/pdef_tbb_cert.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PDEF_TBB_CERT_H
+#define PDEF_TBB_CERT_H
+
+#include <tbbr/tbb_cert.h>
+
+/*
+ * Enumerate the certificates that are used to establish the chain of trust
+ */
+enum {
+	DDR_FW_KEY_CERT = FWU_CERT + 1,
+	DDR_UDIMM_FW_CONTENT_CERT,
+	DDR_RDIMM_FW_CONTENT_CERT
+};
+
+#endif /* PDEF_TBB_CERT_H */
diff --git a/tools/nxp/cert_create_helper/include/pdef_tbb_ext.h b/tools/nxp/cert_create_helper/include/pdef_tbb_ext.h
new file mode 100644
index 0000000..5fb349c
--- /dev/null
+++ b/tools/nxp/cert_create_helper/include/pdef_tbb_ext.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PDEF_TBB_EXT_H
+#define PDEF_TBB_EXT_H
+
+#include <tbbr/tbb_ext.h>
+
+/* Plat Defined TBBR extensions */
+enum {
+	DDR_FW_CONTENT_CERT_PK_EXT = FWU_HASH_EXT + 1,
+	DDR_IMEM_UDIMM_1D_HASH_EXT,
+	DDR_IMEM_UDIMM_2D_HASH_EXT,
+	DDR_DMEM_UDIMM_1D_HASH_EXT,
+	DDR_DMEM_UDIMM_2D_HASH_EXT,
+	DDR_IMEM_RDIMM_1D_HASH_EXT,
+	DDR_IMEM_RDIMM_2D_HASH_EXT,
+	DDR_DMEM_RDIMM_1D_HASH_EXT,
+	DDR_DMEM_RDIMM_2D_HASH_EXT
+};
+
+#endif /* PDEF_TBB_EXT_H */
diff --git a/tools/nxp/cert_create_helper/include/pdef_tbb_key.h b/tools/nxp/cert_create_helper/include/pdef_tbb_key.h
new file mode 100644
index 0000000..b26b651
--- /dev/null
+++ b/tools/nxp/cert_create_helper/include/pdef_tbb_key.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PDEF_TBB_KEY_H
+#define PDEF_TBB_KEY_H
+
+#include <tbbr/tbb_key.h>
+
+/*
+ * Enumerate the platform-defined keys that are used to establish the chain of trust
+ */
+enum {
+	DDR_FW_CONTENT_KEY = NON_TRUSTED_FW_CONTENT_CERT_KEY + 1,
+};
+#endif /* PDEF_TBB_KEY_H */
diff --git a/tools/nxp/cert_create_helper/src/pdef_tbb_cert.c b/tools/nxp/cert_create_helper/src/pdef_tbb_cert.c
new file mode 100644
index 0000000..40bd928
--- /dev/null
+++ b/tools/nxp/cert_create_helper/src/pdef_tbb_cert.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <pdef_tbb_cert.h>
+#include <pdef_tbb_ext.h>
+#include <pdef_tbb_key.h>
+
+static cert_t pdef_tbb_certs[] = {
+	[DDR_FW_KEY_CERT - DDR_FW_KEY_CERT] = {
+		.id = DDR_FW_KEY_CERT,
+		.opt = "ddr-fw-key-cert",
+		.help_msg = "DDR Firmware Key Certificate (output file)",
+		.fn = NULL,
+		.cn = "DDR Firmware Key Certificate",
+		.key = TRUSTED_WORLD_KEY,
+		.issuer = DDR_FW_KEY_CERT,
+		.ext = {
+			TRUSTED_FW_NVCOUNTER_EXT,
+			DDR_FW_CONTENT_CERT_PK_EXT,
+		},
+		.num_ext = 2
+	},
+	[DDR_UDIMM_FW_CONTENT_CERT - DDR_FW_KEY_CERT] = {
+		.id = DDR_UDIMM_FW_CONTENT_CERT,
+		.opt = "ddr-udimm-fw-cert",
+		.help_msg = "DDR UDIMM Firmware Content Certificate (output file)",
+		.fn = NULL,
+		.cn = "DDR UDIMM Firmware Content Certificate",
+		.key = DDR_FW_CONTENT_KEY,
+		.issuer = DDR_UDIMM_FW_CONTENT_CERT,
+		.ext = {
+			TRUSTED_FW_NVCOUNTER_EXT,
+			DDR_IMEM_UDIMM_1D_HASH_EXT,
+			DDR_IMEM_UDIMM_2D_HASH_EXT,
+			DDR_DMEM_UDIMM_1D_HASH_EXT,
+			DDR_DMEM_UDIMM_2D_HASH_EXT,
+		},
+		.num_ext = 5
+	},
+	[DDR_RDIMM_FW_CONTENT_CERT - DDR_FW_KEY_CERT] = {
+		.id = DDR_RDIMM_FW_CONTENT_CERT,
+		.opt = "ddr-rdimm-fw-cert",
+		.help_msg = "DDR RDIMM Firmware Content Certificate (output file)",
+		.fn = NULL,
+		.cn = "DDR RDIMM Firmware Content Certificate",
+		.key = DDR_FW_CONTENT_KEY,
+		.issuer = DDR_RDIMM_FW_CONTENT_CERT,
+		.ext = {
+			TRUSTED_FW_NVCOUNTER_EXT,
+			DDR_IMEM_RDIMM_1D_HASH_EXT,
+			DDR_IMEM_RDIMM_2D_HASH_EXT,
+			DDR_DMEM_RDIMM_1D_HASH_EXT,
+			DDR_DMEM_RDIMM_2D_HASH_EXT,
+		},
+		.num_ext = 5
+	}
+};
+
+PLAT_REGISTER_COT(pdef_tbb_certs);
diff --git a/tools/nxp/cert_create_helper/src/pdef_tbb_ext.c b/tools/nxp/cert_create_helper/src/pdef_tbb_ext.c
new file mode 100644
index 0000000..f6da6dd
--- /dev/null
+++ b/tools/nxp/cert_create_helper/src/pdef_tbb_ext.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <openssl/err.h>
+#include <openssl/x509v3.h>
+
+#if USE_TBBR_DEFS
+#include <tbbr_oid.h>
+#else
+#include <platform_oid.h>
+#endif
+
+#include "ext.h"
+#include "tbbr/tbb_ext.h"
+#include "tbbr/tbb_key.h"
+
+#include <pdef_tbb_ext.h>
+#include <pdef_tbb_key.h>
+
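+/*
+ * Platform defined certificate extensions. Each hash extension exposes a
+ * cert_create command line option (.opt) taking the corresponding DDR
+ * firmware image file, whose hash is embedded in the content certificate.
+ */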
+static ext_t pdef_tbb_ext[] = {
+	[DDR_FW_CONTENT_CERT_PK_EXT - DDR_FW_CONTENT_CERT_PK_EXT] = {
+		.oid = DDR_FW_CONTENT_CERT_PK_OID,
+		.sn = "DDR FirmwareContentCertPK",
+		.ln = "DDR Firmware content certificate public key",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_PKEY,
+		.attr.key = DDR_FW_CONTENT_KEY
+	},
+	[DDR_IMEM_UDIMM_1D_HASH_EXT - DDR_FW_CONTENT_CERT_PK_EXT] = {
+		.oid = DDR_IMEM_UDIMM_1D_HASH_OID,
+		.opt = "ddr-immem-udimm-1d",
+		.help_msg = "DDR Firmware IMEM UDIMM 1D image file",
+		.sn = "DDR UDIMM IMEM 1D FirmwareHash",
+		.ln = "DDR UDIMM IMEM 1D Firmware hash (SHA256)",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_HASH
+	},
+	[DDR_IMEM_UDIMM_2D_HASH_EXT - DDR_FW_CONTENT_CERT_PK_EXT] = {
+		.oid = DDR_IMEM_UDIMM_2D_HASH_OID,
+		.opt = "ddr-immem-udimm-2d",
+		.help_msg = "DDR Firmware IMEM UDIMM 2D image file",
+		.sn = "DDR UDIMM IMEM 2D FirmwareHash",
+		.ln = "DDR UDIMM IMEM 2D Firmware hash (SHA256)",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_HASH
+	},
+	[DDR_DMEM_UDIMM_1D_HASH_EXT - DDR_FW_CONTENT_CERT_PK_EXT] = {
+		.oid = DDR_DMEM_UDIMM_1D_HASH_OID,
+		.opt = "ddr-dmmem-udimm-1d",
+		.help_msg = "DDR Firmware DMEM UDIMM 1D image file",
+		.sn = "DDR UDIMM DMEM 1D FirmwareHash",
+		.ln = "DDR UDIMM DMEM 1D Firmware hash (SHA256)",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_HASH
+	},
+	[DDR_DMEM_UDIMM_2D_HASH_EXT - DDR_FW_CONTENT_CERT_PK_EXT] = {
+		.oid = DDR_DMEM_UDIMM_2D_HASH_OID,
+		.opt = "ddr-dmmem-udimm-2d",
+		.help_msg = "DDR Firmware DMEM UDIMM 2D image file",
+		.sn = "DDR UDIMM DMEM 2D FirmwareHash",
+		.ln = "DDR UDIMM DMEM 2D Firmware hash (SHA256)",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_HASH
+	},
+	[DDR_IMEM_RDIMM_1D_HASH_EXT - DDR_FW_CONTENT_CERT_PK_EXT] = {
+		.oid = DDR_IMEM_RDIMM_1D_HASH_OID,
+		.opt = "ddr-immem-rdimm-1d",
+		.help_msg = "DDR Firmware IMEM RDIMM 1D image file",
+		.sn = "DDR RDIMM IMEM 1D FirmwareHash",
+		.ln = "DDR RDIMM IMEM 1D Firmware hash (SHA256)",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_HASH
+	},
+	[DDR_IMEM_RDIMM_2D_HASH_EXT - DDR_FW_CONTENT_CERT_PK_EXT] = {
+		.oid = DDR_IMEM_RDIMM_2D_HASH_OID,
+		.opt = "ddr-immem-rdimm-2d",
+		.help_msg = "DDR Firmware IMEM RDIMM 2D image file",
+		.sn = "DDR RDIMM IMEM 2D FirmwareHash",
+		.ln = "DDR RDIMM IMEM 2D Firmware hash (SHA256)",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_HASH
+	},
+	[DDR_DMEM_RDIMM_1D_HASH_EXT - DDR_FW_CONTENT_CERT_PK_EXT] = {
+		.oid = DDR_DMEM_RDIMM_1D_HASH_OID,
+		.opt = "ddr-dmmem-rdimm-1d",
+		.help_msg = "DDR Firmware DMEM RDIMM 1D image file",
+		.sn = "DDR RDIMM DMEM 1D FirmwareHash",
+		.ln = "DDR RDIMM DMEM 1D Firmware hash (SHA256)",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_HASH
+	},
+	[DDR_DMEM_RDIMM_2D_HASH_EXT - DDR_FW_CONTENT_CERT_PK_EXT] = {
+		.oid = DDR_DMEM_RDIMM_2D_HASH_OID,
+		.opt = "ddr-dmmem-rdimm-2d",
+		.help_msg = "DDR Firmware DMEM RDIMM 2D image file",
+		.sn = "DDR RDIMM DMEM 2D FirmwareHash",
+		.ln = "DDR RDIMM DMEM 2D Firmware hash (SHA256)",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_HASH
+	}
+};
+
+PLAT_REGISTER_EXTENSIONS(pdef_tbb_ext);
diff --git a/tools/nxp/cert_create_helper/src/pdef_tbb_key.c b/tools/nxp/cert_create_helper/src/pdef_tbb_key.c
new file mode 100644
index 0000000..cf2ebda
--- /dev/null
+++ b/tools/nxp/cert_create_helper/src/pdef_tbb_key.c
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <pdef_tbb_key.h>
+
+static key_t pdef_tbb_keys[] = {
+	[DDR_FW_CONTENT_KEY - DDR_FW_CONTENT_KEY] = {
+		.id = DDR_FW_CONTENT_KEY,
+		.opt = "ddr-fw-key",
+		.help_msg = "DDR Firmware Content Certificate key (input/output file)",
+		.desc = "DDR Firmware Content Certificate key"
+	}
+};
+
+PLAT_REGISTER_KEYS(pdef_tbb_keys);
diff --git a/tools/nxp/create_pbl/Makefile b/tools/nxp/create_pbl/Makefile
new file mode 100644
index 0000000..f971a74
--- /dev/null
+++ b/tools/nxp/create_pbl/Makefile
@@ -0,0 +1,61 @@
+#
+# Copyright 2018-2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+MAKE_HELPERS_DIRECTORY := ../../../make_helpers/
+include ${MAKE_HELPERS_DIRECTORY}build_macros.mk
+include ${MAKE_HELPERS_DIRECTORY}build_env.mk
+
+PROJECT_1 := create_pbl${BIN_EXT}
+OBJECTS_1 := create_pbl.o
+PROJECT_2 := byte_swap${BIN_EXT}
+OBJECTS_2 := byte_swap.o
+V ?= 0
+
+override CPPFLAGS += -D_GNU_SOURCE -D_XOPEN_SOURCE=700
+CFLAGS := -Wall -Werror -pedantic -std=c99
+ifeq (${DEBUG},1)
+  CFLAGS += -g -O0 -DDEBUG
+else
+  CFLAGS += -O2
+endif
+LDLIBS :=
+
+ifeq (${V},0)
+  Q := @
+else
+  Q :=
+endif
+
+INCLUDE_PATHS :=
+
+HOSTCC ?= gcc
+CC = gcc
+
+.PHONY: all clean distclean
+
+all: create_pbl byte_swap
+
+${PROJECT_1}: ${OBJECTS_1} Makefile
+	@echo "  LD      $@"
+	${Q}${HOSTCC} ${OBJECTS_1} -o $@ ${LDLIBS}
+	@${ECHO_BLANK_LINE}
+	@echo "Built $@ successfully"
+	@${ECHO_BLANK_LINE}
+
+${PROJECT_2}: ${OBJECTS_2} Makefile
+	@echo "  LD      $@"
+	${Q}${HOSTCC} ${OBJECTS_2} -o $@ ${LDLIBS}
+	@${ECHO_BLANK_LINE}
+	@echo "Built $@ successfully"
+	@${ECHO_BLANK_LINE}
+
+%.o: %.c %.h Makefile
+	@echo "  CC      $<"
+	${Q}${HOSTCC} -c ${CPPFLAGS} ${CFLAGS} ${INCLUDE_PATHS} $< -o $@
+
+clean:
+	$(call SHELL_DELETE_ALL, ${PROJECT_1} ${OBJECTS_1})
+	$(call SHELL_DELETE_ALL, ${PROJECT_2} ${OBJECTS_2})
diff --git a/tools/nxp/create_pbl/README b/tools/nxp/create_pbl/README
new file mode 100644
index 0000000..3b6f854
--- /dev/null
+++ b/tools/nxp/create_pbl/README
@@ -0,0 +1,65 @@
+Description:
+------------
+'create_pbl' is a standalone tool that creates PBL images. Depending on the
+chassis architecture, the RCW image is placed first, followed by the PBI
+commands that copy the input BL2 image from the specified boot source
+(QSPI, SD or NOR) to the specified destination address.
+
+
+Usage in standalone way:
+-----------------------
+
+./create_pbl [options] (mentioned below):
+
+	-r  <RCW file-name>         - Name of the RCW binary file.
+	-i  <BL2 bin file-name>     - BL2 binary to be added to the RCW file.
+	-c  <SoC number>            - SoC numeric identifier, one of
+	                              1012, 1023, 1026, 1028,
+	                              1043, 1046, 1088, 2080,
+	                              2088, 2160.
+	-b  <boot source id>        - Boot source id string, one of
+	                              "qspi", "nor", "nand", "sd", "emmc",
+	                              "flexspi_nor", "flexspi_nand",
+	                              "flexspi_nand2k".
+	-d  <Address>               - Destination address to which the BL2
+	                              image is to be copied.
+	-o  <output filename>       - Name of the PBL image generated
+	                              as the output of the tool.
+	-e  <Address>               - [Optional] Entry point address
+	                              of bl2.bin.
+	-f  <Address>               - BL2 image offset on the boot source,
+	                              used for the block-copy command
+	                              (mandatory for Chassis 3, ignored
+	                              for Chassis 2).
+	-s                          - Secure boot.
+	-h                          - Help.
+
+Example:
+	./create_pbl -r <RCW file> -i <bl2.bin> -c <chassis_no> -b <boot_source = sd/qspi/nor> -d <Destination_Addr> -o <pbl_image_name>
+
+
+
+Usage at compilation time:
+--------------------------------
+
+	make <compilation command......> pbl RCW=<Path_to_RCW_File>/<rcw_file_name.bin>
+
+Example: QSPI Boot For LS1046ARDB-
+
+	make PLAT=ls1046rdb all fip BOOT_MODE=qspi SPD=opteed BL32=tee.bin BL33=u-boot-ls1046.bin pbl RCW=/home/pankaj/flexbuild/packages/firmware/dash-rcw/ls1046ardb/RR_FFSSPPPN_1133_5506/rcw_1600_qspiboot.bin
+
+Example: QSPI Boot For LX2160ARDB-
+
+	make PLAT=lx2160ardb all fip BOOT_MODE=flexspi_nor SPD=opteed BL32=tee_lx2.bin BL33=u-boot_lx2160.bin pbl RCW=plat/nxp/soc-lx2160/lx2160ardb/rcw_1900_600_1600_19_5_2.bin
diff --git a/tools/nxp/create_pbl/byte_swap.c b/tools/nxp/create_pbl/byte_swap.c
new file mode 100644
index 0000000..1d0bfce
--- /dev/null
+++ b/tools/nxp/create_pbl/byte_swap.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <getopt.h>
+#include <unistd.h>
+
+#define NUM_MEM_BLOCK		1
+#define FOUR_BYTE_ALIGN		4
+#define EIGHT_BYTE_ALIGN	8
+#define SIZE_TWO_PBL_CMD	24
+
+#define SUCCESS			 0
+#define FAILURE			-1
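+/* Reverse the byte order of a 32-bit word, e.g. 0x11223344 -> 0x44332211. */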
+#define BYTE_SWAP_32(word)	((((word) & 0xff000000) >> 24)|	\
+				(((word) & 0x00ff0000) >>  8) |	\
+				(((word) & 0x0000ff00) <<  8) |	\
+				(((word) & 0x000000ff) << 24))
+
+
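+/*
+ * Pad the file to an 8-byte boundary, then reverse the byte order of every
+ * 8-byte group in place: each pair of 32-bit words is byte-swapped and the
+ * two words are exchanged.
+ */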
+/*
+ * Returns:
+ *     SUCCESS, on successful byte swapping.
+ *     FAILURE, on failure.
+ */
+int do_byteswap(FILE *fp)
+{
+	int bytes = 0;
+	uint32_t  upper_byte;
+	uint32_t  lower_byte;
+	uint32_t  pad = 0U;
+	/* Carries number of Padding bytes to be appended to
+	 * make file size 8 byte aligned.
+	 */
+	int append_bytes;
+	int ret = FAILURE;
+
+	fseek(fp, 0L, SEEK_END);
+	bytes = ftell(fp);
+
+	append_bytes = EIGHT_BYTE_ALIGN - (bytes % EIGHT_BYTE_ALIGN);
+	if (append_bytes != 0) {
+		if (fwrite(&pad, append_bytes, NUM_MEM_BLOCK, fp)
+			!= NUM_MEM_BLOCK) {
+			printf("%s: Error in appending padding bytes.\n",
+				__func__);
+			goto byteswap_err;
+		}
+		bytes += append_bytes;
+	}
+
+	rewind(fp);
+	while (bytes > 0) {
+		if ((fread(&upper_byte, sizeof(upper_byte), NUM_MEM_BLOCK, fp)
+			!= NUM_MEM_BLOCK) && (feof(fp) == 0)) {
+			printf("%s: Error reading upper bytes.\n", __func__);
+			goto byteswap_err;
+		}
+		if ((fread(&lower_byte, sizeof(lower_byte), NUM_MEM_BLOCK, fp)
+			!= NUM_MEM_BLOCK) && (feof(fp) == 0)) {
+			printf("%s: Error reading lower bytes.\n", __func__);
+			goto byteswap_err;
+		}
+		fseek(fp, -8L, SEEK_CUR);
+		upper_byte = BYTE_SWAP_32(upper_byte);
+		lower_byte = BYTE_SWAP_32(lower_byte);
+		if (fwrite(&lower_byte, sizeof(lower_byte), NUM_MEM_BLOCK, fp)
+			!= NUM_MEM_BLOCK) {
+			printf("%s: Error writing lower bytes.\n", __func__);
+			goto byteswap_err;
+		}
+		if (fwrite(&upper_byte, sizeof(upper_byte), NUM_MEM_BLOCK, fp)
+			!= NUM_MEM_BLOCK) {
+			printf("%s: Error writing upper bytes.\n", __func__);
+			goto byteswap_err;
+		}
+		bytes -= EIGHT_BYTE_ALIGN;
+	}
+	ret = SUCCESS;
+
+byteswap_err:
+	return ret;
+}
+
+int main(int argc, char **argv)
+{
+	FILE *fp = NULL;
+	int ret = 0;
+
+	if (argc != 2) {
+		printf("Usage: byteswap <filename>\n");
+		return -1;
+	}
+
+	fp = fopen(argv[1], "rb+");
+	if (fp == NULL) {
+		printf("%s: Error opening the input file: %s\n",
+			__func__, argv[1]);
+		return -1;
+	}
+
+	ret = do_byteswap(fp);
+	fclose(fp);
+	return ret;
+}
diff --git a/tools/nxp/create_pbl/create_pbl.c b/tools/nxp/create_pbl/create_pbl.c
new file mode 100644
index 0000000..244b0fb
--- /dev/null
+++ b/tools/nxp/create_pbl/create_pbl.c
@@ -0,0 +1,996 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <getopt.h>
+#include <unistd.h>
+
+#define NUM_MEM_BLOCK		1
+#define FOUR_BYTE_ALIGN		4
+#define EIGHT_BYTE_ALIGN	8
+#define SIZE_TWO_PBL_CMD	24
+
+/* Define for add_boot_ptr_cmd() */
+#define BOOTPTR_ADDR 0x09570604
+#define CSF_ADDR_SB 0x09ee0200
+/* CCSR write command to address 0x1e00400 i.e BOOTLOCPTR */
+#define BOOTPTR_ADDR_CH3 0x31e00400
+/* Load CSF header command */
+#define CSF_ADDR_SB_CH3 0x80220000
+
+#define	MAND_ARG_MASK				0xFFF3
+#define	ARG_INIT_MASK				0xFF00
+#define RCW_FILE_NAME_ARG_MASK			0x0080
+#define IN_FILE_NAME_ARG_MASK			0x0040
+#define CHASSIS_ARG_MASK			0x0020
+#define BOOT_SRC_ARG_MASK			0x0010
+#define ENTRY_POINT_ADDR_ARG_MASK		0x0008
+#define BL2_BIN_STRG_LOC_BOOT_SRC_ARG_MASK	0x0004
+#define BL2_BIN_CPY_DEST_ADDR_ARG_MASK		0x0002
+#define OP_FILE_NAME_ARG_MASK			0x0001
+
+/* Define for add_cpy_cmd() */
+#define OFFSET_MASK		        0x00ffffff
+#define WRITE_CMD_BASE		    0x81000000
+#define MAX_PBI_DATA_LEN_BYTE	64
+
+/* 140 Bytes = Preamble + LOAD RCW command + RCW (128 bytes) + Checksum */
+#define CHS3_CRC_PAYLOAD_START_OFFSET 140
+
+#define PBI_CRC_POLYNOMIAL	0x04c11db7
+
+typedef enum {
+	CHASSIS_UNKNOWN,
+	CHASSIS_2,
+	CHASSIS_3,
+	CHASSIS_3_2,
+	CHASSIS_MAX    /* must be last item in list */
+} chassis_t;
+
+typedef enum {
+	UNKNOWN_BOOT = 0,
+	IFC_NOR_BOOT,
+	IFC_NAND_BOOT,
+	QSPI_BOOT,
+	SD_BOOT,
+	EMMC_BOOT,
+	FLXSPI_NOR_BOOT,
+	FLXSPI_NAND_BOOT,
+	FLXSPI_NAND4K_BOOT,
+	MAX_BOOT    /* must be last item in list */
+} boot_src_t;
+
+/* Base Addresses where PBL image is copied depending on the boot source.
+ * Boot address map varies as per Chassis architecture.
+ */
+#define BASE_ADDR_UNDEFINED  0xFFFFFFFF
+#define BASE_ADDR_QSPI       0x20000000
+#define BASE_ADDR_SD         0x00001000
+#define BASE_ADDR_IFC_NOR    0x30000000
+#define BASE_ADDR_EMMC       0x00001000
+#define BASE_ADDR_FLX_NOR    0x20000000
+#define BASE_ADDR_NAND       0x20000000
+
+uint32_t base_addr_ch3[MAX_BOOT] = {
+	BASE_ADDR_UNDEFINED,
+	BASE_ADDR_IFC_NOR,
+	BASE_ADDR_UNDEFINED,	/*IFC NAND */
+	BASE_ADDR_QSPI,
+	BASE_ADDR_SD,
+	BASE_ADDR_EMMC,
+	BASE_ADDR_UNDEFINED,	/*FLXSPI NOR */
+	BASE_ADDR_UNDEFINED,	/*FLXSPI NAND 2K */
+	BASE_ADDR_UNDEFINED	/*FLXSPI NAND 4K */
+};
+
+uint32_t base_addr_ch32[MAX_BOOT] = {
+	BASE_ADDR_UNDEFINED,
+	BASE_ADDR_UNDEFINED,	/* IFC NOR */
+	BASE_ADDR_UNDEFINED,	/* IFC NAND */
+	BASE_ADDR_UNDEFINED,	/* QSPI */
+	BASE_ADDR_SD,
+	BASE_ADDR_EMMC,
+	BASE_ADDR_FLX_NOR,
+	BASE_ADDR_UNDEFINED,	/*FLXSPI NAND 2K */
+	BASE_ADDR_UNDEFINED	/*FLXSPI NAND 4K */
+};
+
+/* PBI Block Copy command headers, indexed by boot_src_t - for Chassis 3 */
+uint32_t blk_cpy_hdr_map_ch3[] = {
+
+	0,		    /* Unknown Boot Source */
+	0x80000020,	/* NOR_BOOT */
+	0x0,		/* NAND_BOOT */
+	0x80000062,	/* QSPI_BOOT */
+	0x80000040,	/* SD_BOOT */
+	0x80000041,	/* EMMC_BOOT */
+	0x0,		/* FLEXSPI NOR_BOOT */
+	0x0,	/* FLEX SPI NAND2K BOOT */
+	0x0,	/* CHASSIS3_2_NAND4K_BOOT */
+};
+
+uint32_t blk_cpy_hdr_map_ch32[] = {
+	0,		    /* Unknown Boot Source */
+	0x0,		/* NOR_BOOT */
+	0x0,		/* NAND_BOOT */
+	0x0,		/* QSPI_BOOT */
+	0x80000008,	/* SD_BOOT */
+	0x80000009,	/* EMMC_BOOT */
+	0x8000000F,	/* FLEXSPI NOR_BOOT */
+	0x8000000C,	/* FLEX SPI NAND2K BOOT */
+	0x8000000D,	/* CHASSIS3_2_NAND4K_BOOT */
+};
+
+char *boot_src_string[] = {
+	"UNKNOWN_BOOT",
+	"IFC_NOR_BOOT",
+	"IFC_NAND_BOOT",
+	"QSPI_BOOT",
+	"SD_BOOT",
+	"EMMC_BOOT",
+	"FLXSPI_NOR_BOOT",
+	"FLXSPI_NAND_BOOT",
+	"FLXSPI_NAND4K_BOOT",
+};
+
+enum stop_command {
+	STOP_COMMAND = 0,
+	CRC_STOP_COMMAND
+};
+
+/* This structure is populated in main() while parsing the
+ * command line arguments.
+ * All members are mandatory except:
+ *	ep (-e)
+ *	src_addr (-f)
+ */
+struct pbl_image {
+	char *rcw_nm;		/* Input RCW File */
+	char *sec_imgnm;	/* Input BL2 binary */
+	char *imagefile;	/* Generated output file */
+	boot_src_t boot_src;	/* Boot Source - QSPI, SD, NOR, NAND etc */
+	uint32_t src_addr;	/* Source Address */
+	uint32_t addr;		/* Load address */
+	uint32_t ep;		/* Entry point <opt> default is load address */
+	chassis_t chassis;	/* Chassis type */
+} pblimg;
+
+#define SUCCESS			 0
+#define FAILURE			-1
+#define CRC_STOP_CMD_ARM	0x08610040
+#define CRC_STOP_CMD_ARM_CH3	0x808f0000
+#define STOP_CMD_ARM_CH3	0x80ff0000
+#define BYTE_SWAP_32(word)	((((word) & 0xff000000) >> 24)|	\
+				(((word) & 0x00ff0000) >>  8) |	\
+				(((word) & 0x0000ff00) <<  8) |	\
+				(((word) & 0x000000ff) << 24))
+
+#define PBI_LEN_MASK	0xFFF00000
+#define PBI_LEN_SHIFT	20
+#define NUM_RCW_WORD	35
+#define PBI_LEN_ADD		6
+
+#define MAX_CRC_ENTRIES 256
+
+/* SoC numeric identifier */
+#define SOC_LS1012 1012
+#define SOC_LS1023 1023
+#define SOC_LS1026 1026
+#define SOC_LS1028 1028
+#define SOC_LS1043 1043
+#define SOC_LS1046 1046
+#define SOC_LS1088 1088
+#define SOC_LS2080 2080
+#define SOC_LS2088 2088
+#define SOC_LX2160 2160
+
+static uint32_t pbl_size;
+bool sb_flag;
+
+/***************************************************************************
+ * Description	:	CRC32 lookup table (standard reflected CRC-32,
+ *			polynomial 0xEDB88320); used for the Chassis 3
+ *			stop-with-CRC command.
+ ***************************************************************************/
+static uint32_t crc32_lookup[] = {
+	 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
+	 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
+	 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
+	 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
+	 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
+	 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
+	 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
+	 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
+	 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
+	 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
+	 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
+	 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
+	 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
+	 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
+	 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
+	 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
+	 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
+	 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
+	 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
+	 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
+	 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
+	 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
+	 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
+	 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
+	 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
+	 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
+	 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
+	 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
+	 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
+	 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
+	 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
+	 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
+	 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
+	 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
+	 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
+	 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
+	 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
+	 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
+	 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
+	 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
+	 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
+	 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+	 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
+	 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
+	 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
+	 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
+	 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
+	 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
+	 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
+	 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
+	 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
+	 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
+	 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
+	 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
+	 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
+	 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
+	 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
+	 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
+	 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
+	 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
+	 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
+	 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
+	 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
+	 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
+	};
+
+
+static void print_usage(void)
+{
+	printf("\nCorrect Usage of Tool is:\n");
+	printf("\n ./create_pbl [options] (mentioned below):\n\n");
+	printf("\t-r  <RCW file-name>     - name of RCW binary file.\n");
+	printf("\t-i  <BL2 Bin file-name> - file to be added to rcw file.\n");
+	printf("\t-c  <Number>            - Chassis Architecture (=2 or =3\n");
+	printf("\t                          or =4 for 3.2).\n");
+	printf("\t-b  <qspi/nor/nand/sd>  - Boot source.\n");
+	printf("\t-d  <Address>           - Destination address where BL2\n");
+	printf("\t                          image is to be copied\n");
+	printf("\t-o  <output filename>	  - Name of PBL image generated\n");
+	printf("\t                          as an output of the tool.\n");
+	printf("\t-f  <Address>           - BL2 image Src Offset\n");
+	printf("\t                          on Boot Source for block copy.\n");
+	printf("\t                          command for chassis >=3.)\n");
+	printf("\t-e  <Address>           - [Optional] Entry Point Address\n");
+	printf("\t                          of the BL2.bin\n");
+	printf("\t-s  Secure Boot.\n");
+	printf("\t-h  Help.\n");
+	printf("\n\n");
+	exit(0);
+
+}
+
+/***************************************************************************
+ * Function	:	crypto_calculate_checksum()
+ * Arguments	:	data - Pointer to FILE
+ *			num - Number of 32 bit words for checksum
+ * Return	:	Checksum Value
+ * Description	:	Calculate Checksum over the data
+ ***************************************************************************/
+uint32_t crypto_calculate_checksum(FILE *fp_rcw_pbi_op, uint32_t num)
+{
+	uint32_t i;
+	uint64_t sum = 0;
+	uint32_t word;
+
+	fseek(fp_rcw_pbi_op, 0L, SEEK_SET);
+	for (i = 0; i < num ; i++) {
+		if ((fread(&word, sizeof(word), NUM_MEM_BLOCK, fp_rcw_pbi_op))
+			< NUM_MEM_BLOCK) {
+			printf("%s: Error reading word.\n", __func__);
+			return FAILURE;
+		}
+		sum = sum + word;
+		sum = sum & 0xFFFFFFFF;
+	}
+	return (uint32_t)sum;
+}
+
+/***************************************************************************
+ * Function	:	add_pbi_stop_cmd
+ * Arguments	:	fp_rcw_pbi_op - output rcw_pbi file pointer
+ * Return	:	SUCCESS or FAILURE
+ * Description	:	This function insert pbi stop command.
+ ***************************************************************************/
+int add_pbi_stop_cmd(FILE *fp_rcw_pbi_op, enum stop_command flag)
+{
+	int ret = FAILURE;
+	int32_t pbi_stop_cmd;
+	uint32_t pbi_crc = 0xffffffff, i, j, c;
+	uint32_t crc_table[MAX_CRC_ENTRIES];
+	uint8_t data;
+
+	switch (pblimg.chassis) {
+	case CHASSIS_2:
+		pbi_stop_cmd = BYTE_SWAP_32(CRC_STOP_CMD_ARM);
+		break;
+	case CHASSIS_3:
+	case CHASSIS_3_2:
+		/* Based on the flag, add the corresponding command:
+		 * either a plain stop command or a stop-with-CRC command.
+		 */
+		if (flag == CRC_STOP_COMMAND) {
+			pbi_stop_cmd = CRC_STOP_CMD_ARM_CH3;
+		} else {
+			pbi_stop_cmd = STOP_CMD_ARM_CH3;
+		}
+		break;
+	case CHASSIS_UNKNOWN:
+	case CHASSIS_MAX:
+	default:
+		printf("Internal Error: Invalid Chassis val = %d.\n",
+			pblimg.chassis);
+		goto pbi_stop_err;
+	}
+
+	if (fwrite(&pbi_stop_cmd, sizeof(pbi_stop_cmd), NUM_MEM_BLOCK,
+			fp_rcw_pbi_op) != NUM_MEM_BLOCK) {
+		printf("%s: Error in Writing PBI STOP CMD\n", __func__);
+		goto pbi_stop_err;
+	}
+
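+	/*
+	 * Build an MSB-first CRC-32 table from PBI_CRC_POLYNOMIAL. It is used
+	 * below for the Chassis 2 CRC; Chassis 3 uses the pre-computed,
+	 * reflected crc32_lookup table instead.
+	 */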
+	if (flag == CRC_STOP_COMMAND) {
+		for (i = 0; i < MAX_CRC_ENTRIES; i++) {
+			c = i << 24;
+			for (j = 0; j < 8; j++) {
+				c = (c & 0x80000000) ?
+					PBI_CRC_POLYNOMIAL ^ (c << 1) : c << 1;
+			}
+
+			crc_table[i] = c;
+		}
+	}
+
+	switch (pblimg.chassis) {
+	case CHASSIS_2:
+		/* Chassis 2: CRC is calculated on  RCW + PBL cmd.*/
+		fseek(fp_rcw_pbi_op, 0L, SEEK_SET);
+		break;
+	case CHASSIS_3:
+	case CHASSIS_3_2:
+		/* Chassis 3: CRC is calculated on  PBL cmd only. */
+		fseek(fp_rcw_pbi_op, CHS3_CRC_PAYLOAD_START_OFFSET, SEEK_SET);
+		break;
+	case CHASSIS_UNKNOWN:
+	case CHASSIS_MAX:
+		printf("%s: Unknown Chassis.\n", __func__);
+		goto pbi_stop_err;
+	}
+
+	while ((fread(&data, sizeof(data), NUM_MEM_BLOCK, fp_rcw_pbi_op))
+		== NUM_MEM_BLOCK) {
+		if (flag == CRC_STOP_COMMAND) {
+			if (pblimg.chassis == CHASSIS_2) {
+				pbi_crc = crc_table
+					  [((pbi_crc >> 24) ^ (data)) & 0xff] ^
+					  (pbi_crc << 8);
+			} else {
+				pbi_crc =  (pbi_crc >> 8) ^
+					   crc32_lookup[((pbi_crc) ^
+							   (data)) & 0xff];
+			}
+		}
+	}
+
+	switch (pblimg.chassis) {
+	case CHASSIS_2:
+		pbi_crc = BYTE_SWAP_32(pbi_crc);
+		break;
+	case CHASSIS_3:
+	case CHASSIS_3_2:
+		if (flag == CRC_STOP_COMMAND) {
+			pbi_crc = pbi_crc ^ 0xFFFFFFFF;
+		} else {
+			pbi_crc = 0x00000000;
+		}
+		break;
+	case CHASSIS_UNKNOWN:
+	case CHASSIS_MAX:
+		printf("%s: Unknown Chassis.\n", __func__);
+		goto pbi_stop_err;
+	}
+
+	if (fwrite(&pbi_crc, sizeof(pbi_crc), NUM_MEM_BLOCK, fp_rcw_pbi_op)
+		!= NUM_MEM_BLOCK) {
+		printf("%s: Error in Writing PBI PBI CRC\n", __func__);
+		goto pbi_stop_err;
+	}
+	ret = SUCCESS;
+
+pbi_stop_err:
+	return ret;
+}
+
+/*
+ * Returns:
+ *     File size in bytes, on Success.
+ *     FAILURE, on failure.
+ */
+int get_filesize(const char *c)
+{
+	FILE *fp;
+	int ret = FAILURE;
+
+	fp = fopen(c, "rb");
+	if (fp == NULL) {
+		fprintf(stderr, "%s: Error in opening the file: %s\n",
+			__func__, c);
+		goto filesize_err;
+	}
+
+	fseek(fp, 0L, SEEK_END);
+	ret = ftell(fp);
+	fclose(fp);
+
+filesize_err:
+	return ret;
+}
+
+/***************************************************************************
+ * Function	:	get_bootptr
+ * Arguments	:	fp_rcw_pbi_op - Pointer to output file
+ * Return	:	SUCCESS or FAILURE
+ * Description	:	Add bootptr pbi command to output file
+ ***************************************************************************/
+int add_boot_ptr_cmd(FILE *fp_rcw_pbi_op)
+{
+	uint32_t bootptr_addr;
+	int ret = FAILURE;
+
+	switch (pblimg.chassis) {
+	case CHASSIS_2:
+		if (sb_flag == true)
+			bootptr_addr = BYTE_SWAP_32(CSF_ADDR_SB);
+		else
+			bootptr_addr = BYTE_SWAP_32(BOOTPTR_ADDR);
+		pblimg.ep    = BYTE_SWAP_32(pblimg.ep);
+		break;
+	case CHASSIS_3:
+	case CHASSIS_3_2:
+		if (sb_flag == true)
+			bootptr_addr = CSF_ADDR_SB_CH3;
+		else
+			bootptr_addr = BOOTPTR_ADDR_CH3;
+		break;
+	case CHASSIS_UNKNOWN:
+	case CHASSIS_MAX:
+	default:
+		printf("Internal Error: Invalid Chassis val = %d.\n",
+			pblimg.chassis);
+		goto bootptr_err;
+	}
+
+	if (fwrite(&bootptr_addr, sizeof(bootptr_addr), NUM_MEM_BLOCK,
+		fp_rcw_pbi_op) != NUM_MEM_BLOCK) {
+		printf("%s: Error in Writing PBI Words:[%d].\n",
+			 __func__, ret);
+		goto bootptr_err;
+	}
+
+	if (pblimg.ep != 0) {
+		if (fwrite(&pblimg.ep, sizeof(pblimg.ep), NUM_MEM_BLOCK,
+			fp_rcw_pbi_op) != NUM_MEM_BLOCK) {
+			printf("%s: Error in Writing PBI Words\n", __func__);
+			goto bootptr_err;
+		}
+	}
+
+	printf("\nBoot Location Pointer= %x\n", BYTE_SWAP_32(pblimg.ep));
+	ret = SUCCESS;
+
+bootptr_err:
+	return ret;
+}
+
+/***************************************************************************
+ * Function	:	add_blk_cpy_cmd
+ * Arguments	:	pbi_word - pointer to pbi commands
+ *			args - Command  line args flag.
+ * Return	:	SUCCESS or FAILURE
+ * Description	:	Add pbi commands for block copy cmd in pbi_words
+ ***************************************************************************/
+int add_blk_cpy_cmd(FILE *fp_rcw_pbi_op, uint16_t args)
+{
+	uint32_t blk_cpy_hdr;
+	uint32_t file_size, new_file_size;
+	uint32_t align = 4;
+	int ret = FAILURE;
+	int num_pad_bytes = 0;
+
+	if ((args & BL2_BIN_STRG_LOC_BOOT_SRC_ARG_MASK) == 0) {
+		printf("ERROR: Offset not specified for Block Copy Cmd.\n");
+		printf("\tSee Usage and use -f option\n");
+		goto blk_copy_err;
+	}
+
+	switch (pblimg.chassis) {
+	case CHASSIS_3:
+		/* Block copy command */
+		blk_cpy_hdr = blk_cpy_hdr_map_ch3[pblimg.boot_src];
+		pblimg.src_addr += base_addr_ch3[pblimg.boot_src];
+		break;
+	case CHASSIS_3_2:
+		/* Block copy command */
+		blk_cpy_hdr = blk_cpy_hdr_map_ch32[pblimg.boot_src];
+		pblimg.src_addr += base_addr_ch32[pblimg.boot_src];
+		break;
+	default:
+		printf("%s: Error invalid chassis type for this command.\n",
+				__func__);
+		goto blk_copy_err;
+	}
+
+	file_size = get_filesize(pblimg.sec_imgnm);
+	if (file_size > 0) {
+		new_file_size = (file_size + (file_size % align));
+
+		/* Add Block copy command */
+		if (fwrite(&blk_cpy_hdr, sizeof(blk_cpy_hdr), NUM_MEM_BLOCK,
+			fp_rcw_pbi_op) != NUM_MEM_BLOCK) {
+			printf("%s: Error writing blk_cpy_hdr to the file.\n",
+				 __func__);
+			goto blk_copy_err;
+		}
+
+		if ((args & BL2_BIN_STRG_LOC_BOOT_SRC_ARG_MASK) == 0)
+			num_pad_bytes = pblimg.src_addr % 4;
+
+		/* Add Src address word */
+		if (fwrite(&pblimg.src_addr + num_pad_bytes,
+			   sizeof(pblimg.src_addr), NUM_MEM_BLOCK,
+			   fp_rcw_pbi_op) != NUM_MEM_BLOCK) {
+			printf("%s: Error writing BLK SRC Addr to the file.\n",
+				 __func__);
+			goto blk_copy_err;
+		}
+
+		/* Add Dest address word */
+		if (fwrite(&pblimg.addr, sizeof(pblimg.addr),
+			NUM_MEM_BLOCK, fp_rcw_pbi_op) != NUM_MEM_BLOCK) {
+			printf("%s: Error writing DST Addr to the file.\n",
+			__func__);
+			goto blk_copy_err;
+		}
+
+		/* Add size */
+		if (fwrite(&new_file_size, sizeof(new_file_size),
+			NUM_MEM_BLOCK, fp_rcw_pbi_op) != NUM_MEM_BLOCK) {
+			printf("%s: Error writing size to the file.\n",
+				__func__);
+			goto blk_copy_err;
+		}
+	}
+
+	ret = SUCCESS;
+
+blk_copy_err:
+	return ret;
+}
+
+/***************************************************************************
+ * Function	:	add_cpy_cmd
+ * Arguments	:	pbi_word - pointer to pbi commands
+ * Return	:	SUCCESS or FAILURE
+ * Description	:	Append pbi commands for copying BL2 image to the
+ *			load address stored in pbl_image.addr
+ ***************************************************************************/
+int add_cpy_cmd(FILE *fp_rcw_pbi_op)
+{
+	uint32_t ALTCBAR_ADDRESS = BYTE_SWAP_32(0x09570158);
+	uint32_t WAIT_CMD_WRITE_ADDRESS = BYTE_SWAP_32(0x096100c0);
+	uint32_t WAIT_CMD = BYTE_SWAP_32(0x000FFFFF);
+	int file_size;
+	uint32_t pbi_cmd, altcbar;
+	uint8_t pbi_data[MAX_PBI_DATA_LEN_BYTE];
+	uint32_t dst_offset;
+	FILE *fp_img = NULL;
+	int ret = FAILURE;
+
+	altcbar = pblimg.addr;
+	dst_offset = pblimg.addr;
+	fp_img = fopen(pblimg.sec_imgnm, "rb");
+	if (fp_img == NULL) {
+		printf("%s: Error in opening the file: %s\n", __func__,
+		      pblimg.sec_imgnm);
+		goto add_cpy_err;
+	}
+	file_size = get_filesize(pblimg.sec_imgnm);
+	altcbar = 0xfff00000 & altcbar;
+	altcbar = BYTE_SWAP_32(altcbar >> 16);
+	if (fwrite(&ALTCBAR_ADDRESS, sizeof(ALTCBAR_ADDRESS), NUM_MEM_BLOCK,
+		fp_rcw_pbi_op) != NUM_MEM_BLOCK) {
+		printf("%s: Error in writing address of ALTCFG CMD.\n",
+			 __func__);
+		goto add_cpy_err;
+	}
+	if (fwrite(&altcbar, sizeof(altcbar), NUM_MEM_BLOCK, fp_rcw_pbi_op)
+		!= NUM_MEM_BLOCK) {
+		printf("%s: Error in writing ALTCFG CMD.\n", __func__);
+		goto add_cpy_err;
+	}
+	if (fwrite(&WAIT_CMD_WRITE_ADDRESS, sizeof(WAIT_CMD_WRITE_ADDRESS),
+		NUM_MEM_BLOCK, fp_rcw_pbi_op) != NUM_MEM_BLOCK) {
+		printf("%s: Error in writing address of WAIT_CMD.\n",
+			__func__);
+		goto add_cpy_err;
+	}
+	if (fwrite(&WAIT_CMD, sizeof(WAIT_CMD), NUM_MEM_BLOCK, fp_rcw_pbi_op)
+		!= NUM_MEM_BLOCK) {
+		printf("%s: Error in writing WAIT_CMD.\n", __func__);
+		goto add_cpy_err;
+	}
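+	/*
+	 * Emit one CCSR ALTCFG write command per MAX_PBI_DATA_LEN_BYTE (64)
+	 * bytes of the BL2 image, advancing the destination offset after
+	 * each chunk until the whole image has been written.
+	 */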
+	do {
+		memset(pbi_data, 0, MAX_PBI_DATA_LEN_BYTE);
+
+		ret = fread(&pbi_data, MAX_PBI_DATA_LEN_BYTE,
+				NUM_MEM_BLOCK, fp_img);
+		if ((ret != NUM_MEM_BLOCK) && (!feof(fp_img))) {
+			printf("%s: Error writing ALTCFG Word: [%d].\n",
+				__func__, ret);
+			goto add_cpy_err;
+		}
+
+		dst_offset &= OFFSET_MASK;
+		pbi_cmd = WRITE_CMD_BASE | dst_offset;
+		pbi_cmd = BYTE_SWAP_32(pbi_cmd);
+		if (fwrite(&pbi_cmd, sizeof(pbi_cmd), NUM_MEM_BLOCK,
+			fp_rcw_pbi_op) != NUM_MEM_BLOCK) {
+			printf("%s: Error writing ALTCFG Word write cmd.\n",
+				 __func__);
+			goto add_cpy_err;
+		}
+		if (fwrite(&pbi_data,  MAX_PBI_DATA_LEN_BYTE, NUM_MEM_BLOCK,
+			fp_rcw_pbi_op) != NUM_MEM_BLOCK) {
+			printf("%s: Error writing ALTCFG_Word.\n", __func__);
+			goto add_cpy_err;
+		}
+		dst_offset += MAX_PBI_DATA_LEN_BYTE;
+		file_size -= MAX_PBI_DATA_LEN_BYTE;
+	} while (!feof(fp_img));
+
+	ret = SUCCESS;
+
+add_cpy_err:
+	if (fp_img != NULL) {
+		fclose(fp_img);
+	}
+	return ret;
+}
+
+int main(int argc, char **argv)
+{
+	FILE *file = NULL;
+	char *ptr;
+	int opt;
+	int tmp;
+	uint16_t args = ARG_INIT_MASK;
+	FILE *fp_rcw_pbi_ip = NULL, *fp_rcw_pbi_op = NULL;
+	uint32_t word, word_1;
+	int ret = FAILURE;
+	bool bootptr_flag = false;
+	enum stop_command flag_stop_cmd = CRC_STOP_COMMAND;
+
+	/* Initializing the global structure to zero. */
+	memset(&pblimg, 0x0, sizeof(struct pbl_image));
+
+	while ((opt = getopt(argc, argv,
+			     ":b:f:r:i:e:d:c:o:h:s")) != -1) {
+		switch (opt) {
+		case 'd':
+			pblimg.addr = strtoull(optarg, &ptr, 16);
+			if (*ptr != 0) {
+				fprintf(stderr, "CMD Error: invalid load or destination address %s\n", optarg);
+				goto exit_main;
+			}
+			args |= BL2_BIN_CPY_DEST_ADDR_ARG_MASK;
+			break;
+		case 'r':
+			pblimg.rcw_nm = optarg;
+			file = fopen(pblimg.rcw_nm, "r");
+			if (file == NULL) {
+				printf("CMD Error: Opening the RCW File.\n");
+				goto exit_main;
+			} else {
+				args |= RCW_FILE_NAME_ARG_MASK;
+				fclose(file);
+			}
+			break;
+		case 'e':
+			bootptr_flag = true;
+			pblimg.ep = strtoull(optarg, &ptr, 16);
+			if (*ptr != 0) {
+				fprintf(stderr,
+				"CMD Error: Invalid entry point %s\n", optarg);
+				goto exit_main;
+			}
+			break;
+		case 'h':
+			print_usage();
+			break;
+		case 'i':
+			pblimg.sec_imgnm = optarg;
+			file = fopen(pblimg.sec_imgnm, "r");
+			if (file == NULL) {
+				printf("CMD Error: Opening Input file.\n");
+				goto exit_main;
+			} else {
+				args |= IN_FILE_NAME_ARG_MASK;
+				fclose(file);
+			}
+			break;
+		case 'c':
+			tmp = atoi(optarg);
+			switch (tmp) {
+			case SOC_LS1012:
+			case SOC_LS1023:
+			case SOC_LS1026:
+			case SOC_LS1043:
+			case SOC_LS1046:
+				pblimg.chassis = CHASSIS_2;
+				break;
+			case SOC_LS1088:
+			case SOC_LS2080:
+			case SOC_LS2088:
+				pblimg.chassis = CHASSIS_3;
+				break;
+			case SOC_LS1028:
+			case SOC_LX2160:
+				pblimg.chassis = CHASSIS_3_2;
+				break;
+			default:
+			printf("CMD Error: Invalid SoC Val = %d.\n", tmp);
+				goto exit_main;
+			}
+
+			args |= CHASSIS_ARG_MASK;
+			break;
+		case 'o':
+			pblimg.imagefile = optarg;
+			args |= OP_FILE_NAME_ARG_MASK;
+			break;
+		case 's':
+			sb_flag = true;
+			break;
+		case 'b':
+			if (strcmp(optarg, "qspi") == 0) {
+				pblimg.boot_src = QSPI_BOOT;
+			} else if (strcmp(optarg, "nor") == 0) {
+				pblimg.boot_src = IFC_NOR_BOOT;
+			} else if (strcmp(optarg, "nand") == 0) {
+				pblimg.boot_src = IFC_NAND_BOOT;
+			} else if (strcmp(optarg, "sd") == 0) {
+				pblimg.boot_src = SD_BOOT;
+			} else if (strcmp(optarg, "emmc") == 0) {
+				pblimg.boot_src = EMMC_BOOT;
+			} else if (strcmp(optarg, "flexspi_nor") == 0) {
+				pblimg.boot_src = FLXSPI_NOR_BOOT;
+			} else if (strcmp(optarg, "flexspi_nand") == 0) {
+				pblimg.boot_src = FLXSPI_NAND_BOOT;
+			} else if (strcmp(optarg, "flexspi_nand2k") == 0) {
+				pblimg.boot_src = FLXSPI_NAND4K_BOOT;
+			} else {
+				printf("CMD Error: Invalid boot source.\n");
+				goto exit_main;
+			}
+			args |= BOOT_SRC_ARG_MASK;
+			break;
+		case 'f':
+			pblimg.src_addr = strtoull(optarg, &ptr, 16);
+			if (*ptr != 0) {
+				fprintf(stderr,
+				"CMD Error: Invalid src offset %s\n", optarg);
+				goto exit_main;
+			}
+			args |= BL2_BIN_STRG_LOC_BOOT_SRC_ARG_MASK;
+			break;
+		default:
+			/* issue a warning and skip the unknown arg */
+			printf("Cmd Warning: Invalid Arg = %c.\n", opt);
+		}
+	}
+
+	if ((args & MAND_ARG_MASK) != MAND_ARG_MASK) {
+		print_usage();
+	}
+
+	fp_rcw_pbi_ip = fopen(pblimg.rcw_nm, "rb");
+	if (fp_rcw_pbi_ip == NULL) {
+		printf("%s: Error in opening the rcw file: %s\n",
+			__func__, pblimg.rcw_nm);
+		goto exit_main;
+	}
+
+	fp_rcw_pbi_op = fopen(pblimg.imagefile, "wb+");
+	if (fp_rcw_pbi_op == NULL) {
+		printf("%s: Error opening the input file: %s\n",
+			__func__, pblimg.imagefile);
+		goto exit_main;
+	}
+
+	printf("\nInput Boot Source: %s\n", boot_src_string[pblimg.boot_src]);
+	printf("Input RCW File: %s\n", pblimg.rcw_nm);
+	printf("Input BL2 Binary File: %s\n", pblimg.sec_imgnm);
+	printf("Input load address for BL2 Binary File: 0x%x\n", pblimg.addr);
+
+	printf("Chassis Type: %d\n", pblimg.chassis);
+	switch (pblimg.chassis) {
+	case CHASSIS_2:
+		if (fread(&word, sizeof(word), NUM_MEM_BLOCK, fp_rcw_pbi_ip)
+			!= NUM_MEM_BLOCK) {
+			printf("%s: Error in reading word from the rcw file.\n",
+				__func__);
+			goto exit_main;
+		}
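+		/*
+		 * Copy the RCW and any existing PBI commands from the input
+		 * file until the CRC-and-Stop command (0x08610040) or one of
+		 * the words 0x09550000 / 0x000f400c is reached.
+		 */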
+		while (BYTE_SWAP_32(word) != 0x08610040) {
+			if (BYTE_SWAP_32(word) == 0x09550000
+				|| BYTE_SWAP_32(word) == 0x000f400c) {
+				break;
+			}
+			if (fwrite(&word, sizeof(word), NUM_MEM_BLOCK,
+				fp_rcw_pbi_op) != NUM_MEM_BLOCK) {
+				printf("%s: [CH2] Error in Writing PBI Words\n",
+				__func__);
+				goto exit_main;
+			}
+			if (fread(&word, sizeof(word), NUM_MEM_BLOCK,
+				fp_rcw_pbi_ip) != NUM_MEM_BLOCK) {
+				printf("%s: [CH2] Error in Reading PBI Words\n",
+					__func__);
+				goto exit_main;
+			}
+		}
+
+		if (bootptr_flag == true) {
+			/* Add command to set boot_loc ptr */
+			ret = add_boot_ptr_cmd(fp_rcw_pbi_op);
+			if (ret != SUCCESS) {
+				goto exit_main;
+			}
+		}
+
+		/* Write acs write commands to output file */
+		ret = add_cpy_cmd(fp_rcw_pbi_op);
+		if (ret != SUCCESS) {
+			goto exit_main;
+		}
+
+		/* Add stop command after adding pbi commands
+		 * For Chassis 2.0 platforms it is always CRC &
+		 * Stop command
+		 */
+		flag_stop_cmd = CRC_STOP_COMMAND;
+		ret = add_pbi_stop_cmd(fp_rcw_pbi_op, flag_stop_cmd);
+		if (ret != SUCCESS) {
+			goto exit_main;
+		}
+
+	break;
+
+	case CHASSIS_3:
+	case CHASSIS_3_2:
+		if (fread(&word, sizeof(word), NUM_MEM_BLOCK, fp_rcw_pbi_ip)
+			!= NUM_MEM_BLOCK) {
+			printf("%s: Error reading PBI Cmd.\n", __func__);
+			goto exit_main;
+		}
+		while (word != 0x808f0000 && word != 0x80ff0000) {
+			pbl_size++;
+			/* The 11th word in the RCW holds the PBL length.
+			 * Update it with the new length: 2 commands get added
+			 * (Block copy + CCSR write/CSF header write).
+			 */
+			if (pbl_size == 11) {
+				word_1 = (word & PBI_LEN_MASK)
+					+ (PBI_LEN_ADD << 20);
+				word = word & ~PBI_LEN_MASK;
+				word = word | word_1;
+			}
+			/* Word 35 is the RCW checksum: recompute it over the
+			 * words written to the output file so far. The stop
+			 * command type (with or without CRC) is detected
+			 * further below in this loop.
+			 */
+			if (pbl_size == 35) {
+				word = crypto_calculate_checksum(fp_rcw_pbi_op,
+						NUM_RCW_WORD - 1);
+				if (word == FAILURE) {
+					goto exit_main;
+				}
+			}
+			if (fwrite(&word, sizeof(word),	NUM_MEM_BLOCK,
+				fp_rcw_pbi_op) != NUM_MEM_BLOCK) {
+				printf("%s: [CH3] Error in Writing PBI Words\n",
+					__func__);
+				goto exit_main;
+			}
+			if (fread(&word, sizeof(word), NUM_MEM_BLOCK,
+				fp_rcw_pbi_ip) != NUM_MEM_BLOCK) {
+				printf("%s: [CH3] Error in Reading PBI Words\n",
+					 __func__);
+				goto exit_main;
+			}
+
+			if (word == CRC_STOP_CMD_ARM_CH3) {
+				flag_stop_cmd = CRC_STOP_COMMAND;
+			} else if (word == STOP_CMD_ARM_CH3) {
+				flag_stop_cmd = STOP_COMMAND;
+			}
+		}
+		if (bootptr_flag == true) {
+			/* Add command to set boot_loc ptr */
+			ret = add_boot_ptr_cmd(fp_rcw_pbi_op);
+			if (ret != SUCCESS) {
+				printf("%s: add_boot_ptr_cmd return failure.\n",
+					__func__);
+				goto exit_main;
+			}
+		}
+
+		/* Write acs write commands to output file */
+		ret = add_blk_cpy_cmd(fp_rcw_pbi_op, args);
+		if (ret != SUCCESS) {
+			printf("%s: Function add_blk_cpy_cmd return failure.\n",
+				 __func__);
+			goto exit_main;
+		}
+
+		/* Add stop command after adding pbi commands */
+		ret = add_pbi_stop_cmd(fp_rcw_pbi_op, flag_stop_cmd);
+		if (ret != SUCCESS) {
+			goto exit_main;
+		}
+
+	break;
+
+	default:
+		printf("%s: Unknown chassis type.\n",
+				__func__);
+	}
+
+	if (ret == SUCCESS) {
+		printf("Output file successfully created with name: %s\n\n",
+			   pblimg.imagefile);
+	}
+
+exit_main:
+	if (fp_rcw_pbi_op != NULL) {
+		fclose(fp_rcw_pbi_op);
+	}
+	if (fp_rcw_pbi_ip != NULL) {
+		fclose(fp_rcw_pbi_ip);
+	}
+
+	return ret;
+}
diff --git a/tools/nxp/create_pbl/create_pbl.mk b/tools/nxp/create_pbl/create_pbl.mk
new file mode 100644
index 0000000..b68882e
--- /dev/null
+++ b/tools/nxp/create_pbl/create_pbl.mk
@@ -0,0 +1,52 @@
+#
+# Copyright 2018-2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+
+CREATE_PBL	?=	${CREATE_PBL_TOOL_PATH}/create_pbl${BIN_EXT}
+BYTE_SWAP	?=	${CREATE_PBL_PLAT_TOOL_PATH}/byte_swap${BIN_EXT}
+
+HOST_GCC	:= gcc
+
+# SWAP is required for Chassis 2 platforms - ls1012, ls1043 and ls1046 - for QSPI boot
+ifeq (${SOC},ls1046a)
+SOC_NUM :=	1046a
+SWAP	= 	1
+CH	=	2
+else ifeq (${SOC},ls1043a)
+SOC_NUM :=	1043a
+SWAP	= 	1
+CH	=	2
+else ifeq (${SOC},ls1012a)
+SOC_NUM :=	1012a
+SWAP	= 	1
+CH	=	2
+else ifeq (${SOC},ls1088a)
+SOC_NUM :=	1088a
+CH	=	3
+else ifeq (${SOC},ls2088a)
+SOC_NUM :=	2088a
+CH	=	3
+else ifeq (${SOC},lx2160a)
+SOC_NUM :=	2160a
+CH	=	3
+else ifeq (${SOC},ls1028a)
+SOC_NUM :=	1028a
+CH	=	3
+else
+$(error "Check SOC Not defined in create_pbl.mk.")
+endif
+
+ifeq (${CH},2)
+
+include ${CREATE_PBL_TOOL_PATH}/pbl_ch2.mk
+
+endif #CH2
+
+ifeq (${CH},3)
+
+include ${CREATE_PBL_TOOL_PATH}/pbl_ch3.mk
+
+endif #CH3
diff --git a/tools/nxp/create_pbl/pbl_ch2.mk b/tools/nxp/create_pbl/pbl_ch2.mk
new file mode 100644
index 0000000..e6f1d8b
--- /dev/null
+++ b/tools/nxp/create_pbl/pbl_ch2.mk
@@ -0,0 +1,60 @@
+#
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+
+CREATE_PBL	?=	${CREATE_PBL_TOOL_PATH}/create_pbl${BIN_EXT}
+BYTE_SWAP	?=	${CREATE_PBL_TOOL_PATH}/byte_swap${BIN_EXT}
+
+HOST_GCC	:= gcc
+
+.PHONY: pbl
+pbl:	${BUILD_PLAT}/bl2.bin
+ifeq ($(SECURE_BOOT),yes)
+pbl: ${BUILD_PLAT}/bl2.bin
+ifeq ($(RCW),"")
+	${Q}echo "Platform ${PLAT} requires rcw file. Please set RCW to point to the right RCW file for boot mode ${BOOT_MODE}"
+else
+	# Generate header for bl2.bin
+	$(Q)$(CST_DIR)/create_hdr_isbc --in ${BUILD_PLAT}/bl2.bin --out ${BUILD_PLAT}/hdr_bl2 ${BL2_INPUT_FILE}
+	# Compile create_pbl tool
+	${Q}${MAKE} CPPFLAGS="-DVERSION='\"${VERSION_STRING}\"'" --no-print-directory -C ${CREATE_PBL_TOOL_PATH};\
+	# Add bl2.bin to RCW
+	${CREATE_PBL} -r ${RCW} -i ${BUILD_PLAT}/bl2.bin -b ${BOOT_MODE} -c ${SOC_NUM} -d ${BL2_BASE} -e ${BL2_BASE}\
+			-o ${BUILD_PLAT}/bl2_${BOOT_MODE}.pbl ;\
+	# Add header to RCW
+	${CREATE_PBL} -r ${BUILD_PLAT}/bl2_${BOOT_MODE}.pbl -i ${BUILD_PLAT}/hdr_bl2 -b ${BOOT_MODE} -c ${SOC_NUM} \
+			-d ${BL2_HDR_LOC} -e ${BL2_HDR_LOC} -o ${BUILD_PLAT}/bl2_${BOOT_MODE}_sec.pbl -s;\
+	rm ${BUILD_PLAT}/bl2_${BOOT_MODE}.pbl
+# Swapping of RCW is required for QSPI Chassis 2 devices
+ifeq (${BOOT_MODE}, qspi)
+ifeq ($(SWAP),1)
+	${Q}echo "Byteswapping RCW for QSPI"
+	${BYTE_SWAP} ${BUILD_PLAT}/bl2_${BOOT_MODE}_sec.pbl;
+endif # SWAP
+endif # BOOT_MODE
+	cd ${CREATE_PBL_TOOL_PATH}; ${MAKE} clean ; cd -;
+endif
+else  # NON SECURE_BOOT
+ifeq ($(RCW),"")
+	${Q}echo "Platform ${PLAT} requires rcw file. Please set RCW to point to the right RCW file for boot mode ${BOOT_MODE}"
+else
+	# -a option appends the image for Chassis 3 devices in case of non secure boot
+	${Q}${MAKE} CPPFLAGS="-DVERSION='\"${VERSION_STRING}\"'" --no-print-directory -C ${CREATE_PBL_TOOL_PATH};
+	${CREATE_PBL} -r ${RCW} -i ${BUILD_PLAT}/bl2.bin -b ${BOOT_MODE} -c ${SOC_NUM} -d ${BL2_BASE} -e ${BL2_BASE} \
+	-o ${BUILD_PLAT}/bl2_${BOOT_MODE}.pbl ;
+# Swapping of RCW is required for QSPI Chassis 2 devices
+ifeq (${BOOT_MODE}, qspi)
+ifeq ($(SWAP),1)
+	${Q}echo "Byteswapping RCW for QSPI"
+	${BYTE_SWAP} ${BUILD_PLAT}/bl2_${BOOT_MODE}.pbl;
+endif # SWAP
+endif # BOOT_MODE
+	cd ${CREATE_PBL_TOOL_PATH}; ${MAKE} clean ; cd -;
+endif
+endif # SECURE_BOOT
+
+
+
diff --git a/tools/nxp/create_pbl/pbl_ch3.mk b/tools/nxp/create_pbl/pbl_ch3.mk
new file mode 100644
index 0000000..e9dbfb0
--- /dev/null
+++ b/tools/nxp/create_pbl/pbl_ch3.mk
@@ -0,0 +1,71 @@
+#
+# Copyright 2018-2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+SHELL=/bin/bash
+
+CREATE_PBL	?=	${CREATE_PBL_TOOL_PATH}/create_pbl${BIN_EXT}
+BYTE_SWAP	?=	${CREATE_PBL_TOOL_PATH}/byte_swap${BIN_EXT}
+
+HOST_GCC	:= gcc
+
+BL2_SRC_OFFSET ?= 0x9000
+BL2_HDR_SRC_OFFSET ?= 0x5000
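+# Convert the byte offsets above into 1 KiB block counts for dd's seek= option.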
+bl2_hdr_loc=$(shell echo $$(( $(BL2_HDR_SRC_OFFSET) / 1024 )))
+bl2_loc=$(shell echo $$(( $(BL2_SRC_OFFSET) / 1024 )))
+
+.PHONY: pbl
+pbl:	${BUILD_PLAT}/bl2.bin
+ifeq ($(SECURE_BOOT),yes)
+pbl: ${BUILD_PLAT}/bl2.bin
+ifeq ($(RCW),"")
+	${Q}echo "Platform ${PLAT} requires rcw file. Please set RCW to point to the right RCW file for boot mode ${BOOT_MODE}"
+else
+	# Generate header for bl2.bin
+	$(Q)$(CST_DIR)/create_hdr_isbc --in ${BUILD_PLAT}/bl2.bin --out ${BUILD_PLAT}/hdr_bl2 ${BL2_INPUT_FILE}
+
+	# Compile create_pbl tool
+	${Q}${MAKE} CPPFLAGS="-DVERSION='\"${VERSION_STRING}\"'" --no-print-directory -C ${CREATE_PBL_TOOL_PATH};\
+
+	# Add Block Copy command for bl2.bin to RCW
+	${CREATE_PBL} -r ${RCW} -i ${BUILD_PLAT}/bl2.bin -b ${BOOT_MODE} -c ${SOC_NUM} -d ${BL2_BASE} -e ${BL2_BASE}\
+			-o ${BUILD_PLAT}/bl2_${BOOT_MODE}.pbl -f ${BL2_SRC_OFFSET};\
+
+	# Add Block Copy command and Load CSF header command to RCW
+	${CREATE_PBL} -r ${BUILD_PLAT}/bl2_${BOOT_MODE}.pbl -i ${BUILD_PLAT}/hdr_bl2 -b ${BOOT_MODE} -c ${SOC_NUM} \
+			-d ${BL2_HDR_LOC} -e ${BL2_HDR_LOC} -s -f ${BL2_HDR_SRC_OFFSET}	\
+			-o ${BUILD_PLAT}/rcw_sec.pbl
+
+	# Sign and add "Load Security Header command to PBI commands
+	$(Q)$(CST_DIR)/create_hdr_pbi --out ${BUILD_PLAT}/bl2_${BOOT_MODE}_sec.pbl --in ${BUILD_PLAT}/rcw_sec.pbl ${PBI_INPUT_FILE}
+
+	# Append the bl2_hdr to the RCW image
+	@echo "${bl2_hdr_loc}"
+	dd if=${BUILD_PLAT}/hdr_bl2 of=${BUILD_PLAT}/bl2_${BOOT_MODE}_sec.pbl bs=1K seek=${bl2_hdr_loc}
+
+	# Append the bl2.bin to the RCW image
+	@echo "${bl2_loc}"
+	dd if=${BUILD_PLAT}/bl2.bin of=${BUILD_PLAT}/bl2_${BOOT_MODE}_sec.pbl bs=1K seek=${bl2_loc}
+
+	rm ${BUILD_PLAT}/bl2_${BOOT_MODE}.pbl
+	cd ${CREATE_PBL_TOOL_PATH}; ${MAKE} clean ; cd -;
+endif
+else  #SECURE_BOOT
+ifeq ($(RCW),"")
+	${Q}echo "Platform ${PLAT} requires rcw file. Please set RCW to point to the right RCW file for boot mode ${BOOT_MODE}"
+else
+	${Q}${MAKE} CPPFLAGS="-DVERSION='\"${VERSION_STRING}\"'" --no-print-directory -C ${CREATE_PBL_TOOL_PATH};
+
+	# Add Block Copy command and populate boot loc ptr for bl2.bin to RCW
+	${CREATE_PBL} -r ${RCW} -i ${BUILD_PLAT}/bl2.bin -b ${BOOT_MODE} -c ${SOC_NUM} -d ${BL2_BASE} -e ${BL2_BASE} \
+	-o ${BUILD_PLAT}/bl2_${BOOT_MODE}.pbl -f ${BL2_SRC_OFFSET};
+
+	# Append the bl2.bin to the RCW image
+	@echo "bl2_loc is ${bl2_offset}"
+	dd if=${BUILD_PLAT}/bl2.bin of=${BUILD_PLAT}/bl2_${BOOT_MODE}.pbl bs=1K seek=${bl2_loc}
+
+	cd ${CREATE_PBL_TOOL_PATH}; ${MAKE} clean ; cd -;
+endif
+endif # SECURE_BOOT
diff --git a/tools/nxp/plat_fiptool/plat_def_uuid_config.c b/tools/nxp/plat_fiptool/plat_def_uuid_config.c
new file mode 100644
index 0000000..fdb4b93
--- /dev/null
+++ b/tools/nxp/plat_fiptool/plat_def_uuid_config.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+
+#include <firmware_image_package.h>
+
+#include "tbbr_config.h"
+
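+/*
+ * NXP specific FIP payloads: DDR PHY firmware, fuse firmware and their
+ * certificates. Each entry is exposed as a fiptool command line option
+ * named after cmdline_name for packing the corresponding image.
+ */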
+toc_entry_t plat_def_toc_entries[] = {
+	/* DDR PHY firmwares */
+	{
+		.name = "DDR UDIMM PHY IMEM 1d FW",
+		.uuid = UUID_DDR_IMEM_UDIMM_1D,
+		.cmdline_name = "ddr-immem-udimm-1d"
+	},
+	{
+		.name = "DDR UDIMM PHY IMEM 2d FW",
+		.uuid = UUID_DDR_IMEM_UDIMM_2D,
+		.cmdline_name = "ddr-immem-udimm-2d"
+	},
+	{
+		.name = "DDR UDIMM PHY DMEM 1d FW",
+		.uuid = UUID_DDR_DMEM_UDIMM_1D,
+		.cmdline_name = "ddr-dmmem-udimm-1d"
+	},
+	{
+		.name = "DDR UDIMM PHY DMEM 2d FW",
+		.uuid = UUID_DDR_DMEM_UDIMM_2D,
+		.cmdline_name = "ddr-dmmem-udimm-2d"
+	},
+	{
+		.name = "DDR RDIMM PHY IMEM 1d FW",
+		.uuid = UUID_DDR_IMEM_RDIMM_1D,
+		.cmdline_name = "ddr-immem-rdimm-1d"
+	},
+	{
+		.name = "DDR RDIMM PHY IMEM 2d FW",
+		.uuid = UUID_DDR_IMEM_RDIMM_2D,
+		.cmdline_name = "ddr-immem-rdimm-2d"
+	},
+	{
+		.name = "DDR RDIMM PHY DMEM 1d FW",
+		.uuid = UUID_DDR_DMEM_RDIMM_1D,
+		.cmdline_name = "ddr-dmmem-rdimm-1d"
+	},
+	{
+		.name = "DDR RDIMM PHY DMEM 2d FW",
+		.uuid = UUID_DDR_DMEM_RDIMM_2D,
+		.cmdline_name = "ddr-dmmem-rdimm-2d"
+	},
+	{
+		.name = "FUSE PROV FW",
+		.uuid = UUID_FUSE_PROV,
+		.cmdline_name = "fuse-prov"
+	},
+	{
+		.name = "FUSE UPGRADE FW",
+		.uuid = UUID_FUSE_UP,
+		.cmdline_name = "fuse-upgrade"
+	},
+
+	/* Key Certificates */
+	{
+		.name = "DDR Firmware key certificate",
+		.uuid = UUID_DDR_FW_KEY_CERT,
+		.cmdline_name = "ddr-fw-key-cert"
+	},
+
+	/* Content certificates */
+	{
+		.name = "DDR UDIMM Firmware content certificate",
+		.uuid = UUID_DDR_UDIMM_FW_CONTENT_CERT,
+		.cmdline_name = "ddr-udimm-fw-cert"
+	},
+	{
+		.name = "DDR RDIMM Firmware content certificate",
+		.uuid = UUID_DDR_RDIMM_FW_CONTENT_CERT,
+		.cmdline_name = "ddr-rdimm-fw-cert"
+	},
+
+	{
+		.name = NULL,
+		.uuid = { {0} },
+		.cmdline_name = NULL,
+	}
+};
diff --git a/tools/nxp/plat_fiptool/plat_fiptool.mk b/tools/nxp/plat_fiptool/plat_fiptool.mk
new file mode 100644
index 0000000..ca2962a
--- /dev/null
+++ b/tools/nxp/plat_fiptool/plat_fiptool.mk
@@ -0,0 +1,33 @@
+#
+# Copyright (c) 2021, NXP. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Name of the platform defined source file which contains the
+# platform defined UUID entries populated in plat_def_toc_entries[].
+PLAT_DEF_UUID_CONFIG_FILE_NAME	:= plat_def_uuid_config
+
+PLAT_DEF_UUID_CONFIG_FILE_PATH := ../nxp/plat_fiptool
+
+PLAT_DEF_OID := yes
+PLAT_DEF_UUID := yes
+PLAT_DEF_UUID_OID_CONFIG_PATH := ../../plat/nxp/common/fip_handler/common
+
+
+INCLUDE_PATHS += -I${PLAT_DEF_UUID_OID_CONFIG_PATH} \
+		 -I./
+# Clean the stale object file.
+$(shell rm ${PLAT_DEF_UUID_CONFIG_FILE_PATH}/${PLAT_DEF_UUID_CONFIG_FILE_NAME}.o)
+
+ifeq (${PLAT_DEF_OID},yes)
+HOSTCCFLAGS += -DPLAT_DEF_OID
+endif
+
+ifeq (${PLAT_DEF_UUID},yes)
+HOSTCCFLAGS += -DPLAT_DEF_FIP_UUID
+PLAT_OBJECTS += ${PLAT_DEF_UUID_CONFIG_FILE_PATH}/${PLAT_DEF_UUID_CONFIG_FILE_NAME}.o
+endif
+
+OBJECTS += ${PLAT_OBJECTS}