Merge pull request #1882 from ambroise-arm/av/a15-errata

Apply workarounds for errata of Cortex-A15
diff --git a/.checkpatch.conf b/.checkpatch.conf
index 50ab716..2a53961 100644
--- a/.checkpatch.conf
+++ b/.checkpatch.conf
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -69,3 +69,23 @@
 # "Use of volatile is usually wrong: see Documentation/volatile-considered-harmful.txt"
 # We allow the usage of the volatile keyword in TF.
 --ignore VOLATILE
+
+# BRACES reports this kind of messages:
+# braces {} are not necessary for any arm of this statement
+--ignore BRACES
+
+# PREFER_KERNEL_TYPES reports this kind of messages (when using --strict):
+# "Prefer kernel type 'u32' over 'uint32_t'"
+--ignore PREFER_KERNEL_TYPES
+
+# USLEEP_RANGE reports this kind of messages (when using --strict):
+# "usleep_range is preferred over udelay; see Documentation/timers/timers-howto.txt"
+--ignore USLEEP_RANGE
+
+# COMPARISON_TO_NULL reports this kind of messages (when using --strict):
+# Comparison to NULL could be written ""
+--ignore COMPARISON_TO_NULL
+
+# UNNECESSARY_PARENTHESES reports this kind of messages (when using --strict):
+# Unnecessary parentheses around ""
+--ignore UNNECESSARY_PARENTHESES
diff --git a/Makefile b/Makefile
index 6386bef..e39b353 100644
--- a/Makefile
+++ b/Makefile
@@ -371,7 +371,7 @@
 
 ifeq ($(ENABLE_PIE),1)
     TF_CFLAGS		+=	-fpie
-    TF_LDFLAGS		+=	-pie
+    TF_LDFLAGS		+=	-pie --no-dynamic-linker
 else
     PIE_FOUND		:=	$(findstring --enable-default-pie,${GCC_V_OUTPUT})
     ifneq ($(PIE_FOUND),)
@@ -841,13 +841,18 @@
 
 checkpatch:		locate-checkpatch
 	@echo "  CHECKING STYLE"
+	@if test -n "${CHECKPATCH_OPTS}"; then				\
+		echo "    with ${CHECKPATCH_OPTS} option(s)";		\
+	fi
 	${Q}COMMON_COMMIT=$$(git merge-base HEAD ${BASE_COMMIT});	\
 	for commit in `git rev-list $$COMMON_COMMIT..HEAD`; do		\
 		printf "\n[*] Checking style of '$$commit'\n\n";	\
 		git log --format=email "$$commit~..$$commit"		\
-			-- ${CHECK_PATHS} | ${CHECKPATCH} - || true;	\
+			-- ${CHECK_PATHS} |				\
+			${CHECKPATCH} ${CHECKPATCH_OPTS} - || true;	\
 		git diff --format=email "$$commit~..$$commit"		\
-			-- ${CHECK_PATHS} | ${CHECKPATCH} - || true;	\
+			-- ${CHECK_PATHS} |				\
+			${CHECKPATCH} ${CHECKPATCH_OPTS} - || true;	\
 	done
 
 certtool: ${CRTTOOL}
diff --git a/bl1/aarch32/bl1_exceptions.S b/bl1/aarch32/bl1_exceptions.S
index 6728278..f2af9ab 100644
--- a/bl1/aarch32/bl1_exceptions.S
+++ b/bl1/aarch32/bl1_exceptions.S
@@ -71,7 +71,7 @@
 	 */
 	ldr	lr, [r8, #ENTRY_POINT_INFO_PC_OFFSET]
 	ldr	r1, [r8, #(ENTRY_POINT_INFO_PC_OFFSET + 4)]
-	msr	spsr, r1
+	msr	spsr_xc, r1
 
 	/* Some BL32 stages expect lr_svc to provide the BL33 entry address */
 	cps	#MODE32_svc
diff --git a/bl2/aarch32/bl2_el3_entrypoint.S b/bl2/aarch32/bl2_el3_entrypoint.S
index 35da133..9b4da6b 100644
--- a/bl2/aarch32/bl2_el3_entrypoint.S
+++ b/bl2/aarch32/bl2_el3_entrypoint.S
@@ -78,7 +78,7 @@
 	 */
 	ldr	lr, [r8, #ENTRY_POINT_INFO_PC_OFFSET]
 	ldr	r1, [r8, #(ENTRY_POINT_INFO_PC_OFFSET + 4)]
-	msr	spsr, r1
+	msr	spsr_xc, r1
 
 	/* Some BL32 stages expect lr_svc to provide the BL33 entry address */
 	cps	#MODE32_svc
diff --git a/bl2/aarch32/bl2_entrypoint.S b/bl2/aarch32/bl2_entrypoint.S
index 23d1513..102fd2f 100644
--- a/bl2/aarch32/bl2_entrypoint.S
+++ b/bl2/aarch32/bl2_entrypoint.S
@@ -42,12 +42,13 @@
 	stcopr	r0, VBAR
 	isb
 
-	/* -----------------------------------------------------
-	 * Enable the instruction cache
-	 * -----------------------------------------------------
+	/* --------------------------------------------------------
+	 * Enable the instruction cache - disable speculative loads
+	 * --------------------------------------------------------
 	 */
 	ldcopr	r0, SCTLR
 	orr	r0, r0, #SCTLR_I_BIT
+	bic	r0, r0, #SCTLR_DSSBS_BIT
 	stcopr	r0, SCTLR
 	isb
 
diff --git a/bl2/aarch64/bl2_entrypoint.S b/bl2/aarch64/bl2_entrypoint.S
index 611b807..c820cd1 100644
--- a/bl2/aarch64/bl2_entrypoint.S
+++ b/bl2/aarch64/bl2_entrypoint.S
@@ -41,12 +41,14 @@
 
 	/* ---------------------------------------------
 	 * Enable the instruction cache, stack pointer
-	 * and data access alignment checks
+	 * and data access alignment checks and disable
+	 * speculative loads.
 	 * ---------------------------------------------
 	 */
 	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
 	mrs	x0, sctlr_el1
 	orr	x0, x0, x1
+	bic	x0, x0, #SCTLR_DSSBS_BIT
 	msr	sctlr_el1, x0
 	isb
 
diff --git a/bl2u/aarch32/bl2u_entrypoint.S b/bl2u/aarch32/bl2u_entrypoint.S
index 67566df..6391f53 100644
--- a/bl2u/aarch32/bl2u_entrypoint.S
+++ b/bl2u/aarch32/bl2u_entrypoint.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -41,12 +41,13 @@
 	stcopr	r0, VBAR
 	isb
 
-	/* -----------------------------------------------------
-	 * Enable the instruction cache
-	 * -----------------------------------------------------
+	/* --------------------------------------------------------
+	 * Enable the instruction cache - disable speculative loads
+	 * --------------------------------------------------------
 	 */
 	ldcopr	r0, SCTLR
 	orr	r0, r0, #SCTLR_I_BIT
+	bic	r0, r0, #SCTLR_DSSBS_BIT
 	stcopr	r0, SCTLR
 	isb
 
diff --git a/bl2u/aarch64/bl2u_entrypoint.S b/bl2u/aarch64/bl2u_entrypoint.S
index 591f5f6..452869e 100644
--- a/bl2u/aarch64/bl2u_entrypoint.S
+++ b/bl2u/aarch64/bl2u_entrypoint.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -38,12 +38,14 @@
 
 	/* ---------------------------------------------
 	 * Enable the instruction cache, stack pointer
-	 * and data access alignment checks
+	 * and data access alignment checks and disable
+	 * speculative loads.
 	 * ---------------------------------------------
 	 */
 	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
 	mrs	x0, sctlr_el1
 	orr	x0, x0, x1
+	bic	x0, x0, #SCTLR_DSSBS_BIT
 	msr	sctlr_el1, x0
 	isb
 
diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S
index 710b458..cd08ce7 100644
--- a/bl32/tsp/aarch64/tsp_entrypoint.S
+++ b/bl32/tsp/aarch64/tsp_entrypoint.S
@@ -63,12 +63,14 @@
 
 	/* ---------------------------------------------
 	 * Enable the instruction cache, stack pointer
-	 * and data access alignment checks
+	 * and data access alignment checks and disable
+	 * speculative loads.
 	 * ---------------------------------------------
 	 */
 	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
 	mrs	x0, sctlr_el1
 	orr	x0, x0, x1
+	bic	x0, x0, #SCTLR_DSSBS_BIT
 	msr	sctlr_el1, x0
 	isb
 
diff --git a/drivers/arm/css/scmi/scmi_private.h b/drivers/arm/css/scmi/scmi_private.h
index 6530573..61437f6 100644
--- a/drivers/arm/css/scmi/scmi_private.h
+++ b/drivers/arm/css/scmi/scmi_private.h
@@ -152,4 +152,9 @@
 	assert(ch->info && ch->info->scmi_mbx_mem);
 }
 
+/*
+ * SCMI vendor specific protocol
+ */
+#define SCMI_SYS_VENDOR_EXT_PROTO_ID		0x80
+
 #endif /* SCMI_PRIVATE_H */
diff --git a/drivers/arm/css/scmi/vendor/scmi_sq.c b/drivers/arm/css/scmi/vendor/scmi_sq.c
new file mode 100644
index 0000000..2ae7ca1
--- /dev/null
+++ b/drivers/arm/css/scmi/vendor/scmi_sq.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/css/scmi.h>
+
+#include "scmi_private.h"
+#include "scmi_sq.h"
+
+#include <sq_common.h>
+
+/* SCMI message ID to get the available DRAM region */
+#define SCMI_VENDOR_EXT_MEMINFO_GET_MSG		0x3
+
+/*
+ * API to get the available DRAM region
+ */
+int scmi_get_draminfo(void *p, struct draminfo *info)
+{
+	mailbox_mem_t *mbx_mem;
+	int token = 0, ret;
+	scmi_channel_t *ch = (scmi_channel_t *)p;
+	struct dram_info_resp response;
+
+	validate_scmi_channel(ch);
+
+	scmi_get_channel(ch);
+
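+	/* Build the vendor-specific MEMINFO_GET request in the shared
+	 * mailbox memory.
+	 */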
+	mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+	mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_SYS_VENDOR_EXT_PROTO_ID,
+			SCMI_VENDOR_EXT_MEMINFO_GET_MSG, token);
+	mbx_mem->len = 8;
+	mbx_mem->flags = SCMI_FLAG_RESP_POLL;
+
+	scmi_send_sync_command(ch);
+
+	/*
+	 * Ensure that any read of the SCMI payload area is done after reading
+	 * the MHU register. If these two reads were reordered, the CPU would
+	 * read invalid payload data.
+	 */
+	dmbld();
+
+	/* Get the return values */
+	SCMI_PAYLOAD_RET_VAL1(mbx_mem->payload, ret);
+
+	memcpy(&response, (void *)mbx_mem->payload, sizeof(response));
+
+	scmi_put_channel(ch);
+
+	*info = response.info;
+
+	return ret;
+}
diff --git a/drivers/arm/css/scmi/vendor/scmi_sq.h b/drivers/arm/css/scmi/vendor/scmi_sq.h
new file mode 100644
index 0000000..aee1a3a
--- /dev/null
+++ b/drivers/arm/css/scmi/vendor/scmi_sq.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SCMI_SQ_H
+#define SCMI_SQ_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <sq_common.h>
+
+/* Structure to represent available DRAM region */
+struct dram_info_resp {
+	int status;
+	int reserved;
+	struct draminfo info;
+};
+
+/* API to get the available DRAM region */
+int scmi_get_draminfo(void *p, struct draminfo *info);
+
+#endif /* SCMI_SQ_H */
diff --git a/drivers/intel/soc/stratix10/io/s10_memmap_qspi.c b/drivers/intel/soc/stratix10/io/s10_memmap_qspi.c
new file mode 100644
index 0000000..7d7d55f
--- /dev/null
+++ b/drivers/intel/soc/stratix10/io/s10_memmap_qspi.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <platform_def.h>
+
+#include <common/debug.h>
+#include <drivers/io/io_driver.h>
+#include <drivers/io/io_memmap.h>
+#include <drivers/io/io_storage.h>
+#include <lib/utils.h>
+
+#include "drivers/qspi/cadence_qspi.h"
+
+/* As we need to be able to keep state for seek, only one file can be open
+ * at a time. Make this a structure and point to the entity->info. When we
+ * can malloc memory we can change this to support more open files.
+ */
+typedef struct {
+	/* Use the 'in_use' flag as any value for base and file_pos could be
+	 * valid.
+	 */
+	int		in_use;
+	uintptr_t	base;
+	size_t		file_pos;
+	size_t		size;
+} file_state_t;
+
+static file_state_t current_file = {0};
+
+/* Identify the device type as memmap */
+static io_type_t device_type_memmap(void)
+{
+	return IO_TYPE_MEMMAP;
+}
+
+/* Memmap device functions */
+static int memmap_dev_open(const uintptr_t dev_spec, io_dev_info_t **dev_info);
+static int memmap_block_open(io_dev_info_t *dev_info, const uintptr_t spec,
+			     io_entity_t *entity);
+static int memmap_block_seek(io_entity_t *entity, int mode,
+			     ssize_t offset);
+static int memmap_block_len(io_entity_t *entity, size_t *length);
+static int memmap_block_read(io_entity_t *entity, uintptr_t buffer,
+			     size_t length, size_t *length_read);
+static int memmap_block_write(io_entity_t *entity, const uintptr_t buffer,
+			      size_t length, size_t *length_written);
+static int memmap_block_close(io_entity_t *entity);
+static int memmap_dev_close(io_dev_info_t *dev_info);
+
+
+static const io_dev_connector_t memmap_dev_connector = {
+	.dev_open = memmap_dev_open
+};
+
+
+static const io_dev_funcs_t memmap_dev_funcs = {
+	.type = device_type_memmap,
+	.open = memmap_block_open,
+	.seek = memmap_block_seek,
+	.size = memmap_block_len,
+	.read = memmap_block_read,
+	.write = memmap_block_write,
+	.close = memmap_block_close,
+	.dev_init = NULL,
+	.dev_close = memmap_dev_close,
+};
+
+
+/* No state associated with this device so structure can be const */
+static const io_dev_info_t memmap_dev_info = {
+	.funcs = &memmap_dev_funcs,
+	.info = (uintptr_t)NULL
+};
+
+
+/* Open a connection to the memmap device */
+static int memmap_dev_open(const uintptr_t dev_spec __unused,
+			   io_dev_info_t **dev_info)
+{
+	assert(dev_info != NULL);
+	*dev_info = (io_dev_info_t *)&memmap_dev_info; /* cast away const */
+
+	return 0;
+}
+
+
+
+/* Close a connection to the memmap device */
+static int memmap_dev_close(io_dev_info_t *dev_info)
+{
+	/* NOP */
+	/* TODO: Consider tracking open files and cleaning them up here */
+	return 0;
+}
+
+
+/* Open a file on the memmap device */
+static int memmap_block_open(io_dev_info_t *dev_info, const uintptr_t spec,
+			     io_entity_t *entity)
+{
+	int result = -ENOMEM;
+	const io_block_spec_t *block_spec = (io_block_spec_t *)spec;
+
+	/* Since we need to track open state for seek() we only allow one open
+	 * spec at a time. When we have dynamic memory we can malloc and set
+	 * entity->info.
+	 */
+	if (current_file.in_use == 0) {
+		assert(block_spec != NULL);
+		assert(entity != NULL);
+
+		current_file.in_use = 1;
+		current_file.base = block_spec->offset;
+		/* File cursor offset for seek and incremental reads etc. */
+		current_file.file_pos = 0;
+		current_file.size = block_spec->length;
+		entity->info = (uintptr_t)&current_file;
+		result = 0;
+	} else {
+		WARN("A Memmap device is already active. Close first.\n");
+	}
+
+	return result;
+}
+
+
+/* Seek to a particular file offset on the memmap device */
+static int memmap_block_seek(io_entity_t *entity, int mode, ssize_t offset)
+{
+	int result = -ENOENT;
+	file_state_t *fp;
+
+	/* We only support IO_SEEK_SET for the moment. */
+	if (mode == IO_SEEK_SET) {
+		assert(entity != NULL);
+
+		fp = (file_state_t *) entity->info;
+
+		/* Assert that new file position is valid */
+		assert((offset >= 0) && (offset < fp->size));
+
+		/* Reset file position */
+		fp->file_pos = offset;
+		result = 0;
+	}
+
+	return result;
+}
+
+
+/* Return the size of a file on the memmap device */
+static int memmap_block_len(io_entity_t *entity, size_t *length)
+{
+	assert(entity != NULL);
+	assert(length != NULL);
+
+	*length = ((file_state_t *)entity->info)->size;
+
+	return 0;
+}
+
+
+/* Read data from a file on the memmap device */
+static int memmap_block_read(io_entity_t *entity, uintptr_t buffer,
+			     size_t length, size_t *length_read)
+{
+	file_state_t *fp;
+	size_t pos_after;
+
+	assert(entity != NULL);
+	assert(length_read != NULL);
+
+	fp = (file_state_t *) entity->info;
+
+	/* Assert that file position is valid for this read operation */
+	pos_after = fp->file_pos + length;
+	assert((pos_after >= fp->file_pos) && (pos_after <= fp->size));
+
+	//memcpy((void *)buffer, (void *)(fp->base + fp->file_pos), length);
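+	/* The data lives in QSPI flash, so fetch it through the Cadence QSPI
+	 * driver rather than copying from a memory-mapped window.
+	 */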
+	cad_qspi_read((void *)buffer, fp->base + fp->file_pos, length);
+	*length_read = length;
+
+	/* Set file position after read */
+	fp->file_pos = pos_after;
+
+	return 0;
+}
+
+
+/* Write data to a file on the memmap device */
+static int memmap_block_write(io_entity_t *entity, const uintptr_t buffer,
+			      size_t length, size_t *length_written)
+{
+	file_state_t *fp;
+	size_t pos_after;
+
+	assert(entity != NULL);
+	assert(length_written != NULL);
+
+	fp = (file_state_t *) entity->info;
+
+	/* Assert that file position is valid for this write operation */
+	pos_after = fp->file_pos + length;
+	assert((pos_after >= fp->file_pos) && (pos_after <= fp->size));
+
+	memcpy((void *)(fp->base + fp->file_pos), (void *)buffer, length);
+
+	*length_written = length;
+
+	/* Set file position after write */
+	fp->file_pos = pos_after;
+
+	return 0;
+}
+
+
+/* Close a file on the memmap device */
+static int memmap_block_close(io_entity_t *entity)
+{
+	assert(entity != NULL);
+
+	entity->info = 0;
+
+	/* This would be a mem free() if we had malloc.*/
+	zeromem((void *)&current_file, sizeof(current_file));
+
+	return 0;
+}
+
+
+/* Exported functions */
+
+/* Register the memmap driver with the IO abstraction */
+int register_io_dev_memmap(const io_dev_connector_t **dev_con)
+{
+	int result;
+
+	assert(dev_con != NULL);
+
+	result = io_register_device(&memmap_dev_info);
+	if (result == 0)
+		*dev_con = &memmap_dev_connector;
+
+	return result;
+}
diff --git a/drivers/io/io_fip.c b/drivers/io/io_fip.c
index d4771b5..eaaf090 100644
--- a/drivers/io/io_fip.c
+++ b/drivers/io/io_fip.c
@@ -85,7 +85,6 @@
 }
 
 
-/* TODO: We could check version numbers or do a package checksum? */
 static inline int is_valid_header(fip_toc_header_t *header)
 {
 	if ((header->name == TOC_HEADER_NAME) && (header->serial_number != 0)) {
diff --git a/drivers/io/io_storage.c b/drivers/io/io_storage.c
index c9ff31b..e444f87 100644
--- a/drivers/io/io_storage.c
+++ b/drivers/io/io_storage.c
@@ -189,9 +189,6 @@
 	return result;
 }
 
-
-/* TODO: Consider whether an explicit "shutdown" API should be included */
-
 /* Close a connection to a device */
 int io_dev_close(uintptr_t dev_handle)
 {
diff --git a/drivers/st/mmc/stm32_sdmmc2.c b/drivers/st/mmc/stm32_sdmmc2.c
index 06de112..f453ce9 100644
--- a/drivers/st/mmc/stm32_sdmmc2.c
+++ b/drivers/st/mmc/stm32_sdmmc2.c
@@ -723,6 +723,7 @@
 	mdelay(1);
 
 	sdmmc2_params.clk_rate = stm32mp_clk_get_rate(sdmmc2_params.clock_id);
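+	/* Advertise the supported voltage window (3.2 V - 3.4 V) in the OCR */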
+	sdmmc2_params.device_info->ocr_voltage = OCR_3_2_3_3 | OCR_3_3_3_4;
 
 	return mmc_init(&stm32_sdmmc2_ops, sdmmc2_params.clk_rate,
 			sdmmc2_params.bus_width, sdmmc2_params.flags,
diff --git a/drivers/synopsys/emmc/dw_mmc.c b/drivers/synopsys/emmc/dw_mmc.c
index 0c5c645..4cd1226 100644
--- a/drivers/synopsys/emmc/dw_mmc.c
+++ b/drivers/synopsys/emmc/dw_mmc.c
@@ -243,6 +243,11 @@
 		op = CMD_WAIT_PRVDATA_COMPLETE;
 		break;
 	case 8:
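+		/*
+		 * CMD8 is SEND_EXT_CSD on eMMC (expects data) but SEND_IF_COND
+		 * on SD cards (no data), so the flags depend on the device type.
+		 */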
+		if (dw_params.mmc_dev_type == MMC_IS_EMMC)
+			op = CMD_DATA_TRANS_EXPECT | CMD_WAIT_PRVDATA_COMPLETE;
+		else
+			op = CMD_WAIT_PRVDATA_COMPLETE;
+		break;
 	case 17:
 	case 18:
 		op = CMD_DATA_TRANS_EXPECT | CMD_WAIT_PRVDATA_COMPLETE;
@@ -252,6 +257,9 @@
 		op = CMD_WRITE | CMD_DATA_TRANS_EXPECT |
 		     CMD_WAIT_PRVDATA_COMPLETE;
 		break;
+	case 51:
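+		/* ACMD51 (SEND_SCR) returns the SD Configuration Register as data */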
+		op = CMD_DATA_TRANS_EXPECT;
+		break;
 	default:
 		op = 0;
 		break;
@@ -337,7 +345,6 @@
 	uintptr_t base;
 
 	assert(((buf & DWMMC_ADDRESS_MASK) == 0) &&
-	       ((size % MMC_BLOCK_SIZE) == 0) &&
 	       (dw_params.desc_size > 0) &&
 	       ((dw_params.reg_base & MMC_BLOCK_MASK) == 0) &&
 	       ((dw_params.desc_base & MMC_BLOCK_MASK) == 0) &&
@@ -352,6 +359,12 @@
 	base = dw_params.reg_base;
 	desc = (struct dw_idmac_desc *)dw_params.desc_base;
 	mmio_write_32(base + DWMMC_BYTCNT, size);
+
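+	/* For transfers shorter than one block, program the exact byte count
+	 * as the block size; otherwise use the standard block size.
+	 */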
+	if (size < MMC_BLOCK_SIZE)
+		mmio_write_32(base + DWMMC_BLKSIZ, size);
+	else
+		mmio_write_32(base + DWMMC_BLKSIZ, MMC_BLOCK_SIZE);
+
 	mmio_write_32(base + DWMMC_RINTSTS, ~0);
 	for (i = 0; i < desc_cnt; i++) {
 		desc[i].des0 = IDMAC_DES0_OWN | IDMAC_DES0_CH | IDMAC_DES0_DIC;
@@ -375,11 +388,22 @@
 	flush_dcache_range(dw_params.desc_base,
 			   desc_cnt * DWMMC_DMA_MAX_BUFFER_SIZE);
 
+
 	return 0;
 }
 
 static int dw_read(int lba, uintptr_t buf, size_t size)
 {
+	uint32_t data = 0;
+	int timeout = TIMEOUT;
+
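+	/* Wait for the Data Transfer Over (DTO) interrupt, then invalidate the
+	 * buffer so the CPU sees the data written by the controller's DMA.
+	 */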
+	do {
+		data = mmio_read_32(dw_params.reg_base + DWMMC_RINTSTS);
+		udelay(50);
+	} while (!(data & INT_DTO) && timeout-- > 0);
+
+	inv_dcache_range(buf, size);
+
 	return 0;
 }
 
@@ -401,6 +425,9 @@
 		(params->bus_width == MMC_BUS_WIDTH_8)));
 
 	memcpy(&dw_params, params, sizeof(dw_mmc_params_t));
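+	/* Program the FIFO threshold (watermark) register */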
+	mmio_write_32(dw_params.reg_base + DWMMC_FIFOTH, 0x103ff);
 	mmc_init(&dw_mmc_ops, params->clk_rate, params->bus_width,
 		 params->flags, info);
+
+	dw_params.mmc_dev_type = info->mmc_dev_type;
 }
diff --git a/fdts/stm32mp157a-dk1.dts b/fdts/stm32mp157a-dk1.dts
index cf0fe28..68188be 100644
--- a/fdts/stm32mp157a-dk1.dts
+++ b/fdts/stm32mp157a-dk1.dts
@@ -173,6 +173,7 @@
 /* ATF Specific */
 #include <dt-bindings/clock/stm32mp1-clksrc.h>
 #include "stm32mp15-ddr3-1x4Gb-1066-binG.dtsi"
+#include "stm32mp157c-security.dtsi"
 
 / {
 	aliases {
@@ -188,14 +189,6 @@
 		gpio25 = &gpioz;
 		i2c3 = &i2c4;
 	};
-
-	soc {
-		stgen: stgen@5C008000 {
-			compatible = "st,stm32-stgen";
-			reg = <0x5C008000 0x1000>;
-			status = "okay";
-		};
-	};
 };
 
 /* CLOCK init */
diff --git a/fdts/stm32mp157c-ed1.dts b/fdts/stm32mp157c-ed1.dts
index 0fadffb..820e413 100644
--- a/fdts/stm32mp157c-ed1.dts
+++ b/fdts/stm32mp157c-ed1.dts
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 /*
- * Copyright (C) STMicroelectronics 2017 - All Rights Reserved
+ * Copyright (C) STMicroelectronics 2017-2019 - All Rights Reserved
  * Author: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
  */
 /dts-v1/;
@@ -191,6 +191,7 @@
 /* ATF Specific */
 #include <dt-bindings/clock/stm32mp1-clksrc.h>
 #include "stm32mp15-ddr3-2x4Gb-1066-binG.dtsi"
+#include "stm32mp157c-security.dtsi"
 
 / {
 	aliases {
@@ -208,14 +209,6 @@
 		gpio25 = &gpioz;
 		i2c3 = &i2c4;
 	};
-
-	soc {
-		stgen: stgen@5C008000 {
-			compatible = "st,stm32-stgen";
-			reg = <0x5C008000 0x1000>;
-			status = "okay";
-		};
-	};
 };
 
 /* CLOCK init */
diff --git a/fdts/stm32mp157c-security.dtsi b/fdts/stm32mp157c-security.dtsi
new file mode 100644
index 0000000..fb04e7d
--- /dev/null
+++ b/fdts/stm32mp157c-security.dtsi
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017-2019, STMicroelectronics - All Rights Reserved
+ *
+ * SPDX-License-Identifier:	GPL-2.0+	BSD-3-Clause
+ */
+
+/ {
+	soc {
+		stgen: stgen@5C008000 {
+			compatible = "st,stm32-stgen";
+			reg = <0x5C008000 0x1000>;
+			status = "okay";
+		};
+	};
+};
+
+&bsec {
+	mac_addr: mac_addr@e4 {
+		reg = <0xe4 0x6>;
+		status = "okay";
+		secure-status = "okay";
+	};
+	/* Spare field to align on 32-bit OTP granularity */
+	spare_ns_ea: spare_ns_ea@ea {
+		reg = <0xea 0x2>;
+		status = "okay";
+		secure-status = "okay";
+	};
+	board_id: board_id@ec {
+		reg = <0xec 0x4>;
+		status = "okay";
+		secure-status = "okay";
+	};
+};
diff --git a/include/drivers/arm/css/css_mhu_doorbell.h b/include/drivers/arm/css/css_mhu_doorbell.h
index ecee563..e6f7a1b 100644
--- a/include/drivers/arm/css/css_mhu_doorbell.h
+++ b/include/drivers/arm/css/css_mhu_doorbell.h
@@ -12,7 +12,7 @@
 #include <lib/mmio.h>
 
 /* MHUv2 Base Address */
-#define MHUV2_BASE_ADDR		PLAT_CSS_MHU_BASE
+#define MHUV2_BASE_ADDR		PLAT_MHUV2_BASE
 
 /* MHUv2 Control Registers Offsets */
 #define MHU_V2_MSG_NO_CAP_OFFSET		0xF80
diff --git a/include/drivers/io/io_storage.h b/include/drivers/io/io_storage.h
index ec6db3f..084c67c 100644
--- a/include/drivers/io/io_storage.h
+++ b/include/drivers/io/io_storage.h
@@ -79,8 +79,6 @@
  * re-initialisation */
 int io_dev_init(uintptr_t dev_handle, const uintptr_t init_params);
 
-/* TODO: Consider whether an explicit "shutdown" API should be included */
-
 /* Close a connection to a device */
 int io_dev_close(uintptr_t dev_handle);
 
diff --git a/include/drivers/synopsys/dw_mmc.h b/include/drivers/synopsys/dw_mmc.h
index 7031e0f..2004355 100644
--- a/include/drivers/synopsys/dw_mmc.h
+++ b/include/drivers/synopsys/dw_mmc.h
@@ -16,6 +16,7 @@
 	int		clk_rate;
 	int		bus_width;
 	unsigned int	flags;
+	enum mmc_device_type	mmc_dev_type;
 } dw_mmc_params_t;
 
 void dw_mmc_init(dw_mmc_params_t *params, struct mmc_device_info *info);
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
index de11583..a24bf90 100644
--- a/lib/aarch64/misc_helpers.S
+++ b/lib/aarch64/misc_helpers.S
@@ -531,8 +531,8 @@
 #endif
 	/*
 	 * Calculate the offset based on return address in x30.
-	 * Assume that this funtion is called within a page of the start of
-	 * of fixup region.
+	 * Assume that this function is called within a page at the start of
+	 * the fixup region.
 	 */
 	and	x2, x30, #~(PAGE_SIZE - 1)
 	sub	x0, x2, x6	/* Diff(S) = Current Address - Compiled Address */
@@ -580,13 +580,13 @@
 	 *
 	 * r_offset is address of reference
 	 * r_info is symbol index and type of relocation (in this case
-	 * 0x403 which corresponds to R_AARCH64_RELATIV).
+	 * 0x403 which corresponds to R_AARCH64_RELATIVE).
 	 * r_addend is constant part of expression.
 	 *
 	 * Size of Elf64_Rela structure is 24 bytes.
 	 */
 1:
-	/* Assert that the relocation type is R_AARCH64_RELATIV */
+	/* Assert that the relocation type is R_AARCH64_RELATIVE */
 #if ENABLE_ASSERTIONS
 	ldr	x3, [x1, #8]
 	cmp	x3, #0x403
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
index 0e6a6fa..7957b61 100644
--- a/lib/xlat_tables_v2/xlat_tables_core.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -325,8 +325,9 @@
 
 	return action;
 }
+
 /*
- * Recursive function that writes to the translation tables and unmaps the
+ * Function that writes to the translation tables and unmaps the
  * specified region.
  */
 static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
@@ -337,70 +338,137 @@
 {
 	assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
 
-	uint64_t *subtable;
-	uint64_t desc;
+	/*
+	 * Data structure to track the DESC_TABLE entry before iterating into
+	 * the subtable of the next translation level. It is used to restore
+	 * the previous level after the subtable iteration is finished.
+	 */
+	struct desc_table_unmap {
+		uint64_t *table_base;
+		uintptr_t table_idx_va;
+		unsigned int idx;
+	} desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
+		{NULL, 0U, XLAT_TABLE_ENTRIES}, };
 
+	unsigned int this_level = level;
+	uint64_t *this_base = table_base;
+	unsigned int max_entries = table_entries;
+	size_t level_size = XLAT_BLOCK_SIZE(this_level);
+	unsigned int table_idx;
 	uintptr_t table_idx_va;
-	uintptr_t table_idx_end_va; /* End VA of this entry */
 
 	uintptr_t region_end_va = mm->base_va + mm->size - 1U;
 
-	unsigned int table_idx;
-
 	table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
 	table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
 
-	while (table_idx < table_entries) {
+	while (this_base != NULL) {
 
-		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1U;
+		uint64_t desc;
+		uint64_t desc_type;
+		uintptr_t table_idx_end_va; /* End VA of this entry */
+		action_t action;
 
-		desc = table_base[table_idx];
-		uint64_t desc_type = desc & DESC_MASK;
+		/* Finished iterating over the current xlat level. */
+		if (table_idx >= max_entries) {
+			if (this_level > ctx->base_level) {
+				xlat_table_dec_regions_count(ctx, this_base);
+			}
 
-		action_t action = xlat_tables_unmap_region_action(mm,
-				table_idx_va, table_idx_end_va, level,
-				desc_type);
+			if (this_level > level) {
+				uint64_t *subtable;
 
-		if (action == ACTION_WRITE_BLOCK_ENTRY) {
+				/* Back from the subtable iteration; restore
+				 * the previous DESC_TABLE entry.
+				 */
+				this_level--;
+				this_base = desc_tables[this_level].table_base;
+				table_idx = desc_tables[this_level].idx;
+				table_idx_va =
+					desc_tables[this_level].table_idx_va;
+				level_size = XLAT_BLOCK_SIZE(this_level);
+
+				if (this_level == level) {
+					max_entries = table_entries;
+				} else {
+					max_entries = XLAT_TABLE_ENTRIES;
+				}
+
+				desc = this_base[table_idx];
+				subtable =  (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+				/*
+				 * If the subtable is now empty, remove its reference.
+				 */
+				if (xlat_table_is_empty(ctx, subtable)) {
+					this_base[table_idx] = INVALID_DESC;
+					xlat_arch_tlbi_va(table_idx_va,
+							ctx->xlat_regime);
+				}
+				table_idx++;
+				table_idx_va += level_size;
+
+			} else {
+				/* Reached the end of the top level; exit. */
+				this_base = NULL;
+				break;
+			}
+
+		}
+
+		/* If we have reached the end of the region, stop iterating
+		 * entries in the current xlat level.
+		 */
+		if (region_end_va <= table_idx_va) {
+			table_idx = max_entries;
+			continue;
+		}
+
+
+		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(this_level) - 1U;
+
+		desc = this_base[table_idx];
+		desc_type = desc & DESC_MASK;
+
+		action = xlat_tables_unmap_region_action(mm, table_idx_va,
+							 table_idx_end_va,
+							 this_level,
+							 desc_type);
 
-			table_base[table_idx] = INVALID_DESC;
+		if (action == ACTION_WRITE_BLOCK_ENTRY) {
+			this_base[table_idx] = INVALID_DESC;
 			xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);
 
+			table_idx++;
+			table_idx_va += level_size;
 		} else if (action == ACTION_RECURSE_INTO_TABLE) {
 
+			uint64_t *subtable;
+			uintptr_t base_va;
+
 			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
 
-			/* Recurse to write into subtable */
-			xlat_tables_unmap_region(ctx, mm, table_idx_va,
-						 subtable, XLAT_TABLE_ENTRIES,
-						 level + 1U);
-#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
-			xlat_clean_dcache_range((uintptr_t)subtable,
-				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
-#endif
-			/*
-			 * If the subtable is now empty, remove its reference.
-			 */
-			if (xlat_table_is_empty(ctx, subtable)) {
-				table_base[table_idx] = INVALID_DESC;
-				xlat_arch_tlbi_va(table_idx_va,
-						  ctx->xlat_regime);
-			}
+			desc_tables[this_level].table_base = this_base;
+			desc_tables[this_level].table_idx_va = table_idx_va;
+			base_va = table_idx_va;
+			desc_tables[this_level].idx = table_idx;
 
+			this_base = subtable;
+			this_level++;
+
+			max_entries = XLAT_TABLE_ENTRIES;
+			level_size = XLAT_BLOCK_SIZE(this_level);
+
+			table_idx_va = xlat_tables_find_start_va(mm,
+					base_va, this_level);
+			table_idx = xlat_tables_va_to_index(base_va,
+					table_idx_va, this_level);
 		} else {
 			assert(action == ACTION_NONE);
-		}
 
-		table_idx++;
-		table_idx_va += XLAT_BLOCK_SIZE(level);
-
-		/* If reached the end of the region, exit */
-		if (region_end_va <= table_idx_va)
-			break;
+			table_idx++;
+			table_idx_va += level_size;
+		}
 	}
-
-	if (level > ctx->base_level)
-		xlat_table_dec_regions_count(ctx, table_base);
 }
 
 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
@@ -537,105 +605,169 @@
 }
 
 /*
- * Recursive function that writes to the translation tables and maps the
+ * Function that writes to the translation tables and maps the
  * specified region. On success, it returns the VA of the last byte that was
  * successfully mapped. On error, it returns the VA of the next entry that
  * should have been mapped.
  */
 static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
-				   uintptr_t table_base_va,
+				   const uintptr_t table_base_va,
 				   uint64_t *const table_base,
 				   unsigned int table_entries,
 				   unsigned int level)
 {
+
 	assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
 
+	/*
+	 * Data structure to track the DESC_TABLE entry before iterating into
+	 * the subtable of the next translation level. It is used to restore
+	 * the previous level after the subtable iteration is finished.
+	 */
+	struct desc_table_map {
+		uint64_t *table_base;
+		uintptr_t table_idx_va;
+		unsigned int idx;
+	} desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
+		{NULL, 0U, XLAT_TABLE_ENTRIES}, };
+
+	unsigned int this_level = level;
+	uint64_t *this_base = table_base;
+	unsigned int max_entries = table_entries;
+	size_t level_size = XLAT_BLOCK_SIZE(this_level);
 	uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
 
 	uintptr_t table_idx_va;
-	unsigned long long table_idx_pa;
-
-	uint64_t *subtable;
-	uint64_t desc;
-
 	unsigned int table_idx;
 
 	table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
 	table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
 
-#if PLAT_XLAT_TABLES_DYNAMIC
-	if (level > ctx->base_level)
-		xlat_table_inc_regions_count(ctx, table_base);
+	while (this_base != NULL) {
+
+		uint64_t desc;
+		uint64_t desc_type;
+		unsigned long long table_idx_pa;
+		action_t action;
+
+		/* Finished iterating over the current xlat level. */
+		if (table_idx >= max_entries) {
+			if (this_level <= level) {
+				this_base = NULL;
+				break;
+			} else {
+
+				/* Back from the subtable iteration; restore
+				 * the previous DESC_TABLE entry.
+				 */
+				this_level--;
+				level_size = XLAT_BLOCK_SIZE(this_level);
+				this_base = desc_tables[this_level].table_base;
+				table_idx = desc_tables[this_level].idx;
+				if (this_level == level) {
+					max_entries = table_entries;
+				} else {
+					max_entries = XLAT_TABLE_ENTRIES;
+				}
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+				uintptr_t subtable;
+				desc = this_base[table_idx];
+				subtable = (uintptr_t)(desc & TABLE_ADDR_MASK);
+				xlat_clean_dcache_range(subtable,
+					XLAT_TABLE_ENTRIES * sizeof(uint64_t));
 #endif
 
-	while (table_idx < table_entries) {
+				table_idx++;
+				table_idx_va =
+					desc_tables[this_level].table_idx_va +
+					level_size;
+			}
+		}
 
-		desc = table_base[table_idx];
+		desc = this_base[table_idx];
+		desc_type = desc & DESC_MASK;
 
 		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;
 
-		action_t action = xlat_tables_map_region_action(mm,
-			(uint32_t)(desc & DESC_MASK), table_idx_pa,
-			table_idx_va, level);
-
-		if (action == ACTION_WRITE_BLOCK_ENTRY) {
+		/* If we have reached the end of the region, simply exit: all
+		 * BLOCK entries have already been written and all required
+		 * subtables have been created.
+		 */
+		if (mm_end_va <= table_idx_va) {
+			this_base = NULL;
+			break;
+		}
 
-			table_base[table_idx] =
-				xlat_desc(ctx, (uint32_t)mm->attr, table_idx_pa,
-					  level);
+		action = xlat_tables_map_region_action(mm, desc_type,
+				table_idx_pa, table_idx_va, this_level);
 
+		if (action == ACTION_WRITE_BLOCK_ENTRY) {
+			this_base[table_idx] = xlat_desc(ctx, mm->attr,
+					table_idx_pa, this_level);
+			table_idx++;
+			table_idx_va += level_size;
 		} else if (action == ACTION_CREATE_NEW_TABLE) {
-			uintptr_t end_va;
 
-			subtable = xlat_table_get_empty(ctx);
+			uintptr_t base_va;
+
+			uint64_t *subtable = xlat_table_get_empty(ctx);
 			if (subtable == NULL) {
-				/* Not enough free tables to map this region */
+				/* Not enough free tables to map this region. */
 				return table_idx_va;
 			}
 
 			/* Point to new subtable from this one. */
-			table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
+			this_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
 
-			/* Recurse to write into subtable */
-			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
-					       subtable, XLAT_TABLE_ENTRIES,
-					       level + 1U);
-#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
-			xlat_clean_dcache_range((uintptr_t)subtable,
-				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
-#endif
-			if (end_va !=
-				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
-				return end_va;
+			desc_tables[this_level].table_base = this_base;
+			desc_tables[this_level].table_idx_va = table_idx_va;
+			desc_tables[this_level].idx = table_idx;
+			base_va = table_idx_va;
 
-		} else if (action == ACTION_RECURSE_INTO_TABLE) {
-			uintptr_t end_va;
+			this_level++;
+			this_base = subtable;
+			level_size = XLAT_BLOCK_SIZE(this_level);
+			table_idx_va = xlat_tables_find_start_va(mm, base_va,
+					this_level);
+			table_idx = xlat_tables_va_to_index(base_va,
+					table_idx_va, this_level);
+			max_entries = XLAT_TABLE_ENTRIES;
 
-			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
-			/* Recurse to write into subtable */
-			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
-					       subtable, XLAT_TABLE_ENTRIES,
-					       level + 1U);
-#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
-			xlat_clean_dcache_range((uintptr_t)subtable,
-				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#if PLAT_XLAT_TABLES_DYNAMIC
+			if (this_level > ctx->base_level) {
+				xlat_table_inc_regions_count(ctx, subtable);
+			}
 #endif
-			if (end_va !=
-				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
-				return end_va;
 
-		} else {
+		} else if (action == ACTION_RECURSE_INTO_TABLE) {
 
-			assert(action == ACTION_NONE);
+			uintptr_t base_va;
+			uint64_t *subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
 
-		}
+			desc_tables[this_level].table_base = this_base;
+			desc_tables[this_level].table_idx_va = table_idx_va;
+			desc_tables[this_level].idx = table_idx;
+			base_va = table_idx_va;
 
-		table_idx++;
-		table_idx_va += XLAT_BLOCK_SIZE(level);
+			this_level++;
+			level_size = XLAT_BLOCK_SIZE(this_level);
+			table_idx_va = xlat_tables_find_start_va(mm, base_va,
+					this_level);
+			table_idx = xlat_tables_va_to_index(base_va,
+					table_idx_va, this_level);
+			this_base = subtable;
+			max_entries = XLAT_TABLE_ENTRIES;
 
-		/* If reached the end of the region, exit */
-		if (mm_end_va <= table_idx_va)
-			break;
+#if PLAT_XLAT_TABLES_DYNAMIC
+			if (this_level > ctx->base_level) {
+				xlat_table_inc_regions_count(ctx, subtable);
+			}
+#endif
+		} else {
+			assert(action == ACTION_NONE);
+			table_idx++;
+			table_idx_va += level_size;
+		}
 	}
 
 	return table_idx_va - 1U;
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
index f5848a2..7d0449a 100644
--- a/lib/xlat_tables_v2/xlat_tables_utils.c
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -109,7 +109,7 @@
 		"%s(%d invalid descriptors omitted)\n";
 
 /*
- * Recursive function that reads the translation tables passed as an argument
+ * Function that reads the translation tables passed as an argument
  * and prints their status.
  */
 static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
@@ -118,10 +118,23 @@
 {
 	assert(level <= XLAT_TABLE_LEVEL_MAX);
 
-	uint64_t desc;
-	uintptr_t table_idx_va = table_base_va;
+	/*
+	 * Data structure to track the DESC_TABLE entry before iterating into
+	 * the subtable of the next translation level. It is restored after
+	 * returning from the subtable iteration.
+	 */
+	struct desc_table {
+		const uint64_t *table_base;
+		uintptr_t table_idx_va;
+		unsigned int idx;
+	} desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
+		{NULL, 0U, XLAT_TABLE_ENTRIES}, };
+	unsigned int this_level = level;
+	const uint64_t *this_base = table_base;
+	unsigned int max_entries = table_entries;
+	size_t level_size = XLAT_BLOCK_SIZE(this_level);
 	unsigned int table_idx = 0U;
-	size_t level_size = XLAT_BLOCK_SIZE(level);
+	uintptr_t table_idx_va = table_base_va;
 
 	/*
 	 * Keep track of how many invalid descriptors are counted in a row.
@@ -131,67 +144,110 @@
 	 */
 	int invalid_row_count = 0;
 
-	while (table_idx < table_entries) {
-
-		desc = table_base[table_idx];
-
-		if ((desc & DESC_MASK) == INVALID_DESC) {
-
-			if (invalid_row_count == 0) {
-				printf("%sVA:0x%lx size:0x%zx\n",
-				       level_spacers[level],
-				       table_idx_va, level_size);
-			}
-			invalid_row_count++;
-
-		} else {
-
+	while (this_base != NULL) {
+		/* Finished iterating over the current xlat level. */
+		if (table_idx >= max_entries) {
 			if (invalid_row_count > 1) {
 				printf(invalid_descriptors_ommited,
-				       level_spacers[level],
-				       invalid_row_count - 1);
+					  level_spacers[this_level],
+					  invalid_row_count - 1);
 			}
 			invalid_row_count = 0;
 
-			/*
-			 * Check if this is a table or a block. Tables are only
-			 * allowed in levels other than 3, but DESC_PAGE has the
-			 * same value as DESC_TABLE, so we need to check.
-			 */
-			if (((desc & DESC_MASK) == TABLE_DESC) &&
-					(level < XLAT_TABLE_LEVEL_MAX)) {
-				/*
-				 * Do not print any PA for a table descriptor,
-				 * as it doesn't directly map physical memory
-				 * but instead points to the next translation
-				 * table in the translation table walk.
+			/* No parent level left to iterate. */
+			if (this_level <= level) {
+				this_base = NULL;
+				table_idx = max_entries + 1;
+			} else {
+				/* Restore the previous DESC_TABLE entry and
+				 * resume iterating it.
 				 */
-				printf("%sVA:0x%lx size:0x%zx\n",
-				       level_spacers[level],
-				       table_idx_va, level_size);
+				this_level--;
+				level_size = XLAT_BLOCK_SIZE(this_level);
+				this_base = desc_tables[this_level].table_base;
+				table_idx = desc_tables[this_level].idx;
+				table_idx_va =
+					desc_tables[this_level].table_idx_va;
+				if (this_level == level) {
+					max_entries = table_entries;
+				} else {
+					max_entries = XLAT_TABLE_ENTRIES;
+				}
 
-				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
+				assert(this_base != NULL);
+			}
+		} else {
+			uint64_t desc = this_base[table_idx];
 
-				xlat_tables_print_internal(ctx, table_idx_va,
-					(uint64_t *)addr_inner,
-					XLAT_TABLE_ENTRIES, level + 1U);
+			if ((desc & DESC_MASK) == INVALID_DESC) {
+				if (invalid_row_count == 0) {
+					printf("%sVA:0x%lx size:0x%zx\n",
+						  level_spacers[this_level],
+						  table_idx_va, level_size);
+				}
+				invalid_row_count++;
+				table_idx++;
+				table_idx_va += level_size;
 			} else {
-				printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
-				       level_spacers[level], table_idx_va,
-				       (uint64_t)(desc & TABLE_ADDR_MASK),
-				       level_size);
-				xlat_desc_print(ctx, desc);
-				printf("\n");
-			}
-		}
+				if (invalid_row_count > 1) {
+					printf(invalid_descriptors_ommited,
+						  level_spacers[this_level],
+						  invalid_row_count - 1);
+				}
+				invalid_row_count = 0;
+				/*
+				 * Check if this is a table or a block. Tables
+				 * are only allowed in levels other than 3, but
+				 * DESC_PAGE has the same value as DESC_TABLE,
+				 * so we need to check.
+				 */
 
-		table_idx++;
-		table_idx_va += level_size;
-	}
+				if (((desc & DESC_MASK) == TABLE_DESC) &&
+				    (this_level < XLAT_TABLE_LEVEL_MAX)) {
+					uintptr_t addr_inner;
 
-	if (invalid_row_count > 1) {
-		printf(invalid_descriptors_ommited,
-		       level_spacers[level], invalid_row_count - 1);
+					/*
+					 * Do not print any PA for a table
+					 * descriptor, as it doesn't directly
+					 * map physical memory but instead
+					 * points to the next translation
+					 * table in the translation table walk.
+					 */
+					printf("%sVA:0x%lx size:0x%zx\n",
+					       level_spacers[this_level],
+					       table_idx_va, level_size);
+
+					addr_inner = desc & TABLE_ADDR_MASK;
+					/* save current xlat level */
+					desc_tables[this_level].table_base =
+						this_base;
+					desc_tables[this_level].idx =
+						table_idx + 1;
+					desc_tables[this_level].table_idx_va =
+						table_idx_va + level_size;
+
+					/* start iterating next level entries */
+					this_base = (uint64_t *)addr_inner;
+					max_entries = XLAT_TABLE_ENTRIES;
+					this_level++;
+					level_size =
+						XLAT_BLOCK_SIZE(this_level);
+					table_idx = 0U;
+				} else {
+					printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
+					       level_spacers[this_level],
+					       table_idx_va,
+					       (uint64_t)(desc & TABLE_ADDR_MASK),
+					       level_size);
+					xlat_desc_print(ctx, desc);
+					printf("\n");
+
+					table_idx++;
+					table_idx_va += level_size;
+
+				}
+			}
+		}
 	}
 }
 
diff --git a/plat/arm/board/juno/include/platform_def.h b/plat/arm/board/juno/include/platform_def.h
index ddbc9b7..e096e33 100644
--- a/plat/arm/board/juno/include/platform_def.h
+++ b/plat/arm/board/juno/include/platform_def.h
@@ -222,6 +222,7 @@
 
 /* MHU related constants */
 #define PLAT_CSS_MHU_BASE		UL(0x2b1f0000)
+#define PLAT_MHUV2_BASE			PLAT_CSS_MHU_BASE
 
 /*
  * Base address of the first memory region used for communication between AP
diff --git a/plat/arm/board/n1sdp/include/platform_def.h b/plat/arm/board/n1sdp/include/platform_def.h
index 83b9f52..03bd380 100644
--- a/plat/arm/board/n1sdp/include/platform_def.h
+++ b/plat/arm/board/n1sdp/include/platform_def.h
@@ -61,6 +61,7 @@
 
 #define PLAT_ARM_NSTIMER_FRAME_ID		0
 #define PLAT_CSS_MHU_BASE			0x45000000
+#define PLAT_MHUV2_BASE				PLAT_CSS_MHU_BASE
 #define PLAT_MAX_PWR_LVL			1
 
 #define PLAT_ARM_G1S_IRQS			ARM_G1S_IRQS,			\
diff --git a/plat/arm/board/rde1edge/include/platform_def.h b/plat/arm/board/rde1edge/include/platform_def.h
index 954a1cd..3b3ade0 100644
--- a/plat/arm/board/rde1edge/include/platform_def.h
+++ b/plat/arm/board/rde1edge/include/platform_def.h
@@ -16,6 +16,7 @@
 #define CSS_SGI_MAX_PE_PER_CPU		2
 
 #define PLAT_CSS_MHU_BASE		UL(0x45400000)
+#define PLAT_MHUV2_BASE			PLAT_CSS_MHU_BASE
 
 /* Base address of DMC-620 instances */
 #define RDE1EDGE_DMC620_BASE0		UL(0x4e000000)
diff --git a/plat/arm/board/rdn1edge/include/platform_def.h b/plat/arm/board/rdn1edge/include/platform_def.h
index 2ca0dd4..8480c08 100644
--- a/plat/arm/board/rdn1edge/include/platform_def.h
+++ b/plat/arm/board/rdn1edge/include/platform_def.h
@@ -16,6 +16,7 @@
 #define CSS_SGI_MAX_PE_PER_CPU		1
 
 #define PLAT_CSS_MHU_BASE		UL(0x45400000)
+#define PLAT_MHUV2_BASE			PLAT_CSS_MHU_BASE
 
 /* Base address of DMC-620 instances */
 #define RDN1EDGE_DMC620_BASE0		UL(0x4e000000)
diff --git a/plat/arm/board/sgi575/include/platform_def.h b/plat/arm/board/sgi575/include/platform_def.h
index 6aea522..ec51c9e 100644
--- a/plat/arm/board/sgi575/include/platform_def.h
+++ b/plat/arm/board/sgi575/include/platform_def.h
@@ -16,6 +16,7 @@
 #define CSS_SGI_MAX_PE_PER_CPU		1
 
 #define PLAT_CSS_MHU_BASE		UL(0x45000000)
+#define PLAT_MHUV2_BASE			PLAT_CSS_MHU_BASE
 
 /* Base address of DMC-620 instances */
 #define SGI575_DMC620_BASE0		UL(0x4e000000)
diff --git a/plat/arm/css/sgm/include/sgm_base_platform_def.h b/plat/arm/css/sgm/include/sgm_base_platform_def.h
index 4647e74..8581844 100644
--- a/plat/arm/css/sgm/include/sgm_base_platform_def.h
+++ b/plat/arm/css/sgm/include/sgm_base_platform_def.h
@@ -86,6 +86,7 @@
 
 /* MHU related constants */
 #define PLAT_CSS_MHU_BASE		0x2b1f0000
+#define PLAT_MHUV2_BASE			PLAT_CSS_MHU_BASE
 
 #define PLAT_ARM_TRUSTED_ROM_BASE	0x00000000
 #define PLAT_ARM_TRUSTED_ROM_SIZE	0x00080000
diff --git a/plat/intel/soc/stratix10/aarch64/stratix10_private.h b/plat/intel/soc/stratix10/aarch64/stratix10_private.h
index 89851ef..f437202 100644
--- a/plat/intel/soc/stratix10/aarch64/stratix10_private.h
+++ b/plat/intel/soc/stratix10/aarch64/stratix10_private.h
@@ -29,6 +29,6 @@
 } boot_source_type;
 
 void enable_nonsecure_access(void);
-void stratix10_io_setup(void);
+void stratix10_io_setup(int boot_source);
 
 #endif
diff --git a/plat/intel/soc/stratix10/bl2_plat_setup.c b/plat/intel/soc/stratix10/bl2_plat_setup.c
index 71e862f..9a2f9d3 100644
--- a/plat/intel/soc/stratix10/bl2_plat_setup.c
+++ b/plat/intel/soc/stratix10/bl2_plat_setup.c
@@ -30,6 +30,9 @@
 #include "s10_handoff.h"
 #include "s10_pinmux.h"
 #include "aarch64/stratix10_private.h"
+#include "include/s10_mailbox.h"
+#include "drivers/qspi/cadence_qspi.h"
+
 
 const mmap_region_t plat_stratix10_mmap[] = {
 	MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE,
@@ -109,8 +112,18 @@
 	switch (boot_source) {
 	case BOOT_SOURCE_SDMMC:
 		dw_mmc_init(&params, &info);
-		stratix10_io_setup();
+		stratix10_io_setup(boot_source);
+		break;
+
+	case BOOT_SOURCE_QSPI:
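+		/* Request QSPI access through the SDM mailbox, then initialize
+		 * the Cadence QSPI controller before setting up the IO layer.
+		 */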
+		mailbox_set_qspi_open();
+		mailbox_set_qspi_direct();
+		cad_qspi_init(0, QSPI_CONFIG_CPHA, QSPI_CONFIG_CPOL,
+			QSPI_CONFIG_CSDA, QSPI_CONFIG_CSDADS,
+			QSPI_CONFIG_CSEOT, QSPI_CONFIG_CSSOT, 0);
+		stratix10_io_setup(boot_source);
 		break;
+
 	default:
 		ERROR("Unsupported boot source\n");
 		panic();
diff --git a/plat/intel/soc/stratix10/drivers/qspi/cadence_qspi.c b/plat/intel/soc/stratix10/drivers/qspi/cadence_qspi.c
new file mode 100644
index 0000000..506a633
--- /dev/null
+++ b/plat/intel/soc/stratix10/drivers/qspi/cadence_qspi.c
@@ -0,0 +1,824 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <common/debug.h>
+#include <lib/mmio.h>
+#include <string.h>
+#include <drivers/delay_timer.h>
+#include <drivers/console.h>
+
+#include "cadence_qspi.h"
+#include <platform_def.h>
+
+#define LESS(a, b)   (((a) < (b)) ? (a) : (b))
+#define MORE(a, b)   (((a) > (b)) ? (a) : (b))
+
+
+uint32_t qspi_device_size;
+int cad_qspi_cs;
+
+int cad_qspi_idle(void)
+{
+	return (mmio_read_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG)
+			& CAD_QSPI_CFG_IDLE) >> 31;
+}
+
+int cad_qspi_set_baudrate_div(uint32_t div)
+{
+	if (div > 0xf)
+		return CAD_INVALID;
+
+	mmio_clrsetbits_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG,
+			~CAD_QSPI_CFG_BAUDDIV_MSK,
+			CAD_QSPI_CFG_BAUDDIV(div));
+
+	return 0;
+}
+
+int cad_qspi_configure_dev_size(uint32_t addr_bytes,
+		uint32_t bytes_per_dev, uint32_t bytes_per_block)
+{
+
+	mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_DEVSZ,
+			CAD_QSPI_DEVSZ_ADDR_BYTES(addr_bytes) |
+			CAD_QSPI_DEVSZ_BYTES_PER_PAGE(bytes_per_dev) |
+			CAD_QSPI_DEVSZ_BYTES_PER_BLOCK(bytes_per_block));
+	return 0;
+}
+
+int cad_qspi_set_read_config(uint32_t opcode, uint32_t instr_type,
+		uint32_t addr_type, uint32_t data_type,
+		uint32_t mode_bit, uint32_t dummy_clk_cycle)
+{
+	mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_DEVRD,
+			CAD_QSPI_DEV_OPCODE(opcode) |
+			CAD_QSPI_DEV_INST_TYPE(instr_type) |
+			CAD_QSPI_DEV_ADDR_TYPE(addr_type) |
+			CAD_QSPI_DEV_DATA_TYPE(data_type) |
+			CAD_QSPI_DEV_MODE_BIT(mode_bit) |
+			CAD_QSPI_DEV_DUMMY_CLK_CYCLE(dummy_clk_cycle));
+
+	return 0;
+}
+
+int cat_qspi_set_write_config(uint32_t addr_type, uint32_t data_type,
+		uint32_t mode_bit, uint32_t dummy_clk_cycle)
+{
+	mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_DEVWR,
+			CAD_QSPI_DEV_ADDR_TYPE(addr_type) |
+			CAD_QSPI_DEV_DATA_TYPE(data_type) |
+			CAD_QSPI_DEV_MODE_BIT(mode_bit) |
+			CAD_QSPI_DEV_DUMMY_CLK_CYCLE(dummy_clk_cycle));
+
+	return 0;
+}
+
+int cad_qspi_timing_config(uint32_t clkphase, uint32_t clkpol, uint32_t csda,
+		uint32_t csdads, uint32_t cseot, uint32_t cssot,
+		uint32_t rddatacap)
+{
+	uint32_t cfg = mmio_read_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG);
+
+	cfg &= CAD_QSPI_CFG_SELCLKPHASE_CLR_MSK &
+		CAD_QSPI_CFG_SELCLKPOL_CLR_MSK;
+	cfg |= CAD_QSPI_SELCLKPHASE(clkphase) | CAD_QSPI_SELCLKPOL(clkpol);
+
+	mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG, cfg);
+
+	mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_DELAY,
+		CAD_QSPI_DELAY_CSSOT(cssot) | CAD_QSPI_DELAY_CSEOT(cseot) |
+		CAD_QSPI_DELAY_CSDADS(csdads) | CAD_QSPI_DELAY_CSDA(csda));
+
+	return 0;
+}
+
+int cad_qspi_stig_cmd_helper(int cs, uint32_t cmd)
+{
+	uint32_t count = 0;
+
+	/* chip select */
+	mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG,
+			(mmio_read_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG)
+			 & CAD_QSPI_CFG_CS_MSK) | CAD_QSPI_CFG_CS(cs));
+
+	mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_FLASHCMD, cmd);
+	mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_FLASHCMD,
+			cmd | CAD_QSPI_FLASHCMD_EXECUTE);
+
+	do {
+		uint32_t reg = mmio_read_32(CAD_QSPI_OFFSET +
+					CAD_QSPI_FLASHCMD);
+		if (!(reg & CAD_QSPI_FLASHCMD_EXECUTE_STAT))
+			break;
+		count++;
+	} while (count < CAD_QSPI_COMMAND_TIMEOUT);
+
+	if (count >= CAD_QSPI_COMMAND_TIMEOUT) {
+		ERROR("Error sending QSPI command %x, timed out\n",
+				cmd);
+		return CAD_QSPI_ERROR;
+	}
+
+	return 0;
+}
+
+int cad_qspi_stig_cmd(uint32_t opcode, uint32_t dummy)
+{
+	if (dummy > ((1 << CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX) - 1)) {
+		ERROR("Faulty dummy bytes\n");
+		return -1;
+	}
+
+	return cad_qspi_stig_cmd_helper(cad_qspi_cs,
+			CAD_QSPI_FLASHCMD_OPCODE(opcode) |
+			CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES(dummy));
+}
+
+int cad_qspi_stig_read_cmd(uint32_t opcode, uint32_t dummy, uint32_t num_bytes,
+		uint32_t *output)
+{
+	if (dummy > ((1 << CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX) - 1)) {
+		ERROR("Faulty dummy bytes\n");
+		return -1;
+	}
+
+	if ((num_bytes > 8) || (num_bytes == 0))
+		return -1;
+
+	uint32_t cmd =
+		CAD_QSPI_FLASHCMD_OPCODE(opcode) |
+		CAD_QSPI_FLASHCMD_ENRDDATA(1) |
+		CAD_QSPI_FLASHCMD_NUMRDDATABYTES(num_bytes - 1) |
+		CAD_QSPI_FLASHCMD_ENCMDADDR(0) |
+		CAD_QSPI_FLASHCMD_ENMODEBIT(0) |
+		CAD_QSPI_FLASHCMD_NUMADDRBYTES(0) |
+		CAD_QSPI_FLASHCMD_ENWRDATA(0) |
+		CAD_QSPI_FLASHCMD_NUMWRDATABYTES(0) |
+		CAD_QSPI_FLASHCMD_NUMDUMMYBYTES(dummy);
+
+	if (cad_qspi_stig_cmd_helper(cad_qspi_cs, cmd)) {
+		ERROR("failed to send stig cmd");
+		return -1;
+	}
+
+	output[0] = mmio_read_32(CAD_QSPI_OFFSET + CAD_QSPI_FLASHCMD_RDDATA0);
+
+	if (num_bytes > 4) {
+		output[1] = mmio_read_32(CAD_QSPI_OFFSET +
+				CAD_QSPI_FLASHCMD_RDDATA1);
+	}
+
+	return 0;
+}
+
+int cad_qspi_stig_wr_cmd(uint32_t opcode, uint32_t dummy, uint32_t num_bytes,
+		uint32_t *input)
+{
+	if (dummy > ((1 << CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX) - 1)) {
+		ERROR("Faulty dummy bytes\n");
+		return -1;
+	}
+
+	if ((num_bytes > 8) || (num_bytes == 0))
+		return -1;
+
+	uint32_t cmd = CAD_QSPI_FLASHCMD_OPCODE(opcode) |
+		CAD_QSPI_FLASHCMD_ENRDDATA(0) |
+		CAD_QSPI_FLASHCMD_NUMRDDATABYTES(0) |
+		CAD_QSPI_FLASHCMD_ENCMDADDR(0) |
+		CAD_QSPI_FLASHCMD_ENMODEBIT(0) |
+		CAD_QSPI_FLASHCMD_NUMADDRBYTES(0) |
+		CAD_QSPI_FLASHCMD_ENWRDATA(1) |
+		CAD_QSPI_FLASHCMD_NUMWRDATABYTES(num_bytes - 1) |
+		CAD_QSPI_FLASHCMD_NUMDUMMYBYTES(dummy);
+
+	mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_FLASHCMD_WRDATA0, input[0]);
+
+	if (num_bytes > 4)
+		mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_FLASHCMD_WRDATA1,
+				input[1]);
+
+	return cad_qspi_stig_cmd_helper(cad_qspi_cs, cmd);
+}
+
+int cad_qspi_stig_addr_cmd(uint32_t opcode, uint32_t dummy, uint32_t addr)
+{
+	uint32_t cmd;
+
+	if (dummy > ((1 << CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX) - 1))
+		return -1;
+
+	cmd = CAD_QSPI_FLASHCMD_OPCODE(opcode) |
+		CAD_QSPI_FLASHCMD_NUMDUMMYBYTES(dummy) |
+		CAD_QSPI_FLASHCMD_ENCMDADDR(1) |
+		CAD_QSPI_FLASHCMD_NUMADDRBYTES(2);
+
+	mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_FLASHCMD_ADDR, addr);
+
+	return cad_qspi_stig_cmd_helper(cad_qspi_cs, cmd);
+}
+
+int cad_qspi_device_bank_select(uint32_t bank)
+{
+	int status = 0;
+
+	status = cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_WREN, 0);
+	if (status != 0)
+		return status;
+
+	status = cad_qspi_stig_wr_cmd(CAD_QSPI_STIG_OPCODE_WREN_EXT_REG,
+			0, 1, &bank);
+	if (status != 0)
+		return status;
+
+	return cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_WRDIS, 0);
+}
+
+int cad_qspi_device_status(uint32_t *status)
+{
+	return cad_qspi_stig_read_cmd(CAD_QSPI_STIG_OPCODE_RDSR, 0, 1, status);
+}
+
+#if CAD_QSPI_MICRON_N25Q_SUPPORT
+int cad_qspi_n25q_enable(void)
+{
+	cad_qspi_set_read_config(QSPI_FAST_READ, CAD_QSPI_INST_SINGLE,
+			CAD_QSPI_ADDR_FASTREAD, CAT_QSPI_ADDR_SINGLE_IO, 1,
+			0);
+	return 0;
+}
+
+int cad_qspi_n25q_wait_for_program_and_erase(int program_only)
+{
+	uint32_t status, flag_sr;
+	int count = 0;
+
+	while (count < CAD_QSPI_COMMAND_TIMEOUT) {
+		/* Do not overwrite the polled status with the return code */
+		if (cad_qspi_device_status(&status) != 0) {
+			ERROR("Error getting device status\n");
+			return -1;
+		}
+		if (!CAD_QSPI_STIG_SR_BUSY(status))
+			break;
+		count++;
+	}
+
+	if (count >= CAD_QSPI_COMMAND_TIMEOUT) {
+		ERROR("Timed out waiting for idle\n");
+		return -1;
+	}
+
+	count = 0;
+
+	while (count < CAD_QSPI_COMMAND_TIMEOUT) {
+		status = cad_qspi_stig_read_cmd(CAD_QSPI_STIG_OPCODE_RDFLGSR,
+				0, 1, &flag_sr);
+		if (status != 0) {
+			ERROR("Error waiting program and erase.\n");
+			return status;
+		}
+
+		if ((program_only &&
+			CAD_QSPI_STIG_FLAGSR_PROGRAMREADY(flag_sr)) ||
+			(!program_only &&
+			CAD_QSPI_STIG_FLAGSR_ERASEREADY(flag_sr)))
+			break;
+
+		count++;
+	}
+
+	if (count >= CAD_QSPI_COMMAND_TIMEOUT)
+		ERROR("Timed out waiting for program and erase\n");
+
+	if ((program_only && CAD_QSPI_STIG_FLAGSR_PROGRAMERROR(flag_sr)) ||
+			(!program_only &&
+			CAD_QSPI_STIG_FLAGSR_ERASEERROR(flag_sr))) {
+		ERROR("Error programming/erasing flash\n");
+		cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_CLFSR, 0);
+		return -1;
+	}
+
+	return 0;
+}
+#endif
+
+int cad_qspi_indirect_read_start_bank(uint32_t flash_addr, uint32_t num_bytes)
+{
+	mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_INDRDSTADDR, flash_addr);
+	mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_INDRDCNT, num_bytes);
+	mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_INDRD,
+			CAD_QSPI_INDRD_START |
+			CAD_QSPI_INDRD_IND_OPS_DONE);
+
+	return 0;
+}
+
+
+int cad_qspi_indirect_write_start_bank(uint32_t flash_addr,
+					uint32_t num_bytes)
+{
+	mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_INDWRSTADDR, flash_addr);
+	mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_INDWRCNT, num_bytes);
+	mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_INDWR,
+			CAD_QSPI_INDWR_START |
+			CAD_QSPI_INDWR_INDDONE);
+
+	return 0;
+}
+
+int cad_qspi_indirect_write_finish(void)
+{
+#if CAD_QSPI_MICRON_N25Q_SUPPORT
+	return cad_qspi_n25q_wait_for_program_and_erase(1);
+#else
+	return 0;
+#endif
+
+}
+
+int cad_qspi_enable(void)
+{
+	int status;
+
+	mmio_setbits_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG, CAD_QSPI_CFG_ENABLE);
+
+#if CAD_QSPI_MICRON_N25Q_SUPPORT
+	status = cad_qspi_n25q_enable();
+	if (status != 0)
+		return status;
+#endif
+	return 0;
+}
+
+int cad_qspi_enable_subsector_bank(uint32_t addr)
+{
+	int status = 0;
+
+	status = cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_WREN, 0);
+	if (status != 0)
+		return status;
+
+	status = cad_qspi_stig_addr_cmd(CAD_QSPI_STIG_OPCODE_SUBSEC_ERASE, 0,
+					addr);
+	if (status != 0)
+		return status;
+
+#if CAD_QSPI_MICRON_N25Q_SUPPORT
+	status = cad_qspi_n25q_wait_for_program_and_erase(0);
+#endif
+	return status;
+}
+
+int cad_qspi_erase_subsector(uint32_t addr)
+{
+	int status = 0;
+
+	status = cad_qspi_device_bank_select(addr >> 24);
+	if (status != 0)
+		return status;
+
+	return cad_qspi_enable_subsector_bank(addr);
+}
+
+int cad_qspi_erase_sector(uint32_t addr)
+{
+	int status = 0;
+
+	status = cad_qspi_device_bank_select(addr >> 24);
+	if (status != 0)
+		return status;
+
+	status = cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_WREN, 0);
+	if (status != 0)
+		return status;
+
+	status = cad_qspi_stig_addr_cmd(CAD_QSPI_STIG_OPCODE_SEC_ERASE, 0,
+					addr);
+	if (status != 0)
+		return status;
+
+#if CAD_QSPI_MICRON_N25Q_SUPPORT
+	status = cad_qspi_n25q_wait_for_program_and_erase(0);
+#endif
+	return status;
+}
+
+void cad_qspi_calibration(uint32_t dev_clk, uint32_t qspi_clk_mhz)
+{
+	int status;
+	uint32_t dev_sclk_mhz = 27; /*min value to get biggest 0xF div factor*/
+	uint32_t data_cap_delay;
+	uint32_t sample_rdid;
+	uint32_t rdid;
+	uint32_t div_actual;
+	uint32_t div_bits;
+	int first_pass, last_pass;
+
+	/*
+	 * 1. Set the divider to the largest value (slowest SCLK).
+	 * 2. Issue RDID and save the value.
+	 */
+	div_actual = (qspi_clk_mhz + (dev_sclk_mhz - 1)) / dev_sclk_mhz;
+	div_bits = (((div_actual + 1) / 2) - 1);
+	status = cad_qspi_set_baudrate_div(0xf);
+	if (status != 0)
+		return;
+
+	status = cad_qspi_stig_read_cmd(CAD_QSPI_STIG_OPCODE_RDID,
+					0, 3, &sample_rdid);
+	if (status != 0)
+		return;
+
+	/*
+	 * 3. Set the divider to the intended frequency.
+	 * 4. Set the read delay to 0.
+	 * 5. Issue RDID and check whether the value matches the one saved in
+	 *    step 2.
+	 * 6. Increase the read delay and compare the value against step 2.
+	 * 7. Find the range of read delays that matches step 2 and program
+	 *    the midpoint of that range.
+	 */
+	div_actual = (qspi_clk_mhz + (dev_clk - 1)) / dev_clk;
+	div_bits = (((div_actual + 1) / 2) - 1);
+	status = cad_qspi_set_baudrate_div(div_bits);
+	if (status != 0)
+		return;
+
+	data_cap_delay = 0;
+	first_pass = -1;
+	last_pass = -1;
+
+	do {
+		mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_RDDATACAP,
+				CAD_QSPI_RDDATACAP_BYP(1) |
+				CAD_QSPI_RDDATACAP_DELAY(data_cap_delay));
+
+		status = cad_qspi_stig_read_cmd(CAD_QSPI_STIG_OPCODE_RDID, 0,
+						3, &rdid);
+		if (status != 0)
+			break;
+
+		if (rdid == sample_rdid) {
+			if (first_pass == -1)
+				first_pass = data_cap_delay;
+			else
+				last_pass = data_cap_delay;
+		}
+
+		data_cap_delay++;
+	} while (data_cap_delay < 0x10);
+
+	if (first_pass >= 0) {
+		if (last_pass < first_pass)
+			last_pass = first_pass;
+
+		data_cap_delay = first_pass +
+				(last_pass - first_pass) / 2;
+	}
+
+	mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_RDDATACAP,
+			CAD_QSPI_RDDATACAP_BYP(1) |
+			CAD_QSPI_RDDATACAP_DELAY(data_cap_delay));
+	status = cad_qspi_stig_read_cmd(CAD_QSPI_STIG_OPCODE_RDID, 0, 3, &rdid);
+
+	if (status != 0)
+		return;
+}
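+
+/*
+ * Worked example of the calibration above (values are illustrative, not taken
+ * from real hardware): if the RDID read-back matches sample_rdid for
+ * read-capture delays 3 through 9, then first_pass = 3, last_pass = 9, and
+ * the midpoint delay 3 + (9 - 3) / 2 = 6 is programmed into
+ * CAD_QSPI_RDDATACAP.
+ */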
+
+int cad_qspi_int_disable(uint32_t mask)
+{
+	if (cad_qspi_idle() == 0)
+		return -1;
+
+	if ((CAD_QSPI_INT_STATUS_ALL & mask) == 0)
+		return -1;
+
+	mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_IRQMSK, mask);
+	return 0;
+}
+
+void cad_qspi_set_chip_select(int cs)
+{
+	cad_qspi_cs = cs;
+}
+
+int cad_qspi_init(uint32_t desired_clk_freq, uint32_t clk_phase,
+			uint32_t clk_pol, uint32_t csda, uint32_t csdads,
+			uint32_t cseot, uint32_t cssot, uint32_t rddatacap)
+{
+	int status = 0;
+	uint32_t qspi_desired_clk_freq;
+	uint32_t rdid = 0;
+	uint32_t cap_code;
+
+	INFO("Initializing Qspi\n");
+
+	if (cad_qspi_idle() == 0) {
+		ERROR("device not idle");
+		return -1;
+	}
+
+
+	status = cad_qspi_timing_config(clk_phase, clk_pol, csda, csdads,
+					cseot, cssot, rddatacap);
+
+	if (status != 0) {
+		ERROR("config set timing failure\n");
+		return status;
+	}
+
+	mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_REMAPADDR,
+			CAD_QSPI_REMAPADDR_VALUE_SET(0));
+
+	status = cad_qspi_int_disable(CAD_QSPI_INT_STATUS_ALL);
+	if (status != 0) {
+		ERROR("failed disable\n");
+		return status;
+	}
+
+	cad_qspi_set_baudrate_div(0xf);
+	status = cad_qspi_enable();
+	if (status != 0) {
+		ERROR("failed enable\n");
+		return status;
+	}
+
+	qspi_desired_clk_freq = 100;
+	cad_qspi_calibration(qspi_desired_clk_freq, 50000000);
+
+	status = cad_qspi_stig_read_cmd(CAD_QSPI_STIG_OPCODE_RDID, 0, 3,
+					&rdid);
+
+	if (status != 0) {
+		ERROR("Error reading RDID\n");
+		return status;
+	}
+
+	/*
+	 * NOTE: The size code appears to be a form of BCD (binary coded
+	 * decimal): the first nibble is the tens digit and the second nibble
+	 * is the ones digit of a decimal code n, and the device size is
+	 * 2^(n + 6) bytes.
+	 *
+	 * Capacity ID samples:
+	 * 0x15 :   16 Mb =>   2 MiB => 1 << 21 ; BCD=15
+	 * 0x16 :   32 Mb =>   4 MiB => 1 << 22 ; BCD=16
+	 * 0x17 :   64 Mb =>   8 MiB => 1 << 23 ; BCD=17
+	 * 0x18 :  128 Mb =>  16 MiB => 1 << 24 ; BCD=18
+	 * 0x19 :  256 Mb =>  32 MiB => 1 << 25 ; BCD=19
+	 * 0x1a
+	 * 0x1b
+	 * 0x1c
+	 * 0x1d
+	 * 0x1e
+	 * 0x1f
+	 * 0x20 :  512 Mb =>  64 MiB => 1 << 26 ; BCD=20
+	 * 0x21 : 1024 Mb => 128 MiB => 1 << 27 ; BCD=21
+	 */
+
+	cap_code = CAD_QSPI_STIG_RDID_CAPACITYID(rdid);
+
+	if (!(((cap_code >> 4) > 0x9) || ((cap_code & 0xf) > 0x9))) {
+		uint32_t decoded_cap = ((cap_code >> 4) * 10) +
+					(cap_code & 0xf);
+		qspi_device_size = 1 << (decoded_cap + 6);
+		INFO("QSPI Capacity: %x\n\n", qspi_device_size);
+
+	} else {
+		ERROR("Invalid CapacityID encountered: 0x%02x\n",
+				cap_code);
+		return -1;
+	}
+
+	cad_qspi_configure_dev_size(S10_QSPI_ADDR_BYTES,
+				S10_QSPI_BYTES_PER_DEV, S10_BYTES_PER_BLOCK);
+
+	INFO("Flash size: %d Bytes\n", qspi_device_size);
+
+	return status;
+}
+
+int cad_qspi_indirect_page_bound_write(uint32_t offset,
+		uint8_t *buffer, uint32_t len)
+{
+	int status = 0, i;
+	uint32_t write_count, write_capacity, *write_data, space,
+		write_fill_level, sram_partition;
+
+	status = cad_qspi_indirect_write_start_bank(offset, len);
+	if (status != 0)
+		return status;
+
+	write_count = 0;
+	sram_partition = CAD_QSPI_SRAMPART_ADDR(mmio_read_32(CAD_QSPI_OFFSET +
+			 CAD_QSPI_SRAMPART));
+	write_capacity = (uint32_t) CAD_QSPI_SRAM_FIFO_ENTRY_COUNT -
+			sram_partition;
+
+	while (write_count < len) {
+		write_fill_level = CAD_QSPI_SRAMFILL_INDWRPART(
+					mmio_read_32(CAD_QSPI_OFFSET +
+							CAD_QSPI_SRAMFILL));
+		space = LESS(write_capacity - write_fill_level,
+				(len - write_count) / sizeof(uint32_t));
+		write_data = (uint32_t *)(buffer + write_count);
+		for (i = 0; i < space; ++i)
+			mmio_write_32(CAD_QSPIDATA_OFST, *write_data++);
+
+		write_count += space * sizeof(uint32_t);
+	}
+	return cad_qspi_indirect_write_finish();
+}
+
+int cad_qspi_read_bank(uint8_t *buffer, uint32_t offset, uint32_t size)
+{
+	int status;
+	uint32_t read_count = 0, *read_data;
+	int level = 1, count = 0, i;
+
+	status = cad_qspi_indirect_read_start_bank(offset, size);
+
+	if (status != 0)
+		return status;
+
+	while (read_count < size) {
+		do {
+			level = CAD_QSPI_SRAMFILL_INDRDPART(
+				mmio_read_32(CAD_QSPI_OFFSET +
+					CAD_QSPI_SRAMFILL));
+			read_data = (uint32_t *)(buffer + read_count);
+			for (i = 0; i < level; ++i)
+				*read_data++ = mmio_read_32(CAD_QSPIDATA_OFST);
+
+			read_count += level * sizeof(uint32_t);
+			count++;
+		} while (level > 0);
+	}
+
+	return 0;
+}
+
+int cad_qspi_write_bank(uint32_t offset, uint8_t *buffer, uint32_t size)
+{
+	int status = 0;
+	uint32_t page_offset  = offset & (CAD_QSPI_PAGE_SIZE - 1);
+	uint32_t write_size = LESS(size, CAD_QSPI_PAGE_SIZE - page_offset);
+
+	while (size) {
+		status = cad_qspi_indirect_page_bound_write(offset, buffer,
+							write_size);
+		if (status != 0)
+			break;
+
+		offset  += write_size;
+		buffer  += write_size;
+		size -= write_size;
+		write_size = LESS(size, CAD_QSPI_PAGE_SIZE);
+	}
+	return status;
+}
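+
+/*
+ * Worked example of the page-bound chunking above (illustrative values): for
+ * offset = 0x1f0 and size = 0x40 with CAD_QSPI_PAGE_SIZE = 0x100, the first
+ * chunk writes 0x10 bytes up to the page boundary at 0x200 and the second
+ * chunk writes the remaining 0x30 bytes starting at offset 0x200.
+ */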
+
+int cad_qspi_read(void *buffer, uint32_t  offset, uint32_t  size)
+{
+	uint32_t bank_count, bank_addr, bank_offset, copy_len;
+	uint8_t *read_data;
+	int i, status;
+
+	status = 0;
+
+	if ((offset >= qspi_device_size) ||
+			(offset + size - 1 >= qspi_device_size) ||
+			(size == 0) ||
+			((long) ((int *)buffer) & 0x3)  ||
+			(offset & 0x3) ||
+			(size & 0x3)) {
+		ERROR("Invalid read parameter");
+		return -1;
+	}
+
+	if (CAD_QSPI_INDRD_RD_STAT(mmio_read_32(CAD_QSPI_OFFSET +
+						CAD_QSPI_INDRD))) {
+		ERROR("Read in progress");
+		return -1;
+	}
+
+	/*
+	 * bank_count  : Number of bank(s) affected, including partial banks.
+	 * bank_addr   : Aligned address of the first bank,
+	 *		 including partial bank.
+	 * bank_offset : Offset within the first bank at which the read
+	 *		 starts. Only used when reading the first bank.
+	 */
+	bank_count = CAD_QSPI_BANK_ADDR(offset + size - 1) -
+			CAD_QSPI_BANK_ADDR(offset) + 1;
+	bank_addr  = offset & CAD_QSPI_BANK_ADDR_MSK;
+	bank_offset  = offset & (CAD_QSPI_BANK_SIZE - 1);
+
+	read_data = (uint8_t *)buffer;
+
+	copy_len = LESS(size, CAD_QSPI_BANK_SIZE - bank_offset);
+
+	for (i = 0; i < bank_count; ++i) {
+		status = cad_qspi_device_bank_select(CAD_QSPI_BANK_ADDR(
+								bank_addr));
+		if (status != 0)
+			break;
+		status = cad_qspi_read_bank(read_data, bank_offset, copy_len);
+		if (status != 0)
+			break;
+
+		bank_addr += CAD_QSPI_BANK_SIZE;
+		read_data += copy_len;
+		size -= copy_len;
+		bank_offset = 0;
+		copy_len = LESS(size, CAD_QSPI_BANK_SIZE);
+	}
+
+	return status;
+}
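+
+/*
+ * Worked example of the bank split above (illustrative values): for
+ * offset = 0x00fff000 and size = 0x2000 with CAD_QSPI_BANK_SIZE = 0x01000000,
+ * bank_count = 2. The first iteration selects bank 0 and reads 0x1000 bytes
+ * starting at bank offset 0xfff000; the second selects bank 1 and reads the
+ * remaining 0x1000 bytes from bank offset 0.
+ */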
+
+int cad_qspi_erase(uint32_t offset, uint32_t size)
+{
+	int status = 0;
+	uint32_t subsector_offset  = offset & (CAD_QSPI_SUBSECTOR_SIZE - 1);
+	uint32_t erase_size = LESS(size,
+				CAD_QSPI_SUBSECTOR_SIZE - subsector_offset);
+
+	while (size) {
+		status = cad_qspi_erase_subsector(offset);
+		if (status != 0)
+			break;
+
+		offset  += erase_size;
+		size -= erase_size;
+		erase_size = LESS(size, CAD_QSPI_SUBSECTOR_SIZE);
+	}
+	return status;
+}
+
+int cad_qspi_write(void *buffer, uint32_t offset, uint32_t size)
+{
+	int status, i;
+	uint32_t bank_count, bank_addr, bank_offset, copy_len;
+	uint8_t *write_data;
+
+	status = 0;
+
+	if ((offset >= qspi_device_size) ||
+			(offset + size - 1 >= qspi_device_size) ||
+			(size == 0) ||
+			((long)buffer & 0x3)  ||
+			(offset & 0x3) ||
+			(size & 0x3))
+		return -2;
+
+	if (CAD_QSPI_INDWR_RDSTAT(mmio_read_32(CAD_QSPI_OFFSET +
+						CAD_QSPI_INDWR))) {
+		ERROR("QSPI Error: Write in progress\n");
+		return -1;
+	}
+
+	bank_count = CAD_QSPI_BANK_ADDR(offset + size - 1) -
+			CAD_QSPI_BANK_ADDR(offset) + 1;
+	bank_addr = offset & CAD_QSPI_BANK_ADDR_MSK;
+	bank_offset = offset & (CAD_QSPI_BANK_SIZE - 1);
+
+	write_data = buffer;
+
+	copy_len = LESS(size, CAD_QSPI_BANK_SIZE - bank_offset);
+
+	for (i = 0; i < bank_count; ++i) {
+		status = cad_qspi_device_bank_select(
+				CAD_QSPI_BANK_ADDR(bank_addr));
+		if (status != 0)
+			break;
+
+		status = cad_qspi_write_bank(bank_offset, write_data,
+						copy_len);
+		if (status != 0)
+			break;
+
+		bank_addr += CAD_QSPI_BANK_SIZE;
+		write_data += copy_len;
+		size -= copy_len;
+		bank_offset = 0;
+
+		copy_len = LESS(size, CAD_QSPI_BANK_SIZE);
+	}
+	return status;
+}
+
+int cad_qspi_update(void *buffer, uint32_t offset, uint32_t size)
+{
+	int status = 0;
+
+	status = cad_qspi_erase(offset, size);
+	if (status != 0)
+		return status;
+
+	return cad_qspi_write(buffer, offset, size);
+}
+
+void cad_qspi_reset(void)
+{
+	cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_RESET_EN, 0);
+	cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_RESET_MEM, 0);
+}
+
diff --git a/plat/intel/soc/stratix10/drivers/qspi/cadence_qspi.h b/plat/intel/soc/stratix10/drivers/qspi/cadence_qspi.h
new file mode 100644
index 0000000..e419161
--- /dev/null
+++ b/plat/intel/soc/stratix10/drivers/qspi/cadence_qspi.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CAD_QSPI_H
+#define CAD_QSPI_H
+
+#define CAD_QSPI_MICRON_N25Q_SUPPORT		1
+
+#define CAD_QSPI_OFFSET				0xff8d2000
+
+#define CAD_INVALID				-1
+#define CAD_QSPI_ERROR				-2
+
+#define CAD_QSPI_ADDR_FASTREAD			0
+#define CAD_QSPI_ADDR_FASTREAD_DUAL_IO		1
+#define CAD_QSPI_ADDR_FASTREAD_QUAD_IO		2
+#define CAT_QSPI_ADDR_SINGLE_IO			0
+#define CAT_QSPI_ADDR_DUAL_IO			1
+#define CAT_QSPI_ADDR_QUAD_IO			2
+
+#define CAD_QSPI_BANK_ADDR(x)			((x) >> 24)
+#define CAD_QSPI_BANK_ADDR_MSK			0xff000000
+
+#define CAD_QSPI_COMMAND_TIMEOUT		0x10000000
+
+#define CAD_QSPI_CFG				0x0
+#define CAD_QSPI_CFG_BAUDDIV_MSK		0xff87ffff
+#define CAD_QSPI_CFG_BAUDDIV(x)			(((x) << 19) & 0x780000)
+#define CAD_QSPI_CFG_CS_MSK			~0x3c00
+#define CAD_QSPI_CFG_CS(x)			(((x) << 11))
+#define CAD_QSPI_CFG_ENABLE			(1 << 0)
+#define CAD_QSPI_CFG_ENDMA_CLR_MSK		0xffff7fff
+#define CAD_QSPI_CFG_IDLE			(1 << 31)
+#define CAD_QSPI_CFG_SELCLKPHASE_CLR_MSK	0xfffffffb
+#define CAD_QSPI_CFG_SELCLKPOL_CLR_MSK		0xfffffffd
+
+#define CAD_QSPIDATA_OFST			0xff900000
+
+#define CAD_QSPI_DELAY				0xc
+#define CAD_QSPI_DELAY_CSSOT(x)			(((x) & 0xff) << 0)
+#define CAD_QSPI_DELAY_CSEOT(x)			(((x) & 0xff) << 8)
+#define CAD_QSPI_DELAY_CSDADS(x)		(((x) & 0xff) << 16)
+#define CAD_QSPI_DELAY_CSDA(x)			(((x) & 0xff) << 24)
+
+#define CAD_QSPI_DEVSZ				0x14
+#define CAD_QSPI_DEVSZ_ADDR_BYTES(x)		((x) << 0)
+#define CAD_QSPI_DEVSZ_BYTES_PER_PAGE(x)	((x) << 4)
+#define CAD_QSPI_DEVSZ_BYTES_PER_BLOCK(x)	((x) << 16)
+
+#define CAD_QSPI_DEVWR				0x8
+#define CAD_QSPI_DEVRD				0x4
+#define CAD_QSPI_DEV_OPCODE(x)			(((x) & 0xff) << 0)
+#define CAD_QSPI_DEV_INST_TYPE(x)		(((x) & 0x03) << 8)
+#define CAD_QSPI_DEV_ADDR_TYPE(x)		(((x) & 0x03) << 12)
+#define CAD_QSPI_DEV_DATA_TYPE(x)		(((x) & 0x03) << 16)
+#define CAD_QSPI_DEV_MODE_BIT(x)		(((x) & 0x01) << 20)
+#define CAD_QSPI_DEV_DUMMY_CLK_CYCLE(x)		(((x) & 0x0f) << 24)
+
+#define CAD_QSPI_FLASHCMD			0x90
+#define CAD_QSPI_FLASHCMD_ADDR			0x94
+#define CAD_QSPI_FLASHCMD_EXECUTE		0x1
+#define CAD_QSPI_FLASHCMD_EXECUTE_STAT		0x2
+#define CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX	5
+#define CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES(x)	(((x) << 7) & 0x000f80)
+#define CAD_QSPI_FLASHCMD_OPCODE(x)		(((x) & 0xff) << 24)
+#define CAD_QSPI_FLASHCMD_ENRDDATA(x)		(((x) & 1) << 23)
+#define CAD_QSPI_FLASHCMD_NUMRDDATABYTES(x)	(((x) & 0xf) << 20)
+#define CAD_QSPI_FLASHCMD_ENCMDADDR(x)		(((x) & 1) << 19)
+#define CAD_QSPI_FLASHCMD_ENMODEBIT(x)		(((x) & 1) << 18)
+#define CAD_QSPI_FLASHCMD_NUMADDRBYTES(x)	(((x) & 0x3) << 16)
+#define CAD_QSPI_FLASHCMD_ENWRDATA(x)		(((x) & 1) << 15)
+#define CAD_QSPI_FLASHCMD_NUMWRDATABYTES(x)	(((x) & 0x7) << 12)
+#define CAD_QSPI_FLASHCMD_NUMDUMMYBYTES(x)	(((x) & 0x1f) << 7)
+#define CAD_QSPI_FLASHCMD_RDDATA0		0xa0
+#define CAD_QSPI_FLASHCMD_RDDATA1		0xa4
+#define CAD_QSPI_FLASHCMD_WRDATA0		0xa8
+#define CAD_QSPI_FLASHCMD_WRDATA1		0xac
+
+#define CAD_QSPI_RDDATACAP			0x10
+#define CAD_QSPI_RDDATACAP_BYP(x)		(((x) & 1) << 0)
+#define CAD_QSPI_RDDATACAP_DELAY(x)		(((x) & 0xf) << 1)
+
+#define CAD_QSPI_REMAPADDR			0x24
+#define CAD_QSPI_REMAPADDR_VALUE_SET(x)		(((x) & 0xffffffff) << 0)
+
+#define CAD_QSPI_SRAMPART			0x18
+#define CAD_QSPI_SRAMFILL			0x2c
+#define CAD_QSPI_SRAMPART_ADDR(x)		(((x) >> 0) & 0x3ff)
+#define CAD_QSPI_SRAM_FIFO_ENTRY_COUNT		(512 / sizeof(uint32_t))
+#define CAD_QSPI_SRAMFILL_INDWRPART(x)		(((x) >> 16) & 0x00ffff)
+#define CAD_QSPI_SRAMFILL_INDRDPART(x)		(((x) >> 0) & 0x00ffff)
+
+#define CAD_QSPI_SELCLKPHASE(x)			(((x) & 1) << 2)
+#define CAD_QSPI_SELCLKPOL(x)			(((x) & 1) << 1)
+
+#define CAD_QSPI_STIG_FLAGSR_PROGRAMREADY(x)	(((x) >> 7) & 1)
+#define CAD_QSPI_STIG_FLAGSR_ERASEREADY(x)	(((x) >> 7) & 1)
+#define CAD_QSPI_STIG_FLAGSR_ERASEERROR(x)	(((x) >> 5) & 1)
+#define CAD_QSPI_STIG_FLAGSR_PROGRAMERROR(x)	(((x) >> 4) & 1)
+#define CAD_QSPI_STIG_OPCODE_CLFSR		0x50
+#define CAD_QSPI_STIG_OPCODE_RDID		0x9f
+#define CAD_QSPI_STIG_OPCODE_WRDIS		0x4
+#define CAD_QSPI_STIG_OPCODE_WREN		0x6
+#define CAD_QSPI_STIG_OPCODE_SUBSEC_ERASE	0x20
+#define CAD_QSPI_STIG_OPCODE_SEC_ERASE		0xd8
+#define CAD_QSPI_STIG_OPCODE_WREN_EXT_REG	0xc5
+#define CAD_QSPI_STIG_OPCODE_DIE_ERASE		0xc4
+#define CAD_QSPI_STIG_OPCODE_BULK_ERASE		0xc7
+#define CAD_QSPI_STIG_OPCODE_RDSR		0x5
+#define CAD_QSPI_STIG_OPCODE_RDFLGSR		0x70
+#define CAD_QSPI_STIG_OPCODE_RESET_EN		0x66
+#define CAD_QSPI_STIG_OPCODE_RESET_MEM		0x99
+#define CAD_QSPI_STIG_RDID_CAPACITYID(x)	(((x) >> 16) & 0xff)
+#define CAD_QSPI_STIG_SR_BUSY(x)		(((x) >> 0) & 1)
+
+
+#define CAD_QSPI_INST_SINGLE			0
+#define CAD_QSPI_INST_DUAL			1
+#define CAD_QSPI_INST_QUAD			2
+
+#define CAD_QSPI_INDRDSTADDR			0x68
+#define CAD_QSPI_INDRDCNT			0x6c
+#define CAD_QSPI_INDRD				0x60
+#define CAD_QSPI_INDRD_RD_STAT(x)		(((x) >> 2) & 1)
+#define CAD_QSPI_INDRD_START			1
+#define CAD_QSPI_INDRD_IND_OPS_DONE		0x20
+
+#define CAD_QSPI_INDWR				0x70
+#define CAD_QSPI_INDWR_RDSTAT(x)		(((x) >> 2) & 1)
+#define CAD_QSPI_INDWRSTADDR			0x78
+#define CAD_QSPI_INDWRCNT			0x7c
+#define CAD_QSPI_INDWR_START			0x1
+#define CAD_QSPI_INDWR_INDDONE			0x20
+
+#define CAD_QSPI_INT_STATUS_ALL			0x0000ffff
+
+#define CAD_QSPI_N25Q_DIE_SIZE			0x02000000
+#define CAD_QSPI_BANK_SIZE			0x01000000
+#define CAD_QSPI_PAGE_SIZE			0x00000100
+
+#define CAD_QSPI_IRQMSK				0x44
+
+#define CAD_QSPI_SUBSECTOR_SIZE			0x1000
+
+#define S10_QSPI_ADDR_BYTES			2
+#define S10_QSPI_BYTES_PER_DEV			256
+#define S10_BYTES_PER_BLOCK			16
+
+#define QSPI_FAST_READ				0xb
+
+/* QSPI configurations */
+
+#define QSPI_CONFIG_CPOL			1
+#define QSPI_CONFIG_CPHA			1
+
+#define QSPI_CONFIG_CSSOT			0x14
+#define QSPI_CONFIG_CSEOT			0x14
+#define QSPI_CONFIG_CSDADS			0xff
+#define QSPI_CONFIG_CSDA			0xc8
+
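+/*
+ * Typical call sequence for this driver (a minimal sketch; the clock, timing
+ * and read-data-capture arguments below are illustrative, not values mandated
+ * by this driver):
+ *
+ *	cad_qspi_init(0, QSPI_CONFIG_CPHA, QSPI_CONFIG_CPOL, QSPI_CONFIG_CSDA,
+ *		      QSPI_CONFIG_CSDADS, QSPI_CONFIG_CSEOT, QSPI_CONFIG_CSSOT,
+ *		      0);
+ *	cad_qspi_set_chip_select(0);
+ *	cad_qspi_read(buf, 0x0, len);
+ */
+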
+int cad_qspi_init(uint32_t desired_clk_freq, uint32_t clk_phase,
+	uint32_t clk_pol, uint32_t csda, uint32_t csdads,
+	uint32_t cseot, uint32_t cssot, uint32_t rddatacap);
+void cad_qspi_set_chip_select(int cs);
+int cad_qspi_erase(uint32_t offset, uint32_t size);
+int cad_qspi_write(void *buffer, uint32_t offset, uint32_t size);
+int cad_qspi_read(void *buffer, uint32_t offset, uint32_t size);
+int cad_qspi_update(void *buffer, uint32_t offset, uint32_t size);
+
+#endif
+
diff --git a/plat/intel/soc/stratix10/plat_storage.c b/plat/intel/soc/stratix10/plat_storage.c
index cedcf1e..f5fd871 100644
--- a/plat/intel/soc/stratix10/plat_storage.c
+++ b/plat/intel/soc/stratix10/plat_storage.c
@@ -21,17 +21,21 @@
 #include <lib/utils.h>
 #include <common/tbbr/tbbr_img_def.h>
 #include "platform_def.h"
+#include "aarch64/stratix10_private.h"
 
-#define STRATIX10_FIP_BASE	(0)
-#define STRATIX10_FIP_MAX_SIZE	(0x1000000)
-#define STRATIX10_MMC_DATA_BASE	(0xffe3c000)
-#define STRATIX10_MMC_DATA_SIZE	(0x2000)
+#define STRATIX10_FIP_BASE		(0)
+#define STRATIX10_FIP_MAX_SIZE		(0x1000000)
+#define STRATIX10_MMC_DATA_BASE		(0xffe3c000)
+#define STRATIX10_MMC_DATA_SIZE		(0x2000)
+#define STRATIX10_QSPI_DATA_BASE	(0x3C00000)
+#define STRATIX10_QSPI_DATA_SIZE	(0x1000000)
 
-static const io_dev_connector_t *mmc_dev_con;
+
 static const io_dev_connector_t *fip_dev_con;
+static const io_dev_connector_t *boot_dev_con;
 
 static uintptr_t fip_dev_handle;
-static uintptr_t mmc_dev_handle;
+static uintptr_t boot_dev_handle;
 
 static const io_uuid_spec_t bl2_uuid_spec = {
 	.uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2,
@@ -46,47 +50,35 @@
 };
 
 uintptr_t a2_lba_offset;
+const char a2[] = {0xa2, 0x0};
 
 static const io_block_spec_t gpt_block_spec = {
 	.offset = 0,
 	.length = MMC_BLOCK_SIZE
 };
 
-static int check_mmc(const uintptr_t spec);
 static int check_fip(const uintptr_t spec);
+static int check_dev(const uintptr_t spec);
 
-static io_block_spec_t mmc_fip_spec = {
+static io_block_dev_spec_t boot_dev_spec;
+static int (*register_io_dev)(const io_dev_connector_t **);
+
+static io_block_spec_t fip_spec = {
 	.offset		= STRATIX10_FIP_BASE,
 	.length		= STRATIX10_FIP_MAX_SIZE,
 };
 
-const char a2[] = {0xa2, 0x0};
-
-static const io_block_dev_spec_t mmc_dev_spec = {
-	.buffer	= {
-		.offset = STRATIX10_MMC_DATA_BASE,
-		.length = MMC_BLOCK_SIZE,
-	},
-
-	.ops	= {
-		.read	= mmc_read_blocks,
-		.write	= mmc_write_blocks,
-	},
-
-	.block_size = MMC_BLOCK_SIZE,
-};
-
 struct plat_io_policy {
-	uintptr_t	*dev_handle;
-	uintptr_t	image_spec;
-	int		(*check)(const uintptr_t spec);
+	uintptr_t       *dev_handle;
+	uintptr_t       image_spec;
+	int             (*check)(const uintptr_t spec);
 };
 
 static const struct plat_io_policy policies[] = {
 	[FIP_IMAGE_ID] = {
-		&mmc_dev_handle,
-		(uintptr_t)&mmc_fip_spec,
-		check_mmc
+		&boot_dev_handle,
+		(uintptr_t)&fip_spec,
+		check_dev
 	},
 	[BL2_IMAGE_ID] = {
 	  &fip_dev_handle,
@@ -104,20 +96,20 @@
 		check_fip
 	},
 	[GPT_IMAGE_ID] = {
-		&mmc_dev_handle,
+		&boot_dev_handle,
 		(uintptr_t) &gpt_block_spec,
-		check_mmc
+		check_dev
 	},
 };
 
-static int check_mmc(const uintptr_t spec)
+static int check_dev(const uintptr_t spec)
 {
 	int result;
 	uintptr_t local_handle;
 
-	result = io_dev_init(mmc_dev_handle, (uintptr_t)NULL);
+	result = io_dev_init(boot_dev_handle, (uintptr_t)NULL);
 	if (result == 0) {
-		result = io_open(mmc_dev_handle, spec, &local_handle);
+		result = io_open(boot_dev_handle, spec, &local_handle);
 		if (result == 0)
 			io_close(local_handle);
 	}
@@ -138,26 +130,48 @@
 	return result;
 }
 
-void stratix10_io_setup(void)
+void stratix10_io_setup(int boot_source)
 {
 	int result;
 
+	switch (boot_source) {
+	case BOOT_SOURCE_SDMMC:
+		register_io_dev = &register_io_dev_block;
+		boot_dev_spec.buffer.offset	= STRATIX10_MMC_DATA_BASE;
+		boot_dev_spec.buffer.length	= MMC_BLOCK_SIZE;
+		boot_dev_spec.ops.read		= mmc_read_blocks;
+		boot_dev_spec.ops.write		= mmc_write_blocks;
+		boot_dev_spec.block_size	= MMC_BLOCK_SIZE;
+		break;
+
+	case BOOT_SOURCE_QSPI:
+		register_io_dev = &register_io_dev_memmap;
+		fip_spec.offset = fip_spec.offset + STRATIX10_QSPI_DATA_BASE;
+		break;
+
-	result = register_io_dev_block(&mmc_dev_con);
+	default:
+		ERROR("Unsupported boot source\n");
+		panic();
+		break;
+	}
+
+	result = (*register_io_dev)(&boot_dev_con);
 	assert(result == 0);
 
 	result = register_io_dev_fip(&fip_dev_con);
 	assert(result == 0);
 
-	result = io_dev_open(mmc_dev_con, (uintptr_t)&mmc_dev_spec,
-			&mmc_dev_handle);
+	result = io_dev_open(boot_dev_con, (uintptr_t)&boot_dev_spec,
+			&boot_dev_handle);
 	assert(result == 0);
 
 	result = io_dev_open(fip_dev_con, (uintptr_t)NULL, &fip_dev_handle);
 	assert(result == 0);
 
-	partition_init(GPT_IMAGE_ID);
-
-	mmc_fip_spec.offset = get_partition_entry(a2)->start;
+	if (boot_source == BOOT_SOURCE_SDMMC) {
+		partition_init(GPT_IMAGE_ID);
+		fip_spec.offset = get_partition_entry(a2)->start;
+	}
 
 	(void)result;
 }
diff --git a/plat/intel/soc/stratix10/platform.mk b/plat/intel/soc/stratix10/platform.mk
index debdea1..1f06fbd 100644
--- a/plat/intel/soc/stratix10/platform.mk
+++ b/plat/intel/soc/stratix10/platform.mk
@@ -31,7 +31,7 @@
 		drivers/io/io_block.c					\
 		drivers/io/io_fip.c					\
 		drivers/gpio/gpio.c					\
-		drivers/io/io_memmap.c					\
+		drivers/intel/soc/stratix10/io/s10_memmap_qspi.c	\
 		plat/intel/soc/stratix10/bl2_plat_setup.c		\
 		plat/intel/soc/stratix10/plat_storage.c			\
                 plat/intel/soc/stratix10/bl2_plat_mem_params_desc.c	\
@@ -44,7 +44,9 @@
 		lib/cpus/aarch64/cortex_a53.S				\
 		plat/intel/soc/stratix10/stratix10_image_load.c		\
 		plat/intel/soc/stratix10/soc/s10_system_manager.c	\
-                common/desc_image_load.c
+		common/desc_image_load.c				\
+		plat/intel/soc/stratix10/soc/s10_mailbox.c		\
+		plat/intel/soc/stratix10/drivers/qspi/cadence_qspi.c
 
 BL31_SOURCES	+=	drivers/arm/cci/cci.c				\
 		lib/cpus/aarch64/cortex_a53.S				\
diff --git a/plat/socionext/synquacer/drivers/scp/sq_scmi.c b/plat/socionext/synquacer/drivers/scp/sq_scmi.c
new file mode 100644
index 0000000..e2013cc
--- /dev/null
+++ b/plat/socionext/synquacer/drivers/scp/sq_scmi.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/css/css_mhu_doorbell.h>
+#include <drivers/arm/css/css_scp.h>
+#include <drivers/arm/css/scmi.h>
+#include <plat/arm/css/common/css_pm.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+
+#include <scmi_sq.h>
+#include <sq_common.h>
+
+/*
+ * This file implements the SCP helper functions using the SCMI protocol.
+ */
+
+DEFINE_BAKERY_LOCK(sq_scmi_lock);
+#define SQ_SCMI_LOCK_GET_INSTANCE	(&sq_scmi_lock)
+
+#define SQ_SCMI_PAYLOAD_BASE		PLAT_SQ_SCP_COM_SHARED_MEM_BASE
+#define MHU_CPU_INTR_S_SET_OFFSET	0x308
+
+const uint32_t sq_core_pos_to_scmi_dmn_id_map[PLATFORM_CORE_COUNT] = {
+	0,   1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11,
+	12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23
+};
+
+static scmi_channel_plat_info_t sq_scmi_plat_info = {
+		.scmi_mbx_mem = SQ_SCMI_PAYLOAD_BASE,
+		.db_reg_addr = PLAT_SQ_MHU_BASE + MHU_CPU_INTR_S_SET_OFFSET,
+		.db_preserve_mask = 0xfffffffe,
+		.db_modify_mask = 0x1,
+		.ring_doorbell = &mhu_ring_doorbell,
+};
+
+/*
+ * SCMI power state parameter bit field encoding for SynQuacer platform.
+ *
+ * 31  20 19       16 15      12 11       8 7        4 3         0
+ * +-------------------------------------------------------------+
+ * | SBZ | Max level |  Level 3 |  Level 2 |  Level 1 |  Level 0 |
+ * |     |           |   state  |   state  |   state  |   state  |
+ * +-------------------------------------------------------------+
+ *
+ * `Max level` encodes the highest level that has a valid power state
+ * encoded in the power state.
+ */
+#define SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT	16
+#define SCMI_PWR_STATE_MAX_PWR_LVL_WIDTH	4
+#define SCMI_PWR_STATE_MAX_PWR_LVL_MASK		\
+				((1 << SCMI_PWR_STATE_MAX_PWR_LVL_WIDTH) - 1)
+#define SCMI_SET_PWR_STATE_MAX_PWR_LVL(_power_state, _max_level)		\
+		(_power_state) |= ((_max_level) & SCMI_PWR_STATE_MAX_PWR_LVL_MASK)\
+				<< SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT
+#define SCMI_GET_PWR_STATE_MAX_PWR_LVL(_power_state)		\
+		(((_power_state) >> SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT)	\
+				& SCMI_PWR_STATE_MAX_PWR_LVL_MASK)
+
+#define SCMI_PWR_STATE_LVL_WIDTH		4
+#define SCMI_PWR_STATE_LVL_MASK			\
+				((1 << SCMI_PWR_STATE_LVL_WIDTH) - 1)
+#define SCMI_SET_PWR_STATE_LVL(_power_state, _level, _level_state)		\
+		(_power_state) |= ((_level_state) & SCMI_PWR_STATE_LVL_MASK)	\
+				<< (SCMI_PWR_STATE_LVL_WIDTH * (_level))
+#define SCMI_GET_PWR_STATE_LVL(_power_state, _level)		\
+		(((_power_state) >> (SCMI_PWR_STATE_LVL_WIDTH * (_level))) &	\
+				SCMI_PWR_STATE_LVL_MASK)
+
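+/*
+ * Example encoding (illustrative): to request OFF at levels 0 and 1 with a
+ * maximum level of 1, start from scmi_pwr_state = 0 and apply
+ * SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, 0, scmi_power_state_off),
+ * SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, 1, scmi_power_state_off) and
+ * SCMI_SET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state, 1); the result is
+ * 0x00010000 (both level states 0, max level 1 in bits [19:16]).
+ */
+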
+/*
+ * The SCMI power state enumeration for a power domain level
+ */
+typedef enum {
+	scmi_power_state_off = 0,
+	scmi_power_state_on = 1,
+	scmi_power_state_sleep = 2,
+} scmi_power_state_t;
+
+/*
+ * The global handle for invoking the SCMI driver APIs after the driver
+ * has been initialized.
+ */
+static void *sq_scmi_handle;
+
+/* The SCMI channel global object */
+static scmi_channel_t channel;
+
+/*
+ * Helper function to turn off a CPU power domain and
+ * its parent power domains if applicable.
+ */
+void sq_scmi_off(const struct psci_power_state *target_state)
+{
+	int lvl = 0, ret;
+	uint32_t scmi_pwr_state = 0;
+
+	/* At least the CPU level should be specified to be OFF */
+	assert(target_state->pwr_domain_state[SQ_PWR_LVL0] ==
+							SQ_LOCAL_STATE_OFF);
+
+	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+		if (target_state->pwr_domain_state[lvl] == SQ_LOCAL_STATE_RUN)
+			break;
+
+		assert(target_state->pwr_domain_state[lvl] ==
+							SQ_LOCAL_STATE_OFF);
+		SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, lvl,
+				scmi_power_state_off);
+	}
+
+	SCMI_SET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state, lvl - 1);
+
+	ret = scmi_pwr_state_set(sq_scmi_handle,
+		sq_core_pos_to_scmi_dmn_id_map[plat_my_core_pos()],
+		scmi_pwr_state);
+
+	if (ret != SCMI_E_QUEUED && ret != SCMI_E_SUCCESS) {
+		ERROR("SCMI set power state command return 0x%x unexpected\n",
+				ret);
+		panic();
+	}
+}
+
+/*
+ * Helper function to turn ON a CPU power domain and
+ * its parent power domains if applicable.
+ */
+void sq_scmi_on(u_register_t mpidr)
+{
+	int lvl = 0, ret, core_pos;
+	uint32_t scmi_pwr_state = 0;
+
+	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
+		SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, lvl,
+				scmi_power_state_on);
+
+	SCMI_SET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state, lvl - 1);
+
+	core_pos = plat_core_pos_by_mpidr(mpidr);
+	assert(core_pos >= 0 && core_pos < PLATFORM_CORE_COUNT);
+
+	ret = scmi_pwr_state_set(sq_scmi_handle,
+		sq_core_pos_to_scmi_dmn_id_map[core_pos],
+		scmi_pwr_state);
+
+	if (ret != SCMI_E_QUEUED && ret != SCMI_E_SUCCESS) {
+		ERROR("SCMI set power state command return 0x%x unexpected\n",
+				ret);
+		panic();
+	}
+}
+
+void __dead2 sq_scmi_system_off(int state)
+{
+	int ret;
+
+	/*
+	 * Disable GIC CPU interface to prevent pending interrupt from waking
+	 * up the AP from WFI.
+	 */
+	sq_gic_cpuif_disable();
+
+	/*
+	 * Issue the SCMI system power state command with a forceful
+	 * request.
+	 */
+	ret = scmi_sys_pwr_state_set(sq_scmi_handle,
+			SCMI_SYS_PWR_FORCEFUL_REQ,
+			state);
+
+	if (ret != SCMI_E_SUCCESS) {
+		ERROR("SCMI system power state set 0x%x returns unexpected 0x%x\n",
+			state, ret);
+		panic();
+	}
+	wfi();
+	ERROR("SCMI set power state: operation not handled.\n");
+	panic();
+}
+
+/*
+ * Helper function to reset the system via SCMI.
+ */
+void __dead2 sq_scmi_sys_reboot(void)
+{
+	sq_scmi_system_off(SCMI_SYS_PWR_COLD_RESET);
+}
+
+static int scmi_ap_core_init(scmi_channel_t *ch)
+{
+#if PROGRAMMABLE_RESET_ADDRESS
+	uint32_t version;
+	int ret;
+
+	ret = scmi_proto_version(ch, SCMI_AP_CORE_PROTO_ID, &version);
+	if (ret != SCMI_E_SUCCESS) {
+		WARN("SCMI AP core protocol version message failed\n");
+		return -1;
+	}
+
+	if (!is_scmi_version_compatible(SCMI_AP_CORE_PROTO_VER, version)) {
+		WARN("SCMI AP core protocol version 0x%x incompatible with driver version 0x%x\n",
+						version, SCMI_AP_CORE_PROTO_VER);
+		return -1;
+	}
+	INFO("SCMI AP core protocol version 0x%x detected\n", version);
+#endif
+	return 0;
+}
+
+void __init plat_sq_pwrc_setup(void)
+{
+	channel.info = &sq_scmi_plat_info;
+	channel.lock = SQ_SCMI_LOCK_GET_INSTANCE;
+	sq_scmi_handle = scmi_init(&channel);
+	if (sq_scmi_handle == NULL) {
+		ERROR("SCMI Initialization failed\n");
+		panic();
+	}
+	if (scmi_ap_core_init(&channel) < 0) {
+		ERROR("SCMI AP core protocol initialization failed\n");
+		panic();
+	}
+}
+
+uint32_t sq_scmi_get_draminfo(struct draminfo *info)
+{
+	scmi_get_draminfo(sq_scmi_handle, info);
+
+	return 0;
+}
diff --git a/plat/socionext/synquacer/drivers/scp/sq_scp.c b/plat/socionext/synquacer/drivers/scp/sq_scp.c
new file mode 100644
index 0000000..e494022
--- /dev/null
+++ b/plat/socionext/synquacer/drivers/scp/sq_scp.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <sq_common.h>
+#include "sq_scpi.h"
+
+/*
+ * Helper function to get dram information from SCP.
+ */
+uint32_t sq_scp_get_draminfo(struct draminfo *info)
+{
+#if SQ_USE_SCMI_DRIVER
+	sq_scmi_get_draminfo(info);
+#else
+	scpi_get_draminfo(info);
+#endif
+	return 0;
+}
diff --git a/plat/socionext/synquacer/drivers/scpi/sq_scpi.h b/plat/socionext/synquacer/drivers/scpi/sq_scpi.h
index 38cc19e..eb6ce5c 100644
--- a/plat/socionext/synquacer/drivers/scpi/sq_scpi.h
+++ b/plat/socionext/synquacer/drivers/scpi/sq_scpi.h
@@ -78,5 +78,6 @@
 					scpi_power_state_t cluster_state,
 					scpi_power_state_t css_state);
 uint32_t scpi_sys_power_state(scpi_system_state_t system_state);
+uint32_t scpi_get_draminfo(struct draminfo *info);
 
 #endif /* SQ_SCPI_H */
diff --git a/plat/socionext/synquacer/include/platform_def.h b/plat/socionext/synquacer/include/platform_def.h
index 0cec81b..7e54b39 100644
--- a/plat/socionext/synquacer/include/platform_def.h
+++ b/plat/socionext/synquacer/include/platform_def.h
@@ -16,6 +16,16 @@
 #define PLATFORM_CORE_COUNT		(PLAT_CLUSTER_COUNT *	\
 					 PLAT_MAX_CORES_PER_CLUSTER)
 
+/* Macros to read the SQ power domain state */
+#define SQ_PWR_LVL0		MPIDR_AFFLVL0
+#define SQ_PWR_LVL1		MPIDR_AFFLVL1
+#define SQ_PWR_LVL2		MPIDR_AFFLVL2
+
+#define SQ_CORE_PWR_STATE(state)	(state)->pwr_domain_state[SQ_PWR_LVL0]
+#define SQ_CLUSTER_PWR_STATE(state)	(state)->pwr_domain_state[SQ_PWR_LVL1]
+#define SQ_SYSTEM_PWR_STATE(state)	((PLAT_MAX_PWR_LVL > SQ_PWR_LVL1) ?\
+				(state)->pwr_domain_state[SQ_PWR_LVL2] : 0)
+
 #define PLAT_MAX_PWR_LVL		U(1)
 #define PLAT_MAX_RET_STATE		U(1)
 #define PLAT_MAX_OFF_STATE		U(2)
@@ -70,6 +80,7 @@
 #define DRAMINFO_BASE			0x2E00FFC0
 
 #define PLAT_SQ_MHU_BASE		0x45000000
+#define PLAT_MHUV2_BASE			0xFFFFFFFF /* MHUV2 is not supported */
 
 #define PLAT_SQ_SCP_COM_SHARED_MEM_BASE		0x45400000
 #define SCPI_CMD_GET_DRAMINFO			0x1
diff --git a/plat/socionext/synquacer/include/sq_common.h b/plat/socionext/synquacer/include/sq_common.h
index abd9090..a985822 100644
--- a/plat/socionext/synquacer/include/sq_common.h
+++ b/plat/socionext/synquacer/include/sq_common.h
@@ -9,6 +9,7 @@
 
 #include <stdint.h>
 
+#include <lib/psci/psci.h>
 #include <lib/xlat_tables/xlat_tables_v2.h>
 
 struct draminfo {
@@ -22,7 +23,7 @@
 	uint64_t	size3;
 };
 
-uint32_t scpi_get_draminfo(struct draminfo *info);
+uint32_t sq_scp_get_draminfo(struct draminfo *info);
 
 void plat_sq_pwrc_setup(void);
 
@@ -41,4 +42,12 @@
 void sq_mmap_setup(uintptr_t total_base, size_t total_size,
 		   const struct mmap_region *mmap);
 
+/* SCMI API for power management by SCP */
+void sq_scmi_off(const struct psci_power_state *target_state);
+void sq_scmi_on(u_register_t mpidr);
+void __dead2 sq_scmi_sys_reboot(void);
+void __dead2 sq_scmi_system_off(int state);
+/* SCMI API for vendor specific protocol */
+uint32_t sq_scmi_get_draminfo(struct draminfo *info);
+
 #endif /* SQ_COMMON_H */
diff --git a/plat/socionext/synquacer/platform.mk b/plat/socionext/synquacer/platform.mk
index 53c39a0..f5e72cb 100644
--- a/plat/socionext/synquacer/platform.mk
+++ b/plat/socionext/synquacer/platform.mk
@@ -10,9 +10,10 @@
 override USE_COHERENT_MEM		:= 1
 override SEPARATE_CODE_AND_RODATA	:= 1
 override ENABLE_SVE_FOR_NS		:= 0
-
 # Enable workarounds for selected Cortex-A53 erratas.
 ERRATA_A53_855873		:= 1
+# Build option to use the SCMI driver instead of SCPI (disabled by default)
+SQ_USE_SCMI_DRIVER		?= 0
 
 # Libraries
 include lib/xlat_tables_v2/xlat_tables.mk
@@ -20,7 +21,9 @@
 PLAT_PATH		:=	plat/socionext/synquacer
 PLAT_INCLUDES		:=	-I$(PLAT_PATH)/include		\
 				-I$(PLAT_PATH)/drivers/scpi	\
-				-I$(PLAT_PATH)/drivers/mhu
+				-I$(PLAT_PATH)/drivers/mhu \
+				-Idrivers/arm/css/scmi \
+				-Idrivers/arm/css/scmi/vendor
 
 PLAT_BL_COMMON_SOURCES	+=	$(PLAT_PATH)/sq_helpers.S		\
 				drivers/arm/pl011/aarch64/pl011_console.S \
@@ -40,12 +43,27 @@
 				$(PLAT_PATH)/sq_topology.c		\
 				$(PLAT_PATH)/sq_psci.c			\
 				$(PLAT_PATH)/sq_gicv3.c			\
-				$(PLAT_PATH)/sq_xlat_setup.c		\
-				$(PLAT_PATH)/drivers/scpi/sq_scpi.c	\
+				$(PLAT_PATH)/sq_xlat_setup.c	\
+				$(PLAT_PATH)/drivers/scp/sq_scp.c
+
+ifeq (${SQ_USE_SCMI_DRIVER},0)
+BL31_SOURCES		+=	$(PLAT_PATH)/drivers/scpi/sq_scpi.c	\
 				$(PLAT_PATH)/drivers/mhu/sq_mhu.c
+else
+BL31_SOURCES		+=	$(PLAT_PATH)/drivers/scp/sq_scmi.c		\
+				drivers/arm/css/scmi/scmi_common.c		\
+				drivers/arm/css/scmi/scmi_pwr_dmn_proto.c	\
+				drivers/arm/css/scmi/scmi_sys_pwr_proto.c	\
+				drivers/arm/css/scmi/vendor/scmi_sq.c	\
+				drivers/arm/css/mhu/css_mhu_doorbell.c
+endif
 
 ifeq (${ENABLE_SPM},1)
 $(eval $(call add_define,PLAT_EXTRA_LD_SCRIPT))
 
 BL31_SOURCES		+=	$(PLAT_PATH)/sq_spm.c
 endif
+
+ifeq (${SQ_USE_SCMI_DRIVER},1)
+$(eval $(call add_define,SQ_USE_SCMI_DRIVER))
+endif
diff --git a/plat/socionext/synquacer/sq_bl31_setup.c b/plat/socionext/synquacer/sq_bl31_setup.c
index fef84ef..c78fe91 100644
--- a/plat/socionext/synquacer/sq_bl31_setup.c
+++ b/plat/socionext/synquacer/sq_bl31_setup.c
@@ -14,7 +14,6 @@
 #include <common/debug.h>
 #include <drivers/arm/pl011.h>
 #include <lib/mmio.h>
-
 #include <sq_common.h>
 
 static console_pl011_t console;
@@ -83,7 +82,7 @@
 #ifdef SPD_opteed
 	struct draminfo di = {0};
 
-	scpi_get_draminfo(&di);
+	sq_scp_get_draminfo(&di);
 
 	/*
 	 * Check if OP-TEE has been loaded in Secure RAM allocated
@@ -154,7 +153,7 @@
 {
 	struct draminfo *di = (struct draminfo *)(unsigned long)DRAMINFO_BASE;
 
-	scpi_get_draminfo(di);
+	sq_scp_get_draminfo(di);
 }
 
 void bl31_plat_arch_setup(void)
diff --git a/plat/socionext/synquacer/sq_psci.c b/plat/socionext/synquacer/sq_psci.c
index 134224d..731b19a 100644
--- a/plat/socionext/synquacer/sq_psci.c
+++ b/plat/socionext/synquacer/sq_psci.c
@@ -19,26 +19,20 @@
 #include <sq_common.h>
 #include "sq_scpi.h"
 
-/* Macros to read the SQ power domain state */
-#define SQ_PWR_LVL0	MPIDR_AFFLVL0
-#define SQ_PWR_LVL1	MPIDR_AFFLVL1
-#define SQ_PWR_LVL2	MPIDR_AFFLVL2
-
-#define SQ_CORE_PWR_STATE(state)	(state)->pwr_domain_state[SQ_PWR_LVL0]
-#define SQ_CLUSTER_PWR_STATE(state)	(state)->pwr_domain_state[SQ_PWR_LVL1]
-#define SQ_SYSTEM_PWR_STATE(state)	((PLAT_MAX_PWR_LVL > SQ_PWR_LVL1) ?\
-				(state)->pwr_domain_state[SQ_PWR_LVL2] : 0)
-
 uintptr_t sq_sec_entrypoint;
 
 int sq_pwr_domain_on(u_register_t mpidr)
 {
+#if SQ_USE_SCMI_DRIVER
+	sq_scmi_on(mpidr);
+#else
 	/*
 	 * SCP takes care of powering up parent power domains so we
 	 * only need to care about level 0
 	 */
 	scpi_set_sq_power_state(mpidr, scpi_power_on, scpi_power_on,
 				 scpi_power_on);
+#endif
 
 	return PSCI_E_SUCCESS;
 }
@@ -70,6 +64,7 @@
 	sq_gic_cpuif_enable();
 }
 
+#if !SQ_USE_SCMI_DRIVER
 static void sq_power_down_common(const psci_power_state_t *target_state)
 {
 	uint32_t cluster_state = scpi_power_on;
@@ -97,10 +92,15 @@
 				 cluster_state,
 				 system_state);
 }
+#endif
 
 void sq_pwr_domain_off(const psci_power_state_t *target_state)
 {
+#if SQ_USE_SCMI_DRIVER
+	sq_scmi_off(target_state);
+#else
 	sq_power_down_common(target_state);
+#endif
 }
 
 void __dead2 sq_system_off(void)
@@ -135,6 +135,9 @@
 
 void __dead2 sq_system_reset(void)
 {
+#if SQ_USE_SCMI_DRIVER
+	sq_scmi_sys_reboot();
+#else
 	uint32_t response;
 
 	/* Send the system reset request to the SCP */
@@ -147,6 +150,7 @@
 	wfi();
 	ERROR("SQ System Reset: operation not handled.\n");
 	panic();
+#endif
 }
 
 void sq_cpu_standby(plat_local_state_t cpu_state)
diff --git a/readme.rst b/readme.rst
index cb8f800..1df8637 100644
--- a/readme.rst
+++ b/readme.rst
@@ -255,7 +255,7 @@
 
 Arm welcomes any feedback on TF-A. If you think you have found a security
 vulnerability, please report this using the process defined in the TF-A
-`Security Centre`_. For all other feedback, please use the
+`Security Center`_. For all other feedback, please use the
 `GitHub issue tracker`_.
 
 Arm licensees may contact Arm directly via their partner managers.
@@ -263,14 +263,14 @@
 Security advisories
 ~~~~~~~~~~~~~~~~~~~
 
-`Security Advisory TFV-1`_
-`Security Advisory TFV-2`_
-`Security Advisory TFV-3`_
-`Security Advisory TFV-4`_
-`Security Advisory TFV-5`_
-`Security Advisory TFV-6`_
-`Security Advisory TFV-7`_
-`Security Advisory TFV-8`_
+-  `Security Advisory TFV-1`_
+-  `Security Advisory TFV-2`_
+-  `Security Advisory TFV-3`_
+-  `Security Advisory TFV-4`_
+-  `Security Advisory TFV-5`_
+-  `Security Advisory TFV-6`_
+-  `Security Advisory TFV-7`_
+-  `Security Advisory TFV-8`_
 
 
 --------------
diff --git a/tools/cert_create/src/tbbr/tbb_ext.c b/tools/cert_create/src/tbbr/tbb_ext.c
index d0038a2..ee5377f 100644
--- a/tools/cert_create/src/tbbr/tbb_ext.c
+++ b/tools/cert_create/src/tbbr/tbb_ext.c
@@ -19,10 +19,6 @@
 #include "tbbr/tbb_ext.h"
 #include "tbbr/tbb_key.h"
 
-/* TODO: get these values from the command line */
-#define TRUSTED_WORLD_NVCTR_VALUE	0
-#define NORMAL_WORLD_NVCTR_VALUE	0
-
 static ext_t tbb_ext[] = {
 	[TRUSTED_FW_NVCOUNTER_EXT] = {
 		.oid = TRUSTED_FW_NVCOUNTER_OID,
diff --git a/tools/fiptool/tbbr_config.h b/tools/fiptool/tbbr_config.h
index 2d89777..1fc6cad 100644
--- a/tools/fiptool/tbbr_config.h
+++ b/tools/fiptool/tbbr_config.h
@@ -11,7 +11,6 @@
 
 #include <uuid.h>
 
-/* TODO: Update this number as required */
 #define TOC_HEADER_SERIAL_NUMBER 0x12345678
 
 typedef struct toc_entry {