Merge pull request #1396 from ruchi393/multiple_fip

Extend FIP io driver to support multiple FIP devices
diff --git a/Makefile b/Makefile
index f230f4a..180c558 100644
--- a/Makefile
+++ b/Makefile
@@ -151,6 +151,7 @@
 TF_CFLAGS_aarch64	=	-march=armv8-a
 endif
 
+TF_CFLAGS_aarch32	+=	-mno-unaligned-access
 TF_CFLAGS_aarch64	+=	-mgeneral-regs-only -mstrict-align
 
 ASFLAGS_aarch32		=	$(march32-directive)
diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
index 0d1077c..58e8afb 100644
--- a/bl31/aarch64/bl31_entrypoint.S
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -170,15 +170,12 @@
 	 * enter coherency (as CPUs already are); and there's no reason to have
 	 * caches disabled either.
 	 */
-	mov	x0, #DISABLE_DCACHE
-	bl	bl31_plat_enable_mmu
-
 #if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
-	mrs	x0, sctlr_el3
-	orr	x0, x0, #SCTLR_C_BIT
-	msr	sctlr_el3, x0
-	isb
+	mov	x0, xzr
+#else
+	mov	x0, #DISABLE_DCACHE
 #endif
+	bl	bl31_plat_enable_mmu
 
 	bl	psci_warmboot_entrypoint
 
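In C terms, the consolidated warm-boot path above behaves like the following sketch (a sketch only; the flag names come from this patch, and the platform hook is described in the porting-guide change further below):

	#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
		bl31_plat_enable_mmu(0);		/* enable MMU and D-cache together */
	#else
		bl31_plat_enable_mmu(DISABLE_DCACHE);	/* D-cache is enabled later */
	#endif
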
diff --git a/bl32/sp_min/aarch32/entrypoint.S b/bl32/sp_min/aarch32/entrypoint.S
index 87ef3f3..d6853cc 100644
--- a/bl32/sp_min/aarch32/entrypoint.S
+++ b/bl32/sp_min/aarch32/entrypoint.S
@@ -298,20 +298,17 @@
 	 * enter coherency (as CPUs already are); and there's no reason to have
 	 * caches disabled either.
 	 */
+#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
+	mov	r0, #0
+#else
 	mov	r0, #DISABLE_DCACHE
+#endif
 	bl	bl32_plat_enable_mmu
 
 #if SP_MIN_WITH_SECURE_FIQ
 	route_fiq_to_sp_min r0
 #endif
 
-#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
-	ldcopr	r0, SCTLR
-	orr	r0, r0, #SCTLR_C_BIT
-	stcopr	r0, SCTLR
-	isb
-#endif
-
 	bl	sp_min_warm_boot
 	bl	smc_get_next_ctx
 	/* r0 points to `smc_ctx_t` */
diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S
index 489183c..5d9da85 100644
--- a/bl32/tsp/aarch64/tsp_entrypoint.S
+++ b/bl32/tsp/aarch64/tsp_entrypoint.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -247,41 +247,13 @@
 	bl	plat_set_my_stack
 
 	/* --------------------------------------------
-	 * Enable the MMU with the DCache disabled. It
-	 * is safe to use stacks allocated in normal
-	 * memory as a result. All memory accesses are
-	 * marked nGnRnE when the MMU is disabled. So
-	 * all the stack writes will make it to memory.
-	 * All memory accesses are marked Non-cacheable
-	 * when the MMU is enabled but D$ is disabled.
-	 * So used stack memory is guaranteed to be
-	 * visible immediately after the MMU is enabled
-	 * Enabling the DCache at the same time as the
-	 * MMU can lead to speculatively fetched and
-	 * possibly stale stack memory being read from
-	 * other caches. This can lead to coherency
-	 * issues.
+	 * Enable MMU and D-caches together.
 	 * --------------------------------------------
 	 */
-	mov	x0, #DISABLE_DCACHE
+	mov	x0, #0
 	bl	bl32_plat_enable_mmu
 
 	/* ---------------------------------------------
-	 * Enable the Data cache now that the MMU has
-	 * been enabled. The stack has been unwound. It
-	 * will be written first before being read. This
-	 * will invalidate any stale cache lines resi-
-	 * -dent in other caches. We assume that
-	 * interconnect coherency has been enabled for
-	 * this cluster by EL3 firmware.
-	 * ---------------------------------------------
-	 */
-	mrs	x0, sctlr_el1
-	orr	x0, x0, #SCTLR_C_BIT
-	msr	sctlr_el1, x0
-	isb
-
-	/* ---------------------------------------------
 	 * Enter C runtime to perform any remaining
 	 * book keeping
 	 * ---------------------------------------------
diff --git a/docs/firmware-design.rst b/docs/firmware-design.rst
index e3500c2..8aa7622 100644
--- a/docs/firmware-design.rst
+++ b/docs/firmware-design.rst
@@ -306,6 +306,8 @@
 -  If the BL1 dynamic configuration file, ``TB_FW_CONFIG``, is available, then
    load it to the platform defined address and make it available to BL2 via
    ``arg0``.
+-  Configure the system timer and program ``CNTFRQ_EL0`` for use by the
+   NS-BL1U and NS-BL2U firmware update images (see the sketch below).
 
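The added step amounts to a single register write; a minimal sketch, assuming the generic TF-A helpers ``write_cntfrq_el0()`` and ``plat_get_syscnt_freq2()``:

	/* Make the system counter frequency visible to NS-BL1U and NS-BL2U. */
	write_cntfrq_el0(plat_get_syscnt_freq2());
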
 Firmware Update detection and execution
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/plat/allwinner.rst b/docs/plat/allwinner.rst
index a7e84a3..825fded 100644
--- a/docs/plat/allwinner.rst
+++ b/docs/plat/allwinner.rst
@@ -4,9 +4,11 @@
 Trusted Firmware-A (TF-A) implements the EL3 firmware layer for Allwinner
 SoCs with ARMv8 cores. Only BL31 is used to provide proper EL3 setup and
 PSCI runtime services.
+
 U-Boot's SPL acts as a loader, loading both BL31 and BL33 (typically U-Boot).
 Loading is done from SD card, eMMC or SPI flash, or via a USB debug
 interface (FEL).
+
 BL31 lives in SRAM A2, which is documented to be accessible from secure
 world only.
 
@@ -27,3 +29,13 @@
     make CROSS_COMPILE=aarch64-linux-gnu- PLAT=sun50i_a64 DEBUG=1 bl31
 
 .. _U-Boot documentation: http://git.denx.de/?p=u-boot.git;f=board/sunxi/README.sunxi64;hb=HEAD
+
+Trusted OS dispatcher
+=====================
+
+One can boot a Trusted OS (OP-TEE OS, the bl32 image) alongside the bl31 image
+on Allwinner A64.
+
+To include the ``opteed`` dispatcher in the image, pass ``SPD=opteed`` on the
+command line when compiling the bl31 image, and make sure the loader (SPL)
+loads the Trusted OS binary to the beginning of DRAM (0x40000000).
+
diff --git a/docs/porting-guide.rst b/docs/porting-guide.rst
index a737cf4..5462cc1 100644
--- a/docs/porting-guide.rst
+++ b/docs/porting-guide.rst
@@ -1997,6 +1997,25 @@
 (that was copied during ``bl31_early_platform_setup()``) if the image exists. It
 should return NULL otherwise.
 
+Function : bl31_plat_enable_mmu [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : uint32_t
+    Return   : void
+
+This function enables the MMU. The boot code calls this function with the MMU
+and caches disabled. It must program the registers necessary to enable address
+translation; upon return, the MMU on the calling PE must be enabled.
+
+The function must honor the flags passed in the first argument. These flags are
+defined by the translation library, and can be found in the file
+``include/lib/xlat_tables/xlat_mmu_helpers.h``.
+
+On DynamIQ systems, this function must not use the stack while enabling the
+MMU, which is how the version 2 translation table library implements it.
+
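For reference, a minimal implementation can simply forward the flags to the translation library (a sketch, not mandated by the interface):

	void bl31_plat_enable_mmu(uint32_t flags)
	{
		enable_mmu_el3(flags);
	}
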
 Function : plat\_get\_syscnt\_freq2() [mandatory]
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/docs/user-guide.rst b/docs/user-guide.rst
index a40615d..68a74ed 100644
--- a/docs/user-guide.rst
+++ b/docs/user-guide.rst
@@ -454,6 +454,10 @@
    management operations. This option defaults to 0 and if it is enabled,
    then it implies ``WARMBOOT_ENABLE_DCACHE_EARLY`` is also enabled.
 
+   Note that, when ``HW_ASSISTED_COHERENCY`` is enabled, version 2 of the
+   translation library (xlat tables v2) must be used; version 1 of the
+   translation library is not supported.
+
 -  ``JUNO_AARCH32_EL3_RUNTIME``: This build flag enables you to execute EL3
    runtime software in AArch32 mode, which is required to run AArch32 on Juno.
    By default this flag is set to '0'. Enabling this flag builds BL1 and BL2 in
diff --git a/include/common/aarch32/asm_macros.S b/include/common/aarch32/asm_macros.S
index 7432222..f7d0595 100644
--- a/include/common/aarch32/asm_macros.S
+++ b/include/common/aarch32/asm_macros.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,20 @@
 #include <asm_macros_common.S>
 #include <spinlock.h>
 
+/*
+ * TLBI instruction with type specifier that implements the workaround for
+ * erratum 813419 of Cortex-A57.
+ */
+#if ERRATA_A57_813419
+#define TLB_INVALIDATE(_reg, _coproc) \
+	stcopr	_reg, _coproc; \
+	dsb	ish; \
+	stcopr	_reg, _coproc
+#else
+#define TLB_INVALIDATE(_reg, _coproc) \
+	stcopr	_reg, _coproc
+#endif
+
 #define WORD_SIZE	4
 
 	/*
diff --git a/include/common/aarch64/asm_macros.S b/include/common/aarch64/asm_macros.S
index 7c8e643..5b05045 100644
--- a/include/common/aarch64/asm_macros.S
+++ b/include/common/aarch64/asm_macros.S
@@ -10,6 +10,20 @@
 #include <asm_macros_common.S>
 #include <spinlock.h>
 
+/*
+ * TLBI instruction with type specifier that implements the workaround for
+ * erratum 813419 of Cortex-A57.
+ */
+#if ERRATA_A57_813419
+#define TLB_INVALIDATE(_type) \
+	tlbi	_type; \
+	dsb	ish; \
+	tlbi	_type
+#else
+#define TLB_INVALIDATE(_type) \
+	tlbi	_type
+#endif
+
 
 	.macro	func_prologue
 	stp	x29, x30, [sp, #-0x10]!
diff --git a/include/common/ep_info.h b/include/common/ep_info.h
index 3c2fe44..99a0390 100644
--- a/include/common/ep_info.h
+++ b/include/common/ep_info.h
@@ -29,33 +29,38 @@
 /* The following are used to set/get image attributes. */
 #define PARAM_EP_SECURITY_MASK		U(0x1)
 
+/* Secure or Non-secure image */
 #define GET_SECURITY_STATE(x) (x & PARAM_EP_SECURITY_MASK)
 #define SET_SECURITY_STATE(x, security) \
 			((x) = ((x) & ~PARAM_EP_SECURITY_MASK) | (security))
 
-#define EP_EE_MASK	U(0x2)
-#define EP_EE_SHIFT	1
-#define EP_EE_LITTLE	U(0x0)
-#define EP_EE_BIG	U(0x2)
-#define EP_GET_EE(x) (x & EP_EE_MASK)
-#define EP_SET_EE(x, ee) ((x) = ((x) & ~EP_EE_MASK) | (ee))
+/* Endianness of the image. */
+#define EP_EE_MASK		U(0x2)
+#define EP_EE_SHIFT		U(1)
+#define EP_EE_LITTLE		U(0x0)
+#define EP_EE_BIG		U(0x2)
+#define EP_GET_EE(x)		((x) & EP_EE_MASK)
+#define EP_SET_EE(x, ee)	((x) = ((x) & ~EP_EE_MASK) | (ee))
 
-#define EP_ST_MASK	U(0x4)
-#define EP_ST_DISABLE	U(0x0)
-#define EP_ST_ENABLE	U(0x4)
-#define EP_GET_ST(x) (x & EP_ST_MASK)
-#define EP_SET_ST(x, ee) ((x) = ((x) & ~EP_ST_MASK) | (ee))
+/* Enable or disable access to the secure timer from secure images. */
+#define EP_ST_MASK		U(0x4)
+#define EP_ST_DISABLE		U(0x0)
+#define EP_ST_ENABLE		U(0x4)
+#define EP_GET_ST(x)		((x) & EP_ST_MASK)
+#define EP_SET_ST(x, ee)	((x) = ((x) & ~EP_ST_MASK) | (ee))
 
-#define EP_EXE_MASK	U(0x8)
-#define NON_EXECUTABLE	U(0x0)
-#define EXECUTABLE	U(0x8)
-#define EP_GET_EXE(x) (x & EP_EXE_MASK)
-#define EP_SET_EXE(x, ee) ((x) = ((x) & ~EP_EXE_MASK) | (ee))
+/* Determine if an image is executable or not. */
+#define EP_EXE_MASK		U(0x8)
+#define NON_EXECUTABLE		U(0x0)
+#define EXECUTABLE		U(0x8)
+#define EP_GET_EXE(x)		((x) & EP_EXE_MASK)
+#define EP_SET_EXE(x, ee)	((x) = ((x) & ~EP_EXE_MASK) | (ee))
 
+/* Flag to indicate the first image that is executed. */
 #define EP_FIRST_EXE_MASK	U(0x10)
 #define EP_FIRST_EXE		U(0x10)
-#define EP_GET_FIRST_EXE(x) ((x) & EP_FIRST_EXE_MASK)
-#define EP_SET_FIRST_EXE(x, ee) ((x) = ((x) & ~EP_FIRST_EXE_MASK) | (ee))
+#define EP_GET_FIRST_EXE(x)	((x) & EP_FIRST_EXE_MASK)
+#define EP_SET_FIRST_EXE(x, ee)	((x) = ((x) & ~EP_FIRST_EXE_MASK) | (ee))
 
 #ifndef __ASSEMBLY__
 
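A hypothetical sketch of how these accessors are used together (``entry_point_info_t`` and ``SET_PARAM_HEAD`` come from ``bl_common.h``):

	entry_point_info_t ep;

	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, 0);
	SET_SECURITY_STATE(ep.h.attr, SECURE);	/* secure image */
	EP_SET_ST(ep.h.attr, EP_ST_ENABLE);	/* allow secure timer access */
	EP_SET_EE(ep.h.attr, EP_EE_LITTLE);	/* little-endian entry */
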
diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h
index 910341a..a940b63 100644
--- a/include/lib/aarch32/arch.h
+++ b/include/lib/aarch32/arch.h
@@ -340,7 +340,7 @@
 /*
  * TTBR definitions
  */
-#define TTBR_CNP_BIT		0x1
+#define TTBR_CNP_BIT		U(0x1)
 
 /*
  * CTR definitions
diff --git a/include/lib/aarch32/smccc_helpers.h b/include/lib/aarch32/smccc_helpers.h
index 240dd13..731c26f 100644
--- a/include/lib/aarch32/smccc_helpers.h
+++ b/include/lib/aarch32/smccc_helpers.h
@@ -129,13 +129,6 @@
 	SMC_RET3(_h, (_r0), (_r1), (_r2));	\
 }
 
-/* Return a UUID in the SMC return registers */
-#define SMC_UUID_RET(_h, _uuid) \
-	SMC_RET4(handle, ((const uint32_t *) &(_uuid))[0], \
-			 ((const uint32_t *) &(_uuid))[1], \
-			 ((const uint32_t *) &(_uuid))[2], \
-			 ((const uint32_t *) &(_uuid))[3])
-
 /*
  * Helper macro to retrieve the SMC parameters from smc_ctx_t.
  */
diff --git a/include/lib/aarch64/smccc_helpers.h b/include/lib/aarch64/smccc_helpers.h
index 1b33a0d..4d9217b 100644
--- a/include/lib/aarch64/smccc_helpers.h
+++ b/include/lib/aarch64/smccc_helpers.h
@@ -67,13 +67,6 @@
 #define SMC_SET_EL3(_h, _e, _v)					\
 	write_ctx_reg((get_el3state_ctx(_h)), (_e), (_v))
 
-/* Return a UUID in the SMC return registers */
-#define SMC_UUID_RET(_h, _uuid)					\
-	SMC_RET4(handle, ((const uint32_t *) &(_uuid))[0],	\
-			 ((const uint32_t *) &(_uuid))[1],	\
-			 ((const uint32_t *) &(_uuid))[2],	\
-			 ((const uint32_t *) &(_uuid))[3])
-
 /*
  * Helper macro to retrieve the SMC parameters from cpu_context_t.
  */
diff --git a/include/lib/smccc.h b/include/lib/smccc.h
index cb722b0..a07e510 100644
--- a/include/lib/smccc.h
+++ b/include/lib/smccc.h
@@ -84,5 +84,32 @@
 		{ _n0, _n1, _n2, _n3, _n4, _n5 }			\
 	}
 
+/*
+ * Return a UUID in the SMC return registers.
+ *
+ * According to section 5.3 of the SMCCC, UUIDs are returned as a single
+ * 128-bit value using the SMC32 calling convention. This value is mapped to
+ * argument registers x0-x3 on AArch64 (resp. r0-r3 on AArch32). x0 for example
+ * shall hold bytes 0 to 3, with byte 0 in the low-order bits.
+ */
+static inline uint32_t smc_uuid_word(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3)
+{
+	return ((uint32_t) b0) | (((uint32_t) b1) << 8) |
+		(((uint32_t) b2) << 16) | (((uint32_t) b3) << 24);
+}
+
+#define SMC_UUID_RET(_h, _uuid)							\
+	SMC_RET4(handle,							\
+		smc_uuid_word((_uuid).time_low[0], (_uuid).time_low[1],		\
+			      (_uuid).time_low[2], (_uuid).time_low[3]),	\
+		smc_uuid_word((_uuid).time_mid[0], (_uuid).time_mid[1],		\
+			      (_uuid).time_hi_and_version[0],			\
+			      (_uuid).time_hi_and_version[1]),			\
+		smc_uuid_word((_uuid).clock_seq_hi_and_reserved,		\
+			      (_uuid).clock_seq_low, (_uuid).node[0],		\
+			      (_uuid).node[1]),					\
+		smc_uuid_word((_uuid).node[2], (_uuid).node[3],			\
+			      (_uuid).node[4], (_uuid).node[5]))
+
 #endif /*__ASSEMBLY__*/
 #endif /* __SMCCC_H__ */
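A worked example of the byte ordering (the values are hypothetical):

	uint32_t w0 = smc_uuid_word(0xaa, 0xbb, 0xcc, 0xdd);
	/* w0 == 0xddccbbaa: byte 0 sits in the low-order bits, matching the
	 * SMC32 UUID return convention described above. */
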
diff --git a/include/lib/utils_def.h b/include/lib/utils_def.h
index 7335103..1bdf3c4 100644
--- a/include/lib/utils_def.h
+++ b/include/lib/utils_def.h
@@ -127,8 +127,8 @@
  * expected.
  */
 #define ARM_ARCH_AT_LEAST(_maj, _min) \
-	((ARM_ARCH_MAJOR > _maj) || \
-	 ((ARM_ARCH_MAJOR == _maj) && (ARM_ARCH_MINOR >= _min)))
+	((ARM_ARCH_MAJOR > (_maj)) || \
+	 ((ARM_ARCH_MAJOR == (_maj)) && (ARM_ARCH_MINOR >= (_min))))
 
 /*
  * Import an assembly or linker symbol as a C expression with the specified
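The added parentheses matter once a macro argument is itself an expression; a sketch of the misparse they prevent (``need_lse`` and ``enable_feature()`` are hypothetical names):

	/* Unparenthesized, _min would expand so that the comparison parses as
	 *   (ARM_ARCH_MINOR >= need_lse) ? 1 : 2
	 * which is always non-zero, instead of comparing ARM_ARCH_MINOR
	 * against the selected minor version. */
	if (ARM_ARCH_AT_LEAST(8, need_lse ? 1 : 2))
		enable_feature();
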
diff --git a/include/lib/xlat_tables/xlat_mmu_helpers.h b/include/lib/xlat_tables/xlat_mmu_helpers.h
index 7795317..b6c53e2 100644
--- a/include/lib/xlat_tables/xlat_mmu_helpers.h
+++ b/include/lib/xlat_tables/xlat_mmu_helpers.h
@@ -48,10 +48,15 @@
 #ifdef AARCH32
 /* AArch32 specific translation table API */
 void enable_mmu_secure(unsigned int flags);
+
+void enable_mmu_direct(unsigned int flags);
 #else
 /* AArch64 specific translation table APIs */
 void enable_mmu_el1(unsigned int flags);
 void enable_mmu_el3(unsigned int flags);
+
+void enable_mmu_direct_el1(unsigned int flags);
+void enable_mmu_direct_el3(unsigned int flags);
 #endif /* AARCH32 */
 
 int xlat_arch_is_granule_size_supported(size_t size);
diff --git a/include/lib/xlat_tables/xlat_tables_v2.h b/include/lib/xlat_tables/xlat_tables_v2.h
index 98f00d7..20a9ea1 100644
--- a/include/lib/xlat_tables/xlat_tables_v2.h
+++ b/include/lib/xlat_tables/xlat_tables_v2.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -8,12 +8,12 @@
 #define __XLAT_TABLES_V2_H__
 
 #include <xlat_tables_defs.h>
+#include <xlat_tables_v2_helpers.h>
 
 #ifndef __ASSEMBLY__
 #include <stddef.h>
 #include <stdint.h>
 #include <xlat_mmu_helpers.h>
-#include <xlat_tables_v2_helpers.h>
 
 /*
  * Default granularity size for an mmap_region_t.
@@ -123,10 +123,8 @@
 /*
  * Translation regimes supported by this library.
  */
-typedef enum xlat_regime {
-	EL1_EL0_REGIME,
-	EL3_REGIME,
-} xlat_regime_t;
+#define EL1_EL0_REGIME		1
+#define EL3_REGIME		3
 
 /*
  * Declare the translation context type.
@@ -161,12 +159,12 @@
  *   (resp. PLAT_PHY_ADDR_SPACE_SIZE) for the translation context describing the
  *   BL image currently executing.
  */
-#define REGISTER_XLAT_CONTEXT(_ctx_name, _mmap_count, _xlat_tables_count,	\
-			_virt_addr_space_size, _phy_addr_space_size)		\
-	_REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count,	\
-					 _xlat_tables_count,		\
-					 _virt_addr_space_size,		\
-					 _phy_addr_space_size,		\
+#define REGISTER_XLAT_CONTEXT(_ctx_name, _mmap_count, _xlat_tables_count, \
+			_virt_addr_space_size, _phy_addr_space_size)	\
+	_REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count),	\
+					 (_xlat_tables_count),		\
+					 (_virt_addr_space_size),	\
+					 (_phy_addr_space_size),	\
 					 IMAGE_XLAT_DEFAULT_REGIME,	\
 					"xlat_table")
 
@@ -175,20 +173,20 @@
  *
  * _xlat_regime:
  *   Specify the translation regime managed by this xlat_ctx_t instance. The
- *   values are the one from xlat_regime_t enumeration.
+ *   values are the one from the EL*_REGIME definitions.
  *
  * _section_name:
  *   Specify the name of the section where the translation tables have to be
  *   placed by the linker.
  */
-#define REGISTER_XLAT_CONTEXT2(_ctx_name, _mmap_count, _xlat_tables_count,	\
-			_virt_addr_space_size, _phy_addr_space_size,		\
-			_xlat_regime, _section_name)				\
-	_REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count,	\
-					 _xlat_tables_count,		\
-					 _virt_addr_space_size,		\
-					 _phy_addr_space_size,		\
-					 _xlat_regime, _section_name)
+#define REGISTER_XLAT_CONTEXT2(_ctx_name, _mmap_count, _xlat_tables_count, \
+			_virt_addr_space_size, _phy_addr_space_size,	\
+			_xlat_regime, _section_name)			\
+	_REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count),	\
+					 (_xlat_tables_count),		\
+					 (_virt_addr_space_size),	\
+					 (_phy_addr_space_size),	\
+					 (_xlat_regime), (_section_name))
 
 /******************************************************************************
  * Generic translation table APIs.
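A hypothetical instantiation using the new defines:

	/* An EL1&0 context with 8 mmap regions, 8 sub-tables and 4 GiB address
	 * spaces (all sizes hypothetical), in the default "xlat_table" section. */
	REGISTER_XLAT_CONTEXT2(sp, 8, 8,
			       1ULL << 32, 1ULL << 32,
			       EL1_EL0_REGIME, "xlat_table");
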
diff --git a/include/lib/xlat_tables/xlat_tables_v2_helpers.h b/include/lib/xlat_tables/xlat_tables_v2_helpers.h
index de1c2d4..56b9a93 100644
--- a/include/lib/xlat_tables/xlat_tables_v2_helpers.h
+++ b/include/lib/xlat_tables/xlat_tables_v2_helpers.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -16,6 +16,13 @@
 #error "Do not include this header file directly. Include xlat_tables_v2.h instead."
 #endif
 
+/* Offsets into mmu_cfg_params array. All parameters are 32 bits wide. */
+#define MMU_CFG_MAIR0		0
+#define MMU_CFG_TCR		1
+#define MMU_CFG_TTBR0_LO	2
+#define MMU_CFG_TTBR0_HI	3
+#define MMU_CFG_PARAM_MAX	4
+
 #ifndef __ASSEMBLY__
 
 #include <cassert.h>
@@ -24,6 +31,9 @@
 #include <xlat_tables_arch.h>
 #include <xlat_tables_defs.h>
 
+/* Register values required when enabling the MMU. */
+extern uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
 /* Forward declaration */
 struct mmap_region;
 
@@ -99,10 +109,8 @@
 	unsigned int initialized;
 
 	/*
-	 * Translation regime managed by this xlat_ctx_t. It takes the values of
-	 * the enumeration xlat_regime_t. The type is "int" to avoid a circular
-	 * dependency on xlat_tables_v2.h, but this member must be treated as
-	 * xlat_regime_t.
+	 * Translation regime managed by this xlat_ctx_t. It should be one of
+	 * the EL*_REGIME defines.
 	 */
 	int xlat_regime;
 };
@@ -147,7 +155,7 @@
 		.va_max_address = (_virt_addr_space_size) - 1,			\
 		.pa_max_address = (_phy_addr_space_size) - 1,			\
 		.mmap = _ctx_name##_mmap,					\
-		.mmap_num = _mmap_count,					\
+		.mmap_num = (_mmap_count),					\
 		.base_level = GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size),	\
 		.base_table = _ctx_name##_base_xlat_table,			\
 		.base_table_entries =						\
@@ -162,6 +170,8 @@
 		.initialized = 0,						\
 	}
 
+#endif /*__ASSEMBLY__*/
+
 #if AARCH64
 
 /*
@@ -187,6 +197,4 @@
 
 #endif /* AARCH64 */
 
-#endif /*__ASSEMBLY__*/
-
 #endif /* __XLAT_TABLES_V2_HELPERS_H__ */
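Each slot of ``mmu_cfg_params`` is 32 bits wide, so parameter ``i`` lives at byte offset ``i * 4``; this is what the ``#(MMU_CFG_xxx << 2)`` addressing in the new assembly files below relies on. The C view, as a sketch:

	uint32_t tcr = mmu_cfg_params[MMU_CFG_TCR];	/* byte offset MMU_CFG_TCR << 2 */
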
diff --git a/lib/xlat_tables/aarch32/xlat_tables.c b/lib/xlat_tables/aarch32/xlat_tables.c
index 720d446..dd63939 100644
--- a/lib/xlat_tables/aarch32/xlat_tables.c
+++ b/lib/xlat_tables/aarch32/xlat_tables.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -130,3 +130,8 @@
 	/* Ensure the MMU enable takes effect immediately */
 	isb();
 }
+
+void enable_mmu_direct(unsigned int flags)
+{
+	enable_mmu_secure(flags);
+}
diff --git a/lib/xlat_tables/aarch64/xlat_tables.c b/lib/xlat_tables/aarch64/xlat_tables.c
index a72c645..5717516 100644
--- a/lib/xlat_tables/aarch64/xlat_tables.c
+++ b/lib/xlat_tables/aarch64/xlat_tables.c
@@ -181,6 +181,11 @@
 									\
 		/* Ensure the MMU enable takes effect immediately */	\
 		isb();							\
+	}								\
+									\
+	void enable_mmu_direct_el##_el(unsigned int flags)		\
+	{								\
+		enable_mmu_el##_el(flags);				\
 	}
 
 /* Define EL1 and EL3 variants of the function enabling the MMU */
diff --git a/lib/xlat_tables/xlat_tables_private.h b/lib/xlat_tables/xlat_tables_private.h
index 50d6bd5..810c48e 100644
--- a/lib/xlat_tables/xlat_tables_private.h
+++ b/lib/xlat_tables/xlat_tables_private.h
@@ -11,6 +11,10 @@
 #include <platform_def.h>
 #include <xlat_tables_arch.h>
 
+#if HW_ASSISTED_COHERENCY
+#error xlat tables v2 must be used with HW_ASSISTED_COHERENCY
+#endif
+
 /*
  * If the platform hasn't defined a physical and a virtual address space size
  * default to ADDR_SPACE_SIZE.
diff --git a/lib/xlat_tables_v2/aarch32/enable_mmu.S b/lib/xlat_tables_v2/aarch32/enable_mmu.S
new file mode 100644
index 0000000..97cdde7
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch32/enable_mmu.S
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <xlat_tables_v2.h>
+
+	.global	enable_mmu_direct
+
+func enable_mmu_direct
+	/* Assert that MMU is turned off */
+#if ENABLE_ASSERTIONS
+	ldcopr  r1, SCTLR
+	tst	r1, #SCTLR_M_BIT
+	ASM_ASSERT(eq)
+#endif
+
+	/* Invalidate TLB entries */
+	TLB_INVALIDATE(r0, TLBIALL)
+
+	mov	r3, r0
+	ldr	r0, =mmu_cfg_params
+
+	/* MAIR0 */
+	ldr	r1, [r0, #(MMU_CFG_MAIR0 << 2)]
+	stcopr	r1, MAIR0
+
+	/* TTBCR */
+	ldr	r2, [r0, #(MMU_CFG_TCR << 2)]
+	stcopr	r2, TTBCR
+
+	/* TTBR0 */
+	ldr	r1, [r0, #(MMU_CFG_TTBR0_LO << 2)]
+	ldr	r2, [r0, #(MMU_CFG_TTBR0_HI << 2)]
+	stcopr16	r1, r2, TTBR0_64
+
+	/* TTBR1 is unused right now; set it to 0. */
+	mov	r1, #0
+	mov	r2, #0
+	stcopr16	r1, r2, TTBR1_64
+
+	/*
+	 * Ensure all translation table writes have drained into memory, the TLB
+	 * invalidation is complete, and translation register writes are
+	 * committed before enabling the MMU
+	 */
+	dsb	ish
+	isb
+
+	/* Enable the MMU, honoring the flags */
+	ldcopr  r1, SCTLR
+	ldr	r2, =(SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT)
+	orr	r1, r1, r2
+
+	/* Clear C bit if requested */
+	tst	r3, #DISABLE_DCACHE
+	bicne	r1, r1, #SCTLR_C_BIT
+
+	stcopr	r1, SCTLR
+	isb
+
+	bx	lr
+endfunc enable_mmu_direct
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index f66f802..6e97192 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -18,6 +18,8 @@
 #error ARMv7 target does not support LPAE MMU descriptors
 #endif
 
+uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
 /*
  * Returns 1 if the provided granule size is supported, 0 otherwise.
  */
@@ -59,7 +61,7 @@
 	tlbimvaais(TLBI_ADDR(va));
 }
 
-void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime __unused)
+void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime __unused)
 {
 	/*
 	 * Ensure the translation table write has drained into memory before
@@ -109,22 +111,16 @@
  * Function for enabling the MMU in Secure PL1, assuming that the page tables
  * have already been created.
  ******************************************************************************/
-void enable_mmu_arch(unsigned int flags,
-		uint64_t *base_table,
+void setup_mmu_cfg(unsigned int flags,
+		const uint64_t *base_table,
 		unsigned long long max_pa,
 		uintptr_t max_va)
 {
-	u_register_t mair0, ttbcr, sctlr;
+	u_register_t mair0, ttbcr;
 	uint64_t ttbr0;
 
 	assert(IS_IN_SECURE());
 
-	sctlr = read_sctlr();
-	assert((sctlr & SCTLR_M_BIT) == 0);
-
-	/* Invalidate TLBs at the current exception level */
-	tlbiall();
-
 	/* Set attributes in the right indices of the MAIR */
 	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
 	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
@@ -185,30 +181,9 @@
 	ttbr0 |= TTBR_CNP_BIT;
 #endif
 
-	/* Now program the relevant system registers */
-	write_mair0(mair0);
-	write_ttbcr(ttbcr);
-	write64_ttbr0(ttbr0);
-	write64_ttbr1(0);
-
-	/*
-	 * Ensure all translation table writes have drained
-	 * into memory, the TLB invalidation is complete,
-	 * and translation register writes are committed
-	 * before enabling the MMU
-	 */
-	dsbish();
-	isb();
-
-	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;
-
-	if (flags & DISABLE_DCACHE)
-		sctlr &= ~SCTLR_C_BIT;
-	else
-		sctlr |= SCTLR_C_BIT;
-
-	write_sctlr(sctlr);
-
-	/* Ensure the MMU enable takes effect immediately */
-	isb();
+	/* Now populate MMU configuration */
+	mmu_cfg_params[MMU_CFG_MAIR0] = mair0;
+	mmu_cfg_params[MMU_CFG_TCR] = ttbcr;
+	mmu_cfg_params[MMU_CFG_TTBR0_LO] = (uint32_t) ttbr0;
+	mmu_cfg_params[MMU_CFG_TTBR0_HI] = ttbr0 >> 32;
 }
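Since the array is 32 bits wide, the 64-bit TTBR0 value travels as two words; a C sketch of how the assembly side reassembles it:

	uint64_t ttbr0 = (uint64_t)mmu_cfg_params[MMU_CFG_TTBR0_LO] |
			 ((uint64_t)mmu_cfg_params[MMU_CFG_TTBR0_HI] << 32);
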
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h b/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h
index 509395d..9b41f4d 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h
@@ -14,7 +14,7 @@
  * Return the execute-never mask that will prevent instruction fetch at the
  * given translation regime.
  */
-static inline uint64_t xlat_arch_regime_get_xn_desc(xlat_regime_t regime __unused)
+static inline uint64_t xlat_arch_regime_get_xn_desc(int regime __unused)
 {
 	return UPPER_ATTRS(XN);
 }
diff --git a/lib/xlat_tables_v2/aarch64/enable_mmu.S b/lib/xlat_tables_v2/aarch64/enable_mmu.S
new file mode 100644
index 0000000..a72c7fa
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch64/enable_mmu.S
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <xlat_tables_v2.h>
+
+	.global	enable_mmu_direct_el1
+	.global	enable_mmu_direct_el3
+
+	/* Macros to read and write a system register at a given EL. */
+	.macro _msr reg_name, el, gp_reg
+	msr	\reg_name\()_el\()\el, \gp_reg
+	.endm
+
+	.macro _mrs gp_reg, reg_name, el
+	mrs	\gp_reg, \reg_name\()_el\()\el
+	.endm
+
+	.macro define_mmu_enable_func el
+	func enable_mmu_direct_\()el\el
+#if ENABLE_ASSERTIONS
+		_mrs	x1, sctlr, \el
+		tst	x1, #SCTLR_M_BIT
+		ASM_ASSERT(eq)
+#endif
+
+		/* Invalidate TLB entries */
+		.if \el == 1
+		TLB_INVALIDATE(vmalle1)
+		.else
+		.if \el == 3
+		TLB_INVALIDATE(alle3)
+		.else
+		.error "EL must be 1 or 3"
+		.endif
+		.endif
+
+		mov	x7, x0
+		ldr	x0, =mmu_cfg_params
+
+		/* MAIR */
+		ldr	w1, [x0, #(MMU_CFG_MAIR0 << 2)]
+		_msr	mair, \el, x1
+
+		/* TCR */
+		ldr	w2, [x0, #(MMU_CFG_TCR << 2)]
+		_msr	tcr, \el, x2
+
+		/* TTBR */
+		ldr	w3, [x0, #(MMU_CFG_TTBR0_LO << 2)]
+		ldr	w4, [x0, #(MMU_CFG_TTBR0_HI << 2)]
+		orr	x3, x3, x4, lsl #32
+		_msr	ttbr0, \el, x3
+
+		/*
+		 * Ensure all translation table writes have drained into memory, the TLB
+		 * invalidation is complete, and translation register writes are
+		 * committed before enabling the MMU
+		 */
+		dsb	ish
+		isb
+
+		/* Set and clear required fields of SCTLR */
+		_mrs	x4, sctlr, \el
+		mov_imm	x5, SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT
+		orr	x4, x4, x5
+
+		/* Additionally, amend SCTLR fields based on flags */
+		bic	x5, x4, #SCTLR_C_BIT
+		tst	x7, #DISABLE_DCACHE
+		csel	x4, x5, x4, ne
+
+		_msr	sctlr, \el, x4
+		isb
+
+		ret
+	endfunc enable_mmu_direct_\()el\el
+	.endm
+
+	/*
+	 * Define MMU-enabling functions for EL1 and EL3:
+	 *
+	 *  enable_mmu_direct_el1
+	 *  enable_mmu_direct_el3
+	 */
+	define_mmu_enable_func 1
+	define_mmu_enable_func 3
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index c501e70..4bbbe54 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -7,15 +7,14 @@
 #include <arch.h>
 #include <arch_helpers.h>
 #include <assert.h>
-#include <bl_common.h>
 #include <cassert.h>
-#include <common_def.h>
 #include <sys/types.h>
-#include <utils.h>
 #include <utils_def.h>
 #include <xlat_tables_v2.h>
 #include "../xlat_tables_private.h"
 
+uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
 /*
  * Returns 1 if the provided granule size is supported, 0 otherwise.
  */
@@ -126,7 +125,7 @@
 #endif
 }
 
-void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime)
+void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime)
 {
 	/*
 	 * Ensure the translation table write has drained into memory before
@@ -183,70 +182,13 @@
 	return el;
 }
 
-/*******************************************************************************
- * Macro generating the code for the function enabling the MMU in the given
- * exception level, assuming that the pagetables have already been created.
- *
- *   _el:		Exception level at which the function will run
- *   _tlbi_fct:		Function to invalidate the TLBs at the current
- *			exception level
- ******************************************************************************/
-#define DEFINE_ENABLE_MMU_EL(_el, _tlbi_fct)				\
-	static void enable_mmu_internal_el##_el(int flags,		\
-						uint64_t mair,		\
-						uint64_t tcr,		\
-						uint64_t ttbr)		\
-	{								\
-		uint32_t sctlr = read_sctlr_el##_el();			\
-		assert((sctlr & SCTLR_M_BIT) == 0);			\
-									\
-		/* Invalidate TLBs at the current exception level */	\
-		_tlbi_fct();						\
-									\
-		write_mair_el##_el(mair);				\
-		write_tcr_el##_el(tcr);					\
-									\
-		/* Set TTBR bits as well */				\
-		if (ARM_ARCH_AT_LEAST(8, 2)) {				\
-			/* Enable CnP bit so as to share page tables */	\
-			/* with all PEs. This is mandatory for */	\
-			/* ARMv8.2 implementations. */			\
-			ttbr |= TTBR_CNP_BIT;				\
-		}							\
-		write_ttbr0_el##_el(ttbr);				\
-									\
-		/* Ensure all translation table writes have drained */	\
-		/* into memory, the TLB invalidation is complete, */	\
-		/* and translation register writes are committed */	\
-		/* before enabling the MMU */				\
-		dsbish();						\
-		isb();							\
-									\
-		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
-		if (flags & DISABLE_DCACHE)				\
-			sctlr &= ~SCTLR_C_BIT;				\
-		else							\
-			sctlr |= SCTLR_C_BIT;				\
-									\
-		write_sctlr_el##_el(sctlr);				\
-									\
-		/* Ensure the MMU enable takes effect immediately */	\
-		isb();							\
-	}
-
-/* Define EL1 and EL3 variants of the function enabling the MMU */
-#if IMAGE_EL == 1
-DEFINE_ENABLE_MMU_EL(1, tlbivmalle1)
-#elif IMAGE_EL == 3
-DEFINE_ENABLE_MMU_EL(3, tlbialle3)
-#endif
-
-void enable_mmu_arch(unsigned int flags,
-		uint64_t *base_table,
+void setup_mmu_cfg(unsigned int flags,
+		const uint64_t *base_table,
 		unsigned long long max_pa,
 		uintptr_t max_va)
 {
 	uint64_t mair, ttbr, tcr;
+	uintptr_t virtual_addr_space_size;
 
 	/* Set attributes in the right indices of the MAIR. */
 	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
@@ -256,27 +198,25 @@
 	ttbr = (uint64_t) base_table;
 
 	/*
-	 * Set TCR bits as well.
-	 */
-
-	/*
 	 * Limit the input address ranges and memory region sizes translated
 	 * using TTBR0 to the given virtual address space size.
 	 */
-	assert(max_va < UINTPTR_MAX);
-	uintptr_t virtual_addr_space_size = max_va + 1;
+	assert(max_va < ((uint64_t) UINTPTR_MAX));
+
+	virtual_addr_space_size = max_va + 1;
 	assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
+
 	/*
 	 * __builtin_ctzll(0) is undefined but here we are guaranteed that
 	 * virtual_addr_space_size is in the range [1,UINTPTR_MAX].
 	 */
-	tcr = 64 - __builtin_ctzll(virtual_addr_space_size);
+	tcr = (uint64_t) 64 - __builtin_ctzll(virtual_addr_space_size);
 
 	/*
 	 * Set the cacheability and shareability attributes for memory
 	 * associated with translation table walks.
 	 */
-	if (flags & XLAT_TABLE_NC) {
+	if ((flags & XLAT_TABLE_NC) != 0) {
 		/* Inner & outer non-cacheable non-shareable. */
 		tcr |= TCR_SH_NON_SHAREABLE |
 			TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
@@ -299,10 +239,23 @@
 	 * translated using TTBR1_EL1.
 	 */
 	tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
-	enable_mmu_internal_el1(flags, mair, tcr, ttbr);
 #elif IMAGE_EL == 3
 	assert(IS_IN_EL(3));
 	tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
-	enable_mmu_internal_el3(flags, mair, tcr, ttbr);
 #endif
+
+	mmu_cfg_params[MMU_CFG_MAIR0] = (uint32_t) mair;
+	mmu_cfg_params[MMU_CFG_TCR] = (uint32_t) tcr;
+
+	/* Set TTBR bits as well */
+	if (ARM_ARCH_AT_LEAST(8, 2)) {
+		/*
+		 * Enable CnP bit so as to share page tables with all PEs. This
+		 * is mandatory for ARMv8.2 implementations.
+		 */
+		ttbr |= TTBR_CNP_BIT;
+	}
+
+	mmu_cfg_params[MMU_CFG_TTBR0_LO] = (uint32_t) ttbr;
+	mmu_cfg_params[MMU_CFG_TTBR0_HI] = (uint32_t) (ttbr >> 32);
 }
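A worked example of the T0SZ computation above (the size is hypothetical): for a 4 GiB virtual address space,

	uint64_t virtual_addr_space_size = 1ULL << 32;			/* 4 GiB */
	uint64_t tcr_t0sz = 64 - __builtin_ctzll(virtual_addr_space_size);	/* == 32 */
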
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h b/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h
index d201590..39b0a65 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h
@@ -15,7 +15,7 @@
  * Return the execute-never mask that will prevent instruction fetch at all ELs
  * that are part of the given translation regime.
  */
-static inline uint64_t xlat_arch_regime_get_xn_desc(xlat_regime_t regime)
+static inline uint64_t xlat_arch_regime_get_xn_desc(int regime)
 {
 	if (regime == EL1_EL0_REGIME) {
 		return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
diff --git a/lib/xlat_tables_v2/xlat_tables.mk b/lib/xlat_tables_v2/xlat_tables.mk
index 06dd844..b25c805 100644
--- a/lib/xlat_tables_v2/xlat_tables.mk
+++ b/lib/xlat_tables_v2/xlat_tables.mk
@@ -1,11 +1,14 @@
 #
-# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
 XLAT_TABLES_LIB_SRCS	:=	$(addprefix lib/xlat_tables_v2/,	\
+				${ARCH}/enable_mmu.S			\
 				${ARCH}/xlat_tables_arch.c		\
-				xlat_tables_internal.c)
+				xlat_tables_context.c			\
+				xlat_tables_core.c			\
+				xlat_tables_utils.c)
 
 INCLUDES		+=	-Ilib/xlat_tables_v2/${ARCH}
diff --git a/lib/xlat_tables_v2/xlat_tables_context.c b/lib/xlat_tables_v2/xlat_tables_context.c
new file mode 100644
index 0000000..0964b49
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables_context.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <platform_def.h>
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+#include "xlat_tables_private.h"
+
+/*
+ * Each platform can define the size of its physical and virtual address spaces.
+ * If the platform hasn't defined one or both of them, default to
+ * ADDR_SPACE_SIZE. The latter is deprecated, though.
+ */
+#if ERROR_DEPRECATED
+# ifdef ADDR_SPACE_SIZE
+#  error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
+# endif
+#elif defined(ADDR_SPACE_SIZE)
+# ifndef PLAT_PHY_ADDR_SPACE_SIZE
+#  define PLAT_PHY_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
+# endif
+# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
+#  define PLAT_VIRT_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
+# endif
+#endif
+
+/*
+ * Allocate and initialise the default translation context for the BL image
+ * currently executing.
+ */
+REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
+		PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
+
+void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
+		     unsigned int attr)
+{
+	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
+
+	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
+}
+
+void mmap_add(const mmap_region_t *mm)
+{
+	mmap_add_ctx(&tf_xlat_ctx, mm);
+}
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
+			    size_t size, unsigned int attr)
+{
+	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
+
+	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
+}
+
+int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
+{
+	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
+					base_va, size);
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+void init_xlat_tables(void)
+{
+	init_xlat_tables_ctx(&tf_xlat_ctx);
+}
+
+/*
+ * If dynamic allocation of new regions is disabled then by the time we call the
+ * function enabling the MMU, we'll have registered all the memory regions to
+ * map for the system's lifetime. Therefore, at this point we know the maximum
+ * physical address that will ever be mapped.
+ *
+ * If dynamic allocation is enabled then we can't make any such assumption
+ * because the maximum physical address could get pushed while adding a new
+ * region. Therefore, in this case we have to assume that the whole address
+ * space size might be mapped.
+ */
+#ifdef PLAT_XLAT_TABLES_DYNAMIC
+#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
+#else
+#define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
+#endif
+
+#ifdef AARCH32
+
+void enable_mmu_secure(unsigned int flags)
+{
+	setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+			tf_xlat_ctx.va_max_address);
+	enable_mmu_direct(flags);
+}
+
+#else
+
+void enable_mmu_el1(unsigned int flags)
+{
+	setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+			tf_xlat_ctx.va_max_address);
+	enable_mmu_direct_el1(flags);
+}
+
+void enable_mmu_el3(unsigned int flags)
+{
+	setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+			tf_xlat_ctx.va_max_address);
+	enable_mmu_direct_el3(flags);
+}
+
+#endif /* AARCH32 */
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_core.c
similarity index 60%
rename from lib/xlat_tables_v2/xlat_tables_internal.c
rename to lib/xlat_tables_v2/xlat_tables_core.c
index 5beb51e..f555524 100644
--- a/lib/xlat_tables_v2/xlat_tables_internal.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -4,47 +4,20 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#include <arch.h>
 #include <arch_helpers.h>
 #include <assert.h>
-#include <common_def.h>
 #include <debug.h>
 #include <errno.h>
 #include <platform_def.h>
 #include <string.h>
 #include <types.h>
-#include <utils.h>
+#include <utils_def.h>
 #include <xlat_tables_arch_private.h>
 #include <xlat_tables_defs.h>
 #include <xlat_tables_v2.h>
 
 #include "xlat_tables_private.h"
 
-/*
- * Each platform can define the size of its physical and virtual address spaces.
- * If the platform hasn't defined one or both of them, default to
- * ADDR_SPACE_SIZE. The latter is deprecated, though.
- */
-#if ERROR_DEPRECATED
-# ifdef ADDR_SPACE_SIZE
-#  error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
-# endif
-#elif defined(ADDR_SPACE_SIZE)
-# ifndef PLAT_PHY_ADDR_SPACE_SIZE
-#  define PLAT_PHY_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
-# endif
-# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
-#  define PLAT_VIRT_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
-# endif
-#endif
-
-/*
- * Allocate and initialise the default translation context for the BL image
- * currently executing.
- */
-REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
-		PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
-
 #if PLAT_XLAT_TABLES_DYNAMIC
 
 /*
@@ -94,7 +67,7 @@
 	ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]--;
 }
 
-/* Returns 0 if the speficied table isn't empty, otherwise 1. */
+/* Returns 0 if the specified table isn't empty, otherwise 1. */
 static int xlat_table_is_empty(xlat_ctx_t *ctx, const uint64_t *table)
 {
 	return !ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)];
@@ -115,8 +88,8 @@
 /*
  * Returns a block/page table descriptor for the given level and attributes.
  */
-static uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
-			  unsigned long long addr_pa, int level)
+uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
+		   unsigned long long addr_pa, int level)
 {
 	uint64_t desc;
 	int mem_type;
@@ -509,7 +482,7 @@
 /*
  * Recursive function that writes to the translation tables and maps the
  * specified region. On success, it returns the VA of the last byte that was
- * succesfully mapped. On error, it returns the VA of the next entry that
+ * successfully mapped. On error, it returns the VA of the next entry that
  * should have been mapped.
  */
 static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
@@ -609,23 +582,6 @@
 	return table_idx_va - 1;
 }
 
-void print_mmap(mmap_region_t *const mmap)
-{
-#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
-	tf_printf("mmap:\n");
-	mmap_region_t *mm = mmap;
-
-	while (mm->size) {
-		tf_printf(" VA:%p  PA:0x%llx  size:0x%zx  attr:0x%x",
-				(void *)mm->base_va, mm->base_pa,
-				mm->size, mm->attr);
-		tf_printf(" granularity:0x%zx\n", mm->granularity);
-		++mm;
-	};
-	tf_printf("\n");
-#endif
-}
-
 /*
  * Function that verifies that a region can be mapped.
  * Returns:
@@ -802,7 +758,7 @@
 	 * that there is free space.
 	 */
 	assert(mm_last->size == 0U);
-	
+
 	/* Make room for new region by moving other regions up by one place */
 	mm_destination = mm_cursor + 1;
 	memmove(mm_destination, mm_cursor,
@@ -823,14 +779,6 @@
 		ctx->max_va = end_va;
 }
 
-void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
-		     unsigned int attr)
-{
-	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
-	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
-}
-
-
 void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
 {
 	while (mm->size) {
@@ -839,11 +787,6 @@
 	}
 }
 
-void mmap_add(const mmap_region_t *mm)
-{
-	mmap_add_ctx(&tf_xlat_ctx, mm);
-}
-
 #if PLAT_XLAT_TABLES_DYNAMIC
 
 int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
@@ -945,13 +888,6 @@
 	return 0;
 }
 
-int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
-			    size_t size, unsigned int attr)
-{
-	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
-	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
-}
-
 /*
  * Removes the region with given base Virtual Address and size from the given
  * context.
@@ -1027,219 +963,8 @@
 	return 0;
 }
 
-int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
-{
-	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
-					base_va, size);
-}
-
 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
 
-#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
-
-/* Print the attributes of the specified block descriptor. */
-static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
-{
-	int mem_type_index = ATTR_INDEX_GET(desc);
-	xlat_regime_t xlat_regime = ctx->xlat_regime;
-
-	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
-		tf_printf("MEM");
-	} else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
-		tf_printf("NC");
-	} else {
-		assert(mem_type_index == ATTR_DEVICE_INDEX);
-		tf_printf("DEV");
-	}
-
-	const char *priv_str = "(PRIV)";
-	const char *user_str = "(USER)";
-
-	/*
-	 * Showing Privileged vs Unprivileged only makes sense for EL1&0
-	 * mappings
-	 */
-	const char *ro_str = "-RO";
-	const char *rw_str = "-RW";
-	const char *no_access_str = "-NOACCESS";
-
-	if (xlat_regime == EL3_REGIME) {
-		/* For EL3, the AP[2] bit is all what matters */
-		tf_printf("%s", (desc & LOWER_ATTRS(AP_RO)) ? ro_str : rw_str);
-	} else {
-		const char *ap_str = (desc & LOWER_ATTRS(AP_RO)) ? ro_str : rw_str;
-		tf_printf("%s", ap_str);
-		tf_printf("%s", priv_str);
-		/*
-		 * EL0 can only have the same permissions as EL1 or no
-		 * permissions at all.
-		 */
-		tf_printf("%s",
-			  (desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED))
-			  ? ap_str : no_access_str);
-		tf_printf("%s", user_str);
-	}
-
-	const char *xn_str = "-XN";
-	const char *exec_str = "-EXEC";
-
-	if (xlat_regime == EL3_REGIME) {
-		/* For EL3, the XN bit is all what matters */
-		tf_printf("%s", (UPPER_ATTRS(XN) & desc) ? xn_str : exec_str);
-	} else {
-		/* For EL0 and EL1, we need to know who has which rights */
-		tf_printf("%s", (UPPER_ATTRS(PXN) & desc) ? xn_str : exec_str);
-		tf_printf("%s", priv_str);
-
-		tf_printf("%s", (UPPER_ATTRS(UXN) & desc) ? xn_str : exec_str);
-		tf_printf("%s", user_str);
-	}
-
-	tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
-}
-
-static const char * const level_spacers[] = {
-	"[LV0] ",
-	"  [LV1] ",
-	"    [LV2] ",
-	"      [LV3] "
-};
-
-static const char *invalid_descriptors_ommited =
-		"%s(%d invalid descriptors omitted)\n";
-
-/*
- * Recursive function that reads the translation tables passed as an argument
- * and prints their status.
- */
-static void xlat_tables_print_internal(xlat_ctx_t *ctx,
-		const uintptr_t table_base_va,
-		uint64_t *const table_base, const int table_entries,
-		const unsigned int level)
-{
-	assert(level <= XLAT_TABLE_LEVEL_MAX);
-
-	uint64_t desc;
-	uintptr_t table_idx_va = table_base_va;
-	int table_idx = 0;
-
-	size_t level_size = XLAT_BLOCK_SIZE(level);
-
-	/*
-	 * Keep track of how many invalid descriptors are counted in a row.
-	 * Whenever multiple invalid descriptors are found, only the first one
-	 * is printed, and a line is added to inform about how many descriptors
-	 * have been omitted.
-	 */
-	int invalid_row_count = 0;
-
-	while (table_idx < table_entries) {
-
-		desc = table_base[table_idx];
-
-		if ((desc & DESC_MASK) == INVALID_DESC) {
-
-			if (invalid_row_count == 0) {
-				tf_printf("%sVA:%p size:0x%zx\n",
-					  level_spacers[level],
-					  (void *)table_idx_va, level_size);
-			}
-			invalid_row_count++;
-
-		} else {
-
-			if (invalid_row_count > 1) {
-				tf_printf(invalid_descriptors_ommited,
-					  level_spacers[level],
-					  invalid_row_count - 1);
-			}
-			invalid_row_count = 0;
-
-			/*
-			 * Check if this is a table or a block. Tables are only
-			 * allowed in levels other than 3, but DESC_PAGE has the
-			 * same value as DESC_TABLE, so we need to check.
-			 */
-			if (((desc & DESC_MASK) == TABLE_DESC) &&
-					(level < XLAT_TABLE_LEVEL_MAX)) {
-				/*
-				 * Do not print any PA for a table descriptor,
-				 * as it doesn't directly map physical memory
-				 * but instead points to the next translation
-				 * table in the translation table walk.
-				 */
-				tf_printf("%sVA:%p size:0x%zx\n",
-					  level_spacers[level],
-					  (void *)table_idx_va, level_size);
-
-				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
-
-				xlat_tables_print_internal(ctx, table_idx_va,
-					(uint64_t *)addr_inner,
-					XLAT_TABLE_ENTRIES, level + 1);
-			} else {
-				tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
-					  level_spacers[level],
-					  (void *)table_idx_va,
-					  (unsigned long long)(desc & TABLE_ADDR_MASK),
-					  level_size);
-				xlat_desc_print(ctx, desc);
-				tf_printf("\n");
-			}
-		}
-
-		table_idx++;
-		table_idx_va += level_size;
-	}
-
-	if (invalid_row_count > 1) {
-		tf_printf(invalid_descriptors_ommited,
-			  level_spacers[level], invalid_row_count - 1);
-	}
-}
-
-#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
-
-void xlat_tables_print(xlat_ctx_t *ctx)
-{
-#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
-	const char *xlat_regime_str;
-	if (ctx->xlat_regime == EL1_EL0_REGIME) {
-		xlat_regime_str = "1&0";
-	} else {
-		assert(ctx->xlat_regime == EL3_REGIME);
-		xlat_regime_str = "3";
-	}
-	VERBOSE("Translation tables state:\n");
-	VERBOSE("  Xlat regime:     EL%s\n", xlat_regime_str);
-	VERBOSE("  Max allowed PA:  0x%llx\n", ctx->pa_max_address);
-	VERBOSE("  Max allowed VA:  %p\n", (void *) ctx->va_max_address);
-	VERBOSE("  Max mapped PA:   0x%llx\n", ctx->max_pa);
-	VERBOSE("  Max mapped VA:   %p\n", (void *) ctx->max_va);
-
-	VERBOSE("  Initial lookup level: %i\n", ctx->base_level);
-	VERBOSE("  Entries @initial lookup level: %i\n",
-		ctx->base_table_entries);
-
-	int used_page_tables;
-#if PLAT_XLAT_TABLES_DYNAMIC
-	used_page_tables = 0;
-	for (unsigned int i = 0; i < ctx->tables_num; ++i) {
-		if (ctx->tables_mapped_regions[i] != 0)
-			++used_page_tables;
-	}
-#else
-	used_page_tables = ctx->next_table;
-#endif
-	VERBOSE("  Used %i sub-tables out of %i (spare: %i)\n",
-		used_page_tables, ctx->tables_num,
-		ctx->tables_num - used_page_tables);
-
-	xlat_tables_print_internal(ctx, 0, ctx->base_table,
-				   ctx->base_table_entries, ctx->base_level);
-#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
-}
-
 void init_xlat_tables_ctx(xlat_ctx_t *ctx)
 {
 	assert(ctx != NULL);
@@ -1249,7 +974,7 @@
 
 	mmap_region_t *mm = ctx->mmap;
 
-	print_mmap(mm);
+	xlat_mmap_print(mm);
 
 	/* All tables must be zeroed before mapping any region. */
 
@@ -1286,394 +1011,3 @@
 
 	xlat_tables_print(ctx);
 }
-
-void init_xlat_tables(void)
-{
-	init_xlat_tables_ctx(&tf_xlat_ctx);
-}
-
-/*
- * If dynamic allocation of new regions is disabled then by the time we call the
- * function enabling the MMU, we'll have registered all the memory regions to
- * map for the system's lifetime. Therefore, at this point we know the maximum
- * physical address that will ever be mapped.
- *
- * If dynamic allocation is enabled then we can't make any such assumption
- * because the maximum physical address could get pushed while adding a new
- * region. Therefore, in this case we have to assume that the whole address
- * space size might be mapped.
- */
-#ifdef PLAT_XLAT_TABLES_DYNAMIC
-#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
-#else
-#define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
-#endif
-
-#ifdef AARCH32
-
-void enable_mmu_secure(unsigned int flags)
-{
-	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-			tf_xlat_ctx.va_max_address);
-}
-
-#else
-
-void enable_mmu_el1(unsigned int flags)
-{
-	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-			tf_xlat_ctx.va_max_address);
-}
-
-void enable_mmu_el3(unsigned int flags)
-{
-	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-			tf_xlat_ctx.va_max_address);
-}
-
-#endif /* AARCH32 */
-
-/*
- * Do a translation table walk to find the block or page descriptor that maps
- * virtual_addr.
- *
- * On success, return the address of the descriptor within the translation
- * table. Its lookup level is stored in '*out_level'.
- * On error, return NULL.
- *
- * xlat_table_base
- *   Base address for the initial lookup level.
- * xlat_table_base_entries
- *   Number of entries in the translation table for the initial lookup level.
- * virt_addr_space_size
- *   Size in bytes of the virtual address space.
- */
-static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
-				       void *xlat_table_base,
-				       int xlat_table_base_entries,
-				       unsigned long long virt_addr_space_size,
-				       int *out_level)
-{
-	unsigned int start_level;
-	uint64_t *table;
-	int entries;
-
-	VERBOSE("%s(%p)\n", __func__, (void *)virtual_addr);
-
-	start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);
-	VERBOSE("Starting translation table walk from level %i\n", start_level);
-
-	table = xlat_table_base;
-	entries = xlat_table_base_entries;
-
-	for (unsigned int level = start_level;
-	     level <= XLAT_TABLE_LEVEL_MAX;
-	     ++level) {
-		int idx;
-		uint64_t desc;
-		uint64_t desc_type;
-
-		VERBOSE("Table address: %p\n", (void *)table);
-
-		idx = XLAT_TABLE_IDX(virtual_addr, level);
-		VERBOSE("Index into level %i table: %i\n", level, idx);
-		if (idx >= entries) {
-			VERBOSE("Invalid address\n");
-			return NULL;
-		}
-
-		desc = table[idx];
-		desc_type = desc & DESC_MASK;
-		VERBOSE("Descriptor at level %i: 0x%llx\n", level,
-				(unsigned long long)desc);
-
-		if (desc_type == INVALID_DESC) {
-			VERBOSE("Invalid entry (memory not mapped)\n");
-			return NULL;
-		}
-
-		if (level == XLAT_TABLE_LEVEL_MAX) {
-			/*
-			 * There can't be table entries at the final lookup
-			 * level.
-			 */
-			assert(desc_type == PAGE_DESC);
-			VERBOSE("Descriptor mapping a memory page (size: 0x%llx)\n",
-				(unsigned long long)XLAT_BLOCK_SIZE(XLAT_TABLE_LEVEL_MAX));
-			*out_level = level;
-			return &table[idx];
-		}
-
-		if (desc_type == BLOCK_DESC) {
-			VERBOSE("Descriptor mapping a memory block (size: 0x%llx)\n",
-				(unsigned long long)XLAT_BLOCK_SIZE(level));
-			*out_level = level;
-			return &table[idx];
-		}
-
-		assert(desc_type == TABLE_DESC);
-		VERBOSE("Table descriptor, continuing xlat table walk...\n");
-		table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
-		entries = XLAT_TABLE_ENTRIES;
-	}
-
-	/*
-	 * This shouldn't be reached, the translation table walk should end at
-	 * most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop.
-	 */
-	assert(0);
-
-	return NULL;
-}
-
-
-static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
-		uint32_t *attributes, uint64_t **table_entry,
-		unsigned long long *addr_pa, int *table_level)
-{
-	uint64_t *entry;
-	uint64_t desc;
-	int level;
-	unsigned long long virt_addr_space_size;
-
-	/*
-	 * Sanity-check arguments.
-	 */
-	assert(ctx != NULL);
-	assert(ctx->initialized);
-	assert(ctx->xlat_regime == EL1_EL0_REGIME || ctx->xlat_regime == EL3_REGIME);
-
-	virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1;
-	assert(virt_addr_space_size > 0);
-
-	entry = find_xlat_table_entry(base_va,
-				ctx->base_table,
-				ctx->base_table_entries,
-				virt_addr_space_size,
-				&level);
-	if (entry == NULL) {
-		WARN("Address %p is not mapped.\n", (void *)base_va);
-		return -EINVAL;
-	}
-
-	if (addr_pa != NULL) {
-		*addr_pa = *entry & TABLE_ADDR_MASK;
-	}
-
-	if (table_entry != NULL) {
-		*table_entry = entry;
-	}
-
-	if (table_level != NULL) {
-		*table_level = level;
-	}
-
-	desc = *entry;
-
-#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
-	VERBOSE("Attributes: ");
-	xlat_desc_print(ctx, desc);
-	tf_printf("\n");
-#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
-
-	assert(attributes != NULL);
-	*attributes = 0;
-
-	int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
-
-	if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
-		*attributes |= MT_MEMORY;
-	} else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
-		*attributes |= MT_NON_CACHEABLE;
-	} else {
-		assert(attr_index == ATTR_DEVICE_INDEX);
-		*attributes |= MT_DEVICE;
-	}
-
-	int ap2_bit = (desc >> AP2_SHIFT) & 1;
-
-	if (ap2_bit == AP2_RW)
-		*attributes |= MT_RW;
-
-	if (ctx->xlat_regime == EL1_EL0_REGIME) {
-		int ap1_bit = (desc >> AP1_SHIFT) & 1;
-		if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
-			*attributes |= MT_USER;
-	}
-
-	int ns_bit = (desc >> NS_SHIFT) & 1;
-
-	if (ns_bit == 1)
-		*attributes |= MT_NS;
-
-	uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
-
-	if ((desc & xn_mask) == xn_mask) {
-		*attributes |= MT_EXECUTE_NEVER;
-	} else {
-		assert((desc & xn_mask) == 0);
-	}
-
-	return 0;
-}
-
-
-int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
-		       uint32_t *attributes)
-{
-	return get_mem_attributes_internal(ctx, base_va, attributes,
-					   NULL, NULL, NULL);
-}
-
-
-int change_mem_attributes(xlat_ctx_t *ctx,
-			uintptr_t base_va,
-			size_t size,
-			uint32_t attr)
-{
-	/* Note: This implementation isn't optimized. */
-
-	assert(ctx != NULL);
-	assert(ctx->initialized);
-
-	unsigned long long virt_addr_space_size =
-		(unsigned long long)ctx->va_max_address + 1;
-	assert(virt_addr_space_size > 0);
-
-	if (!IS_PAGE_ALIGNED(base_va)) {
-		WARN("%s: Address %p is not aligned on a page boundary.\n",
-		     __func__, (void *)base_va);
-		return -EINVAL;
-	}
-
-	if (size == 0) {
-		WARN("%s: Size is 0.\n", __func__);
-		return -EINVAL;
-	}
-
-	if ((size % PAGE_SIZE) != 0) {
-		WARN("%s: Size 0x%zx is not a multiple of a page size.\n",
-		     __func__, size);
-		return -EINVAL;
-	}
-
-	if (((attr & MT_EXECUTE_NEVER) == 0) && ((attr & MT_RW) != 0)) {
-		WARN("%s() doesn't allow to remap memory as read-write and executable.\n",
-		     __func__);
-		return -EINVAL;
-	}
-
-	int pages_count = size / PAGE_SIZE;
-
-	VERBOSE("Changing memory attributes of %i pages starting from address %p...\n",
-		pages_count, (void *)base_va);
-
-	uintptr_t base_va_original = base_va;
-
-	/*
-	 * Sanity checks.
-	 */
-	for (int i = 0; i < pages_count; ++i) {
-		uint64_t *entry;
-		uint64_t desc;
-		int level;
-
-		entry = find_xlat_table_entry(base_va,
-					      ctx->base_table,
-					      ctx->base_table_entries,
-					      virt_addr_space_size,
-					      &level);
-		if (entry == NULL) {
-			WARN("Address %p is not mapped.\n", (void *)base_va);
-			return -EINVAL;
-		}
-
-		desc = *entry;
-
-		/*
-		 * Check that all the required pages are mapped at page
-		 * granularity.
-		 */
-		if (((desc & DESC_MASK) != PAGE_DESC) ||
-			(level != XLAT_TABLE_LEVEL_MAX)) {
-			WARN("Address %p is not mapped at the right granularity.\n",
-			     (void *)base_va);
-			WARN("Granularity is 0x%llx, should be 0x%x.\n",
-			     (unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
-			return -EINVAL;
-		}
-
-		/*
-		 * If the region type is device, it shouldn't be executable.
-		 */
-		int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
-		if (attr_index == ATTR_DEVICE_INDEX) {
-			if ((attr & MT_EXECUTE_NEVER) == 0) {
-				WARN("Setting device memory as executable at address %p.",
-				     (void *)base_va);
-				return -EINVAL;
-			}
-		}
-
-		base_va += PAGE_SIZE;
-	}
-
-	/* Restore original value. */
-	base_va = base_va_original;
-
-	VERBOSE("%s: All pages are mapped, now changing their attributes...\n",
-		__func__);
-
-	for (int i = 0; i < pages_count; ++i) {
-
-		uint32_t old_attr, new_attr;
-		uint64_t *entry;
-		int level;
-		unsigned long long addr_pa;
-
-		get_mem_attributes_internal(ctx, base_va, &old_attr,
-					    &entry, &addr_pa, &level);
-
-		VERBOSE("Old attributes: 0x%x\n", old_attr);
-
-		/*
-		 * From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
-		 * MT_USER/MT_PRIVILEGED are taken into account. Any other
-		 * information is ignored.
-		 */
-
-		/* Clean the old attributes so that they can be rebuilt. */
-		new_attr = old_attr & ~(MT_RW|MT_EXECUTE_NEVER|MT_USER);
-
-		/*
-		 * Update attributes, but filter out the ones this function
-		 * isn't allowed to change.
-		 */
-		new_attr |= attr & (MT_RW|MT_EXECUTE_NEVER|MT_USER);
-
-		VERBOSE("New attributes: 0x%x\n", new_attr);
-
-		/*
-		 * The break-before-make sequence requires writing an invalid
-		 * descriptor and making sure that the system sees the change
-		 * before writing the new descriptor.
-		 */
-		*entry = INVALID_DESC;
-
-		/* Invalidate any cached copy of this mapping in the TLBs. */
-		xlat_arch_tlbi_va_regime(base_va, ctx->xlat_regime);
-
-		/* Ensure completion of the invalidation. */
-		xlat_arch_tlbi_va_sync();
-
-		/* Write new descriptor */
-		*entry = xlat_desc(ctx, new_attr, addr_pa, level);
-
-		base_va += PAGE_SIZE;
-	}
-
-	/* Ensure that the last descriptor written is seen by the system. */
-	dsbish();
-
-	return 0;
-}
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index 157dd03..4a54ec5 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -50,7 +50,7 @@
  * S-EL1.
  */
 void xlat_arch_tlbi_va(uintptr_t va);
-void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime);
+void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime);
 
 /*
  * This function has to be called at the end of any code that uses the function
@@ -59,7 +59,7 @@
 void xlat_arch_tlbi_va_sync(void);
 
 /* Print VA, PA, size and attributes of all regions in the mmap array. */
-void print_mmap(mmap_region_t *const mmap);
+void xlat_mmap_print(mmap_region_t *const mmap);
 
 /*
  * Print the current state of the translation tables by reading them from
@@ -68,6 +68,12 @@
 void xlat_tables_print(xlat_ctx_t *ctx);
 
 /*
+ * Returns a block/page table descriptor for the given level and attributes.
+ */
+uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
+		   unsigned long long addr_pa, int level);
+
+/*
  * Architecture-specific initialization code.
  */
 
@@ -81,7 +87,7 @@
 unsigned long long xlat_arch_get_max_supported_pa(void);
 
 /* Enable MMU and configure it to use the specified translation tables. */
-void enable_mmu_arch(unsigned int flags, uint64_t *base_table,
+void setup_mmu_cfg(unsigned int flags, const uint64_t *base_table,
 		unsigned long long max_pa, uintptr_t max_va);
 
 /*
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
new file mode 100644
index 0000000..5a78434
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -0,0 +1,562 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform_def.h>
+#include <types.h>
+#include <utils_def.h>
+#include <xlat_tables_arch_private.h>
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+#include "xlat_tables_private.h"
+
+#if LOG_LEVEL < LOG_LEVEL_VERBOSE
+
+void xlat_mmap_print(__unused mmap_region_t *const mmap)
+{
+	/* Empty */
+}
+
+void xlat_tables_print(__unused xlat_ctx_t *ctx)
+{
+	/* Empty */
+}
+
+#else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+void xlat_mmap_print(mmap_region_t *const mmap)
+{
+	tf_printf("mmap:\n");
+	const mmap_region_t *mm = mmap;
+
+	while (mm->size != 0U) {
+		tf_printf(" VA:0x%lx  PA:0x%llx  size:0x%zx  attr:0x%x "
+			  "granularity:0x%zx\n", mm->base_va, mm->base_pa,
+			  mm->size, mm->attr, mm->granularity);
+		++mm;
+	}
+	tf_printf("\n");
+}
+
+/* Print the attributes of the specified block descriptor. */
+static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
+{
+	int mem_type_index = ATTR_INDEX_GET(desc);
+	int xlat_regime = ctx->xlat_regime;
+
+	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
+		tf_printf("MEM");
+	} else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
+		tf_printf("NC");
+	} else {
+		assert(mem_type_index == ATTR_DEVICE_INDEX);
+		tf_printf("DEV");
+	}
+
+	if (xlat_regime == EL3_REGIME) {
+		/* For EL3 only check the AP[2] and XN bits. */
+		tf_printf((desc & LOWER_ATTRS(AP_RO)) ? "-RO" : "-RW");
+		tf_printf((desc & UPPER_ATTRS(XN)) ? "-XN" : "-EXEC");
+	} else {
+		assert(xlat_regime == EL1_EL0_REGIME);
+		/*
+		 * For EL0 and EL1:
+		 * - In AArch64 PXN and UXN can be set independently but in
+		 *   AArch32 there is no UXN (XN affects both privilege levels).
+		 *   For consistency, we set them simultaneously in both cases.
+		 * - RO and RW permissions must be the same in EL1 and EL0. If
+		 *   EL0 can access that memory region, so can EL1, with the
+		 *   same permissions.
+		 */
+#if ENABLE_ASSERTIONS
+		uint64_t xn_mask = xlat_arch_regime_get_xn_desc(EL1_EL0_REGIME);
+		uint64_t xn_perm = desc & xn_mask;
+
+		assert((xn_perm == xn_mask) || (xn_perm == 0ULL));
+#endif
+		tf_printf((desc & LOWER_ATTRS(AP_RO)) ? "-RO" : "-RW");
+		/* Only check one of PXN and UXN, the other one is the same. */
+		tf_printf((desc & UPPER_ATTRS(PXN)) ? "-XN" : "-EXEC");
+		/*
+		 * Privileged regions can only be accessed from EL1, user
+		 * regions can be accessed from EL1 and EL0.
+		 */
+		tf_printf((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED))
+			  ? "-USER" : "-PRIV");
+	}
+
+	tf_printf((desc & LOWER_ATTRS(NS)) ? "-NS" : "-S");
+}
+
+static const char * const level_spacers[] = {
+	"[LV0] ",
+	"  [LV1] ",
+	"    [LV2] ",
+	"      [LV3] "
+};
+
+static const char *invalid_descriptors_ommited =
+		"%s(%d invalid descriptors omitted)\n";
+
+/*
+ * Recursive function that reads the translation tables passed as an argument
+ * and prints their status.
+ */
+static void xlat_tables_print_internal(xlat_ctx_t *ctx,
+		const uintptr_t table_base_va,
+		uint64_t *const table_base, const int table_entries,
+		const unsigned int level)
+{
+	assert(level <= XLAT_TABLE_LEVEL_MAX);
+
+	uint64_t desc;
+	uintptr_t table_idx_va = table_base_va;
+	int table_idx = 0;
+
+	size_t level_size = XLAT_BLOCK_SIZE(level);
+
+	/*
+	 * Keep track of how many invalid descriptors are counted in a row.
+	 * Whenever multiple invalid descriptors are found, only the first one
+	 * is printed, and a line is added to inform about how many descriptors
+	 * have been omitted.
+	 */
+	int invalid_row_count = 0;
+
+	while (table_idx < table_entries) {
+
+		desc = table_base[table_idx];
+
+		if ((desc & DESC_MASK) == INVALID_DESC) {
+
+			if (invalid_row_count == 0) {
+				tf_printf("%sVA:%p size:0x%zx\n",
+					  level_spacers[level],
+					  (void *)table_idx_va, level_size);
+			}
+			invalid_row_count++;
+
+		} else {
+
+			if (invalid_row_count > 1) {
+				tf_printf(invalid_descriptors_ommited,
+					  level_spacers[level],
+					  invalid_row_count - 1);
+			}
+			invalid_row_count = 0;
+
+			/*
+			 * Check if this is a table or a block. Tables are only
+			 * allowed in levels other than 3, but PAGE_DESC has the
+			 * same value as TABLE_DESC, so we need to check.
+			 */
+			if (((desc & DESC_MASK) == TABLE_DESC) &&
+					(level < XLAT_TABLE_LEVEL_MAX)) {
+				/*
+				 * Do not print any PA for a table descriptor,
+				 * as it doesn't directly map physical memory
+				 * but instead points to the next translation
+				 * table in the translation table walk.
+				 */
+				tf_printf("%sVA:%p size:0x%zx\n",
+					  level_spacers[level],
+					  (void *)table_idx_va, level_size);
+
+				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
+
+				xlat_tables_print_internal(ctx, table_idx_va,
+					(uint64_t *)addr_inner,
+					XLAT_TABLE_ENTRIES, level + 1);
+			} else {
+				tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
+					  level_spacers[level],
+					  (void *)table_idx_va,
+					  (unsigned long long)(desc & TABLE_ADDR_MASK),
+					  level_size);
+				xlat_desc_print(ctx, desc);
+				tf_printf("\n");
+			}
+		}
+
+		table_idx++;
+		table_idx_va += level_size;
+	}
+
+	if (invalid_row_count > 1) {
+		tf_printf(invalid_descriptors_ommited,
+			  level_spacers[level], invalid_row_count - 1);
+	}
+}
+
+void xlat_tables_print(xlat_ctx_t *ctx)
+{
+	const char *xlat_regime_str;
+	if (ctx->xlat_regime == EL1_EL0_REGIME) {
+		xlat_regime_str = "1&0";
+	} else {
+		assert(ctx->xlat_regime == EL3_REGIME);
+		xlat_regime_str = "3";
+	}
+	VERBOSE("Translation tables state:\n");
+	VERBOSE("  Xlat regime:     EL%s\n", xlat_regime_str);
+	VERBOSE("  Max allowed PA:  0x%llx\n", ctx->pa_max_address);
+	VERBOSE("  Max allowed VA:  %p\n", (void *) ctx->va_max_address);
+	VERBOSE("  Max mapped PA:   0x%llx\n", ctx->max_pa);
+	VERBOSE("  Max mapped VA:   %p\n", (void *) ctx->max_va);
+
+	VERBOSE("  Initial lookup level: %i\n", ctx->base_level);
+	VERBOSE("  Entries @initial lookup level: %i\n",
+		ctx->base_table_entries);
+
+	int used_page_tables;
+#if PLAT_XLAT_TABLES_DYNAMIC
+	used_page_tables = 0;
+	for (unsigned int i = 0; i < ctx->tables_num; ++i) {
+		if (ctx->tables_mapped_regions[i] != 0)
+			++used_page_tables;
+	}
+#else
+	used_page_tables = ctx->next_table;
+#endif
+	VERBOSE("  Used %i sub-tables out of %i (spare: %i)\n",
+		used_page_tables, ctx->tables_num,
+		ctx->tables_num - used_page_tables);
+
+	xlat_tables_print_internal(ctx, 0, ctx->base_table,
+				   ctx->base_table_entries, ctx->base_level);
+}
+
+#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+/*
+ * Do a translation table walk to find the block or page descriptor that maps
+ * virtual_addr.
+ *
+ * On success, return the address of the descriptor within the translation
+ * table. Its lookup level is stored in '*out_level'.
+ * On error, return NULL.
+ *
+ * xlat_table_base
+ *   Base address for the initial lookup level.
+ * xlat_table_base_entries
+ *   Number of entries in the translation table for the initial lookup level.
+ * virt_addr_space_size
+ *   Size in bytes of the virtual address space.
+ */
+static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
+				       void *xlat_table_base,
+				       int xlat_table_base_entries,
+				       unsigned long long virt_addr_space_size,
+				       int *out_level)
+{
+	unsigned int start_level;
+	uint64_t *table;
+	int entries;
+
+	start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);
+
+	table = xlat_table_base;
+	entries = xlat_table_base_entries;
+
+	for (unsigned int level = start_level;
+	     level <= XLAT_TABLE_LEVEL_MAX;
+	     ++level) {
+		int idx;
+		uint64_t desc;
+		uint64_t desc_type;
+
+		idx = XLAT_TABLE_IDX(virtual_addr, level);
+		if (idx >= entries) {
+			WARN("Missing xlat table entry at address 0x%lx\n",
+			     virtual_addr);
+			return NULL;
+		}
+
+		desc = table[idx];
+		desc_type = desc & DESC_MASK;
+
+		if (desc_type == INVALID_DESC) {
+			VERBOSE("Invalid entry (memory not mapped)\n");
+			return NULL;
+		}
+
+		if (level == XLAT_TABLE_LEVEL_MAX) {
+			/*
+			 * Only page descriptors allowed at the final lookup
+			 * level.
+			 */
+			assert(desc_type == PAGE_DESC);
+			*out_level = level;
+			return &table[idx];
+		}
+
+		if (desc_type == BLOCK_DESC) {
+			*out_level = level;
+			return &table[idx];
+		}
+
+		assert(desc_type == TABLE_DESC);
+		table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+		entries = XLAT_TABLE_ENTRIES;
+	}
+
+	/*
+	 * This shouldn't be reached; the walk should end at level
+	 * XLAT_TABLE_LEVEL_MAX at the latest and return from inside the loop.
+	 */
+	assert(0);
+
+	return NULL;
+}
+
+
+static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
+		uint32_t *attributes, uint64_t **table_entry,
+		unsigned long long *addr_pa, int *table_level)
+{
+	uint64_t *entry;
+	uint64_t desc;
+	int level;
+	unsigned long long virt_addr_space_size;
+
+	/*
+	 * Sanity-check arguments.
+	 */
+	assert(ctx != NULL);
+	assert(ctx->initialized);
+	assert(ctx->xlat_regime == EL1_EL0_REGIME || ctx->xlat_regime == EL3_REGIME);
+
+	virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1;
+	assert(virt_addr_space_size > 0);
+
+	entry = find_xlat_table_entry(base_va,
+				ctx->base_table,
+				ctx->base_table_entries,
+				virt_addr_space_size,
+				&level);
+	if (entry == NULL) {
+		WARN("Address %p is not mapped.\n", (void *)base_va);
+		return -EINVAL;
+	}
+
+	if (addr_pa != NULL) {
+		*addr_pa = *entry & TABLE_ADDR_MASK;
+	}
+
+	if (table_entry != NULL) {
+		*table_entry = entry;
+	}
+
+	if (table_level != NULL) {
+		*table_level = level;
+	}
+
+	desc = *entry;
+
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+	VERBOSE("Attributes: ");
+	xlat_desc_print(ctx, desc);
+	tf_printf("\n");
+#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+	assert(attributes != NULL);
+	*attributes = 0;
+
+	int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+
+	if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
+		*attributes |= MT_MEMORY;
+	} else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
+		*attributes |= MT_NON_CACHEABLE;
+	} else {
+		assert(attr_index == ATTR_DEVICE_INDEX);
+		*attributes |= MT_DEVICE;
+	}
+
+	int ap2_bit = (desc >> AP2_SHIFT) & 1;
+
+	if (ap2_bit == AP2_RW)
+		*attributes |= MT_RW;
+
+	if (ctx->xlat_regime == EL1_EL0_REGIME) {
+		int ap1_bit = (desc >> AP1_SHIFT) & 1;
+		if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
+			*attributes |= MT_USER;
+	}
+
+	int ns_bit = (desc >> NS_SHIFT) & 1;
+
+	if (ns_bit == 1)
+		*attributes |= MT_NS;
+
+	uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
+
+	if ((desc & xn_mask) == xn_mask) {
+		*attributes |= MT_EXECUTE_NEVER;
+	} else {
+		assert((desc & xn_mask) == 0);
+	}
+
+	return 0;
+}
+
+
+int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
+		       uint32_t *attributes)
+{
+	return get_mem_attributes_internal(ctx, base_va, attributes,
+					   NULL, NULL, NULL);
+}
+
+
+int change_mem_attributes(xlat_ctx_t *ctx,
+			uintptr_t base_va,
+			size_t size,
+			uint32_t attr)
+{
+	/* Note: This implementation isn't optimized. */
+
+	assert(ctx != NULL);
+	assert(ctx->initialized);
+
+	unsigned long long virt_addr_space_size =
+		(unsigned long long)ctx->va_max_address + 1;
+	assert(virt_addr_space_size > 0);
+
+	if (!IS_PAGE_ALIGNED(base_va)) {
+		WARN("%s: Address %p is not aligned on a page boundary.\n",
+		     __func__, (void *)base_va);
+		return -EINVAL;
+	}
+
+	if (size == 0) {
+		WARN("%s: Size is 0.\n", __func__);
+		return -EINVAL;
+	}
+
+	if ((size % PAGE_SIZE) != 0) {
+		WARN("%s: Size 0x%zx is not a multiple of the page size.\n",
+		     __func__, size);
+		return -EINVAL;
+	}
+
+	if (((attr & MT_EXECUTE_NEVER) == 0) && ((attr & MT_RW) != 0)) {
+		WARN("%s: Mapping memory as read-write and executable not allowed.\n",
+		     __func__);
+		return -EINVAL;
+	}
+
+	int pages_count = size / PAGE_SIZE;
+
+	VERBOSE("Changing memory attributes of %i pages starting from address %p...\n",
+		pages_count, (void *)base_va);
+
+	uintptr_t base_va_original = base_va;
+
+	/*
+	 * Sanity checks.
+	 */
+	for (int i = 0; i < pages_count; ++i) {
+		uint64_t *entry;
+		uint64_t desc;
+		int level;
+
+		entry = find_xlat_table_entry(base_va,
+					      ctx->base_table,
+					      ctx->base_table_entries,
+					      virt_addr_space_size,
+					      &level);
+		if (entry == NULL) {
+			WARN("Address %p is not mapped.\n", (void *)base_va);
+			return -EINVAL;
+		}
+
+		desc = *entry;
+
+		/*
+		 * Check that all the required pages are mapped at page
+		 * granularity.
+		 */
+		if (((desc & DESC_MASK) != PAGE_DESC) ||
+			(level != XLAT_TABLE_LEVEL_MAX)) {
+			WARN("Address %p is not mapped at the right granularity.\n",
+			     (void *)base_va);
+			WARN("Granularity is 0x%llx, should be 0x%x.\n",
+			     (unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
+			return -EINVAL;
+		}
+
+		/*
+		 * If the region type is device, it shouldn't be executable.
+		 */
+		int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+		if (attr_index == ATTR_DEVICE_INDEX) {
+			if ((attr & MT_EXECUTE_NEVER) == 0) {
+				WARN("Setting device memory as executable at address %p.\n",
+				     (void *)base_va);
+				return -EINVAL;
+			}
+		}
+
+		base_va += PAGE_SIZE;
+	}
+
+	/* Restore original value. */
+	base_va = base_va_original;
+
+	for (int i = 0; i < pages_count; ++i) {
+
+		uint32_t old_attr, new_attr;
+		uint64_t *entry;
+		int level;
+		unsigned long long addr_pa;
+
+		get_mem_attributes_internal(ctx, base_va, &old_attr,
+					    &entry, &addr_pa, &level);
+
+		/*
+		 * From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
+		 * MT_USER/MT_PRIVILEGED are taken into account. Any other
+		 * information is ignored.
+		 */
+
+		/* Clean the old attributes so that they can be rebuilt. */
+		new_attr = old_attr & ~(MT_RW | MT_EXECUTE_NEVER | MT_USER);
+
+		/*
+		 * Update attributes, but filter out the ones this function
+		 * isn't allowed to change.
+		 */
+		new_attr |= attr & (MT_RW | MT_EXECUTE_NEVER | MT_USER);
+
+		/*
+		 * The break-before-make sequence requires writing an invalid
+		 * descriptor and making sure that the system sees the change
+		 * before writing the new descriptor.
+		 */
+		*entry = INVALID_DESC;
+
+		/* Invalidate any cached copy of this mapping in the TLBs. */
+		xlat_arch_tlbi_va_regime(base_va, ctx->xlat_regime);
+
+		/* Ensure completion of the invalidation. */
+		xlat_arch_tlbi_va_sync();
+
+		/* Write new descriptor */
+		*entry = xlat_desc(ctx, new_attr, addr_pa, level);
+
+		base_va += PAGE_SIZE;
+	}
+
+	/* Ensure that the last descriptor written is seen by the system. */
+	dsbish();
+
+	return 0;
+}
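
For callers outside this file, the two public entry points above are meant to be
used together: read the current attributes, adjust only the permission bits, and
write the result back. A minimal sketch follows, assuming an initialized
xlat_ctx_t and the MT_* flags declared in xlat_tables_v2.h; the helper name is
hypothetical, not part of the patch.

static void make_page_ro_xn(xlat_ctx_t *ctx, uintptr_t base_va)
{
	uint32_t attr;

	if (get_mem_attributes(ctx, base_va, &attr) != 0)
		return;

	attr &= ~MT_RW;			/* Drop write access. */
	attr |= MT_EXECUTE_NEVER;	/* Forbid execution. */

	/*
	 * change_mem_attributes() runs the break-before-make sequence shown
	 * above for every page: invalid descriptor, TLBI plus sync, then the
	 * new descriptor.
	 */
	if (change_mem_attributes(ctx, base_va, PAGE_SIZE, attr) != 0)
		WARN("Could not remap page at %p\n", (void *)base_va);
}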
diff --git a/maintainers.rst b/maintainers.rst
index 2dd20ac..c01d97a 100644
--- a/maintainers.rst
+++ b/maintainers.rst
@@ -7,190 +7,170 @@
 acknowledgement from these sub-maintainers may be required before the
 maintainers merge a contribution.
 
-Maintainers
------------
+Main maintainers
+----------------
+:M: Dan Handley <dan.handley@arm.com>
+:G: `danh-arm`_
+:M: Dimitris Papastamos <dimitris.papastamos@arm.com>
+:G: `dp-arm`_
+:M: Soby Mathew <soby.mathew@arm.com>
+:G: `soby-mathew`_
 
-Dan Handley (dan.handley@arm.com, `danh-arm`_)
+Allwinner ARMv8 platform port
+-----------------------------
+:M: Andre Przywara <andre.przywara@arm.com>
+:G: `Andre-ARM`_
+:M: Samuel Holland <samuel@sholland.org>
+:G: `smaeul`_
+:F: docs/plat/allwinner.rst
+:F: plat/allwinner/
 
-David Cunado (david.cunado@arm.com, `davidcunado-arm`_)
+Armv7-A architecture port
+-------------------------
+:M: Etienne Carriere <etienne.carriere@linaro.org>
+:G: `etienne-lms`_
 
-OPTEE and QEMU platform sub-maintainer
---------------------------------------
+eMMC/UFS drivers
+----------------
+:M: Haojian Zhuang <haojian.zhuang@linaro.org>
+:G: `hzhuang1`_
+:F: drivers/emmc/
+:F: drivers/partition/
+:F: drivers/synopsys/emmc/
+:F: drivers/synopsys/ufs/
+:F: drivers/ufs/
+:F: include/drivers/dw_ufs.h
+:F: include/drivers/emmc.h
+:F: include/drivers/ufs.h
+:F: include/drivers/synopsys/dw_mmc.h
 
-Jens Wiklander (jens.wiklander@linaro.org, `jenswi-linaro`_)
+HiSilicon HiKey and HiKey960 platform ports
+-------------------------------------------
+:M: Haojian Zhuang <haojian.zhuang@linaro.org>
+:G: `hzhuang1`_
+:F: docs/plat/hikey.rst
+:F: docs/plat/hikey960.rst
+:F: plat/hisilicon/hikey/
+:F: plat/hisilicon/hikey960/
 
-Files:
-
--  docs/plat/qemu.rst
--  docs/spd/optee-dispatcher.rst
--  services/spd/opteed/\*
--  plat/qemu/\*
-
-TLK/Trusty SPDs and NVidia platforms sub-maintainer
----------------------------------------------------
-
-Varun Wadekar (vwadekar@nvidia.com, `vwadekar`_)
-
-Files:
-
--  docs/plat/nvidia-tegra.rst
--  docs/spd/tlk-dispatcher.rst
--  docs/spd/trusty-dispatcher.rst
--  include/bl32/payloads/tlk.h
--  include/lib/cpus/aarch64/denver.h
--  lib/cpus/aarch64/denver.S
--  services/spd/tlkd/\*
--  services/spd/trusty/\*
--  plat/nvidia/\*
-
-eMMC/UFS drivers and HiSilicon HiKey and HiKey960 platform sub-maintainer
--------------------------------------------------------------------------
-
-Haojian Zhuang (haojian.zhuang@linaro.org, `hzhuang1`_)
-
-Files:
-
--  docs/plat/hikey.rst
--  docs/plat/hikey960.rst
--  drivers/emmc/\*
--  drivers/partition/\*
--  drivers/synopsys/emmc/\*
--  drivers/synopsys/ufs/\*
--  drivers/ufs/\*
--  include/drivers/dw\_ufs.h
--  include/drivers/emmc.h
--  include/drivers/ufs.h
--  include/drivers/synopsys/dw\_mmc.h
--  plat/hisilicon/hikey/\*
--  plat/hisilicon/hikey960/\*
-
-Allwinner ARMv8 platform sub-maintainer
----------------------------------------
-
-Andre Przywara (andre.przywara@arm.com, `Andre-ARM`_)
-
-Files:
-
--  docs/plat/allwinner.rst
--  plat/allwinner/\*
-
-HiSilicon Poplar platform sub-maintainer
-----------------------------------------
-
-Shawn Guo (shawn.guo@linaro.org, `shawnguo2`_)
-
-Files:
-
--  docs/plat/poplar.rst
--  plat/hisilicon/poplar/\*
-
-MediaTek platform sub-maintainer
---------------------------------
-
-Yidi Lin (林以廸 yidi.lin@mediatek.com, `mtk09422`_)
-
-Files:
-
--  plat/mediatek/\*
-
-NXP QorIQ Layerscape platform sub-maintainer
---------------------------------------
-Jiafei Pan (jiafei.pan@nxp.com, `qoriq-open-source`_)
-
-Files:
-
--  docs/plat/ls1043a.rst
--  plat/layerscape/\*
-
-NXP i.MX 8 platform sub-maintainer
---------------------------------------
-
-Anson Huang (Anson.Huang@nxp.com, `Anson-Huang`_)
-
-Files:
-
--  docs/plat/imx8.rst
--  plat/imx/\*
-
-Raspberry Pi 3 platform sub-maintainer
---------------------------------------
-
-Antonio Niño Díaz (antonio.ninodiaz@arm.com, `antonio-nino-diaz-arm`_)
-
-Files:
-
--  docs/plat/rpi3.rst
--  plat/rpi3/\*
-
-RockChip platform sub-maintainer
---------------------------------
-
-Tony Xie (tony.xie@rock-chips.com, `TonyXie06`_
-or `rockchip-linux`_)
-
-Files:
-
--  plat/rockchip/\*
-
-Synquacer platform sub-maintainer
----------------------------------
-
-Sumit Garg (sumit.garg@linaro.org, `b49020`_)
-
-Files:
-
-- docs/plat/synquacer.rst
-- plat/socionext/synquacer/\*
-
-Texas Instruments platform sub-maintainer
------------------------------------------
+HiSilicon Poplar platform port
+------------------------------
+:M: Shawn Guo <shawn.guo@linaro.org>
+:G: `shawnguo2`_
+:F: docs/plat/poplar.rst
+:F: plat/hisilicon/poplar/
 
-Andrew F. Davis (afd@ti.com, `glneo`_)
+MediaTek platform ports
+-----------------------
+:M: Yidi Lin (林以廸) <yidi.lin@mediatek.com>
+:G: `mtk09422`_
+:F: plat/mediatek/
 
-Files:
+NVidia platform ports
+---------------------
+:M: Varun Wadekar <vwadekar@nvidia.com>
+:G: `vwadekar`_
+:F: docs/plat/nvidia-tegra.rst
+:F: include/lib/cpus/aarch64/denver.h
+:F: lib/cpus/aarch64/denver.S
+:F: plat/nvidia/
 
-- docs/plat/ti-k3.rst
-- plat/ti/\*
+NXP QorIQ Layerscape platform ports
+-----------------------------------
+:M: Jiafei Pan <jiafei.pan@nxp.com>
+:G: `qoriq-open-source`_
+:F: docs/plat/ls1043a.rst
+:F: plat/layerscape/
 
-UniPhier platform sub-maintainer
---------------------------------
+NXP i.MX 8 platform port
+------------------------
+:M: Anson Huang <Anson.Huang@nxp.com>
+:G: `Anson-Huang`_
+:F: docs/plat/imx8.rst
+:F: plat/imx/
 
-Masahiro Yamada (yamada.masahiro@socionext.com, `masahir0y`_)
+OP-TEE dispatcher
+-----------------
+:M: Jens Wiklander <jens.wiklander@linaro.org>
+:G: `jenswi-linaro`_
+:F: docs/spd/optee-dispatcher.rst
+:F: services/spd/opteed/
 
-Files:
+QEMU platform port
+------------------
+:M: Jens Wiklander <jens.wiklander@linaro.org>
+:G: `jenswi-linaro`_
+:F: docs/plat/qemu.rst
+:F: plat/qemu/
 
-- docs/plat/socionext-uniphier.rst
-- plat/socionext/uniphier/\*
+Raspberry Pi 3 platform port
+----------------------------
+:M: Antonio Niño Díaz <antonio.ninodiaz@arm.com>
+:G: `antonio-nino-diaz-arm`_
+:F: docs/plat/rpi3.rst
+:F: plat/rpi3/
 
-Xilinx platform sub-maintainer
-------------------------------
+RockChip platform port
+----------------------
+:M: Tony Xie <tony.xie@rock-chips.com>
+:G: `TonyXie06`_
+:G: `rockchip-linux`_
+:F: plat/rockchip/
 
-Siva Durga Prasad Paladugu (siva.durga.paladugu@xilinx.com, `sivadur`_)
+Synquacer platform port
+-----------------------
+:M: Sumit Garg <sumit.garg@linaro.org>
+:G: `b49020`_
+:F: docs/plat/synquacer.rst
+:F: plat/socionext/synquacer/
 
-Files:
+Texas Instruments platform port
+-------------------------------
+:M: Andrew F. Davis <afd@ti.com>
+:G: `glneo`_
+:F: docs/plat/ti-k3.rst
+:F: plat/ti/
 
--  docs/plat/xilinx-zynqmp.rst
--  plat/xilinx/\*
+TLK/Trusty secure payloads
+--------------------------
+:M: Varun Wadekar <vwadekar@nvidia.com>
+:G: `vwadekar`_
+:F: docs/spd/tlk-dispatcher.rst
+:F: docs/spd/trusty-dispatcher.rst
+:F: include/bl32/payloads/tlk.h
+:F: services/spd/tlkd/
+:F: services/spd/trusty/
 
-Armv7-A architecture sub-maintainer
------------------------------------
+UniPhier platform port
+----------------------
+:M: Masahiro Yamada <yamada.masahiro@socionext.com>
+:G: `masahir0y`_
+:F: docs/plat/socionext-uniphier.rst
+:F: plat/socionext/uniphier/
 
-Etienne Carriere (etienne.carriere@linaro.org, `etienne-lms`_)
+Xilinx platform port
+--------------------
+:M: Siva Durga Prasad Paladugu <siva.durga.paladugu@xilinx.com>
+:G: `sivadur`_
+:F: docs/plat/xilinx-zynqmp.rst
+:F: plat/xilinx/
 
+.. _Andre-ARM: https://github.com/Andre-ARM
+.. _Anson-Huang: https://github.com/Anson-Huang
 .. _antonio-nino-diaz-arm: https://github.com/antonio-nino-diaz-arm
+.. _b49020: https://github.com/b49020
 .. _danh-arm: https://github.com/danh-arm
-.. _davidcunado-arm: https://github.com/davidcunado-arm
-.. _jenswi-linaro: https://github.com/jenswi-linaro
-.. _vwadekar: https://github.com/vwadekar
+.. _dp-arm: https://github.com/dp-arm
+.. _etienne-lms: https://github.com/etienne-lms
+.. _glneo: https://github.com/glneo
 .. _hzhuang1: https://github.com/hzhuang1
-.. _shawnguo2: https://github.com/shawnguo2
+.. _jenswi-linaro: https://github.com/jenswi-linaro
 .. _masahir0y: https://github.com/masahir0y
 .. _mtk09422: https://github.com/mtk09422
-.. _TonyXie06: https://github.com/TonyXie06
-.. _glneo: https://github.com/glneo
-.. _sivadur: https://github.com/sivadur
-.. _rockchip-linux: https://github.com/rockchip-linux
-.. _etienne-lms: https://github.com/etienne-lms
 .. _qoriq-open-source: https://github.com/qoriq-open-source
-.. _Andre-ARM: https://github.com/Andre-ARM
-.. _b49020: https://github.com/b49020
+.. _rockchip-linux: https://github.com/rockchip-linux
+.. _shawnguo2: https://github.com/shawnguo2
+.. _sivadur: https://github.com/sivadur
+.. _soby-mathew: https://github.com/soby-mathew
+.. _TonyXie06: https://github.com/TonyXie06
+.. _vwadekar: https://github.com/vwadekar
diff --git a/plat/allwinner/common/include/platform_def.h b/plat/allwinner/common/include/platform_def.h
index ca7db2f..d039188 100644
--- a/plat/allwinner/common/include/platform_def.h
+++ b/plat/allwinner/common/include/platform_def.h
@@ -39,7 +39,13 @@
 #define PLATFORM_CORE_COUNT		(PLATFORM_CLUSTER_COUNT * \
 					 PLATFORM_MAX_CPUS_PER_CLUSTER)
 #define PLATFORM_MAX_CPUS_PER_CLUSTER	4
-#define PLATFORM_MMAP_REGIONS		4
+#define PLATFORM_MMAP_REGIONS		3
 #define PLATFORM_STACK_SIZE		(0x1000 / PLATFORM_CORE_COUNT)
 
+#ifndef SPD_none
+#ifndef BL32_BASE
+#define BL32_BASE			SUNXI_DRAM_BASE
+#endif
+#endif
+
 #endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/allwinner/common/sunxi_bl31_setup.c b/plat/allwinner/common/sunxi_bl31_setup.c
index d1f1aa1..e910ee5 100644
--- a/plat/allwinner/common/sunxi_bl31_setup.c
+++ b/plat/allwinner/common/sunxi_bl31_setup.c
@@ -18,6 +18,7 @@
 
 #include "sunxi_private.h"
 
+static entry_point_info_t bl32_image_ep_info;
 static entry_point_info_t bl33_image_ep_info;
 
 static console_16550_t console;
@@ -34,6 +35,13 @@
 	console_16550_register(SUNXI_UART0_BASE, SUNXI_UART0_CLK_IN_HZ,
 			       SUNXI_UART0_BAUDRATE, &console);
 
+#ifdef BL32_BASE
+	/* Populate entry point information for BL32 */
+	SET_PARAM_HEAD(&bl32_image_ep_info, PARAM_EP, VERSION_1, 0);
+	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
+	bl32_image_ep_info.pc = BL32_BASE;
+#endif
+
 	/* Populate entry point information for BL33 */
 	SET_PARAM_HEAD(&bl33_image_ep_info, PARAM_EP, VERSION_1, 0);
 	/*
@@ -56,6 +64,22 @@
 
 void bl31_platform_setup(void)
 {
+	const char *soc_name;
+	uint16_t soc_id = sunxi_read_soc_id();
+
+	switch (soc_id) {
+	case 0x1689:
+		soc_name = "A64/H64/R18";
+		break;
+	case 0x1718:
+		soc_name = "H5";
+		break;
+	default:
+		soc_name = "unknown";
+		break;
+	}
+	NOTICE("BL31: Detected Allwinner %s SoC (%04x)\n", soc_name, soc_id);
+
 	generic_delay_timer_init();
 
 	/* Configure the interrupt controller */
@@ -72,7 +96,12 @@
 entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
 {
 	assert(sec_state_is_valid(type) != 0);
-	assert(type == NON_SECURE);
+
+	if (type == NON_SECURE)
+		return &bl33_image_ep_info;
+
+	if ((type == SECURE) && bl32_image_ep_info.pc)
+		return &bl32_image_ep_info;
 
-	return &bl33_image_ep_info;
+	return NULL;
 }
diff --git a/plat/allwinner/common/sunxi_common.c b/plat/allwinner/common/sunxi_common.c
index e36c8b0..fc9bf20 100644
--- a/plat/allwinner/common/sunxi_common.c
+++ b/plat/allwinner/common/sunxi_common.c
@@ -4,14 +4,15 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
+#include <mmio.h>
 #include <platform.h>
 #include <platform_def.h>
 #include <sunxi_def.h>
 #include <xlat_tables_v2.h>
 
+#include "sunxi_private.h"
+
 static mmap_region_t sunxi_mmap[PLATFORM_MMAP_REGIONS + 1] = {
-	MAP_REGION_FLAT(SUNXI_ROM_BASE, SUNXI_ROM_SIZE,
-			MT_MEMORY | MT_RO | MT_SECURE),
 	MAP_REGION_FLAT(SUNXI_SRAM_BASE, SUNXI_SRAM_SIZE,
 			MT_MEMORY | MT_RW | MT_SECURE),
 	MAP_REGION_FLAT(SUNXI_DEV_BASE, SUNXI_DEV_SIZE,
@@ -54,3 +55,19 @@
 
 	enable_mmu_el3(0);
 }
+
+#define SRAM_VER_REG (SUNXI_SYSCON_BASE + 0x24)
+uint16_t sunxi_read_soc_id(void)
+{
+	uint32_t reg = mmio_read_32(SRAM_VER_REG);
+
+	/* Set bit 15 to prepare for the SOCID read. */
+	mmio_write_32(SRAM_VER_REG, reg | BIT(15));
+
+	reg = mmio_read_32(SRAM_VER_REG);
+
+	/* Deactivate the SOCID access again. */
+	mmio_write_32(SRAM_VER_REG, reg & ~BIT(15));
+
+	return reg >> 16;
+}
diff --git a/plat/allwinner/common/sunxi_cpu_ops.c b/plat/allwinner/common/sunxi_cpu_ops.c
index be72dee..aaee65c 100644
--- a/plat/allwinner/common/sunxi_cpu_ops.c
+++ b/plat/allwinner/common/sunxi_cpu_ops.c
@@ -18,7 +18,7 @@
 	if (mmio_read_32(SUNXI_CPU_POWER_CLAMP_REG(cluster, core)) == 0xff)
 		return;
 
-	INFO("PSCI: Disabling power to cluster %d core %d\n", cluster, core);
+	VERBOSE("PSCI: Disabling power to cluster %d core %d\n", cluster, core);
 
 	mmio_write_32(SUNXI_CPU_POWER_CLAMP_REG(cluster, core), 0xff);
 }
@@ -28,7 +28,7 @@
 	if (mmio_read_32(SUNXI_CPU_POWER_CLAMP_REG(cluster, core)) == 0)
 		return;
 
-	INFO("PSCI: Enabling power to cluster %d core %d\n", cluster, core);
+	VERBOSE("PSCI: Enabling power to cluster %d core %d\n", cluster, core);
 
 	/* Power enable sequence from original Allwinner sources */
 	mmio_write_32(SUNXI_CPU_POWER_CLAMP_REG(cluster, core), 0xfe);
@@ -40,7 +40,7 @@
 
 void sunxi_cpu_off(unsigned int cluster, unsigned int core)
 {
-	INFO("PSCI: Powering off cluster %d core %d\n", cluster, core);
+	VERBOSE("PSCI: Powering off cluster %d core %d\n", cluster, core);
 
 	/* Deassert DBGPWRDUP */
 	mmio_clrbits_32(SUNXI_CPUCFG_DBG_REG0, BIT(core));
@@ -54,7 +54,7 @@
 
 void sunxi_cpu_on(unsigned int cluster, unsigned int core)
 {
-	INFO("PSCI: Powering on cluster %d core %d\n", cluster, core);
+	VERBOSE("PSCI: Powering on cluster %d core %d\n", cluster, core);
 
 	/* Assert CPU core reset */
 	mmio_clrbits_32(SUNXI_CPUCFG_RST_CTRL_REG(cluster), BIT(core));
diff --git a/plat/allwinner/common/sunxi_pm.c b/plat/allwinner/common/sunxi_pm.c
index fcab130..2a1f223 100644
--- a/plat/allwinner/common/sunxi_pm.c
+++ b/plat/allwinner/common/sunxi_pm.c
@@ -76,8 +76,7 @@
 static int sunxi_validate_ns_entrypoint(uintptr_t ns_entrypoint)
 {
 	/* The non-secure entry point must be in DRAM */
-	if (ns_entrypoint >= SUNXI_DRAM_BASE &&
-	    ns_entrypoint < SUNXI_DRAM_BASE + SUNXI_DRAM_SIZE)
+	if (ns_entrypoint >= SUNXI_DRAM_BASE)
 		return PSCI_E_SUCCESS;
 
 	return PSCI_E_INVALID_ADDRESS;
diff --git a/plat/allwinner/common/sunxi_private.h b/plat/allwinner/common/sunxi_private.h
index b9f0fb4..e45f494 100644
--- a/plat/allwinner/common/sunxi_private.h
+++ b/plat/allwinner/common/sunxi_private.h
@@ -12,6 +12,7 @@
 void sunxi_cpu_on(unsigned int cluster, unsigned int core);
 void sunxi_disable_secondary_cpus(unsigned int primary_cpu);
 
+uint16_t sunxi_read_soc_id(void);
 void sunxi_security_setup(void);
 
 #endif /* __SUNXI_PRIVATE_H__ */
diff --git a/plat/allwinner/common/sunxi_security.c b/plat/allwinner/common/sunxi_security.c
index e760072..80fed6a 100644
--- a/plat/allwinner/common/sunxi_security.c
+++ b/plat/allwinner/common/sunxi_security.c
@@ -25,9 +25,9 @@
  */
 void sunxi_security_setup(void)
 {
+#ifdef SUNXI_SPC_BASE
 	int i;
 
-#ifdef SUNXI_SPC_BASE
 	INFO("Configuring SPC Controller\n");
 	/* SPC setup: set all devices to non-secure */
 	for (i = 0; i < 6; i++)
diff --git a/plat/allwinner/sun50i_a64/include/sunxi_mmap.h b/plat/allwinner/sun50i_a64/include/sunxi_mmap.h
index cb202a8..7d46487 100644
--- a/plat/allwinner/sun50i_a64/include/sunxi_mmap.h
+++ b/plat/allwinner/sun50i_a64/include/sunxi_mmap.h
@@ -27,7 +27,6 @@
 #define SUNXI_CPU_MBIST_BASE		0x01502000
 #define SUNXI_CPUCFG_BASE		0x01700000
 #define SUNXI_SYSCON_BASE		0x01c00000
-#define SUNXI_SRAM_VER_REG		(SUNXI_SYSCON_BASE + 0x24)
 #define SUNXI_DMA_BASE			0x01c02000
 #define SUNXI_KEYMEM_BASE		0x01c0b000
 #define SUNXI_SMHC0_BASE		0x01c0f000
diff --git a/plat/arm/common/arm_bl1_setup.c b/plat/arm/common/arm_bl1_setup.c
index e5e7304..d141f64 100644
--- a/plat/arm/common/arm_bl1_setup.c
+++ b/plat/arm/common/arm_bl1_setup.c
@@ -118,6 +118,12 @@
 #if LOAD_IMAGE_V2
 	arm_load_tb_fw_config();
 #endif
+	/*
+	 * Allow access to the System counter timer module and program the
+	 * counter frequency for non-secure images during FWU.
+	 */
+	arm_configure_sys_timer();
+	write_cntfrq_el0(plat_get_syscnt_freq2());
 }
 
 void bl1_platform_setup(void)
diff --git a/plat/common/aarch64/plat_common.c b/plat/common/aarch64/plat_common.c
index 409ae55..5f2972c 100644
--- a/plat/common/aarch64/plat_common.c
+++ b/plat/common/aarch64/plat_common.c
@@ -18,8 +18,6 @@
  * provide typical implementations that may be re-used by multiple
  * platforms but may also be overridden by a platform if required.
  */
-#pragma weak bl31_plat_enable_mmu
-#pragma weak bl32_plat_enable_mmu
 #pragma weak bl31_plat_runtime_setup
 #if !ERROR_DEPRECATED
 #pragma weak plat_get_syscnt_freq2
@@ -33,16 +31,6 @@
 
 #pragma weak plat_ea_handler
 
-void bl31_plat_enable_mmu(uint32_t flags)
-{
-	enable_mmu_el3(flags);
-}
-
-void bl32_plat_enable_mmu(uint32_t flags)
-{
-	enable_mmu_el1(flags);
-}
-
 void bl31_plat_runtime_setup(void)
 {
 #if MULTI_CONSOLE_API
diff --git a/plat/common/aarch64/platform_helpers.S b/plat/common/aarch64/platform_helpers.S
index 033a12f..a413f5f 100644
--- a/plat/common/aarch64/platform_helpers.S
+++ b/plat/common/aarch64/platform_helpers.S
@@ -17,6 +17,8 @@
 	.weak	plat_disable_acp
 	.weak	bl1_plat_prepare_exit
 	.weak	plat_panic_handler
+	.weak	bl31_plat_enable_mmu
+	.weak	bl32_plat_enable_mmu
 
 #if !ENABLE_PLAT_COMPAT
 	.globl	platform_get_core_pos
@@ -164,3 +166,23 @@
 	wfi
 	b	plat_panic_handler
 endfunc plat_panic_handler
+
+	/* -----------------------------------------------------
+	 * void bl31_plat_enable_mmu(uint32_t flags);
+	 *
+	 * Enable MMU in BL31.
+	 * -----------------------------------------------------
+	 */
+func bl31_plat_enable_mmu
+	b	enable_mmu_direct_el3
+endfunc bl31_plat_enable_mmu
+
+	/* -----------------------------------------------------
+	 * void bl32_plat_enable_mmu(uint32_t flags);
+	 *
+	 * Enable MMU in BL32.
+	 * -----------------------------------------------------
+	 */
+func bl32_plat_enable_mmu
+	b	enable_mmu_direct_el1
+endfunc bl32_plat_enable_mmu
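
The weak stubs above simply tail-call enable_mmu_direct_el3()/
enable_mmu_direct_el1() with the flags argument still in x0. A platform that
needs extra work around MMU bring-up can still override them in C; a minimal
sketch, assuming only the public enable_mmu_el3() entry point from
xlat_tables_v2.h:

#include <stdint.h>
#include <xlat_tables_v2.h>

/* Hypothetical platform override of the weak default above. */
void bl31_plat_enable_mmu(uint32_t flags)
{
	/* Any platform-specific setup would go here. */
	enable_mmu_el3(flags);
}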
diff --git a/plat/ti/k3/common/k3_psci.c b/plat/ti/k3/common/k3_psci.c
index 91602c8..4d6428b 100644
--- a/plat/ti/k3/common/k3_psci.c
+++ b/plat/ti/k3/common/k3_psci.c
@@ -17,12 +17,18 @@
 
 static void k3_cpu_standby(plat_local_state_t cpu_state)
 {
-	/*
-	 * Enter standby state
-	 * dsb is good practice before using wfi to enter low power states
-	 */
+	u_register_t scr;
+
+	scr = read_scr_el3();
+	/* Enable non-secure interrupts to wake the CPU */
+	write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
+	isb();
+	/* dsb is good practice before using wfi to enter low power states */
 	dsb();
+	/* Enter standby state */
 	wfi();
+	/* Restore SCR */
+	write_scr_el3(scr);
 }
 
 static int k3_pwr_domain_on(u_register_t mpidr)
diff --git a/plat/ti/k3/common/k3_topology.c b/plat/ti/k3/common/k3_topology.c
index a77c8f3..d7ac0a5 100644
--- a/plat/ti/k3/common/k3_topology.c
+++ b/plat/ti/k3/common/k3_topology.c
@@ -9,6 +9,7 @@
 
 /* The power domain tree descriptor */
 static unsigned char power_domain_tree_desc[] = {
+	PLATFORM_SYSTEM_COUNT,
 	PLATFORM_CLUSTER_COUNT,
 	K3_CLUSTER0_CORE_COUNT,
 #if K3_CLUSTER1_MSMC_PORT != UNUSED
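
With PLATFORM_SYSTEM_COUNT prepended, the descriptor encodes a three-level tree
(system, then clusters, then cores per cluster), which is what the
PLAT_MAX_PWR_LVL bump to MPIDR_AFFLVL2 below expects. For an illustrative
single-cluster configuration with two cores, the flattened array would read:

/* Illustrative values only, not taken from the K3 port. */
static unsigned char power_domain_tree_desc[] = {
	1,	/* One system-level power domain.     */
	1,	/* One cluster under the system node.  */
	2,	/* Two cores in cluster 0.             */
};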
diff --git a/plat/ti/k3/include/platform_def.h b/plat/ti/k3/include/platform_def.h
index 8856af2..ebc9c47 100644
--- a/plat/ti/k3/include/platform_def.h
+++ b/plat/ti/k3/include/platform_def.h
@@ -62,9 +62,10 @@
 
 #define PLATFORM_CLUSTER_OFFSET		K3_CLUSTER0_MSMC_PORT
 
-#define PLAT_NUM_PWR_DOMAINS		(PLATFORM_CLUSTER_COUNT + \
+#define PLAT_NUM_PWR_DOMAINS		(PLATFORM_SYSTEM_COUNT + \
+					PLATFORM_CLUSTER_COUNT + \
 					PLATFORM_CORE_COUNT)
-#define PLAT_MAX_PWR_LVL		MPIDR_AFFLVL1
+#define PLAT_MAX_PWR_LVL		MPIDR_AFFLVL2
 
 /*******************************************************************************
  * Memory layout constants
diff --git a/tools/cert_create/src/main.c b/tools/cert_create/src/main.c
index 4abfe6d..ed56620 100644
--- a/tools/cert_create/src/main.c
+++ b/tools/cert_create/src/main.c
@@ -140,8 +140,6 @@
 		i++;
 	}
 	printf("\n");
-
-	exit(0);
 }
 
 static int get_key_alg(const char *key_alg_str)
@@ -334,7 +332,7 @@
 			break;
 		case 'h':
 			print_help(argv[0], cmd_opt);
-			break;
+			exit(0);
 		case 'k':
 			save_keys = 1;
 			break;
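
The net effect of the cert_create change is that print_help() becomes a pure
usage printer while the 'h' option handler decides when to exit. A standalone
sketch of the same pattern, with illustrative names rather than the tool's own:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void print_usage(const char *prog)
{
	/* Print usage only; the caller decides whether to terminate. */
	printf("usage: %s [-h] [-k]\n", prog);
}

int main(int argc, char **argv)
{
	int opt;

	while ((opt = getopt(argc, argv, "hk")) != -1) {
		switch (opt) {
		case 'h':
			print_usage(argv[0]);
			exit(0);
		case 'k':
			/* Handle -k here. */
			break;
		default:
			print_usage(argv[0]);
			exit(1);
		}
	}

	return 0;
}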