Merge pull request #678 from soby-mathew/sm/PSCI_AArch32

Introduce AArch32 support for the PSCI library
diff --git a/Makefile b/Makefile
index a68335b..bdfb4f0 100644
--- a/Makefile
+++ b/Makefile
@@ -45,6 +45,8 @@
 # Default values for build configurations
 ################################################################################
 
+# The target build architecture. Supported values are: aarch64, aarch32.
+ARCH				:= aarch64
 # Build verbosity
 V				:= 0
 # Debug build
@@ -54,6 +56,8 @@
 PLAT				:= ${DEFAULT_PLAT}
 # SPD choice
 SPD				:= none
+# The AArch32 Secure Payload to be built as BL32 image
+AARCH32_SP			:= none
 # Base commit to perform code check on
 BASE_COMMIT			:= origin/master
 # NS timer register save and restore
@@ -198,14 +202,20 @@
 NM			:=	${CROSS_COMPILE}nm
 PP			:=	${CROSS_COMPILE}gcc -E
 
+ASFLAGS_aarch64		=	-mgeneral-regs-only
+TF_CFLAGS_aarch64	=	-mgeneral-regs-only -mstrict-align
+
+ASFLAGS_aarch32		=	-march=armv8-a
+TF_CFLAGS_aarch32	=	-march=armv8-a
+
 ASFLAGS			+= 	-nostdinc -ffreestanding -Wa,--fatal-warnings	\
 				-Werror -Wmissing-include-dirs			\
-				-mgeneral-regs-only -D__ASSEMBLY__		\
+				-D__ASSEMBLY__ $(ASFLAGS_$(ARCH))		\
 				${DEFINES} ${INCLUDES}
 TF_CFLAGS		+= 	-nostdinc -ffreestanding -Wall			\
 				-Werror -Wmissing-include-dirs			\
-				-mgeneral-regs-only -mstrict-align		\
 				-std=c99 -c -Os					\
+				$(TF_CFLAGS_$(ARCH))				\
 				${DEFINES} ${INCLUDES}
 TF_CFLAGS		+=	-ffunction-sections -fdata-sections
 
@@ -220,26 +230,26 @@
 
 BL_COMMON_SOURCES	+=	common/bl_common.c			\
 				common/tf_printf.c			\
-				common/aarch64/debug.S			\
-				lib/aarch64/cache_helpers.S		\
-				lib/aarch64/misc_helpers.S		\
-				plat/common/aarch64/platform_helpers.S	\
+				common/${ARCH}/debug.S			\
+				lib/${ARCH}/cache_helpers.S		\
+				lib/${ARCH}/misc_helpers.S		\
+				plat/common/${ARCH}/platform_helpers.S	\
 				${STDLIB_SRCS}
 
 INCLUDES		+=	-Iinclude/bl1				\
 				-Iinclude/bl31				\
 				-Iinclude/common			\
-				-Iinclude/common/aarch64		\
+				-Iinclude/common/${ARCH}		\
 				-Iinclude/drivers			\
 				-Iinclude/drivers/arm			\
 				-Iinclude/drivers/auth			\
 				-Iinclude/drivers/io			\
 				-Iinclude/drivers/ti/uart		\
 				-Iinclude/lib				\
-				-Iinclude/lib/aarch64			\
-				-Iinclude/lib/cpus/aarch64		\
+				-Iinclude/lib/${ARCH}			\
+				-Iinclude/lib/cpus/${ARCH}		\
 				-Iinclude/lib/el3_runtime		\
-				-Iinclude/lib/el3_runtime/aarch64	\
+				-Iinclude/lib/el3_runtime/${ARCH}	\
 				-Iinclude/lib/psci			\
 				-Iinclude/plat/common			\
 				-Iinclude/services			\
@@ -267,6 +277,9 @@
 ################################################################################
 
 ifneq (${SPD},none)
+ifeq (${ARCH},aarch32)
+	$(error "Error: SPD is incompatible with AArch32.")
+endif
 ifdef EL3_PAYLOAD_BASE
         $(warning "SPD and EL3_PAYLOAD_BASE are incompatible build options.")
         $(warning "The SPD and its BL32 companion will be present but ignored.")
@@ -299,6 +312,8 @@
 
 include ${PLAT_MAKEFILE_FULL}
 
+# Platform compatibility is not supported in AArch32
+ifneq (${ARCH},aarch32)
 # If the platform has not defined ENABLE_PLAT_COMPAT, then enable it by default
 ifndef ENABLE_PLAT_COMPAT
 ENABLE_PLAT_COMPAT := 1
@@ -308,6 +323,7 @@
 ifneq (${ENABLE_PLAT_COMPAT}, 0)
 include plat/compat/plat_compat.mk
 endif
+endif
 
 # Include the CPU specific operations makefile, which provides default
 # values for all CPU errata workarounds and CPU specific optimisations.
@@ -468,11 +484,18 @@
                 $(eval $(call add_define,PRELOADED_BL33_BASE))
         endif
 endif
+# Define the AARCH32/AARCH64 flag based on the ARCH flag
+ifeq (${ARCH},aarch32)
+        $(eval $(call add_define,AARCH32))
+else
+        $(eval $(call add_define,AARCH64))
+endif
 
 ################################################################################
 # Include BL specific makefiles
 ################################################################################
-
+# BL31 is not needed and BL1, BL2 & BL2U are not currently supported in AArch32
+ifneq (${ARCH},aarch32)
 ifdef BL1_SOURCES
 NEED_BL1 := yes
 include bl1/bl1.mk
@@ -496,7 +519,27 @@
 include bl31/bl31.mk
 endif
 endif
+endif
+
+ifeq (${ARCH},aarch32)
+NEED_BL32 := yes
 
+################################################################################
+# Build `AARCH32_SP` as BL32 image for AArch32
+################################################################################
+ifneq (${AARCH32_SP},none)
+# We expect to locate an sp.mk under the specified AARCH32_SP directory
+AARCH32_SP_MAKE	:=	$(wildcard bl32/${AARCH32_SP}/${AARCH32_SP}.mk)
+
+ifeq (${AARCH32_SP_MAKE},)
+  $(error Error: No bl32/${AARCH32_SP}/${AARCH32_SP}.mk located)
+endif
+
+$(info Including ${AARCH32_SP_MAKE})
+include ${AARCH32_SP_MAKE}
+endif
+
+endif
 
 ################################################################################
 # Build targets
@@ -665,7 +708,8 @@
 	@echo "  bl2            Build the BL2 binary"
 	@echo "  bl2u           Build the BL2U binary"
 	@echo "  bl31           Build the BL31 binary"
-	@echo "  bl32           Build the BL32 binary"
+	@echo "  bl32           Build the BL32 binary. If ARCH=aarch32, then "
+	@echo "                 this builds the secure payload specified by AARCH32_SP"
 	@echo "  certificates   Build the certificates (requires 'GENERATE_COT=1')"
 	@echo "  fip            Build the Firmware Image Package (FIP)"
 	@echo "  fwu_fip        Build the FWU Firmware Image Package (FIP)"
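
The hunks above also register an AARCH32 or AARCH64 preprocessor define via
add_define, matching the selected ARCH. A minimal sketch of how a shared C
source can key off that define is shown below (the function and values are
illustrative, not taken from the tree):

    /* Illustrative only: branch on the define the top-level Makefile adds
     * for the selected ARCH (AARCH32 or AARCH64). */
    static inline unsigned int ctx_gpreg_width(void)
    {
    #ifdef AARCH32
        return 32;  /* general purpose registers are 32-bit wide */
    #else
        return 64;  /* AArch64 builds get the AARCH64 define instead */
    #endif
    }
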
diff --git a/bl1/bl1_main.c b/bl1/bl1_main.c
index 3cca176..cb1bc18 100644
--- a/bl1/bl1_main.c
+++ b/bl1/bl1_main.c
@@ -38,6 +38,7 @@
 #include <platform.h>
 #include <platform_def.h>
 #include <smcc_helpers.h>
+#include <utils.h>
 #include "bl1_private.h"
 #include <uuid.h>
 
diff --git a/bl2/bl2.mk b/bl2/bl2.mk
index 1e82078..d790738 100644
--- a/bl2/bl2.mk
+++ b/bl2/bl2.mk
@@ -32,6 +32,6 @@
 				bl2/aarch64/bl2_entrypoint.S		\
 				bl2/aarch64/bl2_arch_setup.c		\
 				common/aarch64/early_exceptions.S	\
-				lib/locks/exclusive/spinlock.S
+				lib/locks/exclusive/aarch64/spinlock.S
 
 BL2_LINKERFILE		:=	bl2/bl2.ld.S
diff --git a/bl32/sp_min/aarch32/entrypoint.S b/bl32/sp_min/aarch32/entrypoint.S
new file mode 100644
index 0000000..33d35b9
--- /dev/null
+++ b/bl32/sp_min/aarch32/entrypoint.S
@@ -0,0 +1,331 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <context.h>
+#include <runtime_svc.h>
+#include <smcc_helpers.h>
+#include <smcc_macros.S>
+#include <xlat_tables.h>
+
+	.globl	sp_min_vector_table
+	.globl	sp_min_entrypoint
+	.globl	sp_min_warm_entrypoint
+
+func sp_min_vector_table
+	b	sp_min_entrypoint
+	b	plat_panic_handler	/* Undef */
+	b	handle_smc		/* Syscall */
+	b	plat_panic_handler	/* Prefetch abort */
+	b	plat_panic_handler	/* Data abort */
+	b	plat_panic_handler	/* Reserved */
+	b	plat_panic_handler	/* IRQ */
+	b	plat_panic_handler	/* FIQ */
+endfunc sp_min_vector_table
+
+func handle_smc
+	smcc_save_gp_mode_regs
+
+	/* r0 points to smc_context */
+	mov	r2, r0				/* handle */
+	ldcopr	r0, SCR
+
+	/* Save SCR in stack */
+	push	{r0}
+	and	r3, r0, #SCR_NS_BIT		/* flags */
+
+	/* Switch to Secure Mode */
+	bic	r0, #SCR_NS_BIT
+	stcopr	r0, SCR
+	isb
+	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
+	/* Check whether an SMC64 is issued */
+	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
+	beq	1f	/* SMC32 is detected */
+	mov	r0, #SMC_UNK
+	str	r0, [r2, #SMC_CTX_GPREG_R0]
+	mov	r0, r2
+	b	2f	/* Skip handling the SMC */
+1:
+	mov	r1, #0				/* cookie */
+	bl	handle_runtime_svc
+2:
+	/* r0 points to smc context */
+
+	/* Restore SCR from stack */
+	pop	{r1}
+	stcopr	r1, SCR
+	isb
+
+	b	sp_min_exit
+endfunc handle_smc
+
+/*
+ * The Cold boot/Reset entrypoint for SP_MIN
+ */
+func sp_min_entrypoint
+
+	/*
+	 * The caches and TLBs are disabled at reset. If any implementation
+	 * allows the caches/TLB to be hit while they are disabled, ensure
+	 * that they are invalidated here
+	 */
+
+	/* Make sure we are in Secure Mode */
+	ldcopr	r0, SCR
+	bic	r0, #SCR_NS_BIT
+	stcopr	r0, SCR
+	isb
+
+	/* Switch to monitor mode */
+	cps	#MODE32_mon
+	isb
+
+	/*
+	 * Set sane values for NS SCTLR as well.
+	 * Switch to non secure mode for this.
+	 */
+	ldr	r0, =(SCTLR_RES1)
+	ldcopr	r1, SCR
+	orr	r2, r1, #SCR_NS_BIT
+	stcopr	r2, SCR
+	isb
+
+	ldcopr	r2, SCTLR
+	orr	r0, r0, r2
+	stcopr	r0, SCTLR
+	isb
+
+	stcopr	r1, SCR
+	isb
+
+	/*
+	 * Set the CPU endianness before doing anything that might involve
+	 * memory reads or writes.
+	 */
+	ldcopr	r0, SCTLR
+	bic	r0, r0, #SCTLR_EE_BIT
+	stcopr	r0, SCTLR
+	isb
+
+	/* Run the CPU Specific Reset handler */
+	bl	reset_handler
+
+	/*
+	 * Enable the instruction cache and data access
+	 * alignment checks
+	 */
+	ldcopr	r0, SCTLR
+	ldr	r1, =(SCTLR_RES1 | SCTLR_A_BIT | SCTLR_I_BIT)
+	orr	r0, r0, r1
+	stcopr	r0, SCTLR
+	isb
+
+	/* Set the vector tables */
+	ldr	r0, =sp_min_vector_table
+	stcopr	r0, VBAR
+	stcopr	r0, MVBAR
+	isb
+
+	/*
+	 * Enable the SIF bit to disable instruction fetches
+	 * from Non-secure memory.
+	 */
+	ldcopr	r0, SCR
+	orr	r0, r0, #SCR_SIF_BIT
+	stcopr	r0, SCR
+
+	/*
+	 * Enable the SError interrupt now that the exception vectors have been
+	 * set up.
+	 */
+	cpsie   a
+	isb
+
+	/* Enable access to Advanced SIMD registers */
+	ldcopr	r0, NSACR
+	bic	r0, r0, #NSASEDIS_BIT
+	orr	r0, r0, #(NASCR_CP10_BIT | NASCR_CP11_BIT)
+	stcopr	r0, NSACR
+	isb
+
+	/*
+	 * Enable access to Advanced SIMD, Floating point and to the Trace
+	 * functionality as well.
+	 */
+	ldcopr	r0, CPACR
+	bic	r0, r0, #ASEDIS_BIT
+	bic	r0, r0, #TRCDIS_BIT
+	orr	r0, r0, #CPACR_ENABLE_FP_ACCESS
+	stcopr	r0, CPACR
+	isb
+
+	vmrs	r0, FPEXC
+	orr	r0, r0, #FPEXC_EN_BIT
+	vmsr	FPEXC, r0
+
+	/* Detect whether Warm or Cold boot */
+	bl	plat_get_my_entrypoint
+	cmp	r0, #0
+	/* If warm boot detected, jump to warm boot entry */
+	bxne	r0
+
+	/* Setup C runtime stack */
+	bl	plat_set_my_stack
+
+	/* Perform platform specific memory initialization */
+	bl	platform_mem_init
+
+	/* Initialize the C Runtime Environment */
+
+	/*
+	 * Invalidate the RW memory used by SP_MIN image. This includes
+	 * the data and NOBITS sections. This is done to safeguard against
+	 * possible corruption of this memory by dirty cache lines in a system
+	 * cache as a result of use by an earlier boot loader stage.
+	 */
+	ldr	r0, =__RW_START__
+	ldr	r1, =__RW_END__
+	sub	r1, r1, r0
+	bl	inv_dcache_range
+
+	ldr	r0, =__BSS_START__
+	ldr	r1, =__BSS_SIZE__
+	bl	zeromem
+
+#if USE_COHERENT_MEM
+	ldr	r0, =__COHERENT_RAM_START__
+	ldr	r1, =__COHERENT_RAM_UNALIGNED_SIZE__
+	bl	zeromem
+#endif
+
+	/* Perform platform specific early arch. setup */
+	bl	sp_min_early_platform_setup
+	bl	sp_min_plat_arch_setup
+
+	/* Jump to the main function */
+	bl	sp_min_main
+
+	/* -------------------------------------------------------------
+	 * Clean the .data & .bss sections to main memory. This ensures
+	 * that any global data which was initialised by the primary CPU
+	 * is visible to secondary CPUs before they enable their data
+	 * caches and participate in coherency.
+	 * -------------------------------------------------------------
+	 */
+	ldr	r0, =__DATA_START__
+	ldr	r1, =__DATA_END__
+	sub	r1, r1, r0
+	bl	clean_dcache_range
+
+	ldr	r0, =__BSS_START__
+	ldr	r1, =__BSS_END__
+	sub	r1, r1, r0
+	bl	clean_dcache_range
+
+	/* Program the registers in cpu_context and exit monitor mode */
+	mov	r0, #NON_SECURE
+	bl	cm_get_context
+
+	/* Restore the SCR */
+	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
+	stcopr	r2, SCR
+	isb
+
+	/* Restore the SCTLR  */
+	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
+	stcopr	r2, SCTLR
+
+	bl	smc_get_next_ctx
+	/* The other cpu_context registers have been copied to smc context */
+	b	sp_min_exit
+endfunc sp_min_entrypoint
+
+/*
+ * The Warm boot entrypoint for SP_MIN.
+ */
+func sp_min_warm_entrypoint
+
+	/* Setup C runtime stack */
+	bl	plat_set_my_stack
+
+	/* --------------------------------------------
+	 * Enable the MMU with the DCache disabled. It
+	 * is safe to use stacks allocated in normal
+	 * memory as a result. All memory accesses are
+	 * marked nGnRnE when the MMU is disabled. So
+	 * all the stack writes will make it to memory.
+	 * All memory accesses are marked Non-cacheable
+	 * when the MMU is enabled but D$ is disabled.
+	 * So used stack memory is guaranteed to be
+	 * visible immediately after the MMU is enabled
+	 * Enabling the DCache at the same time as the
+	 * MMU can lead to speculatively fetched and
+	 * possibly stale stack memory being read from
+	 * other caches. This can lead to coherency
+	 * issues.
+	 * --------------------------------------------
+	 */
+	mov	r0, #DISABLE_DCACHE
+	bl	bl32_plat_enable_mmu
+
+	bl	sp_min_warm_boot
+
+	/* Program the registers in cpu_context and exit monitor mode */
+	mov	r0, #NON_SECURE
+	bl	cm_get_context
+
+	/* Restore the SCR */
+	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
+	stcopr	r2, SCR
+	isb
+
+	/* Restore the SCTLR  */
+	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
+	stcopr	r2, SCTLR
+
+	bl	smc_get_next_ctx
+
+	/* The other cpu_context registers have been copied to smc context */
+	b	sp_min_exit
+endfunc sp_min_warm_entrypoint
+
+/*
+ * The function to restore the registers from SMC context and return
+ * to the mode indicated by the saved SPSR.
+ *
+ * Arguments : r0 must point to the SMC context to restore from.
+ */
+func sp_min_exit
+	smcc_restore_gp_mode_regs
+	eret
+endfunc sp_min_exit
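
The handle_smc path above only services SMC32 calls: the calling-convention
bit of the function ID is tested and SMC64 requests get SMC_UNK. A small
stand-alone illustration of that check (bit positions follow the SMC Calling
Convention; the constants are spelled out here instead of using the TF header
names):

    #include <stdint.h>
    #include <stdio.h>

    /* SMC function ID layout: bit 31 = fast call, bit 30 = calling
     * convention (0: SMC32, 1: SMC64), bits 29:24 = owning entity number. */
    #define FID_CC_BIT      (1u << 30)
    #define FID_OEN(fid)    (((fid) >> 24) & 0x3f)

    int main(void)
    {
        uint32_t fids[] = { 0x84000000u,    /* SMC32 fast call, OEN 4 */
                            0xC4000001u };  /* SMC64 fast call, OEN 4 */

        for (unsigned int i = 0; i < 2; i++) {
            uint32_t fid = fids[i];
            printf("fid 0x%08x: %s, OEN %u\n", (unsigned int)fid,
                   (fid & FID_CC_BIT) ? "SMC64 (rejected by SP_MIN)" : "SMC32",
                   (unsigned int)FID_OEN(fid));
        }
        return 0;
    }
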
diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S
new file mode 100644
index 0000000..b158db1
--- /dev/null
+++ b/bl32/sp_min/sp_min.ld.S
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform_def.h>
+
+OUTPUT_FORMAT(elf32-littlearm)
+OUTPUT_ARCH(arm)
+ENTRY(sp_min_vector_table)
+
+MEMORY {
+    RAM (rwx): ORIGIN = BL32_BASE, LENGTH = BL32_LIMIT - BL32_BASE
+}
+
+
+SECTIONS
+{
+    . = BL32_BASE;
+   ASSERT(. == ALIGN(4096),
+          "BL32_BASE address is not aligned on a page boundary.")
+
+#if SEPARATE_CODE_AND_RODATA
+    .text . : {
+        __TEXT_START__ = .;
+        *entrypoint.o(.text*)
+        *(.text*)
+        . = NEXT(4096);
+        __TEXT_END__ = .;
+    } >RAM
+
+    .rodata . : {
+        __RODATA_START__ = .;
+        *(.rodata*)
+
+        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
+        . = ALIGN(4);
+        __RT_SVC_DESCS_START__ = .;
+        KEEP(*(rt_svc_descs))
+        __RT_SVC_DESCS_END__ = .;
+
+        /*
+         * Ensure 4-byte alignment for cpu_ops so that its fields are also
+         * aligned. Also ensure cpu_ops inclusion.
+         */
+        . = ALIGN(4);
+        __CPU_OPS_START__ = .;
+        KEEP(*(cpu_ops))
+        __CPU_OPS_END__ = .;
+
+        . = NEXT(4096);
+        __RODATA_END__ = .;
+    } >RAM
+#else
+    ro . : {
+        __RO_START__ = .;
+        *entrypoint.o(.text*)
+        *(.text*)
+        *(.rodata*)
+
+        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
+        . = ALIGN(4);
+        __RT_SVC_DESCS_START__ = .;
+        KEEP(*(rt_svc_descs))
+        __RT_SVC_DESCS_END__ = .;
+
+        /*
+         * Ensure 4-byte alignment for cpu_ops so that its fields are also
+         * aligned. Also ensure cpu_ops inclusion.
+         */
+        . = ALIGN(4);
+        __CPU_OPS_START__ = .;
+        KEEP(*(cpu_ops))
+        __CPU_OPS_END__ = .;
+
+        __RO_END_UNALIGNED__ = .;
+
+        /*
+         * Memory page(s) mapped to this section will be marked as
+         * read-only, executable.  No RW data from the next section must
+         * creep in.  Ensure the rest of the current memory block is unused.
+         */
+        . = NEXT(4096);
+        __RO_END__ = .;
+    } >RAM
+#endif
+
+    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
+           "cpu_ops not defined for this platform.")
+    /*
+     * Define a linker symbol to mark start of the RW memory area for this
+     * image.
+     */
+    __RW_START__ = . ;
+
+    .data . : {
+        __DATA_START__ = .;
+        *(.data*)
+        __DATA_END__ = .;
+    } >RAM
+
+    stacks (NOLOAD) : {
+        __STACKS_START__ = .;
+        *(tzfw_normal_stacks)
+        __STACKS_END__ = .;
+    } >RAM
+
+    /*
+     * The .bss section gets initialised to 0 at runtime.
+     * Its base address must be 16-byte aligned.
+     */
+    .bss (NOLOAD) : ALIGN(16) {
+        __BSS_START__ = .;
+        *(.bss*)
+        *(COMMON)
+#if !USE_COHERENT_MEM
+        /*
+         * Bakery locks are stored in normal .bss memory
+         *
+         * Each lock's data is spread across multiple cache lines, one per CPU,
+         * but multiple locks can share the same cache line.
+         * The compiler will allocate enough memory for one CPU's bakery locks,
+         * the remaining cache lines are allocated by the linker script
+         */
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __BAKERY_LOCK_START__ = .;
+        *(bakery_lock)
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(. - __BAKERY_LOCK_START__);
+        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
+        __BAKERY_LOCK_END__ = .;
+#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
+    ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
+        "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
+#endif
+#endif
+
+#if ENABLE_PMF
+        /*
+         * Time-stamps are stored in normal .bss memory
+         *
+         * The compiler will allocate enough memory for one CPU's time-stamps,
+         * the remaining memory for other CPU's is allocated by the
+         * linker script
+         */
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __PMF_TIMESTAMP_START__ = .;
+        KEEP(*(pmf_timestamp_array))
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __PMF_PERCPU_TIMESTAMP_END__ = .;
+        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
+        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
+        __PMF_TIMESTAMP_END__ = .;
+#endif /* ENABLE_PMF */
+
+        __BSS_END__ = .;
+    } >RAM
+
+    /*
+     * The xlat_table section is for full, aligned page tables (4K).
+     * Removing them from .bss avoids forcing 4K alignment on
+     * the .bss section and eliminates the unnecessary zero init
+     */
+    xlat_table (NOLOAD) : {
+        *(xlat_table)
+    } >RAM
+
+     __BSS_SIZE__ = SIZEOF(.bss);
+
+#if USE_COHERENT_MEM
+    /*
+     * The base address of the coherent memory section must be page-aligned (4K)
+     * to guarantee that the coherent data are stored on their own pages and
+     * are not mixed with normal data.  This is required to set up the correct
+     * memory attributes for the coherent data page tables.
+     */
+    coherent_ram (NOLOAD) : ALIGN(4096) {
+        __COHERENT_RAM_START__ = .;
+        /*
+         * Bakery locks are stored in coherent memory
+         *
+         * Each lock's data is contiguous and fully allocated by the compiler
+         */
+        *(bakery_lock)
+        *(tzfw_coherent_mem)
+        __COHERENT_RAM_END_UNALIGNED__ = .;
+        /*
+         * Memory page(s) mapped to this section will be marked
+         * as device memory.  No other unexpected data must creep in.
+         * Ensure the rest of the current memory page is unused.
+         */
+        . = NEXT(4096);
+        __COHERENT_RAM_END__ = .;
+    } >RAM
+
+    __COHERENT_RAM_UNALIGNED_SIZE__ =
+        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
+#endif
+
+    /*
+     * Define a linker symbol to mark end of the RW memory area for this
+     * image.
+     */
+    __RW_END__ = .;
+
+   __BL32_END__ = .;
+}
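
The bakery lock handling in the .bss rules above relies on the compiler
emitting only the calling CPU's locks; the linker script rounds that region up
to a cache writeback granule and reserves (PLATFORM_CORE_COUNT - 1) further
copies. A quick check of that arithmetic with illustrative values (not taken
from any platform port):

    #include <stdio.h>

    int main(void)
    {
        unsigned int granule = 64;          /* CACHE_WRITEBACK_GRANULE (assumed) */
        unsigned int cores = 8;             /* PLATFORM_CORE_COUNT (assumed) */
        unsigned int one_cpu_locks = 20;    /* bytes emitted by the compiler (assumed) */

        /* Mirror of the ALIGN(CACHE_WRITEBACK_GRANULE) rounding in the script */
        unsigned int percpu = ((one_cpu_locks + granule - 1) / granule) * granule;
        unsigned int total = percpu * cores;

        printf("per-CPU bakery region: %u bytes, total: %u bytes\n",
               percpu, total);              /* prints 64 and 512 here */
        return 0;
    }
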
diff --git a/bl32/sp_min/sp_min.mk b/bl32/sp_min/sp_min.mk
new file mode 100644
index 0000000..a8b572e
--- /dev/null
+++ b/bl32/sp_min/sp_min.mk
@@ -0,0 +1,63 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# Neither the name of ARM nor the names of its contributors may be used
+# to endorse or promote products derived from this software without specific
+# prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+
+ifneq (${ARCH}, aarch32)
+	$(error SP_MIN is only supported on AArch32 platforms)
+endif
+
+include lib/psci/psci_lib.mk
+
+INCLUDES		+=	-Iinclude/bl32/sp_min
+
+BL32_SOURCES		+=	bl32/sp_min/sp_min_main.c		\
+				bl32/sp_min/aarch32/entrypoint.S	\
+				common/runtime_svc.c			\
+				services/std_svc/std_svc_setup.c	\
+				${PSCI_LIB_SOURCES}
+
+ifeq (${ENABLE_PMF}, 1)
+BL32_SOURCES		+=	lib/pmf/pmf_main.c
+endif
+
+BL32_LINKERFILE	:=	bl32/sp_min/sp_min.ld.S
+
+# Include the platform-specific SP_MIN Makefile
+# If no platform-specific SP_MIN Makefile exists, it means SP_MIN is not supported
+# on this platform.
+SP_MIN_PLAT_MAKEFILE := $(wildcard ${PLAT_DIR}/sp_min/sp_min-${PLAT}.mk)
+ifeq (,${SP_MIN_PLAT_MAKEFILE})
+  $(error SP_MIN is not supported on platform ${PLAT})
+else
+  include ${SP_MIN_PLAT_MAKEFILE}
+endif
+
+RESET_TO_SP_MIN	:= 1
+$(eval $(call add_define,RESET_TO_SP_MIN))
+$(eval $(call assert_boolean,RESET_TO_SP_MIN))
diff --git a/bl32/sp_min/sp_min_main.c b/bl32/sp_min/sp_min_main.c
new file mode 100644
index 0000000..31cab3d
--- /dev/null
+++ b/bl32/sp_min/sp_min_main.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <platform_sp_min.h>
+#include <psci.h>
+#include <runtime_svc.h>
+#include <smcc_helpers.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <types.h>
+#include "sp_min_private.h"
+
+/* Pointers to per-core cpu contexts */
+static void *sp_min_cpu_ctx_ptr[PLATFORM_CORE_COUNT];
+
+/* SP_MIN only stores the non secure smc context */
+static smc_ctx_t sp_min_smc_context[PLATFORM_CORE_COUNT];
+
+/******************************************************************************
+ * Define the smcc helper library APIs
+ *****************************************************************************/
+void *smc_get_ctx(int security_state)
+{
+	assert(security_state == NON_SECURE);
+	return &sp_min_smc_context[plat_my_core_pos()];
+}
+
+void smc_set_next_ctx(int security_state)
+{
+	assert(security_state == NON_SECURE);
+	/* SP_MIN stores only non secure smc context. Nothing to do here */
+}
+
+void *smc_get_next_ctx(void)
+{
+	return &sp_min_smc_context[plat_my_core_pos()];
+}
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the calling CPU that was set as the context for the specified security
+ * state. NULL is returned if no such structure has been specified.
+ ******************************************************************************/
+void *cm_get_context(uint32_t security_state)
+{
+	assert(security_state == NON_SECURE);
+	return sp_min_cpu_ctx_ptr[plat_my_core_pos()];
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the calling CPU
+ ******************************************************************************/
+void cm_set_context(void *context, uint32_t security_state)
+{
+	assert(security_state == NON_SECURE);
+	sp_min_cpu_ctx_ptr[plat_my_core_pos()] = context;
+}
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the CPU identified by `cpu_idx` that was set as the context for the
+ * specified security state. NULL is returned if no such structure has been
+ * specified.
+ ******************************************************************************/
+void *cm_get_context_by_index(unsigned int cpu_idx,
+				unsigned int security_state)
+{
+	assert(security_state == NON_SECURE);
+	return sp_min_cpu_ctx_ptr[cpu_idx];
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the CPU identified by CPU index.
+ ******************************************************************************/
+void cm_set_context_by_index(unsigned int cpu_idx, void *context,
+				unsigned int security_state)
+{
+	assert(security_state == NON_SECURE);
+	sp_min_cpu_ctx_ptr[cpu_idx] = context;
+}
+
+static void copy_cpu_ctx_to_smc_stx(const regs_t *cpu_reg_ctx,
+				smc_ctx_t *next_smc_ctx)
+{
+	next_smc_ctx->r0 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R0);
+	next_smc_ctx->lr_mon = read_ctx_reg(cpu_reg_ctx, CTX_LR);
+	next_smc_ctx->spsr_mon = read_ctx_reg(cpu_reg_ctx, CTX_SPSR);
+}
+
+/*******************************************************************************
+ * This function invokes the PSCI library interface to initialize the
+ * non secure cpu context and copies the relevant cpu context register values
+ * to smc context. These registers will get programmed during `smc_exit`.
+ ******************************************************************************/
+static void sp_min_prepare_next_image_entry(void)
+{
+	entry_point_info_t *next_image_info;
+
+	/* Program system registers to proceed to non-secure */
+	next_image_info = sp_min_plat_get_bl33_ep_info();
+	assert(next_image_info);
+	assert(NON_SECURE == GET_SECURITY_STATE(next_image_info->h.attr));
+
+	INFO("SP_MIN: Preparing exit to normal world\n");
+
+	psci_prepare_next_non_secure_ctx(next_image_info);
+	smc_set_next_ctx(NON_SECURE);
+
+	/* Copy r0, lr and spsr from cpu context to SMC context */
+	copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
+			smc_get_next_ctx());
+}
+
+/******************************************************************************
+ * The SP_MIN main function. Do the platform and PSCI Library setup. Also
+ * initialize the runtime service framework.
+ *****************************************************************************/
+void sp_min_main(void)
+{
+	/* Perform platform setup in SP_MIN */
+	sp_min_platform_setup();
+
+	/*
+	 * Initialize the PSCI library and perform the remaining generic
+	 * architectural setup from PSCI.
+	 */
+	psci_setup((uintptr_t)sp_min_warm_entrypoint);
+
+	/*
+	 * Initialize the runtime services e.g. psci
+	 * This is where the monitor mode will be initialized
+	 */
+	INFO("SP_MIN: Initializing runtime services\n");
+	runtime_svc_init();
+
+	/*
+	 * We are ready to enter the next EL. Prepare entry into the image
+	 * corresponding to the desired security state after the next ERET.
+	 */
+	sp_min_prepare_next_image_entry();
+}
+
+/******************************************************************************
+ * This function is invoked during warm boot. Invoke the PSCI library
+ * warm boot entry point which takes care of Architectural and platform setup/
+ * restore. Copy the relevant cpu_context register values to smc context which
+ * will get programmed during `smc_exit`.
+ *****************************************************************************/
+void sp_min_warm_boot(void)
+{
+	smc_ctx_t *next_smc_ctx;
+
+	psci_warmboot_entrypoint();
+
+	smc_set_next_ctx(NON_SECURE);
+
+	next_smc_ctx = smc_get_next_ctx();
+	memset(next_smc_ctx, 0, sizeof(smc_ctx_t));
+
+	copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
+			next_smc_ctx);
+}
diff --git a/bl32/sp_min/sp_min_private.h b/bl32/sp_min/sp_min_private.h
new file mode 100644
index 0000000..0042f40
--- /dev/null
+++ b/bl32/sp_min/sp_min_private.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SP_MIN_H__
+#define __SP_MIN_H__
+
+void sp_min_warm_entrypoint(void);
+void sp_min_main(void);
+void sp_min_warm_boot(void);
+
+#endif /* __SP_MIN_H__ */
diff --git a/bl32/tsp/tsp.mk b/bl32/tsp/tsp.mk
index a502428..2f3391e 100644
--- a/bl32/tsp/tsp.mk
+++ b/bl32/tsp/tsp.mk
@@ -37,7 +37,7 @@
 				bl32/tsp/tsp_interrupt.c		\
 				bl32/tsp/tsp_timer.c			\
 				common/aarch64/early_exceptions.S	\
-				lib/locks/exclusive/spinlock.S
+				lib/locks/exclusive/aarch64/spinlock.S
 
 BL32_LINKERFILE		:=	bl32/tsp/tsp.ld.S
 
diff --git a/common/aarch32/debug.S b/common/aarch32/debug.S
new file mode 100644
index 0000000..01ec1e3
--- /dev/null
+++ b/common/aarch32/debug.S
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+	.globl	do_panic
+
+	/***********************************************************
+	 * The common implementation of do_panic for all BL stages
+	 ***********************************************************/
+func do_panic
+	b	plat_panic_handler
+endfunc do_panic
+
diff --git a/common/runtime_svc.c b/common/runtime_svc.c
index b8af6cd..df0d64c 100644
--- a/common/runtime_svc.c
+++ b/common/runtime_svc.c
@@ -52,6 +52,34 @@
 					/ sizeof(rt_svc_desc_t))
 
 /*******************************************************************************
+ * Function to invoke the registered `handle` corresponding to the smc_fid.
+ ******************************************************************************/
+uintptr_t handle_runtime_svc(uint32_t smc_fid,
+			     void *cookie,
+			     void *handle,
+			     unsigned int flags)
+{
+	u_register_t x1, x2, x3, x4;
+	int index, idx;
+	const rt_svc_desc_t *rt_svc_descs;
+
+	assert(handle);
+	idx = get_unique_oen_from_smc_fid(smc_fid);
+	assert(idx >= 0 && idx < MAX_RT_SVCS);
+
+	index = rt_svc_descs_indices[idx];
+	if (index < 0 || index >= RT_SVC_DECS_NUM)
+		SMC_RET1(handle, SMC_UNK);
+
+	rt_svc_descs = (rt_svc_desc_t *) RT_SVC_DESCS_START;
+
+	get_smc_params_from_ctx(handle, x1, x2, x3, x4);
+
+	return rt_svc_descs[index].handle(smc_fid, x1, x2, x3, x4, cookie,
+						handle, flags);
+}
+
+/*******************************************************************************
  * Simple routine to sanity check a runtime service descriptor before using it
  ******************************************************************************/
 static int32_t validate_rt_svc_desc(const rt_svc_desc_t *desc)
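
handle_runtime_svc above resolves the descriptor whose OEN range covers the
function ID and invokes its handler. For context, services land in the
rt_svc_descs section through the DECLARE_RT_SVC macro; the sketch below shows
the shape of such a registration for a hypothetical service, assuming the
macro and handler prototype from include/common/runtime_svc.h:

    /* Hypothetical service, for illustration only (not part of this patch). */
    #include <runtime_svc.h>
    #include <smcc_helpers.h>

    static int32_t demo_svc_setup(void)
    {
        return 0;   /* nothing to initialise */
    }

    static uintptr_t demo_svc_handler(uint32_t smc_fid, u_register_t x1,
                u_register_t x2, u_register_t x3, u_register_t x4,
                void *cookie, void *handle, u_register_t flags)
    {
        /* Echo the first argument back to the caller */
        SMC_RET1(handle, x1);
    }

    DECLARE_RT_SVC(
        demo_svc,           /* name */
        OEN_OEM_START,      /* start OEN */
        OEN_OEM_END,        /* end OEN */
        SMC_TYPE_FAST,      /* call type */
        demo_svc_setup,     /* init */
        demo_svc_handler    /* SMC handler */
    );
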
diff --git a/common/tf_printf.c b/common/tf_printf.c
index ad0b90a..8c1857e 100644
--- a/common/tf_printf.c
+++ b/common/tf_printf.c
@@ -27,7 +27,11 @@
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
 #include <debug.h>
+#include <limits.h>
 #include <stdarg.h>
 #include <stdint.h>
 
@@ -49,6 +53,85 @@
 		putchar(*str++);
 }
 
+#ifdef AARCH32
+#define unsigned_num_print(unum, radix)			\
+	do {						\
+		if ((radix) == 16)			\
+			unsigned_hex_print(unum);	\
+		else if ((radix) == 10)			\
+			unsigned_dec_print(unum);	\
+		else					\
+			string_print("tf_printf : Unsupported radix");\
+	} while (0);
+
+/*
+ * Utility function to print an unsigned number in decimal format for AArch32.
+ * The function doesn't support printing decimal integers higher than 32 bits
+ * to avoid having to implement 64-bit integer compiler library functions.
+ */
+static void unsigned_dec_print(unsigned long long int unum)
+{
+	unsigned int local_num;
+	/* Just need enough space to store 32 bit decimal integer */
+	unsigned char num_buf[10];
+	int i = 0, rem;
+
+	if (unum > UINT_MAX) {
+		string_print("tf_printf : decimal numbers higher than 32 bits"
+				" not supported\n");
+		return;
+	}
+
+	local_num = (unsigned int)unum;
+
+	do {
+		rem = local_num % 10;
+		num_buf[i++] = '0' + rem;
+	} while (local_num /= 10);
+
+	while (--i >= 0)
+		putchar(num_buf[i]);
+}
+
+/*
+ * Utility function to print an unsigned number in hexadecimal format for
+ * AArch32. The function doesn't use 64-bit integer arithmetic to avoid
+ * having to implement 64-bit compiler library functions. It splits the
+ * 64 bit number into two 32 bit numbers and converts them into equivalent
+ * ASCII characters.
+ */
+static void unsigned_hex_print(unsigned long long int unum)
+{
+	/* Just need enough space to store 16 characters */
+	unsigned char num_buf[16];
+	int i = 0, rem;
+	uint32_t num_local = 0, num_msb = 0;
+
+	/* Get the LSB of 64 bit unum */
+	num_local = (uint32_t)unum;
+	/* Get the MSB of 64 bit unum. This works only on Little Endian */
+	assert((read_sctlr() & SCTLR_EE_BIT) == 0);
+	num_msb = *(((uint32_t *) &unum) + 1);
+
+	do {
+		do {
+			rem = (num_local & 0xf);
+			if (rem < 0xa)
+				num_buf[i++] = '0' + rem;
+			else
+				num_buf[i++] = 'a' + (rem - 0xa);
+		} while (num_local >>= 4);
+
+		num_local = num_msb;
+		num_msb = 0;
+	} while (num_local);
+
+	while (--i >= 0)
+		putchar(num_buf[i]);
+}
+
+#else
+
 static void unsigned_num_print(unsigned long long int unum, unsigned int radix)
 {
 	/* Just need enough space to store 64 bit decimal integer */
@@ -66,6 +149,7 @@
 	while (--i >= 0)
 		putchar(num_buf[i]);
 }
+#endif /* AARCH32 */
 
 /*******************************************************************
  * Reduced format print for Trusted firmware.
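
The AArch32 unsigned_hex_print above converts the low 32-bit word first and
then continues with the high word, so no 64-bit shift helpers are pulled in.
A stand-alone check of that split (illustrative only; the TF code reads the
high word through a pointer, a plain shift is used here for clarity):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long long unum = 0x1122334455667788ULL;

        uint32_t lo = (uint32_t)unum;           /* converted first */
        uint32_t hi = (uint32_t)(unum >> 32);   /* converted second */

        printf("lo = %08x, hi = %08x\n", (unsigned int)lo, (unsigned int)hi);
        printf("printed as %x%08x\n", (unsigned int)hi, (unsigned int)lo);  /* 1122334455667788 */
        return 0;
    }
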
diff --git a/docs/user-guide.md b/docs/user-guide.md
index 948e3ab..a6959b1 100644
--- a/docs/user-guide.md
+++ b/docs/user-guide.md
@@ -208,11 +208,21 @@
     platform name must be subdirectory of any depth under `plat/`, and must
     contain a platform makefile named `platform.mk`.
 
+*   `ARCH`: Choose the target build architecture for ARM Trusted Firmware.
+    It can take either `aarch64` or `aarch32` as its value. By default, it is
+    set to `aarch64`.
+
 *   `SPD`: Choose a Secure Payload Dispatcher component to be built into the
-    Trusted Firmware. The value should be the path to the directory containing
-    the SPD source, relative to `services/spd/`; the directory is expected to
+    Trusted Firmware. This build option is only valid if `ARCH=aarch64`. The
+    value should be the path to the directory containing the SPD source,
+    relative to `services/spd/`; the directory is expected to
     contain a makefile called `<spd-value>.mk`.
 
+*   `AARCH32_SP`: Choose the AArch32 Secure Payload component to be built
+    as the BL32 image when `ARCH=aarch32`. The value should be the path to the
+    directory containing the SP source, relative to `bl32/`; the directory
+    is expected to contain a makefile called `<aarch32_sp-value>.mk`.
+
 *   `V`: Verbose build. If assigned anything other than 0, the build commands
     are printed. Default is 0.
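
Taken together with the Makefile changes earlier in this patch, an SP_MIN
build would be invoked along the lines of
`make ARCH=aarch32 AARCH32_SP=sp_min PLAT=<platform> all`, where the platform
name is a placeholder and the chosen platform must provide an sp_min platform
makefile.
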
 
diff --git a/drivers/arm/gic/v3/gicv3_main.c b/drivers/arm/gic/v3/gicv3_main.c
index e017033..d13e2c9 100644
--- a/drivers/arm/gic/v3/gicv3_main.c
+++ b/drivers/arm/gic/v3/gicv3_main.c
@@ -75,8 +75,12 @@
 	       plat_driver_data->g1s_interrupt_num == 0);
 
 	/* Check for system register support */
+#ifdef AARCH32
+	assert(read_id_pfr1() & (ID_PFR1_GIC_MASK << ID_PFR1_GIC_SHIFT));
+#else
 	assert(read_id_aa64pfr0_el1() &
 			(ID_AA64PFR0_GIC_MASK << ID_AA64PFR0_GIC_SHIFT));
+#endif /* AARCH32 */
 
 	/* The GIC version should be 3.0 */
 	gic_version = gicd_read_pidr2(plat_driver_data->gicd_base);
diff --git a/drivers/arm/gic/v3/gicv3_private.h b/drivers/arm/gic/v3/gicv3_private.h
index 9aa8338..1344a88 100644
--- a/drivers/arm/gic/v3/gicv3_private.h
+++ b/drivers/arm/gic/v3/gicv3_private.h
@@ -79,9 +79,13 @@
  * Macro to convert a GICR_TYPER affinity value into a MPIDR value. Bits[31:24]
  * are zeroes.
  */
+#ifdef AARCH32
+#define mpidr_from_gicr_typer(typer_val)	(((typer_val) >> 32) & 0xffffff)
+#else
 #define mpidr_from_gicr_typer(typer_val)				 \
-	((((typer_val >> 56) & MPIDR_AFFLVL_MASK) << MPIDR_AFF3_SHIFT) | \
-	 ((typer_val >> 32) & 0xffffff))
+	(((((typer_val) >> 56) & MPIDR_AFFLVL_MASK) << MPIDR_AFF3_SHIFT) | \
+	 (((typer_val) >> 32) & 0xffffff))
+#endif
 
 /*******************************************************************************
  * Private GICv3 function prototypes for accessing entire registers.
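
The AArch32 variant of mpidr_from_gicr_typer drops the Aff3 field, since the
32-bit MPIDR register has no Aff3. A worked example of both conversions on a
sample TYPER value (chosen purely for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* GICR_TYPER.Affinity_Value sits in bits [63:32]:
     * Aff3 [63:56], Aff2 [55:48], Aff1 [47:40], Aff0 [39:32]. */
    int main(void)
    {
        uint64_t typer_val = 0x0102030400000000ULL;     /* Aff3..Aff0 = 1,2,3,4 */

        uint64_t mpidr64 = ((((typer_val >> 56) & 0xff) << 32) |
                            ((typer_val >> 32) & 0xffffff));
        uint32_t mpidr32 = (uint32_t)((typer_val >> 32) & 0xffffff);

        printf("AArch64 MPIDR: 0x%llx\n", (unsigned long long)mpidr64);  /* 0x100020304 */
        printf("AArch32 MPIDR: 0x%x\n", (unsigned int)mpidr32);          /* 0x20304 */
        return 0;
    }
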
diff --git a/drivers/arm/pl011/aarch32/pl011_console.S b/drivers/arm/pl011/aarch32/pl011_console.S
new file mode 100644
index 0000000..21ed7ab
--- /dev/null
+++ b/drivers/arm/pl011/aarch32/pl011_console.S
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <pl011.h>
+
+/*
+ * Pull in generic functions to provide backwards compatibility for
+ * platform makefiles
+ */
+#include "../../../console/aarch32/console.S"
+
+	.globl	console_core_init
+	.globl	console_core_putc
+	.globl	console_core_getc
+
+
+	/* -----------------------------------------------
+	 * int console_core_init(uintptr_t base_addr,
+	 * unsigned int uart_clk, unsigned int baud_rate)
+	 * Function to initialize the console without a
+	 * C Runtime to print debug information. This
+	 * function will be accessed by console_init and
+	 * crash reporting.
+	 * In: r0 - console base address
+	 *     r1 - Uart clock in Hz
+	 *     r2 - Baud rate
+	 * Out: return 1 on success else 0 on error
+	 * Clobber list : r1, r2, r3
+	 * -----------------------------------------------
+	 */
+func console_core_init
+	/* Check the input base address */
+	cmp	r0, #0
+	beq	core_init_fail
+#if !PL011_GENERIC_UART
+	/* Check baud rate and uart clock for sanity */
+	cmp	r1, #0
+	beq	core_init_fail
+	cmp	r2, #0
+	beq	core_init_fail
+	/* Disable the UART before initialization */
+	ldr	r3, [r0, #UARTCR]
+	bic	r3, r3, #PL011_UARTCR_UARTEN
+	str	r3, [r0, #UARTCR]
+	/* Program the baudrate */
+	/* Divisor =  (Uart clock * 4) / baudrate */
+	lsl	r1, r1, #2
+	udiv	r2, r1, r2
+	/* IBRD = Divisor >> 6 */
+	lsr	r1, r2, #6
+	/* Write the IBRD */
+	str	r1, [r0, #UARTIBRD]
+	/* FBRD = Divisor & 0x3F */
+	and	r1, r2, #0x3f
+	/* Write the FBRD */
+	str	r1, [r0, #UARTFBRD]
+	mov	r1, #PL011_LINE_CONTROL
+	str	r1, [r0, #UARTLCR_H]
+	/* Clear any pending errors */
+	mov	r1, #0
+	str	r1, [r0, #UARTECR]
+	/* Enable tx, rx, and uart overall */
+	ldr	r1, =(PL011_UARTCR_RXE | PL011_UARTCR_TXE | PL011_UARTCR_UARTEN)
+	str	r1, [r0, #UARTCR]
+#endif
+	mov	r0, #1
+	bx	lr
+core_init_fail:
+	mov	r0, #0
+	bx	lr
+endfunc console_core_init
+
+	/* --------------------------------------------------------
+	 * int console_core_putc(int c, uintptr_t base_addr)
+	 * Function to output a character over the console. It
+	 * returns the character printed on success or -1 on error.
+	 * In : r0 - character to be printed
+	 *      r1 - console base address
+	 * Out : return -1 on error else return character.
+	 * Clobber list : r2
+	 * --------------------------------------------------------
+	 */
+func console_core_putc
+	/* Check the input parameter */
+	cmp	r1, #0
+	beq	putc_error
+	/* Prepend '\r' to '\n' */
+	cmp	r0, #0xA
+	bne	2f
+1:
+	/* Check if the transmit FIFO is full */
+	ldr	r2, [r1, #UARTFR]
+	tst	r2, #PL011_UARTFR_TXFF_BIT
+	beq	1b
+	mov	r2, #0xD
+	str	r2, [r1, #UARTDR]
+2:
+	/* Check if the transmit FIFO is full */
+	ldr	r2, [r1, #UARTFR]
+	tst	r2, #PL011_UARTFR_TXFF_BIT
+	beq	2b
+	str	r0, [r1, #UARTDR]
+	bx	lr
+putc_error:
+	mov	r0, #-1
+	bx	lr
+endfunc console_core_putc
+
+	/* ---------------------------------------------
+	 * int console_core_getc(uintptr_t base_addr)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 on error.
+	 * In : r0 - console base address
+	 * Clobber list : r0, r1
+	 * ---------------------------------------------
+	 */
+func console_core_getc
+	cmp	r0, #0
+	beq	getc_error
+1:
+	/* Check if the receive FIFO is empty */
+	ldr	r1, [r0, #UARTFR]
+	tst	r1, #PL011_UARTFR_RXFE_BIT
+	beq	1b
+	ldr	r1, [r0, #UARTDR]
+	mov	r0, r1
+	bx	lr
+getc_error:
+	mov	r0, #-1
+	bx	lr
+endfunc console_core_getc
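
The baud rate programming in console_core_init computes a 4x oversampled
divisor and splits it into the integer (IBRD) and fractional (FBRD) parts. A
worked example with illustrative clock and baud values (not tied to any
platform):

    #include <stdio.h>

    int main(void)
    {
        unsigned int uart_clk = 24000000;   /* assumed 24 MHz UART clock */
        unsigned int baud = 115200;         /* assumed baud rate */

        /* Same arithmetic as the assembly above:
         * Divisor = (clock * 4) / baud, IBRD = Divisor >> 6, FBRD = Divisor & 0x3F */
        unsigned int divisor = (uart_clk << 2) / baud;
        unsigned int ibrd = divisor >> 6;
        unsigned int fbrd = divisor & 0x3f;

        printf("divisor=%u IBRD=%u FBRD=%u\n", divisor, ibrd, fbrd);    /* 833, 13, 1 */
        return 0;
    }
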
diff --git a/drivers/arm/pl011/aarch64/pl011_console.S b/drivers/arm/pl011/aarch64/pl011_console.S
new file mode 100644
index 0000000..11e3df7
--- /dev/null
+++ b/drivers/arm/pl011/aarch64/pl011_console.S
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <pl011.h>
+
+/*
+ * Pull in generic functions to provide backwards compatibility for
+ * platform makefiles
+ */
+#include "../../../console/aarch64/console.S"
+
+
+	.globl	console_core_init
+	.globl	console_core_putc
+	.globl	console_core_getc
+
+
+	/* -----------------------------------------------
+	 * int console_core_init(uintptr_t base_addr,
+	 * unsigned int uart_clk, unsigned int baud_rate)
+	 * Function to initialize the console without a
+	 * C Runtime to print debug information. This
+	 * function will be accessed by console_init and
+	 * crash reporting.
+	 * In: x0 - console base address
+	 *     w1 - Uart clock in Hz
+	 *     w2 - Baud rate
+	 * Out: return 1 on success else 0 on error
+	 * Clobber list : x1, x2, x3, x4
+	 * -----------------------------------------------
+	 */
+func console_core_init
+	/* Check the input base address */
+	cbz	x0, core_init_fail
+#if !PL011_GENERIC_UART
+	/* Check baud rate and uart clock for sanity */
+	cbz	w1, core_init_fail
+	cbz	w2, core_init_fail
+	/* Disable uart before programming */
+	ldr	w3, [x0, #UARTCR]
+	mov	w4, #PL011_UARTCR_UARTEN
+	bic	w3, w3, w4
+	str	w3, [x0, #UARTCR]
+	/* Program the baudrate */
+	/* Divisor =  (Uart clock * 4) / baudrate */
+	lsl	w1, w1, #2
+	udiv	w2, w1, w2
+	/* IBRD = Divisor >> 6 */
+	lsr	w1, w2, #6
+	/* Write the IBRD */
+	str	w1, [x0, #UARTIBRD]
+	/* FBRD = Divisor & 0x3F */
+	and	w1, w2, #0x3f
+	/* Write the FBRD */
+	str	w1, [x0, #UARTFBRD]
+	mov	w1, #PL011_LINE_CONTROL
+	str	w1, [x0, #UARTLCR_H]
+	/* Clear any pending errors */
+	str	wzr, [x0, #UARTECR]
+	/* Enable tx, rx, and uart overall */
+	mov	w1, #(PL011_UARTCR_RXE | PL011_UARTCR_TXE | PL011_UARTCR_UARTEN)
+	str	w1, [x0, #UARTCR]
+#endif
+	mov	w0, #1
+	ret
+core_init_fail:
+	mov	w0, wzr
+	ret
+endfunc console_core_init
+
+	/* --------------------------------------------------------
+	 * int console_core_putc(int c, uintptr_t base_addr)
+	 * Function to output a character over the console. It
+	 * returns the character printed on success or -1 on error.
+	 * In : w0 - character to be printed
+	 *      x1 - console base address
+	 * Out : return -1 on error else return character.
+	 * Clobber list : x2
+	 * --------------------------------------------------------
+	 */
+func console_core_putc
+	/* Check the input parameter */
+	cbz	x1, putc_error
+	/* Prepend '\r' to '\n' */
+	cmp	w0, #0xA
+	b.ne	2f
+1:
+	/* Check if the transmit FIFO is full */
+	ldr	w2, [x1, #UARTFR]
+	tbnz	w2, #PL011_UARTFR_TXFF_BIT, 1b
+	mov	w2, #0xD
+	str	w2, [x1, #UARTDR]
+2:
+	/* Check if the transmit FIFO is full */
+	ldr	w2, [x1, #UARTFR]
+	tbnz	w2, #PL011_UARTFR_TXFF_BIT, 2b
+	str	w0, [x1, #UARTDR]
+	ret
+putc_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_putc
+
+	/* ---------------------------------------------
+	 * int console_core_getc(uintptr_t base_addr)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 on error.
+	 * In : x0 - console base address
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func console_core_getc
+	cbz	x0, getc_error
+1:
+	/* Check if the receive FIFO is empty */
+	ldr	w1, [x0, #UARTFR]
+	tbnz	w1, #PL011_UARTFR_RXFE_BIT, 1b
+	ldr	w1, [x0, #UARTDR]
+	mov	w0, w1
+	ret
+getc_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_getc
diff --git a/drivers/arm/pl011/pl011_console.S b/drivers/arm/pl011/pl011_console.S
index 5e97e91..44aafc2 100644
--- a/drivers/arm/pl011/pl011_console.S
+++ b/drivers/arm/pl011/pl011_console.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -27,127 +27,7 @@
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
-#include <arch.h>
-#include <asm_macros.S>
-#include <pl011.h>
 
-/*
- * Pull in generic functions to provide backwards compatibility for
- * platform makefiles
- */
-#include "../../console/console.S"
-
-
-	.globl	console_core_init
-	.globl	console_core_putc
-	.globl	console_core_getc
-
-
-	/* -----------------------------------------------
-	 * int console_core_init(uintptr_t base_addr,
-	 * unsigned int uart_clk, unsigned int baud_rate)
-	 * Function to initialize the console without a
-	 * C Runtime to print debug information. This
-	 * function will be accessed by console_init and
-	 * crash reporting.
-	 * In: x0 - console base address
-	 *     w1 - Uart clock in Hz
-	 *     w2 - Baud rate
-	 * Out: return 1 on success else 0 on error
-	 * Clobber list : x1, x2, x3, x4
-	 * -----------------------------------------------
-	 */
-func console_core_init
-	/* Check the input base address */
-	cbz	x0, core_init_fail
-#if !PL011_GENERIC_UART
-	/* Check baud rate and uart clock for sanity */
-	cbz	w1, core_init_fail
-	cbz	w2, core_init_fail
-	/* Disable uart before programming */
-	ldr	w3, [x0, #UARTCR]
-	mov	w4, #PL011_UARTCR_UARTEN
-	bic	w3, w3, w4
-	str	w3, [x0, #UARTCR]
-	/* Program the baudrate */
-	/* Divisor =  (Uart clock * 4) / baudrate */
-	lsl	w1, w1, #2
-	udiv	w2, w1, w2
-	/* IBRD = Divisor >> 6 */
-	lsr	w1, w2, #6
-	/* Write the IBRD */
-	str	w1, [x0, #UARTIBRD]
-	/* FBRD = Divisor & 0x3F */
-	and	w1, w2, #0x3f
-	/* Write the FBRD */
-	str	w1, [x0, #UARTFBRD]
-	mov	w1, #PL011_LINE_CONTROL
-	str	w1, [x0, #UARTLCR_H]
-	/* Clear any pending errors */
-	str	wzr, [x0, #UARTECR]
-	/* Enable tx, rx, and uart overall */
-	mov	w1, #(PL011_UARTCR_RXE | PL011_UARTCR_TXE | PL011_UARTCR_UARTEN)
-	str	w1, [x0, #UARTCR]
+#if !ERROR_DEPRECATED
+#include "./aarch64/pl011_console.S"
 #endif
-	mov	w0, #1
-	ret
-core_init_fail:
-	mov	w0, wzr
-	ret
-endfunc console_core_init
-
-	/* --------------------------------------------------------
-	 * int console_core_putc(int c, uintptr_t base_addr)
-	 * Function to output a character over the console. It
-	 * returns the character printed on success or -1 on error.
-	 * In : w0 - character to be printed
-	 *      x1 - console base address
-	 * Out : return -1 on error else return character.
-	 * Clobber list : x2
-	 * --------------------------------------------------------
-	 */
-func console_core_putc
-	/* Check the input parameter */
-	cbz	x1, putc_error
-	/* Prepend '\r' to '\n' */
-	cmp	w0, #0xA
-	b.ne	2f
-1:
-	/* Check if the transmit FIFO is full */
-	ldr	w2, [x1, #UARTFR]
-	tbnz	w2, #PL011_UARTFR_TXFF_BIT, 1b
-	mov	w2, #0xD
-	str	w2, [x1, #UARTDR]
-2:
-	/* Check if the transmit FIFO is full */
-	ldr	w2, [x1, #UARTFR]
-	tbnz	w2, #PL011_UARTFR_TXFF_BIT, 2b
-	str	w0, [x1, #UARTDR]
-	ret
-putc_error:
-	mov	w0, #-1
-	ret
-endfunc console_core_putc
-
-	/* ---------------------------------------------
-	 * int console_core_getc(uintptr_t base_addr)
-	 * Function to get a character from the console.
-	 * It returns the character grabbed on success
-	 * or -1 on error.
-	 * In : x0 - console base address
-	 * Clobber list : x0, x1
-	 * ---------------------------------------------
-	 */
-func console_core_getc
-	cbz	x0, getc_error
-1:
-	/* Check if the receive FIFO is empty */
-	ldr	w1, [x0, #UARTFR]
-	tbnz	w1, #PL011_UARTFR_RXFE_BIT, 1b
-	ldr	w1, [x0, #UARTDR]
-	mov	w0, w1
-	ret
-getc_error:
-	mov	w0, #-1
-	ret
-endfunc console_core_getc
diff --git a/drivers/arm/tzc/tzc400.c b/drivers/arm/tzc/tzc400.c
index e533587..ca088c3 100644
--- a/drivers/arm/tzc/tzc400.c
+++ b/drivers/arm/tzc/tzc400.c
@@ -206,7 +206,7 @@
 	 * Do address range check based on TZC configuration. A 64bit address is
 	 * the max and expected case.
 	 */
-	assert(((region_top <= (UINT64_MAX >> (64 - tzc400.addr_width))) &&
+	assert(((region_top <= _tzc_get_max_top_addr(tzc400.addr_width)) &&
 		(region_base < region_top)));
 
 	/* region_base and (region_top + 1) must be 4KB aligned */
diff --git a/drivers/arm/tzc/tzc_common_private.c b/drivers/arm/tzc/tzc_common_private.c
index dae6c3a..8b1ddf4 100644
--- a/drivers/arm/tzc/tzc_common_private.c
+++ b/drivers/arm/tzc/tzc_common_private.c
@@ -28,6 +28,8 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <arch.h>
+#include <arch_helpers.h>
 #include <mmio.h>
 #include <tzc_common.h>
 
@@ -199,4 +201,35 @@
 
 	return id;
 }
+
+#ifdef AARCH32
+static unsigned long long _tzc_get_max_top_addr(int addr_width)
+{
+	/*
+	 * Assume an address width of at least 32 bits and initialize the mask
+	 * to the 32-bit maximum. 64-bit integer arithmetic is avoided so that
+	 * additional compiler library functions do not have to be implemented.
+	 */
+	unsigned long long addr_mask = 0xFFFFFFFF;
+	uint32_t *addr_ptr = (uint32_t *)&addr_mask;
+
+	assert(addr_width >= 32);
+
+	/* This logic works only on little-endian platforms */
+	assert((read_sctlr() & SCTLR_EE_BIT) == 0);
+
+	/*
+	 * If required address width is greater than 32, populate the higher
+	 * 32 bits of the 64 bit field with the max address.
+	 */
+	if (addr_width > 32)
+		*(addr_ptr + 1) = ((1 << (addr_width - 32)) - 1);
+
+	return addr_mask;
+}
+#else
+#define _tzc_get_max_top_addr(addr_width)\
+	(UINT64_MAX >> (64 - (addr_width)))
+#endif /* AARCH32 */
+
 #endif
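
The AArch32 variant above avoids 64-bit shifts by patching the upper 32 bits of the mask through a uint32_t pointer, which is also why it insists on a little-endian configuration (SCTLR.EE clear). A host-side sketch, assuming a little-endian host, showing that it agrees with the AArch64 expression for the 43-bit DMC-500 case used below:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the AArch32 path: set the low word, then patch the high word. */
    static unsigned long long max_top_addr_no64(int addr_width)
    {
        unsigned long long addr_mask = 0xFFFFFFFF;
        uint32_t *addr_ptr = (uint32_t *)&addr_mask;

        assert(addr_width >= 32 && addr_width < 64);
        if (addr_width > 32)
            addr_ptr[1] = (1u << (addr_width - 32)) - 1; /* high word on little-endian */
        return addr_mask;
    }

    int main(void)
    {
        int width = 43;     /* the DMC-TZ case used in tzc_dmc500.c */
        unsigned long long a = max_top_addr_no64(width);
        unsigned long long b = UINT64_MAX >> (64 - width);

        printf("0x%llx vs 0x%llx\n", a, b);     /* both 0x7ffffffffff */
        return (a == b) ? 0 : 1;
    }
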
diff --git a/drivers/arm/tzc/tzc_dmc500.c b/drivers/arm/tzc/tzc_dmc500.c
index b2f0bf6..24e587c 100644
--- a/drivers/arm/tzc/tzc_dmc500.c
+++ b/drivers/arm/tzc/tzc_dmc500.c
@@ -211,7 +211,7 @@
 	 * Do address range check based on DMC-TZ configuration. A 43bit address
 	 * is the max and expected case.
 	 */
-	assert(((region_top <= (UINT64_MAX >> (64 - 43))) &&
+	assert(((region_top <= _tzc_get_max_top_addr(43)) &&
 		(region_base < region_top)));
 
 	/* region_base and (region_top + 1) must be 4KB aligned */
diff --git a/drivers/cadence/uart/aarch64/cdns_console.S b/drivers/cadence/uart/aarch64/cdns_console.S
new file mode 100644
index 0000000..2c7960d
--- /dev/null
+++ b/drivers/cadence/uart/aarch64/cdns_console.S
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <cadence/cdns_uart.h>
+
+        .globl  console_core_init
+        .globl  console_core_putc
+        .globl  console_core_getc
+
+	/* -----------------------------------------------
+	 * int console_core_init(unsigned long base_addr,
+	 * unsigned int uart_clk, unsigned int baud_rate)
+	 * Function to initialize the console without a
+	 * C Runtime to print debug information. This
+	 * function will be accessed by console_init and
+	 * crash reporting.
+	 * We assume that the bootloader already set up
+	 * the HW (baud, ...) and only enable the
+	 * transmitter and receiver here.
+	 * In: x0 - console base address
+	 *     w1 - Uart clock in Hz
+	 *     w2 - Baud rate
+	 * Out: return 1 on success else 0 on error
+	 * Clobber list : x1, x2, x3
+	 * -----------------------------------------------
+	 */
+func console_core_init
+	/* Check the input base address */
+	cbz	x0, core_init_fail
+	/* Check baud rate and uart clock for sanity */
+	cbz	w1, core_init_fail
+	cbz	w2, core_init_fail
+
+	/* RX/TX enabled & reset */
+	mov	w3, #(R_UART_CR_TX_EN | R_UART_CR_RX_EN | R_UART_CR_TXRST | R_UART_CR_RXRST)
+	str	w3, [x0, #R_UART_CR]
+
+	mov	w0, #1
+	ret
+core_init_fail:
+	mov	w0, wzr
+	ret
+endfunc console_core_init
+
+	/* --------------------------------------------------------
+	 * int console_core_putc(int c, unsigned long base_addr)
+	 * Function to output a character over the console. It
+	 * returns the character printed on success or -1 on error.
+	 * In : w0 - character to be printed
+	 *      x1 - console base address
+	 * Out : return -1 on error else return character.
+	 * Clobber list : x2
+	 * --------------------------------------------------------
+	 */
+func console_core_putc
+	/* Check the input parameter */
+	cbz	x1, putc_error
+	/* Prepend '\r' to '\n' */
+	cmp	w0, #0xA
+	b.ne	2f
+1:
+	/* Check if the transmit FIFO is full */
+	ldr	w2, [x1, #R_UART_SR]
+	tbnz	w2, #UART_SR_INTR_TFUL_BIT, 1b
+	mov	w2, #0xD
+	str	w2, [x1, #R_UART_TX]
+2:
+	/* Check if the transmit FIFO is full */
+	ldr	w2, [x1, #R_UART_SR]
+	tbnz	w2, #UART_SR_INTR_TFUL_BIT, 2b
+	str	w0, [x1, #R_UART_TX]
+	ret
+putc_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_putc
+
+	/* ---------------------------------------------
+	 * int console_core_getc(unsigned long base_addr)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 on error.
+	 * In : x0 - console base address
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func console_core_getc
+	cbz	x0, getc_error
+1:
+	/* Check if the receive FIFO is empty */
+	ldr	w1, [x0, #R_UART_SR]
+	tbnz	w1, #UART_SR_INTR_REMPTY_BIT, 1b
+	ldr	w1, [x0, #R_UART_RX]
+	mov	w0, w1
+	ret
+getc_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_getc
diff --git a/drivers/cadence/uart/cdns_console.S b/drivers/cadence/uart/cdns_console.S
index 2c7960d..f727838 100644
--- a/drivers/cadence/uart/cdns_console.S
+++ b/drivers/cadence/uart/cdns_console.S
@@ -27,101 +27,7 @@
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
-#include <arch.h>
-#include <asm_macros.S>
-#include <cadence/cdns_uart.h>
-
-        .globl  console_core_init
-        .globl  console_core_putc
-        .globl  console_core_getc
-
-	/* -----------------------------------------------
-	 * int console_core_init(unsigned long base_addr,
-	 * unsigned int uart_clk, unsigned int baud_rate)
-	 * Function to initialize the console without a
-	 * C Runtime to print debug information. This
-	 * function will be accessed by console_init and
-	 * crash reporting.
-	 * We assume that the bootloader already set up
-	 * the HW (baud, ...) and only enable the trans-
-	 * mitter and receiver here.
-	 * In: x0 - console base address
-	 *     w1 - Uart clock in Hz
-	 *     w2 - Baud rate
-	 * Out: return 1 on success else 0 on error
-	 * Clobber list : x1, x2, x3
-	 * -----------------------------------------------
-	 */
-func console_core_init
-	/* Check the input base address */
-	cbz	x0, core_init_fail
-	/* Check baud rate and uart clock for sanity */
-	cbz	w1, core_init_fail
-	cbz	w2, core_init_fail
-
-	/* RX/TX enabled & reset */
-	mov	w3, #(R_UART_CR_TX_EN | R_UART_CR_RX_EN | R_UART_CR_TXRST | R_UART_CR_RXRST)
-	str	w3, [x0, #R_UART_CR]
-
-	mov	w0, #1
-	ret
-core_init_fail:
-	mov	w0, wzr
-	ret
-endfunc console_core_init
-
-	/* --------------------------------------------------------
-	 * int console_core_putc(int c, unsigned long base_addr)
-	 * Function to output a character over the console. It
-	 * returns the character printed on success or -1 on error.
-	 * In : w0 - character to be printed
-	 *      x1 - console base address
-	 * Out : return -1 on error else return character.
-	 * Clobber list : x2
-	 * --------------------------------------------------------
-	 */
-func console_core_putc
-	/* Check the input parameter */
-	cbz	x1, putc_error
-	/* Prepend '\r' to '\n' */
-	cmp	w0, #0xA
-	b.ne	2f
-1:
-	/* Check if the transmit FIFO is full */
-	ldr	w2, [x1, #R_UART_SR]
-	tbnz	w2, #UART_SR_INTR_TFUL_BIT, 1b
-	mov	w2, #0xD
-	str	w2, [x1, #R_UART_TX]
-2:
-	/* Check if the transmit FIFO is full */
-	ldr	w2, [x1, #R_UART_SR]
-	tbnz	w2, #UART_SR_INTR_TFUL_BIT, 2b
-	str	w0, [x1, #R_UART_TX]
-	ret
-putc_error:
-	mov	w0, #-1
-	ret
-endfunc console_core_putc
 
-	/* ---------------------------------------------
-	 * int console_core_getc(unsigned long base_addr)
-	 * Function to get a character from the console.
-	 * It returns the character grabbed on success
-	 * or -1 on error.
-	 * In : x0 - console base address
-	 * Clobber list : x0, x1
-	 * ---------------------------------------------
-	 */
-func console_core_getc
-	cbz	x0, getc_error
-1:
-	/* Check if the receive FIFO is empty */
-	ldr	w1, [x0, #R_UART_SR]
-	tbnz	w1, #UART_SR_INTR_REMPTY_BIT, 1b
-	ldr	w1, [x0, #R_UART_RX]
-	mov	w0, w1
-	ret
-getc_error:
-	mov	w0, #-1
-	ret
-endfunc console_core_getc
+#if !ERROR_DEPRECATED
+#include "./aarch64/cdns_console.S"
+#endif
diff --git a/drivers/console/aarch32/console.S b/drivers/console/aarch32/console.S
new file mode 100644
index 0000000..2993345
--- /dev/null
+++ b/drivers/console/aarch32/console.S
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <asm_macros.S>
+
+	.globl	console_init
+	.globl	console_uninit
+	.globl	console_putc
+	.globl	console_getc
+
+	/*
+	 *  The console base is in the data section and not in .bss
+	 *  even though it is zero-init. In particular, this allows
+	 *  the console functions to start using this variable before
+	 *  the runtime memory is initialized for images which do not
+	 *  need to copy the .data section from ROM to RAM.
+	 */
+.section .data.console_base ; .align 2
+	console_base: .word 0x0
+
+	/* -----------------------------------------------
+	 * int console_init(uintptr_t base_addr,
+	 * unsigned int uart_clk, unsigned int baud_rate)
+	 * Function to initialize the console without a
+	 * C Runtime to print debug information. It saves
+	 * the console base to the data section.
+	 * In: r0 - console base address
+	 *     r1 - Uart clock in Hz
+	 *     r2 - Baud rate
+	 * out: return 1 on success else 0 on error
+	 * Clobber list : r1 - r3
+	 * -----------------------------------------------
+	 */
+func console_init
+	/* Check the input base address */
+	cmp	r0, #0
+	beq	init_fail
+	ldr	r3, =console_base
+	str	r0, [r3]
+	b	console_core_init
+init_fail:
+	bx	lr
+endfunc console_init
+
+	/* -----------------------------------------------
+	 * void console_uninit(void)
+	 * Function to finish the use of console driver.
+	 * It sets the console_base as NULL so that any
+	 * further invocation of `console_putc` or
+	 * `console_getc` APIs would return error.
+	 * -----------------------------------------------
+	 */
+func console_uninit
+	mov	r0, #0
+	ldr	r3, =console_base
+	str	r0, [r3]
+	bx	lr
+endfunc console_uninit
+
+	/* ---------------------------------------------
+	 * int console_putc(int c)
+	 * Function to output a character over the
+	 * console. It returns the character printed on
+	 * success or -1 on error.
+	 * In : r0 - character to be printed
+	 * Out : return -1 on error else return character.
+	 * Clobber list : r1, r2
+	 * ---------------------------------------------
+	 */
+func console_putc
+	ldr	r2, =console_base
+	ldr	r1, [r2]
+	b	console_core_putc
+endfunc console_putc
+
+	/* ---------------------------------------------
+	 * int console_getc(void)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 on error.
+	 * Clobber list : r0, r1
+	 * ---------------------------------------------
+	 */
+func console_getc
+	ldr	r1, =console_base
+	ldr	r0, [r1]
+	b	console_core_getc
+endfunc console_getc
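
The AArch32 wrapper layer above mirrors its AArch64 counterpart: console_init latches the base address into console_base and every subsequent call tail-calls into the console_core_* driver routines with that base. A rough C equivalent (the names follow the assembly; the stub core functions below are placeholders for a real UART back end, not TF code):

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder core routines standing in for a real UART driver. */
    static int console_core_init(uintptr_t base, unsigned int clk, unsigned int baud)
    {
        return (base != 0 && clk != 0 && baud != 0) ? 1 : 0;
    }
    static int console_core_putc(int c, uintptr_t base)
    {
        return (base != 0) ? putchar(c) : -1;
    }
    static int console_core_getc(uintptr_t base)
    {
        return (base != 0) ? getchar() : -1;
    }

    /* Kept in .data (not .bss) in the assembly so it can be used before the
     * image's runtime memory has been initialized. */
    static uintptr_t console_base;

    int console_init(uintptr_t base, unsigned int clk, unsigned int baud)
    {
        if (base == 0)
            return 0;
        console_base = base;
        return console_core_init(base, clk, baud);
    }

    void console_uninit(void) { console_base = 0; }

    int console_putc(int c)   { return console_core_putc(c, console_base); }

    int console_getc(void)    { return console_core_getc(console_base); }

    int main(void)
    {
        console_init((uintptr_t)1, 24000000u, 115200u); /* dummy base address */
        console_putc('A');
        console_putc('\n');
        return 0;
    }

After console_uninit(), the core routines see a zero base and return -1, which is exactly the behaviour the zero checks in the assembly provide.
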
diff --git a/drivers/console/aarch32/skeleton_console.S b/drivers/console/aarch32/skeleton_console.S
new file mode 100644
index 0000000..383874e
--- /dev/null
+++ b/drivers/console/aarch32/skeleton_console.S
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <asm_macros.S>
+
+	/*
+	 * This file contains a skeleton console implementation that can
+	 * be used as basis for a real console implementation by platforms
+	 * that do not contain PL011 hardware.
+	 */
+
+	.globl	console_core_init
+	.globl	console_core_putc
+	.globl	console_core_getc
+
+	/* -----------------------------------------------
+	 * int console_core_init(uintptr_t base_addr,
+	 * unsigned int uart_clk, unsigned int baud_rate)
+	 * Function to initialize the console without a
+	 * C Runtime to print debug information. This
+	 * function will be accessed by console_init and
+	 * crash reporting.
+	 * In: r0 - console base address
+	 *     r1 - Uart clock in Hz
+	 *     r2 - Baud rate
+	 * Out: return 1 on success else 0 on error
+	 * Clobber list : r1, r2
+	 * -----------------------------------------------
+	 */
+func console_core_init
+	/* Check the input base address */
+	cmp	r0, #0
+	beq	core_init_fail
+	/* Check baud rate and uart clock for sanity */
+	cmp	r1, #0
+	beq	core_init_fail
+	cmp	r2, #0
+	beq	core_init_fail
+	/* Insert implementation here */
+	mov	r0, #1
+	bx	lr
+core_init_fail:
+	mov	r0, #0
+	bx	lr
+endfunc console_core_init
+
+	/* --------------------------------------------------------
+	 * int console_core_putc(int c, uintptr_t base_addr)
+	 * Function to output a character over the console. It
+	 * returns the character printed on success or -1 on error.
+	 * In : r0 - character to be printed
+	 *      r1 - console base address
+	 * Out : return -1 on error else return character.
+	 * Clobber list : r2
+	 * --------------------------------------------------------
+	 */
+func console_core_putc
+	/* Check the input parameter */
+	cmp	r1, #0
+	beq	putc_error
+	/* Insert implementation here */
+	bx	lr
+putc_error:
+	mov	r0, #-1
+	bx	lr
+endfunc console_core_putc
+
+	/* ---------------------------------------------
+	 * int console_core_getc(uintptr_t base_addr)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 on error.
+	 * In : r0 - console base address
+	 * Clobber list : r0, r1
+	 * ---------------------------------------------
+	 */
+func console_core_getc
+	cmp	r0, #0
+	beq	getc_error
+	/* Insert implementation here */
+	bx	lr
+getc_error:
+	mov	r0, #-1
+	bx	lr
+endfunc console_core_getc
diff --git a/drivers/console/aarch64/console.S b/drivers/console/aarch64/console.S
new file mode 100644
index 0000000..bdd5f4c
--- /dev/null
+++ b/drivers/console/aarch64/console.S
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <asm_macros.S>
+
+	.globl	console_init
+	.globl	console_uninit
+	.globl	console_putc
+	.globl	console_getc
+
+	/*
+	 *  The console base is in the data section and not in .bss
+	 *  even though it is zero-init. In particular, this allows
+	 *  the console functions to start using this variable before
+	 *  the runtime memory is initialized for images which do not
+	 *  need to copy the .data section from ROM to RAM.
+	 */
+.section .data.console_base ; .align 3
+	console_base: .quad 0x0
+
+	/* -----------------------------------------------
+	 * int console_init(uintptr_t base_addr,
+	 * unsigned int uart_clk, unsigned int baud_rate)
+	 * Function to initialize the console without a
+	 * C Runtime to print debug information. It saves
+	 * the console base to the data section.
+	 * In: x0 - console base address
+	 *     w1 - Uart clock in Hz
+	 *     w2 - Baud rate
+	 * out: return 1 on success else 0 on error
+	 * Clobber list : x1 - x4
+	 * -----------------------------------------------
+	 */
+func console_init
+	/* Check the input base address */
+	cbz	x0, init_fail
+	adrp	x3, console_base
+	str	x0, [x3, :lo12:console_base]
+	b	console_core_init
+init_fail:
+	ret
+endfunc console_init
+
+	/* -----------------------------------------------
+	 * void console_uninit(void)
+	 * Function to finish the use of console driver.
+	 * It sets the console_base as NULL so that any
+	 * further invocation of `console_putc` or
+	 * `console_getc` APIs would return error.
+	 * -----------------------------------------------
+	 */
+func console_uninit
+	mov	x0, #0
+	adrp	x3, console_base
+	str	x0, [x3, :lo12:console_base]
+	ret
+endfunc console_uninit
+
+	/* ---------------------------------------------
+	 * int console_putc(int c)
+	 * Function to output a character over the
+	 * console. It returns the character printed on
+	 * success or -1 on error.
+	 * In : x0 - character to be printed
+	 * Out : return -1 on error else return character.
+	 * Clobber list : x1, x2
+	 * ---------------------------------------------
+	 */
+func console_putc
+	adrp	x2, console_base
+	ldr	x1, [x2, :lo12:console_base]
+	b	console_core_putc
+endfunc console_putc
+
+	/* ---------------------------------------------
+	 * int console_getc(void)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 on error.
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func console_getc
+	adrp	x1, console_base
+	ldr	x0, [x1, :lo12:console_base]
+	b	console_core_getc
+endfunc console_getc
diff --git a/drivers/console/aarch64/skeleton_console.S b/drivers/console/aarch64/skeleton_console.S
new file mode 100644
index 0000000..1583ee7
--- /dev/null
+++ b/drivers/console/aarch64/skeleton_console.S
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <asm_macros.S>
+
+	/*
+	 * This file contains a skeleton console implementation that can
+	 * be used as basis for a real console implementation by platforms
+	 * that do not contain PL011 hardware.
+	 */
+
+	.globl	console_core_init
+	.globl	console_core_putc
+	.globl	console_core_getc
+
+	/* -----------------------------------------------
+	 * int console_core_init(uintptr_t base_addr,
+	 * unsigned int uart_clk, unsigned int baud_rate)
+	 * Function to initialize the console without a
+	 * C Runtime to print debug information. This
+	 * function will be accessed by console_init and
+	 * crash reporting.
+	 * In: x0 - console base address
+	 *     w1 - Uart clock in Hz
+	 *     w2 - Baud rate
+	 * Out: return 1 on success else 0 on error
+	 * Clobber list : x1, x2
+	 * -----------------------------------------------
+	 */
+func console_core_init
+	/* Check the input base address */
+	cbz	x0, core_init_fail
+	/* Check baud rate and uart clock for sanity */
+	cbz	w1, core_init_fail
+	cbz	w2, core_init_fail
+	/* Insert implementation here */
+	mov	w0, #1
+	ret
+core_init_fail:
+	mov	w0, wzr
+	ret
+endfunc console_core_init
+
+	/* --------------------------------------------------------
+	 * int console_core_putc(int c, uintptr_t base_addr)
+	 * Function to output a character over the console. It
+	 * returns the character printed on success or -1 on error.
+	 * In : w0 - character to be printed
+	 *      x1 - console base address
+	 * Out : return -1 on error else return character.
+	 * Clobber list : x2
+	 * --------------------------------------------------------
+	 */
+func console_core_putc
+	/* Check the input parameter */
+	cbz	x1, putc_error
+	/* Insert implementation here */
+	ret
+putc_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_putc
+
+	/* ---------------------------------------------
+	 * int console_core_getc(uintptr_t base_addr)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 on error.
+	 * In : x0 - console base address
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func console_core_getc
+	cbz	x0, getc_error
+	/* Insert implementation here */
+	ret
+getc_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_getc
diff --git a/drivers/console/console.S b/drivers/console/console.S
index 797b564..e861298 100644
--- a/drivers/console/console.S
+++ b/drivers/console/console.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -27,87 +27,7 @@
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
-#include <asm_macros.S>
-
-	.globl	console_init
-	.globl	console_uninit
-	.globl	console_putc
-	.globl	console_getc
-
-	/*
-	 *  The console base is in the data section and not in .bss
-	 *  even though it is zero-init. In particular, this allows
-	 *  the console functions to start using this variable before
-	 *  the runtime memory is initialized for images which do not
-	 *  need to copy the .data section from ROM to RAM.
-	 */
-.section .data.console_base ; .align 3
-	console_base: .quad 0x0
-
-	/* -----------------------------------------------
-	 * int console_init(uintptr_t base_addr,
-	 * unsigned int uart_clk, unsigned int baud_rate)
-	 * Function to initialize the console without a
-	 * C Runtime to print debug information. It saves
-	 * the console base to the data section.
-	 * In: x0 - console base address
-	 *     w1 - Uart clock in Hz
-	 *     w2 - Baud rate
-	 * out: return 1 on success else 0 on error
-	 * Clobber list : x1 - x4
-	 * -----------------------------------------------
-	 */
-func console_init
-	/* Check the input base address */
-	cbz	x0, init_fail
-	adrp	x3, console_base
-	str	x0, [x3, :lo12:console_base]
-	b	console_core_init
-init_fail:
-	ret
-endfunc console_init
-
-	/* -----------------------------------------------
-	 * void console_uninit(void)
-	 * Function to finish the use of console driver.
-	 * It sets the console_base as NULL so that any
-	 * further invocation of `console_putc` or
-	 * `console_getc` APIs would return error.
-	 * -----------------------------------------------
-	 */
-func console_uninit
-	mov	x0, #0
-	adrp	x3, console_base
-	str	x0, [x3, :lo12:console_base]
-	ret
-endfunc console_uninit
-
-	/* ---------------------------------------------
-	 * int console_putc(int c)
-	 * Function to output a character over the
-	 * console. It returns the character printed on
-	 * success or -1 on error.
-	 * In : x0 - character to be printed
-	 * Out : return -1 on error else return character.
-	 * Clobber list : x1, x2
-	 * ---------------------------------------------
-	 */
-func console_putc
-	adrp	x2, console_base
-	ldr	x1, [x2, :lo12:console_base]
-	b	console_core_putc
-endfunc console_putc
 
-	/* ---------------------------------------------
-	 * int console_getc(void)
-	 * Function to get a character from the console.
-	 * It returns the character grabbed on success
-	 * or -1 on error.
-	 * Clobber list : x0, x1
-	 * ---------------------------------------------
-	 */
-func console_getc
-	adrp	x1, console_base
-	ldr	x0, [x1, :lo12:console_base]
-	b	console_core_getc
-endfunc console_getc
+#if !ERROR_DEPRECATED
+#include "./aarch64/console.S"
+#endif
diff --git a/drivers/console/skeleton_console.S b/drivers/console/skeleton_console.S
index 083d3c7..ddfd834 100644
--- a/drivers/console/skeleton_console.S
+++ b/drivers/console/skeleton_console.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -27,80 +27,7 @@
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
-#include <asm_macros.S>
-
-	/*
-	 * This file contains a skeleton console implementation that can
-	 * be used as basis for a real console implementation by platforms
-	 * that do not contain PL011 hardware.
-	 */
-
-	.globl	console_core_init
-	.globl	console_core_putc
-	.globl	console_core_getc
-
-	/* -----------------------------------------------
-	 * int console_core_init(uintptr_t base_addr,
-	 * unsigned int uart_clk, unsigned int baud_rate)
-	 * Function to initialize the console without a
-	 * C Runtime to print debug information. This
-	 * function will be accessed by console_init and
-	 * crash reporting.
-	 * In: x0 - console base address
-	 *     w1 - Uart clock in Hz
-	 *     w2 - Baud rate
-	 * Out: return 1 on success else 0 on error
-	 * Clobber list : x1, x2
-	 * -----------------------------------------------
-	 */
-func console_core_init
-	/* Check the input base address */
-	cbz	x0, core_init_fail
-	/* Check baud rate and uart clock for sanity */
-	cbz	w1, core_init_fail
-	cbz	w2, core_init_fail
-	/* Insert implementation here */
-	mov	w0, #1
-	ret
-core_init_fail:
-	mov	w0, wzr
-	ret
-endfunc console_core_init
-
-	/* --------------------------------------------------------
-	 * int console_core_putc(int c, uintptr_t base_addr)
-	 * Function to output a character over the console. It
-	 * returns the character printed on success or -1 on error.
-	 * In : w0 - character to be printed
-	 *      x1 - console base address
-	 * Out : return -1 on error else return character.
-	 * Clobber list : x2
-	 * --------------------------------------------------------
-	 */
-func console_core_putc
-	/* Check the input parameter */
-	cbz	x1, putc_error
-	/* Insert implementation here */
-	ret
-putc_error:
-	mov	w0, #-1
-	ret
-endfunc console_core_putc
 
-	/* ---------------------------------------------
-	 * int console_core_getc(uintptr_t base_addr)
-	 * Function to get a character from the console.
-	 * It returns the character grabbed on success
-	 * or -1 on error.
-	 * In : x0 - console base address
-	 * Clobber list : x0, x1
-	 * ---------------------------------------------
-	 */
-func console_core_getc
-	cbz	x0, getc_error
-	/* Insert implementation here */
-	ret
-getc_error:
-	mov	w0, #-1
-	ret
-endfunc console_core_getc
+#if !ERROR_DEPRECATED
+#include "./aarch64/skeleton_console.S"
+#endif
diff --git a/drivers/ti/uart/16550_console.S b/drivers/ti/uart/16550_console.S
index ebb4615..90b12e5 100644
--- a/drivers/ti/uart/16550_console.S
+++ b/drivers/ti/uart/16550_console.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -28,128 +28,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <arch.h>
-#include <asm_macros.S>
-#include <uart_16550.h>
-
-	.globl	console_core_init
-	.globl	console_core_putc
-	.globl	console_core_getc
-
-	/* -----------------------------------------------
-	 * int console_core_init(unsigned long base_addr,
-	 * unsigned int uart_clk, unsigned int baud_rate)
-	 * Function to initialize the console without a
-	 * C Runtime to print debug information. This
-	 * function will be accessed by console_init and
-	 * crash reporting.
-	 * In: x0 - console base address
-	 *     w1 - Uart clock in Hz
-	 *     w2 - Baud rate
-	 * Out: return 1 on success
-	 * Clobber list : x1, x2, x3
-	 * -----------------------------------------------
-	 */
-func console_core_init
-	/* Check the input base address */
-	cbz	x0, init_fail
-	/* Check baud rate and uart clock for sanity */
-	cbz	w1, init_fail
-	cbz	w2, init_fail
-
-	/* Program the baudrate */
-	/* Divisor =  Uart clock / (16 * baudrate) */
-	lsl	w2, w2, #4
-	udiv	w2, w1, w2
-	and	w1, w2, #0xff		/* w1 = DLL */
-	lsr	w2, w2, #8
-	and	w2, w2, #0xff		/* w2 = DLLM */
-	ldr	w3, [x0, #UARTLCR]
-	orr	w3, w3, #UARTLCR_DLAB
-	str	w3, [x0, #UARTLCR]	/* enable DLL, DLLM programming */
-	str	w1, [x0, #UARTDLL]	/* program DLL */
-	str	w2, [x0, #UARTDLLM]	/* program DLLM */
-	mov	w2, #~UARTLCR_DLAB
-	and	w3, w3, w2
-	str	w3, [x0, #UARTLCR]	/* disable DLL, DLLM programming */
-
-	/* 8n1 */
-	mov	w3, #3
-	str	w3, [x0, #UARTLCR]
-	/* no interrupt */
-	mov	w3, #0
-	str	w3, [x0, #UARTIER]
-	/* enable fifo, DMA */
-	mov	w3, #(UARTFCR_FIFOEN | UARTFCR_DMAEN)
-	str	w3, [x0, #UARTFCR]
-	/* DTR + RTS */
-	mov	w3, #3
-	str	w3, [x0, #UARTMCR]
-	mov	w0, #1
-init_fail:
-	ret
-endfunc console_core_init
-
-	/* --------------------------------------------------------
-	 * int console_core_putc(int c, unsigned int base_addr)
-	 * Function to output a character over the console. It
-	 * returns the character printed on success or -1 on error.
-	 * In : w0 - character to be printed
-	 *      x1 - console base address
-	 * Out : return -1 on error else return character.
-	 * Clobber list : x2
-	 * --------------------------------------------------------
-	 */
-func console_core_putc
-	/* Check the input parameter */
-	cbz	x1, putc_error
-
-	/* Prepend '\r' to '\n' */
-	cmp	w0, #0xA
-	b.ne	2f
-	/* Check if the transmit FIFO is full */
-1:	ldr	w2, [x1, #UARTLSR]
-	and	w2, w2, #(UARTLSR_TEMT | UARTLSR_THRE)
-	cmp	w2, #(UARTLSR_TEMT | UARTLSR_THRE)
-	b.ne	1b
-	mov	w2, #0xD		/* '\r' */
-	str	w2, [x1, #UARTTX]
-	ldr	w2, [x1, #UARTFCR]
-	orr	w2, w2, #UARTFCR_TXCLR
-	str	w2, [x1, #UARTFCR]
-
-	/* Check if the transmit FIFO is full */
-2:	ldr	w2, [x1, #UARTLSR]
-	and	w2, w2, #(UARTLSR_TEMT | UARTLSR_THRE)
-	cmp	w2, #(UARTLSR_TEMT | UARTLSR_THRE)
-	b.ne	2b
-	str	w0, [x1, #UARTTX]
-	ldr	w2, [x1, #UARTFCR]
-	orr	w2, w2, #UARTFCR_TXCLR
-	str	w2, [x1, #UARTFCR]
-	ret
-putc_error:
-	mov	w0, #-1
-	ret
-endfunc console_core_putc
-
-	/* ---------------------------------------------
-	 * int console_core_getc(void)
-	 * Function to get a character from the console.
-	 * It returns the character grabbed on success
-	 * or -1 on error.
-	 * In : w0 - console base address
-	 * Out : return -1 on error else return character.
-	 * Clobber list : x0, x1
-	 * ---------------------------------------------
-	 */
-func console_core_getc
-	/* Check if the receive FIFO is empty */
-1:	ldr	w1, [x0, #UARTLSR]
-	tbz	w1, #UARTLSR_RDR, 1b
-	ldr	w0, [x0, #UARTRX]
-	ret
-getc_error:
-	mov	w0, #-1
-	ret
-endfunc console_core_getc
+#if !ERROR_DEPRECATED
+#include "./aarch64/16550_console.S"
+#endif
diff --git a/drivers/ti/uart/aarch64/16550_console.S b/drivers/ti/uart/aarch64/16550_console.S
new file mode 100644
index 0000000..0535381
--- /dev/null
+++ b/drivers/ti/uart/aarch64/16550_console.S
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <uart_16550.h>
+
+	.globl	console_core_init
+	.globl	console_core_putc
+	.globl	console_core_getc
+
+	/* -----------------------------------------------
+	 * int console_core_init(unsigned long base_addr,
+	 * unsigned int uart_clk, unsigned int baud_rate)
+	 * Function to initialize the console without a
+	 * C Runtime to print debug information. This
+	 * function will be accessed by console_init and
+	 * crash reporting.
+	 * In: x0 - console base address
+	 *     w1 - Uart clock in Hz
+	 *     w2 - Baud rate
+	 * Out: return 1 on success
+	 * Clobber list : x1, x2, x3
+	 * -----------------------------------------------
+	 */
+func console_core_init
+	/* Check the input base address */
+	cbz	x0, init_fail
+	/* Check baud rate and uart clock for sanity */
+	cbz	w1, init_fail
+	cbz	w2, init_fail
+
+	/* Program the baudrate */
+	/* Divisor =  Uart clock / (16 * baudrate) */
+	lsl	w2, w2, #4
+	udiv	w2, w1, w2
+	and	w1, w2, #0xff		/* w1 = DLL */
+	lsr	w2, w2, #8
+	and	w2, w2, #0xff		/* w2 = DLLM */
+	ldr	w3, [x0, #UARTLCR]
+	orr	w3, w3, #UARTLCR_DLAB
+	str	w3, [x0, #UARTLCR]	/* enable DLL, DLLM programming */
+	str	w1, [x0, #UARTDLL]	/* program DLL */
+	str	w2, [x0, #UARTDLLM]	/* program DLLM */
+	mov	w2, #~UARTLCR_DLAB
+	and	w3, w3, w2
+	str	w3, [x0, #UARTLCR]	/* disable DLL, DLLM programming */
+
+	/* 8n1 */
+	mov	w3, #3
+	str	w3, [x0, #UARTLCR]
+	/* no interrupt */
+	mov	w3, #0
+	str	w3, [x0, #UARTIER]
+	/* enable fifo, DMA */
+	mov	w3, #(UARTFCR_FIFOEN | UARTFCR_DMAEN)
+	str	w3, [x0, #UARTFCR]
+	/* DTR + RTS */
+	mov	w3, #3
+	str	w3, [x0, #UARTMCR]
+	mov	w0, #1
+init_fail:
+	ret
+endfunc console_core_init
+
+	/* --------------------------------------------------------
+	 * int console_core_putc(int c, unsigned int base_addr)
+	 * Function to output a character over the console. It
+	 * returns the character printed on success or -1 on error.
+	 * In : w0 - character to be printed
+	 *      x1 - console base address
+	 * Out : return -1 on error else return character.
+	 * Clobber list : x2
+	 * --------------------------------------------------------
+	 */
+func console_core_putc
+	/* Check the input parameter */
+	cbz	x1, putc_error
+
+	/* Prepend '\r' to '\n' */
+	cmp	w0, #0xA
+	b.ne	2f
+	/* Check if the transmit FIFO is full */
+1:	ldr	w2, [x1, #UARTLSR]
+	and	w2, w2, #(UARTLSR_TEMT | UARTLSR_THRE)
+	cmp	w2, #(UARTLSR_TEMT | UARTLSR_THRE)
+	b.ne	1b
+	mov	w2, #0xD		/* '\r' */
+	str	w2, [x1, #UARTTX]
+	ldr	w2, [x1, #UARTFCR]
+	orr	w2, w2, #UARTFCR_TXCLR
+	str	w2, [x1, #UARTFCR]
+
+	/* Check if the transmit FIFO is full */
+2:	ldr	w2, [x1, #UARTLSR]
+	and	w2, w2, #(UARTLSR_TEMT | UARTLSR_THRE)
+	cmp	w2, #(UARTLSR_TEMT | UARTLSR_THRE)
+	b.ne	2b
+	str	w0, [x1, #UARTTX]
+	ldr	w2, [x1, #UARTFCR]
+	orr	w2, w2, #UARTFCR_TXCLR
+	str	w2, [x1, #UARTFCR]
+	ret
+putc_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_putc
+
+	/* ---------------------------------------------
+	 * int console_core_getc(void)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 on error.
+	 * In : w0 - console base address
+	 * Out : return -1 on error else return character.
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func console_core_getc
+	/* Check if the receive FIFO is empty */
+1:	ldr	w1, [x0, #UARTLSR]
+	tbz	w1, #UARTLSR_RDR, 1b
+	ldr	w0, [x0, #UARTRX]
+	ret
+getc_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_getc
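
As with the PL011 driver, the 16550 set-up above boils down to divisor arithmetic: Divisor = UARTCLK / (16 * baud), whose low byte goes to the DLL latch and high byte to the DLLM latch while UARTLCR_DLAB is set. A short sketch with an assumed 48 MHz reference clock:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t uart_clk = 48000000u;      /* assumed reference clock */
        uint32_t baud     = 115200u;        /* assumed baud rate */

        /* Divisor = UARTCLK / (16 * baud), as computed with lsl #4 + udiv above */
        uint32_t divisor = uart_clk / (baud << 4);

        uint32_t dll  = divisor & 0xff;         /* written to UARTDLL  */
        uint32_t dllm = (divisor >> 8) & 0xff;  /* written to UARTDLLM */

        printf("divisor = %u, DLL = 0x%02x, DLLM = 0x%02x\n", divisor, dll, dllm);
        /* 48 MHz / (16 * 115200) = 26 -> DLL = 0x1a, DLLM = 0x00 */
        return 0;
    }
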
diff --git a/include/bl32/sp_min/platform_sp_min.h b/include/bl32/sp_min/platform_sp_min.h
new file mode 100644
index 0000000..ae9dd58
--- /dev/null
+++ b/include/bl32/sp_min/platform_sp_min.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PLATFORM_SP_MIN_H__
+#define __PLATFORM_SP_MIN_H__
+
+/*******************************************************************************
+ * Mandatory SP_MIN functions
+ ******************************************************************************/
+void sp_min_early_platform_setup(void);
+void sp_min_plat_arch_setup(void);
+void sp_min_platform_setup(void);
+entry_point_info_t *sp_min_plat_get_bl33_ep_info(void);
+
+#endif /* __PLATFORM_SP_MIN_H__ */
diff --git a/include/common/aarch32/asm_macros.S b/include/common/aarch32/asm_macros.S
new file mode 100644
index 0000000..11e45bb
--- /dev/null
+++ b/include/common/aarch32/asm_macros.S
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __ASM_MACROS_S__
+#define __ASM_MACROS_S__
+
+#include <arch.h>
+#include <asm_macros_common.S>
+
+#define WORD_SIZE	4
+
+	/*
+	 * Coprocessor register accessors
+	 */
+	.macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
+	mrc	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
+	.endm
+
+	.macro ldcopr16 reg1, reg2, coproc, opc1, CRm
+	mrrc	\coproc, \opc1, \reg1, \reg2, \CRm
+	.endm
+
+	.macro stcopr reg, coproc, opc1, CRn, CRm, opc2
+	mcr	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
+	.endm
+
+	.macro stcopr16 reg1, reg2, coproc, opc1, CRm
+	mcrr	\coproc, \opc1, \reg1, \reg2, \CRm
+	.endm
+
+	/* Cache line size helpers */
+	.macro	dcache_line_size  reg, tmp
+	ldcopr	\tmp, CTR
+	ubfx	\tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH
+	mov	\reg, #WORD_SIZE
+	lsl	\reg, \reg, \tmp
+	.endm
+
+	.macro	icache_line_size  reg, tmp
+	ldcopr	\tmp, CTR
+	and	\tmp, \tmp, #CTR_IMINLINE_MASK
+	mov	\reg, #WORD_SIZE
+	lsl	\reg, \reg, \tmp
+	.endm
+
+	/*
+	 * This macro calculates the base address of the current CPU's
+	 * multiprocessor (MP) stack using the plat_my_core_pos() index, the
+	 * name of the stack storage and the size of each stack.
+	 * Out: r0 = physical address of stack base
+	 * Clobber: r14, r1, r2
+	 */
+	.macro get_my_mp_stack _name, _size
+	bl  plat_my_core_pos
+	ldr r2, =(\_name + \_size)
+	mov r1, #\_size
+	mla r0, r0, r1, r2
+	.endm
+
+	/*
+	 * This macro calculates the base address of a uniprocessor (UP) stack
+	 * using the name of the stack storage and the size of the stack.
+	 * Out: r0 = physical address of stack base
+	 */
+	.macro get_up_stack _name, _size
+	ldr r0, =(\_name + \_size)
+	.endm
+
+#endif /* __ASM_MACROS_S__ */
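
The dcache_line_size and icache_line_size macros above decode the Cache Type Register: DminLine and IminLine hold log2 of the line length in words, so the byte size is WORD_SIZE shifted left by that field. A C rendering of the same decode (the CTR value below is made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define WORD_SIZE            4
    #define CTR_DMINLINE_SHIFT   16
    #define CTR_DMINLINE_WIDTH   4
    #define CTR_IMINLINE_MASK    0xf

    int main(void)
    {
        /* Made-up CTR value: DminLine = IminLine = 4, i.e. 16-word lines. */
        uint32_t ctr = (4u << CTR_DMINLINE_SHIFT) | 4u;

        uint32_t dminline = (ctr >> CTR_DMINLINE_SHIFT) &
                            ((1u << CTR_DMINLINE_WIDTH) - 1);   /* the ubfx above */
        uint32_t iminline = ctr & CTR_IMINLINE_MASK;

        printf("D-line = %u bytes, I-line = %u bytes\n",
               WORD_SIZE << dminline, WORD_SIZE << iminline);   /* 64 and 64 */
        return 0;
    }
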
diff --git a/include/common/aarch32/assert_macros.S b/include/common/aarch32/assert_macros.S
new file mode 100644
index 0000000..f35fc6a
--- /dev/null
+++ b/include/common/aarch32/assert_macros.S
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __ASSERT_MACROS_S__
+#define __ASSERT_MACROS_S__
+
+	/*
+	 * Assembler macro to enable asm_assert. We assume that the stack is
+	 * initialized prior to invoking this macro.
+	 */
+#define ASM_ASSERT(_cc) \
+.ifndef .L_assert_filename ;\
+	.pushsection .rodata.str1.1, "aS" ;\
+	.L_assert_filename: ;\
+			.string	__FILE__ ;\
+	.popsection ;\
+.endif ;\
+	b##_cc	300f ;\
+	ldr	r0, =.L_assert_filename ;\
+	mov	r1, #__LINE__ ;\
+	b	. ;\
+300:
+
+#endif /* __ASSERT_MACROS_S__ */
diff --git a/include/common/asm_macros_common.S b/include/common/asm_macros_common.S
index ee59a93..023124b 100644
--- a/include/common/asm_macros_common.S
+++ b/include/common/asm_macros_common.S
@@ -30,8 +30,6 @@
 #ifndef __ASM_MACROS_COMMON_S__
 #define __ASM_MACROS_COMMON_S__
 
-#include <arch.h>
-
 	/*
 	 * This macro is used to create a function label and place the
 	 * code into a separate text section based on the function name
diff --git a/include/common/bl_common.h b/include/common/bl_common.h
index 3aa0836..942843c 100644
--- a/include/common/bl_common.h
+++ b/include/common/bl_common.h
@@ -50,7 +50,11 @@
  * 'entry_point_info' structure at their correct offsets.
  ******************************************************************************/
 #define ENTRY_POINT_INFO_PC_OFFSET	0x08
+#ifdef AARCH32
+#define ENTRY_POINT_INFO_ARGS_OFFSET	0x10
+#else
 #define ENTRY_POINT_INFO_ARGS_OFFSET	0x18
+#endif
 
 /* The following are used to set/get image attributes. */
 #define PARAM_EP_SECURITY_MASK		(0x1)
@@ -192,6 +196,13 @@
 	u_register_t arg7;
 } aapcs64_params_t;
 
+typedef struct aapcs32_params {
+	u_register_t arg0;
+	u_register_t arg1;
+	u_register_t arg2;
+	u_register_t arg3;
+} aapcs32_params_t;
+
 /***************************************************************************
  * This structure provides version information and the size of the
  * structure, attributes for the structure it represents
@@ -216,7 +227,11 @@
 	param_header_t h;
 	uintptr_t pc;
 	uint32_t spsr;
+#ifdef AARCH32
+	aapcs32_params_t args;
+#else
 	aapcs64_params_t args;
+#endif
 } entry_point_info_t;
 
 /*****************************************************************************
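
The split ENTRY_POINT_INFO_ARGS_OFFSET follows directly from the layout of entry_point_info_t: an 8-byte parameter header (as ENTRY_POINT_INFO_PC_OFFSET 0x08 already implies), a native-width pc, a 32-bit spsr, then args aligned to the register width. A host-side sketch with a stand-in 8-byte header (the exact field breakdown of param_header_t is assumed here; only its size matters):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for param_header_t: only its 8-byte size matters here. */
    typedef struct { uint8_t type; uint8_t version; uint16_t size; uint32_t attr; } hdr_t;

    typedef uintptr_t u_register_t;     /* native register width */

    typedef struct {
        hdr_t h;                /* 8 bytes -> pc lands at offset 0x08 */
        uintptr_t pc;           /* 4 bytes on AArch32, 8 on AArch64   */
        uint32_t spsr;
        u_register_t args[4];   /* 4 args (AAPCS32) or 8 (AAPCS64);
                                   the count does not affect the offset */
    } ep_info_t;

    int main(void)
    {
        /* Prints 0x10 when pointers are 32-bit (8 + 4 + 4) and 0x18 when they
         * are 64-bit (8 + 8 + 4 + 4 bytes of alignment padding). */
        printf("args offset = 0x%zx\n", offsetof(ep_info_t, args));
        return 0;
    }
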
diff --git a/include/common/runtime_svc.h b/include/common/runtime_svc.h
index adafcee..514f334 100644
--- a/include/common/runtime_svc.h
+++ b/include/common/runtime_svc.h
@@ -43,10 +43,17 @@
  * Constants to allow the assembler access a runtime service
  * descriptor
  */
+#ifdef AARCH32
+#define RT_SVC_SIZE_LOG2	4
+#define RT_SVC_DESC_INIT	8
+#define RT_SVC_DESC_HANDLE	12
+#else
 #define RT_SVC_SIZE_LOG2	5
-#define SIZEOF_RT_SVC_DESC	(1 << RT_SVC_SIZE_LOG2)
 #define RT_SVC_DESC_INIT	16
 #define RT_SVC_DESC_HANDLE	24
+#endif /* AARCH32 */
+#define SIZEOF_RT_SVC_DESC	(1 << RT_SVC_SIZE_LOG2)
+
 
 /*
  * The function identifier has 6 bits for the owning entity number and
@@ -123,10 +130,22 @@
 					((call_type & FUNCID_TYPE_MASK) \
 					 << FUNCID_OEN_WIDTH))
 
+/*
+ * This macro generates the unique owning entity number from the SMC Function
+ * ID.  This unique oen is used to access an entry in the
+ * 'rt_svc_descs_indices' array to invoke the corresponding runtime service
+ * handler during SMC handling.
+ */
+#define get_unique_oen_from_smc_fid(fid)		\
+	get_unique_oen(((fid) >> FUNCID_OEN_SHIFT),	\
+			((fid) >> FUNCID_TYPE_SHIFT))
+
 /*******************************************************************************
  * Function & variable prototypes
  ******************************************************************************/
 void runtime_svc_init(void);
+uintptr_t handle_runtime_svc(uint32_t smc_fid, void *cookie, void *handle,
+						unsigned int flags);
 extern uintptr_t __RT_SVC_DESCS_START__;
 extern uintptr_t __RT_SVC_DESCS_END__;
 void init_crash_reporting(void);
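
Halving RT_SVC_SIZE_LOG2 reflects the runtime service descriptor shrinking with the pointer size, which is also why the init and handle offsets drop from 16/24 to 8/12. The new get_unique_oen_from_smc_fid() helper simply feeds the OEN and call-type fields of an SMC Function ID into get_unique_oen() to index rt_svc_descs_indices. A sketch of that index calculation, assuming the usual SMCCC field positions (call type in bit 31, OEN in bits 29:24); the upper-case macro names below are local to the example:

    #include <stdint.h>
    #include <stdio.h>

    /* SMC Function ID field positions, assuming the usual SMCCC layout:
     * call type in bit 31, OEN in bits 29:24. */
    #define FUNCID_TYPE_SHIFT   31
    #define FUNCID_TYPE_MASK    0x1
    #define FUNCID_OEN_SHIFT    24
    #define FUNCID_OEN_MASK     0x3f
    #define FUNCID_OEN_WIDTH    6

    #define GET_UNIQUE_OEN(oen, call_type) \
        (((oen) & FUNCID_OEN_MASK) | \
         (((call_type) & FUNCID_TYPE_MASK) << FUNCID_OEN_WIDTH))

    #define GET_UNIQUE_OEN_FROM_SMC_FID(fid) \
        GET_UNIQUE_OEN((fid) >> FUNCID_OEN_SHIFT, (fid) >> FUNCID_TYPE_SHIFT)

    int main(void)
    {
        uint32_t fid = 0x84000000u;   /* example: a fast standard-service call */

        /* OEN 4 with the fast-call bit set maps to index 4 | (1 << 6) = 68. */
        printf("index = %u\n", GET_UNIQUE_OEN_FROM_SMC_FID(fid));
        return 0;
    }
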
diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h
new file mode 100644
index 0000000..e571ddc
--- /dev/null
+++ b/include/lib/aarch32/arch.h
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __ARCH_H__
+#define __ARCH_H__
+
+/*******************************************************************************
+ * MIDR bit definitions
+ ******************************************************************************/
+#define MIDR_IMPL_MASK		0xff
+#define MIDR_IMPL_SHIFT		24
+#define MIDR_VAR_SHIFT		20
+#define MIDR_VAR_BITS		4
+#define MIDR_REV_SHIFT		0
+#define MIDR_REV_BITS		4
+#define MIDR_PN_MASK		0xfff
+#define MIDR_PN_SHIFT		4
+
+/*******************************************************************************
+ * MPIDR macros
+ ******************************************************************************/
+#define MPIDR_CPU_MASK		MPIDR_AFFLVL_MASK
+#define MPIDR_CLUSTER_MASK	(MPIDR_AFFLVL_MASK << MPIDR_AFFINITY_BITS)
+#define MPIDR_AFFINITY_BITS	8
+#define MPIDR_AFFLVL_MASK	0xff
+#define MPIDR_AFFLVL_SHIFT	3
+#define MPIDR_AFF0_SHIFT	0
+#define MPIDR_AFF1_SHIFT	8
+#define MPIDR_AFF2_SHIFT	16
+#define MPIDR_AFFINITY_MASK	0x00ffffff
+#define MPIDR_AFFLVL0		0
+#define MPIDR_AFFLVL1		1
+#define MPIDR_AFFLVL2		2
+
+#define MPIDR_AFFLVL0_VAL(mpidr) \
+		(((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL1_VAL(mpidr) \
+		(((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL2_VAL(mpidr) \
+		(((mpidr) >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK)
+
+/*
+ * The MPIDR_MAX_AFFLVL count starts from 0. Take care to
+ * add one while using this macro to define array sizes.
+ */
+#define MPIDR_MAX_AFFLVL	2
+
+/* Data Cache set/way op type defines */
+#define DC_OP_ISW			0x0
+#define DC_OP_CISW			0x1
+#define DC_OP_CSW			0x2
+
+/*******************************************************************************
+ * Generic timer memory mapped registers & offsets
+ ******************************************************************************/
+#define CNTCR_OFF			0x000
+#define CNTFID_OFF			0x020
+
+#define CNTCR_EN			(1 << 0)
+#define CNTCR_HDBG			(1 << 1)
+#define CNTCR_FCREQ(x)			((x) << 8)
+
+/*******************************************************************************
+ * System register bit definitions
+ ******************************************************************************/
+/* CLIDR definitions */
+#define LOUIS_SHIFT		21
+#define LOC_SHIFT		24
+#define CLIDR_FIELD_WIDTH	3
+
+/* CSSELR definitions */
+#define LEVEL_SHIFT		1
+
+/* ID_PFR1 definitions */
+#define ID_PFR1_VIRTEXT_SHIFT	12
+#define ID_PFR1_VIRTEXT_MASK	0xf
+#define GET_VIRT_EXT(id)	(((id) >> ID_PFR1_VIRTEXT_SHIFT) \
+				 & ID_PFR1_VIRTEXT_MASK)
+#define ID_PFR1_GIC_SHIFT	28
+#define ID_PFR1_GIC_MASK	0xf
+
+/* SCTLR definitions */
+#define SCTLR_RES1	((1 << 23) | (1 << 22) | (1 << 11) | (1 << 4) | \
+			(1 << 3) | SCTLR_CP15BEN_BIT | SCTLR_NTWI_BIT | SCTLR_NTWE_BIT)
+#define SCTLR_M_BIT		(1 << 0)
+#define SCTLR_A_BIT		(1 << 1)
+#define SCTLR_C_BIT		(1 << 2)
+#define SCTLR_CP15BEN_BIT	(1 << 5)
+#define SCTLR_ITD_BIT		(1 << 7)
+#define SCTLR_I_BIT		(1 << 12)
+#define SCTLR_V_BIT		(1 << 13)
+#define SCTLR_NTWI_BIT		(1 << 16)
+#define SCTLR_NTWE_BIT		(1 << 18)
+#define SCTLR_WXN_BIT		(1 << 19)
+#define SCTLR_UWXN_BIT		(1 << 20)
+#define SCTLR_EE_BIT		(1 << 25)
+#define SCTLR_TRE_BIT		(1 << 28)
+#define SCTLR_AFE_BIT		(1 << 29)
+#define SCTLR_TE_BIT		(1 << 30)
+
+/* HSCTLR definitions */
+#define HSCTLR_RES1 	((1 << 29) | (1 << 28) | (1 << 23) | (1 << 22)	\
+			| (1 << 18) | (1 << 16) | (1 << 11) | (1 << 4)	\
+			| (1 << 3) | HSCTLR_CP15BEN_BIT)
+#define HSCTLR_M_BIT		(1 << 0)
+#define HSCTLR_A_BIT		(1 << 1)
+#define HSCTLR_C_BIT		(1 << 2)
+#define HSCTLR_CP15BEN_BIT	(1 << 5)
+#define HSCTLR_ITD_BIT		(1 << 7)
+#define HSCTLR_SED_BIT		(1 << 8)
+#define HSCTLR_I_BIT		(1 << 12)
+#define HSCTLR_WXN_BIT		(1 << 19)
+#define HSCTLR_EE_BIT		(1 << 25)
+#define HSCTLR_TE_BIT		(1 << 30)
+
+/* CPACR definitions */
+#define CPACR_FPEN(x)	((x) << 20)
+#define CPACR_FP_TRAP_PL0	0x1
+#define CPACR_FP_TRAP_ALL	0x2
+#define CPACR_FP_TRAP_NONE	0x3
+
+/* SCR definitions */
+#define SCR_TWE_BIT		(1 << 13)
+#define SCR_TWI_BIT		(1 << 12)
+#define SCR_SIF_BIT		(1 << 9)
+#define SCR_HCE_BIT		(1 << 8)
+#define SCR_SCD_BIT		(1 << 7)
+#define SCR_NET_BIT		(1 << 6)
+#define SCR_AW_BIT		(1 << 5)
+#define SCR_FW_BIT		(1 << 4)
+#define SCR_EA_BIT		(1 << 3)
+#define SCR_FIQ_BIT		(1 << 2)
+#define SCR_IRQ_BIT		(1 << 1)
+#define SCR_NS_BIT		(1 << 0)
+#define SCR_VALID_BIT_MASK	0x33ff
+
+#define GET_NS_BIT(scr)		((scr) & SCR_NS_BIT)
+
+/* HCR definitions */
+#define HCR_AMO_BIT		(1 << 5)
+#define HCR_IMO_BIT		(1 << 4)
+#define HCR_FMO_BIT		(1 << 3)
+
+/* CNTHCTL definitions */
+#define EVNTEN_BIT		(1 << 2)
+#define PL1PCEN_BIT		(1 << 1)
+#define PL1PCTEN_BIT		(1 << 0)
+
+/* CNTKCTL definitions */
+#define PL0PTEN_BIT		(1 << 9)
+#define PL0VTEN_BIT		(1 << 8)
+#define PL0PCTEN_BIT		(1 << 0)
+#define PL0VCTEN_BIT		(1 << 1)
+#define EVNTEN_BIT		(1 << 2)
+#define EVNTDIR_BIT		(1 << 3)
+#define EVNTI_SHIFT		4
+#define EVNTI_MASK		0xf
+
+/* HCPTR definitions */
+#define TCPAC_BIT		(1 << 31)
+#define TTA_BIT			(1 << 20)
+#define TCP11_BIT		(1 << 11)
+#define TCP10_BIT		(1 << 10)
+
+/* NSACR definitions */
+#define NSASEDIS_BIT		(1 << 15)
+#define NASCR_CP11_BIT		(1 << 11)
+#define NASCR_CP10_BIT		(1 << 10)
+
+/* CPACR definitions */
+#define ASEDIS_BIT		(1 << 31)
+#define TRCDIS_BIT		(1 << 28)
+#define CPACR_CP11_SHIFT	22
+#define CPACR_CP10_SHIFT	20
+#define CPACR_ENABLE_FP_ACCESS	(0x3 << CPACR_CP11_SHIFT |\
+					0x3 << CPACR_CP10_SHIFT)
+
+/* FPEXC definitions */
+#define FPEXC_EN_BIT		(1 << 30)
+
+/* SPSR/CPSR definitions */
+#define SPSR_FIQ_BIT		(1 << 0)
+#define SPSR_IRQ_BIT		(1 << 1)
+#define SPSR_ABT_BIT		(1 << 2)
+#define SPSR_AIF_SHIFT		6
+#define SPSR_AIF_MASK		0x7
+
+#define SPSR_E_SHIFT		9
+#define SPSR_E_MASK		0x1
+#define SPSR_E_LITTLE		0
+#define SPSR_E_BIG		1
+
+#define SPSR_T_SHIFT		5
+#define SPSR_T_MASK		0x1
+#define SPSR_T_ARM		0
+#define SPSR_T_THUMB		1
+
+#define SPSR_MODE_SHIFT		0
+#define SPSR_MODE_MASK		0x7
+
+
+#define DISABLE_ALL_EXCEPTIONS \
+		(SPSR_FIQ_BIT | SPSR_IRQ_BIT | SPSR_ABT_BIT)
+
+/*
+ * TTBCR definitions
+ */
+/* The ARM Trusted Firmware uses the long descriptor format */
+#define TTBCR_EAE_BIT		(1 << 31)
+
+#define TTBCR_SH1_NON_SHAREABLE		(0x0 << 28)
+#define TTBCR_SH1_OUTER_SHAREABLE	(0x2 << 28)
+#define TTBCR_SH1_INNER_SHAREABLE	(0x3 << 28)
+
+#define TTBCR_RGN1_OUTER_NC	(0x0 << 26)
+#define TTBCR_RGN1_OUTER_WBA	(0x1 << 26)
+#define TTBCR_RGN1_OUTER_WT	(0x2 << 26)
+#define TTBCR_RGN1_OUTER_WBNA	(0x3 << 26)
+
+#define TTBCR_RGN1_INNER_NC	(0x0 << 24)
+#define TTBCR_RGN1_INNER_WBA	(0x1 << 24)
+#define TTBCR_RGN1_INNER_WT	(0x2 << 24)
+#define TTBCR_RGN1_INNER_WBNA	(0x3 << 24)
+
+#define TTBCR_EPD1_BIT		(1 << 23)
+#define TTBCR_A1_BIT		(1 << 22)
+
+#define TTBCR_T1SZ_SHIFT	16
+#define TTBCR_T1SZ_MASK		(0x7)
+
+#define TTBCR_SH0_NON_SHAREABLE		(0x0 << 12)
+#define TTBCR_SH0_OUTER_SHAREABLE	(0x2 << 12)
+#define TTBCR_SH0_INNER_SHAREABLE	(0x3 << 12)
+
+#define TTBCR_RGN0_OUTER_NC	(0x0 << 10)
+#define TTBCR_RGN0_OUTER_WBA	(0x1 << 10)
+#define TTBCR_RGN0_OUTER_WT	(0x2 << 10)
+#define TTBCR_RGN0_OUTER_WBNA	(0x3 << 10)
+
+#define TTBCR_RGN0_INNER_NC	(0x0 << 8)
+#define TTBCR_RGN0_INNER_WBA	(0x1 << 8)
+#define TTBCR_RGN0_INNER_WT	(0x2 << 8)
+#define TTBCR_RGN0_INNER_WBNA	(0x3 << 8)
+
+#define TTBCR_EPD0_BIT		(1 << 7)
+#define TTBCR_T0SZ_SHIFT	0
+#define TTBCR_T0SZ_MASK		(0x7)
+
+#define MODE_RW_SHIFT		0x4
+#define MODE_RW_MASK		0x1
+#define MODE_RW_32		0x1
+
+#define MODE32_SHIFT		0
+#define MODE32_MASK		0x1f
+#define MODE32_usr		0x10
+#define MODE32_fiq		0x11
+#define MODE32_irq		0x12
+#define MODE32_svc		0x13
+#define MODE32_mon		0x16
+#define MODE32_abt		0x17
+#define MODE32_hyp		0x1a
+#define MODE32_und		0x1b
+#define MODE32_sys		0x1f
+
+#define GET_M32(mode)		(((mode) >> MODE32_SHIFT) & MODE32_MASK)
+
+#define SPSR_MODE32(mode, isa, endian, aif)		\
+	(MODE_RW_32 << MODE_RW_SHIFT |			\
+	((mode) & MODE32_MASK) << MODE32_SHIFT |	\
+	((isa) & SPSR_T_MASK) << SPSR_T_SHIFT |		\
+	((endian) & SPSR_E_MASK) << SPSR_E_SHIFT |	\
+	((aif) & SPSR_AIF_MASK) << SPSR_AIF_SHIFT)
+
+/*
+ * CTR definitions
+ */
+#define CTR_CWG_SHIFT		24
+#define CTR_CWG_MASK		0xf
+#define CTR_ERG_SHIFT		20
+#define CTR_ERG_MASK		0xf
+#define CTR_DMINLINE_SHIFT	16
+#define CTR_DMINLINE_WIDTH	4
+#define CTR_DMINLINE_MASK	((1 << 4) - 1)
+#define CTR_L1IP_SHIFT		14
+#define CTR_L1IP_MASK		0x3
+#define CTR_IMINLINE_SHIFT	0
+#define CTR_IMINLINE_MASK	0xf
+
+#define MAX_CACHE_LINE_SIZE	0x800 /* 2KB */
+
+/*******************************************************************************
+ * Definitions of register offsets and fields in the CNTCTLBase Frame of the
+ * system level implementation of the Generic Timer.
+ ******************************************************************************/
+#define CNTNSAR			0x4
+#define CNTNSAR_NS_SHIFT(x)	(x)
+
+#define CNTACR_BASE(x)		(0x40 + ((x) << 2))
+#define CNTACR_RPCT_SHIFT	0x0
+#define CNTACR_RVCT_SHIFT	0x1
+#define CNTACR_RFRQ_SHIFT	0x2
+#define CNTACR_RVOFF_SHIFT	0x3
+#define CNTACR_RWVT_SHIFT	0x4
+#define CNTACR_RWPT_SHIFT	0x5
+
+/* MAIR macros */
+#define MAIR0_ATTR_SET(attr, index)	((attr) << ((index) << 3))
+#define MAIR1_ATTR_SET(attr, index)	((attr) << (((index) - 3) << 3))
+
+/* System register defines. The format is: coproc, opc1, CRn, CRm, opc2 */
+#define SCR		p15, 0, c1, c1, 0
+#define SCTLR		p15, 0, c1, c0, 0
+#define MPIDR		p15, 0, c0, c0, 5
+#define MIDR		p15, 0, c0, c0, 0
+#define VBAR		p15, 0, c12, c0, 0
+#define MVBAR		p15, 0, c12, c0, 1
+#define NSACR		p15, 0, c1, c1, 2
+#define CPACR		p15, 0, c1, c0, 2
+#define DCCIMVAC	p15, 0, c7, c14, 1
+#define DCCMVAC		p15, 0, c7, c10, 1
+#define DCIMVAC		p15, 0, c7, c6, 1
+#define DCCISW		p15, 0, c7, c14, 2
+#define DCCSW		p15, 0, c7, c10, 2
+#define DCISW		p15, 0, c7, c6, 2
+#define CTR		p15, 0, c0, c0, 1
+#define CNTFRQ		p15, 0, c14, c0, 0
+#define ID_PFR1		p15, 0, c0, c1, 1
+#define MAIR0		p15, 0, c10, c2, 0
+#define MAIR1		p15, 0, c10, c2, 1
+#define TTBCR		p15, 0, c2, c0, 2
+#define TTBR0		p15, 0, c2, c0, 0
+#define TTBR1		p15, 0, c2, c0, 1
+#define TLBIALL		p15, 0, c8, c7, 0
+#define TLBIALLIS	p15, 0, c8, c3, 0
+#define TLBIMVA		p15, 0, c8, c7, 1
+#define TLBIMVAA	p15, 0, c8, c7, 3
+#define HSCTLR		p15, 4, c1, c0, 0
+#define HCR		p15, 4, c1, c1, 0
+#define HCPTR		p15, 4, c1, c1, 2
+#define CNTHCTL		p15, 4, c14, c1, 0
+#define VPIDR		p15, 4, c0, c0, 0
+#define VMPIDR		p15, 4, c0, c0, 5
+#define ISR		p15, 0, c12, c1, 0
+#define CLIDR		p15, 1, c0, c0, 1
+#define CSSELR		p15, 2, c0, c0, 0
+#define CCSIDR		p15, 1, c0, c0, 0
+
+/* GICv3 CPU Interface system register defines. The format is: coproc, opc1, CRn, CRm, opc2 */
+#define ICC_IAR1	p15, 0, c12, c12, 0
+#define ICC_IAR0	p15, 0, c12, c8, 0
+#define ICC_EOIR1	p15, 0, c12, c12, 1
+#define ICC_EOIR0	p15, 0, c12, c8, 1
+#define ICC_HPPIR1	p15, 0, c12, c12, 2
+#define ICC_HPPIR0	p15, 0, c12, c8, 2
+#define ICC_BPR1	p15, 0, c12, c12, 3
+#define ICC_BPR0	p15, 0, c12, c8, 3
+#define ICC_DIR		p15, 0, c12, c11, 1
+#define ICC_PMR		p15, 0, c4, c6, 0
+#define ICC_RPR		p15, 0, c12, c11, 3
+#define ICC_CTLR	p15, 0, c12, c12, 4
+#define ICC_MCTLR	p15, 6, c12, c12, 4
+#define ICC_SRE		p15, 0, c12, c12, 5
+#define ICC_HSRE	p15, 4, c12, c9, 5
+#define ICC_MSRE	p15, 6, c12, c12, 5
+#define ICC_IGRPEN0	p15, 0, c12, c12, 6
+#define ICC_IGRPEN1	p15, 0, c12, c12, 7
+#define ICC_MGRPEN1	p15, 6, c12, c12, 7
+
+/* 64 bit system register defines. The format is: coproc, opc1, CRm */
+#define TTBR0_64	p15, 0, c2
+#define TTBR1_64	p15, 1, c2
+#define CNTVOFF_64	p15, 4, c14
+#define VTTBR_64	p15, 6, c2
+#define CNTPCT_64	p15, 0, c14
+
+/* 64 bit GICv3 CPU Interface system register defines. The format is: coproc, opc1, CRm */
+#define ICC_SGI1R_EL1_64	p15, 0, c12
+#define ICC_ASGI1R_EL1_64	p15, 1, c12
+#define ICC_SGI0R_EL1_64	p15, 2, c12
+
+#endif /* __ARCH_H__ */
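
As a usage illustration (not part of the patch), the SPSR_MODE32() macro above is typically used to construct the SPSR for the next image's entry point. A minimal sketch follows; the HYP-vs-SVC mode choice is only an example policy, not something mandated by this header.

/* Illustrative sketch only: build an SPSR for a 32-bit non-secure entry.
 * The hyp-vs-svc decision below is an example policy, not part of arch.h. */
#include <arch.h>
#include <stdint.h>

static uint32_t example_ns_entry_spsr(int hyp_mode_available)
{
	unsigned int mode = hyp_mode_available ? MODE32_hyp : MODE32_svc;

	/* ARM (not Thumb) state, little endian, A/I/F masked on entry */
	return SPSR_MODE32(mode, SPSR_T_ARM, SPSR_E_LITTLE,
			   DISABLE_ALL_EXCEPTIONS);
}
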
diff --git a/include/lib/aarch32/arch_helpers.h b/include/lib/aarch32/arch_helpers.h
new file mode 100644
index 0000000..ddf660b
--- /dev/null
+++ b/include/lib/aarch32/arch_helpers.h
@@ -0,0 +1,292 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __ARCH_HELPERS_H__
+#define __ARCH_HELPERS_H__
+
+#include <arch.h>	/* for additional register definitions */
+#include <stdint.h>
+#include <types.h>
+
+/**********************************************************************
+ * Macros which create inline functions to read or write CPU system
+ * registers
+ *********************************************************************/
+
+#define _DEFINE_COPROCR_WRITE_FUNC(_name, coproc, opc1, CRn, CRm, opc2)	\
+static inline void write_## _name(u_register_t v)			\
+{									\
+	__asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+#define _DEFINE_COPROCR_READ_FUNC(_name, coproc, opc1, CRn, CRm, opc2)	\
+static inline u_register_t read_ ## _name(void)				\
+{									\
+	u_register_t v;							\
+	__asm__ volatile ("mrc "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : "=r" (v));\
+	return v;							\
+}
+
+/*
+ *  The undocumented %Q and %R extended asm operands are used to implement
+ *  the 64 bit `mrrc` and `mcrr` instructions below. For GCC versions < 4.6
+ *  they only work correctly on Little Endian systems; from GCC 4.6 onwards,
+ *  both Little Endian and Big Endian systems generate the right instruction
+ *  encoding.
+ */
+#if !(__GNUC__ > (4) || __GNUC__ == (4) && __GNUC_MINOR__ >= (6))
+#error "GCC 4.6 or above is required to build AArch32 Trusted Firmware"
+#endif
+
+#define _DEFINE_COPROCR_WRITE_FUNC_64(_name, coproc, opc1, CRm)		\
+static inline void write64_## _name(uint64_t v)				\
+{									\
+	__asm__ volatile ("mcrr "#coproc","#opc1", %Q0, %R0,"#CRm : : "r" (v));\
+}
+
+#define _DEFINE_COPROCR_READ_FUNC_64(_name, coproc, opc1, CRm)		\
+static inline uint64_t read64_## _name(void)				\
+{	uint64_t v;							\
+	__asm__ volatile ("mrrc "#coproc","#opc1", %Q0, %R0,"#CRm : "=r" (v));\
+	return v;							\
+}
+
+#define _DEFINE_SYSREG_READ_FUNC(_name, _reg_name)			\
+static inline u_register_t read_ ## _name(void)				\
+{									\
+	u_register_t v;							\
+	__asm__ volatile ("mrs %0, " #_reg_name : "=r" (v));		\
+	return v;							\
+}
+
+#define _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)			\
+static inline void write_ ## _name(u_register_t v)			\
+{									\
+	__asm__ volatile ("msr " #_reg_name ", %0" : : "r" (v));	\
+}
+
+#define _DEFINE_SYSREG_WRITE_CONST_FUNC(_name, _reg_name)		\
+static inline void write_ ## _name(const u_register_t v)		\
+{									\
+	__asm__ volatile ("msr " #_reg_name ", %0" : : "i" (v));	\
+}
+
+/* Define read function for coproc register */
+#define DEFINE_COPROCR_READ_FUNC(_name, ...) 				\
+	_DEFINE_COPROCR_READ_FUNC(_name, __VA_ARGS__)
+
+/* Define read & write function for coproc register */
+#define DEFINE_COPROCR_RW_FUNCS(_name, ...) 				\
+	_DEFINE_COPROCR_READ_FUNC(_name, __VA_ARGS__)			\
+	_DEFINE_COPROCR_WRITE_FUNC(_name, __VA_ARGS__)
+
+/* Define 64 bit read function for coproc register */
+#define DEFINE_COPROCR_READ_FUNC_64(_name, ...) 			\
+	_DEFINE_COPROCR_READ_FUNC_64(_name, __VA_ARGS__)
+
+/* Define 64 bit read & write function for coproc register */
+#define DEFINE_COPROCR_RW_FUNCS_64(_name, ...) 				\
+	_DEFINE_COPROCR_READ_FUNC_64(_name, __VA_ARGS__)		\
+	_DEFINE_COPROCR_WRITE_FUNC_64(_name, __VA_ARGS__)
+
+/* Define read & write function for system register */
+#define DEFINE_SYSREG_RW_FUNCS(_name)					\
+	_DEFINE_SYSREG_READ_FUNC(_name, _name)				\
+	_DEFINE_SYSREG_WRITE_FUNC(_name, _name)
+
+/**********************************************************************
+ * Macros to create inline functions for tlbi operations
+ *********************************************************************/
+
+#define _DEFINE_TLBIOP_FUNC(_op, coproc, opc1, CRn, CRm, opc2)		\
+static inline void tlbi##_op(void)					\
+{									\
+	u_register_t v = 0;						\
+	__asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+#define _DEFINE_TLBIOP_PARAM_FUNC(_op, coproc, opc1, CRn, CRm, opc2)	\
+static inline void tlbi##_op(u_register_t v)				\
+{									\
+	__asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+/* Define function for simple TLBI operation */
+#define DEFINE_TLBIOP_FUNC(_op, ...)					\
+	_DEFINE_TLBIOP_FUNC(_op, __VA_ARGS__)
+
+/* Define function for TLBI operation with register parameter */
+#define DEFINE_TLBIOP_PARAM_FUNC(_op, ...)				\
+	_DEFINE_TLBIOP_PARAM_FUNC(_op, __VA_ARGS__)
+
+/**********************************************************************
+ * Macros to create inline functions for DC operations
+ *********************************************************************/
+#define _DEFINE_DCOP_PARAM_FUNC(_op, coproc, opc1, CRn, CRm, opc2)	\
+static inline void dc##_op(u_register_t v)				\
+{									\
+	__asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+/* Define function for DC operation with register parameter */
+#define DEFINE_DCOP_PARAM_FUNC(_op, ...)				\
+	_DEFINE_DCOP_PARAM_FUNC(_op, __VA_ARGS__)
+
+/**********************************************************************
+ * Macros to create inline functions for system instructions
+ *********************************************************************/
+ /* Define function for simple system instruction */
+#define DEFINE_SYSOP_FUNC(_op)						\
+static inline void _op(void)						\
+{									\
+	__asm__ (#_op);							\
+}
+
+
+/* Define function for system instruction with type specifier */
+#define DEFINE_SYSOP_TYPE_FUNC(_op, _type)				\
+static inline void _op ## _type(void)					\
+{									\
+	__asm__ (#_op " " #_type);					\
+}
+
+/* Define function for system instruction with register parameter */
+#define DEFINE_SYSOP_TYPE_PARAM_FUNC(_op, _type)			\
+static inline void _op ## _type(u_register_t v)				\
+{									\
+	 __asm__ (#_op " " #_type ", %0" : : "r" (v));			\
+}
+
+void flush_dcache_range(uintptr_t addr, size_t size);
+void clean_dcache_range(uintptr_t addr, size_t size);
+void inv_dcache_range(uintptr_t addr, size_t size);
+
+DEFINE_SYSOP_FUNC(wfi)
+DEFINE_SYSOP_FUNC(wfe)
+DEFINE_SYSOP_FUNC(sev)
+DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
+DEFINE_SYSOP_FUNC(isb)
+
+DEFINE_SYSREG_RW_FUNCS(spsr)
+DEFINE_SYSREG_RW_FUNCS(cpsr)
+
+/*******************************************************************************
+ * System register accessor prototypes
+ ******************************************************************************/
+DEFINE_COPROCR_READ_FUNC(mpidr, MPIDR)
+DEFINE_COPROCR_READ_FUNC(midr, MIDR)
+DEFINE_COPROCR_READ_FUNC(id_pfr1, ID_PFR1)
+DEFINE_COPROCR_READ_FUNC(isr, ISR)
+DEFINE_COPROCR_READ_FUNC(clidr, CLIDR)
+DEFINE_COPROCR_READ_FUNC_64(cntpct, CNTPCT_64)
+
+DEFINE_COPROCR_RW_FUNCS(scr, SCR)
+DEFINE_COPROCR_RW_FUNCS(ctr, CTR)
+DEFINE_COPROCR_RW_FUNCS(sctlr, SCTLR)
+DEFINE_COPROCR_RW_FUNCS(hsctlr, HSCTLR)
+DEFINE_COPROCR_RW_FUNCS(hcr, HCR)
+DEFINE_COPROCR_RW_FUNCS(hcptr, HCPTR)
+DEFINE_COPROCR_RW_FUNCS(cntfrq, CNTFRQ)
+DEFINE_COPROCR_RW_FUNCS(cnthctl, CNTHCTL)
+DEFINE_COPROCR_RW_FUNCS(mair0, MAIR0)
+DEFINE_COPROCR_RW_FUNCS(mair1, MAIR1)
+DEFINE_COPROCR_RW_FUNCS(ttbcr, TTBCR)
+DEFINE_COPROCR_RW_FUNCS(ttbr0, TTBR0)
+DEFINE_COPROCR_RW_FUNCS_64(ttbr0, TTBR0_64)
+DEFINE_COPROCR_RW_FUNCS(ttbr1, TTBR1)
+DEFINE_COPROCR_RW_FUNCS(vpidr, VPIDR)
+DEFINE_COPROCR_RW_FUNCS(vmpidr, VMPIDR)
+DEFINE_COPROCR_RW_FUNCS_64(vttbr, VTTBR_64)
+DEFINE_COPROCR_RW_FUNCS_64(ttbr1, TTBR1_64)
+DEFINE_COPROCR_RW_FUNCS_64(cntvoff, CNTVOFF_64)
+DEFINE_COPROCR_RW_FUNCS(csselr, CSSELR)
+
+DEFINE_COPROCR_RW_FUNCS(icc_sre_el1, ICC_SRE)
+DEFINE_COPROCR_RW_FUNCS(icc_sre_el2, ICC_HSRE)
+DEFINE_COPROCR_RW_FUNCS(icc_sre_el3, ICC_MSRE)
+DEFINE_COPROCR_RW_FUNCS(icc_pmr_el1, ICC_PMR)
+DEFINE_COPROCR_RW_FUNCS(icc_igrpen1_el3, ICC_MGRPEN1)
+DEFINE_COPROCR_RW_FUNCS(icc_igrpen0_el1, ICC_IGRPEN0)
+DEFINE_COPROCR_RW_FUNCS(icc_hppir0_el1, ICC_HPPIR0)
+DEFINE_COPROCR_RW_FUNCS(icc_hppir1_el1, ICC_HPPIR1)
+DEFINE_COPROCR_RW_FUNCS(icc_iar0_el1, ICC_IAR0)
+DEFINE_COPROCR_RW_FUNCS(icc_iar1_el1, ICC_IAR1)
+DEFINE_COPROCR_RW_FUNCS(icc_eoir0_el1, ICC_EOIR0)
+DEFINE_COPROCR_RW_FUNCS(icc_eoir1_el1, ICC_EOIR1)
+
+/*
+ * TLBI operation prototypes
+ */
+DEFINE_TLBIOP_FUNC(all, TLBIALL)
+DEFINE_TLBIOP_FUNC(allis, TLBIALLIS)
+DEFINE_TLBIOP_PARAM_FUNC(mva, TLBIMVA)
+DEFINE_TLBIOP_PARAM_FUNC(mvaa, TLBIMVAA)
+
+/*
+ * DC operation prototypes
+ */
+DEFINE_DCOP_PARAM_FUNC(civac, DCCIMVAC)
+DEFINE_DCOP_PARAM_FUNC(ivac, DCIMVAC)
+DEFINE_DCOP_PARAM_FUNC(cvac, DCCMVAC)
+
+/* Previously defined accessor functions with incomplete register names */
+#define dsb()			dsbsy()
+
+#define IS_IN_SECURE() \
+	(GET_NS_BIT(read_scr()) == 0)
+
+ /*
+  * If EL3 is AArch32, then secure PL1 and monitor mode correspond to EL3
+  */
+#define IS_IN_EL3() \
+	((GET_M32(read_cpsr()) == MODE32_mon) ||	\
+		(IS_IN_SECURE() && (GET_M32(read_cpsr()) != MODE32_usr)))
+
+/* Macros for compatibility with AArch64 system registers */
+#define read_mpidr_el1()	read_mpidr()
+
+#define read_scr_el3()		read_scr()
+#define write_scr_el3(_v)	write_scr(_v)
+
+#define read_hcr_el2()		read_hcr()
+#define write_hcr_el2(_v)	write_hcr(_v)
+
+#define read_cpacr_el1()	read_cpacr()
+#define write_cpacr_el1(_v)	write_cpacr(_v)
+
+#define read_cntfrq_el0()	read_cntfrq()
+#define write_cntfrq_el0(_v)	write_cntfrq(_v)
+#define read_isr_el1()		read_isr()
+
+#define read_cntpct_el0()	read64_cntpct()
+
+#endif /* __ARCH_HELPERS_H__ */
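
To show how the accessor-generating macros above are meant to be consumed, here is a small sketch (illustrative only, not part of the patch); the function names are made up, and every register helper used is one generated by the definitions in this header.

/* Illustrative sketch only: the macros above expand into normal inline
 * functions, so C code can manipulate AArch32 system registers directly. */
#include <arch.h>
#include <arch_helpers.h>

static void example_set_ns_bit(void)
{
	/* read_scr()/write_scr() come from DEFINE_COPROCR_RW_FUNCS(scr, SCR) */
	write_scr(read_scr() | SCR_NS_BIT);
	isb();
}

static unsigned int example_affinity_level0(void)
{
	/* read_mpidr() comes from DEFINE_COPROCR_READ_FUNC(mpidr, MPIDR) */
	return MPIDR_AFFLVL0_VAL(read_mpidr());
}
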
diff --git a/include/lib/aarch32/smcc_helpers.h b/include/lib/aarch32/smcc_helpers.h
new file mode 100644
index 0000000..5aeca22
--- /dev/null
+++ b/include/lib/aarch32/smcc_helpers.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SMCC_HELPERS_H__
+#define __SMCC_HELPERS_H__
+
+#include <smcc.h>
+
+/* These are offsets to registers in smc_ctx_t */
+#define SMC_CTX_GPREG_R0	0x0
+#define SMC_CTX_GPREG_R1	0x4
+#define SMC_CTX_GPREG_R2	0x8
+#define SMC_CTX_GPREG_R3	0xC
+#define SMC_CTX_GPREG_R4	0x10
+#define SMC_CTX_SP_USR		0x34
+#define SMC_CTX_SPSR_MON	0x78
+#define SMC_CTX_LR_MON		0x7C
+#define SMC_CTX_SIZE		0x80
+
+#ifndef __ASSEMBLY__
+#include <cassert.h>
+#include <types.h>
+
+/*
+ * The generic structure to save arguments and callee-saved registers during
+ * an SMC. This structure is also used to store the return values after
+ * completion of the SMC service.
+ */
+typedef struct smc_ctx {
+	u_register_t r0;
+	u_register_t r1;
+	u_register_t r2;
+	u_register_t r3;
+	u_register_t r4;
+	u_register_t r5;
+	u_register_t r6;
+	u_register_t r7;
+	u_register_t r8;
+	u_register_t r9;
+	u_register_t r10;
+	u_register_t r11;
+	u_register_t r12;
+	/* spsr_usr doesn't exist */
+	u_register_t sp_usr;
+	u_register_t lr_usr;
+	u_register_t spsr_irq;
+	u_register_t sp_irq;
+	u_register_t lr_irq;
+	u_register_t spsr_fiq;
+	u_register_t sp_fiq;
+	u_register_t lr_fiq;
+	u_register_t spsr_svc;
+	u_register_t sp_svc;
+	u_register_t lr_svc;
+	u_register_t spsr_abt;
+	u_register_t sp_abt;
+	u_register_t lr_abt;
+	u_register_t spsr_und;
+	u_register_t sp_und;
+	u_register_t lr_und;
+	u_register_t spsr_mon;
+	/* No need to save 'sp_mon' because we are already in monitor mode */
+	u_register_t lr_mon;
+} smc_ctx_t;
+
+/*
+ * Compile time assertions related to the 'smc_context' structure to
+ * ensure that the assembler and the compiler views of the offsets of
+ * the structure members are the same.
+ */
+CASSERT(SMC_CTX_GPREG_R0 == __builtin_offsetof(smc_ctx_t, r0), \
+	assert_smc_ctx_greg_r0_offset_mismatch);
+CASSERT(SMC_CTX_GPREG_R1 == __builtin_offsetof(smc_ctx_t, r1), \
+	assert_smc_ctx_greg_r1_offset_mismatch);
+CASSERT(SMC_CTX_GPREG_R2 == __builtin_offsetof(smc_ctx_t, r2), \
+	assert_smc_ctx_greg_r2_offset_mismatch);
+CASSERT(SMC_CTX_GPREG_R3 == __builtin_offsetof(smc_ctx_t, r3), \
+	assert_smc_ctx_greg_r3_offset_mismatch);
+CASSERT(SMC_CTX_GPREG_R4 == __builtin_offsetof(smc_ctx_t, r4), \
+	assert_smc_ctx_greg_r4_offset_mismatch);
+CASSERT(SMC_CTX_SP_USR == __builtin_offsetof(smc_ctx_t, sp_usr), \
+	assert_smc_ctx_sp_usr_offset_mismatch);
+CASSERT(SMC_CTX_LR_MON == __builtin_offsetof(smc_ctx_t, lr_mon), \
+	assert_smc_ctx_lr_mon_offset_mismatch);
+CASSERT(SMC_CTX_SPSR_MON == __builtin_offsetof(smc_ctx_t, spsr_mon), \
+	assert_smc_ctx_spsr_mon_offset_mismatch);
+
+CASSERT(SMC_CTX_SIZE == sizeof(smc_ctx_t), assert_smc_ctx_size_mismatch);
+
+/* Convenience macros to return from SMC handler */
+#define SMC_RET0(_h) {				\
+	return (uintptr_t)(_h);			\
+}
+#define SMC_RET1(_h, _r0) {			\
+	((smc_ctx_t *)(_h))->r0 = (_r0);	\
+	SMC_RET0(_h);				\
+}
+#define SMC_RET2(_h, _r0, _r1) {		\
+	((smc_ctx_t *)(_h))->r1 = (_r1);	\
+	SMC_RET1(_h, (_r0));			\
+}
+#define SMC_RET3(_h, _r0, _r1, _r2) {		\
+	((smc_ctx_t *)(_h))->r2 = (_r2);	\
+	SMC_RET2(_h, (_r0), (_r1));		\
+}
+#define SMC_RET4(_h, _r0, _r1, _r2, _r3) {	\
+	((smc_ctx_t *)(_h))->r3 = (_r3);	\
+	SMC_RET3(_h, (_r0), (_r1), (_r2));	\
+}
+
+/* Return a UUID in the SMC return registers */
+#define SMC_UUID_RET(_h, _uuid) \
+	SMC_RET4(handle, ((const uint32_t *) &(_uuid))[0], \
+			 ((const uint32_t *) &(_uuid))[1], \
+			 ((const uint32_t *) &(_uuid))[2], \
+			 ((const uint32_t *) &(_uuid))[3])
+
+/*
+ * Helper macro to retrieve the SMC parameters from smc_ctx_t.
+ */
+#define get_smc_params_from_ctx(_hdl, _r1, _r2, _r3, _r4) {	\
+		_r1 = ((smc_ctx_t *)_hdl)->r1;		\
+		_r2 = ((smc_ctx_t *)_hdl)->r2;		\
+		_r3 = ((smc_ctx_t *)_hdl)->r3;		\
+		_r4 = ((smc_ctx_t *)_hdl)->r4;		\
+		}
+
+/* ------------------------------------------------------------------------
+ * Helper APIs for setting and retrieving the appropriate `smc_ctx_t`.
+ * These functions need to be implemented by the BL including this library.
+ * ------------------------------------------------------------------------
+ */
+
+/* Get the pointer to `smc_ctx_t` corresponding to the security state. */
+void *smc_get_ctx(int security_state);
+
+/* Set the next `smc_ctx_t` corresponding to the security state. */
+void smc_set_next_ctx(int security_state);
+
+/* Get the pointer to next `smc_ctx_t` already set by `smc_set_next_ctx()`. */
+void *smc_get_next_ctx(void);
+
+#endif /*__ASSEMBLY__*/
+#endif /* __SMCC_HELPERS_H__ */
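
The helpers above are meant to be used from runtime service handlers. The sketch below (illustrative only; the handler name, function ID and simplified prototype are assumptions made for the example) shows the intended pattern of get_smc_params_from_ctx() followed by an SMC_RETx() macro.

/* Illustrative sketch only: a hypothetical AArch32 SMC handler. */
#include <smcc_helpers.h>
#include <stdint.h>

#define EXAMPLE_FID_ADD		0x82000001	/* hypothetical function ID */

static uintptr_t example_smc_handler(uint32_t smc_fid, void *handle)
{
	u_register_t r1, r2, r3, r4;

	/* Pull the SMC arguments out of the saved smc_ctx_t */
	get_smc_params_from_ctx(handle, r1, r2, r3, r4);
	(void)r3;	/* unused in this example */
	(void)r4;

	if (smc_fid == EXAMPLE_FID_ADD)
		SMC_RET1(handle, r1 + r2);	/* result goes back in r0 */

	SMC_RET1(handle, SMC_UNK);
}
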
diff --git a/include/lib/aarch32/smcc_macros.S b/include/lib/aarch32/smcc_macros.S
new file mode 100644
index 0000000..c80c3e4
--- /dev/null
+++ b/include/lib/aarch32/smcc_macros.S
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __SMCC_MACROS_S__
+#define __SMCC_MACROS_S__
+
+#include <arch.h>
+
+/*
+ * Macro to save the general purpose registers, including the banked
+ * registers, to the SMC context on entry due to an SMC call. On return, r0
+ * contains the pointer to the `smc_ctx_t`.
+ */
+	.macro smcc_save_gp_mode_regs
+	push	{r0-r3, lr}
+
+	ldcopr	r0, SCR
+	and	r0, r0, #SCR_NS_BIT
+	bl	smc_get_ctx
+
+	/* Save r4 - r12 in the SMC context */
+	add	r1, r0, #SMC_CTX_GPREG_R4
+	stm	r1!, {r4-r12}
+
+	/*
+	 * Pop r0 - r3, lr to r4 - r7, lr from stack and then save
+	 * it to SMC context.
+	 */
+	pop	{r4-r7, lr}
+	stm	r0, {r4-r7}
+
+	/* Save the banked registers including the current SPSR and LR */
+	mrs	r4, sp_usr
+	mrs	r5, lr_usr
+	mrs	r6, spsr_irq
+	mrs	r7, sp_irq
+	mrs	r8, lr_irq
+	mrs	r9, spsr_fiq
+	mrs	r10, sp_fiq
+	mrs	r11, lr_fiq
+	mrs	r12, spsr_svc
+	stm	r1!, {r4-r12}
+
+	mrs	r4, sp_svc
+	mrs	r5, lr_svc
+	mrs	r6, spsr_abt
+	mrs	r7, sp_abt
+	mrs	r8, lr_abt
+	mrs	r9, spsr_und
+	mrs	r10, sp_und
+	mrs	r11, lr_und
+	mrs	r12, spsr
+	stm	r1!, {r4-r12, lr}
+
+	.endm
+
+/*
+ * Macro to restore the general purpose registers, including the banked
+ * registers, from the SMC context prior to exit from the SMC call.
+ * r0 must point to the `smc_ctx_t` to restore from.
+ */
+	.macro smcc_restore_gp_mode_regs
+
+	/* Restore the banked registers including the current SPSR and LR */
+	add	r1, r0, #SMC_CTX_SP_USR
+	ldm	r1!, {r4-r12}
+	msr	sp_usr, r4
+	msr	lr_usr, r5
+	msr	spsr_irq, r6
+	msr	sp_irq, r7
+	msr	lr_irq, r8
+	msr	spsr_fiq, r9
+	msr	sp_fiq, r10
+	msr	lr_fiq, r11
+	msr	spsr_svc, r12
+
+	ldm	r1!, {r4-r12, lr}
+	msr	sp_svc, r4
+	msr	lr_svc, r5
+	msr	spsr_abt, r6
+	msr	sp_abt, r7
+	msr	lr_abt, r8
+	msr	spsr_und, r9
+	msr	sp_und, r10
+	msr	lr_und, r11
+	msr	spsr, r12
+
+	/* Restore the rest of the general purpose registers */
+	ldm	r0, {r0-r12}
+	.endm
+
+#endif /* __SMCC_MACROS_S__ */
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index 07bbd89..fa5cb12 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -334,8 +334,6 @@
 #define CTR_IMINLINE_MASK	0xf
 
 #define MAX_CACHE_LINE_SIZE	0x800 /* 2KB */
-#define SIZE_FROM_LOG2_WORDS(n)	(4 << (n))
-
 
 /* Physical timer control register bit fields shifts and masks */
 #define CNTP_CTL_ENABLE_SHIFT   0
diff --git a/include/lib/aarch64/smcc_helpers.h b/include/lib/aarch64/smcc_helpers.h
index 617a5bc..6e63383 100644
--- a/include/lib/aarch64/smcc_helpers.h
+++ b/include/lib/aarch64/smcc_helpers.h
@@ -82,5 +82,17 @@
 			 ((const uint32_t *) &(_uuid))[2],	\
 			 ((const uint32_t *) &(_uuid))[3])
 
+/*
+ * Helper macro to retrieve the SMC parameters from cpu_context_t.
+ */
+#define get_smc_params_from_ctx(_hdl, _x1, _x2, _x3, _x4)	\
+	do {							\
+		const gp_regs_t *regs = get_gpregs_ctx(_hdl);	\
+		_x1 = read_ctx_reg(regs, CTX_GPREG_X1);		\
+		_x2 = read_ctx_reg(regs, CTX_GPREG_X2);		\
+		_x3 = read_ctx_reg(regs, CTX_GPREG_X3);		\
+		_x4 = read_ctx_reg(regs, CTX_GPREG_X4);		\
+	} while (0)
+
 #endif /*__ASSEMBLY__*/
 #endif /* __SMCC_HELPERS_H__ */
diff --git a/include/lib/cpus/aarch32/aem_generic.h b/include/lib/cpus/aarch32/aem_generic.h
new file mode 100644
index 0000000..9b31367
--- /dev/null
+++ b/include/lib/cpus/aarch32/aem_generic.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __AEM_GENERIC_H__
+#define __AEM_GENERIC_H__
+
+/* BASE AEM midr for revision 0 */
+#define BASE_AEM_MIDR 0x410FD0F0
+
+#endif /* __AEM_GENERIC_H__ */
diff --git a/include/lib/cpus/aarch32/cpu_macros.S b/include/lib/cpus/aarch32/cpu_macros.S
new file mode 100644
index 0000000..f58f3e9
--- /dev/null
+++ b/include/lib/cpus/aarch32/cpu_macros.S
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __CPU_MACROS_S__
+#define __CPU_MACROS_S__
+
+#include <arch.h>
+
+#define CPU_IMPL_PN_MASK	(MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | \
+				(MIDR_PN_MASK << MIDR_PN_SHIFT)
+
+	/*
+	 * Define the offsets to the fields in the cpu_ops structure.
+	 */
+	.struct 0
+CPU_MIDR: /* cpu_ops midr */
+	.space  4
+/* Reset fn is needed during reset */
+CPU_RESET_FUNC: /* cpu_ops reset_func */
+	.space  4
+CPU_PWR_DWN_CORE: /* cpu_ops core_pwr_dwn */
+	.space  4
+CPU_PWR_DWN_CLUSTER: /* cpu_ops cluster_pwr_dwn */
+	.space  4
+CPU_OPS_SIZE = .
+
+	/*
+	 * Convenience macro to declare cpu_ops structure.
+	 * Make sure the structure fields are as per the offsets
+	 * defined above.
+	 */
+	.macro declare_cpu_ops _name:req, _midr:req, _noresetfunc = 0
+	.section cpu_ops, "a"
+	.align 2
+	.type cpu_ops_\_name, %object
+	.word \_midr
+	.if \_noresetfunc
+	.word 0
+	.else
+	.word \_name\()_reset_func
+	.endif
+	.word \_name\()_core_pwr_dwn
+	.word \_name\()_cluster_pwr_dwn
+	.endm
+
+#endif /* __CPU_MACROS_S__ */
diff --git a/include/lib/el3_runtime/aarch32/context.h b/include/lib/el3_runtime/aarch32/context.h
new file mode 100644
index 0000000..5108141
--- /dev/null
+++ b/include/lib/el3_runtime/aarch32/context.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CONTEXT_H__
+#define __CONTEXT_H__
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'regs'
+ * structure at their correct offsets.
+ ******************************************************************************/
+#define CTX_REGS_OFFSET		0x0
+#define CTX_GPREG_R0		0x0
+#define CTX_GPREG_R1		0x4
+#define CTX_GPREG_R2		0x8
+#define CTX_GPREG_R3		0xC
+#define CTX_LR			0x10
+#define CTX_SCR			0x14
+#define CTX_SPSR		0x18
+#define CTX_NS_SCTLR		0x1C
+#define CTX_REGS_END		0x20
+
+#ifndef __ASSEMBLY__
+
+#include <cassert.h>
+#include <stdint.h>
+
+/*
+ * Common constants to help define the 'cpu_context' structure and its
+ * members below.
+ */
+#define WORD_SHIFT		2
+#define DEFINE_REG_STRUCT(name, num_regs)	\
+	typedef struct name {			\
+		uint32_t _regs[num_regs];	\
+	}  __aligned(8) name##_t
+
+/* Constants to determine the size of individual context structures */
+#define CTX_REG_ALL		(CTX_REGS_END >> WORD_SHIFT)
+
+DEFINE_REG_STRUCT(regs, CTX_REG_ALL);
+
+#undef CTX_REG_ALL
+
+#define read_ctx_reg(ctx, offset)	((ctx)->_regs[offset >> WORD_SHIFT])
+#define write_ctx_reg(ctx, offset, val)	(((ctx)->_regs[offset >> WORD_SHIFT]) \
+					 = val)
+typedef struct cpu_context {
+	regs_t regs_ctx;
+} cpu_context_t;
+
+/* Macros to access members of the 'cpu_context_t' structure */
+#define get_regs_ctx(h)		(&((cpu_context_t *) h)->regs_ctx)
+
+/*
+ * Compile time assertions related to the 'cpu_context' structure to
+ * ensure that the assembler and the compiler views of the offsets of
+ * the structure members are the same.
+ */
+CASSERT(CTX_REGS_OFFSET == __builtin_offsetof(cpu_context_t, regs_ctx), \
+	assert_core_context_regs_offset_mismatch);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __CONTEXT_H__ */
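
For illustration (not part of the patch), the reduced AArch32 context is populated with the read_ctx_reg()/write_ctx_reg() accessors in much the same way as the AArch64 gp_regs context. The helper name below and the choice to set SCR_NS_BIT here are assumptions made for the example.

/* Illustrative sketch only: filling the AArch32 cpu_context_t. */
#include <arch.h>
#include <context.h>

static void example_init_ns_regs(cpu_context_t *ctx, uint32_t ep_pc,
				 uint32_t ep_spsr)
{
	regs_t *regs = get_regs_ctx(ctx);

	write_ctx_reg(regs, CTX_LR, ep_pc);	/* entry point */
	write_ctx_reg(regs, CTX_SPSR, ep_spsr);	/* target mode/state */
	write_ctx_reg(regs, CTX_SCR,
		      read_ctx_reg(regs, CTX_SCR) | SCR_NS_BIT);
}
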
diff --git a/include/lib/el3_runtime/context_mgmt.h b/include/lib/el3_runtime/context_mgmt.h
index 672ea11..b264fc3 100644
--- a/include/lib/el3_runtime/context_mgmt.h
+++ b/include/lib/el3_runtime/context_mgmt.h
@@ -42,11 +42,6 @@
  * Function & variable prototypes
  ******************************************************************************/
 void cm_init(void);
-void *cm_get_context_by_mpidr(uint64_t mpidr,
-			      uint32_t security_state) __deprecated;
-void cm_set_context_by_mpidr(uint64_t mpidr,
-			     void *context,
-			     uint32_t security_state) __deprecated;
 void *cm_get_context_by_index(unsigned int cpu_idx,
 			      unsigned int security_state);
 void cm_set_context_by_index(unsigned int cpu_idx,
@@ -54,12 +49,12 @@
 			     unsigned int security_state);
 void *cm_get_context(uint32_t security_state);
 void cm_set_context(void *context, uint32_t security_state);
-void cm_init_context(uint64_t mpidr,
-		     const struct entry_point_info *ep) __deprecated;
 void cm_init_my_context(const struct entry_point_info *ep);
 void cm_init_context_by_index(unsigned int cpu_idx,
 			      const struct entry_point_info *ep);
 void cm_prepare_el3_exit(uint32_t security_state);
+
+#ifndef AARCH32
 void cm_el1_sysregs_context_save(uint32_t security_state);
 void cm_el1_sysregs_context_restore(uint32_t security_state);
 void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint);
@@ -71,6 +66,16 @@
 void cm_set_next_eret_context(uint32_t security_state);
 uint32_t cm_get_scr_el3(uint32_t security_state);
 
+
+void cm_init_context(uint64_t mpidr,
+		     const struct entry_point_info *ep) __deprecated;
+
+void *cm_get_context_by_mpidr(uint64_t mpidr,
+			      uint32_t security_state) __deprecated;
+void cm_set_context_by_mpidr(uint64_t mpidr,
+			     void *context,
+			     uint32_t security_state) __deprecated;
+
 /* Inline definitions */
 
 /*******************************************************************************
@@ -98,4 +103,5 @@
 			 "msr	spsel, #0\n"
 			 : : "r" (context));
 }
+#endif /* AARCH32 */
 #endif /* __CM_H__ */
diff --git a/include/lib/el3_runtime/cpu_data.h b/include/lib/el3_runtime/cpu_data.h
index 4fc801b..910b153 100644
--- a/include/lib/el3_runtime/cpu_data.h
+++ b/include/lib/el3_runtime/cpu_data.h
@@ -31,16 +31,28 @@
 #ifndef __CPU_DATA_H__
 #define __CPU_DATA_H__
 
+#ifdef AARCH32
+
+#if CRASH_REPORTING
+#error "Crash reporting is not supported in AArch32"
+#endif
+#define CPU_DATA_CPU_OPS_PTR		0x0
+
+#else /* AARCH32 */
+
 /* Offsets for the cpu_data structure */
 #define CPU_DATA_CRASH_BUF_OFFSET	0x18
+/* need enough space in crash buffer to save 8 registers */
+#define CPU_DATA_CRASH_BUF_SIZE		64
+#define CPU_DATA_CPU_OPS_PTR		0x10
+
+#endif /* AARCH32 */
+
 #if CRASH_REPORTING
 #define CPU_DATA_LOG2SIZE		7
 #else
 #define CPU_DATA_LOG2SIZE		6
 #endif
-/* need enough space in crash buffer to save 8 registers */
-#define CPU_DATA_CRASH_BUF_SIZE		64
-#define CPU_DATA_CPU_OPS_PTR		0x10
 
 #ifndef __ASSEMBLY__
 
@@ -77,7 +89,9 @@
  * used for this.
  ******************************************************************************/
 typedef struct cpu_data {
+#ifndef AARCH32
 	void *cpu_context[2];
+#endif
 	uintptr_t cpu_ops_ptr;
 #if CRASH_REPORTING
 	u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
@@ -104,12 +118,15 @@
 
 struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);
 
+#ifndef AARCH32
 /* Return the cpu_data structure for the current CPU. */
 static inline struct cpu_data *_cpu_data(void)
 {
 	return (cpu_data_t *)read_tpidr_el3();
 }
-
+#else
+struct cpu_data *_cpu_data(void);
+#endif
 
 /**************************************************************************
  * APIs for initialising and accessing per-cpu data
diff --git a/include/lib/psci/psci.h b/include/lib/psci/psci.h
index c3e9ef7..a583fef 100644
--- a/include/lib/psci/psci.h
+++ b/include/lib/psci/psci.h
@@ -359,6 +359,8 @@
 int psci_setup(uintptr_t mailbox_ep);
 void psci_warmboot_entrypoint(void);
 void psci_register_spd_pm_hook(const spd_pm_ops_t *pm);
+void psci_prepare_next_non_secure_ctx(
+			  struct entry_point_info *next_image_info);
 
 #endif /*__ASSEMBLY__*/
 
diff --git a/include/lib/stdlib/machine/_types.h b/include/lib/stdlib/machine/_types.h
index 7e993c4..fb1083b 100644
--- a/include/lib/stdlib/machine/_types.h
+++ b/include/lib/stdlib/machine/_types.h
@@ -31,6 +31,10 @@
  *	From: @(#)types.h	8.3 (Berkeley) 1/5/94
  * $FreeBSD$
  */
+/*
+ * Portions copyright (c) 2016, ARM Limited and Contributors.
+ * All rights reserved.
+ */
 
 #ifndef _MACHINE__TYPES_H_
 #define	_MACHINE__TYPES_H_
@@ -48,19 +52,56 @@
 typedef	unsigned short		__uint16_t;
 typedef	int			__int32_t;
 typedef	unsigned int		__uint32_t;
+
+
+/*
+ * Standard type definitions which are different in AArch64 and AArch32
+ */
+#ifdef	AARCH32
+typedef	long long		__int64_t;
+typedef	unsigned long long	__uint64_t;
+typedef	__int32_t	__critical_t;
+typedef	__int32_t	__intfptr_t;
+typedef	__int32_t	__intptr_t;
+typedef	__int32_t	__ptrdiff_t;		/* ptr1 - ptr2 */
+typedef	__int32_t	__register_t;
+typedef	__int32_t	__segsz_t;		/* segment size (in pages) */
+typedef	__uint32_t	__size_t;		/* sizeof() */
+typedef	__int32_t	__ssize_t;		/* byte count or error */
+typedef	__uint32_t	__uintfptr_t;
+typedef	__uint32_t	__uintptr_t;
+typedef	__uint32_t	__u_register_t;
+typedef	__uint32_t	__vm_offset_t;
+typedef	__uint32_t	__vm_paddr_t;
+typedef	__uint32_t	__vm_size_t;
+#elif defined AARCH64
 typedef	long			__int64_t;
 typedef	unsigned long		__uint64_t;
+typedef	__int64_t	__critical_t;
+typedef	__int64_t	__intfptr_t;
+typedef	__int64_t	__intptr_t;
+typedef	__int64_t	__ptrdiff_t;		/* ptr1 - ptr2 */
+typedef	__int64_t	__register_t;
+typedef	__int64_t	__segsz_t;		/* segment size (in pages) */
+typedef	__uint64_t	__size_t;		/* sizeof() */
+typedef	__int64_t	__ssize_t;		/* byte count or error */
+typedef	__uint64_t	__uintfptr_t;
+typedef	__uint64_t	__uintptr_t;
+typedef	__uint64_t	__u_register_t;
+typedef	__uint64_t	__vm_offset_t;
+typedef	__uint64_t	__vm_paddr_t;
+typedef	__uint64_t	__vm_size_t;
+#else
+#error "Only AArch32 or AArch64 supported"
+#endif /* AARCH32 */
 
 /*
  * Standard type definitions.
  */
 typedef	__int32_t	__clock_t;		/* clock()... */
-typedef	__int64_t	__critical_t;
 typedef	double		__double_t;
 typedef	float		__float_t;
-typedef	__int64_t	__intfptr_t;
 typedef	__int64_t	__intmax_t;
-typedef	__int64_t	__intptr_t;
 typedef	__int32_t	__int_fast8_t;
 typedef	__int32_t	__int_fast16_t;
 typedef	__int32_t	__int_fast32_t;
@@ -69,15 +110,8 @@
 typedef	__int16_t	__int_least16_t;
 typedef	__int32_t	__int_least32_t;
 typedef	__int64_t	__int_least64_t;
-typedef	__int64_t	__ptrdiff_t;		/* ptr1 - ptr2 */
-typedef	__int64_t	__register_t;
-typedef	__int64_t	__segsz_t;		/* segment size (in pages) */
-typedef	__uint64_t	__size_t;		/* sizeof() */
-typedef	__int64_t	__ssize_t;		/* byte count or error */
 typedef	__int64_t	__time_t;		/* time()... */
-typedef	__uint64_t	__uintfptr_t;
 typedef	__uint64_t	__uintmax_t;
-typedef	__uint64_t	__uintptr_t;
 typedef	__uint32_t	__uint_fast8_t;
 typedef	__uint32_t	__uint_fast16_t;
 typedef	__uint32_t	__uint_fast32_t;
@@ -86,12 +120,8 @@
 typedef	__uint16_t	__uint_least16_t;
 typedef	__uint32_t	__uint_least32_t;
 typedef	__uint64_t	__uint_least64_t;
-typedef	__uint64_t	__u_register_t;
-typedef	__uint64_t	__vm_offset_t;
 typedef	__int64_t	__vm_ooffset_t;
-typedef	__uint64_t	__vm_paddr_t;
 typedef	__uint64_t	__vm_pindex_t;
-typedef	__uint64_t	__vm_size_t;
 
 /*
  * Unusual type definitions.
diff --git a/include/lib/utils.h b/include/lib/utils.h
index 0936cbb..a234e3c 100644
--- a/include/lib/utils.h
+++ b/include/lib/utils.h
@@ -38,6 +38,8 @@
 #define IS_POWER_OF_TWO(x)			\
 	(((x) & ((x) - 1)) == 0)
 
+#define SIZE_FROM_LOG2_WORDS(n)		(4 << (n))
+
 /*
  * The round_up() macro rounds up a value to the given boundary in a
  * type-agnostic yet type-safe manner. The boundary must be a power of two.
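
SIZE_FROM_LOG2_WORDS() moves here from the AArch64 arch.h so that both architectures can share it. A typical use, sketched below for AArch32 (illustrative only, not part of the patch), is converting the CTR DminLine field into a cache line size in bytes.

/* Illustrative sketch only: CTR.DminLine is log2(line length in words). */
#include <arch.h>
#include <arch_helpers.h>
#include <utils.h>

static size_t example_dcache_line_size(void)
{
	unsigned int dminline =
		(read_ctr() >> CTR_DMINLINE_SHIFT) & CTR_DMINLINE_MASK;

	return SIZE_FROM_LOG2_WORDS(dminline);	/* words are 4 bytes */
}
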
diff --git a/include/lib/xlat_tables.h b/include/lib/xlat_tables.h
index b51a1de..3f35e45 100644
--- a/include/lib/xlat_tables.h
+++ b/include/lib/xlat_tables.h
@@ -188,9 +188,14 @@
 				size_t size, unsigned int attr);
 void mmap_add(const mmap_region_t *mm);
 
+#ifdef AARCH32
+/* AArch32 specific translation table API */
+void enable_mmu_secure(uint32_t flags);
+#else
 /* AArch64 specific translation table APIs */
 void enable_mmu_el1(unsigned int flags);
 void enable_mmu_el3(unsigned int flags);
+#endif /* AARCH32 */
 
 #endif /*__ASSEMBLY__*/
 #endif /* __XLAT_TABLES_H__ */
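
On AArch32 the single enable_mmu_secure() entry point replaces the per-EL variants. A minimal bring-up sketch follows (illustrative only, not part of the patch); the region base, size and attribute choice are placeholders, and the mmap_add_region()/init_xlat_tables() calls are assumed to be the existing xlat_tables APIs.

/* Illustrative sketch only: AArch32 MMU bring-up with a placeholder region. */
#include <xlat_tables.h>

#define EXAMPLE_RAM_BASE	0x04000000ULL	/* placeholder */
#define EXAMPLE_RAM_SIZE	0x00200000	/* placeholder */

static void example_enable_mmu(void)
{
	mmap_add_region(EXAMPLE_RAM_BASE, (uintptr_t)EXAMPLE_RAM_BASE,
			EXAMPLE_RAM_SIZE, MT_MEMORY | MT_RW | MT_SECURE);
	init_xlat_tables();

	/* Secure PL1&0 translation regime */
	enable_mmu_secure(0);
}
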
diff --git a/include/plat/arm/common/arm_def.h b/include/plat/arm/common/arm_def.h
index 0b3e66b..4a4dfd4 100644
--- a/include/plat/arm/common/arm_def.h
+++ b/include/plat/arm/common/arm_def.h
@@ -321,9 +321,12 @@
 # error "Unsupported ARM_TSP_RAM_LOCATION_ID value"
 #endif
 
+/* BL32 is mandatory in AArch32 */
+#ifndef AARCH32
 #ifdef SPD_none
 #undef BL32_BASE
 #endif /* SPD_none */
+#endif
 
 /*******************************************************************************
  * FWU Images: NS_BL1U, BL2U & NS_BL2U defines.
diff --git a/include/plat/arm/common/plat_arm.h b/include/plat/arm/common/plat_arm.h
index 0ffdb5c..25aab24 100644
--- a/include/plat/arm/common/plat_arm.h
+++ b/include/plat/arm/common/plat_arm.h
@@ -167,6 +167,9 @@
 /* TSP utility functions */
 void arm_tsp_early_platform_setup(void);
 
+/* SP_MIN utility functions */
+void arm_sp_min_early_platform_setup(void);
+
 /* FIP TOC validity check */
 int arm_io_is_toc_valid(void);
 
diff --git a/lib/aarch32/cache_helpers.S b/lib/aarch32/cache_helpers.S
new file mode 100644
index 0000000..d0e5cd0
--- /dev/null
+++ b/lib/aarch32/cache_helpers.S
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+	.globl	flush_dcache_range
+	.globl	clean_dcache_range
+	.globl	inv_dcache_range
+	.globl	dcsw_op_louis
+	.globl	dcsw_op_all
+	.globl	dcsw_op_level1
+	.globl	dcsw_op_level2
+	.globl	dcsw_op_level3
+
+/*
+ * This macro can be used for implementing various data cache operations `op`
+ */
+.macro do_dcache_maintenance_by_mva op, coproc, opc1, CRn, CRm, opc2
+	dcache_line_size r2, r3
+	add	r1, r0, r1
+	sub	r3, r2, #1
+	bic	r0, r0, r3
+loop_\op:
+	stcopr	r0, \coproc, \opc1, \CRn, \CRm, \opc2
+	add	r0, r0, r2
+	cmp	r0, r1
+	blo	loop_\op
+	dsb	sy
+	bx	lr
+.endm
+
+	/* ------------------------------------------
+	 * Clean+Invalidate from the base address
+	 * for 'size' bytes. 'r0' = addr, 'r1' = size
+	 * ------------------------------------------
+	 */
+func flush_dcache_range
+	do_dcache_maintenance_by_mva cimvac, DCCIMVAC
+endfunc flush_dcache_range
+
+	/* ------------------------------------------
+	 * Clean from the base address for 'size' bytes.
+	 * 'r0' = addr, 'r1' = size
+	 * ------------------------------------------
+	 */
+func clean_dcache_range
+	do_dcache_maintenance_by_mva cmvac, DCCMVAC
+endfunc clean_dcache_range
+
+	/* ------------------------------------------
+	 * Invalidate the data cache for the given
+	 * range. 'r0' = addr, 'r1' = size
+	 * ------------------------------------------
+	 */
+func inv_dcache_range
+	do_dcache_maintenance_by_mva imvac, DCIMVAC
+endfunc inv_dcache_range
+
+	/* ----------------------------------------------------------------
+	 * Data cache operations by set/way to the level specified
+	 *
+	 * The main function, do_dcsw_op requires:
+	 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+	 * as defined in arch.h
+	 * r1: The cache level to begin operation from
+	 * r2: The CLIDR value
+	 * r3: The last cache level to operate on
+	 * and will carry out the operation on each data cache from level 0
+	 * to the level in r3 in sequence
+	 *
+	 * The dcsw_op macro sets up the r2 and r3 parameters based on
+	 * clidr_el1 cache information before invoking the main function
+	 * ----------------------------------------------------------------
+	 */
+
+	.macro	dcsw_op shift, fw, ls
+	ldcopr	r2, CLIDR
+	ubfx	r3, r2, \shift, \fw
+	lsl	r3, r3, \ls
+	mov	r1, #0
+	b	do_dcsw_op
+	.endm
+
+func do_dcsw_op
+	push	{r4-r12,lr}
+	adr	r11, dcsw_loop_table	// compute cache op based on the operation type
+	add	r6, r11, r0, lsl #3	// cache op is 2x32-bit instructions
+loop1:
+	add	r10, r1, r1, LSR #1	// Work out 3x current cache level
+	mov	r12, r2, LSR r10	// extract cache type bits from clidr
+	and	r12, r12, #7   		// mask the bits for current cache only
+	cmp	r12, #2			// see what cache we have at this level
+	blt	level_done      	// no cache or only instruction cache at this level
+
+	stcopr	r1, CSSELR		// select current cache level in csselr
+	isb				// isb to sync the new CSSELR & CCSIDR
+	ldcopr	r12, CCSIDR		// read the new ccsidr
+	and	r10, r12, #7   		// extract the length of the cache lines
+	add	r10, r10, #4        	// add 4 (r10 = line length offset)
+	ubfx	r4, r12, #3, #10	// r4 = maximum way number (right aligned)
+	clz	r5, r4            	// r5 = the bit position of the way size increment
+	mov	r9, r4			// r9 working copy of the aligned max way number
+
+loop2:
+	ubfx	r7, r12, #13, #15	// r7 = max set number (right aligned)
+
+loop3:
+	orr	r0, r1, r9, LSL r5	// factor in the way number and cache level into r0
+	orr	r0, r0, r7, LSL r10	// factor in the set number
+
+	blx	r6
+	subs	r7, r7, #1              // decrement the set number
+	bge	loop3
+	subs	r9, r9, #1              // decrement the way number
+	bge	loop2
+level_done:
+	add	r1, r1, #2		// increment the cache number
+	cmp	r3, r1
+	dsb	sy			// ensure completion of previous cache maintenance instruction
+	bgt	loop1
+
+	mov	r6, #0
+	stcopr	r6, CSSELR		// select cache level 0 in CSSELR
+	dsb	sy
+	isb
+	pop	{r4-r12,pc}
+
+dcsw_loop_table:
+	stcopr	r0, DCISW
+	bx	lr
+	stcopr	r0, DCCISW
+	bx	lr
+	stcopr	r0, DCCSW
+	bx	lr
+
+endfunc do_dcsw_op
+
+	/* ---------------------------------------------------------------
+	 * Data cache operations by set/way till PoU.
+	 *
+	 * The function requires :
+	 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+	 * as defined in arch.h
+	 * ---------------------------------------------------------------
+	 */
+func dcsw_op_louis
+	dcsw_op #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc	dcsw_op_louis
+
+	/* ---------------------------------------------------------------
+	 * Data cache operations by set/way till PoC.
+	 *
+	 * The function requires :
+	 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+	 * as defined in arch.h
+	 * ---------------------------------------------------------------
+	 */
+func dcsw_op_all
+	dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc	dcsw_op_all
+
+
+	/* ---------------------------------------------------------------
+	 *  Helper macro for data cache operations by set/way for the
+	 *  level specified
+	 * ---------------------------------------------------------------
+	 */
+	.macro	dcsw_op_level level
+	ldcopr	r2, CLIDR
+	mov	r3, \level
+	sub	r1, r3, #2
+	b	do_dcsw_op
+	.endm
+
+	/* ---------------------------------------------------------------
+	 * Data cache operations by set/way for level 1 cache
+	 *
+	 * The main function, do_dcsw_op requires:
+	 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+	 * as defined in arch.h
+	 * ---------------------------------------------------------------
+	 */
+func dcsw_op_level1
+	dcsw_op_level #(1 << LEVEL_SHIFT)
+endfunc dcsw_op_level1
+
+	/* ---------------------------------------------------------------
+	 * Data cache operations by set/way for level 2 cache
+	 *
+	 * The main function, do_dcsw_op requires:
+	 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+	 * as defined in arch.h
+	 * ---------------------------------------------------------------
+	 */
+func dcsw_op_level2
+	dcsw_op_level #(2 << LEVEL_SHIFT)
+endfunc dcsw_op_level2
+
+	/* ---------------------------------------------------------------
+	 * Data cache operations by set/way for level 3 cache
+	 *
+	 * The main function, do_dcsw_op requires:
+	 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+	 * as defined in arch.h
+	 * ---------------------------------------------------------------
+	 */
+func dcsw_op_level3
+	dcsw_op_level #(3 << LEVEL_SHIFT)
+endfunc dcsw_op_level3
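
For reference, the set/way walk implemented by do_dcsw_op above is sketched below in C. This is illustrative only and not part of the patch; read_clidr(), read_ccsidr(), write_csselr() and dc_setway() are hypothetical stand-ins for the ldcopr/stcopr sequences and for whichever of DCISW/DCCISW/DCCSW the dcsw_loop_table dispatch selects.

    #include <stdint.h>

    /* Hypothetical CP15 accessors standing in for ldcopr/stcopr. */
    uint32_t read_clidr(void);
    uint32_t read_ccsidr(void);
    void write_csselr(uint32_t level);
    void dc_setway(uint32_t setway);        /* DCISW/DCCISW/DCCSW by op type */

    /* 'last_level' uses the same encoding as r3, i.e. cache level << 1. */
    static void dcsw_op_sketch(uint32_t last_level)
    {
        uint32_t clidr = read_clidr();

        for (uint32_t level = 0; level < last_level; level += 2) {
            uint32_t ctype = (clidr >> ((level >> 1) * 3)) & 0x7;
            if (ctype < 2)
                continue;                   /* no cache or I-cache only */

            write_csselr(level);            /* select this data cache level */
            uint32_t ccsidr = read_ccsidr();
            uint32_t line_shift = (ccsidr & 0x7) + 4;
            uint32_t max_way = (ccsidr >> 3) & 0x3ff;
            uint32_t max_set = (ccsidr >> 13) & 0x7fff;
            uint32_t way_shift = max_way ? __builtin_clz(max_way) : 0;

            for (int32_t way = max_way; way >= 0; way--)
                for (int32_t set = max_set; set >= 0; set--)
                    dc_setway(level | ((uint32_t)way << way_shift) |
                              ((uint32_t)set << line_shift));
        }
        write_csselr(0);                    /* back to cache level 0 */
    }
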
diff --git a/lib/aarch32/misc_helpers.S b/lib/aarch32/misc_helpers.S
new file mode 100644
index 0000000..63ac1a7
--- /dev/null
+++ b/lib/aarch32/misc_helpers.S
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+
+	.globl	zeromem
+
+/* -----------------------------------------------------------------------
+ * void zeromem(void *mem, unsigned int length);
+ *
+ * Initialise a memory region to 0.
+ * The memory address and length must be 4-byte aligned.
+ * -----------------------------------------------------------------------
+ */
+func zeromem
+#if ASM_ASSERTION
+	tst	r0, #0x3
+	ASM_ASSERT(eq)
+	tst	r1, #0x3
+	ASM_ASSERT(eq)
+#endif
+	add	r2, r0, r1
+	mov	r1, #0
+z_loop:
+	cmp	r2, r0
+	beq	z_end
+	str	r1, [r0], #4
+	b	z_loop
+z_end:
+	bx	lr
+endfunc zeromem
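
A minimal usage sketch for zeromem (illustrative; the buffer below is hypothetical). Both the address and the length must be 4-byte aligned, which is exactly what the ASM_ASSERTION path above checks.

    void zeromem(void *mem, unsigned int length);

    static unsigned int scratch[64];        /* 4-byte aligned by construction */

    void clear_scratch(void)
    {
        zeromem(scratch, sizeof(scratch));
    }
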
diff --git a/lib/cpus/aarch32/aem_generic.S b/lib/cpus/aarch32/aem_generic.S
new file mode 100644
index 0000000..10ea4e4
--- /dev/null
+++ b/lib/cpus/aarch32/aem_generic.S
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <aem_generic.h>
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cpu_macros.S>
+
+func aem_generic_core_pwr_dwn
+	/* Assert if cache is enabled */
+#if ASM_ASSERTION
+	ldcopr	r0, SCTLR
+	tst	r0, #SCTLR_C_BIT
+	ASM_ASSERT(eq)
+#endif
+	/* ---------------------------------------------
+	 * Flush L1 cache to PoU.
+	 * ---------------------------------------------
+	 */
+	mov	r0, #DC_OP_CISW
+	b	dcsw_op_louis
+endfunc aem_generic_core_pwr_dwn
+
+
+func aem_generic_cluster_pwr_dwn
+	/* Assert if cache is enabled */
+#if ASM_ASSERTION
+	ldcopr	r0, SCTLR
+	tst	r0, #SCTLR_C_BIT
+	ASM_ASSERT(eq)
+#endif
+	/* ---------------------------------------------
+	 * Flush L1 and L2 caches to PoC.
+	 * ---------------------------------------------
+	 */
+	mov	r0, #DC_OP_CISW
+	b	dcsw_op_all
+endfunc aem_generic_cluster_pwr_dwn
+
+/* cpu_ops for Base AEM FVP */
+declare_cpu_ops aem_generic, BASE_AEM_MIDR, 1
diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S
new file mode 100644
index 0000000..927a6f5
--- /dev/null
+++ b/lib/cpus/aarch32/cpu_helpers.S
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cpu_data.h>
+#include <cpu_macros.S>
+
+	/*
+	 * The reset handler common to all platforms.  After a matching
+	 * cpu_ops structure entry is found, the corresponding reset_handler
+	 * in the cpu_ops is invoked. The reset handler is invoked very early
+	 * in the boot sequence and it is assumed that we can clobber r0 - r10
+	 * without the need to follow AAPCS.
+	 * Clobbers: r0 - r10
+	 */
+	.globl	reset_handler
+func reset_handler
+	mov	r10, lr
+
+	/* The plat_reset_handler can clobber r0 - r9 */
+	bl	plat_reset_handler
+
+	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
+	bl	get_cpu_ops_ptr
+
+#if ASM_ASSERTION
+	cmp	r0, #0
+	ASM_ASSERT(ne)
+#endif
+
+	/* Get the cpu_ops reset handler */
+	ldr	r1, [r0, #CPU_RESET_FUNC]
+	cmp	r1, #0
+	mov	lr, r10
+	bxne	r1
+	bx	lr
+endfunc reset_handler
+
+	/*
+	 * The prepare core power down function for all platforms.  After
+	 * the cpu_ops pointer is retrieved from cpu_data, the corresponding
+	 * pwr_dwn_core in the cpu_ops is invoked. Follows AAPCS.
+	 */
+	.globl	prepare_core_pwr_dwn
+func prepare_core_pwr_dwn
+	push	{lr}
+	bl	_cpu_data
+	pop	{lr}
+
+	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
+#if ASM_ASSERTION
+	cmp	r1, #0
+	ASM_ASSERT(ne)
+#endif
+
+	/* Get the cpu_ops core_pwr_dwn handler */
+	ldr	r0, [r1, #CPU_PWR_DWN_CORE]
+	bx	r0
+endfunc prepare_core_pwr_dwn
+
+	/*
+	 * The prepare cluster power down function for all platforms.  After
+	 * the cpu_ops pointer is retrieved from cpu_data, the corresponding
+	 * pwr_dwn_cluster in the cpu_ops is invoked. Follows AAPCS.
+	 */
+	.globl	prepare_cluster_pwr_dwn
+func prepare_cluster_pwr_dwn
+	push	{lr}
+	bl	_cpu_data
+	pop	{lr}
+
+	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
+#if ASM_ASSERTION
+	cmp	r1, #0
+	ASM_ASSERT(ne)
+#endif
+
+	/* Get the cpu_ops cluster_pwr_dwn handler */
+	ldr	r0, [r1, #CPU_PWR_DWN_CLUSTER]
+	bx	r0
+endfunc prepare_cluster_pwr_dwn
+
+	/*
+	 * Initializes the cpu_ops_ptr if not already initialized
+	 * in cpu_data. This must only be called after the data cache
+	 * is enabled. AAPCS is followed.
+	 */
+	.globl	init_cpu_ops
+func init_cpu_ops
+	push	{r4 - r6, lr}
+	bl	_cpu_data
+	mov	r6, r0
+	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
+	cmp	r1, #0
+	bne	1f
+	bl	get_cpu_ops_ptr
+#if ASM_ASSERTION
+	cmp	r0, #0
+	ASM_ASSERT(ne)
+#endif
+	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
+1:
+	pop	{r4 - r6, pc}
+endfunc init_cpu_ops
+
+	/*
+	 * The below function returns the cpu_ops structure matching the
+	 * MIDR of the core. It reads the MIDR and finds the matching
+	 * entry in the list of cpu_ops entries. Only the implementer and
+	 * part number fields of the MIDR are used for the match.
+	 * Return :
+	 *     r0 - The matching cpu_ops pointer on Success
+	 *     r0 - 0 on failure.
+	 * Clobbers: r0 - r5
+	 */
+	.globl	get_cpu_ops_ptr
+func get_cpu_ops_ptr
+	/* Get the cpu_ops start and end locations */
+	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
+	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)
+
+	/* Initialize the return parameter */
+	mov	r0, #0
+
+	/* Read the MIDR */
+	ldcopr	r2, MIDR
+	ldr	r3, =CPU_IMPL_PN_MASK
+
+	/* Retain only the implementation and part number using mask */
+	and	r2, r2, r3
+1:
+	/* Check if we have reached end of list */
+	cmp	r4, r5
+	bge	error_exit
+
+	/* load the midr from the cpu_ops */
+	ldr	r1, [r4], #CPU_OPS_SIZE
+	and	r1, r1, r3
+
+	/* Check if midr matches to midr of this core */
+	cmp	r1, r2
+	bne	1b
+
+	/* Subtract the increment and offset to get the cpu-ops pointer */
+	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
+error_exit:
+	bx	lr
+endfunc get_cpu_ops_ptr
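
In C terms, get_cpu_ops_ptr walks the linker-collected cpu_ops records and matches on the implementer/part-number fields of the MIDR, roughly as in this sketch. It is illustrative only: the struct is a simplified stand-in for the assembly-level cpu_ops record (the real record is CPU_OPS_SIZE bytes, so the stride differs), and the mask value simply mirrors CPU_IMPL_PN_MASK used above.

    #include <stddef.h>
    #include <stdint.h>

    /* Simplified stand-in for the cpu_ops record laid out by cpu_macros.S. */
    typedef struct cpu_ops_sketch {
        uint32_t midr;
        /* reset and power-down handlers follow in the real layout */
    } cpu_ops_sketch_t;

    extern cpu_ops_sketch_t __CPU_OPS_START__[], __CPU_OPS_END__[];

    #define CPU_IMPL_PN_MASK_SKETCH  0xff00fff0u  /* implementer + part number */

    static cpu_ops_sketch_t *get_cpu_ops_ptr_sketch(uint32_t midr)
    {
        uint32_t masked = midr & CPU_IMPL_PN_MASK_SKETCH;

        for (cpu_ops_sketch_t *ops = __CPU_OPS_START__;
             ops < __CPU_OPS_END__; ops++) {
            if ((ops->midr & CPU_IMPL_PN_MASK_SKETCH) == masked)
                return ops;
        }
        return NULL;                        /* mirrors the r0 = 0 failure path */
    }
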
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
new file mode 100644
index 0000000..6915ded
--- /dev/null
+++ b/lib/el3_runtime/aarch32/context_mgmt.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <smcc_helpers.h>
+#include <string.h>
+
+/*******************************************************************************
+ * Context management library initialisation routine. This library is used by
+ * runtime services to share pointers to 'cpu_context' structures for the secure
+ * and non-secure states. Management of the structures and their associated
+ * memory is not done by the context management library e.g. the PSCI service
+ * manages the cpu context used for entry from and exit to the non-secure state.
+ * The Secure payload manages the context(s) corresponding to the secure state.
+ * It also uses this library to get access to the non-secure
+ * state cpu context pointers.
+ ******************************************************************************/
+void cm_init(void)
+{
+	/*
+	 * The context management library has only global data to initialize, but
+	 * that will be done when the BSS is zeroed out
+	 */
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context 'ctx' for
+ * first use, and sets the initial entrypoint state as specified by the
+ * entry_point_info structure.
+ *
+ * The security state to initialize is determined by the SECURE attribute
+ * of the entry_point_info. The function returns a pointer to the initialized
+ * context and sets this as the next context to return to.
+ *
+ * The EE and ST attributes are used to configure the endianness and secure
+ * timer availability for the new execution context.
+ *
+ * To prepare the register state for entry call cm_prepare_el3_exit() and
+ * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
+ * cm_el1_sysreg_context_restore().
+ ******************************************************************************/
+static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
+{
+	unsigned int security_state;
+	uint32_t scr, sctlr;
+	regs_t *reg_ctx;
+
+	assert(ctx);
+
+	security_state = GET_SECURITY_STATE(ep->h.attr);
+
+	/* Clear any residual register values from the context */
+	memset(ctx, 0, sizeof(*ctx));
+
+	/*
+	 * Base the context SCR on the current value, adjust for entry point
+	 * specific requirements
+	 */
+	scr = read_scr();
+	scr &= ~(SCR_NS_BIT | SCR_HCE_BIT);
+
+	if (security_state != SECURE)
+		scr |= SCR_NS_BIT;
+
+	/*
+	 * Set up SCTLR for the Non Secure context.
+	 * The EE bit is taken from the entrypoint attributes.
+	 * The M, C and I bits must be zero (as required by the PSCI specification).
+	 *
+	 * The target exception level is based on the spsr mode requested.
+	 * If execution is requested to hyp mode, HVC is enabled
+	 * via SCR.HCE.
+	 *
+	 * Always compute the SCTLR value and save it in the cpu_context
+	 * - the HYP registers are set up by cm_prepare_el3_exit() as they
+	 * are not part of the stored cpu_context
+	 *
+	 * TODO: In debug builds the spsr should be validated and checked
+	 * against the CPU support, security state, endianness and pc
+	 */
+	reg_ctx = get_regs_ctx(ctx);
+
+	if (security_state != SECURE) {
+		sctlr = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
+		sctlr |= SCTLR_RES1;
+		write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
+	}
+
+	if (GET_M32(ep->spsr) == MODE32_hyp)
+		scr |= SCR_HCE_BIT;
+
+	write_ctx_reg(reg_ctx, CTX_SCR, scr);
+	write_ctx_reg(reg_ctx, CTX_LR, ep->pc);
+	write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr);
+
+	/*
+	 * Store the r0-r3 value from the entrypoint into the context
+	 * Use memcpy as we are in control of the layout of the structures
+	 */
+	memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t));
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context for a CPU specified by
+ * its `cpu_idx` for first use, and sets the initial entrypoint state as
+ * specified by the entry_point_info structure.
+ ******************************************************************************/
+void cm_init_context_by_index(unsigned int cpu_idx,
+			      const entry_point_info_t *ep)
+{
+	cpu_context_t *ctx;
+	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
+	cm_init_context_common(ctx, ep);
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context for the current CPU
+ * for first use, and sets the initial entrypoint state as specified by the
+ * entry_point_info structure.
+ ******************************************************************************/
+void cm_init_my_context(const entry_point_info_t *ep)
+{
+	cpu_context_t *ctx;
+	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
+	cm_init_context_common(ctx, ep);
+}
+
+/*******************************************************************************
+ * Prepare the CPU system registers for first entry into secure or normal world
+ *
+ * If execution is requested to hyp mode, HSCTLR is initialized
+ * If execution is requested to non-secure PL1, and the CPU supports
+ * HYP mode then HYP mode is disabled by configuring all necessary HYP mode
+ * registers.
+ ******************************************************************************/
+void cm_prepare_el3_exit(uint32_t security_state)
+{
+	uint32_t sctlr, scr, hcptr;
+	cpu_context_t *ctx = cm_get_context(security_state);
+
+	assert(ctx);
+
+	if (security_state == NON_SECURE) {
+		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
+		if (scr & SCR_HCE_BIT) {
+			/* Use SCTLR value to initialize HSCTLR */
+			sctlr = read_ctx_reg(get_regs_ctx(ctx),
+						 CTX_NS_SCTLR);
+			sctlr |= HSCTLR_RES1;
+			/* Temporarily set the NS bit to access HSCTLR */
+			write_scr(read_scr() | SCR_NS_BIT);
+			/*
+			 * Make sure the write to SCR is complete so that
+			 * we can access HSCTLR
+			 */
+			isb();
+			write_hsctlr(sctlr);
+			isb();
+
+			write_scr(read_scr() & ~SCR_NS_BIT);
+			isb();
+		} else if (read_id_pfr1() &
+			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
+			/* Set the NS bit to access HCR, HCPTR, CNTHCTL, VPIDR, VMPIDR */
+			write_scr(read_scr() | SCR_NS_BIT);
+			isb();
+
+			/* PL2 present but unused, need to disable safely */
+			write_hcr(0);
+
+			/* HSCTLR : can be ignored when bypassing */
+
+			/* HCPTR : disable all traps TCPAC, TTA, TCP */
+			hcptr = read_hcptr();
+			hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT);
+			write_hcptr(hcptr);
+
+			/* Enable EL1 access to timer */
+			write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT);
+
+			/* Reset CNTVOFF_EL2 */
+			write64_cntvoff(0);
+
+			/* Set VPIDR, VMPIDR to match MIDR, MPIDR */
+			write_vpidr(read_midr());
+			write_vmpidr(read_mpidr());
+
+			/*
+			 * Reset VTTBR.
+			 * Needed because cache maintenance operations depend on
+			 * the VMID even when non-secure EL1&0 stage 2 address
+			 * translation is disabled.
+			 */
+			write64_vttbr(0);
+			isb();
+
+			write_scr(read_scr() & ~SCR_NS_BIT);
+			isb();
+		}
+	}
+}
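
Typical call sequence from a caller's perspective (a sketch; the PSCI library below uses exactly this pattern in psci_prepare_next_non_secure_ctx() later in this patch):

    void prepare_ns_entry_sketch(entry_point_info_t *ns_ep)
    {
        /* Populate this CPU's non-secure cpu_context from the entrypoint */
        cm_init_my_context(ns_ep);

        /* Program HSCTLR, or safely disable an unused HYP mode, before exit */
        cm_prepare_el3_exit(NON_SECURE);
    }
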
diff --git a/lib/el3_runtime/aarch32/cpu_data.S b/lib/el3_runtime/aarch32/cpu_data.S
new file mode 100644
index 0000000..b97911f
--- /dev/null
+++ b/lib/el3_runtime/aarch32/cpu_data.S
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+#include <cpu_data.h>
+
+	.globl	_cpu_data
+	.globl	_cpu_data_by_index
+
+/* -----------------------------------------------------------------
+ * cpu_data_t *_cpu_data(void)
+ *
+ * Return the cpu_data structure for the current CPU.
+ * -----------------------------------------------------------------
+ */
+func _cpu_data
+	push	{lr}
+	bl	plat_my_core_pos
+	pop	{lr}
+	b	_cpu_data_by_index
+endfunc _cpu_data
+
+/* -----------------------------------------------------------------
+ * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index)
+ *
+ * Return the cpu_data structure for the CPU with the given linear index
+ *
+ * This can be called without a valid stack.
+ * clobbers: r0, r1
+ * -----------------------------------------------------------------
+ */
+func _cpu_data_by_index
+	ldr	r1, =percpu_data
+	add	r0, r1, r0, LSL #CPU_DATA_LOG2SIZE
+	bx	lr
+endfunc _cpu_data_by_index
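
The assembly above is the equivalent of indexing the per-CPU data array directly. A C sketch (illustrative), assuming the percpu_data array that cpu_data_array.c provides:

    #include <cpu_data.h>

    extern cpu_data_t percpu_data[PLATFORM_CORE_COUNT];

    static inline cpu_data_t *cpu_data_by_index_sketch(unsigned int index)
    {
        return &percpu_data[index];     /* index << CPU_DATA_LOG2SIZE in asm */
    }
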
diff --git a/lib/locks/exclusive/aarch32/spinlock.S b/lib/locks/exclusive/aarch32/spinlock.S
new file mode 100644
index 0000000..f3a2bc3
--- /dev/null
+++ b/lib/locks/exclusive/aarch32/spinlock.S
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+
+	.globl	spin_lock
+	.globl	spin_unlock
+
+
+func spin_lock
+	mov	r2, #1
+1:
+	ldrex	r1, [r0]
+	cmp	r1, #0
+	wfene
+	strexeq	r1, r2, [r0]
+	cmpeq	r1, #0
+	bne	1b
+	dmb
+	bx	lr
+endfunc spin_lock
+
+
+func spin_unlock
+	mov	r1, #0
+	stl	r1, [r0]
+	bx	lr
+endfunc spin_unlock
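
A hedged usage sketch (not part of the patch), assuming the existing spinlock_t wrapper type and header; the lock name is hypothetical:

    #include <spinlock.h>

    static spinlock_t console_lock;

    void console_locked_op(void)
    {
        spin_lock(&console_lock);
        /* ... critical section ... */
        spin_unlock(&console_lock);
    }
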
diff --git a/lib/locks/exclusive/aarch64/spinlock.S b/lib/locks/exclusive/aarch64/spinlock.S
new file mode 100644
index 0000000..1ca5912
--- /dev/null
+++ b/lib/locks/exclusive/aarch64/spinlock.S
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+
+	.globl	spin_lock
+	.globl	spin_unlock
+
+
+func spin_lock
+	mov	w2, #1
+	sevl
+l1:	wfe
+l2:	ldaxr	w1, [x0]
+	cbnz	w1, l1
+	stxr	w1, w2, [x0]
+	cbnz	w1, l2
+	ret
+endfunc spin_lock
+
+
+func spin_unlock
+	stlr	wzr, [x0]
+	ret
+endfunc spin_unlock
diff --git a/lib/locks/exclusive/spinlock.S b/lib/locks/exclusive/spinlock.S
index 772f14e..9c945f9 100644
--- a/lib/locks/exclusive/spinlock.S
+++ b/lib/locks/exclusive/spinlock.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -28,25 +28,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <asm_macros.S>
-
-	.globl	spin_lock
-	.globl	spin_unlock
-
-
-func spin_lock
-	mov	w2, #1
-	sevl
-l1:	wfe
-l2:	ldaxr	w1, [x0]
-	cbnz	w1, l1
-	stxr	w1, w2, [x0]
-	cbnz	w1, l2
-	ret
-endfunc spin_lock
-
-
-func spin_unlock
-	stlr	wzr, [x0]
-	ret
-endfunc spin_unlock
+#if !ERROR_DEPRECATED
+#include "./aarch64/spinlock.S"
+#endif
diff --git a/lib/psci/aarch32/psci_helpers.S b/lib/psci/aarch32/psci_helpers.S
new file mode 100644
index 0000000..36d5d7d
--- /dev/null
+++ b/lib/psci/aarch32/psci_helpers.S
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+#include <platform_def.h>
+#include <psci.h>
+
+	.globl	psci_do_pwrdown_cache_maintenance
+	.globl	psci_do_pwrup_cache_maintenance
+	.globl	psci_power_down_wfi
+
+/* -----------------------------------------------------------------------
+ * void psci_do_pwrdown_cache_maintenance(unsigned int power_level);
+ *
+ * This function performs cache maintenance for the specified power
+ * level. The levels of cache affected are determined by the power
+ * level which is passed as the argument, i.e. power level 0 results
+ * in a flush of the L1 cache. Both the L1 and L2 caches are flushed
+ * for a higher power level.
+ *
+ * Additionally, this function also ensures that stack memory is correctly
+ * flushed out to avoid coherency issues due to a change in its memory
+ * attributes after the data cache is disabled.
+ * -----------------------------------------------------------------------
+ */
+func psci_do_pwrdown_cache_maintenance
+	push	{r4, lr}
+
+	/* ----------------------------------------------
+	 * Turn OFF the cache and do stack maintenance
+	 * prior to the cpu operations. This sequence is
+	 * different from AArch64 because in AArch32 the
+	 * assembler routines for the cpu operations use
+	 * the stack, whereas in AArch64 they do not.
+	 * ----------------------------------------------
+	 */
+	mov	r4, r0
+	bl	do_stack_maintenance
+
+	/* ---------------------------------------------
+	 * Determine how many levels of cache will be
+	 * subject to cache maintenance. Power level
+	 * 0 implies that only the cpu is being powered
+	 * down. Only the L1 data cache needs to be
+	 * flushed to the PoU in this case. For a higher
+	 * power level we are assuming that a flush
+	 * of L1 data and L2 unified cache is enough.
+	 * This information should be provided by the
+	 * platform.
+	 * ---------------------------------------------
+	 */
+	cmp	r4, #PSCI_CPU_PWR_LVL
+	pop	{r4,lr}
+
+	beq	prepare_core_pwr_dwn
+	b	prepare_cluster_pwr_dwn
+endfunc psci_do_pwrdown_cache_maintenance
+
+
+/* -----------------------------------------------------------------------
+ * void psci_do_pwrup_cache_maintenance(void);
+ *
+ * This function performs cache maintenance after this cpu is powered up.
+ * Currently, this involves managing the used stack memory before turning
+ * on the data cache.
+ * -----------------------------------------------------------------------
+ */
+func psci_do_pwrup_cache_maintenance
+	push	{lr}
+
+	/* ---------------------------------------------
+	 * Ensure any inflight stack writes have made it
+	 * to main memory.
+	 * ---------------------------------------------
+	 */
+	dmb	st
+
+	/* ---------------------------------------------
+	 * Calculate and store the size of the used
+	 * stack memory in r1. Calculate and store the
+	 * stack base address in r0.
+	 * ---------------------------------------------
+	 */
+	bl	plat_get_my_stack
+	mov	r1, sp
+	sub	r1, r0, r1
+	mov	r0, sp
+	bl	inv_dcache_range
+
+	/* ---------------------------------------------
+	 * Enable the data cache.
+	 * ---------------------------------------------
+	 */
+	ldcopr	r0, SCTLR
+	orr	r0, r0, #SCTLR_C_BIT
+	stcopr	r0, SCTLR
+	isb
+
+	pop	{pc}
+endfunc psci_do_pwrup_cache_maintenance
+
+	/* ---------------------------------------------
+	 * void do_stack_maintenance(void)
+	 * Do stack maintenance by flushing the used
+	 * stack to the main memory and invalidating the
+	 * remainder.
+	 * ---------------------------------------------
+	 */
+func do_stack_maintenance
+	push	{r4, lr}
+	bl	plat_get_my_stack
+
+	/* Turn off the D-cache */
+	ldcopr	r1, SCTLR
+	bic	r1, #SCTLR_C_BIT
+	stcopr	r1, SCTLR
+	isb
+
+	/* ---------------------------------------------
+	 * Calculate and store the size of the used
+	 * stack memory in r1.
+	 * ---------------------------------------------
+	 */
+	mov	r4, r0
+	mov	r1, sp
+	sub	r1, r0, r1
+	mov	r0, sp
+	bl	flush_dcache_range
+
+	/* ---------------------------------------------
+	 * Calculate and store the size of the unused
+	 * stack memory in r1. Calculate and store the
+	 * stack base address in r0.
+	 * ---------------------------------------------
+	 */
+	sub	r0, r4, #PLATFORM_STACK_SIZE
+	sub	r1, sp, r0
+	bl	inv_dcache_range
+
+	pop	{r4, pc}
+endfunc do_stack_maintenance
+
+/* -----------------------------------------------------------------------
+ * This function is called to indicate to the power controller that it
+ * is safe to power down this cpu. It should not exit the wfi and will
+ * be released from reset upon power up.
+ * -----------------------------------------------------------------------
+ */
+func psci_power_down_wfi
+	dsb	sy		// ensure write buffer empty
+	wfi
+	bl	plat_panic_handler
+endfunc psci_power_down_wfi
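
Sketch of the call order the generic PSCI code is expected to follow around a power-down (illustrative only; the wrapper function and its name are hypothetical):

    void power_down_sketch(unsigned int power_level)
    {
        /* Flush/invalidate the affected cache levels and the used stack */
        psci_do_pwrdown_cache_maintenance(power_level);

        /* Platform power-off/suspend hooks would run here */

        /* Hand over to the power controller; does not return */
        psci_power_down_wfi();
    }
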
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
index e87e8c0..68cdd6e 100644
--- a/lib/psci/psci_common.c
+++ b/lib/psci/psci_common.c
@@ -592,10 +592,57 @@
  * This function determines the full entrypoint information for the requested
  * PSCI entrypoint on power on/resume and returns it.
  ******************************************************************************/
+#ifdef AARCH32
 static int psci_get_ns_ep_info(entry_point_info_t *ep,
 			       uintptr_t entrypoint,
 			       u_register_t context_id)
 {
+	u_register_t ep_attr;
+	unsigned int aif, ee, mode;
+	u_register_t scr = read_scr();
+	u_register_t ns_sctlr, sctlr;
+
+	/* Switch to non secure state */
+	write_scr(scr | SCR_NS_BIT);
+	isb();
+	ns_sctlr = read_sctlr();
+
+	sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;
+
+	/* Return to original state */
+	write_scr(scr);
+	isb();
+	ee = 0;
+
+	ep_attr = NON_SECURE | EP_ST_DISABLE;
+	if (sctlr & SCTLR_EE_BIT) {
+		ep_attr |= EP_EE_BIG;
+		ee = 1;
+	}
+	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
+
+	ep->pc = entrypoint;
+	memset(&ep->args, 0, sizeof(ep->args));
+	ep->args.arg0 = context_id;
+
+	mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
+
+	/*
+	 * TODO: If HYP mode is not implemented, choose the asynchronous
+	 * exception bits according to the values of the SCR.{AW, FW} bits
+	 */
+	aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;
+
+	ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);
+
+	return PSCI_E_SUCCESS;
+}
+
+#else
+static int psci_get_ns_ep_info(entry_point_info_t *ep,
+			       uintptr_t entrypoint,
+			       u_register_t context_id)
+{
 	u_register_t ep_attr, sctlr;
 	unsigned int daif, ee, mode;
 	u_register_t ns_scr_el3 = read_scr_el3();
@@ -646,6 +693,7 @@
 
 	return PSCI_E_SUCCESS;
 }
+#endif
 
 /*******************************************************************************
  * This function validates the entrypoint with the platform layer if the
diff --git a/lib/psci/psci_lib.mk b/lib/psci/psci_lib.mk
index 662e14a..8daa831 100644
--- a/lib/psci/psci_lib.mk
+++ b/lib/psci/psci_lib.mk
@@ -29,11 +29,10 @@
 #
 
 PSCI_LIB_SOURCES	:=	lib/el3_runtime/cpu_data_array.c	\
-				lib/el3_runtime/aarch64/context.S	\
-				lib/el3_runtime/aarch64/cpu_data.S	\
-				lib/el3_runtime/aarch64/context_mgmt.c	\
-				lib/cpus/aarch64/cpu_helpers.S		\
-				lib/locks/exclusive/spinlock.S		\
+				lib/el3_runtime/${ARCH}/cpu_data.S	\
+				lib/el3_runtime/${ARCH}/context_mgmt.c	\
+				lib/cpus/${ARCH}/cpu_helpers.S		\
+				lib/locks/exclusive/${ARCH}/spinlock.S	\
 				lib/psci/psci_off.c			\
 				lib/psci/psci_on.c			\
 				lib/psci/psci_suspend.c			\
@@ -41,7 +40,11 @@
 				lib/psci/psci_main.c			\
 				lib/psci/psci_setup.c			\
 				lib/psci/psci_system_off.c		\
-				lib/psci/aarch64/psci_helpers.S
+				lib/psci/${ARCH}/psci_helpers.S
+
+ifeq (${ARCH}, aarch64)
+PSCI_LIB_SOURCES	+=	lib/el3_runtime/aarch64/context.S
+endif
 
 ifeq (${USE_COHERENT_MEM}, 1)
 PSCI_LIB_SOURCES		+=	lib/locks/bakery/bakery_lock_coherent.c
diff --git a/lib/psci/psci_setup.c b/lib/psci/psci_setup.c
index d35e000..20d0635 100644
--- a/lib/psci/psci_setup.c
+++ b/lib/psci/psci_setup.c
@@ -278,3 +278,15 @@
 	/* Initialize the cpu_ops pointer. */
 	init_cpu_ops();
 }
+
+/******************************************************************************
+ * PSCI Library interface to initialize the cpu context for the next
+ * non-secure image during cold boot. The relevant registers in the cpu context
+ * need to be retrieved and programmed on return from this interface.
+ *****************************************************************************/
+void psci_prepare_next_non_secure_ctx(entry_point_info_t *next_image_info)
+{
+	assert(GET_SECURITY_STATE(next_image_info->h.attr) == NON_SECURE);
+	cm_init_my_context(next_image_info);
+	cm_prepare_el3_exit(NON_SECURE);
+}
diff --git a/lib/xlat_tables/aarch32/xlat_tables.c b/lib/xlat_tables/aarch32/xlat_tables.c
new file mode 100644
index 0000000..a97cf31
--- /dev/null
+++ b/lib/xlat_tables/aarch32/xlat_tables.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cassert.h>
+#include <platform_def.h>
+#include <utils.h>
+#include <xlat_tables.h>
+#include "../xlat_tables_private.h"
+
+/*
+ * The virtual address space size must be a power of two. As we start the initial
+ * lookup at level 1, it must also be between 2 GB and 4 GB. See section
+ * G4.6.5 in the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information.
+ */
+CASSERT(ADDR_SPACE_SIZE >= (1ull << 31) && ADDR_SPACE_SIZE <= (1ull << 32) &&
+	IS_POWER_OF_TWO(ADDR_SPACE_SIZE), assert_valid_addr_space_size);
+
+#define NUM_L1_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
+
+static uint64_t l1_xlation_table[NUM_L1_ENTRIES]
+		__aligned(NUM_L1_ENTRIES * sizeof(uint64_t));
+
+void init_xlat_tables(void)
+{
+	unsigned long long max_pa;
+	uintptr_t max_va;
+	print_mmap();
+	init_xlation_table(0, l1_xlation_table, 1, &max_va, &max_pa);
+	assert(max_va < ADDR_SPACE_SIZE);
+}
+
+/*******************************************************************************
+ * Function for enabling the MMU in Secure PL1, assuming that the
+ * page-tables have already been created.
+ ******************************************************************************/
+void enable_mmu_secure(unsigned int flags)
+{
+	unsigned int mair0, ttbcr, sctlr;
+	uint64_t ttbr0;
+
+	assert(IS_IN_SECURE());
+	assert((read_sctlr() & SCTLR_M_BIT) == 0);
+
+	/* Set attributes in the right indices of the MAIR */
+	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
+			ATTR_IWBWA_OWBWA_NTR_INDEX);
+	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
+			ATTR_NON_CACHEABLE_INDEX);
+	write_mair0(mair0);
+
+	/* Invalidate TLBs at the current exception level */
+	tlbiall();
+
+	/*
+	 * Set TTBCR bits as well. Set TTBR0 table properties as Inner
+	 * & outer WBWA & shareable. Disable TTBR1.
+	 */
+	ttbcr = TTBCR_EAE_BIT |
+		TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
+		TTBCR_RGN0_INNER_WBA |
+		(32 - __builtin_ctzl((uintptr_t)ADDR_SPACE_SIZE));
+	ttbcr |= TTBCR_EPD1_BIT;
+	write_ttbcr(ttbcr);
+
+	/* Set TTBR0 bits as well */
+	ttbr0 = (uintptr_t) l1_xlation_table;
+	write64_ttbr0(ttbr0);
+	write64_ttbr1(0);
+
+	/*
+	 * Ensure all translation table writes have drained
+	 * into memory, the TLB invalidation is complete,
+	 * and translation register writes are committed
+	 * before enabling the MMU
+	 */
+	dsb();
+	isb();
+
+	sctlr = read_sctlr();
+	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;
+
+	if (flags & DISABLE_DCACHE)
+		sctlr &= ~SCTLR_C_BIT;
+	else
+		sctlr |= SCTLR_C_BIT;
+
+	write_sctlr(sctlr);
+
+	/* Ensure the MMU enable takes effect immediately */
+	isb();
+}
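
A minimal platform-side usage sketch (illustrative; the region values are hypothetical and the mmap_add()/MAP_REGION_FLAT() helpers are assumed to be the existing ones from xlat_tables.h):

    #include <xlat_tables.h>

    static const mmap_region_t sketch_mmap[] = {
        MAP_REGION_FLAT(0x04000000, 0x00200000,
                        MT_MEMORY | MT_RW | MT_SECURE),
        {0}                             /* terminator */
    };

    void sketch_mmu_setup(void)
    {
        mmap_add(sketch_mmap);          /* register the regions */
        init_xlat_tables();             /* build the translation tables */
        enable_mmu_secure(0);           /* turn the MMU on in Secure PL1 */
    }
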
diff --git a/lib/xlat_tables/xlat_tables_common.c b/lib/xlat_tables/xlat_tables_common.c
index 33784c2..bc7fed7 100644
--- a/lib/xlat_tables/xlat_tables_common.c
+++ b/lib/xlat_tables/xlat_tables_common.c
@@ -289,17 +289,17 @@
 		if (!mm->size)
 			return attr; /* Reached end of list */
 
-		if (mm->base_va >= base_va + size)
+		if (mm->base_va > base_va + size - 1)
 			return attr; /* Next region is after area so end */
 
-		if (mm->base_va + mm->size <= base_va)
+		if (mm->base_va + mm->size - 1 < base_va)
 			continue; /* Next region has already been overtaken */
 
 		if (mm->attr == attr)
 			continue; /* Region doesn't override attribs so skip */
 
 		if (mm->base_va > base_va ||
-			mm->base_va + mm->size < base_va + size)
+			mm->base_va + mm->size - 1 < base_va + size - 1)
 			return -1; /* Region doesn't fully cover our area */
 
 		attr = mm->attr;
@@ -328,7 +328,7 @@
 		if (!mm->size) {
 			/* Done mapping regions; finish zeroing the table */
 			desc = INVALID_DESC;
-		} else if (mm->base_va + mm->size <= base_va) {
+		} else if (mm->base_va + mm->size - 1 < base_va) {
 			/* This area is after the region so get next region */
 			++mm;
 			continue;
@@ -337,7 +337,7 @@
 		debug_print("%s VA:%p size:0x%x ", get_level_spacer(level),
 				(void *)base_va, level_size);
 
-		if (mm->base_va >= base_va + level_size) {
+		if (mm->base_va > base_va + level_size - 1) {
 			/* Next region is after this area. Nothing to map yet */
 			desc = INVALID_DESC;
 		} else {
@@ -369,7 +369,7 @@
 
 		*table++ = desc;
 		base_va += level_size;
-	} while ((base_va & level_index_mask) && (base_va < ADDR_SPACE_SIZE));
+	} while ((base_va & level_index_mask) && (base_va - 1 < ADDR_SPACE_SIZE - 1));
 
 	return mm;
 }
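
The comparisons above are rewritten from the `base_va + size` form to the `base_va + size - 1` (last-address) form so that they remain correct when a region extends to the very top of a 32-bit virtual address space. For example, with a 32-bit uintptr_t, base_va = 0xC0000000 and size = 0x40000000 give base_va + size = 0 after wrap-around, whereas base_va + size - 1 = 0xFFFFFFFF still identifies the last mapped address, so the overlap checks and the final loop bound behave as intended.
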
diff --git a/plat/arm/board/common/board_common.mk b/plat/arm/board/common/board_common.mk
index a5636d5..49136e6 100644
--- a/plat/arm/board/common/board_common.mk
+++ b/plat/arm/board/common/board_common.mk
@@ -31,8 +31,10 @@
 PLAT_INCLUDES		+=	-Iinclude/plat/arm/board/common/			\
 				-Iinclude/plat/arm/board/common/drivers
 
-PLAT_BL_COMMON_SOURCES	+=	drivers/arm/pl011/pl011_console.S			\
-				plat/arm/board/common/aarch64/board_arm_helpers.S
+PLAT_BL_COMMON_SOURCES	+=	drivers/arm/pl011/${ARCH}/pl011_console.S
+ifeq (${ARCH}, aarch64)
+PLAT_BL_COMMON_SOURCES	+=	plat/arm/board/common/aarch64/board_arm_helpers.S
+endif
 
 BL1_SOURCES		+=	plat/arm/board/common/drivers/norflash/norflash.c
 
diff --git a/plat/arm/board/fvp/aarch32/fvp_helpers.S b/plat/arm/board/fvp/aarch32/fvp_helpers.S
new file mode 100644
index 0000000..373036c
--- /dev/null
+++ b/plat/arm/board/fvp/aarch32/fvp_helpers.S
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+#include "../drivers/pwrc/fvp_pwrc.h"
+#include "../fvp_def.h"
+
+	.globl	plat_get_my_entrypoint
+	.globl	plat_is_my_cpu_primary
+
+	/* ---------------------------------------------------------------------
+	 * unsigned long plat_get_my_entrypoint (void);
+	 *
+	 * Main job of this routine is to distinguish between a cold and warm
+	 * boot. On FVP, this information can be queried from the power
+	 * controller. The Power Control SYS Status Register (PSYSR) indicates
+	 * the wake-up reason for the CPU.
+	 *
+	 * For a cold boot, return 0.
+	 * For a warm boot, read the mailbox and return the address it contains.
+	 *
+	 * TODO: PSYSR is a common register and should be
+	 * 	accessed using locks. Since it is not possible
+	 * 	to use locks immediately after a cold reset
+	 * 	we are relying on the fact that after a cold
+	 * 	reset all cpus will read the same WK field
+	 * ---------------------------------------------------------------------
+	 */
+func plat_get_my_entrypoint
+	/* ---------------------------------------------------------------------
+	 * When bit PSYSR.WK indicates either "Wake by PPONR" or "Wake by GIC
+	 * WakeRequest signal" then it is a warm boot.
+	 * ---------------------------------------------------------------------
+	 */
+	ldcopr	r2, MPIDR
+	ldr	r1, =PWRC_BASE
+	str	r2, [r1, #PSYSR_OFF]
+	ldr	r2, [r1, #PSYSR_OFF]
+	ubfx	r2, r2, #PSYSR_WK_SHIFT, #PSYSR_WK_WIDTH
+	cmp	r2, #WKUP_PPONR
+	beq	warm_reset
+	cmp	r2, #WKUP_GICREQ
+	beq	warm_reset
+
+	/* Cold reset */
+	mov	r0, #0
+	bx	lr
+
+warm_reset:
+	/* ---------------------------------------------------------------------
+	 * A mailbox is maintained in the trusted SRAM. It is flushed out of the
+	 * caches after every update made via a normal memory mapping, so it is
+	 * safe to read it here with Strongly-Ordered attributes.
+	 * ---------------------------------------------------------------------
+	 */
+	ldr	r0, =PLAT_ARM_TRUSTED_MAILBOX_BASE
+	ldr	r0, [r0]
+	cmp	r0, #0
+	beq	_panic
+	bx	lr
+
+	/* ---------------------------------------------------------------------
+	 * The power controller indicates this is a warm reset but the mailbox
+	 * is empty. This should never happen!
+	 * ---------------------------------------------------------------------
+	 */
+_panic:
+	b	_panic
+endfunc plat_get_my_entrypoint
+
+	/* -----------------------------------------------------
+	 * unsigned int plat_is_my_cpu_primary (void);
+	 *
+	 * Find out whether the current cpu is the primary
+	 * cpu.
+	 * -----------------------------------------------------
+	 */
+func plat_is_my_cpu_primary
+	ldcopr	r0, MPIDR
+	ldr	r1, =(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
+	and	r0, r1
+	cmp	r0, #FVP_PRIMARY_CPU
+	moveq	r0, #1
+	movne	r0, #0
+	bx	lr
+endfunc plat_is_my_cpu_primary
diff --git a/plat/arm/board/fvp/fvp_common.c b/plat/arm/board/fvp/fvp_common.c
index 002cff6..fbbe34e 100644
--- a/plat/arm/board/fvp/fvp_common.c
+++ b/plat/arm/board/fvp/fvp_common.c
@@ -121,6 +121,9 @@
 #endif
 #if IMAGE_BL32
 const mmap_region_t plat_arm_mmap[] = {
+#ifdef AARCH32
+	ARM_MAP_SHARED_RAM,
+#endif
 	V2M_MAP_IOFPGA,
 	MAP_DEVICE0,
 	MAP_DEVICE1,
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index 1ea9822..2865569 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -69,6 +69,9 @@
 				plat/common/plat_gicv2.c		\
 				plat/arm/common/arm_gicv2.c
 else ifeq (${FVP_USE_GIC_DRIVER}, FVP_GICV3_LEGACY)
+  ifeq (${ARCH}, aarch32)
+    $(error "GICV3 Legacy driver not supported for AArch32 build")
+  endif
 FVP_GIC_SOURCES		:=	drivers/arm/gic/arm_gic.c		\
 				drivers/arm/gic/gic_v2.c		\
 				drivers/arm/gic/gic_v3.c		\
@@ -98,12 +101,15 @@
 
 PLAT_BL_COMMON_SOURCES	:=	plat/arm/board/fvp/fvp_common.c
 
-FVP_CPU_LIBS		:=	lib/cpus/aarch64/aem_generic.S			\
-				lib/cpus/aarch64/cortex_a35.S			\
+FVP_CPU_LIBS		:=	lib/cpus/${ARCH}/aem_generic.S
+
+ifeq (${ARCH}, aarch64)
+FVP_CPU_LIBS		+=	lib/cpus/aarch64/cortex_a35.S			\
 				lib/cpus/aarch64/cortex_a53.S			\
 				lib/cpus/aarch64/cortex_a57.S			\
 				lib/cpus/aarch64/cortex_a72.S			\
 				lib/cpus/aarch64/cortex_a73.S
+endif
 
 BL1_SOURCES		+=	drivers/io/io_semihosting.c			\
 				lib/semihosting/semihosting.c			\
diff --git a/plat/arm/board/fvp/sp_min/fvp_sp_min_setup.c b/plat/arm/board/fvp/sp_min/fvp_sp_min_setup.c
new file mode 100644
index 0000000..d3bef82
--- /dev/null
+++ b/plat/arm/board/fvp/sp_min/fvp_sp_min_setup.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <plat_arm.h>
+#include "../fvp_private.h"
+
+void sp_min_early_platform_setup(void)
+{
+	arm_sp_min_early_platform_setup();
+
+	/* Initialize the platform config for future decision making */
+	fvp_config_setup();
+
+	/*
+	 * Initialize the correct interconnect for this cluster during cold
+	 * boot. No need for locks as no other CPU is active.
+	 */
+	fvp_interconnect_init();
+
+	/*
+	 * Enable coherency in interconnect for the primary CPU's cluster.
+	 * Earlier bootloader stages might already do this (e.g. Trusted
+	 * Firmware's BL1 does it) but we can't assume so. There is no harm in
+	 * executing this code twice anyway.
+	 * FVP PSCI code will enable coherency for other clusters.
+	 */
+	fvp_interconnect_enable();
+}
diff --git a/plat/arm/board/fvp/sp_min/sp_min-fvp.mk b/plat/arm/board/fvp/sp_min/sp_min-fvp.mk
new file mode 100644
index 0000000..a788782
--- /dev/null
+++ b/plat/arm/board/fvp/sp_min/sp_min-fvp.mk
@@ -0,0 +1,42 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# Neither the name of ARM nor the names of its contributors may be used
+# to endorse or promote products derived from this software without specific
+# prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+
+# SP_MIN source files specific to FVP platform
+BL32_SOURCES		+=	plat/arm/board/fvp/aarch32/fvp_helpers.S	\
+				plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.c	\
+				plat/arm/board/fvp/fvp_pm.c			\
+				plat/arm/board/fvp/fvp_topology.c		\
+				plat/arm/board/fvp/sp_min/fvp_sp_min_setup.c	\
+				${FVP_CPU_LIBS}					\
+				${FVP_GIC_SOURCES}				\
+				${FVP_INTERCONNECT_SOURCES}			\
+				${FVP_SECURITY_SOURCES}
+
+include plat/arm/common/sp_min/arm_sp_min.mk
\ No newline at end of file
diff --git a/plat/arm/common/aarch32/arm_helpers.S b/plat/arm/common/aarch32/arm_helpers.S
new file mode 100644
index 0000000..0839913
--- /dev/null
+++ b/plat/arm/common/aarch32/arm_helpers.S
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <asm_macros.S>
+#include <platform_def.h>
+
+	.weak	plat_arm_calc_core_pos
+	.weak	plat_my_core_pos
+
+	/* -----------------------------------------------------
+	 *  unsigned int plat_my_core_pos(void)
+	 *  This function uses the plat_arm_calc_core_pos()
+	 *  definition to get the index of the calling CPU.
+	 * -----------------------------------------------------
+	 */
+func plat_my_core_pos
+	ldcopr	r0, MPIDR
+	b	plat_arm_calc_core_pos
+endfunc plat_my_core_pos
+
+	/* -----------------------------------------------------
+	 *  unsigned int plat_arm_calc_core_pos(u_register_t mpidr)
+	 *  Helper function to calculate the core position.
+	 *  With this function: CorePos = (ClusterId * 4) +
+	 *  				  CoreId
+	 * -----------------------------------------------------
+	 */
+func plat_arm_calc_core_pos
+	and	r1, r0, #MPIDR_CPU_MASK
+	and	r0, r0, #MPIDR_CLUSTER_MASK
+	add	r0, r1, r0, LSR #6
+	bx	lr
+endfunc plat_arm_calc_core_pos
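
For reference, the arithmetic above is equivalent to the following C, assuming the
conventional mask values (CPU ID in MPIDR[7:0], cluster ID in MPIDR[15:8]); this
sketch is illustrative and not part of the patch.

    /* CorePos = (ClusterId * 4) + CoreId, e.g. MPIDR 0x101 -> position 5 */
    static unsigned int calc_core_pos_sketch(unsigned int mpidr)
    {
            unsigned int core = mpidr & 0xff;       /* MPIDR_CPU_MASK */
            unsigned int cluster = mpidr & 0xff00;  /* MPIDR_CLUSTER_MASK */

            return core + (cluster >> 6);           /* (cluster >> 8) * 4 */
    }
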
diff --git a/plat/arm/common/arm_common.c b/plat/arm/common/arm_common.c
index 93355fe..c53723d 100644
--- a/plat/arm/common/arm_common.c
+++ b/plat/arm/common/arm_common.c
@@ -134,6 +134,7 @@
 /*******************************************************************************
  * Gets SPSR for BL33 entry
  ******************************************************************************/
+#ifndef AARCH32
 uint32_t arm_get_spsr_for_bl33_entry(void)
 {
 	unsigned long el_status;
@@ -154,6 +155,28 @@
 	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
 	return spsr;
 }
+#else
+/*******************************************************************************
+ * Gets SPSR for BL33 entry
+ ******************************************************************************/
+uint32_t arm_get_spsr_for_bl33_entry(void)
+{
+	unsigned int hyp_status, mode, spsr;
+
+	hyp_status = GET_VIRT_EXT(read_id_pfr1());
+
+	mode = (hyp_status) ? MODE32_hyp : MODE32_svc;
+
+	/*
+	 * TODO: Consider the possibility of specifying the SPSR in
+	 * the FIP ToC and allowing the platform to have a say as
+	 * well.
+	 */
+	spsr = SPSR_MODE32(mode, plat_get_ns_image_entrypoint() & 0x1,
+			SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
+	return spsr;
+}
+#endif /* AARCH32 */
 
 /*******************************************************************************
  * Configures access to the system counter timer module.
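
The AArch32 path above selects Hyp or Supervisor mode from the Virtualization
Extensions field of ID_PFR1. A hedged sketch of that check, assuming the field sits
in ID_PFR1[15:12] (which is what GET_VIRT_EXT is expected to extract) and reusing
the MODE32_* constants already referenced in the hunk:

    /* Illustrative only: choose the BL33 entry mode on AArch32. */
    static unsigned int bl33_entry_mode_sketch(unsigned int id_pfr1)
    {
            /* ID_PFR1[15:12] != 0 => Virtualization Extensions implemented */
            unsigned int hyp_status = (id_pfr1 >> 12) & 0xf;

            return hyp_status ? MODE32_hyp : MODE32_svc;
    }
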
diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk
index 03b9fe4..0b961ea 100644
--- a/plat/arm/common/arm_common.mk
+++ b/plat/arm/common/arm_common.mk
@@ -28,23 +28,30 @@
 # POSSIBILITY OF SUCH DAMAGE.
 #
 
-# On ARM standard platorms, the TSP can execute from Trusted SRAM, Trusted
-# DRAM (if available) or the TZC secured area of DRAM.
-# Trusted SRAM is the default.
+ifeq (${ARCH}, aarch64)
+  # On ARM standard platforms, the TSP can execute from Trusted SRAM, Trusted
+  # DRAM (if available) or the TZC secured area of DRAM.
+  # Trusted SRAM is the default.
 
-ARM_TSP_RAM_LOCATION	:=	tsram
-ifeq (${ARM_TSP_RAM_LOCATION}, tsram)
-  ARM_TSP_RAM_LOCATION_ID = ARM_TRUSTED_SRAM_ID
-else ifeq (${ARM_TSP_RAM_LOCATION}, tdram)
-  ARM_TSP_RAM_LOCATION_ID = ARM_TRUSTED_DRAM_ID
-else ifeq (${ARM_TSP_RAM_LOCATION}, dram)
-  ARM_TSP_RAM_LOCATION_ID = ARM_DRAM_ID
-else
-  $(error "Unsupported ARM_TSP_RAM_LOCATION value")
-endif
+  ARM_TSP_RAM_LOCATION	:=	tsram
+  ifeq (${ARM_TSP_RAM_LOCATION}, tsram)
+    ARM_TSP_RAM_LOCATION_ID = ARM_TRUSTED_SRAM_ID
+  else ifeq (${ARM_TSP_RAM_LOCATION}, tdram)
+    ARM_TSP_RAM_LOCATION_ID = ARM_TRUSTED_DRAM_ID
+  else ifeq (${ARM_TSP_RAM_LOCATION}, dram)
+    ARM_TSP_RAM_LOCATION_ID = ARM_DRAM_ID
+  else
+    $(error "Unsupported ARM_TSP_RAM_LOCATION value")
+  endif
 
-# Process flags
-$(eval $(call add_define,ARM_TSP_RAM_LOCATION_ID))
+  # Process flags
+  $(eval $(call add_define,ARM_TSP_RAM_LOCATION_ID))
+
+  # Process ARM_BL31_IN_DRAM flag
+  ARM_BL31_IN_DRAM		:=	0
+  $(eval $(call assert_boolean,ARM_BL31_IN_DRAM))
+  $(eval $(call add_define,ARM_BL31_IN_DRAM))
+endif
 
 # For the original power-state parameter format, the State-ID can be encoded
 # according to the recommended encoding or zero. This flag determines which
@@ -83,7 +90,7 @@
 $(eval $(call add_define,ARM_BL31_IN_DRAM))
 
 # Enable PSCI_STAT_COUNT/RESIDENCY APIs on ARM platforms
-ENABLE_PSCI_STAT = 1
+ENABLE_PSCI_STAT		:=	1
 
 # On ARM platforms, separate the code and read-only data sections to allow
 # mapping the former as executable and the latter as execute-never.
@@ -91,15 +98,17 @@
 
 
 PLAT_INCLUDES		+=	-Iinclude/common/tbbr				\
-				-Iinclude/plat/arm/common			\
-				-Iinclude/plat/arm/common/aarch64
+				-Iinclude/plat/arm/common
 
+ifeq (${ARCH}, aarch64)
+PLAT_INCLUDES		+=	-Iinclude/plat/arm/common/aarch64
+endif
 
 PLAT_BL_COMMON_SOURCES	+=	lib/xlat_tables/xlat_tables_common.c		\
-				lib/xlat_tables/aarch64/xlat_tables.c		\
-				plat/arm/common/aarch64/arm_helpers.S		\
+				lib/xlat_tables/${ARCH}/xlat_tables.c		\
+				plat/arm/common/${ARCH}/arm_helpers.S		\
 				plat/arm/common/arm_common.c			\
-				plat/common/aarch64/plat_common.c
+				plat/common/${ARCH}/plat_common.c
 
 BL1_SOURCES		+=	drivers/arm/sp805/sp805.c			\
 				drivers/io/io_fip.c				\
diff --git a/plat/arm/common/arm_gicv3.c b/plat/arm/common/arm_gicv3.c
index a20fd56..ac309f2 100644
--- a/plat/arm/common/arm_gicv3.c
+++ b/plat/arm/common/arm_gicv3.c
@@ -77,7 +77,7 @@
 	 * can use GIC system registers to manage interrupts and does
 	 * not need GIC interface base addresses to be configured.
 	 */
-#if IMAGE_BL31
+#if (AARCH32 && IMAGE_BL32) || (IMAGE_BL31 && !AARCH32)
 	gicv3_driver_init(&arm_gic_data);
 #endif
 }
diff --git a/plat/arm/common/sp_min/arm_sp_min.mk b/plat/arm/common/sp_min/arm_sp_min.mk
new file mode 100644
index 0000000..8a4d598
--- /dev/null
+++ b/plat/arm/common/sp_min/arm_sp_min.mk
@@ -0,0 +1,37 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# Neither the name of ARM nor the names of its contributors may be used
+# to endorse or promote products derived from this software without specific
+# prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+
+# SP MIN source files common to ARM standard platforms
+BL32_SOURCES		+=	plat/arm/common/arm_pm.c			\
+				plat/arm/common/arm_topology.c			\
+				plat/arm/common/sp_min/arm_sp_min_setup.c	\
+				plat/common/aarch32/platform_mp_stack.S		\
+				plat/common/plat_psci_common.c
+
diff --git a/plat/arm/common/sp_min/arm_sp_min_setup.c b/plat/arm/common/sp_min/arm_sp_min_setup.c
new file mode 100644
index 0000000..927f30f
--- /dev/null
+++ b/plat/arm/common/sp_min/arm_sp_min_setup.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <console.h>
+#include <mmio.h>
+#include <plat_arm.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <platform_sp_min.h>
+
+#define BL32_END (uintptr_t)(&__BL32_END__)
+
+#if USE_COHERENT_MEM
+/*
+ * The next 2 constants identify the extents of the coherent memory region.
+ * These addresses are used by the MMU setup code and therefore they must be
+ * page-aligned.  It is the responsibility of the linker script to ensure that
+ * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
+ * page-aligned addresses.
+ */
+#define BL32_COHERENT_RAM_BASE (uintptr_t)(&__COHERENT_RAM_START__)
+#define BL32_COHERENT_RAM_LIMIT (uintptr_t)(&__COHERENT_RAM_END__)
+#endif
+
+
+static entry_point_info_t bl33_image_ep_info;
+
+/* Weak definitions may be overridden in specific ARM standard platform */
+#pragma weak sp_min_early_platform_setup
+#pragma weak sp_min_platform_setup
+#pragma weak sp_min_plat_arch_setup
+
+#ifndef RESET_TO_SP_MIN
+#error ("RESET_TO_SP_MIN flag is expected to be set.")
+#endif
+
+
+/*******************************************************************************
+ * Return a pointer to the 'entry_point_info' structure of the next image for
+ * the non-secure state, i.e. BL33. A NULL pointer is returned if the image
+ * does not exist.
+ ******************************************************************************/
+entry_point_info_t *sp_min_plat_get_bl33_ep_info(void)
+{
+	entry_point_info_t *next_image_info;
+
+	next_image_info = &bl33_image_ep_info;
+
+	/*
+	 * None of the images on the ARM development platforms can have 0x0
+	 * as the entrypoint
+	 */
+	if (next_image_info->pc)
+		return next_image_info;
+	else
+		return NULL;
+}
+
+/*******************************************************************************
+ * Perform early platform setup. We expect SP_MIN to be the first boot loader
+ * image and the RESET_TO_SP_MIN build option to be set.
+ ******************************************************************************/
+void arm_sp_min_early_platform_setup(void)
+{
+	/* Initialize the console to provide early debug support */
+	console_init(PLAT_ARM_BOOT_UART_BASE, PLAT_ARM_BOOT_UART_CLK_IN_HZ,
+				ARM_CONSOLE_BAUDRATE);
+
+	/* Populate entry point information for BL33 */
+	SET_PARAM_HEAD(&bl33_image_ep_info,
+				PARAM_EP,
+				VERSION_1,
+				0);
+	/*
+	 * Tell SP_MIN where the non-trusted software image
+	 * is located and the entry state information
+	 */
+#ifdef PRELOADED_BL33_BASE
+	bl33_image_ep_info.pc = PRELOADED_BL33_BASE;
+#else
+	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
+#endif
+	bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry();
+	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+}
+
+void sp_min_early_platform_setup(void)
+{
+	arm_sp_min_early_platform_setup();
+
+	/*
+	 * Initialize Interconnect for this cluster during cold boot.
+	 * No need for locks as no other CPU is active.
+	 */
+	plat_arm_interconnect_init();
+
+	/*
+	 * Enable Interconnect coherency for the primary CPU's cluster.
+	 * Earlier bootloader stages might already do this (e.g. Trusted
+	 * Firmware's BL1 does it) but we can't assume so. There is no harm in
+	 * executing this code twice anyway.
+	 * Platform specific PSCI code will enable coherency for other
+	 * clusters.
+	 */
+	plat_arm_interconnect_enter_coherency();
+}
+
+/*******************************************************************************
+ * Perform platform specific setup for SP_MIN
+ ******************************************************************************/
+void sp_min_platform_setup(void)
+{
+	/* Initialize the GIC driver, cpu and distributor interfaces */
+	plat_arm_gic_driver_init();
+	plat_arm_gic_init();
+
+	/*
+	 * Do initial security configuration to allow DRAM/device access
+	 * (if earlier BL has not already done so).
+	 * TODO: If RESET_TO_SP_MIN is not set, the security setup needs
+	 * to be skipped.
+	 */
+	plat_arm_security_setup();
+
+	/* Enable and initialize the System level generic timer */
+	mmio_write_32(ARM_SYS_CNTCTL_BASE + CNTCR_OFF,
+			CNTCR_FCREQ(0) | CNTCR_EN);
+
+	/* Allow access to the System counter timer module */
+	arm_configure_sys_timer();
+
+	/* Initialize power controller before setting up topology */
+	plat_arm_pwrc_setup();
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU
+ ******************************************************************************/
+void sp_min_plat_arch_setup(void)
+{
+
+	arm_setup_page_tables(BL32_BASE,
+			      (BL32_END - BL32_BASE),
+			      BL_CODE_BASE,
+			      BL_CODE_LIMIT,
+			      BL_RO_DATA_BASE,
+			      BL_RO_DATA_LIMIT
+#if USE_COHERENT_MEM
+			      , BL32_COHERENT_RAM_BASE,
+			      BL32_COHERENT_RAM_LIMIT
+#endif
+			      );
+
+	enable_mmu_secure(0);
+}
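
For context, sp_min_plat_get_bl33_ep_info() defined above is consumed by the generic
SP_MIN code when preparing the exit to the normal world. The caller below is only an
illustration of that contract (reusing the headers already included by this file),
not a quote of the actual implementation.

    /* Illustrative consumer: fetch the BL33 entry point populated during
     * early platform setup and refuse to continue without one. */
    static void prepare_bl33_entry_sketch(void)
    {
            entry_point_info_t *bl33_ep = sp_min_plat_get_bl33_ep_info();

            if (bl33_ep == NULL)
                    panic();

            /* bl33_ep->pc and bl33_ep->spsr describe the non-secure entry state */
    }
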
diff --git a/plat/common/aarch32/plat_common.c b/plat/common/aarch32/plat_common.c
new file mode 100644
index 0000000..a5b9535
--- /dev/null
+++ b/plat/common/aarch32/plat_common.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform.h>
+#include <xlat_tables.h>
+
+/*
+ * The following platform setup functions are weakly defined. They
+ * provide typical implementations that may be re-used by multiple
+ * platforms but may also be overridden by a platform if required.
+ */
+#pragma weak bl32_plat_enable_mmu
+
+void bl32_plat_enable_mmu(uint32_t flags)
+{
+	enable_mmu_secure(flags);
+}
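
Since bl32_plat_enable_mmu is weak, a platform only needs to provide a strong
definition to change the behaviour. A hypothetical override (illustrative, not part
of this patch) could look like:

    /* Hypothetical platform override: do platform-specific work before
     * turning the MMU on, then fall back to the common helper. */
    void bl32_plat_enable_mmu(uint32_t flags)
    {
            /* e.g. adjust platform mappings here first */
            enable_mmu_secure(flags);
    }
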
diff --git a/plat/common/aarch32/platform_helpers.S b/plat/common/aarch32/platform_helpers.S
new file mode 100644
index 0000000..481dd68
--- /dev/null
+++ b/plat/common/aarch32/platform_helpers.S
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+	.weak	plat_my_core_pos
+	.weak	plat_reset_handler
+	.weak	platform_mem_init
+	.weak	plat_panic_handler
+
+	/* -----------------------------------------------------
+	 *  int plat_my_core_pos(void);
+	 *  With this function: CorePos	= (ClusterId * 4) +
+	 *				  CoreId
+	 * -----------------------------------------------------
+	 */
+func plat_my_core_pos
+	ldcopr	r0, MPIDR
+	and	r1, r0, #MPIDR_CPU_MASK
+	and	r0, r0, #MPIDR_CLUSTER_MASK
+	add	r0, r1, r0, LSR #6
+	bx	lr
+endfunc	plat_my_core_pos
+
+	/* -----------------------------------------------------
+	 * Placeholder function which should be redefined by
+	 * each platform.
+	 * -----------------------------------------------------
+	 */
+func plat_reset_handler
+	bx	lr
+endfunc plat_reset_handler
+
+	/* ---------------------------------------------------------------------
+	 * Placeholder function which should be redefined by
+	 * each platform.
+	 * ---------------------------------------------------------------------
+	 */
+func platform_mem_init
+	bx	lr
+endfunc platform_mem_init
+
+	/* -----------------------------------------------------
+	 * void plat_panic_handler(void) __dead2;
+	 * Endless loop by default.
+	 * -----------------------------------------------------
+	 */
+func plat_panic_handler
+	b	plat_panic_handler
+endfunc plat_panic_handler
diff --git a/plat/common/aarch32/platform_mp_stack.S b/plat/common/aarch32/platform_mp_stack.S
new file mode 100644
index 0000000..a015436
--- /dev/null
+++ b/plat/common/aarch32/platform_mp_stack.S
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+	.globl	plat_get_my_stack
+	.globl	plat_set_my_stack
+
+	/* -----------------------------------------------------
+	 * uintptr_t plat_get_my_stack (void)
+	 *
+	 * For the current CPU, this function returns the stack
+	 * pointer for a stack allocated in normal memory.
+	 * -----------------------------------------------------
+	 */
+func plat_get_my_stack
+	mov	r3, lr
+	get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+	bx	r3
+endfunc	plat_get_my_stack
+
+	/* -----------------------------------------------------
+	 * void plat_set_my_stack ()
+	 *
+	 * For the current CPU, this function sets the stack
+	 * pointer to a stack allocated in normal memory.
+	 * -----------------------------------------------------
+	 */
+func plat_set_my_stack
+	mov	r3, lr
+	get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+	mov	sp, r0
+	bx	r3
+endfunc plat_set_my_stack
+
+	/* -----------------------------------------------------
+	 * Per-cpu stacks in normal memory. Each cpu gets a
+	 * stack of PLATFORM_STACK_SIZE bytes.
+	 * -----------------------------------------------------
+	 */
+declare_stack platform_normal_stacks, tzfw_normal_stacks, \
+		PLATFORM_STACK_SIZE, PLATFORM_CORE_COUNT
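
The stack returned by get_my_mp_stack is conventionally the top of the calling CPU's
slot in tzfw_normal_stacks. A sketch of the equivalent arithmetic, under the
assumption of one PLATFORM_STACK_SIZE slot per core and downward-growing stacks:

    /* Illustrative only: initial (highest) stack address for this CPU. */
    static uintptr_t my_stack_top_sketch(uintptr_t stacks_base)
    {
            return stacks_base +
                    ((uintptr_t)plat_my_core_pos() + 1U) * PLATFORM_STACK_SIZE;
    }
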
diff --git a/plat/common/plat_gicv3.c b/plat/common/plat_gicv3.c
index 249caf8..c961d62 100644
--- a/plat/common/plat_gicv3.c
+++ b/plat/common/plat_gicv3.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -186,6 +186,11 @@
 #pragma weak plat_ic_acknowledge_interrupt
 #pragma weak plat_ic_end_of_interrupt
 
+/* In AArch32, the secure group1 interrupts are targeted to Secure PL1 */
+#ifdef AARCH32
+#define IS_IN_EL1()	IS_IN_SECURE()
+#endif
+
 /*
  * This function returns the highest priority pending interrupt at
  * the Interrupt controller
diff --git a/plat/compat/plat_compat.mk b/plat/compat/plat_compat.mk
index d9d50f6..a1cdd80 100644
--- a/plat/compat/plat_compat.mk
+++ b/plat/compat/plat_compat.mk
@@ -33,6 +33,9 @@
 				 PSCI_EXTENDED_STATE_ID is not set")
 endif
 
+ifneq (${ARCH}, aarch64)
+  $(error "PSCI Compatibility mode is only supported for AArch64 platforms")
+endif
 
 PLAT_BL_COMMON_SOURCES	+=	plat/compat/aarch64/plat_helpers_compat.S
 
diff --git a/plat/mediatek/mt8173/platform.mk b/plat/mediatek/mt8173/platform.mk
index 8f48230..c815110 100644
--- a/plat/mediatek/mt8173/platform.mk
+++ b/plat/mediatek/mt8173/platform.mk
@@ -50,7 +50,7 @@
 				drivers/arm/gic/arm_gic.c			\
 				drivers/arm/gic/gic_v2.c			\
 				drivers/arm/gic/gic_v3.c			\
-				drivers/console/console.S			\
+				drivers/console/aarch64/console.S		\
 				drivers/delay_timer/delay_timer.c		\
 				drivers/delay_timer/generic_delay_timer.c	\
 				lib/cpus/aarch64/aem_generic.S			\
diff --git a/plat/nvidia/tegra/common/tegra_common.mk b/plat/nvidia/tegra/common/tegra_common.mk
index 03ca773..3c07032 100644
--- a/plat/nvidia/tegra/common/tegra_common.mk
+++ b/plat/nvidia/tegra/common/tegra_common.mk
@@ -48,9 +48,9 @@
 
 BL31_SOURCES		+=	drivers/arm/gic/gic_v2.c			\
 				drivers/arm/gic/gic_v3.c			\
-				drivers/console/console.S			\
+				drivers/console/aarch64/console.S		\
 				drivers/delay_timer/delay_timer.c		\
-				drivers/ti/uart/16550_console.S			\
+				drivers/ti/uart/aarch64/16550_console.S		\
 				plat/common/aarch64/platform_mp_stack.S		\
 				plat/common/plat_psci_common.c			\
 				${COMMON_DIR}/aarch64/tegra_helpers.S		\
diff --git a/plat/qemu/platform.mk b/plat/qemu/platform.mk
index 9542198..aa08bd3 100644
--- a/plat/qemu/platform.mk
+++ b/plat/qemu/platform.mk
@@ -37,7 +37,7 @@
 
 
 PLAT_BL_COMMON_SOURCES	:=	plat/qemu/qemu_common.c			\
-				drivers/arm/pl011/pl011_console.S	\
+				drivers/arm/pl011/aarch64/pl011_console.S \
 				lib/xlat_tables/xlat_tables_common.c	\
 				lib/xlat_tables/aarch64/xlat_tables.c
 
diff --git a/plat/rockchip/rk3368/platform.mk b/plat/rockchip/rk3368/platform.mk
index 1dca4c5..73a56e3 100644
--- a/plat/rockchip/rk3368/platform.mk
+++ b/plat/rockchip/rk3368/platform.mk
@@ -55,8 +55,8 @@
 
 BL31_SOURCES		+=	${RK_GIC_SOURCES}				\
 				drivers/arm/cci/cci.c				\
-				drivers/console/console.S			\
-				drivers/ti/uart/16550_console.S			\
+				drivers/console/aarch64/console.S		\
+				drivers/ti/uart/aarch64/16550_console.S		\
 				drivers/delay_timer/delay_timer.c		\
 				drivers/delay_timer/generic_delay_timer.c	\
 				lib/cpus/aarch64/cortex_a53.S			\
diff --git a/plat/rockchip/rk3399/platform.mk b/plat/rockchip/rk3399/platform.mk
index e8d4d41..94992f6 100644
--- a/plat/rockchip/rk3399/platform.mk
+++ b/plat/rockchip/rk3399/platform.mk
@@ -55,8 +55,8 @@
 
 BL31_SOURCES            +=      ${RK_GIC_SOURCES}                               \
                                 drivers/arm/cci/cci.c                           \
-                                drivers/console/console.S                       \
-                                drivers/ti/uart/16550_console.S                 \
+                                drivers/console/aarch64/console.S		\
+                                drivers/ti/uart/aarch64/16550_console.S		\
                                 drivers/delay_timer/delay_timer.c               \
                                 drivers/delay_timer/generic_delay_timer.c	\
 				drivers/gpio/gpio.c				\
diff --git a/plat/xilinx/zynqmp/platform.mk b/plat/xilinx/zynqmp/platform.mk
index fe939c7..9bde5ff 100644
--- a/plat/xilinx/zynqmp/platform.mk
+++ b/plat/xilinx/zynqmp/platform.mk
@@ -67,8 +67,8 @@
 				drivers/arm/gic/common/gic_common.c		\
 				drivers/arm/gic/v2/gicv2_main.c			\
 				drivers/arm/gic/v2/gicv2_helpers.c		\
-				drivers/cadence/uart/cdns_console.S		\
-				drivers/console/console.S			\
+				drivers/cadence/uart/aarch64/cdns_console.S	\
+				drivers/console/aarch64/console.S		\
 				plat/arm/common/aarch64/arm_helpers.S		\
 				plat/arm/common/arm_cci.c			\
 				plat/arm/common/arm_common.c			\