ARMv8 Trusted Firmware release v0.2
diff --git a/plat/common/aarch64/platform_helpers.S b/plat/common/aarch64/platform_helpers.S
new file mode 100644
index 0000000..c574eb9
--- /dev/null
+++ b/plat/common/aarch64/platform_helpers.S
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <platform.h>
+
+
+	.globl	pcpu_dv_mem_stack
+	.weak	platform_get_core_pos
+	.weak	platform_set_stack
+	.weak	platform_is_primary_cpu
+	.weak	platform_set_coherent_stack
+	.weak	platform_check_mpidr
+	.weak	plat_report_exception
+
+	/* -----------------------------------------------------
+	 * 512 bytes of coherent stack for each cpu
+	 * -----------------------------------------------------
+	 */
+#define PCPU_DV_MEM_STACK_SIZE	0x200
+
+
+	.section	.text, "ax"; .align 3
+
+	/* -----------------------------------------------------
+	 * void platform_set_coherent_stack (unsigned long mpidr);
+	 * For a given mpidr, this function sets the stack
+	 * pointer to a stack allocated in device memory. This
+	 * stack can be used by C code which enables/disables
+	 * the SCTLR.M and SCTLR.C bits, e.g. while powering
+	 * down a cpu
+	 * -----------------------------------------------------
+	 */
+platform_set_coherent_stack:; .type platform_set_coherent_stack, %function
+	mov	x5, x30 // lr
+	bl	platform_get_core_pos
+	add	x0, x0, #1
+	mov	x1, #PCPU_DV_MEM_STACK_SIZE
+	mul	x0, x0, x1
+	ldr	x1, =pcpu_dv_mem_stack
+	add	sp, x1, x0
+	ret	x5
+
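+	/* -----------------------------------------------------
+	 * For illustration, the stack-top computation above as
+	 * a C sketch (stacks grow down, so each cpu gets the
+	 * top of its 512-byte slot):
+	 *
+	 *   sp = pcpu_dv_mem_stack +
+	 *        (core_pos + 1) * PCPU_DV_MEM_STACK_SIZE;
+	 * -----------------------------------------------------
+	 */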
+
+	/* -----------------------------------------------------
+	 *  int platform_get_core_pos(int mpidr);
+	 *  With this function: CorePos = (ClusterId * 4) +
+	 *  				  CoreId
+	 * -----------------------------------------------------
+	 */
+platform_get_core_pos:; .type platform_get_core_pos, %function
+	and	x1, x0, #MPIDR_CPU_MASK
+	and	x0, x0, #MPIDR_CLUSTER_MASK
+	add	x0, x1, x0, LSR #6
+	ret
+
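+	/* -----------------------------------------------------
+	 * Equivalent C sketch, assuming the v0.2 mask values
+	 * MPIDR_CPU_MASK == 0xff and MPIDR_CLUSTER_MASK ==
+	 * 0xff00 (so the LSR #6 above yields ClusterId * 4):
+	 *
+	 *   unsigned int core_pos(unsigned long mpidr)
+	 *   {
+	 *       unsigned int cpu = mpidr & 0xff;
+	 *       unsigned int cluster = (mpidr >> 8) & 0xff;
+	 *       return (cluster * 4) + cpu;
+	 *   }
+	 * -----------------------------------------------------
+	 */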
+
+	/* -----------------------------------------------------
+	 * unsigned int platform_is_primary_cpu (unsigned int mpid);
+	 *
+	 * Given the mpidr say whether this cpu is the primary
+	 * cpu (applicable only after a cold boot)
+	 * -----------------------------------------------------
+	 */
+platform_is_primary_cpu:; .type platform_is_primary_cpu, %function
+	and	x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
+	cmp	x0, #PRIMARY_CPU
+	cset	x0, eq
+	ret
+
+
+	/* -----------------------------------------------------
+	 * void platform_set_stack (int mpidr)
+	 * -----------------------------------------------------
+	 */
+platform_set_stack:; .type platform_set_stack, %function
+	mov	x9, x30 // lr
+	bl	platform_get_core_pos
+	add	x0, x0, #1
+	mov	x1, #PLATFORM_STACK_SIZE
+	mul	x0, x0, x1
+	ldr	x1, =platform_normal_stacks
+	add	sp, x1, x0
+	ret	x9
+
+	/* -----------------------------------------------------
+	 * Placeholder function which should be redefined by
+	 * each platform.
+	 * -----------------------------------------------------
+	 */
+platform_check_mpidr:; .type platform_check_mpidr, %function
+	mov	x0, xzr
+	ret
+
+	/* -----------------------------------------------------
+	 * Placeholder function which should be redefined by
+	 * each platform.
+	 * -----------------------------------------------------
+	 */
+plat_report_exception:
+	ret
+
+	/* -----------------------------------------------------
+	 * Per-cpu stacks in device memory.
+	 * Used for C code just before power down or right after
+	 * power up when the MMU or caches need to be turned on
+	 * or off. Each cpu gets a stack of 512 bytes.
+	 * -----------------------------------------------------
+	 */
+	.section	tzfw_coherent_mem, "aw", %nobits; .align 6
+
+pcpu_dv_mem_stack:
+	/* Zero fill */
+	.space (PLATFORM_CORE_COUNT * PCPU_DV_MEM_STACK_SIZE), 0
diff --git a/plat/fvp/aarch64/bl1_plat_helpers.S b/plat/fvp/aarch64/bl1_plat_helpers.S
new file mode 100644
index 0000000..d72dc39
--- /dev/null
+++ b/plat/fvp/aarch64/bl1_plat_helpers.S
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <platform.h>
+#include <fvp_pwrc.h>
+#include <gic.h>
+
+	.globl	platform_get_entrypoint
+	.globl	platform_cold_boot_init
+	.globl	plat_secondary_cold_boot_setup
+
+
+	.section	platform_code, "ax"; .align 3
+
+
+	.macro	platform_choose_gicmmap  param1, param2, x_tmp, w_tmp, res
+	ldr	\x_tmp, =VE_SYSREGS_BASE + V2M_SYS_ID
+	ldr	\w_tmp, [\x_tmp]
+	ubfx \w_tmp, \w_tmp, #SYS_ID_BLD_SHIFT, #SYS_ID_BLD_LENGTH
+	cmp	\w_tmp, #BLD_GIC_VE_MMAP
+	csel	\res, \param1, \param2, eq
+	.endm
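+
+	/* -----------------------------------------------------
+	 * A hedged C equivalent of the macro above, mirroring
+	 * the SYS_ID decode in platform_config_setup(); the
+	 * mask and shift values are defined in platform.h:
+	 *
+	 *   unsigned long choose_gicmmap(unsigned long ve,
+	 *                                unsigned long base)
+	 *   {
+	 *       unsigned int id = mmio_read_32(VE_SYSREGS_BASE
+	 *                                      + V2M_SYS_ID);
+	 *       unsigned int bld = (id >> SYS_ID_BLD_SHIFT) &
+	 *                          SYS_ID_BLD_MASK;
+	 *       return bld == BLD_GIC_VE_MMAP ? ve : base;
+	 *   }
+	 * -----------------------------------------------------
+	 */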
+
+	/* -----------------------------------------------------
+	 * void plat_secondary_cold_boot_setup (void);
+	 *
+	 * This function performs any platform specific actions
+	 * needed for a secondary cpu after a cold reset, e.g.
+	 * marking the cpu's presence, a mechanism to place it
+	 * in a holding pen etc.
+	 * TODO: Should we read the PSYS register to make sure
+	 * that the request has gone through?
+	 * -----------------------------------------------------
+	 */
+plat_secondary_cold_boot_setup:; .type plat_secondary_cold_boot_setup, %function
+	bl	read_mpidr
+	mov	x19, x0
+	bl	platform_get_core_pos
+	mov	x20, x0
+
+	/* ---------------------------------------------
+	 * Mark this cpu as being present. This is a
+	 * SO write. This array will be read using
+	 * normal memory so invalidate any prefetched
+	 * stale copies first.
+	 * ---------------------------------------------
+	 */
+	ldr	x1, =TZDRAM_BASE
+	mov	x0, #AFFMAP_OFF
+	add	x1, x0, x1
+	mov	x2, #PLATFORM_CACHE_LINE_SIZE
+	mul	x2, x2, x20
+	add	x0, x1, x2
+	bl	dcivac
+	str	x19, [x1, x2]
+
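+	/* ---------------------------------------------
+	 * The same invalidate-then-write pattern as a
+	 * C sketch (AFFMAP layout as used above):
+	 *
+	 *   slot = TZDRAM_BASE + AFFMAP_OFF +
+	 *       core_pos * PLATFORM_CACHE_LINE_SIZE;
+	 *   dcivac(slot);        // drop stale copies
+	 *   *(uint64_t *) slot = mpidr; // SO write
+	 * ---------------------------------------------
+	 */
+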
+	/* ---------------------------------------------
+	 * Power down this cpu.
+	 * TODO: Do we need to worry about powering the
+	 * cluster down as well here? That will need
+	 * locks which we won't have unless an elf-
+	 * loader zeroes out the zi section.
+	 * ---------------------------------------------
+	 */
+	ldr	x1, =PWRC_BASE
+	str	w19, [x1, #PPOFFR_OFF]
+
+	/* ---------------------------------------------
+	 * Deactivate the gic cpu interface as well
+	 * ---------------------------------------------
+	 */
+	ldr	x0, =VE_GICC_BASE
+	ldr	x1, =BASE_GICC_BASE
+	platform_choose_gicmmap	x0, x1, x2, w2, x1
+	mov	w0, #(IRQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP1)
+	orr	w0, w0, #(IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP0)
+	str	w0, [x1, #GICC_CTLR]
+
+	/* ---------------------------------------------
+	 * There is no sane reason to come out of this
+	 * wfi so panic if we do. This cpu will be
+	 * powered on and reset by the cpu_on pm api
+	 * ---------------------------------------------
+	 */
+	dsb	sy
+	wfi
+cb_panic:
+	b	cb_panic
+
+
+	/* -----------------------------------------------------
+	 * unsigned long platform_get_entrypoint (unsigned int mpid);
+	 *
+	 * Main job of this routine is to distinguish between
+	 * a cold and warm boot.
+	 * On a cold boot the secondaries first wait for the
+	 * platform to be initialized after which they are
+	 * hotplugged in. The primary proceeds to perform the
+	 * platform initialization.
+	 * On a warm boot, each cpu jumps to the address in its
+	 * mailbox.
+	 *
+	 * TODO: Not a good idea to save lr in a temp reg
+	 * TODO: PSYSR is a common register and should be
+	 * 	accessed using locks. Since it's not possible
+	 * 	to use locks immediately after a cold reset
+	 * 	we are relying on the fact that after a cold
+	 * 	reset all cpus will read the same WK field
+	 * -----------------------------------------------------
+	 */
+platform_get_entrypoint:; .type platform_get_entrypoint, %function
+	mov	x9, x30 // lr
+	mov	x2, x0
+	ldr	x1, =PWRC_BASE
+	str	w2, [x1, #PSYSR_OFF]
+	ldr	w2, [x1, #PSYSR_OFF]
+	ubfx	w2, w2, #PSYSR_WK_SHIFT, #PSYSR_WK_MASK
+	cbnz	w2, warm_reset
+	mov	x0, x2
+	b	exit
+warm_reset:
+	/* ---------------------------------------------
+	 * A per-cpu mailbox is maintained in the
+	 * trusted DRAM. It's flushed out of the caches
+	 * after every update using normal memory so
+	 * it's safe to read it here with SO attributes
+	 * ---------------------------------------------
+	 */
+	ldr	x10, =TZDRAM_BASE + MBOX_OFF
+	bl	platform_get_core_pos
+	lsl	x0, x0, #CACHE_WRITEBACK_SHIFT
+	ldr	x0, [x10, x0]
+	cbz	x0, _panic
+exit:
+	ret	x9
+_panic:	b	_panic
+
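+	/* -----------------------------------------------------
+	 * The warm/cold decision above, restated as a C sketch.
+	 * Note the asm passes PSYSR_WK_MASK as the ubfx width,
+	 * so the masking convention below is an assumption:
+	 *
+	 *   mmio_write_32(PWRC_BASE + PSYSR_OFF, mpidr);
+	 *   wk = (mmio_read_32(PWRC_BASE + PSYSR_OFF) >>
+	 *         PSYSR_WK_SHIFT) & wk_mask;
+	 *   if (wk == 0)
+	 *       return 0;  // cold boot
+	 *   ep = *(uint64_t *) (TZDRAM_BASE + MBOX_OFF +
+	 *       (core_pos << CACHE_WRITEBACK_SHIFT));
+	 *   if (ep == 0)
+	 *       panic();   // warm boot with empty mailbox
+	 *   return ep;
+	 * -----------------------------------------------------
+	 */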
+
+	/* -----------------------------------------------------
+	 * void platform_mem_init (void);
+	 *
+	 * Zero out the mailbox registers in the TZDRAM. The
+	 * mmu is turned off right now and only the primary can
+	 * ever execute this code. Secondaries will read the
+	 * mailboxes using SO accesses. In short, BL31 will
+	 * update the mailboxes after mapping the tzdram as
+	 * normal memory. It will flush its copy after update.
+	 * BL1 will always read the mailboxes with the MMU off
+	 * -----------------------------------------------------
+	 */
+platform_mem_init:; .type platform_mem_init, %function
+	ldr	x0, =TZDRAM_BASE + MBOX_OFF
+	stp	xzr, xzr, [x0, #0]
+	stp	xzr, xzr, [x0, #0x10]
+	stp	xzr, xzr, [x0, #0x20]
+	stp	xzr, xzr, [x0, #0x30]
+	ret
+
+
+	/* -----------------------------------------------------
+	 * void platform_cold_boot_init (bl1_main function);
+	 *
+	 * Routine called only by the primary cpu after a cold
+	 * boot to perform early platform initialization
+	 * -----------------------------------------------------
+	 */
+platform_cold_boot_init:; .type platform_cold_boot_init, %function
+	mov	x20, x0
+	bl	platform_mem_init
+	bl	read_mpidr
+	mov	x19, x0
+
+	/* ---------------------------------------------
+	 * Give ourselves a small coherent stack to
+	 * ease the pain of initializing the MMU and
+	 * CCI in assembler
+	 * ---------------------------------------------
+	 */
+	bl	platform_set_coherent_stack
+
+	/* ---------------------------------------------
+	 * Mark this cpu as being present. This is a
+	 * SO write. Invalidate any stale copies out of
+	 * paranoia as there is no one else around.
+	 * ---------------------------------------------
+	 */
+	mov	x0, x19
+	bl	platform_get_core_pos
+	mov	x21, x0
+
+	ldr	x1, =TZDRAM_BASE
+	mov	x0, #AFFMAP_OFF
+	add	x1, x0, x1
+	mov	x2, #PLATFORM_CACHE_LINE_SIZE
+	mul	x2, x2, x21
+	add	x0, x1, x2
+	bl	dcivac
+	str	x19, [x1, x2]
+
+	/* ---------------------------------------------
+	 * Enable CCI-400 for this cluster. No need
+	 * for locks as no other cpu is active at the
+	 * moment
+	 * ---------------------------------------------
+	 */
+	mov	x0, x19
+	bl	cci_enable_coherency
+
+	/* ---------------------------------------------
+	 * Architectural init. can be generic e.g.
+	 * enabling stack alignment and platform spec-
+	 * ific e.g. MMU & page table setup as per the
+	 * platform memory map. Perform the latter here
+	 * and the former in bl1_main.
+	 * ---------------------------------------------
+	 */
+	bl	bl1_early_platform_setup
+	bl	bl1_plat_arch_setup
+
+	/* ---------------------------------------------
+	 * Give ourselves a stack allocated in Normal
+	 * -IS-WBWA memory
+	 * ---------------------------------------------
+	 */
+	mov	x0, x19
+	bl	platform_set_stack
+
+	/* ---------------------------------------------
+	 * Jump to the main function. Returning from it
+	 * is a terminal error.
+	 * ---------------------------------------------
+	 */
+	blr	x20
+
+cb_init_panic:
+	b	cb_init_panic
diff --git a/plat/fvp/aarch64/fvp_common.c b/plat/fvp/aarch64/fvp_common.c
new file mode 100644
index 0000000..762f542
--- /dev/null
+++ b/plat/fvp/aarch64/fvp_common.c
@@ -0,0 +1,600 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <platform.h>
+#include <bl_common.h>
+/* Included only for error codes */
+#include <psci.h>
+
+unsigned char platform_normal_stacks[PLATFORM_STACK_SIZE][PLATFORM_CORE_COUNT]
+__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE),
+		section("tzfw_normal_stacks")));
+
+/*******************************************************************************
+ * This array holds the characteristics of the differences between the three
+ * FVP platforms (Base, A53_A57 & Foundation). It will be populated during cold
+ * boot at each boot stage by the primary before enabling the MMU (to allow cci
+ * configuration) & used thereafter. Each BL will have its own copy to allow
+ * independent operation.
+ ******************************************************************************/
+static unsigned long platform_config[CONFIG_LIMIT];
+
+/*******************************************************************************
+ * TODO: Check page table alignment to avoid space wastage
+ ******************************************************************************/
+
+/*******************************************************************************
+ * Level 1 translation tables need 4 entries for the 4GB address space
+ * accessible by the secure firmware. Input address space will be restricted
+ * using the T0SZ settings in the TCR.
+ ******************************************************************************/
+static unsigned long l1_xlation_table[ADDR_SPACE_SIZE >> 30]
+__attribute__ ((aligned((ADDR_SPACE_SIZE >> 30) << 3)));
+
+/*******************************************************************************
+ * Level 2 translation tables describe the first & second GB of the address
+ * space needed to address secure peripherals e.g. trusted ROM and RAM.
+ ******************************************************************************/
+static unsigned long l2_xlation_table[NUM_L2_PAGETABLES][NUM_2MB_IN_GB]
+__attribute__ ((aligned(NUM_2MB_IN_GB << 3)));
+
+/*******************************************************************************
+ * Level 3 translation tables (2 sets) describe the trusted & non-trusted RAM
+ * regions at a granularity of 4K.
+ ******************************************************************************/
+static unsigned long l3_xlation_table[NUM_L3_PAGETABLES][NUM_4K_IN_2MB]
+__attribute__ ((aligned(NUM_4K_IN_2MB << 3)));
+
+/*******************************************************************************
+ * Helper to create a level 1/2 table descriptor which points to a level 2/3
+ * table.
+ ******************************************************************************/
+static unsigned long create_table_desc(unsigned long *next_table_ptr)
+{
+	unsigned long desc = (unsigned long) next_table_ptr;
+
+	/* Clear the last 12 bits */
+	desc >>= FOUR_KB_SHIFT;
+	desc <<= FOUR_KB_SHIFT;
+
+	desc |= TABLE_DESC;
+
+	return desc;
+}
+
+/*******************************************************************************
+ * Helper to create a level 1/2/3 block descriptor which maps the va to addr
+ ******************************************************************************/
+static unsigned long create_block_desc(unsigned long desc,
+				       unsigned long addr,
+				       unsigned int level)
+{
+	switch (level) {
+	case LEVEL1:
+		desc |= (addr << FIRST_LEVEL_DESC_N) | BLOCK_DESC;
+		break;
+	case LEVEL2:
+		desc |= (addr << SECOND_LEVEL_DESC_N) | BLOCK_DESC;
+		break;
+	case LEVEL3:
+		desc |= (addr << THIRD_LEVEL_DESC_N) | TABLE_DESC;
+		break;
+	default:
+		assert(0);
+	}
+
+	return desc;
+}
+
+/*******************************************************************************
+ * Helper to create a level 1/2/3 block descriptor which maps the va to output_
+ * addr with Device nGnRE attributes.
+ ******************************************************************************/
+static unsigned long create_device_block(unsigned long output_addr,
+					 unsigned int level,
+					 unsigned int ns)
+{
+	unsigned long upper_attrs, lower_attrs, desc;
+
+	lower_attrs = LOWER_ATTRS(ACCESS_FLAG | OSH | AP_RW);
+	lower_attrs |= LOWER_ATTRS(ns | ATTR_DEVICE_INDEX);
+	upper_attrs = UPPER_ATTRS(XN);
+	desc = upper_attrs | lower_attrs;
+
+	return create_block_desc(desc, output_addr, level);
+}
+
+/*******************************************************************************
+ * Helper to create a level 1/2/3 block descriptor which maps the va to output_
+ * addr with inner-shareable normal wbwa read-only memory attributes.
+ ******************************************************************************/
+static unsigned long create_romem_block(unsigned long output_addr,
+					unsigned int level,
+					unsigned int ns)
+{
+	unsigned long upper_attrs, lower_attrs, desc;
+
+	lower_attrs = LOWER_ATTRS(ACCESS_FLAG | ISH | AP_RO);
+	lower_attrs |= LOWER_ATTRS(ns | ATTR_IWBWA_OWBWA_NTR_INDEX);
+	upper_attrs = UPPER_ATTRS(0ull);
+	desc = upper_attrs | lower_attrs;
+
+	return create_block_desc(desc, output_addr, level);
+}
+
+/*******************************************************************************
+ * Helper to create a level 1/2/3 block descriptor which maps the va to output_
+ * addr with inner-shareable normal wbwa read-write memory attributes.
+ ******************************************************************************/
+static unsigned long create_rwmem_block(unsigned long output_addr,
+					unsigned int level,
+					unsigned int ns)
+{
+	unsigned long upper_attrs, lower_attrs, desc;
+
+	lower_attrs = LOWER_ATTRS(ACCESS_FLAG | ISH | AP_RW);
+	lower_attrs |= LOWER_ATTRS(ns | ATTR_IWBWA_OWBWA_NTR_INDEX);
+	upper_attrs = UPPER_ATTRS(XN);
+	desc = upper_attrs | lower_attrs;
+
+	return create_block_desc(desc, output_addr, level);
+}
+
+/*******************************************************************************
+ * Create page tables as per the platform memory map. Certain aspects of page
+ * table creation have been abstracted in the above routines. This can be
+ * improved further.
+ * TODO: Move the page table setup helpers into the arch or lib directory
+ *******************************************************************************/
+static unsigned long fill_xlation_tables(meminfo *tzram_layout,
+					 unsigned long ro_start,
+					 unsigned long ro_limit,
+					 unsigned long coh_start,
+					 unsigned long coh_limit)
+{
+	unsigned long l2_desc, l3_desc;
+	unsigned long *xt_addr = 0, *pt_addr, off = 0;
+	unsigned long trom_start_index, trom_end_index;
+	unsigned long tzram_start_index, tzram_end_index;
+	unsigned long flash0_start_index, flash0_end_index;
+	unsigned long flash1_start_index, flash1_end_index;
+	unsigned long vram_start_index, vram_end_index;
+	unsigned long nsram_start_index, nsram_end_index;
+	unsigned long tdram_start_index, tdram_end_index;
+	unsigned long dram_start_index, dram_end_index;
+	unsigned long dev0_start_index, dev0_end_index;
+	unsigned long dev1_start_index, dev1_end_index;
+	unsigned int idx;
+
+
+	/*****************************************************************
+	 * LEVEL1 PAGETABLE SETUP
+	 *
+	 * Find the start and end indices of the memory peripherals in the
+	 * first level pagetables. These are the main areas we care about.
+	 * Also bump the end index by one if it's equal to the start to
+	 * allow for regions which lie completely within a GB.
+	 *****************************************************************/
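+	/*
+	 * The index macros used below are assumed (in platform.h) to be
+	 * plain shifts by the region size, e.g.:
+	 *   #define ONE_GB_INDEX(x)	((x) >> 30)
+	 *   #define TWO_MB_INDEX(x)	((x) >> 21)
+	 *   #define FOUR_KB_INDEX(x)	((x) >> 12)
+	 */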
+	trom_start_index = ONE_GB_INDEX(TZROM_BASE);
+	dev0_start_index = ONE_GB_INDEX(TZRNG_BASE);
+	dram_start_index = ONE_GB_INDEX(DRAM_BASE);
+	dram_end_index = ONE_GB_INDEX(DRAM_BASE + DRAM_SIZE);
+
+	if (dram_end_index == dram_start_index)
+		dram_end_index++;
+
+	/*
+	 * Fill up the level1 translation table first
+	 */
+	for (idx = 0; idx < (ADDR_SPACE_SIZE >> 30); idx++) {
+
+		/*
+		 * Fill up the entry for the TZROM. This will cover
+		 * everything in the first GB.
+		 */
+		if (idx == trom_start_index) {
+			xt_addr = &l2_xlation_table[GB1_L2_PAGETABLE][0];
+			l1_xlation_table[idx] = create_table_desc(xt_addr);
+			continue;
+		}
+
+		/*
+		 * Mark the second gb as device
+		 */
+		if (idx == dev0_start_index) {
+			xt_addr = &l2_xlation_table[GB2_L2_PAGETABLE][0];
+			l1_xlation_table[idx] = create_table_desc(xt_addr);
+			continue;
+		}
+
+		/*
+		 * Fill up the block entry for the DRAM with Normal
+		 * inner-WBWA outer-WBWA non-transient attributes.
+		 * This will cover 2-4GB. Note that the accesses are
+		 * marked as non-secure.
+		 */
+		if ((idx >= dram_start_index) && (idx < dram_end_index)) {
+			l1_xlation_table[idx] = create_rwmem_block(idx, LEVEL1,
+								   NS);
+			continue;
+		}
+
+		assert(0);
+	}
+
+
+	/*****************************************************************
+	 * LEVEL2 PAGETABLE SETUP
+	 *
+	 * Find the start and end indices of the memory & peripherals in the
+	 * second level pagetables.
+	 ******************************************************************/
+
+	/* Initializations for the 1st GB */
+	trom_start_index = TWO_MB_INDEX(TZROM_BASE);
+	trom_end_index = TWO_MB_INDEX(TZROM_BASE + TZROM_SIZE);
+	if (trom_end_index == trom_start_index)
+		trom_end_index++;
+
+	tdram_start_index = TWO_MB_INDEX(TZDRAM_BASE);
+	tdram_end_index = TWO_MB_INDEX(TZDRAM_BASE + TZDRAM_SIZE);
+	if (tdram_end_index == tdram_start_index)
+		tdram_end_index++;
+
+	flash0_start_index = TWO_MB_INDEX(FLASH0_BASE);
+	flash0_end_index = TWO_MB_INDEX(FLASH0_BASE + TZROM_SIZE);
+	if (flash0_end_index == flash0_start_index)
+		flash0_end_index++;
+
+	flash1_start_index = TWO_MB_INDEX(FLASH1_BASE);
+	flash1_end_index = TWO_MB_INDEX(FLASH1_BASE + FLASH1_SIZE);
+	if (flash1_end_index == flash1_start_index)
+		flash1_end_index++;
+
+	vram_start_index = TWO_MB_INDEX(VRAM_BASE);
+	vram_end_index = TWO_MB_INDEX(VRAM_BASE + VRAM_SIZE);
+	if (vram_end_index == vram_start_index)
+		vram_end_index++;
+
+	dev0_start_index = TWO_MB_INDEX(DEVICE0_BASE);
+	dev0_end_index = TWO_MB_INDEX(DEVICE0_BASE + DEVICE0_SIZE);
+	if (dev0_end_index == dev0_start_index)
+		dev0_end_index++;
+
+	dev1_start_index = TWO_MB_INDEX(DEVICE1_BASE);
+	dev1_end_index = TWO_MB_INDEX(DEVICE1_BASE + DEVICE1_SIZE);
+	if (dev1_end_index == dev1_start_index)
+		dev1_end_index++;
+
+	/* Since the size is < 2M this is a single index */
+	tzram_start_index = TWO_MB_INDEX(tzram_layout->total_base);
+	nsram_start_index = TWO_MB_INDEX(NSRAM_BASE);
+
+	/*
+	 * Fill up the level2 translation table for the first GB next
+	 */
+	for (idx = 0; idx < NUM_2MB_IN_GB; idx++) {
+
+		l2_desc = INVALID_DESC;
+		xt_addr = &l2_xlation_table[GB1_L2_PAGETABLE][idx];
+
+		/* Block entries for 64M of trusted Boot ROM */
+		if ((idx >= trom_start_index) && (idx < trom_end_index))
+			l2_desc = create_romem_block(idx, LEVEL2, 0);
+
+		/* Single L3 page table entry for 256K of TZRAM */
+		if (idx == tzram_start_index) {
+			pt_addr = &l3_xlation_table[TZRAM_PAGETABLE][0];
+			l2_desc = create_table_desc(pt_addr);
+		}
+
+		/* Block entries for 32M of trusted DRAM */
+		if ((idx >= tdram_start_index) && (idx < tdram_end_index))
+			l2_desc = create_rwmem_block(idx, LEVEL2, 0);
+
+		/* Block entries for 64M of aliased trusted Boot ROM */
+		if ((idx >= flash0_start_index) && (idx < flash0_end_index))
+			l2_desc = create_romem_block(idx, LEVEL2, 0);
+
+		/* Block entries for 64M of flash1 */
+		if ((idx >= flash1_start_index) && (idx < flash1_end_index))
+			l2_desc = create_romem_block(idx, LEVEL2, 0);
+
+		/* Block entries for 32M of VRAM */
+		if ((idx >= vram_start_index) && (idx < vram_end_index))
+			l2_desc = create_rwmem_block(idx, LEVEL2, 0);
+
+		/* Block entries for all the devices in the first gb */
+		if ((idx >= dev0_start_index) && (idx < dev0_end_index))
+			l2_desc = create_device_block(idx, LEVEL2, 0);
+
+		/* Block entries for all the devices in the first gb */
+		if ((idx >= dev1_start_index) && (idx < dev1_end_index))
+			l2_desc = create_device_block(idx, LEVEL2, 0);
+
+		/* Single L3 page table entry for 64K of NSRAM */
+		if (idx == nsram_start_index) {
+			pt_addr = &l3_xlation_table[NSRAM_PAGETABLE][0];
+			l2_desc = create_table_desc(pt_addr);
+		}
+
+		*xt_addr = l2_desc;
+	}
+
+
+	/*
+	 * Initializations for the 2nd GB. Mark everything as device
+	 * for the time being as the memory map is not final. Each
+	 * index will need to be offset to allow absolute values
+	 */
+	off = NUM_2MB_IN_GB;
+	for (idx = off; idx < (NUM_2MB_IN_GB + off); idx++) {
+		l2_desc = create_device_block(idx, LEVEL2, 0);
+		xt_addr = &l2_xlation_table[GB2_L2_PAGETABLE][idx - off];
+		*xt_addr = l2_desc;
+	}
+
+
+	/*****************************************************************
+	 * LEVEL3 PAGETABLE SETUP
+	 * The following setup assumes knowledge of the scatter file. This
+	 * should be reasonable as this is platform specific code.
+	 *****************************************************************/
+
+	/* Fill up the level3 pagetable for the trusted SRAM. */
+	tzram_start_index = FOUR_KB_INDEX(tzram_layout->total_base);
+	tzram_end_index = FOUR_KB_INDEX(tzram_layout->total_base +
+					tzram_layout->total_size);
+	if (tzram_end_index == tzram_start_index)
+		tzram_end_index++;
+
+	/*
+	 * Reusing trom* to mark RO memory. BLX_STACKS follows BLX_RO in the
+	 * scatter file. Using BLX_RO$$Limit does not work as it might not
+	 * cross the page boundary thus leading to truncation of valid RO
+	 * memory
+	 */
+	trom_start_index = FOUR_KB_INDEX(ro_start);
+	trom_end_index = FOUR_KB_INDEX(ro_limit);
+	if (trom_end_index == trom_start_index)
+		trom_end_index++;
+
+	/*
+	 * Reusing dev* to mark coherent device memory. $$Limit works here
+	 * because the coherent memory section is known to be 4k in size
+	 */
+	dev0_start_index = FOUR_KB_INDEX(coh_start);
+	dev0_end_index = FOUR_KB_INDEX(coh_limit);
+	if (dev0_end_index == dev0_start_index)
+		dev0_end_index++;
+
+
+	/* Each index will need to be offset to allow absolute values */
+	off = FOUR_KB_INDEX(TZRAM_BASE);
+	for (idx = off; idx < (NUM_4K_IN_2MB + off); idx++) {
+
+		l3_desc = INVALID_DESC;
+		xt_addr = &l3_xlation_table[TZRAM_PAGETABLE][idx - off];
+
+		if (idx >= tzram_start_index && idx < tzram_end_index)
+			l3_desc = create_rwmem_block(idx, LEVEL3, 0);
+
+		if (idx >= trom_start_index && idx < trom_end_index)
+			l3_desc = create_romem_block(idx, LEVEL3, 0);
+
+		if (idx >= dev0_start_index && idx < dev0_end_index)
+			l3_desc = create_device_block(idx, LEVEL3, 0);
+
+		*xt_addr = l3_desc;
+	}
+
+	/* Fill up the level3 pagetable for the non-trusted SRAM. */
+	nsram_start_index = FOUR_KB_INDEX(NSRAM_BASE);
+	nsram_end_index = FOUR_KB_INDEX(NSRAM_BASE + NSRAM_SIZE);
+	if (nsram_end_index == nsram_start_index)
+		nsram_end_index++;
+
+	/* Each index will need to be offset to allow absolute values */
+	off = FOUR_KB_INDEX(NSRAM_BASE);
+	for (idx = off; idx < (NUM_4K_IN_2MB + off); idx++) {
+
+		l3_desc = INVALID_DESC;
+		xt_addr = &l3_xlation_table[NSRAM_PAGETABLE][idx - off];
+
+		if (idx >= nsram_start_index && idx < nsram_end_index)
+			l3_desc = create_rwmem_block(idx, LEVEL3, NS);
+
+		*xt_addr = l3_desc;
+	}
+
+	return (unsigned long) l1_xlation_table;
+}
+
+/*******************************************************************************
+ * Enable the MMU assuming that the pagetables have already been created
+ *******************************************************************************/
+void enable_mmu()
+{
+	unsigned long mair, tcr, ttbr, sctlr;
+	unsigned long current_el = read_current_el();
+
+	/* Set the attributes in the right indices of the MAIR */
+	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
+				  ATTR_IWBWA_OWBWA_NTR_INDEX);
+	write_mair(mair);
+
+	/*
+	 * Set TCR bits as well. Inner & outer WBWA & shareable + T0SZ = 32
+	 */
+	tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |
+		  TCR_RGN_INNER_WBA | TCR_T0SZ_4GB;
+	if (GET_EL(current_el) == MODE_EL3) {
+		tcr |= TCR_EL3_RES1;
+		/* Invalidate all TLBs */
+		tlbialle3();
+	} else {
+		/* Invalidate EL1 TLBs */
+		tlbivmalle1();
+	}
+
+	write_tcr(tcr);
+
+	/* Set TTBR bits as well */
+	assert(((unsigned long)l1_xlation_table & (sizeof(l1_xlation_table) - 1)) == 0);
+	ttbr = (unsigned long) l1_xlation_table;
+	write_ttbr0(ttbr);
+
+	sctlr = read_sctlr();
+	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;
+	sctlr |= SCTLR_A_BIT | SCTLR_C_BIT;
+	write_sctlr(sctlr);
+
+	return;
+}
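+
+/*******************************************************************************
+ * Worked example for the MAIR value built in enable_mmu(), assuming the
+ * conventional encodings (the real values live in arch.h): with
+ * MAIR_ATTR_SET(attr, index) == attr << (index * 8), Normal IWBWA/OWBWA
+ * (0xff) at index 0 and Device-nGnRE (0x04) at index 1 give
+ * mair == 0xff | (0x04 << 8) == 0x4ff. TCR_T0SZ_4GB likewise corresponds to
+ * T0SZ == 64 - 32 == 32, i.e. a 32-bit (4GB) input address space.
+ ******************************************************************************/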
+
+void disable_mmu(void)
+{
+	/* Zero out the MMU related registers */
+	write_mair(0);
+	write_tcr(0);
+	write_ttbr0(0);
+	write_sctlr(0);
+
+	/* Invalidate TLBs of the CurrentEL */
+	tlbiall();
+
+	/* Flush the caches */
+	dcsw_op_all(DCCISW);
+
+	return;
+}
+
+/*******************************************************************************
+ * Setup the pagetables as per the platform memory map & initialize the mmu
+ *******************************************************************************/
+void configure_mmu(meminfo *mem_layout,
+		   unsigned long ro_start,
+		   unsigned long ro_limit,
+		   unsigned long coh_start,
+		   unsigned long coh_limit)
+{
+	fill_xlation_tables(mem_layout,
+			    ro_start,
+			    ro_limit,
+			    coh_start,
+			    coh_limit);
+	enable_mmu();
+	return;
+}
+
+/* Simple routine which returns a configuration variable value */
+unsigned long platform_get_cfgvar(unsigned int var_id)
+{
+	assert(var_id < CONFIG_LIMIT);
+	return platform_config[var_id];
+}
+
+/*******************************************************************************
+ * A single boot loader stack is expected to work on both the Foundation FVP
+ * models and the two flavours of the Base FVP models (AEMv8 & Cortex). The
+ * SYS_ID register provides a mechanism for detecting the differences between
+ * these platforms. This information is stored in a per-BL array to allow the
+ * code to take the correct path.
+ ******************************************************************************/
+int platform_config_setup(void)
+{
+	unsigned int rev, hbi, bld, arch, sys_id, midr_pn;
+
+	sys_id = mmio_read_32(VE_SYSREGS_BASE + V2M_SYS_ID);
+	rev = (sys_id >> SYS_ID_REV_SHIFT) & SYS_ID_REV_MASK;
+	hbi = (sys_id >> SYS_ID_HBI_SHIFT) & SYS_ID_HBI_MASK;
+	bld = (sys_id >> SYS_ID_BLD_SHIFT) & SYS_ID_BLD_MASK;
+	arch = (sys_id >> SYS_ID_ARCH_SHIFT) & SYS_ID_ARCH_MASK;
+
+	assert(rev == REV_FVP);
+	assert(arch == ARCH_MODEL);
+
+	/*
+	 * The build field in the SYS_ID tells which variant of the GIC
+	 * memory map is implemented by the model.
+	 */
+	switch (bld) {
+	case BLD_GIC_VE_MMAP:
+		platform_config[CONFIG_GICD_ADDR] = VE_GICD_BASE;
+		platform_config[CONFIG_GICC_ADDR] = VE_GICC_BASE;
+		platform_config[CONFIG_GICH_ADDR] = VE_GICH_BASE;
+		platform_config[CONFIG_GICV_ADDR] = VE_GICV_BASE;
+		break;
+	case BLD_GIC_A53A57_MMAP:
+		platform_config[CONFIG_GICD_ADDR] = BASE_GICD_BASE;
+		platform_config[CONFIG_GICC_ADDR] = BASE_GICC_BASE;
+		platform_config[CONFIG_GICH_ADDR] = BASE_GICH_BASE;
+		platform_config[CONFIG_GICV_ADDR] = BASE_GICV_BASE;
+		break;
+	default:
+		assert(0);
+	}
+
+	/*
+	 * The hbi field in the SYS_ID is 0x020 for the Base FVP & 0x010
+	 * for the Foundation FVP.
+	 */
+	switch (hbi) {
+	case HBI_FOUNDATION:
+		platform_config[CONFIG_MAX_AFF0] = 4;
+		platform_config[CONFIG_MAX_AFF1] = 1;
+		platform_config[CONFIG_CPU_SETUP] = 0;
+		platform_config[CONFIG_BASE_MMAP] = 0;
+		break;
+	case HBI_FVP_BASE:
+		midr_pn = (read_midr() >> MIDR_PN_SHIFT) & MIDR_PN_MASK;
+		if ((midr_pn == MIDR_PN_A57) || (midr_pn == MIDR_PN_A53))
+			platform_config[CONFIG_CPU_SETUP] = 1;
+		else
+			platform_config[CONFIG_CPU_SETUP] = 0;
+
+		platform_config[CONFIG_MAX_AFF0] = 4;
+		platform_config[CONFIG_MAX_AFF1] = 2;
+		platform_config[CONFIG_BASE_MMAP] = 1;
+		break;
+	default:
+		assert(0);
+	}
+
+	return 0;
+}
+
+unsigned long plat_get_ns_image_entrypoint(void)
+{
+	return NS_IMAGE_OFFSET;
+}
diff --git a/plat/fvp/aarch64/fvp_helpers.S b/plat/fvp/aarch64/fvp_helpers.S
new file mode 100644
index 0000000..5cb0660
--- /dev/null
+++ b/plat/fvp/aarch64/fvp_helpers.S
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <platform.h>
+
+	.globl	plat_report_exception
+
+	.section platform_code, "ax"
+
+	/* ---------------------------------------------
+	 * void plat_report_exception(unsigned int type)
+	 * Function to report an unhandled exception
+	 * with platform-specific means.
+	 * On FVP platform, it updates the LEDs
+	 * to indicate where we are
+	 * ---------------------------------------------
+	 */
+plat_report_exception:
+	mrs	x1, CurrentEl
+	lsr	x1, x1, #MODE_EL_SHIFT
+	lsl	x1, x1, #SYS_LED_EL_SHIFT
+	lsl	x0, x0, #SYS_LED_EC_SHIFT
+	mov	x2, #(SECURE << SYS_LED_SS_SHIFT)
+	orr	x0, x0, x2
+	orr	x0, x0, x1
+	mov	x1, #VE_SYSREGS_BASE
+	add	x1, x1, #V2M_SYS_LED
+	str	x0, [x1]
+	ret
diff --git a/plat/fvp/bl1_plat_setup.c b/plat/fvp/bl1_plat_setup.c
new file mode 100644
index 0000000..7131f7a
--- /dev/null
+++ b/plat/fvp/bl1_plat_setup.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <platform.h>
+#include <bl1.h>
+#include <console.h>
+
+/*******************************************************************************
+ * Declarations of linker defined symbols which will help us find the layout
+ * of trusted SRAM
+ ******************************************************************************/
+#if defined (__GNUC__)
+extern unsigned long __FIRMWARE_ROM_START__;
+extern unsigned long __FIRMWARE_ROM_SIZE__;
+extern unsigned long __FIRMWARE_DATA_START__;
+extern unsigned long __FIRMWARE_DATA_SIZE__;
+extern unsigned long __FIRMWARE_BSS_START__;
+extern unsigned long __FIRMWARE_BSS_SIZE__;
+extern unsigned long __DATA_RAM_START__;
+extern unsigned long __DATA_RAM_SIZE__;
+extern unsigned long __BSS_RAM_START__;
+extern unsigned long __BSS_RAM_SIZE__;
+extern unsigned long __FIRMWARE_RAM_STACKS_START__;
+extern unsigned long __FIRMWARE_RAM_STACKS_SIZE__;
+extern unsigned long __FIRMWARE_RAM_PAGETABLES_START__;
+extern unsigned long __FIRMWARE_RAM_PAGETABLES_SIZE__;
+extern unsigned long __FIRMWARE_RAM_COHERENT_START__;
+extern unsigned long __FIRMWARE_RAM_COHERENT_SIZE__;
+
+#define BL1_COHERENT_MEM_BASE	(&__FIRMWARE_RAM_COHERENT_START__)
+#define BL1_COHERENT_MEM_LIMIT \
+	((unsigned long long)&__FIRMWARE_RAM_COHERENT_START__ + \
+	 (unsigned long long)&__FIRMWARE_RAM_COHERENT_SIZE__)
+
+#define BL1_FIRMWARE_RAM_GLOBALS_ZI_BASE \
+	(unsigned long)(&__BSS_RAM_START__)
+#define BL1_FIRMWARE_RAM_GLOBALS_ZI_LENGTH \
+	(unsigned long)(&__FIRMWARE_BSS_SIZE__)
+
+#define BL1_FIRMWARE_RAM_COHERENT_ZI_BASE \
+	(unsigned long)(&__FIRMWARE_RAM_COHERENT_START__)
+#define BL1_FIRMWARE_RAM_COHERENT_ZI_LENGTH\
+	(unsigned long)(&__FIRMWARE_RAM_COHERENT_SIZE__)
+
+#define BL1_NORMAL_RAM_BASE (unsigned long)(&__BSS_RAM_START__)
+#define BL1_NORMAL_RAM_LIMIT \
+	((unsigned long)&__FIRMWARE_RAM_COHERENT_START__ +	\
+	 (unsigned long)&__FIRMWARE_RAM_COHERENT_SIZE__)
+#else
+ #error "Unknown compiler."
+#endif
+
+
+/* Data structure which holds the extents of the trusted SRAM for BL1*/
+static meminfo bl1_tzram_layout = {0};
+
+meminfo bl1_get_sec_mem_layout(void)
+{
+	return bl1_tzram_layout;
+}
+
+/*******************************************************************************
+ * Perform any BL1 specific platform actions.
+ ******************************************************************************/
+void bl1_early_platform_setup(void)
+{
+	unsigned long bl1_normal_ram_base;
+	unsigned long bl1_coherent_ram_limit;
+	unsigned long tzram_limit = TZRAM_BASE + TZRAM_SIZE;
+
+	/*
+	 * Initialize extents of the bl1 sections as per the platform
+	 * defined values.
+	 */
+	bl1_normal_ram_base  = BL1_NORMAL_RAM_BASE;
+	bl1_coherent_ram_limit = BL1_NORMAL_RAM_LIMIT;
+
+	/*
+	 * Calculate how much ram BL1 is using & how much remains free.
+	 * This also includes a rudimentary mechanism to detect whether
+	 * the BL1 data is loaded at the top or bottom of memory.
+	 * TODO: add support for discontiguous chunks of free ram if
+	 *       needed. Might need dynamic memory allocation support
+	 *       et al.
+	 *       Also assuming that the section for coherent memory is
+	 *       the last and for globals the first in the scatter file.
+	 */
+	bl1_tzram_layout.total_base = TZRAM_BASE;
+	bl1_tzram_layout.total_size = TZRAM_SIZE;
+
+	if (bl1_coherent_ram_limit == tzram_limit) {
+		bl1_tzram_layout.free_base = TZRAM_BASE;
+		bl1_tzram_layout.free_size = bl1_normal_ram_base - TZRAM_BASE;
+	} else {
+		bl1_tzram_layout.free_base = bl1_coherent_ram_limit;
+		bl1_tzram_layout.free_size =
+			tzram_limit - bl1_coherent_ram_limit;
+	}
+}
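+
+/*******************************************************************************
+ * Worked example of the layout logic above (the addresses are illustrative
+ * assumptions, not the platform constants): with TZRAM_BASE = 0x04000000 and
+ * TZRAM_SIZE = 0x40000, a BL1 linked at the top of TZRAM has its coherent
+ * section ending exactly at tzram_limit (0x04040000), so everything below
+ * bl1_normal_ram_base is free for BL2. A BL1 linked at the bottom ends its
+ * coherent section below tzram_limit, so the free region starts at
+ * bl1_coherent_ram_limit instead.
+ ******************************************************************************/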
+
+/*******************************************************************************
+ * Function which will evaluate how much of the trusted ram has been gobbled
+ * up by BL1 and return the base and size of what's available for loading BL2.
+ * It's called after coherency and the MMU have been turned on.
+ ******************************************************************************/
+void bl1_platform_setup(void)
+{
+	/*
+	 * This should zero out our coherent stacks as well but we don't care
+	 * as they are not being used right now.
+	 */
+	memset((void *) BL1_FIRMWARE_RAM_COHERENT_ZI_BASE, 0,
+	       (size_t) BL1_FIRMWARE_RAM_COHERENT_ZI_LENGTH);
+
+	/* Enable and initialize the System level generic timer */
+	mmio_write_32(SYS_CNTCTL_BASE + CNTCR_OFF, CNTCR_EN);
+
+	/* Initialize the console */
+	console_init();
+
+	return;
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architecture setup here. At the
+ * moment this only initializes the MMU in a quick and dirty way. Later
+ * architectural setup (bl1_arch_setup()) does not do anything platform
+ * specific.
+ ******************************************************************************/
+void bl1_plat_arch_setup(void)
+{
+	configure_mmu(&bl1_tzram_layout,
+		TZROM_BASE,			/* Read_only region start */
+		TZROM_BASE + TZROM_SIZE,	/* Read_only region limit */
+		/* Coherent region start */
+		BL1_FIRMWARE_RAM_COHERENT_ZI_BASE,
+		/* Coherent region size */
+		BL1_FIRMWARE_RAM_COHERENT_ZI_BASE +
+			BL1_FIRMWARE_RAM_COHERENT_ZI_LENGTH);
+}
diff --git a/plat/fvp/bl2_plat_setup.c b/plat/fvp/bl2_plat_setup.c
new file mode 100644
index 0000000..e38f00b
--- /dev/null
+++ b/plat/fvp/bl2_plat_setup.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <platform.h>
+#include <bl2.h>
+#include <bl_common.h>
+
+/*******************************************************************************
+ * Declarations of linker defined symbols which will help us find the layout
+ * of trusted SRAM
+ ******************************************************************************/
+#if defined (__GNUC__)
+extern unsigned long __BL2_RO_BASE__;
+extern unsigned long __BL2_STACKS_BASE__;
+extern unsigned long __BL2_COHERENT_RAM_BASE__;
+extern unsigned long __BL2_RW_BASE__;
+
+#define BL2_RO_BASE		__BL2_RO_BASE__
+#define BL2_STACKS_BASE		__BL2_STACKS_BASE__
+#define BL2_COHERENT_RAM_BASE	__BL2_COHERENT_RAM_BASE__
+#define BL2_RW_BASE		__BL2_RW_BASE__
+
+#else
+ #error "Unknown compiler."
+#endif
+
+/* Pointer to memory visible to both BL2 and BL31 for passing data */
+extern unsigned char **bl2_el_change_mem_ptr;
+
+/* Data structure which holds the extents of the trusted SRAM for BL2 */
+static meminfo bl2_tzram_layout
+__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE),
+		section("tzfw_coherent_mem"))) = {0};
+
+/* Data structure which holds the extents of the non-trusted DRAM for BL2*/
+static meminfo dram_layout = {0};
+
+meminfo bl2_get_sec_mem_layout(void)
+{
+	return bl2_tzram_layout;
+}
+
+meminfo bl2_get_ns_mem_layout(void)
+{
+	return dram_layout;
+}
+
+/*******************************************************************************
+ * BL1 has passed the extents of the trusted SRAM that should be visible to BL2
+ * in x0. This memory layout is sitting at the base of the free trusted SRAM.
+ * Copy it to a safe location before it's reclaimed by later BL2 functionality.
+ ******************************************************************************/
+void bl2_early_platform_setup(meminfo *mem_layout,
+			      void *data)
+{
+	/* Setup the BL2 memory layout */
+	bl2_tzram_layout.total_base = mem_layout->total_base;
+	bl2_tzram_layout.total_size = mem_layout->total_size;
+	bl2_tzram_layout.free_base = mem_layout->free_base;
+	bl2_tzram_layout.free_size = mem_layout->free_size;
+	bl2_tzram_layout.attr = mem_layout->attr;
+	bl2_tzram_layout.next = 0;
+
+	/* Initialize the platform config for future decision making */
+	platform_config_setup();
+
+	return;
+}
+
+/*******************************************************************************
+ * Not much to do here apart from finding out the extents of non-trusted DRAM
+ * which will be used for loading the non-trusted software images. We are
+ * relying on pre-initialized zi memory so there is nothing to zero out like
+ * in BL1. This is because BL2 is a raw PIC binary whose load address is
+ * determined at runtime. The ZI section might be lost if it's not already
+ * there.
+ ******************************************************************************/
+void bl2_platform_setup()
+{
+	dram_layout.total_base = DRAM_BASE;
+	dram_layout.total_size = DRAM_SIZE;
+	dram_layout.free_base = DRAM_BASE;
+	dram_layout.free_size = DRAM_SIZE;
+	dram_layout.attr = 0;
+
+	/* Use the Trusted DRAM for passing args to BL31 */
+	bl2_el_change_mem_ptr = (unsigned char **) TZDRAM_BASE;
+
+	return;
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU in a quick and dirty way.
+ ******************************************************************************/
+void bl2_plat_arch_setup()
+{
+	unsigned long sctlr;
+
+	/* Enable instruction cache. */
+	sctlr = read_sctlr();
+	sctlr |= SCTLR_I_BIT;
+	write_sctlr(sctlr);
+
+	/*
+	 * Very simple exception vectors which assert if any exception other
+	 * than a single SMC call from BL2 to pass control to BL31 in EL3 is
+	 * received.
+	 */
+	write_vbar((unsigned long) early_exceptions);
+
+	configure_mmu(&bl2_tzram_layout,
+		      (unsigned long) &BL2_RO_BASE,
+		      (unsigned long) &BL2_STACKS_BASE,
+		      (unsigned long) &BL2_COHERENT_RAM_BASE,
+		      (unsigned long) &BL2_RW_BASE);
+	return;
+}
diff --git a/plat/fvp/bl31_plat_setup.c b/plat/fvp/bl31_plat_setup.c
new file mode 100644
index 0000000..6c8635f
--- /dev/null
+++ b/plat/fvp/bl31_plat_setup.c
@@ -0,0 +1,424 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <platform.h>
+#include <bl31.h>
+#include <bl_common.h>
+#include <pl011.h>
+#include <bakery_lock.h>
+#include <cci400.h>
+#include <gic.h>
+#include <fvp_pwrc.h>
+
+/*******************************************************************************
+ * Declarations of linker defined symbols which will help us find the layout
+ * of trusted SRAM
+ ******************************************************************************/
+#if defined (__GNUC__)
+extern unsigned long __BL31_RO_BASE__;
+extern unsigned long __BL31_STACKS_BASE__;
+extern unsigned long __BL31_COHERENT_RAM_BASE__;
+extern unsigned long __BL31_RW_BASE__;
+
+#define BL31_RO_BASE		__BL31_RO_BASE__
+#define BL31_STACKS_BASE	__BL31_STACKS_BASE__
+#define BL31_COHERENT_RAM_BASE	__BL31_COHERENT_RAM_BASE__
+#define BL31_RW_BASE		__BL31_RW_BASE__
+
+#else
+ #error "Unknown compiler."
+#endif
+
+/*******************************************************************************
+ * This data structure holds information copied by BL31 from BL2 to pass
+ * control to the non-trusted software images. A per-cpu entry was created to
+ * use the same structure in the warm boot path but that's not the case right
+ * now. Persisting with this approach for the time being. TODO: Can this be
+ * moved out of device memory?
+ ******************************************************************************/
+el_change_info ns_entry_info[PLATFORM_CORE_COUNT]
+__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE),
+		section("tzfw_coherent_mem"))) = {0};
+
+/* Data structure which holds the extents of the trusted SRAM for BL31 */
+static meminfo bl31_tzram_layout
+__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE),
+		section("tzfw_coherent_mem"))) = {0};
+
+meminfo bl31_get_sec_mem_layout(void)
+{
+	return bl31_tzram_layout;
+}
+
+/*******************************************************************************
+ * Return information about passing control to the non-trusted software images
+ * to common code. TODO: In the initial architecture, the image after BL31 will
+ * always run in the non-secure state. In the final architecture there
+ * will be a series of images. This function will need enhancement then.
+ ******************************************************************************/
+el_change_info *bl31_get_next_image_info(unsigned long mpidr)
+{
+	return &ns_entry_info[platform_get_core_pos(mpidr)];
+}
+
+/*******************************************************************************
+ * Perform any BL31 specific platform actions. Here we copy parameters passed
+ * by the calling EL (S-EL1 in BL2 & S-EL3 in BL1) before they are lost
+ * (potentially). This is done before the MMU is initialized so that the memory
+ * layout can be used while creating page tables.
+ ******************************************************************************/
+void bl31_early_platform_setup(meminfo *mem_layout,
+			       void *data,
+			       unsigned long mpidr)
+{
+	el_change_info *image_info = (el_change_info *) data;
+	unsigned int lin_index = platform_get_core_pos(mpidr);
+
+	/* Setup the BL31 memory layout */
+	bl31_tzram_layout.total_base = mem_layout->total_base;
+	bl31_tzram_layout.total_size = mem_layout->total_size;
+	bl31_tzram_layout.free_base = mem_layout->free_base;
+	bl31_tzram_layout.free_size = mem_layout->free_size;
+	bl31_tzram_layout.attr = mem_layout->attr;
+	bl31_tzram_layout.next = 0;
+
+	/* Save information about jumping into the NS world */
+	ns_entry_info[lin_index].entrypoint = image_info->entrypoint;
+	ns_entry_info[lin_index].spsr = image_info->spsr;
+	ns_entry_info[lin_index].args = image_info->args;
+	ns_entry_info[lin_index].security_state = image_info->security_state;
+	ns_entry_info[lin_index].next = image_info->next;
+
+	/* Initialize the platform config for future decision making */
+	platform_config_setup();
+}
+
+/*******************************************************************************
+ * Initialize the gic, configure the CLCD and zero out variables needed by the
+ * secondaries to boot up correctly.
+ ******************************************************************************/
+void bl31_platform_setup()
+{
+	unsigned int reg_val;
+
+	/* Initialize the gic cpu and distributor interfaces */
+	gic_setup();
+
+	/*
+	 * TODO: Configure the CLCD before handing control to
+	 * linux. Need to see if a separate driver is needed
+	 * instead.
+	 */
+	mmio_write_32(VE_SYSREGS_BASE + V2M_SYS_CFGDATA, 0);
+	mmio_write_32(VE_SYSREGS_BASE + V2M_SYS_CFGCTRL,
+		      (1ull << 31) | (1 << 30) | (7 << 20) | (0 << 16));
+
+	/* Allow access to the System counter timer module */
+	reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT);
+	reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT);
+	reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT);
+	mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(0), reg_val);
+	mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(1), reg_val);
+
+	reg_val = (1 << CNTNSAR_NS_SHIFT(0)) | (1 << CNTNSAR_NS_SHIFT(1));
+	mmio_write_32(SYS_TIMCTL_BASE + CNTNSAR, reg_val);
+
+	/* Initialize the power controller */
+	fvp_pwrc_setup();
+
+	/* Topologies are best known to the platform. */
+	plat_setup_topology();
+}
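+
+/*
+ * For reference, the six CNTACR permission bits set above (RPCT, RVCT, RFRQ,
+ * RVOFF, RWVT & RWPT) occupy bits [5:0] of the register, so the value written
+ * to each timer frame is:
+ *
+ *	reg_val == 0x3f
+ *
+ * i.e. full read & write access to frames 0 & 1 of the system timer.
+ */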
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU in a quick and dirty way.
+ ******************************************************************************/
+void bl31_plat_arch_setup()
+{
+	unsigned long sctlr;
+
+	/* Enable instruction cache. */
+	sctlr = read_sctlr();
+	sctlr |= SCTLR_I_BIT;
+	write_sctlr(sctlr);
+
+	write_vbar((unsigned long) runtime_exceptions);
+	configure_mmu(&bl31_tzram_layout,
+		      (unsigned long) &BL31_RO_BASE,
+		      (unsigned long) &BL31_STACKS_BASE,
+		      (unsigned long) &BL31_COHERENT_RAM_BASE,
+		      (unsigned long) &BL31_RW_BASE);
+}
+
+/*******************************************************************************
+ * TODO: Move GIC setup to a separate file in case it is needed by other BL
+ * stages or ELs
+ * TODO: Revisit if priorities are being set such that no non-secure interrupt
+ * can have a higher priority than a secure one as recommended in the GICv2 spec
+ *******************************************************************************/
+
+/*******************************************************************************
+ * This function does some minimal GICv3 configuration. The firmware itself does
+ * not fully support GICv3 at this time and relies on GICv2 emulation as
+ * provided by GICv3. This function allows software (like Linux) in later stages
+ * to use full GICv3 features.
+ *******************************************************************************/
+void gicv3_cpuif_setup(void)
+{
+	unsigned int scr_val, val, base;
+
+	/*
+	 * When CPUs come out of reset they have their GICR_WAKER.ProcessorSleep
+	 * bit set. In order to allow interrupts to get routed to the CPU we
+	 * need to clear this bit if set and wait for GICR_WAKER.ChildrenAsleep
+	 * to clear (GICv3 Architecture specification 5.4.23).
+	 * GICR_WAKER is NOT banked per CPU, so compute this CPU's
+	 * redistributor base address explicitly.
+	 *
+	 * TODO:
+	 * For GICv4 we also need to adjust the Base address based on
+	 * GICR_TYPER.VLPIS
+	 */
+	base = BASE_GICR_BASE +
+		(platform_get_core_pos(read_mpidr()) << GICR_PCPUBASE_SHIFT);
+	val = gicr_read_waker(base);
+
+	val &= ~WAKER_PS;
+	gicr_write_waker(base, val);
+	dsb();
+
+	/* We need to wait for ChildrenAsleep to clear. */
+	val = gicr_read_waker(base);
+	while (val & WAKER_CA) {
+		val = gicr_read_waker(base);
+	}
+
+	/*
+	 * We need to set SCR_EL3.NS in order to see GICv3 non-secure state.
+	 * Restore SCR_EL3.NS again before exit.
+	 */
+	scr_val = read_scr();
+	write_scr(scr_val | SCR_NS_BIT);
+
+	/*
+	 * By default EL2 and NS-EL1 software should be able to enable GICv3
+	 * System register access without any configuration at EL3. But it turns
+	 * out that GICC PMR as set in GICv2 mode does not affect GICv3 mode. So
+	 * we need to set it here again. In order to do that we need to enable
+	 * register access. We leave it enabled as it should be fine and might
+	 * prevent problems with later software trying to access GIC System
+	 * Registers.
+	 */
+	val = read_icc_sre_el3();
+	write_icc_sre_el3(val | ICC_SRE_EN | ICC_SRE_SRE);
+
+	val = read_icc_sre_el2();
+	write_icc_sre_el2(val | ICC_SRE_EN | ICC_SRE_SRE);
+
+	write_icc_pmr_el1(MAX_PRI_VAL);
+
+	/* Restore SCR_EL3 */
+	write_scr(scr_val);
+}
+
+/*******************************************************************************
+ * This function does some minimal GICv3 configuration when cores go
+ * down.
+ *******************************************************************************/
+void gicv3_cpuif_deactivate(void)
+{
+	unsigned int val, base;
+
+	/*
+	 * When taking CPUs down we need to set GICR_WAKER.ProcessorSleep and
+	 * wait for GICR_WAKER.ChildrenAsleep to get set.
+	 * (GICv3 Architecture specification 5.4.23).
+	 * GICR_WAKER is NOT banked per CPU, so compute this CPU's
+	 * redistributor base address explicitly.
+	 *
+	 * TODO:
+	 * For GICv4 we also need to adjust the Base address based on
+	 * GICR_TYPER.VLPIS
+	 */
+	base = BASE_GICR_BASE +
+		(platform_get_core_pos(read_mpidr()) << GICR_PCPUBASE_SHIFT);
+	val = gicr_read_waker(base);
+	val |= WAKER_PS;
+	gicr_write_waker(base, val);
+	dsb();
+
+	/* We need to wait for ChildrenAsleep to be set. */
+	val = gicr_read_waker(base);
+	while ((val & WAKER_CA) == 0) {
+		val = gicr_read_waker(base);
+	}
+}
+
+
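+/*
+ * The two functions above share the same GICR_WAKER polling pattern. A
+ * hypothetical helper (not part of this patch, shown only as a sketch) could
+ * factor it out:
+ *
+ *	static void gicr_poll_children_asleep(unsigned int base,
+ *					      unsigned int asleep)
+ *	{
+ *		unsigned int ca;
+ *		do {
+ *			ca = gicr_read_waker(base) & WAKER_CA;
+ *		} while (asleep ? !ca : ca);
+ *	}
+ */
+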
+/*******************************************************************************
+ * Enable secure interrupts and use FIQs to route them. Disable legacy bypass
+ * and set the priority mask register to allow all interrupts to trickle in.
+ ******************************************************************************/
+void gic_cpuif_setup(unsigned int gicc_base)
+{
+	unsigned int val;
+
+	val = gicc_read_iidr(gicc_base);
+
+	/*
+	 * If GICv3 we need to do a bit of additional setup. We want to
+	 * allow default GICv2 behaviour but allow the next stage to
+	 * enable full gicv3 features.
+	 */
+	if (((val >> GICC_IIDR_ARCH_SHIFT) & GICC_IIDR_ARCH_MASK) >= 3) {
+		gicv3_cpuif_setup();
+	}
+
+	val = ENABLE_GRP0 | FIQ_EN | FIQ_BYP_DIS_GRP0;
+	val |= IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP1 | IRQ_BYP_DIS_GRP1;
+
+	gicc_write_pmr(gicc_base, MAX_PRI_VAL);
+	gicc_write_ctlr(gicc_base, val);
+}
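+
+/*
+ * Illustration of the version check above: assuming GICC_IIDR_ARCH_SHIFT is
+ * 16 and GICC_IIDR_ARCH_MASK is 0xf (the GICv2 'Architecture version' field
+ * at IIDR[19:16]), an IIDR value of e.g. 0x0003043b would yield:
+ *
+ *	(0x0003043b >> 16) & 0xf == 3
+ *
+ * and take the GICv3 path.
+ */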
+
+/*******************************************************************************
+ * Place the cpu interface in a state where it can never make a cpu exit wfi
+ * as a result of an asserted interrupt. This is critical for powering down a
+ * cpu.
+ ******************************************************************************/
+void gic_cpuif_deactivate(unsigned int gicc_base)
+{
+	unsigned int val;
+
+	/* Disable secure, non-secure interrupts and disable their bypass */
+	val = gicc_read_ctlr(gicc_base);
+	val &= ~(ENABLE_GRP0 | ENABLE_GRP1);
+	val |= FIQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP0;
+	val |= IRQ_BYP_DIS_GRP0 | IRQ_BYP_DIS_GRP1;
+	gicc_write_ctlr(gicc_base, val);
+
+	val = gicc_read_iidr(gicc_base);
+
+	/*
+	 * If GICv3 we need to do a bit of additional setup. Make sure the
+	 * RDIST is put to sleep.
+	 */
+	if (((val >> GICC_IIDR_ARCH_SHIFT) & GICC_IIDR_ARCH_MASK) >= 3) {
+		gicv3_cpuif_deactivate();
+	}
+}
+
+/*******************************************************************************
+ * Per cpu gic distributor setup which will be done by all cpus after a cold
+ * boot/hotplug. This marks out the secure interrupts & enables them.
+ ******************************************************************************/
+void gic_pcpu_distif_setup(unsigned int gicd_base)
+{
+	gicd_write_igroupr(gicd_base, 0, ~0);
+
+	gicd_clr_igroupr(gicd_base, IRQ_SEC_PHY_TIMER);
+	gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_0);
+	gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_1);
+	gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_2);
+	gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_3);
+	gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_4);
+	gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_5);
+	gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_6);
+	gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_7);
+
+	gicd_set_ipriorityr(gicd_base, IRQ_SEC_PHY_TIMER, MAX_PRI_VAL);
+	gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_0, MAX_PRI_VAL);
+	gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_1, MAX_PRI_VAL);
+	gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_2, MAX_PRI_VAL);
+	gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_3, MAX_PRI_VAL);
+	gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_4, MAX_PRI_VAL);
+	gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_5, MAX_PRI_VAL);
+	gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_6, MAX_PRI_VAL);
+	gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_7, MAX_PRI_VAL);
+
+	gicd_set_isenabler(gicd_base, IRQ_SEC_PHY_TIMER);
+	gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_0);
+	gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_1);
+	gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_2);
+	gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_3);
+	gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_4);
+	gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_5);
+	gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_6);
+	gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_7);
+}
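+
+/*
+ * The accessors used above take an interrupt ID and (in the assumed
+ * implementation) locate its group bit as:
+ *
+ *	n   = id >> IGROUPR_SHIFT;		// one GICD_IGROUPR per 32 ids
+ *	bit = id & ((1 << IGROUPR_SHIFT) - 1);	// bit within that register
+ *
+ * which is also why gic_distif_setup() below steps through the IGROUPR
+ * registers in increments of (ctr << IGROUPR_SHIFT).
+ */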
+
+/*******************************************************************************
+ * Global gic distributor setup which will be done by the primary cpu after a
+ * cold boot. It marks out the secure SPIs, PPIs & SGIs and enables them. It
+ * then enables the secure GIC distributor interface.
+ ******************************************************************************/
+void gic_distif_setup(unsigned int gicd_base)
+{
+	unsigned int ctr, num_ints, ctlr;
+
+	/* Disable the distributor before going further */
+	ctlr = gicd_read_ctlr(gicd_base);
+	ctlr &= ~(ENABLE_GRP0 | ENABLE_GRP1);
+	gicd_write_ctlr(gicd_base, ctlr);
+
+	/*
+	 * Mark out non-secure interrupts. Calculate the number of
+	 * IGROUPR registers to consider, which is equal to the
+	 * IT_LINES_NO field of GICD_TYPER plus 1.
+	 */
+	num_ints = gicd_read_typer(gicd_base) & IT_LINES_NO_MASK;
+	num_ints++;
+	for (ctr = 0; ctr < num_ints; ctr++)
+		gicd_write_igroupr(gicd_base, ctr << IGROUPR_SHIFT, ~0);
+
+	/* Configure secure interrupts now */
+	gicd_clr_igroupr(gicd_base, IRQ_TZ_WDOG);
+	gicd_set_ipriorityr(gicd_base, IRQ_TZ_WDOG, MAX_PRI_VAL);
+	gicd_set_itargetsr(gicd_base, IRQ_TZ_WDOG,
+			   platform_get_core_pos(read_mpidr()));
+	gicd_set_isenabler(gicd_base, IRQ_TZ_WDOG);
+	gic_pcpu_distif_setup(gicd_base);
+
+	gicd_write_ctlr(gicd_base, ctlr | ENABLE_GRP0);
+}
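+
+/*
+ * Worked example for the loop above: GICD_TYPER.ITLinesNumber == N encodes
+ * support for 32 * (N + 1) interrupt IDs, covered by N + 1 IGROUPR registers.
+ * E.g. N == 5 gives num_ints == 6, so IDs 0-191 are first marked non-secure
+ * before the secure ones are carved back out.
+ */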
+
+void gic_setup(void)
+{
+	unsigned int gicd_base, gicc_base;
+
+	gicd_base = platform_get_cfgvar(CONFIG_GICD_ADDR);
+	gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR);
+
+	gic_cpuif_setup(gicc_base);
+	gic_distif_setup(gicd_base);
+}
diff --git a/plat/fvp/fvp_pm.c b/plat/fvp/fvp_pm.c
new file mode 100644
index 0000000..9621319
--- /dev/null
+++ b/plat/fvp/fvp_pm.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <console.h>
+#include <platform.h>
+#include <bl_common.h>
+#include <bl31.h>
+#include <bakery_lock.h>
+#include <cci400.h>
+#include <gic.h>
+#include <fvp_pwrc.h>
+/* Only included for error codes */
+#include <psci.h>
+
+/*******************************************************************************
+ * FVP handler called when an affinity instance is about to be turned on. The
+ * level and mpidr determine the affinity instance.
+ ******************************************************************************/
+int fvp_affinst_on(unsigned long mpidr,
+		   unsigned long sec_entrypoint,
+		   unsigned long ns_entrypoint,
+		   unsigned int afflvl,
+		   unsigned int state)
+{
+	int rc = PSCI_E_SUCCESS;
+	unsigned long linear_id;
+	mailbox *fvp_mboxes;
+	unsigned int psysr;
+
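+	/* Cannot allow NS world to execute trusted firmware code */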
+	if (ns_entrypoint < DRAM_BASE) {
+		rc = PSCI_E_INVALID_PARAMS;
+		goto exit;
+	}
+
+	/*
+	 * It's possible to turn on only affinity level 0 i.e. a cpu
+	 * on the FVP. Ignore any other affinity level.
+	 */
+	if (afflvl != MPIDR_AFFLVL0)
+		goto exit;
+
+	/*
+	 * Ensure that we do not cancel an inflight power off request
+	 * for the target cpu. That would leave it in a zombie wfi.
+	 * Wait for it to power off, program the jump address for the
+	 * target cpu and then program the power controller to turn
+	 * that cpu on
+	 */
+	do {
+		psysr = fvp_pwrc_read_psysr(mpidr);
+	} while (psysr & PSYSR_AFF_L0);
+
+	linear_id = platform_get_core_pos(mpidr);
+	fvp_mboxes = (mailbox *) (TZDRAM_BASE + MBOX_OFF);
+	fvp_mboxes[linear_id].value = sec_entrypoint;
+	flush_dcache_range((unsigned long) &fvp_mboxes[linear_id],
+			   sizeof(unsigned long));
+
+	fvp_pwrc_write_pponr(mpidr);
+
+exit:
+	return rc;
+}
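+
+/*
+ * For reference, the per-cpu mailbox written above lives at:
+ *
+ *	TZDRAM_BASE + MBOX_OFF + linear_id * sizeof(mailbox)
+ *
+ * where the mailbox type (see platform.h) pads each entry to a full cache
+ * writeback granule so that no two cpus' mailboxes share a cache line.
+ */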
+
+/*******************************************************************************
+ * FVP handler called when an affinity instance is about to be turned off. The
+ * level and mpidr determine the affinity instance. The 'state' arg. allows the
+ * platform to decide whether the cluster is being turned off and take apt
+ * actions.
+ *
+ * CAUTION: This function is called with coherent stacks so that caches can be
+ * turned off, flushed and coherency disabled. There is no guarantee that caches
+ * will remain turned on across calls to this function as each affinity level is
+ * dealt with. So do not write & read global variables across calls. It would
+ * be wise to flush a write to a global variable to prevent unpredictable
+ * results.
+ ******************************************************************************/
+int fvp_affinst_off(unsigned long mpidr,
+		    unsigned int afflvl,
+		    unsigned int state)
+{
+	int rc = PSCI_E_SUCCESS;
+	unsigned int gicc_base, ectlr;
+	unsigned long cpu_setup;
+
+	switch (afflvl) {
+	case MPIDR_AFFLVL1:
+		if (state == PSCI_STATE_OFF) {
+			/*
+			 * Disable coherency if this cluster is to be
+			 * turned off
+			 */
+			cci_disable_coherency(mpidr);
+
+			/*
+			 * Program the power controller to turn the
+			 * cluster off
+			 */
+			fvp_pwrc_write_pcoffr(mpidr);
+
+		}
+		break;
+
+	case MPIDR_AFFLVL0:
+		if (state == PSCI_STATE_OFF) {
+
+			/*
+			 * Take this cpu out of intra-cluster coherency if
+			 * the FVP flavour supports the SMP bit.
+			 */
+			cpu_setup = platform_get_cfgvar(CONFIG_CPU_SETUP);
+			if (cpu_setup) {
+				ectlr = read_cpuectlr();
+				ectlr &= ~CPUECTLR_SMP_BIT;
+				write_cpuectlr(ectlr);
+			}
+
+			/*
+			 * Prevent interrupts from spuriously waking up
+			 * this cpu
+			 */
+			gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR);
+			gic_cpuif_deactivate(gicc_base);
+
+			/*
+			 * Program the power controller to power this
+			 * cpu off
+			 */
+			fvp_pwrc_write_ppoffr(mpidr);
+		}
+		break;
+
+	default:
+		assert(0);
+	}
+
+	return rc;
+}
+
+/*******************************************************************************
+ * FVP handler called when an affinity instance is about to be suspended. The
+ * level and mpidr determine the affinity instance. The 'state' arg. allows the
+ * platform to decide whether the cluster is being turned off and take apt
+ * actions.
+ *
+ * CAUTION: This function is called with coherent stacks so that caches can be
+ * turned off, flushed and coherency disabled. There is no guarantee that caches
+ * will remain turned on across calls to this function as each affinity level is
+ * dealt with. So do not write & read global variables across calls. It would
+ * be wise to flush a write to a global variable to prevent unpredictable
+ * results.
+ ******************************************************************************/
+int fvp_affinst_suspend(unsigned long mpidr,
+			unsigned long sec_entrypoint,
+			unsigned long ns_entrypoint,
+			unsigned int afflvl,
+			unsigned int state)
+{
+	int rc = PSCI_E_SUCCESS;
+	unsigned int gicc_base, ectlr;
+	unsigned long cpu_setup, linear_id;
+	mailbox *fvp_mboxes;
+
+	/* Cannot allow NS world to execute trusted firmware code */
+	if (ns_entrypoint < DRAM_BASE) {
+		rc = PSCI_E_INVALID_PARAMS;
+		goto exit;
+	}
+
+	switch (afflvl) {
+	case MPIDR_AFFLVL1:
+		if (state == PSCI_STATE_OFF) {
+			/*
+			 * Disable coherency if this cluster is to be
+			 * turned off
+			 */
+			cci_disable_coherency(mpidr);
+
+			/*
+			 * Program the power controller to turn the
+			 * cluster off
+			 */
+			fvp_pwrc_write_pcoffr(mpidr);
+
+		}
+		break;
+
+	case MPIDR_AFFLVL0:
+		if (state == PSCI_STATE_OFF) {
+			/*
+			 * Take this cpu out of intra-cluster coherency if
+			 * the FVP flavour supports the SMP bit.
+			 */
+			cpu_setup = platform_get_cfgvar(CONFIG_CPU_SETUP);
+			if (cpu_setup) {
+				ectlr = read_cpuectlr();
+				ectlr &= ~CPUECTLR_SMP_BIT;
+				write_cpuectlr(ectlr);
+			}
+
+			/* Program the jump address for the target cpu */
+			linear_id = platform_get_core_pos(mpidr);
+			fvp_mboxes = (mailbox *) (TZDRAM_BASE + MBOX_OFF);
+			fvp_mboxes[linear_id].value = sec_entrypoint;
+			flush_dcache_range((unsigned long) &fvp_mboxes[linear_id],
+					   sizeof(unsigned long));
+
+			/*
+			 * Prevent interrupts from spuriously waking up
+			 * this cpu
+			 */
+			gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR);
+			gic_cpuif_deactivate(gicc_base);
+
+			/*
+			 * Program the power controller to power this
+			 * cpu off and enable wakeup interrupts.
+			 */
+			fvp_pwrc_write_pwkupr(mpidr);
+			fvp_pwrc_write_ppoffr(mpidr);
+		}
+		break;
+
+	default:
+		assert(0);
+	}
+
+exit:
+	return rc;
+}
+
+/*******************************************************************************
+ * FVP handler called when an affinity instance has just been powered on after
+ * being turned off earlier. The level and mpidr determine the affinity
+ * instance. The 'state' arg. allows the platform to decide whether the cluster
+ * was turned off prior to wakeup and do what's necessary to set it up
+ * correctly.
+ ******************************************************************************/
+int fvp_affinst_on_finish(unsigned long mpidr,
+			  unsigned int afflvl,
+			  unsigned int state)
+{
+	int rc = PSCI_E_SUCCESS;
+	unsigned long linear_id, cpu_setup;
+	mailbox *fvp_mboxes;
+	unsigned int gicd_base, gicc_base, reg_val, ectlr;
+
+	switch (afflvl) {
+
+	case MPIDR_AFFLVL1:
+		/* Enable coherency if this cluster was off */
+		if (state == PSCI_STATE_OFF)
+			cci_enable_coherency(mpidr);
+		break;
+
+	case MPIDR_AFFLVL0:
+		/*
+		 * Ignore the state passed for a cpu. It could only have
+		 * been off if we are here.
+		 */
+
+		/*
+		 * Turn on intra-cluster coherency if the FVP flavour supports
+		 * it.
+		 */
+		cpu_setup = platform_get_cfgvar(CONFIG_CPU_SETUP);
+		if (cpu_setup) {
+			ectlr = read_cpuectlr();
+			ectlr |= CPUECTLR_SMP_BIT;
+			write_cpuectlr(ectlr);
+		}
+
+		/* Zero the jump address in the mailbox for this cpu */
+		fvp_mboxes = (mailbox *) (TZDRAM_BASE + MBOX_OFF);
+		linear_id = platform_get_core_pos(mpidr);
+		fvp_mboxes[linear_id].value = 0;
+		flush_dcache_range((unsigned long) &fvp_mboxes[linear_id],
+				   sizeof(unsigned long));
+
+		gicd_base = platform_get_cfgvar(CONFIG_GICD_ADDR);
+		gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR);
+
+		/* Enable the gic cpu interface */
+		gic_cpuif_setup(gicc_base);
+
+		/* TODO: This setup is needed only after a cold boot */
+		gic_pcpu_distif_setup(gicd_base);
+
+		/* Allow access to the System counter timer module */
+		reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT);
+		reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT);
+		reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT);
+		mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(0), reg_val);
+		mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(1), reg_val);
+
+		reg_val = (1 << CNTNSAR_NS_SHIFT(0)) |
+			(1 << CNTNSAR_NS_SHIFT(1));
+		mmio_write_32(SYS_TIMCTL_BASE + CNTNSAR, reg_val);
+
+		break;
+
+	default:
+		assert(0);
+	}
+
+	return rc;
+}
+
+/*******************************************************************************
+ * FVP handler called when an affinity instance has just been powered on after
+ * having been suspended earlier. The level and mpidr determine the affinity
+ * instance.
+ * TODO: At the moment we reuse the on finisher and reinitialize the secure
+ * context. Need to implement a separate suspend finisher.
+ ******************************************************************************/
+int fvp_affinst_suspend_finish(unsigned long mpidr,
+			       unsigned int afflvl,
+			       unsigned int state)
+{
+	return fvp_affinst_on_finish(mpidr, afflvl, state);
+}
+
+
+/*******************************************************************************
+ * Export the platform handlers to enable psci to invoke them
+ ******************************************************************************/
+static plat_pm_ops fvp_plat_pm_ops = {
+	0,
+	fvp_affinst_on,
+	fvp_affinst_off,
+	fvp_affinst_suspend,
+	fvp_affinst_on_finish,
+	fvp_affinst_suspend_finish,
+};
+
+/*******************************************************************************
+ * Export the platform specific power ops & initialize the fvp power controller
+ ******************************************************************************/
+int platform_setup_pm(plat_pm_ops **plat_ops)
+{
+	*plat_ops = &fvp_plat_pm_ops;
+	return 0;
+}
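+
+/*
+ * A sketch of how the generic PSCI layer might consume these ops (the field
+ * names below are assumed from the handler order in fvp_plat_pm_ops and are
+ * shown for illustration only):
+ *
+ *	plat_pm_ops *pm_ops;
+ *	platform_setup_pm(&pm_ops);
+ *	if (pm_ops->affinst_on)
+ *		pm_ops->affinst_on(mpidr, sec_ep, ns_ep, afflvl, state);
+ */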
diff --git a/plat/fvp/fvp_topology.c b/plat/fvp/fvp_topology.c
new file mode 100644
index 0000000..20f3324
--- /dev/null
+++ b/plat/fvp/fvp_topology.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <assert.h>
+#include <platform.h>
+#include <fvp_pwrc.h>
+/* TODO: Reusing psci error codes & state information. Get our own! */
+#include <psci.h>
+
+/* We treat '255' as an invalid affinity instance */
+#define AFFINST_INVAL	0xff
+
+/*******************************************************************************
+ * We support 3 flavours of the FVP: Foundation, Base AEM & Base Cortex. Each
+ * flavour has a different topology. The common bit is that there can be a max.
+ * of 2 clusters (affinity 1) and 4 cpus (affinity 0) per cluster. So we define
+ * a tree like data structure which caters to these maximum bounds. It simply
+ * marks the absent affinity level instances as PSCI_AFF_ABSENT e.g. there is no
+ * cluster 1 on the Foundation FVP. The 'data' field is currently unused.
+ ******************************************************************************/
+typedef struct {
+	unsigned char sibling;
+	unsigned char child;
+	unsigned char state;
+	unsigned int data;
+} affinity_info;
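+
+/*
+ * Worked example of the tree for a Base FVP with both clusters and all 8
+ * cpus present (indices are array positions, not mpidrs):
+ *
+ *	aff1[0] = { .child = 0, .sibling = 1 }
+ *	aff1[1] = { .child = 4, .sibling = AFFINST_INVAL }
+ *	aff0[0..2].sibling = 1..3, aff0[3].sibling = AFFINST_INVAL
+ *	aff0[4..6].sibling = 5..7, aff0[7].sibling = AFFINST_INVAL
+ *
+ * Each cluster node points at its first cpu; cpus within a cluster are
+ * chained through their 'sibling' fields.
+ */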
+
+/*******************************************************************************
+ * The following two data structures store the topology tree for the fvp. There
+ * is a separate array for each affinity level i.e. cpus and clusters. The child
+ * and sibling references allow traversal inside and in between the two arrays.
+ ******************************************************************************/
+static affinity_info fvp_aff1_topology_map[PLATFORM_CLUSTER_COUNT];
+static affinity_info fvp_aff0_topology_map[PLATFORM_CORE_COUNT];
+
+/* Simple global variable to safeguard us from stupidity */
+static unsigned int topology_setup_done;
+
+/*******************************************************************************
+ * This function implements a part of the critical interface between the psci
+ * generic layer and the platform to allow the former to detect the platform
+ * topology. psci queries the platform to determine how many affinity instances
+ * are present at a particular level for a given mpidr e.g. consider a dual
+ * cluster platform where each cluster has 4 cpus. A call to this function with
+ * (0, 0x100) will return the number of cpus implemented under cluster 1 i.e. 4.
+ * Similarly a call with (1, 0x100) will return 2 i.e. the number of clusters.
+ * This is because we are effectively asking how many affinity level 1 instances
+ * are implemented under affinity level 2 instance 0.
+ ******************************************************************************/
+unsigned int plat_get_aff_count(unsigned int aff_lvl,
+				unsigned long mpidr)
+{
+	unsigned int aff_count = 1, ctr;
+	unsigned char parent_aff_id;
+
+	assert(topology_setup_done == 1);
+
+	switch (aff_lvl) {
+	case 3:
+	case 2:
+		/*
+		 * Assert if the parent affinity instance is not 0.
+		 * This also takes care of level 3 in an obfuscated way
+		 */
+		parent_aff_id = (mpidr >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK;
+		assert(parent_aff_id == 0);
+
+		/*
+		 * Report that we implement a single instance of
+		 * affinity levels 2 & 3 which are AFF_ABSENT
+		 */
+		break;
+	case 1:
+		/* Assert if the parent affinity instance is not 0. */
+		parent_aff_id = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
+		assert(parent_aff_id == 0);
+
+		/* Fetch the starting index in the aff1 array */
+		for (ctr = 0;
+		     fvp_aff1_topology_map[ctr].sibling != AFFINST_INVAL;
+		     ctr = fvp_aff1_topology_map[ctr].sibling) {
+			aff_count++;
+		}
+
+		break;
+	case 0:
+		/* Assert if the cluster id is anything apart from 0 or 1 */
+		parent_aff_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+		assert(parent_aff_id < PLATFORM_CLUSTER_COUNT);
+
+		/* Fetch the starting index in the aff0 array */
+		for (ctr = fvp_aff1_topology_map[parent_aff_id].child;
+		     fvp_aff0_topology_map[ctr].sibling != AFFINST_INVAL;
+		     ctr = fvp_aff0_topology_map[ctr].sibling) {
+			aff_count++;
+		}
+
+		break;
+	default:
+		assert(0);
+	}
+
+	return aff_count;
+}
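+
+/*
+ * Worked example: plat_get_aff_count(0, 0x100) extracts cluster id 1 from the
+ * mpidr, starts at fvp_aff1_topology_map[1].child (the first cpu of cluster
+ * 1) and follows the 'sibling' links until AFFINST_INVAL, returning 4 on a
+ * fully populated Base FVP.
+ */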
+
+/*******************************************************************************
+ * This function implements a part of the critical interface between the psci
+ * generic layer and the platform to allow the former to detect the state of a
+ * affinity instance in the platform topology. psci queries the platform to
+ * determine whether an affinity instance is present or absent. This caters for
+ * topologies where an intermediate affinity level instance is missing e.g.
+ * consider a platform which implements a single cluster with 4 cpus and there
+ * is another cpu sitting directly on the interconnect along with the cluster.
+ * The mpidrs of the cluster would range from 0x0-0x3. The mpidr of the single
+ * cpu would be 0x100 to highlight that it does not belong to cluster 0. Cluster
+ * 1 is however missing but needs to be accounted for to reach this single cpu in
+ * the topology tree. Hence it will be marked as PSCI_AFF_ABSENT. This is not
+ * applicable to the FVP but depicted as an example.
+ ******************************************************************************/
+unsigned int plat_get_aff_state(unsigned int aff_lvl,
+				unsigned long mpidr)
+{
+	unsigned int aff_state = PSCI_AFF_ABSENT, idx;
+
+	idx = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+
+	assert(topology_setup_done == 1);
+
+	switch (aff_lvl) {
+	case 3:
+	case 2:
+		/* Report affinity levels 2 & 3 as absent */
+		break;
+	case 1:
+		aff_state = fvp_aff1_topology_map[idx].state;
+		break;
+	case 0:
+		/*
+		 * First get start index of the aff0 in its array & then add
+		 * to it the affinity id that we want the state of
+		 */
+		idx = fvp_aff1_topology_map[idx].child;
+		idx += (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+		aff_state = fvp_aff0_topology_map[idx].state;
+		break;
+	default:
+		assert(0);
+	}
+
+	return aff_state;
+}
+
+/*******************************************************************************
+ * Handy optimization to prevent the psci implementation from traversing through
+ * affinity levels which are not present while detecting the platform topology.
+ ******************************************************************************/
+int plat_get_max_afflvl()
+{
+	return MPIDR_AFFLVL1;
+}
+
+/*******************************************************************************
+ * This function populates the FVP specific topology information depending upon
+ * the FVP flavour it's running on. We construct all the mpidrs we can handle
+ * and rely on the PWRC.PSYSR to flag absent cpus when their status is queried.
+ ******************************************************************************/
+int plat_setup_topology()
+{
+	unsigned char aff0, aff1, aff_state, aff0_offset = 0;
+	unsigned long mpidr;
+
+	topology_setup_done = 0;
+
+	for (aff1 = 0; aff1 < PLATFORM_CLUSTER_COUNT; aff1++) {
+
+		fvp_aff1_topology_map[aff1].child = aff0_offset;
+		fvp_aff1_topology_map[aff1].sibling = aff1 + 1;
+
+		for (aff0 = 0; aff0 < PLATFORM_MAX_CPUS_PER_CLUSTER; aff0++) {
+
+			mpidr = aff1 << MPIDR_AFF1_SHIFT;
+			mpidr |= aff0 << MPIDR_AFF0_SHIFT;
+
+			if (fvp_pwrc_read_psysr(mpidr) != PSYSR_INVALID) {
+				/*
+				 * Presence of even a single aff0 indicates
+				 * presence of parent aff1 on the FVP.
+				 */
+				aff_state = PSCI_AFF_PRESENT;
+				fvp_aff1_topology_map[aff1].state =
+					PSCI_AFF_PRESENT;
+			} else {
+				aff_state = PSCI_AFF_ABSENT;
+			}
+
+			fvp_aff0_topology_map[aff0_offset].child = AFFINST_INVAL;
+			fvp_aff0_topology_map[aff0_offset].state = aff_state;
+			fvp_aff0_topology_map[aff0_offset].sibling =
+				aff0_offset + 1;
+
+			/* Increment the absolute number of aff0s traversed */
+			aff0_offset++;
+		}
+
+		/* Tie off the last aff0 sibling to AFFINST_INVAL to avoid overflow */
+		fvp_aff0_topology_map[aff0_offset - 1].sibling = AFFINST_INVAL;
+	}
+
+	/* Tie-off the last aff1 sibling to AFFINST_INVAL to avoid overflow */
+	fvp_aff1_topology_map[aff1 - 1].sibling = AFFINST_INVAL;
+
+	topology_setup_done = 1;
+	return 0;
+}
diff --git a/plat/fvp/platform.h b/plat/fvp/platform.h
new file mode 100644
index 0000000..21a7912
--- /dev/null
+++ b/plat/fvp/platform.h
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PLATFORM_H__
+#define __PLATFORM_H__
+
+#include <arch.h>
+#include <mmio.h>
+#include <psci.h>
+#include <bl_common.h>
+
+
+/*******************************************************************************
+ * Platform binary types for linking
+ ******************************************************************************/
+#define PLATFORM_LINKER_FORMAT          "elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH            aarch64
+
+/*******************************************************************************
+ * Generic platform constants
+ ******************************************************************************/
+#define PLATFORM_STACK_SIZE		0x800
+
+#define FIRMWARE_WELCOME_STR		"Booting trusted firmware boot loader stage 1\n\r"
+#define BL2_IMAGE_NAME			"bl2.bin"
+#define BL31_IMAGE_NAME			"bl31.bin"
+#define NS_IMAGE_OFFSET			FLASH0_BASE
+
+#define PLATFORM_CACHE_LINE_SIZE	64
+#define PLATFORM_CLUSTER_COUNT		2ull
+#define PLATFORM_CLUSTER0_CORE_COUNT	4
+#define PLATFORM_CLUSTER1_CORE_COUNT	4
+#define PLATFORM_CORE_COUNT             (PLATFORM_CLUSTER1_CORE_COUNT + \
+			       		 PLATFORM_CLUSTER0_CORE_COUNT)
+#define PLATFORM_MAX_CPUS_PER_CLUSTER	4
+#define PRIMARY_CPU			0x0
+
+/* Constants for accessing platform configuration */
+#define CONFIG_GICD_ADDR		0
+#define CONFIG_GICC_ADDR		1
+#define CONFIG_GICH_ADDR		2
+#define CONFIG_GICV_ADDR		3
+#define CONFIG_MAX_AFF0		4
+#define CONFIG_MAX_AFF1		5
+/* Indicate whether the CPUECTLR SMP bit should be enabled. */
+#define CONFIG_CPU_SETUP		6
+#define CONFIG_BASE_MMAP		7
+#define CONFIG_LIMIT			8
+
+/*******************************************************************************
+ * Platform memory map related constants
+ ******************************************************************************/
+#define TZROM_BASE		0x00000000
+#define TZROM_SIZE		0x04000000
+
+#define TZRAM_BASE		0x04000000
+#define TZRAM_SIZE		0x40000
+
+#define FLASH0_BASE		0x08000000
+#define FLASH0_SIZE		TZROM_SIZE
+
+#define FLASH1_BASE		0x0c000000
+#define FLASH1_SIZE		0x04000000
+
+#define PSRAM_BASE		0x14000000
+#define PSRAM_SIZE		0x04000000
+
+#define VRAM_BASE		0x18000000
+#define VRAM_SIZE		0x02000000
+
+/* Aggregate of all devices in the first GB */
+#define DEVICE0_BASE		0x1a000000
+#define DEVICE0_SIZE		0x12200000
+
+#define DEVICE1_BASE		0x2f000000
+#define DEVICE1_SIZE		0x200000
+
+#define NSRAM_BASE		0x2e000000
+#define NSRAM_SIZE		0x10000
+
+/* Location of trusted dram on the base fvp */
+#define TZDRAM_BASE		0x06000000
+#define TZDRAM_SIZE		0x02000000
+#define MBOX_OFF		0x1000
+#define AFFMAP_OFF		0x1200
+
+#define DRAM_BASE              0x80000000ull
+#define DRAM_SIZE              0x80000000ull
+
+#define PCIE_EXP_BASE		0x40000000
+#define TZRNG_BASE		0x7fe60000
+#define TZNVCTR_BASE		0x7fe70000
+#define TZROOTKEY_BASE		0x7fe80000
+
+/* Memory mapped Generic timer interfaces  */
+#define SYS_CNTCTL_BASE		0x2a430000
+#define SYS_CNTREAD_BASE	0x2a800000
+#define SYS_TIMCTL_BASE		0x2a810000
+
+/* Counter timer module offsets */
+#define CNTNSAR			0x4
+#define CNTNSAR_NS_SHIFT(x)	(x)
+
+#define CNTACR_BASE(x)		(0x40 + ((x) << 2))
+#define CNTACR_RPCT_SHIFT	0x0
+#define CNTACR_RVCT_SHIFT	0x1
+#define CNTACR_RFRQ_SHIFT	0x2
+#define CNTACR_RVOFF_SHIFT	0x3
+#define CNTACR_RWVT_SHIFT	0x4
+#define CNTACR_RWPT_SHIFT	0x5
+
+/* V2M motherboard system registers & offsets */
+#define VE_SYSREGS_BASE		0x1c010000
+#define V2M_SYS_ID			0x0
+#define V2M_SYS_LED			0x8
+#define V2M_SYS_CFGDATA		0xa0
+#define V2M_SYS_CFGCTRL		0xa4
+
+/*
+ * V2M sysled bit definitions. The values written to this
+ * register are defined in arch.h & runtime_svc.h. Only
+ * used by the primary cpu to diagnose any cold boot issues.
+ *
+ * SYS_LED[0]   - Security state (S=0/NS=1)
+ * SYS_LED[2:1] - Exception Level (EL3-EL0)
+ * SYS_LED[7:3] - Exception Class (Sync/Async & origin)
+ *
+ */
+#define SYS_LED_SS_SHIFT		0x0
+#define SYS_LED_EL_SHIFT		0x1
+#define SYS_LED_EC_SHIFT		0x3
+
+#define SYS_LED_SS_MASK		0x1
+#define SYS_LED_EL_MASK		0x3
+#define SYS_LED_EC_MASK		0x1f
+
+/* V2M sysid register bits */
+#define SYS_ID_REV_SHIFT	27
+#define SYS_ID_HBI_SHIFT	16
+#define SYS_ID_BLD_SHIFT	12
+#define SYS_ID_ARCH_SHIFT	8
+#define SYS_ID_FPGA_SHIFT	0
+
+#define SYS_ID_REV_MASK	0xf
+#define SYS_ID_HBI_MASK	0xfff
+#define SYS_ID_BLD_MASK	0xf
+#define SYS_ID_ARCH_MASK	0xf
+#define SYS_ID_FPGA_MASK	0xff
+
+#define SYS_ID_BLD_LENGTH	4
+
+#define REV_FVP		0x0
+#define HBI_FVP_BASE		0x020
+#define HBI_FOUNDATION		0x010
+
+#define BLD_GIC_VE_MMAP	0x0
+#define BLD_GIC_A53A57_MMAP	0x1
+
+#define ARCH_MODEL		0x1
+
+/* FVP Power controller base address*/
+#define PWRC_BASE		0x1c100000
+
+/*******************************************************************************
+ * Platform specific per affinity states. Distinction between off and suspend
+ * is made to allow reporting of a suspended cpu as still being on e.g. in the
+ * affinity_info psci call.
+ ******************************************************************************/
+#define PLATFORM_MAX_AFF0	4
+#define PLATFORM_MAX_AFF1	2
+#define PLAT_AFF_UNK		0xff
+
+#define PLAT_AFF0_OFF		0x0
+#define PLAT_AFF0_ONPENDING	0x1
+#define PLAT_AFF0_SUSPEND	0x2
+#define PLAT_AFF0_ON		0x3
+
+#define PLAT_AFF1_OFF		0x0
+#define PLAT_AFF1_ONPENDING	0x1
+#define PLAT_AFF1_SUSPEND	0x2
+#define PLAT_AFF1_ON		0x3
+
+/*******************************************************************************
+ * BL2 specific defines.
+ ******************************************************************************/
+#define BL2_BASE			0x0402D000
+
+/*******************************************************************************
+ * BL31 specific defines.
+ ******************************************************************************/
+#define BL31_BASE			0x0400E000
+
+/*******************************************************************************
+ * Platform specific page table and MMU setup constants
+ ******************************************************************************/
+#define EL3_ADDR_SPACE_SIZE		(1ull << 32)
+#define EL3_NUM_PAGETABLES		2
+#define EL3_TROM_PAGETABLE		0
+#define EL3_TRAM_PAGETABLE		1
+
+#define ADDR_SPACE_SIZE			(1ull << 32)
+
+#define NUM_L2_PAGETABLES		2
+#define GB1_L2_PAGETABLE		0
+#define GB2_L2_PAGETABLE		1
+
+#define NUM_L3_PAGETABLES		2
+#define TZRAM_PAGETABLE			0
+#define NSRAM_PAGETABLE			1
+
+/*******************************************************************************
+ * CCI-400 related constants
+ ******************************************************************************/
+#define CCI400_BASE			0x2c090000
+#define CCI400_SL_IFACE_CLUSTER0	3
+#define CCI400_SL_IFACE_CLUSTER1	4
+#define CCI400_SL_IFACE_INDEX(mpidr)	((mpidr) & MPIDR_CLUSTER_MASK ? \
+					 CCI400_SL_IFACE_CLUSTER1 :   \
+					 CCI400_SL_IFACE_CLUSTER0)
+
+/*******************************************************************************
+ * GIC-400 & interrupt handling related constants
+ ******************************************************************************/
+/* VE compatible GIC memory map */
+#define VE_GICD_BASE			0x2c001000
+#define VE_GICC_BASE			0x2c002000
+#define VE_GICH_BASE			0x2c004000
+#define VE_GICV_BASE			0x2c006000
+
+/* Base FVP compatible GIC memory map */
+#define BASE_GICD_BASE			0x2f000000
+#define BASE_GICR_BASE			0x2f100000
+#define BASE_GICC_BASE			0x2c000000
+#define BASE_GICH_BASE			0x2c010000
+#define BASE_GICV_BASE			0x2c02f000
+
+#define IRQ_TZ_WDOG			56
+#define IRQ_SEC_PHY_TIMER		29
+#define IRQ_SEC_SGI_0			8
+#define IRQ_SEC_SGI_1			9
+#define IRQ_SEC_SGI_2			10
+#define IRQ_SEC_SGI_3			11
+#define IRQ_SEC_SGI_4			12
+#define IRQ_SEC_SGI_5			13
+#define IRQ_SEC_SGI_6			14
+#define IRQ_SEC_SGI_7			15
+#define IRQ_SEC_SGI_8			16
+
+/*******************************************************************************
+ * PL011 related constants
+ ******************************************************************************/
+#define PL011_BASE			0x1c090000
+
+/*******************************************************************************
+ * Declarations and constants to access the mailboxes safely. Each mailbox is
+ * aligned on the biggest cache line size in the platform. This is known only
+ * to the platform as it might have a combination of integrated and external
+ * caches. Such alignment ensures that two mailboxes do not sit on the same cache
+ * line at any cache level. They could belong to different cpus/clusters &
+ * get written while being protected by different locks causing corruption of
+ * a valid mailbox address.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT   6
+#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
+
+#ifndef __ASSEMBLY__
+
+typedef volatile struct {
+	unsigned long value
+	__attribute__((__aligned__(CACHE_WRITEBACK_GRANULE)));
+} mailbox;
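+
+/*
+ * Each mailbox thus occupies exactly one writeback granule. A build-time
+ * sanity check could assert this (illustrative sketch only):
+ *
+ *	typedef char mbox_size_check[
+ *		(sizeof(mailbox) == CACHE_WRITEBACK_GRANULE) ? 1 : -1];
+ */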
+
+/*******************************************************************************
+ * Function and variable prototypes
+ ******************************************************************************/
+extern unsigned long *bl1_normal_ram_base;
+extern unsigned long *bl1_normal_ram_len;
+extern unsigned long *bl1_normal_ram_limit;
+extern unsigned long *bl1_normal_ram_zi_base;
+extern unsigned long *bl1_normal_ram_zi_len;
+
+extern unsigned long *bl1_coherent_ram_base;
+extern unsigned long *bl1_coherent_ram_len;
+extern unsigned long *bl1_coherent_ram_limit;
+extern unsigned long *bl1_coherent_ram_zi_base;
+extern unsigned long *bl1_coherent_ram_zi_len;
+extern unsigned long warm_boot_entrypoint;
+
+extern void bl1_plat_arch_setup(void);
+extern void bl2_plat_arch_setup(void);
+extern void bl31_plat_arch_setup(void);
+extern int platform_setup_pm(plat_pm_ops **);
+extern unsigned int platform_get_core_pos(unsigned long mpidr);
+extern void disable_mmu(void);
+extern void enable_mmu(void);
+extern void configure_mmu(meminfo *,
+			  unsigned long,
+			  unsigned long,
+			  unsigned long,
+			  unsigned long);
+extern unsigned long platform_get_cfgvar(unsigned int);
+extern int platform_config_setup(void);
+extern void plat_report_exception(unsigned long);
+extern unsigned long plat_get_ns_image_entrypoint(void);
+
+/* Declarations for fvp_topology.c */
+extern int plat_setup_topology(void);
+extern int plat_get_max_afflvl(void);
+extern unsigned int plat_get_aff_count(unsigned int, unsigned long);
+extern unsigned int plat_get_aff_state(unsigned int, unsigned long);
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* __PLATFORM_H__ */