ARMv8 Trusted Firmware release v0.2
diff --git a/common/bl_common.c b/common/bl_common.c
new file mode 100644
index 0000000..d125786
--- /dev/null
+++ b/common/bl_common.c
@@ -0,0 +1,516 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <console.h>
+#include <platform.h>
+#include <semihosting.h>
+#include <bl_common.h>
+#include <bl1.h>
+
+/***********************************************************
+ * Memory for sharing data while changing exception levels.
+ * Only used by the primary core.
+ **********************************************************/
+unsigned char bl2_el_change_mem_ptr[EL_CHANGE_MEM_SIZE];
+
+unsigned long *get_el_change_mem_ptr(void)
+{
+	return (unsigned long *) bl2_el_change_mem_ptr;
+}
+
+unsigned long page_align(unsigned long value, unsigned dir)
+{
+	unsigned long page_size = 1 << FOUR_KB_SHIFT;
+
+	/* Round up the limit to the next page boundary */
+	if (value & (page_size - 1)) {
+		value &= ~(page_size - 1);
+		if (dir == UP)
+			value += page_size;
+	}
+
+	return value;
+}
+
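+/*
+ * Illustrative examples (editorial, with 4KB pages): page_align(0x4021000, UP)
+ * returns 0x4021000 as the value is already aligned, while
+ * page_align(0x4021010, UP) returns 0x4022000 and
+ * page_align(0x4021010, DOWN) returns 0x4021000.
+ */
+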
+static inline unsigned int is_page_aligned(unsigned long addr)
+{
+	const unsigned long page_size = 1 << FOUR_KB_SHIFT;
+
+	return (addr & (page_size - 1)) == 0;
+}
+
+void change_security_state(unsigned int target_security_state)
+{
+	unsigned long scr = read_scr();
+
+	if (target_security_state == SECURE)
+		scr &= ~SCR_NS_BIT;
+	else if (target_security_state == NON_SECURE)
+		scr |= SCR_NS_BIT;
+	else
+		assert(0);
+
+	write_scr(scr);
+}
+
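+/*
+ * Usage note (editorial): change_security_state(NON_SECURE) sets SCR_EL3.NS
+ * so that a subsequent eret from EL3 enters the normal world, while
+ * change_security_state(SECURE) clears the bit to remain in the secure world.
+ */
+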
+int drop_el(aapcs64_params *args,
+	    unsigned long spsr,
+	    unsigned long entrypoint)
+{
+	write_spsr(spsr);
+	write_elr(entrypoint);
+	eret(args->arg0,
+	     args->arg1,
+	     args->arg2,
+	     args->arg3,
+	     args->arg4,
+	     args->arg5,
+	     args->arg6,
+	     args->arg7);
+	return -EINVAL;
+}
+
+long raise_el(aapcs64_params *args)
+{
+	return smc(args->arg0,
+		   args->arg1,
+		   args->arg2,
+		   args->arg3,
+		   args->arg4,
+		   args->arg5,
+		   args->arg6,
+		   args->arg7);
+}
+
+/*
+ * TODO: If we are not EL3 then currently we only issue an SMC.
+ * Add support for dropping into EL0 etc. Consider adding support
+ * for switching from S-EL1 to S-EL0/1 etc.
+ */
+long change_el(el_change_info *info)
+{
+	unsigned long current_el = read_current_el();
+
+	if (GET_EL(current_el) == MODE_EL3) {
+		/*
+		 * We can go anywhere from EL3. So find where.
+		 * TODO: Lots to do if we are going non-secure.
+		 * Flip the NS bit. Restore NS registers etc.
+		 * Just doing the bare minimum for now.
+		 */
+
+		if (info->security_state == NON_SECURE)
+			change_security_state(info->security_state);
+
+		return drop_el(&info->args, info->spsr, info->entrypoint);
+	} else
+		return raise_el(&info->args);
+}
+
+/* TODO: add a parameter for DAIF. not needed right now */
+unsigned long make_spsr(unsigned long target_el,
+			unsigned long target_sp,
+			unsigned long target_rw)
+{
+	unsigned long spsr;
+
+	/* Disable all exceptions & setup the EL */
+	spsr = (DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
+		<< PSR_DAIF_SHIFT;
+	spsr |= PSR_MODE(target_rw, target_el, target_sp);
+
+	return spsr;
+}
+
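+/*
+ * Illustrative usage (the MODE_EL1, MODE_SP_ELX and MODE_RW_64 selector
+ * names are assumptions following the MODE_EL3 convention used in this
+ * file): an SPSR for entering a 64-bit EL1 image on SP_EL1 with all
+ * exceptions masked could be built as
+ *
+ *     spsr = make_spsr(MODE_EL1, MODE_SP_ELX, MODE_RW_64);
+ */
+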
+/*******************************************************************************
+ * The next two functions are weak definitions which platform specific
+ * code can override if it wishes to.
+ ******************************************************************************/
+
+/*******************************************************************************
+ * Function that takes a memory layout into which BL31 has been either top or
+ * bottom loaded. Using this information, it populates bl31_mem_layout to tell
+ * BL31 how much memory it has access to and how much is available for use. It
+ * does not need the address where BL31 has been loaded as BL31 will reclaim
+ * all the memory used by BL2.
+ * TODO: Revisit if this and init_bl2_mem_layout can be replaced by a single
+ * routine.
+ ******************************************************************************/
+void init_bl31_mem_layout(const meminfo *bl2_mem_layout,
+			  meminfo *bl31_mem_layout,
+			  unsigned int load_type)
+{
+	if (load_type == BOT_LOAD) {
+		/*
+		 * ------------                             ^
+		 * |   BL2    |                             |
+		 * |----------|                 ^           |  BL2
+		 * |          |                 | BL2 free  |  total
+		 * |          |                 |   size    |  size
+		 * |----------| BL2 free base   v           |
+		 * |   BL31   |                             |
+		 * ------------ BL2 total base              v
+		 */
+		unsigned long bl31_size;
+
+		bl31_mem_layout->free_base = bl2_mem_layout->free_base;
+
+		bl31_size = bl2_mem_layout->free_base - bl2_mem_layout->total_base;
+		bl31_mem_layout->free_size = bl2_mem_layout->total_size - bl31_size;
+	} else {
+		/*
+		 * ------------                             ^
+		 * |   BL31   |                             |
+		 * |----------|                 ^           |  BL2
+		 * |          |                 | BL2 free  |  total
+		 * |          |                 |   size    |  size
+		 * |----------| BL2 free base   v           |
+		 * |   BL2    |                             |
+		 * ------------ BL2 total base              v
+		 */
+		unsigned long bl2_size;
+
+		bl31_mem_layout->free_base = bl2_mem_layout->total_base;
+
+		bl2_size = bl2_mem_layout->free_base - bl2_mem_layout->total_base;
+		bl31_mem_layout->free_size = bl2_mem_layout->free_size + bl2_size;
+	}
+
+	bl31_mem_layout->total_base = bl2_mem_layout->total_base;
+	bl31_mem_layout->total_size = bl2_mem_layout->total_size;
+	bl31_mem_layout->attr = load_type;
+
+	flush_dcache_range((unsigned long) bl31_mem_layout, sizeof(meminfo));
+	return;
+}
+
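+/*
+ * Worked example with illustrative numbers: for a bottom-loaded BL31 with
+ * BL2's total_base = 0x04000000, total_size = 0x40000 and free_base =
+ * 0x04010000, bl31_size works out to 0x10000 and BL31 is told free_base =
+ * 0x04010000 with free_size = 0x30000, i.e. the whole region minus its own
+ * image.
+ */
+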
+/*******************************************************************************
+ * Function that takes a memory layout into which BL2 has been either top or
+ * bottom loaded along with the address where BL2 has been loaded in it. Using
+ * this information, it populates bl2_mem_layout to tell BL2 how much memory
+ * it has access to and how much is available for use.
+ ******************************************************************************/
+void init_bl2_mem_layout(meminfo *bl1_mem_layout,
+			 meminfo *bl2_mem_layout,
+			 unsigned int load_type,
+			 unsigned long bl2_base)
+{
+	unsigned long tmp;
+
+	if (load_type == BOT_LOAD) {
+		bl2_mem_layout->total_base = bl2_base;
+		tmp = bl1_mem_layout->free_base - bl2_base;
+		bl2_mem_layout->total_size = bl1_mem_layout->free_size + tmp;
+
+	} else {
+		bl2_mem_layout->total_base = bl1_mem_layout->free_base;
+		tmp = bl1_mem_layout->total_base + bl1_mem_layout->total_size;
+		bl2_mem_layout->total_size = tmp - bl1_mem_layout->free_base;
+	}
+
+	bl2_mem_layout->free_base = bl1_mem_layout->free_base;
+	bl2_mem_layout->free_size = bl1_mem_layout->free_size;
+	bl2_mem_layout->attr = load_type;
+
+	flush_dcache_range((unsigned long) bl2_mem_layout, sizeof(meminfo));
+	return;
+}
+
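+/*
+ * Worked example with illustrative numbers, assuming load_image() has
+ * already updated BL1's layout: with bl2_base = 0x04010000, BL1 free_base =
+ * 0x04018000 and free_size = 0x28000, a bottom-loaded BL2 gets tmp = 0x8000,
+ * total_base = 0x04010000 and total_size = 0x30000, i.e. from its own base
+ * up to the top of BL1's free memory.
+ */
+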
+static void dump_load_info(unsigned long image_load_addr,
+			   unsigned long image_size,
+			   const meminfo *mem_layout)
+{
+#if DEBUG
+	printf("Trying to load image at address 0x%lx, size = 0x%lx\r\n",
+		image_load_addr, image_size);
+	printf("Current memory layout:\r\n");
+	printf("  total region = [0x%lx, 0x%lx]\r\n", mem_layout->total_base,
+			mem_layout->total_base + mem_layout->total_size);
+	printf("  free region = [0x%lx, 0x%lx]\r\n", mem_layout->free_base,
+			mem_layout->free_base + mem_layout->free_size);
+#endif
+}
+
+/*******************************************************************************
+ * Generic function to load an image into the trusted RAM using semihosting
+ * given a name, extents of free memory & whether the image should be loaded at
+ * the bottom or top of the free memory. It updates the memory layout if the
+ * load is successful.
+ ******************************************************************************/
+unsigned long load_image(meminfo *mem_layout,
+			 const char *image_name,
+			 unsigned int load_type,
+			 unsigned long fixed_addr)
+{
+	unsigned long temp_image_base, image_base;
+	long offset;
+	int image_flen;
+
+	/* Find the size of the image */
+	image_flen = semihosting_get_flen(image_name);
+	if (image_flen < 0) {
+		printf("ERROR: Cannot access '%s' file (%i).\r\n",
+			image_name, image_flen);
+		return 0;
+	}
+
+	/* See if we have enough space */
+	if (image_flen > mem_layout->free_size) {
+		printf("ERROR: Cannot load '%s' file: Not enough space.\r\n",
+			image_name);
+		dump_load_info(0, image_flen, mem_layout);
+		return 0;
+	}
+
+	switch (load_type) {
+
+	case TOP_LOAD:
+
+	  /* Load the image at the top of free memory */
+	  temp_image_base = mem_layout->free_base + mem_layout->free_size;
+	  temp_image_base -= image_flen;
+
+	  /* Page align base address and check whether the image still fits */
+	  image_base = page_align(temp_image_base, DOWN);
+	  assert(image_base <= temp_image_base);
+
+	  if (image_base < mem_layout->free_base) {
+		  printf("ERROR: Cannot load '%s' file: Not enough space.\r\n",
+			  image_name);
+		  dump_load_info(image_base, image_flen, mem_layout);
+		  return 0;
+	  }
+
+	  /* Calculate the amount of extra memory used due to alignment */
+	  offset = temp_image_base - image_base;
+
+	  break;
+
+	case BOT_LOAD:
+
+	  /* Load the image at the bottom of free memory */
+	  temp_image_base = mem_layout->free_base;
+	  image_base = page_align(temp_image_base, UP);
+	  assert(image_base >= temp_image_base);
+
+	  /* Check whether the image still fits after page alignment */
+	  if (image_base + image_flen >
+	      mem_layout->free_base + mem_layout->free_size) {
+		  printf("ERROR: Cannot load '%s' file: Not enough space.\r\n",
+			  image_name);
+		  dump_load_info(image_base, image_flen, mem_layout);
+		  return 0;
+	  }
+
+	  /* Calculate the amount of extra memory used due to alignment */
+	  offset = image_base - temp_image_base;
+
+	  break;
+
+	default:
+	  assert(0);
+
+	}
+
+	/*
+	 * Some images must be loaded at a fixed address, not a dynamic one.
+	 *
+	 * This has been implemented as a hack on top of the existing dynamic
+	 * loading mechanism, for the time being.  If the 'fixed_addr' function
+	 * argument is different from zero, then it will force the load address.
+	 * So we still have this principle of top/bottom loading but the code
+	 * determining the load address is bypassed and the load address is
+	 * forced to the fixed one.
+	 *
+	 * This can result in quite a lot of wasted space because we still use
+	 * 1 sole meminfo structure to represent the extents of free memory,
+	 * where we should use some sort of linked list.
+	 *
+	 * E.g. we want to load BL2 at address 0x04020000, the resulting memory
+	 *      layout should look as follows:
+	 * ------------ 0x04040000
+	 * |          |  <- Free space (1)
+	 * |----------|
+	 * |   BL2    |
+	 * |----------| 0x04020000
+	 * |          |  <- Free space (2)
+	 * |----------|
+	 * |   BL1    |
+	 * ------------ 0x04000000
+	 *
+	 * But in the current hacky implementation, we'll need to specify
+	 * whether BL2 is loaded at the top or bottom of the free memory.
+	 * E.g. if BL2 is considered as top-loaded, the meminfo structure
+	 * will give the following view of the memory, hiding the chunk of
+	 * free memory above BL2:
+	 * ------------ 0x04040000
+	 * |          |
+	 * |          |
+	 * |   BL2    |
+	 * |----------| 0x04020000
+	 * |          |  <- Free space (2)
+	 * |----------|
+	 * |   BL1    |
+	 * ------------ 0x04000000
+	 */
+	if (fixed_addr != 0) {
+		/* Load the image at the given address. */
+		image_base = fixed_addr;
+
+		/* Check whether the image fits. */
+		if ((image_base < mem_layout->free_base) ||
+		    (image_base + image_flen >
+		       mem_layout->free_base + mem_layout->free_size)) {
+			printf("ERROR: Cannot load '%s' file: Not enough space.\r\n",
+				image_name);
+			dump_load_info(image_base, image_flen, mem_layout);
+			return 0;
+		}
+
+		/* Check whether the fixed load address is page-aligned. */
+		if (!is_page_aligned(image_base)) {
+			printf("ERROR: Cannot load '%s' file at unaligned address 0x%lx.\r\n",
+				image_name, fixed_addr);
+			return 0;
+		}
+
+		/*
+		 * Calculate the amount of extra memory used due to fixed
+		 * loading.
+		 */
+		if (load_type == TOP_LOAD) {
+			unsigned long max_addr, space_used;
+			/*
+			 * ------------ max_addr
+			 * | /wasted/ |                 | offset
+			 * |..........|..............................
+			 * |  image   |                 | image_flen
+			 * |----------| fixed_addr
+			 * |          |
+			 * |          |
+			 * ------------ total_base
+			 */
+			max_addr = mem_layout->total_base + mem_layout->total_size;
+			/*
+			 * Compute the amount of memory used by the image.
+			 * Corresponds to all space above the image load
+			 * address.
+			 */
+			space_used = max_addr - fixed_addr;
+			/*
+			 * Calculate the amount of wasted memory within the
+			 * amount of memory used by the image.
+			 */
+			offset = space_used - image_flen;
+		} else /* BOT_LOAD */
+			/*
+			 * ------------
+			 * |          |
+			 * |          |
+			 * |----------|
+			 * |  image   |
+			 * |..........| fixed_addr
+			 * | /wasted/ |                 | offset
+			 * ------------ total_base
+			 */
+			offset = fixed_addr - mem_layout->total_base;
+	}
+
+	/* We have enough space so load the image now */
+	image_flen = semihosting_download_file(image_name,
+					       image_flen,
+					       (void *) image_base);
+	if (image_flen <= 0) {
+		printf("ERROR: Failed to load '%s' file from semihosting (%i).\r\n",
+			image_name, image_flen);
+		return 0;
+	}
+
+	/*
+	 * File has been successfully loaded. Update the free memory
+	 * data structure & flush the contents of the TZRAM so that
+	 * the next EL can see it.
+	 */
+	flush_dcache_range(image_base, image_flen);
+
+	mem_layout->free_size -= image_flen + offset;
+
+	/* Update the base of free memory since it has moved up */
+	if (load_type == BOT_LOAD)
+		mem_layout->free_base += offset + image_flen;
+
+	return image_base;
+}
+
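+/*
+ * Illustrative call (the image name and layout variable are hypothetical):
+ *
+ *     bl2_base = load_image(&bl1_mem_layout, "bl2.bin", BOT_LOAD, 0);
+ *
+ * A return value of zero means the load failed; a non-zero fixed_addr
+ * argument would force the load address instead of deriving it.
+ */
+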
+/*******************************************************************************
+ * Run a loaded image from the given entry point. This could result in either
+ * dropping into a lower exception level or jumping to a higher exception level.
+ * The only way of doing the latter is through an SMC. In either case, setup the
+ * parameters for the EL change request correctly.
+ ******************************************************************************/
+int run_image(unsigned long entrypoint,
+	      unsigned long spsr,
+	      unsigned long target_security_state,
+	      meminfo *mem_layout,
+	      void *data)
+{
+	el_change_info run_image_info;
+	unsigned long current_el = read_current_el();
+
+	/* Tell next EL what we want done */
+	run_image_info.args.arg0 = RUN_IMAGE;
+	run_image_info.entrypoint = entrypoint;
+	run_image_info.spsr = spsr;
+	run_image_info.security_state = target_security_state;
+	run_image_info.next = 0;
+
+	/*
+	 * If we are EL3 then only an eret can take us to the desired
+	 * exception level. Else for the time being assume that we have
+	 * to jump to a higher EL and issue an SMC. Contents of argY
+	 * will go into the general purpose register xY e.g. arg0->x0
+	 */
+	if (GET_EL(current_el) == MODE_EL3) {
+		run_image_info.args.arg1 = (unsigned long) mem_layout;
+		run_image_info.args.arg2 = (unsigned long) data;
+	} else {
+		run_image_info.args.arg1 = entrypoint;
+		run_image_info.args.arg2 = spsr;
+		run_image_info.args.arg3 = (unsigned long) mem_layout;
+		run_image_info.args.arg4 = (unsigned long) data;
+	}
+
+	return change_el(&run_image_info);
+}
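+
+/*
+ * Illustrative call (argument values are hypothetical): from EL3, BL1 could
+ * hand control to a bottom-loaded image at secure EL1 with
+ *
+ *     spsr = make_spsr(MODE_EL1, MODE_SP_ELX, MODE_RW_64);
+ *     run_image(image_entrypoint, spsr, SECURE, &mem_layout, NULL);
+ *
+ * where MODE_EL1, MODE_SP_ELX and MODE_RW_64 are assumed selector names.
+ */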
diff --git a/common/psci/psci_afflvl_off.c b/common/psci/psci_afflvl_off.c
new file mode 100644
index 0000000..937ba9d
--- /dev/null
+++ b/common/psci/psci_afflvl_off.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <console.h>
+#include <platform.h>
+#include <psci.h>
+#include <psci_private.h>
+
+typedef int (*afflvl_off_handler)(unsigned long, aff_map_node *);
+
+/*******************************************************************************
+ * The next three functions implement a handler for each supported affinity
+ * level which is called when that affinity level is turned off.
+ ******************************************************************************/
+static int psci_afflvl0_off(unsigned long mpidr, aff_map_node *cpu_node)
+{
+	unsigned int index, plat_state;
+	int rc = PSCI_E_SUCCESS;
+	unsigned long sctlr = read_sctlr();
+
+	assert(cpu_node->level == MPIDR_AFFLVL0);
+
+	/*
+	 * Generic management: Get the index for clearing any
+	 * lingering re-entry information
+	 */
+	index = cpu_node->data;
+	memset(&psci_ns_entry_info[index], 0, sizeof(psci_ns_entry_info[index]));
+
+	/*
+	 * Arch. management. Perform the necessary steps to flush all
+	 * cpu caches.
+	 *
+	 * TODO: This power down sequence varies across cpus so it needs to be
+	 * abstracted out on the basis of the MIDR like in cpu_reset_handler().
+	 * Do the bare minimal for the time being. Fix this before porting to
+	 * Cortex models.
+	 */
+	sctlr &= ~SCTLR_C_BIT;
+	write_sctlr(sctlr);
+
+	/*
+	 * CAUTION: This flush to the level of unification makes an assumption
+	 * about the cache hierarchy at affinity level 0 (cpu) in the platform.
+	 * Ideally the platform should tell psci which levels to flush to exit
+	 * coherency.
+	 */
+	dcsw_op_louis(DCCISW);
+
+	/*
+	 * Plat. management: Perform platform specific actions to turn this
+	 * cpu off e.g. exit cpu coherency, program the power controller etc.
+	 */
+	if (psci_plat_pm_ops->affinst_off) {
+
+		/* Get the current physical state of this cpu */
+		plat_state = psci_get_aff_phys_state(cpu_node);
+		rc = psci_plat_pm_ops->affinst_off(mpidr,
+						   cpu_node->level,
+						   plat_state);
+	}
+
+	/*
+	 * The only error cpu_off can return is E_DENIED. So check if that's
+	 * indeed the case. The caller will simply 'eret' in case of an error.
+	 */
+	if (rc != PSCI_E_SUCCESS)
+		assert(rc == PSCI_E_DENIED);
+
+	return rc;
+}
+
+static int psci_afflvl1_off(unsigned long mpidr, aff_map_node *cluster_node)
+{
+	int rc = PSCI_E_SUCCESS;
+	unsigned int plat_state;
+
+	/* Sanity check the cluster level */
+	assert(cluster_node->level == MPIDR_AFFLVL1);
+
+	/*
+	 * Keep the physical state of this cluster handy to decide
+	 * what action needs to be taken
+	 */
+	plat_state = psci_get_aff_phys_state(cluster_node);
+
+	/*
+	 * Arch. Management. Flush all levels of caches to PoC if
+	 * the cluster is to be shutdown
+	 */
+	if (plat_state == PSCI_STATE_OFF)
+		dcsw_op_all(DCCISW);
+
+	/*
+	 * Plat. Management. Allow the platform to do its cluster
+	 * specific bookkeeping e.g. turn off interconnect coherency,
+	 * program the power controller etc.
+	 */
+	if (psci_plat_pm_ops->affinst_off)
+		rc = psci_plat_pm_ops->affinst_off(mpidr,
+						   cluster_node->level,
+						   plat_state);
+
+	return rc;
+}
+
+static int psci_afflvl2_off(unsigned long mpidr, aff_map_node *system_node)
+{
+	int rc = PSCI_E_SUCCESS;
+	unsigned int plat_state;
+
+	/* Cannot go beyond this level */
+	assert(system_node->level == MPIDR_AFFLVL2);
+
+	/*
+	 * Keep the physical state of the system handy to decide what
+	 * action needs to be taken
+	 */
+	plat_state = psci_get_aff_phys_state(system_node);
+
+	/* No arch. or generic bookkeeping to do here currently */
+
+	/*
+	 * Plat. Management : Allow the platform to do its bookkeeping
+	 * at this affinity level
+	 */
+	if (psci_plat_pm_ops->affinst_off)
+		rc = psci_plat_pm_ops->affinst_off(mpidr,
+						   system_node->level,
+						   plat_state);
+	return rc;
+}
+
+static const afflvl_off_handler psci_afflvl_off_handlers[] = {
+	psci_afflvl0_off,
+	psci_afflvl1_off,
+	psci_afflvl2_off,
+};
+
+/*******************************************************************************
+ * This function implements the core of the processing required to turn a cpu
+ * off. It's assumed that along with turning the cpu off, higher affinity levels
+ * will be turned off as far as possible. We first need to determine the new
+ * state of all the affinity instances in the mpidr corresponding to the target
+ * cpu. Action will be taken on the basis of this new state. To do the state
+ * change we first need to acquire the locks for all the implemented affinity
+ * levels to be able to snapshot the system state. Then we need to start turning
+ * affinity levels off from the lowest to the highest (e.g. a cpu needs to be
+ * off before a cluster can be turned off). To achieve this flow, we start
+ * acquiring the locks from the highest to the lowest affinity level. Once we
+ * reach affinity level 0, we do the state change followed by the actions
+ * corresponding to the new state for affinity level 0. Actions as per the
+ * updated state for higher affinity levels are performed as we unwind back to
+ * the highest affinity level.
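+ *
+ * Illustrative flow (editorial) for psci_afflvl_off(mpidr, MPIDR_AFFLVL2,
+ * MPIDR_AFFLVL0): lock level 2, recurse; lock level 1, recurse; lock level
+ * 0, change the state, run the level 0 handler and release the level 0
+ * lock; then run the level 1 and level 2 handlers on the way back,
+ * releasing each lock in turn.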
+ ******************************************************************************/
+int psci_afflvl_off(unsigned long mpidr,
+		    int cur_afflvl,
+		    int tgt_afflvl)
+{
+	int rc = PSCI_E_SUCCESS, level;
+	unsigned int next_state, prev_state;
+	aff_map_node *aff_node;
+
+	mpidr &= MPIDR_AFFINITY_MASK;
+
+	/*
+	 * Some affinity instances at levels between the current and
+	 * target levels could be absent in the mpidr. Skip them and
+	 * start from the first present instance.
+	 */
+	level = psci_get_first_present_afflvl(mpidr,
+					      cur_afflvl,
+					      tgt_afflvl,
+					      &aff_node);
+	/*
+	 * Return if there are no more affinity instances beyond this
+	 * level to process. Else ensure that the returned affinity
+	 * node makes sense.
+	 */
+	if (aff_node == NULL)
+		return rc;
+
+	assert(level == aff_node->level);
+
+	/*
+	 * This function acquires the lock corresponding to each
+	 * affinity level so that state management can be done safely.
+	 */
+	bakery_lock_get(mpidr, &aff_node->lock);
+
+	/* Keep the old state and the next one handy */
+	prev_state = psci_get_state(aff_node->state);
+	next_state = PSCI_STATE_OFF;
+
+	/*
+	 * We start from the highest affinity level and work our way
+	 * downwards to the lowest i.e. MPIDR_AFFLVL0.
+	 */
+	if (aff_node->level == tgt_afflvl) {
+		psci_change_state(mpidr,
+				  tgt_afflvl,
+				  get_max_afflvl(),
+				  next_state);
+	} else {
+		rc = psci_afflvl_off(mpidr, level - 1, tgt_afflvl);
+		if (rc != PSCI_E_SUCCESS) {
+			psci_set_state(aff_node->state, prev_state);
+			goto exit;
+		}
+	}
+
+	/*
+	 * Perform generic, architecture and platform specific
+	 * handling
+	 */
+	rc = psci_afflvl_off_handlers[level](mpidr, aff_node);
+	if (rc != PSCI_E_SUCCESS) {
+		psci_set_state(aff_node->state, prev_state);
+		goto exit;
+	}
+
+	/*
+	 * If all has gone as per plan then this cpu should be
+	 * marked as OFF
+	 */
+	if (level == MPIDR_AFFLVL0) {
+		next_state = psci_get_state(aff_node->state);
+		assert(next_state == PSCI_STATE_OFF);
+	}
+
+exit:
+	bakery_lock_release(mpidr, &aff_node->lock);
+	return rc;
+}
diff --git a/common/psci/psci_afflvl_on.c b/common/psci/psci_afflvl_on.c
new file mode 100644
index 0000000..b0de063
--- /dev/null
+++ b/common/psci/psci_afflvl_on.c
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <console.h>
+#include <platform.h>
+#include <psci.h>
+#include <psci_private.h>
+
+typedef int (*afflvl_on_handler)(unsigned long,
+				 aff_map_node *,
+				 unsigned long,
+				 unsigned long);
+
+/*******************************************************************************
+ * This function checks whether a cpu which has been requested to be turned on
+ * is OFF to begin with.
+ ******************************************************************************/
+static int cpu_on_validate_state(unsigned int state)
+{
+	unsigned int psci_state;
+
+	/* Get the raw psci state */
+	psci_state = psci_get_state(state);
+
+	if (psci_state == PSCI_STATE_ON || psci_state == PSCI_STATE_SUSPEND)
+		return PSCI_E_ALREADY_ON;
+
+	if (psci_state == PSCI_STATE_ON_PENDING)
+		return PSCI_E_ON_PENDING;
+
+	assert(psci_state == PSCI_STATE_OFF);
+	return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Handler routine to turn a cpu on. It takes care of any generic, architectural
+ * or platform specific setup required.
+ * TODO: Split this code across separate handlers for each type of setup?
+ ******************************************************************************/
+static int psci_afflvl0_on(unsigned long target_cpu,
+			   aff_map_node *cpu_node,
+			   unsigned long ns_entrypoint,
+			   unsigned long context_id)
+{
+	unsigned int index, plat_state;
+	unsigned long psci_entrypoint;
+	int rc;
+
+	/* Sanity check to safeguard against data corruption */
+	assert(cpu_node->level == MPIDR_AFFLVL0);
+
+	/*
+	 * Generic management: Ensure that the cpu is off before it
+	 * can be turned on
+	 */
+	rc = cpu_on_validate_state(cpu_node->state);
+	if (rc != PSCI_E_SUCCESS)
+		return rc;
+
+	/*
+	 * Arch. management: Derive the re-entry information for
+	 * the non-secure world from the non-secure state from
+	 * where this call originated.
+	 */
+	index = cpu_node->data;
+	rc = psci_set_ns_entry_info(index, ns_entrypoint, context_id);
+	if (rc != PSCI_E_SUCCESS)
+		return rc;
+
+	/* Set the secure world (EL3) re-entry point after BL1 */
+	psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
+
+	/*
+	 * Plat. management: Give the platform the current state
+	 * of the target cpu to allow it to perform the necessary
+	 * steps to power on.
+	 */
+	if (psci_plat_pm_ops->affinst_on) {
+
+		/* Get the current physical state of this cpu */
+		plat_state = psci_get_aff_phys_state(cpu_node);
+		rc = psci_plat_pm_ops->affinst_on(target_cpu,
+						  psci_entrypoint,
+						  ns_entrypoint,
+						  cpu_node->level,
+						  plat_state);
+	}
+
+	return rc;
+}
+
+/*******************************************************************************
+ * Handler routine to turn a cluster on. It takes care of any generic, arch.
+ * or platform specific setup required.
+ * TODO: Split this code across separate handlers for each type of setup?
+ ******************************************************************************/
+static int psci_afflvl1_on(unsigned long target_cpu,
+			   aff_map_node *cluster_node,
+			   unsigned long ns_entrypoint,
+			   unsigned long context_id)
+{
+	int rc = PSCI_E_SUCCESS;
+	unsigned int plat_state;
+	unsigned long psci_entrypoint;
+
+	assert(cluster_node->level == MPIDR_AFFLVL1);
+
+	/*
+	 * There is no generic and arch. specific cluster
+	 * management required
+	 */
+
+	/*
+	 * Plat. management: Give the platform the current state
+	 * of the target cpu to allow it to perform the necessary
+	 * steps to power on.
+	 */
+	if (psci_plat_pm_ops->affinst_on) {
+		plat_state = psci_get_aff_phys_state(cluster_node);
+		psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
+		rc = psci_plat_pm_ops->affinst_on(target_cpu,
+						  psci_entrypoint,
+						  ns_entrypoint,
+						  cluster_node->level,
+						  plat_state);
+	}
+
+	return rc;
+}
+
+/*******************************************************************************
+ * Handler routine to turn a cluster of clusters on. It takes care of any
+ * generic, arch. or platform specific setup required.
+ * TODO: Split this code across separate handlers for each type of setup?
+ ******************************************************************************/
+static int psci_afflvl2_on(unsigned long target_cpu,
+			   aff_map_node *system_node,
+			   unsigned long ns_entrypoint,
+			   unsigned long context_id)
+{
+	int rc = PSCI_E_SUCCESS;
+	unsigned int plat_state;
+	unsigned long psci_entrypoint;
+
+	/* Cannot go beyond affinity level 2 in this psci imp. */
+	assert(system_node->level == MPIDR_AFFLVL2);
+
+	/*
+	 * There is no generic and arch. specific system management
+	 * required
+	 */
+
+	/*
+	 * Plat. management: Give the platform the current state
+	 * of the target cpu to allow it to perform the necessary
+	 * steps to power on.
+	 */
+	if (psci_plat_pm_ops->affinst_on) {
+		plat_state = psci_get_aff_phys_state(system_node);
+		psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
+		rc = psci_plat_pm_ops->affinst_on(target_cpu,
+						  psci_entrypoint,
+						  ns_entrypoint,
+						  system_node->level,
+						  plat_state);
+	}
+
+	return rc;
+}
+
+/* Private data structure to make these handlers accessible through indexing */
+static const afflvl_on_handler psci_afflvl_on_handlers[] = {
+	psci_afflvl0_on,
+	psci_afflvl1_on,
+	psci_afflvl2_on,
+};
+
+/*******************************************************************************
+ * This function implements the core of the processing required to turn a cpu
+ * on. It avoids recursion to traverse from the lowest to the highest affinity
+ * level unlike the off/suspend/power-on finisher functions. It does ensure
+ * that the locks are picked in the same order as the other routines to avoid
+ * deadlocks. The flow is: take all the locks up to the highest affinity
+ * level, call the handlers for turning each affinity level on & finally
+ * change the state of the affected affinity levels.
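+ *
+ * Illustrative flow (editorial) for psci_afflvl_on(cpu, ep, id,
+ * MPIDR_AFFLVL2, MPIDR_AFFLVL0): lock levels 2, 1 and 0; run the level 2, 1
+ * and 0 on handlers; mark the target cpu as ON_PENDING; release the locks
+ * in the reverse (0, 1, 2) order.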
+ ******************************************************************************/
+int psci_afflvl_on(unsigned long target_cpu,
+		   unsigned long entrypoint,
+		   unsigned long context_id,
+		   int current_afflvl,
+		   int target_afflvl)
+{
+	unsigned int prev_state, next_state;
+	int rc = PSCI_E_SUCCESS, level;
+	aff_map_node *aff_node;
+	unsigned long mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;
+
+	/*
+	 * This loop acquires the lock corresponding to each
+	 * affinity level so that by the time we hit the lowest
+	 * affinity level, a snapshot of the system topology is taken
+	 * and state management can be done safely.
+	 */
+	for (level = current_afflvl; level >= target_afflvl; level--) {
+		aff_node = psci_get_aff_map_node(target_cpu, level);
+		if (aff_node)
+			bakery_lock_get(mpidr, &aff_node->lock);
+	}
+
+	/*
+	 * Perform generic, architecture and platform specific
+	 * handling
+	 */
+	for (level = current_afflvl; level >= target_afflvl; level--) {
+
+		/* Grab the node for each affinity level once again */
+		aff_node = psci_get_aff_map_node(target_cpu, level);
+		if (aff_node) {
+
+			/* Keep the old state and the next one handy */
+			prev_state = psci_get_state(aff_node->state);
+			rc = psci_afflvl_on_handlers[level](target_cpu,
+							    aff_node,
+							    entrypoint,
+							    context_id);
+			if (rc != PSCI_E_SUCCESS) {
+				psci_set_state(aff_node->state, prev_state);
+				goto exit;
+			}
+		}
+	}
+
+	/*
+	 * State management: Update the states since this is the
+	 * target affinity level requested.
+	 */
+	psci_change_state(target_cpu,
+			  target_afflvl,
+			  get_max_afflvl(),
+			  PSCI_STATE_ON_PENDING);
+
+exit:
+	/*
+	 * This loop releases the lock corresponding to each affinity level
+	 * in the reverse order. It also checks the final state of the cpu.
+	 */
+	for (level = target_afflvl; level <= current_afflvl; level++) {
+		aff_node = psci_get_aff_map_node(target_cpu, level);
+		if (aff_node) {
+			if (level == MPIDR_AFFLVL0) {
+				next_state = psci_get_state(aff_node->state);
+				assert(next_state == PSCI_STATE_ON_PENDING);
+			}
+			bakery_lock_release(mpidr, &aff_node->lock);
+		}
+	}
+
+	return rc;
+}
+
+/*******************************************************************************
+ * The following functions finish an earlier affinity power on request. They
+ * are called by the common finisher routine in psci_common.c.
+ ******************************************************************************/
+static unsigned int psci_afflvl0_on_finish(unsigned long mpidr,
+					   aff_map_node *cpu_node,
+					   unsigned int prev_state)
+{
+	unsigned int index, plat_state, rc = PSCI_E_SUCCESS;
+
+	assert(cpu_node->level == MPIDR_AFFLVL0);
+
+	/*
+	 * Plat. management: Perform the platform specific actions
+	 * for this cpu e.g. enabling the gic or zeroing the mailbox
+	 * register. The actual state of this cpu has already been
+	 * changed.
+	 */
+	if (psci_plat_pm_ops->affinst_on_finish) {
+
+		/* Get the previous physical state of this cpu */
+		plat_state = psci_get_phys_state(prev_state);
+		rc = psci_plat_pm_ops->affinst_on_finish(mpidr,
+							 cpu_node->level,
+							 plat_state);
+		assert(rc == PSCI_E_SUCCESS);
+	}
+
+	/*
+	 * Arch. management: Turn on mmu & restore architectural state
+	 */
+	write_vbar((unsigned long) runtime_exceptions);
+	enable_mmu();
+
+	/*
+	 * All the platform specific actions for turning this cpu
+	 * on have completed. Perform enough arch. initialization
+	 * to run in the non-secure address space.
+	 */
+	bl31_arch_setup();
+
+	/*
+	 * Generic management: Now we just need to retrieve the
+	 * information that we had stashed away during the cpu_on
+	 * call to set this cpu on its way. First get the index
+	 * for restoring the re-entry info
+	 */
+	index = cpu_node->data;
+	rc = psci_get_ns_entry_info(index);
+
+	/* Clean caches before re-entering normal world */
+	dcsw_op_louis(DCCSW);
+
+	return rc;
+}
+
+static unsigned int psci_afflvl1_on_finish(unsigned long mpidr,
+					   aff_map_node *cluster_node,
+					   unsigned int prev_state)
+{
+	unsigned int rc = PSCI_E_SUCCESS;
+	unsigned int plat_state;
+
+	assert(cluster_node->level == MPIDR_AFFLVL1);
+
+	/*
+	 * Plat. management: Perform the platform specific actions
+	 * as per the old state of the cluster e.g. enabling
+	 * coherency at the interconnect depends upon the state with
+	 * which this cluster was powered up. If anything goes wrong
+	 * then assert as there is no way to recover from this
+	 * situation.
+	 */
+	if (psci_plat_pm_ops->affinst_on_finish) {
+		plat_state = psci_get_phys_state(prev_state);
+		rc = psci_plat_pm_ops->affinst_on_finish(mpidr,
+							 cluster_node->level,
+							 plat_state);
+		assert(rc == PSCI_E_SUCCESS);
+	}
+
+	return rc;
+}
+
+
+static unsigned int psci_afflvl2_on_finish(unsigned long mpidr,
+					   aff_map_node *system_node,
+					   unsigned int prev_state)
+{
+	unsigned int rc = PSCI_E_SUCCESS;
+	unsigned int plat_state;
+
+	/* Cannot go beyond this affinity level */
+	assert(system_node->level == MPIDR_AFFLVL2);
+
+	/*
+	 * Currently, there are no architectural actions to perform
+	 * at the system level.
+	 */
+
+	/*
+	 * Plat. management: Perform the platform specific actions
+	 * as per the old state of the cluster e.g. enabling
+	 * coherency at the interconnect depends upon the state with
+	 * which this cluster was powered up. If anything goes wrong
+	 * then assert as there is no way to recover from this
+	 * situation.
+	 */
+	if (psci_plat_pm_ops->affinst_on_finish) {
+		plat_state = psci_get_phys_state(prev_state);
+		rc = psci_plat_pm_ops->affinst_on_finish(mpidr,
+							 system_node->level,
+							 plat_state);
+		assert(rc == PSCI_E_SUCCESS);
+	}
+
+	return rc;
+}
+
+const afflvl_power_on_finisher psci_afflvl_on_finishers[] = {
+	psci_afflvl0_on_finish,
+	psci_afflvl1_on_finish,
+	psci_afflvl2_on_finish,
+};
+
diff --git a/common/psci/psci_afflvl_suspend.c b/common/psci/psci_afflvl_suspend.c
new file mode 100644
index 0000000..030f15d
--- /dev/null
+++ b/common/psci/psci_afflvl_suspend.c
@@ -0,0 +1,465 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <console.h>
+#include <platform.h>
+#include <psci.h>
+#include <psci_private.h>
+
+typedef int (*afflvl_suspend_handler)(unsigned long,
+				      aff_map_node *,
+				      unsigned long,
+				      unsigned long,
+				      unsigned int);
+
+/*******************************************************************************
+ * The next three functions implement a handler for each supported affinity
+ * level which is called when that affinity level is about to be suspended.
+ ******************************************************************************/
+static int psci_afflvl0_suspend(unsigned long mpidr,
+				aff_map_node *cpu_node,
+				unsigned long ns_entrypoint,
+				unsigned long context_id,
+				unsigned int power_state)
+{
+	unsigned int index, plat_state;
+	unsigned long psci_entrypoint, sctlr = read_sctlr();
+	int rc = PSCI_E_SUCCESS;
+
+	/* Sanity check to safeguard against data corruption */
+	assert(cpu_node->level == MPIDR_AFFLVL0);
+
+	/*
+	 * Generic management: Store the re-entry information for the
+	 * non-secure world
+	 */
+	index = cpu_node->data;
+	rc = psci_set_ns_entry_info(index, ns_entrypoint, context_id);
+	if (rc != PSCI_E_SUCCESS)
+		return rc;
+
+	/*
+	 * Arch. management: Save the secure context, flush the
+	 * L1 caches and exit intra-cluster coherency et al
+	 */
+	psci_secure_context[index].sctlr = read_sctlr();
+	psci_secure_context[index].scr = read_scr();
+	psci_secure_context[index].cptr = read_cptr();
+	psci_secure_context[index].cpacr = read_cpacr();
+	psci_secure_context[index].cntfrq = read_cntfrq_el0();
+	psci_secure_context[index].mair = read_mair();
+	psci_secure_context[index].tcr = read_tcr();
+	psci_secure_context[index].ttbr = read_ttbr0();
+	psci_secure_context[index].vbar = read_vbar();
+
+	/* Set the secure world (EL3) re-entry point after BL1 */
+	psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
+
+	/*
+	 * Arch. management. Perform the necessary steps to flush all
+	 * cpu caches.
+	 *
+	 * TODO: This power down sequence varies across cpus so it needs to be
+	 * abstracted out on the basis of the MIDR like in cpu_reset_handler().
+	 * Do the bare minimum for the time being. Fix this before porting to
+	 * Cortex models.
+	 */
+	sctlr &= ~SCTLR_C_BIT;
+	write_sctlr(sctlr);
+
+	/*
+	 * CAUTION: This flush to the level of unification makes an assumption
+	 * about the cache hierarchy at affinity level 0 (cpu) in the platform.
+	 * Ideally the platform should tell psci which levels to flush to exit
+	 * coherency.
+	 */
+	dcsw_op_louis(DCCISW);
+
+	/*
+	 * Plat. management: Allow the platform to perform the
+	 * necessary actions to turn off this cpu e.g. set the
+	 * platform defined mailbox with the psci entrypoint,
+	 * program the power controller etc.
+	 */
+	if (psci_plat_pm_ops->affinst_suspend) {
+		plat_state = psci_get_aff_phys_state(cpu_node);
+		rc = psci_plat_pm_ops->affinst_suspend(mpidr,
+						       psci_entrypoint,
+						       ns_entrypoint,
+						       cpu_node->level,
+						       plat_state);
+	}
+
+	return rc;
+}
+
+static int psci_afflvl1_suspend(unsigned long mpidr,
+				aff_map_node *cluster_node,
+				unsigned long ns_entrypoint,
+				unsigned long context_id,
+				unsigned int power_state)
+{
+	int rc = PSCI_E_SUCCESS;
+	unsigned int plat_state;
+	unsigned long psci_entrypoint;
+
+	/* Sanity check the cluster level */
+	assert(cluster_node->level == MPIDR_AFFLVL1);
+
+	/*
+	 * Keep the physical state of this cluster handy to decide
+	 * what action needs to be taken
+	 */
+	plat_state = psci_get_aff_phys_state(cluster_node);
+
+	/*
+	 * Arch. management: Flush all levels of caches to PoC if the
+	 * cluster is to be shutdown
+	 */
+	if (plat_state == PSCI_STATE_OFF)
+		dcsw_op_all(DCCISW);
+
+	/*
+	 * Plat. Management. Allow the platform to do its cluster
+	 * specific bookkeeping e.g. turn off interconnect coherency,
+	 * program the power controller etc.
+	 */
+	if (psci_plat_pm_ops->affinst_suspend) {
+
+		/*
+		 * Sending the psci entrypoint is currently redundant
+		 * beyond affinity level 0 but one never knows what a
+		 * platform might do. Also it allows us to keep the
+		 * platform handler prototype the same.
+		 */
+		psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
+
+		rc = psci_plat_pm_ops->affinst_suspend(mpidr,
+						       psci_entrypoint,
+						       ns_entrypoint,
+						       cluster_node->level,
+						       plat_state);
+	}
+
+	return rc;
+}
+
+
+static int psci_afflvl2_suspend(unsigned long mpidr,
+				aff_map_node *system_node,
+				unsigned long ns_entrypoint,
+				unsigned long context_id,
+				unsigned int power_state)
+{
+	int rc = PSCI_E_SUCCESS;
+	unsigned int plat_state;
+	unsigned long psci_entrypoint;
+
+	/* Cannot go beyond this */
+	assert(system_node->level == MPIDR_AFFLVL2);
+
+	/*
+	 * Keep the physical state of the system handy to decide what
+	 * action needs to be taken
+	 */
+	plat_state = psci_get_aff_phys_state(system_node);
+
+	/*
+	 * Plat. Management : Allow the platform to do its bookkeeping
+	 * at this affinity level
+	 */
+	if (psci_plat_pm_ops->affinst_suspend) {
+
+		/*
+		 * Sending the psci entrypoint is currently redundant
+		 * beyond affinity level 0 but one never knows what a
+		 * platform might do. Also it allows us to keep the
+		 * platform handler prototype the same.
+		 */
+		psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
+
+		rc = psci_plat_pm_ops->affinst_suspend(mpidr,
+						       psci_entrypoint,
+						       ns_entrypoint,
+						       system_node->level,
+						       plat_state);
+	}
+
+	return rc;
+}
+
+static const afflvl_suspend_handler psci_afflvl_suspend_handlers[] = {
+	psci_afflvl0_suspend,
+	psci_afflvl1_suspend,
+	psci_afflvl2_suspend,
+};
+
+/*******************************************************************************
+ * This function implements the core of the processing required to suspend a
+ * cpu. It is assumed that along with suspending the cpu, higher affinity levels
+ * be suspended as far as possible. Suspending a cpu is equivalent to physically
+ * powering it down, but the cpu is still available to the OS for scheduling.
+ * We first need to determine the new state of all the affinity instances in
+ * the mpidr corresponding to the target cpu. Action will be taken on the basis
+ * of this new state. To do the state change we first need to acquire the locks
+ * for all the implemented affinity levels to be able to snapshot the system
+ * state. Then we need to start suspending affinity levels from the lowest to
+ * the highest (e.g. a cpu needs to be suspended before a cluster can be). To
+ * achieve this flow, we start acquiring the locks from the highest to the
+ * lowest affinity level. Once we reach affinity level 0, we do the state change
+ * followed by the actions corresponding to the new state for affinity level 0.
+ * Actions as per the updated state for higher affinity levels are performed as
+ * we unwind back to the highest affinity level.
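+ *
+ * Illustrative flow (editorial) for a call with cur_afflvl = MPIDR_AFFLVL2
+ * and tgt_afflvl = MPIDR_AFFLVL0: lock level 2, recurse; lock level 1,
+ * recurse; lock level 0, change the state to SUSPEND, run the level 0
+ * handler and release the lock; then run the level 1 and level 2 handlers
+ * on the way back, releasing each lock in turn.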
+ ******************************************************************************/
+int psci_afflvl_suspend(unsigned long mpidr,
+			unsigned long entrypoint,
+			unsigned long context_id,
+			unsigned int power_state,
+			int cur_afflvl,
+			int tgt_afflvl)
+{
+	int rc = PSCI_E_SUCCESS, level;
+	unsigned int prev_state, next_state;
+	aff_map_node *aff_node;
+
+	mpidr &= MPIDR_AFFINITY_MASK;
+
+	/*
+	 * Some affinity instances at levels between the current and
+	 * target levels could be absent in the mpidr. Skip them and
+	 * start from the first present instance.
+	 */
+	level = psci_get_first_present_afflvl(mpidr,
+					      cur_afflvl,
+					      tgt_afflvl,
+					      &aff_node);
+
+	/*
+	 * Return if there are no more affinity instances beyond this
+	 * level to process. Else ensure that the returned affinity
+	 * node makes sense.
+	 */
+	if (aff_node == NULL)
+		return rc;
+
+	assert(level == aff_node->level);
+
+	/*
+	 * This function acquires the lock corresponding to each
+	 * affinity level so that state management can be done safely.
+	 */
+	bakery_lock_get(mpidr, &aff_node->lock);
+
+	/* Keep the old state and the next one handy */
+	prev_state = psci_get_state(aff_node->state);
+	next_state = PSCI_STATE_SUSPEND;
+
+	/*
+	 * We start from the highest affinity level and work our way
+	 * downwards to the lowest i.e. MPIDR_AFFLVL0.
+	 */
+	if (aff_node->level == tgt_afflvl) {
+		psci_change_state(mpidr,
+				  tgt_afflvl,
+				  get_max_afflvl(),
+				  next_state);
+	} else {
+		rc = psci_afflvl_suspend(mpidr,
+					 entrypoint,
+					 context_id,
+					 power_state,
+					 level - 1,
+					 tgt_afflvl);
+		if (rc != PSCI_E_SUCCESS) {
+			psci_set_state(aff_node->state, prev_state);
+			goto exit;
+		}
+	}
+
+	/*
+	 * Perform generic, architecture and platform specific
+	 * handling
+	 */
+	rc = psci_afflvl_suspend_handlers[level](mpidr,
+						 aff_node,
+						 entrypoint,
+						 context_id,
+						 power_state);
+	if (rc != PSCI_E_SUCCESS) {
+		psci_set_state(aff_node->state, prev_state);
+		goto exit;
+	}
+
+	/*
+	 * If all has gone as per plan then this cpu should be
+	 * marked as OFF
+	 */
+	if (level == MPIDR_AFFLVL0) {
+		next_state = psci_get_state(aff_node->state);
+		assert(next_state == PSCI_STATE_SUSPEND);
+	}
+
+exit:
+	bakery_lock_release(mpidr, &aff_node->lock);
+	return rc;
+}
+
+/*******************************************************************************
+ * The following functions finish an earlier affinity suspend request. They
+ * are called by the common finisher routine in psci_common.c.
+ ******************************************************************************/
+static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr,
+						aff_map_node *cpu_node,
+						unsigned int prev_state)
+{
+	unsigned int index, plat_state, rc = 0;
+
+	assert(cpu_node->level == MPIDR_AFFLVL0);
+
+	/*
+	 * Plat. management: Perform the platform specific actions
+	 * before we change the state of the cpu e.g. enabling the
+	 * gic or zeroing the mailbox register. If anything goes
+	 * wrong then assert as there is no way to recover from this
+	 * situation.
+	 */
+	if (psci_plat_pm_ops->affinst_suspend_finish) {
+		plat_state = psci_get_phys_state(prev_state);
+		rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
+							      cpu_node->level,
+							      plat_state);
+		assert(rc == PSCI_E_SUCCESS);
+	}
+
+	/* Get the index for restoring the re-entry information */
+	index = cpu_node->data;
+
+	/*
+	 * Arch. management: Restore the stashed secure architectural
+	 * context in the right order.
+	 */
+	write_vbar(psci_secure_context[index].vbar);
+	write_mair(psci_secure_context[index].mair);
+	write_tcr(psci_secure_context[index].tcr);
+	write_ttbr0(psci_secure_context[index].ttbr);
+	write_sctlr(psci_secure_context[index].sctlr);
+
+	/* MMU and coherency should be enabled by now */
+	write_scr(psci_secure_context[index].scr);
+	write_cptr(psci_secure_context[index].cptr);
+	write_cpacr(psci_secure_context[index].cpacr);
+	write_cntfrq_el0(psci_secure_context[index].cntfrq);
+
+	/*
+	 * Generic management: Now we just need to retrieve the
+	 * information that we had stashed away during the suspend
+	 * call to set this cpu on it's way.
+	 */
+	rc = psci_get_ns_entry_info(index);
+
+	/* Clean caches before re-entering normal world */
+	dcsw_op_louis(DCCSW);
+
+	return rc;
+}
+
+static unsigned int psci_afflvl1_suspend_finish(unsigned long mpidr,
+						aff_map_node *cluster_node,
+						unsigned int prev_state)
+{
+	unsigned int rc = 0;
+	unsigned int plat_state;
+
+	assert(cluster_node->level == MPIDR_AFFLVL1);
+
+	/*
+	 * Plat. management: Perform the platform specific actions
+	 * as per the old state of the cluster e.g. enabling
+	 * coherency at the interconnect depends upon the state with
+	 * which this cluster was powered up. If anything goes wrong
+	 * then assert as there is no way to recover from this
+	 * situation.
+	 */
+	if (psci_plat_pm_ops->affinst_suspend_finish) {
+		plat_state = psci_get_phys_state(prev_state);
+		rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
+							      cluster_node->level,
+							      plat_state);
+		assert(rc == PSCI_E_SUCCESS);
+	}
+
+	return rc;
+}
+
+
+static unsigned int psci_afflvl2_suspend_finish(unsigned long mpidr,
+						aff_map_node *system_node,
+						unsigned int prev_state)
+{
+	unsigned int rc = PSCI_E_SUCCESS;
+	unsigned int plat_state;
+
+	/* Cannot go beyond this affinity level */
+	assert(system_node->level == MPIDR_AFFLVL2);
+
+	/*
+	 * Currently, there are no architectural actions to perform
+	 * at the system level.
+	 */
+
+	/*
+	 * Plat. management: Perform the platform specific actions
+	 * as per the old state of the cluster e.g. enabling
+	 * coherency at the interconnect depends upon the state with
+	 * which this cluster was powered up. If anything goes wrong
+	 * then assert as there is no way to recover from this
+	 * situation.
+	 */
+	if (psci_plat_pm_ops->affinst_suspend_finish) {
+		plat_state = psci_get_phys_state(prev_state);
+		rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
+							      system_node->level,
+							      plat_state);
+		assert(rc == PSCI_E_SUCCESS);
+	}
+
+	return rc;
+}
+
+const afflvl_power_on_finisher psci_afflvl_suspend_finishers[] = {
+	psci_afflvl0_suspend_finish,
+	psci_afflvl1_suspend_finish,
+	psci_afflvl2_suspend_finish,
+};
+
diff --git a/common/psci/psci_common.c b/common/psci/psci_common.c
new file mode 100644
index 0000000..6b07c53
--- /dev/null
+++ b/common/psci/psci_common.c
@@ -0,0 +1,520 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <console.h>
+#include <platform.h>
+#include <psci.h>
+#include <psci_private.h>
+
+/*******************************************************************************
+ * Arrays that contain the information needed to resume a cpu's execution when
+ * it is woken out of the suspend or off states. 'psci_ns_einfo_idx' keeps
+ * track of the next free index in the 'psci_ns_entry_info' &
+ * 'psci_secure_context' arrays. Each cpu is allocated a single entry in each
+ * array during startup.
+ ******************************************************************************/
+secure_context psci_secure_context[PSCI_NUM_AFFS];
+ns_entry_info psci_ns_entry_info[PSCI_NUM_AFFS];
+unsigned int psci_ns_einfo_idx;
+
+/*******************************************************************************
+ * Grand array that holds the platform's topology information for state
+ * management of affinity instances. Each node (aff_map_node) in the array
+ * corresponds to an affinity instance e.g. cluster, cpu within an mpidr
+ ******************************************************************************/
+aff_map_node psci_aff_map[PSCI_NUM_AFFS]
+__attribute__ ((section("tzfw_coherent_mem")));
+
+/*******************************************************************************
+ * In a system, a certain number of affinity instances are present at an
+ * affinity level. The cumulative number of instances across all levels is
+ * stored in 'psci_aff_map'. The topology tree has been flattened into this
+ * array. To retrieve nodes, information about the extents of each affinity
+ * level i.e. start index and end index needs to be present. 'psci_aff_limits'
+ * stores this information.
+ ******************************************************************************/
+aff_limits_node psci_aff_limits[MPIDR_MAX_AFFLVL + 1];
+
+/*******************************************************************************
+ * Pointer to functions exported by the platform to complete power mgmt. ops
+ ******************************************************************************/
+plat_pm_ops *psci_plat_pm_ops;
+
+/*******************************************************************************
+ * Simple routine to retrieve the maximum affinity level supported by the
+ * platform and check that it makes sense.
+ ******************************************************************************/
+int get_max_afflvl(void)
+{
+	int aff_lvl;
+
+	aff_lvl = plat_get_max_afflvl();
+	assert(aff_lvl <= MPIDR_MAX_AFFLVL && aff_lvl >= MPIDR_AFFLVL0);
+
+	return aff_lvl;
+}
+
+/*******************************************************************************
+ * Simple routine to set the id of an affinity instance at a given level in the
+ * mpidr.
+ ******************************************************************************/
+unsigned long mpidr_set_aff_inst(unsigned long mpidr,
+				 unsigned char aff_inst,
+				 int aff_lvl)
+{
+	unsigned long aff_shift;
+
+	assert(aff_lvl <= MPIDR_AFFLVL3);
+
+	/*
+	 * Decide the number of bits to shift by depending upon
+	 * the affinity level
+	 */
+	aff_shift = get_afflvl_shift(aff_lvl);
+
+	/* Clear the existing affinity instance & set the new one */
+	mpidr &= ~(MPIDR_AFFLVL_MASK << aff_shift);
+	mpidr |= (unsigned long) aff_inst << aff_shift;
+
+	return mpidr;
+}
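+
+/*
+ * For example, with the architectural MPIDR layout (Aff0 in bits [7:0],
+ * Aff1 in bits [15:8], Aff2 in bits [23:16]),
+ * mpidr_set_aff_inst(0, 2, MPIDR_AFFLVL1) returns 0x200 i.e. it selects
+ * cluster 2 while leaving the cpu id within the cluster at 0.
+ */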
+
+/*******************************************************************************
+ * Simple routine to determine whether an affinity instance at a given level
+ * in an mpidr exists or not.
+ ******************************************************************************/
+int psci_validate_mpidr(unsigned long mpidr, int level)
+{
+	aff_map_node *node;
+
+	node = psci_get_aff_map_node(mpidr, level);
+	if (node && (node->state & PSCI_AFF_PRESENT))
+		return PSCI_E_SUCCESS;
+	else
+		return PSCI_E_INVALID_PARAMS;
+}
+
+/*******************************************************************************
+ * Simple routine to determine the first affinity level instance that is
+ * present between the start and end affinity levels. This helps to skip
+ * handling of absent affinity levels while performing psci operations.
+ * The start level can be greater or less than the end level depending upon
+ * whether this routine is expected to search top down or bottom up.
+ ******************************************************************************/
+int psci_get_first_present_afflvl(unsigned long mpidr,
+				  int start_afflvl,
+				  int end_afflvl,
+				  aff_map_node **node)
+{
+	int level;
+
+	/* Check whether we have to search up or down */
+	if (start_afflvl <= end_afflvl) {
+		for (level = start_afflvl; level <= end_afflvl; level++) {
+			*node = psci_get_aff_map_node(mpidr, level);
+			if (*node && ((*node)->state & PSCI_AFF_PRESENT))
+				break;
+		}
+	} else {
+		for (level = start_afflvl; level >= end_afflvl; level--) {
+			*node = psci_get_aff_map_node(mpidr, level);
+			if (*node && ((*node)->state & PSCI_AFF_PRESENT))
+				break;
+		}
+	}
+
+	return level;
+}
+
+/*******************************************************************************
+ * Recursively change the affinity state between the current and target
+ * affinity levels. The target state matters only if we are starting from
+ * affinity level 0 i.e. a cpu. Otherwise the state depends upon the states of
+ * the lower affinity levels.
+ ******************************************************************************/
+int psci_change_state(unsigned long mpidr,
+		      int cur_afflvl,
+		      int tgt_afflvl,
+		      unsigned int tgt_state)
+{
+	int rc = PSCI_E_SUCCESS;
+	unsigned int state;
+	aff_map_node *aff_node;
+
+	/* Sanity check the affinity levels */
+	assert(tgt_afflvl >= cur_afflvl);
+
+	aff_node = psci_get_aff_map_node(mpidr, cur_afflvl);
+	assert(aff_node);
+
+	/* TODO: Check whether the affinity level is present or absent */
+
+	if (cur_afflvl == MPIDR_AFFLVL0) {
+		psci_set_state(aff_node->state, tgt_state);
+	} else {
+		state = psci_calculate_affinity_state(aff_node);
+		psci_set_state(aff_node->state, state);
+	}
+
+	if (cur_afflvl != tgt_afflvl)
+		psci_change_state(mpidr, cur_afflvl + 1, tgt_afflvl, tgt_state);
+
+	return rc;
+}
+
+/*******************************************************************************
+ * This routine does the heavy lifting for psci_change_state(). It examines the
+ * state of each affinity instance at the next lower affinity level and decides
+ * its final state accordingly. If a lower affinity instance is ON then the
+ * higher affinity instance is ON. If all the lower affinity instances are OFF
+ * then the higher affinity instance is OFF. If at least one lower affinity
+ * instance is SUSPENDED then the higher affinity instance is SUSPENDED. If only
+ * a single lower affinity instance is ON_PENDING then the higher affinity
+ * instance is ON_PENDING as well.
+ ******************************************************************************/
+unsigned int psci_calculate_affinity_state(aff_map_node *aff_node)
+{
+	unsigned int ctr, aff_count, hi_aff_state;
+	unsigned long tempidr;
+	aff_map_node *lo_aff_node;
+
+	/* Cannot calculate lowest affinity state. It's simply assigned */
+	assert(aff_node->level > MPIDR_AFFLVL0);
+
+	/*
+	 * Find the number of affinity instances at level X-1 e.g. number of
+	 * cpus in a cluster. The level X state depends upon the state of each
+	 * instance at level X-1
+	 */
+	hi_aff_state = PSCI_STATE_OFF;
+	aff_count = plat_get_aff_count(aff_node->level - 1, aff_node->mpidr);
+	for (ctr = 0; ctr < aff_count; ctr++) {
+
+		/*
+		 * Create an mpidr for each affinity instance at the lower
+		 * level (X-1). Use their states to influence the higher
+		 * affinity state (X).
+		 */
+		tempidr = mpidr_set_aff_inst(aff_node->mpidr,
+					     ctr,
+					     aff_node->level - 1);
+		lo_aff_node = psci_get_aff_map_node(tempidr,
+						    aff_node->level - 1);
+		assert(lo_aff_node);
+
+		/* Continue only if the cpu exists within the cluster */
+		if (!(lo_aff_node->state & PSCI_AFF_PRESENT))
+			continue;
+
+		switch (psci_get_state(lo_aff_node->state)) {
+
+		/*
+		 * If any lower affinity is on within the cluster, then
+		 * the higher affinity is on.
+		 */
+		case PSCI_STATE_ON:
+			return PSCI_STATE_ON;
+
+		/*
+		 * At least one X-1 needs to be suspended for X to be suspended
+		 * but it's effectively on for the affinity_info call.
+		 * SUSPEND > ON_PENDING > OFF.
+		 */
+		case PSCI_STATE_SUSPEND:
+			hi_aff_state = PSCI_STATE_SUSPEND;
+			continue;
+
+		/*
+		 * At least one X-1 needs to be on_pending & the rest off for X
+		 * to be on_pending. ON_PENDING > OFF.
+		 */
+		case PSCI_STATE_ON_PENDING:
+			if (hi_aff_state != PSCI_STATE_SUSPEND)
+				hi_aff_state = PSCI_STATE_ON_PENDING;
+			continue;
+
+		/* Higher affinity is off if all lower affinities are off. */
+		case PSCI_STATE_OFF:
+			continue;
+
+		default:
+			assert(0);
+		}
+	}
+
+	return hi_aff_state;
+}
+
+/*******************************************************************************
+ * This function retrieves all the stashed information needed to correctly
+ * resume a cpu's execution in the non-secure state after it has been physically
+ * powered on i.e. turned ON or resumed from SUSPEND
+ ******************************************************************************/
+unsigned int psci_get_ns_entry_info(unsigned int index)
+{
+	unsigned long sctlr = 0, scr, el_status, id_aa64pfr0;
+
+	scr = read_scr();
+
+	/* Switch to the non-secure view of the registers */
+	write_scr(scr | SCR_NS_BIT);
+
+	/* Find out which EL we are going to */
+	id_aa64pfr0 = read_id_aa64pfr0_el1();
+	el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) &
+		ID_AA64PFR0_ELX_MASK;
+
+	/* Restore endianness */
+	if (psci_ns_entry_info[index].sctlr & SCTLR_EE_BIT)
+		sctlr |= SCTLR_EE_BIT;
+	else
+		sctlr &= ~SCTLR_EE_BIT;
+
+	/* Turn off MMU and Caching */
+	sctlr &= ~(SCTLR_M_BIT | SCTLR_C_BIT);
+
+	/* Set the register width */
+	if (psci_ns_entry_info[index].scr & SCR_RW_BIT)
+		scr |= SCR_RW_BIT;
+	else
+		scr &= ~SCR_RW_BIT;
+
+	scr |= SCR_NS_BIT;
+
+	if (el_status)
+		write_sctlr_el2(sctlr);
+	else
+		write_sctlr_el1(sctlr);
+
+	/* Fulfill the cpu_on entry reqs. as per the psci spec */
+	write_scr(scr);
+	write_spsr(psci_ns_entry_info[index].eret_info.spsr);
+	write_elr(psci_ns_entry_info[index].eret_info.entrypoint);
+
+	return psci_ns_entry_info[index].context_id;
+}
+
+/*******************************************************************************
+ * This function retrieves and stashes all the information needed to correctly
+ * resume a cpu's execution in the non-secure state after it has been physically
+ * powered on i.e. turned ON or resumed from SUSPEND. This is done prior to
+ * turning it on or before suspending it.
+ ******************************************************************************/
+int psci_set_ns_entry_info(unsigned int index,
+			   unsigned long entrypoint,
+			   unsigned long context_id)
+{
+	int rc = PSCI_E_SUCCESS;
+	unsigned int rw, mode, ee, spsr = 0;
+	unsigned long id_aa64pfr0 = read_id_aa64pfr0_el1(), scr = read_scr();
+	unsigned long el_status;
+
+	/* Figure out which mode we enter the non-secure world in */
+	el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) &
+		ID_AA64PFR0_ELX_MASK;
+
+	/*
+	 * Figure out whether the cpu enters the non-secure address space
+	 * in aarch32 or aarch64
+	 */
+	rw = scr & SCR_RW_BIT;
+	if (rw) {
+
+		/*
+		 * Check whether a Thumb entry point has been provided for an
+		 * aarch64 EL
+		 */
+		if (entrypoint & 0x1)
+			return PSCI_E_INVALID_PARAMS;
+
+		if (el_status && (scr & SCR_HCE_BIT)) {
+			mode = MODE_EL2;
+			ee = read_sctlr_el2() & SCTLR_EE_BIT;
+		} else {
+			mode = MODE_EL1;
+			ee = read_sctlr_el1() & SCTLR_EE_BIT;
+		}
+
+		spsr = DAIF_DBG_BIT | DAIF_ABT_BIT;
+		spsr |= DAIF_IRQ_BIT | DAIF_FIQ_BIT;
+		spsr <<= PSR_DAIF_SHIFT;
+		spsr |= make_spsr(mode, MODE_SP_ELX, !rw);
+
+		psci_ns_entry_info[index].sctlr |= ee;
+		psci_ns_entry_info[index].scr |= SCR_RW_BIT;
+	} else {
+
+		/* Check whether aarch32 has to be entered in Thumb mode */
+		if (entrypoint & 0x1)
+			spsr = SPSR32_T_BIT;
+
+		if (el_status && (scr & SCR_HCE_BIT)) {
+			mode = AARCH32_MODE_HYP;
+			ee = read_sctlr_el2() & SCTLR_EE_BIT;
+		} else {
+			mode = AARCH32_MODE_SVC;
+			ee = read_sctlr_el1() & SCTLR_EE_BIT;
+		}
+
+		/*
+		 * TODO: Choose async. exception bits if HYP mode is not
+		 * implemented according to the values of SCR.{AW, FW} bits
+		 */
+		/*
+		 * Shift only the DAIF bits into place so that the Thumb
+		 * bit, if set above, is not relocated.
+		 */
+		spsr |= (DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT) <<
+			PSR_DAIF_SHIFT;
+		if (ee)
+			spsr |= SPSR32_EE_BIT;
+		spsr |= mode;
+
+		/* Ensure that the CPSR.E and SCTLR.EE bits match */
+		psci_ns_entry_info[index].sctlr |= ee;
+		psci_ns_entry_info[index].scr &= ~SCR_RW_BIT;
+	}
+
+	psci_ns_entry_info[index].eret_info.entrypoint = entrypoint;
+	psci_ns_entry_info[index].eret_info.spsr = spsr;
+	psci_ns_entry_info[index].context_id = context_id;
+
+	return rc;
+}
+
+/*******************************************************************************
+ * An affinity level could be on, on_pending, suspended or off. These are the
+ * logical states it can be in. Physically it's either off or on. When it's in
+ * the state on_pending then it's about to be turned on. It's not possible to
+ * tell whether that's actually happened or not. So we err on the side of
+ * caution & treat the affinity level as being turned off.
+ ******************************************************************************/
+inline unsigned int psci_get_phys_state(unsigned int aff_state)
+{
+	return (aff_state != PSCI_STATE_ON ? PSCI_STATE_OFF : PSCI_STATE_ON);
+}
+
+unsigned int psci_get_aff_phys_state(aff_map_node *aff_node)
+{
+	unsigned int aff_state;
+
+	aff_state = psci_get_state(aff_node->state);
+	return psci_get_phys_state(aff_state);
+}
+
+/*******************************************************************************
+ * Generic handler which is called when a cpu is physically powered on. It
+ * recurses through all the affinity levels performing generic, architectural,
+ * platform setup and state management e.g. for a cluster that's been powered
+ * on, it will call the platform specific code which will enable coherency at
+ * the interconnect level. For a cpu it could mean turning on the MMU etc.
+ *
+ * This function traverses from the lowest to the highest affinity level
+ * implemented by the platform. Since it's recursive, for each call the
+ * 'cur_afflvl' & 'tgt_afflvl' parameters keep track of which level we are at
+ * and which level we need to get to respectively. Locks are picked up along
+ * the way so that when the highest affinity level is hit, state management can
+ * be safely done. Prior to this, each affinity level does its bookkeeping as
+ * per the state out of reset.
+ *
+ * CAUTION: This function is called with coherent stacks so that coherency and
+ * the mmu can be turned on safely.
+ ******************************************************************************/
+unsigned int psci_afflvl_power_on_finish(unsigned long mpidr,
+					 int cur_afflvl,
+					 int tgt_afflvl,
+					 afflvl_power_on_finisher *pon_handlers)
+{
+	unsigned int prev_state, next_state, rc = PSCI_E_SUCCESS;
+	aff_map_node *aff_node;
+	int level;
+
+	mpidr &= MPIDR_AFFINITY_MASK;
+
+	/*
+	 * Some affinity instances at levels between the current and
+	 * target levels could be absent in the mpidr. Skip them and
+	 * start from the first present instance.
+	 */
+	level = psci_get_first_present_afflvl(mpidr,
+					      cur_afflvl,
+					      tgt_afflvl,
+					      &aff_node);
+	/*
+	 * Return if there are no more affinity instances beyond this
+	 * level to process. Else ensure that the returned affinity
+	 * node makes sense.
+	 */
+	if (aff_node == NULL)
+		return rc;
+
+	assert(level == aff_node->level);
+
+	/*
+	 * This function acquires the lock corresponding to each
+	 * affinity level so that by the time we hit the highest
+	 * affinity level, a snapshot of the system topology has been
+	 * taken and state management can be done safely.
+	 */
+	bakery_lock_get(mpidr, &aff_node->lock);
+
+	/* Keep the old and new state handy */
+	prev_state = psci_get_state(aff_node->state);
+	next_state = PSCI_STATE_ON;
+
+	/* Perform generic, architecture and platform specific handling */
+	rc = pon_handlers[level](mpidr, aff_node, prev_state);
+	if (rc != PSCI_E_SUCCESS) {
+		psci_set_state(aff_node->state, prev_state);
+		goto exit;
+	}
+
+	/*
+	 * State management: Update the states if this is the highest
+	 * affinity level requested else pass the job to the next level.
+	 */
+	if (aff_node->level != tgt_afflvl) {
+		rc = psci_afflvl_power_on_finish(mpidr,
+						 level + 1,
+						 tgt_afflvl,
+						 pon_handlers);
+	} else {
+		psci_change_state(mpidr, MPIDR_AFFLVL0, tgt_afflvl, next_state);
+	}
+
+	/* If all has gone as per plan then this cpu should be marked as ON */
+	if (level == MPIDR_AFFLVL0) {
+		next_state = psci_get_state(aff_node->state);
+		assert(next_state == PSCI_STATE_ON);
+	}
+
+exit:
+	bakery_lock_release(mpidr, &aff_node->lock);
+	return rc;
+}
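
The merge rule implemented by psci_calculate_affinity_state() above boils down
to a precedence order of ON > SUSPEND > ON_PENDING > OFF over the child states.
A minimal standalone sketch of that fold, with illustrative encodings standing
in for the PSCI_STATE_* values in psci.h:

	enum { STATE_OFF, STATE_ON_PENDING, STATE_SUSPEND, STATE_ON };

	/* Merge one child's state into the parent's accumulated state */
	static int merge_child_state(int parent, int child)
	{
		if (parent == STATE_ON || child == STATE_ON)
			return STATE_ON;	 /* any child on => parent on */
		if (parent == STATE_SUSPEND || child == STATE_SUSPEND)
			return STATE_SUSPEND;	 /* else a suspended child wins */
		if (parent == STATE_ON_PENDING || child == STATE_ON_PENDING)
			return STATE_ON_PENDING; /* else a pending boot wins */
		return STATE_OFF;		 /* all children off */
	}

Folding this over every present child, starting from STATE_OFF, reproduces the
switch statement in psci_calculate_affinity_state().
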
diff --git a/common/psci/psci_entry.S b/common/psci/psci_entry.S
new file mode 100644
index 0000000..4ea74c5
--- /dev/null
+++ b/common/psci/psci_entry.S
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <platform.h>
+#include <psci.h>
+#include <psci_private.h>
+#include <asm_macros.S>
+
+	.globl	psci_aff_on_finish_entry
+	.globl	psci_aff_suspend_finish_entry
+	.globl	__psci_cpu_off
+	.globl	__psci_cpu_suspend
+
+	.section	platform_code, "ax"; .align 3
+
+	/* -----------------------------------------------------
+	 * This cpu has been physically powered up. Depending
+	 * upon whether it was resumed from suspend or simply
+	 * turned on, call the common power on finisher with
+	 * the handlers (chosen depending upon original state).
+	 * For ease, the finisher is called with coherent
+	 * stacks. This allows the cluster/cpu finishers to
+	 * enter coherency and enable the mmu without running
+	 * into issues. We switch back to normal stacks once
+	 * all this is done.
+	 * -----------------------------------------------------
+	 */
+psci_aff_on_finish_entry:
+	adr	x23, psci_afflvl_on_finishers
+	b	psci_aff_common_finish_entry
+
+psci_aff_suspend_finish_entry:
+	adr	x23, psci_afflvl_suspend_finishers
+
+psci_aff_common_finish_entry:
+	adr	x22, psci_afflvl_power_on_finish
+	bl	read_mpidr
+	mov	x19, x0
+	bl	platform_set_coherent_stack
+
+	/* ---------------------------------------------
+	 * Call the finishers starting from affinity
+	 * level 0.
+	 * ---------------------------------------------
+	 */
+	bl	get_max_afflvl
+	mov	x3, x23
+	mov	x2, x0
+	mov	x0, x19
+	mov	x1, #MPIDR_AFFLVL0
+	blr	x22
+	mov	x21, x0
+
+	/* --------------------------------------------
+	 * Give ourselves a stack allocated in Normal
+	 * IS-WBWA memory
+	 * --------------------------------------------
+	 */
+	mov	x0, x19
+	bl	platform_set_stack
+
+	/* --------------------------------------------
+	 * Restore the context id value
+	 * --------------------------------------------
+	 */
+	mov	x0, x21
+
+	/* --------------------------------------------
+	 * Jump back to the non-secure world assuming
+	 * that the elr and spsr setup has been done
+	 * by the finishers
+	 * --------------------------------------------
+	 */
+	eret
+_panic:
+	b	_panic
+
+	/* -----------------------------------------------------
+	 * The following two stubs give the calling cpu a
+	 * coherent stack to allow flushing of caches without
+	 * suffering from stack coherency issues
+	 * -----------------------------------------------------
+	 */
+__psci_cpu_off:
+	func_prologue
+	sub	sp, sp, #0x10
+	stp	x19, x20, [sp, #0]
+	mov	x19, sp
+	bl	read_mpidr
+	bl	platform_set_coherent_stack
+	bl	psci_cpu_off
+	mov	x1, #PSCI_E_SUCCESS
+	cmp	x0, x1
+	b.eq	final_wfi
+	mov	sp, x19
+	ldp	x19, x20, [sp,#0]
+	add	sp, sp, #0x10
+	func_epilogue
+	ret
+
+__psci_cpu_suspend:
+	func_prologue
+	sub	sp, sp, #0x20
+	stp	x19, x20, [sp, #0]
+	stp	x21, x22, [sp, #0x10]
+	mov	x19, sp
+	mov	x20, x0
+	mov	x21, x1
+	mov	x22, x2
+	bl	read_mpidr
+	bl	platform_set_coherent_stack
+	mov	x0, x20
+	mov	x1, x21
+	mov	x2, x22
+	bl	psci_cpu_suspend
+	mov	x1, #PSCI_E_SUCCESS
+	cmp	x0, x1
+	b.eq	final_wfi
+	mov	sp, x19
+	ldp	x21, x22, [sp,#0x10]
+	ldp	x19, x20, [sp,#0]
+	add	sp, sp, #0x20
+	func_epilogue
+	ret
+
+final_wfi:
+	dsb	sy
+	wfi
+wfi_spill:
+	b	wfi_spill
+
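
For orientation, psci_aff_common_finish_entry amounts to the following C-like
sequence (a sketch only; 'finishers' stands for whichever table x23 points at,
and the code stays in assembly because it must run on the coherent stack before
a Normal memory stack is usable):

	unsigned long mpidr = read_mpidr();
	platform_set_coherent_stack(mpidr);

	/* x0-x3: mpidr, start level, end level, per-level finisher table */
	unsigned long ret = psci_afflvl_power_on_finish(mpidr,
							MPIDR_AFFLVL0,
							get_max_afflvl(),
							finishers);

	platform_set_stack(mpidr);	/* back to a Normal memory stack */
	/* eret with x0 = ret; elr/spsr were programmed by the finishers */
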
diff --git a/common/psci/psci_main.c b/common/psci/psci_main.c
new file mode 100644
index 0000000..eca2dec
--- /dev/null
+++ b/common/psci/psci_main.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <console.h>
+#include <platform.h>
+#include <psci_private.h>
+
+/*******************************************************************************
+ * PSCI frontend api for servicing SMCs. Described in the PSCI spec.
+ ******************************************************************************/
+int psci_cpu_on(unsigned long target_cpu,
+		unsigned long entrypoint,
+		unsigned long context_id)
+{
+	int rc;
+	unsigned int start_afflvl, target_afflvl;
+
+	/* Determine if the cpu exists or not */
+	rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
+	if (rc != PSCI_E_SUCCESS) {
+		goto exit;
+	}
+
+	start_afflvl = get_max_afflvl();
+	target_afflvl = MPIDR_AFFLVL0;
+	rc = psci_afflvl_on(target_cpu,
+			    entrypoint,
+			    context_id,
+			    start_afflvl,
+			    target_afflvl);
+
+exit:
+	return rc;
+}
+
+unsigned int psci_version(void)
+{
+	return PSCI_MAJOR_VER | PSCI_MINOR_VER;
+}
+
+int psci_cpu_suspend(unsigned int power_state,
+		     unsigned long entrypoint,
+		     unsigned long context_id)
+{
+	int rc;
+	unsigned long mpidr;
+	unsigned int tgt_afflvl, pstate_type;
+
+	/* TODO: Standby states are not supported at the moment */
+	pstate_type = psci_get_pstate_type(power_state);
+	if (pstate_type == 0) {
+		rc = PSCI_E_INVALID_PARAMS;
+		goto exit;
+	}
+
+	/* Sanity check the requested state */
+	tgt_afflvl = psci_get_pstate_afflvl(power_state);
+	if (tgt_afflvl > MPIDR_MAX_AFFLVL) {
+		rc = PSCI_E_INVALID_PARAMS;
+		goto exit;
+	}
+
+	mpidr = read_mpidr();
+	rc = psci_afflvl_suspend(mpidr,
+				 entrypoint,
+				 context_id,
+				 power_state,
+				 tgt_afflvl,
+				 MPIDR_AFFLVL0);
+
+exit:
+	if (rc != PSCI_E_SUCCESS)
+		assert(rc == PSCI_E_INVALID_PARAMS);
+	return rc;
+}
+
+int psci_cpu_off(void)
+{
+	int rc;
+	unsigned long mpidr;
+	int target_afflvl = get_max_afflvl();
+
+	mpidr = read_mpidr();
+
+	/*
+	 * Traverse from the highest to the lowest affinity level. When the
+	 * lowest affinity level is hit, all the locks are acquired. State
+	 * management is done immediately, followed by cpu, cluster ...
+	 * target_afflvl specific actions as this function unwinds back.
+	 */
+	rc = psci_afflvl_off(mpidr, target_afflvl, MPIDR_AFFLVL0);
+
+	if (rc != PSCI_E_SUCCESS) {
+		assert(rc == PSCI_E_DENIED);
+	}
+
+	return rc;
+}
+
+int psci_affinity_info(unsigned long target_affinity,
+		       unsigned int lowest_affinity_level)
+{
+	int rc = PSCI_E_INVALID_PARAMS;
+	unsigned int aff_state;
+	aff_map_node *node;
+
+	if (lowest_affinity_level > get_max_afflvl()) {
+		goto exit;
+	}
+
+	node = psci_get_aff_map_node(target_affinity, lowest_affinity_level);
+	if (node && (node->state & PSCI_AFF_PRESENT)) {
+		aff_state = psci_get_state(node->state);
+
+		/* A suspended cpu is available & on for the OS */
+		if (aff_state == PSCI_STATE_SUSPEND) {
+			aff_state = PSCI_STATE_ON;
+		}
+
+		rc = aff_state;
+	}
+exit:
+	return rc;
+}
+
+/* Unimplemented */
+int psci_migrate(unsigned int target_cpu)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+/* Unimplemented */
+unsigned int psci_migrate_info_type(void)
+{
+	return PSCI_TOS_NOT_PRESENT;
+}
+
+unsigned long psci_migrate_info_up_cpu(void)
+{
+	/*
+	 * Return value of this currently unsupported call depends upon
+	 * what psci_migrate_info_type() returns.
+	 */
+	return PSCI_E_SUCCESS;
+}
+
+/* Unimplemented */
+void psci_system_off(void)
+{
+	assert(0);
+}
+
+/* Unimplemented */
+void psci_system_reset(void)
+{
+	assert(0);
+}
+
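
psci_cpu_suspend() above validates only the state type and affinity level
fields of the caller's power_state. Assuming the PSCI power_state layout (state
id in bits [15:0], state type in bit [16], affinity level in bits [25:24]), a
caller could assemble a cluster-level powerdown request as follows; the field
macros here are illustrative stand-ins, not the ones from psci.h:

	#define PSTATE_ID(x)		((x) & 0xffff)
	#define PSTATE_TYPE_PWRDOWN	(1u << 16)	/* type 0 = standby */
	#define PSTATE_AFFLVL(x)	(((x) & 0x3u) << 24)

	unsigned int power_state = PSTATE_ID(0) |
				   PSTATE_TYPE_PWRDOWN |  /* standby is rejected */
				   PSTATE_AFFLVL(1);	  /* cluster level */

	/* rc = psci_cpu_suspend(power_state, entrypoint, context_id); */
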
diff --git a/common/psci/psci_private.h b/common/psci/psci_private.h
new file mode 100644
index 0000000..48d40d0
--- /dev/null
+++ b/common/psci/psci_private.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PSCI_PRIVATE_H__
+#define __PSCI_PRIVATE_H__
+
+#include <bakery_lock.h>
+
+#ifndef __ASSEMBLY__
+/*******************************************************************************
+ * The following two data structures hold the generic information needed to
+ * bring up a suspended/hotplugged out cpu
+ ******************************************************************************/
+typedef struct {
+	unsigned long entrypoint;
+	unsigned long spsr;
+} eret_params;
+
+typedef struct {
+	eret_params eret_info;
+	unsigned long context_id;
+	unsigned int scr;
+	unsigned int sctlr;
+} ns_entry_info;
+
+/*******************************************************************************
+ * This data structure holds the secure world context that needs to be
+ * preserved for a cpu across power down & power up cycles e.g. the translation
+ * table and exception vector base registers.
+ ******************************************************************************/
+typedef struct {
+	unsigned long sctlr;
+	unsigned long scr;
+	unsigned long cptr;
+	unsigned long cpacr;
+	unsigned long cntfrq;
+	unsigned long mair;
+	unsigned long tcr;
+	unsigned long ttbr;
+	unsigned long vbar;
+} secure_context;
+
+/*******************************************************************************
+ * The following two data structures hold the topology tree which in turn
+ * tracks the state of all the affinity instances supported by the platform.
+ ******************************************************************************/
+typedef struct {
+	unsigned long mpidr;
+	unsigned char state;
+	char level;
+	unsigned int data;
+	bakery_lock lock;
+} aff_map_node;
+
+typedef struct {
+	int min;
+	int max;
+} aff_limits_node;
+
+typedef unsigned int (*afflvl_power_on_finisher)(unsigned long,
+						 aff_map_node *,
+						 unsigned int);
+
+/*******************************************************************************
+ * Data prototypes
+ ******************************************************************************/
+extern secure_context psci_secure_context[PSCI_NUM_AFFS];
+extern ns_entry_info psci_ns_entry_info[PSCI_NUM_AFFS];
+extern unsigned int psci_ns_einfo_idx;
+extern aff_limits_node psci_aff_limits[MPIDR_MAX_AFFLVL + 1];
+extern plat_pm_ops *psci_plat_pm_ops;
+extern aff_map_node psci_aff_map[PSCI_NUM_AFFS];
+extern const afflvl_power_on_finisher psci_afflvl_on_finishers[];
+extern const afflvl_power_on_finisher psci_afflvl_suspend_finishers[];
+
+/*******************************************************************************
+ * Function prototypes
+ ******************************************************************************/
+/* Private exported functions from psci_common.c */
+extern int get_max_afflvl(void);
+extern unsigned int psci_get_phys_state(unsigned int);
+extern unsigned int psci_get_aff_phys_state(aff_map_node *);
+extern unsigned int psci_calculate_affinity_state(aff_map_node *);
+extern unsigned int psci_get_ns_entry_info(unsigned int index);
+extern unsigned long mpidr_set_aff_inst(unsigned long, unsigned char, int);
+extern int psci_change_state(unsigned long, int, int, unsigned int);
+extern int psci_validate_mpidr(unsigned long, int);
+extern unsigned int psci_afflvl_power_on_finish(unsigned long,
+						int,
+						int,
+						afflvl_power_on_finisher *);
+extern int psci_set_ns_entry_info(unsigned int index,
+				  unsigned long entrypoint,
+				  unsigned long context_id);
+extern int psci_get_first_present_afflvl(unsigned long,
+					 int, int,
+					 aff_map_node **);
+/* Private exported functions from psci_setup.c */
+extern aff_map_node *psci_get_aff_map_node(unsigned long, int);
+
+/* Private exported functions from psci_affinity_on.c */
+extern int psci_afflvl_on(unsigned long,
+			  unsigned long,
+			  unsigned long,
+			  int,
+			  int);
+
+/* Private exported functions from psci_affinity_off.c */
+extern int psci_afflvl_off(unsigned long, int, int);
+
+/* Private exported functions from psci_affinity_suspend.c */
+extern int psci_afflvl_suspend(unsigned long,
+			       unsigned long,
+			       unsigned long,
+			       unsigned int,
+			       int,
+			       int);
+extern unsigned int psci_afflvl_suspend_finish(unsigned long, int, int);
+#endif /*__ASSEMBLY__*/
+
+#endif /* __PSCI_PRIVATE_H__ */
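
On the two-cluster, four-cpus-per-cluster example worked through in
psci_setup.c below, the 'psci_aff_limits' entries would pick out the following
slices of the flattened 'psci_aff_map' (illustrative values only, not
initialisation code that exists anywhere in the tree):

	aff_limits_node limits_example[MPIDR_MAX_AFFLVL + 1] = {
		[MPIDR_AFFLVL0] = { .min = 2, .max = 9 },  /* 8 cpu nodes */
		[MPIDR_AFFLVL1] = { .min = 0, .max = 1 },  /* 2 cluster nodes */
	};
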
diff --git a/common/psci/psci_setup.c b/common/psci/psci_setup.c
new file mode 100644
index 0000000..9095e75
--- /dev/null
+++ b/common/psci/psci_setup.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <console.h>
+#include <platform.h>
+#include <psci_private.h>
+
+/*******************************************************************************
+ * Routines for retrieving the node corresponding to an affinity level instance
+ * in the mpidr. The first one uses binary search to find the node corresponding
+ * to the mpidr (key) at a particular affinity level. The second routine decides
+ * extents of the binary search at each affinity level.
+ ******************************************************************************/
+static int psci_aff_map_get_idx(unsigned long key,
+				int min_idx,
+				int max_idx)
+{
+	int mid;
+
+	/*
+	 * Terminating condition: If the max and min indices have crossed paths
+	 * during the binary search then the key has not been found.
+	 */
+	if (max_idx < min_idx)
+		return PSCI_E_INVALID_PARAMS;
+
+	/*
+	 * Bisect the array around 'mid' and then recurse into the array chunk
+	 * where the key is likely to be found. The mpidrs in each node in the
+	 * 'psci_aff_map' for a given affinity level are stored in an ascending
+	 * order which makes the binary search possible.
+	 */
+	mid = min_idx + ((max_idx - min_idx) >> 1);	/* Divide by 2 */
+	if (psci_aff_map[mid].mpidr > key)
+		return psci_aff_map_get_idx(key, min_idx, mid - 1);
+	else if (psci_aff_map[mid].mpidr < key)
+		return psci_aff_map_get_idx(key, mid + 1, max_idx);
+	else
+		return mid;
+}
+
+aff_map_node *psci_get_aff_map_node(unsigned long mpidr, int aff_lvl)
+{
+	int rc;
+
+	/* Right shift the mpidr to the required affinity level */
+	mpidr = mpidr_mask_lower_afflvls(mpidr, aff_lvl);
+
+	rc = psci_aff_map_get_idx(mpidr,
+				  psci_aff_limits[aff_lvl].min,
+				  psci_aff_limits[aff_lvl].max);
+	if (rc >= 0)
+		return &psci_aff_map[rc];
+	else
+		return NULL;
+}
+
+/*******************************************************************************
+ * Function which initializes the 'aff_map_node' corresponding to an affinity
+ * level instance. Each node has a unique mpidr, level and bakery lock. The data
+ * field is opaque and holds affinity level specific data e.g. for affinity
+ * level 0 it contains the index into arrays that hold the secure/non-secure
+ * state for a cpu that's been turned on/off
+ ******************************************************************************/
+static void psci_init_aff_map_node(unsigned long mpidr,
+				   int level,
+				   unsigned int idx)
+{
+	unsigned char state;
+	psci_aff_map[idx].mpidr = mpidr;
+	psci_aff_map[idx].level = level;
+	bakery_lock_init(&psci_aff_map[idx].lock);
+
+	/*
+	 * If an affinity instance is present then mark it as OFF to begin with.
+	 */
+	state = plat_get_aff_state(level, mpidr);
+	psci_aff_map[idx].state = state;
+	if (state & PSCI_AFF_PRESENT) {
+		psci_set_state(psci_aff_map[idx].state, PSCI_STATE_OFF);
+	}
+
+	if (level == MPIDR_AFFLVL0) {
+		/* Ensure that we have not overflowed the psci_ns_einfo array */
+		assert(psci_ns_einfo_idx < PSCI_NUM_AFFS);
+
+		psci_aff_map[idx].data = psci_ns_einfo_idx;
+		psci_ns_einfo_idx++;
+	}
+
+	return;
+}
+
+/*******************************************************************************
+ * Core routine used by the Breadth-First-Search algorithm to populate the
+ * affinity tree. Each level in the tree corresponds to an affinity level. This
+ * routine's aim is to traverse to the target affinity level and populate nodes
+ * in the 'psci_aff_map' for all the siblings at that level. It uses the current
+ * affinity level to keep track of how many levels from the root of the tree
+ * have been traversed. If the current affinity level != target affinity level,
+ * then the platform is asked to return the number of children that each
+ * affinity instance has at the current affinity level. Traversal is then done
+ * for each child at the next lower level i.e. current affinity level - 1.
+ *
+ * CAUTION: This routine assumes that affinity instance ids are allocated in a
+ * monotonically increasing manner at each affinity level in a mpidr starting
+ * from 0. If the platform breaks this assumption then this code will have to
+ * be reworked accordingly.
+ ******************************************************************************/
+static unsigned int psci_init_aff_map(unsigned long mpidr,
+				      unsigned int affmap_idx,
+				      int cur_afflvl,
+				      int tgt_afflvl)
+{
+	unsigned int ctr, aff_count;
+
+	assert(cur_afflvl >= tgt_afflvl);
+
+	/*
+	 * Find the number of siblings at the current affinity level &
+	 * assert if there are none because then we have been invoked with
+	 * an invalid mpidr.
+	 */
+	aff_count = plat_get_aff_count(cur_afflvl, mpidr);
+	assert(aff_count);
+
+	if (tgt_afflvl < cur_afflvl) {
+		for (ctr = 0; ctr < aff_count; ctr++) {
+			mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
+			affmap_idx = psci_init_aff_map(mpidr,
+						       affmap_idx,
+						       cur_afflvl - 1,
+						       tgt_afflvl);
+		}
+	} else {
+		for (ctr = 0; ctr < aff_count; ctr++, affmap_idx++) {
+			mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
+			psci_init_aff_map_node(mpidr, cur_afflvl, affmap_idx);
+		}
+
+		/* affmap_idx is 1 greater than the max index of cur_afflvl */
+		psci_aff_limits[cur_afflvl].max = affmap_idx - 1;
+	}
+
+	return affmap_idx;
+}
+
+/*******************************************************************************
+ * This function initializes the topology tree by querying the platform. To do
+ * so, its helper routines implement a Breadth-First-Search. At each affinity
+ * level the platform conveys the number of affinity instances that exist i.e.
+ * the affinity count. The algorithm populates the psci_aff_map recursively
+ * using this information. On a platform that implements two clusters of 4 cpus
+ * each, the populated aff_map_array would look like this:
+ *
+ *            <- cpus cluster0 -><- cpus cluster1 ->
+ * ---------------------------------------------------
+ * | 0  | 1  | 0  | 1  | 2  | 3  | 0  | 1  | 2  | 3  |
+ * ---------------------------------------------------
+ *           ^                                       ^
+ * cluster __|                                 cpu __|
+ * limit                                      limit
+ *
+ * The first 2 entries are of the cluster nodes. The next 4 entries are of cpus
+ * within cluster 0. The last 4 entries are of cpus within cluster 1.
+ * The 'psci_aff_limits' array contains the max & min index of each affinity
+ * level within the 'psci_aff_map' array. This allows restricting search of a
+ * node at an affinity level between the indices in the limits array.
+ ******************************************************************************/
+void psci_setup(unsigned long mpidr)
+{
+	int afflvl, affmap_idx, rc, max_afflvl;
+	aff_map_node *node;
+
+	/* Initialize psci's internal state */
+	memset(psci_aff_map, 0, sizeof(psci_aff_map));
+	memset(psci_aff_limits, 0, sizeof(psci_aff_limits));
+	memset(psci_ns_entry_info, 0, sizeof(psci_ns_entry_info));
+	psci_ns_einfo_idx = 0;
+	psci_plat_pm_ops = NULL;
+
+	/* Find out the maximum affinity level that the platform implements */
+	max_afflvl = get_max_afflvl();
+	assert(max_afflvl <= MPIDR_MAX_AFFLVL);
+
+	/*
+	 * This call traverses the topology tree with help from the platform and
+	 * populates the affinity map using a breadth-first-search recursively.
+	 * We assume that the platform allocates affinity instance ids from 0
+	 * onwards at each affinity level in the mpidr. FIRST_MPIDR = 0.0.0.0
+	 */
+	affmap_idx = 0;
+	for (afflvl = max_afflvl; afflvl >= MPIDR_AFFLVL0; afflvl--) {
+		affmap_idx = psci_init_aff_map(FIRST_MPIDR,
+					       affmap_idx,
+					       max_afflvl,
+					       afflvl);
+	}
+
+	/*
+	 * Set the bounds for the affinity counts of each level in the map. Also
+	 * flush out the 'psci_aff_limits' array so that it's visible to
+	 * subsequent power management operations. The 'psci_aff_map' array is
+	 * allocated in coherent memory and so does not need flushing. The
+	 * 'psci_aff_limits' array is allocated in normal memory and will be
+	 * accessed when the mmu is off e.g. after reset. Hence it needs to be
+	 * flushed.
+	 */
+	for (afflvl = MPIDR_AFFLVL0; afflvl < max_afflvl; afflvl++) {
+		psci_aff_limits[afflvl].min =
+			psci_aff_limits[afflvl + 1].max + 1;
+	}
+
+	flush_dcache_range((unsigned long) psci_aff_limits,
+			   sizeof(psci_aff_limits));
+
+	/*
+	 * Mark the affinity instances in our mpidr as ON. No need to lock as
+	 * this is the primary cpu.
+	 */
+	mpidr &= MPIDR_AFFINITY_MASK;
+	for (afflvl = max_afflvl; afflvl >= MPIDR_AFFLVL0; afflvl--) {
+
+		node = psci_get_aff_map_node(mpidr, afflvl);
+		assert(node);
+
+		/* Mark each present node as ON. */
+		if (node->state & PSCI_AFF_PRESENT) {
+			psci_set_state(node->state, PSCI_STATE_ON);
+		}
+	}
+
+	rc = platform_setup_pm(&psci_plat_pm_ops);
+	assert(rc == 0);
+	assert(psci_plat_pm_ops);
+
+	return;
+}
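
To make the lookup concrete: on the example topology above, finding the cluster
node for cpu 1 of cluster 1 (mpidr 0x101) at affinity level 1 masks off the
Aff0 field and then binary-searches only the cluster slice of the map. A sketch
of the steps psci_get_aff_map_node() performs, with index values taken from the
example layout:

	unsigned long mpidr = 0x101;	/* cluster 1, cpu 1 */
	int idx;

	mpidr = mpidr_mask_lower_afflvls(mpidr, MPIDR_AFFLVL1);	/* -> 0x100 */
	idx = psci_aff_map_get_idx(mpidr,
				   psci_aff_limits[MPIDR_AFFLVL1].min,	/* 0 */
				   psci_aff_limits[MPIDR_AFFLVL1].max);	/* 1 */
	/* idx == 1: the second cluster node in the flattened map */
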
diff --git a/common/runtime_svc.c b/common/runtime_svc.c
new file mode 100644
index 0000000..ed1225f
--- /dev/null
+++ b/common/runtime_svc.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <console.h>
+#include <platform.h>
+#include <semihosting.h>
+#include <bl_common.h>
+#include <psci.h>
+
+/*******************************************************************************
+ * Perform initialization of runtime services possibly across exception levels
+ * in the secure address space e.g. psci & interrupt handling.
+ ******************************************************************************/
+void runtime_svc_init(unsigned long mpidr)
+{
+	psci_setup(mpidr);
+	return;
+}