Set up VBAR_EL3 incrementally

This patch ensures that VBAR_EL3 points to the simple, stack-less
'early_exceptions' vectors whenever the C runtime stack has not yet been
set up for use by the more complex 'runtime_exceptions'. VBAR_EL3 is
switched to 'runtime_exceptions' once the stack is ready.

This patch also moves all exception vectors into a '.vectors' section
and modifies the linker scripts to place all such sections together.
This minimises the space wasted by the vectors' alignment restrictions.

Change-Id: I8c3e596ea3412c8bd582af9e8d622bb1cb2e049d
diff --git a/bl1/aarch64/early_exceptions.S b/bl1/aarch64/early_exceptions.S
index ef47f9e..84bdae1 100644
--- a/bl1/aarch64/early_exceptions.S
+++ b/bl1/aarch64/early_exceptions.S
@@ -37,12 +37,12 @@
 	.globl	early_exceptions
 	.weak	display_boot_progress
 
-	.section	.text, "ax"; .align 11
+	.section	.vectors, "ax"; .align 11
 
 	/* -----------------------------------------------------
-	 * Very simple exception handlers used by BL1 and BL2.
-	 * Apart from one SMC exception all other traps loop
-	 * endlessly.
+	 * Very simple stackless exception handlers used by all
+	 * bootloader stages. BL31 uses them before stacks are
+	 * setup. BL1/BL2 use them throughout.
 	 * -----------------------------------------------------
 	 */
 	.align	7
@@ -164,6 +164,7 @@
 
 	.align	7
 
+	.section	.text, "ax"
 process_exception:
 	sub	sp, sp, #0x40
 	stp	x0, x1, [sp, #0x0]
diff --git a/bl1/bl1.ld.S b/bl1/bl1.ld.S
index 969b8c2..ac52e52 100644
--- a/bl1/bl1.ld.S
+++ b/bl1/bl1.ld.S
@@ -45,6 +45,7 @@
         *bl1_entrypoint.o(.text)
         *(.text)
         *(.rodata*)
+        *(.vectors)
         __RO_END__ = .;
     } >ROM
 
diff --git a/bl2/bl2.ld.S b/bl2/bl2.ld.S
index 849297a..77eb201 100644
--- a/bl2/bl2.ld.S
+++ b/bl2/bl2.ld.S
@@ -49,6 +49,7 @@
         *bl2_entrypoint.o(.text)
         *(.text)
         *(.rodata*)
+        *(.vectors)
         __RO_END_UNALIGNED__ = .;
         /*
          * Memory page(s) mapped to this section will be marked as
diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
index 57f6551..cd0c023 100644
--- a/bl31/aarch64/bl31_entrypoint.S
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -58,7 +58,7 @@
 	 * Set the exception vector to something sane.
 	 * ---------------------------------------------
 	 */
-	adr	x1, runtime_exceptions
+	adr	x1, early_exceptions
 	msr	vbar_el3, x1
 
 	/* ---------------------------------------------------------------------
@@ -155,6 +155,14 @@
 	bl	platform_set_stack
 
 	/* ---------------------------------------------
+	 * Use the more complex exception vectors now
+	 * that the stacks are set up.
+	 * ---------------------------------------------
+	 */
+	adr	x1, runtime_exceptions
+	msr	vbar_el3, x1
+
+	/* ---------------------------------------------
 	 * Use SP_EL0 to initialize BL31. It allows us
 	 * to jump to the next image without having to
 	 * come back here to ensure all of the stack's
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index ce0af74..92835dc 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -37,8 +37,8 @@
 #include <asm_macros.S>
 
 
-	.section	.text, "ax"; .align 11
-	
+	.section	.vectors, "ax"; .align 11
+
 	.align	7
 runtime_exceptions:
 	/* -----------------------------------------------------
diff --git a/bl31/bl31.ld.S b/bl31/bl31.ld.S
index 7cc8527..2583c9a 100644
--- a/bl31/bl31.ld.S
+++ b/bl31/bl31.ld.S
@@ -50,6 +50,7 @@
         *bl31_entrypoint.o(.text)
         *(.text)
         *(.rodata*)
+        *(.vectors)
         __RO_END_UNALIGNED__ = .;
         /*
          * Memory page(s) mapped to this section will be marked as read-only,
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 5c374aa..f7c2168 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -62,7 +62,8 @@
 				spinlock.o				\
 				gic_v3_sysregs.o			\
 				bakery_lock.o				\
-				runtime_svc.o
+				runtime_svc.o				\
+				early_exceptions.o
 
 BL31_ENTRY_POINT	:=	bl31_entrypoint
 BL31_MAPFILE		:=	bl31.map
diff --git a/common/psci/psci_afflvl_on.c b/common/psci/psci_afflvl_on.c
index 83d47d5..14f524c 100644
--- a/common/psci/psci_afflvl_on.c
+++ b/common/psci/psci_afflvl_on.c
@@ -353,7 +353,6 @@
 	/*
 	 * Arch. management: Turn on mmu & restore architectural state
 	 */
-	write_vbar((unsigned long) runtime_exceptions);
 	enable_mmu();
 
 	/*
diff --git a/common/psci/psci_afflvl_suspend.c b/common/psci/psci_afflvl_suspend.c
index 2abcafb..f374840 100644
--- a/common/psci/psci_afflvl_suspend.c
+++ b/common/psci/psci_afflvl_suspend.c
@@ -115,7 +115,6 @@
 	psci_suspend_context[index].sec_sysregs.mair = read_mair();
 	psci_suspend_context[index].sec_sysregs.tcr = read_tcr();
 	psci_suspend_context[index].sec_sysregs.ttbr = read_ttbr0();
-	psci_suspend_context[index].sec_sysregs.vbar = read_vbar();
 	psci_suspend_context[index].sec_sysregs.pstate =
 		read_daif() & (DAIF_ABT_BIT | DAIF_DBG_BIT);
 
@@ -424,7 +423,6 @@
 	 * Arch. management: Restore the stashed secure architectural
 	 * context in the right order.
 	 */
-	write_vbar(psci_suspend_context[index].sec_sysregs.vbar);
 	write_daif(read_daif() | psci_suspend_context[index].sec_sysregs.pstate);
 	write_mair(psci_suspend_context[index].sec_sysregs.mair);
 	write_tcr(psci_suspend_context[index].sec_sysregs.tcr);
diff --git a/common/psci/psci_entry.S b/common/psci/psci_entry.S
index a6a1a6f..28a4143 100644
--- a/common/psci/psci_entry.S
+++ b/common/psci/psci_entry.S
@@ -63,6 +63,17 @@
 
 psci_aff_common_finish_entry:
 	adr	x22, psci_afflvl_power_on_finish
+
+	/* ---------------------------------------------
+	 * Exceptions should not occur at this point.
+	 * Set VBAR in order to handle and report any
+	 * that do occur
+	 * ---------------------------------------------
+	 */
+	adr	x0, early_exceptions
+	msr	vbar_el3, x0
+	isb
+
 	bl	read_mpidr
 	mov	x19, x0
 	bl	platform_set_coherent_stack
@@ -90,6 +101,16 @@
 	mov	x0, x19
 	bl	platform_set_stack
 
+	/* ---------------------------------------------
+	 * Now that the execution stack has been set
+	 * up, enable full runtime exception handling.
+	 * Since we're just about to leave this EL with
+	 * ERET, we don't need an ISB here
+	 * ---------------------------------------------
+	 */
+	adr	x0, runtime_exceptions
+	msr	vbar_el3, x0
+
 	/* --------------------------------------------
 	 * Use the size of the general purpose register
 	 * context to restore the register state
diff --git a/include/aarch64/arch.h b/include/aarch64/arch.h
index 10d2adb..f40e148 100644
--- a/include/aarch64/arch.h
+++ b/include/aarch64/arch.h
@@ -326,7 +326,6 @@
 	unsigned long mair;
 	unsigned long tcr;
 	unsigned long ttbr;
-	unsigned long vbar;
 	unsigned long pstate;
 } sysregs_context;