AArch32: Refactor SP_MIN to support RESET_TO_SP_MIN

This patch uses the `el3_entrypoint_common` macro to initialise the
CPU registers in SP_MIN's entrypoint.S file, on both the cold and
warm boot paths. It also adds conditional compilation to the cold
and warm boot entry paths based on RESET_TO_SP_MIN and, since the
vector table is now declared with the `vector_base` macro, makes the
SP_MIN linker script collect the resulting `.vectors` section.
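
As an illustrative sketch of how this is exercised (not a definitive
command line: the platform name and toolchain prefix below are
placeholders, and the exact option set depends on the platform port),
SP_MIN would be built as the reset image along the lines of:

    make CROSS_COMPILE=arm-none-eabi- PLAT=<platform> ARCH=aarch32 \
        AARCH32_SP=sp_min RESET_TO_SP_MIN=1 all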

Change-Id: Id493ca840dc7b9e26948dc78ee928e9fdb76b9e4
diff --git a/bl32/sp_min/aarch32/entrypoint.S b/bl32/sp_min/aarch32/entrypoint.S
index 33d35b9..54f2ced 100644
--- a/bl32/sp_min/aarch32/entrypoint.S
+++ b/bl32/sp_min/aarch32/entrypoint.S
@@ -32,6 +32,7 @@
 #include <asm_macros.S>
 #include <bl_common.h>
 #include <context.h>
+#include <el3_common_macros.S>
 #include <runtime_svc.h>
 #include <smcc_helpers.h>
 #include <smcc_macros.S>
@@ -41,7 +42,8 @@
 	.globl	sp_min_entrypoint
 	.globl	sp_min_warm_entrypoint
 
-func sp_min_vector_table
+
+vector_base sp_min_vector_table
 	b	sp_min_entrypoint
 	b	plat_panic_handler	/* Undef */
 	b	handle_smc		/* Syscall */
@@ -50,185 +52,70 @@
 	b	plat_panic_handler	/* Reserved */
 	b	plat_panic_handler	/* IRQ */
 	b	plat_panic_handler	/* FIQ */
-endfunc sp_min_vector_table
-
-func handle_smc
-	smcc_save_gp_mode_regs
-
-	/* r0 points to smc_context */
-	mov	r2, r0				/* handle */
-	ldcopr	r0, SCR
-
-	/* Save SCR in stack */
-	push	{r0}
-	and	r3, r0, #SCR_NS_BIT		/* flags */
-
-	/* Switch to Secure Mode*/
-	bic	r0, #SCR_NS_BIT
-	stcopr	r0, SCR
-	isb
-	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
-	/* Check whether an SMC64 is issued */
-	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
-	beq	1f	/* SMC32 is detected */
-	mov	r0, #SMC_UNK
-	str	r0, [r2, #SMC_CTX_GPREG_R0]
-	mov	r0, r2
-	b	2f	/* Skip handling the SMC */
-1:
-	mov	r1, #0				/* cookie */
-	bl	handle_runtime_svc
-2:
-	/* r0 points to smc context */
-
-	/* Restore SCR from stack */
-	pop	{r1}
-	stcopr	r1, SCR
-	isb
 
-	b	sp_min_exit
-endfunc handle_smc
 
 /*
  * The Cold boot/Reset entrypoint for SP_MIN
  */
 func sp_min_entrypoint
-
-	/*
-	 * The caches and TLBs are disabled at reset. If any implementation
-	 * allows the caches/TLB to be hit while they are disabled, ensure
-	 * that they are invalidated here
+#if !RESET_TO_SP_MIN
+	/* ---------------------------------------------------------------
+	 * Preceding bootloader has populated r0 with a pointer to a
+	 * 'bl_params_t' structure & r1 with a pointer to platform
+	 * specific structure
+	 * ---------------------------------------------------------------
 	 */
+	mov	r11, r0
+	mov	r12, r1
 
-	/* Make sure we are in Secure Mode*/
-	ldcopr	r0, SCR
-	bic	r0, #SCR_NS_BIT
-	stcopr	r0, SCR
-	isb
-
-	/* Switch to monitor mode */
-	cps	#MODE32_mon
-	isb
-
-	/*
-	 * Set sane values for NS SCTLR as well.
-	 * Switch to non secure mode for this.
+	/* ---------------------------------------------------------------------
+	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
+	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
+	 * and primary/secondary CPU logic should not be executed in this case.
+	 *
+	 * Also, assume that the previous bootloader has already set up the CPU
+	 * endianness and has initialised the memory.
+	 * ---------------------------------------------------------------------
 	 */
-	ldr	r0, =(SCTLR_RES1)
-	ldcopr	r1, SCR
-	orr	r2, r1, #SCR_NS_BIT
-	stcopr	r2, SCR
-	isb
+	el3_entrypoint_common					\
+		_set_endian=0					\
+		_warm_boot_mailbox=0				\
+		_secondary_cold_boot=0				\
+		_init_memory=0					\
+		_init_c_runtime=1				\
+		_exception_vectors=sp_min_vector_table
 
-	ldcopr	r2, SCTLR
-	orr	r0, r0, r2
-	stcopr	r0, SCTLR
-	isb
-
-	stcopr	r1, SCR
-	isb
-
-	/*
-	 * Set the CPU endianness before doing anything that might involve
-	 * memory reads or writes.
+	/* ---------------------------------------------------------------------
+	 * Relay the previous bootloader's arguments to the platform layer
+	 * ---------------------------------------------------------------------
 	 */
-	ldcopr	r0, SCTLR
-	bic	r0, r0, #SCTLR_EE_BIT
-	stcopr	r0, SCTLR
-	isb
-
-	/* Run the CPU Specific Reset handler */
-	bl	reset_handler
-
-	/*
-	 * Enable the instruction cache and data access
-	 * alignment checks
+	mov	r0, r11
+	mov	r1, r12
+#else
+	/* ---------------------------------------------------------------------
+	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
+	 * sp_min_entrypoint() is executed only on the cold boot path so we can
+	 * skip the warm boot mailbox mechanism.
+	 * ---------------------------------------------------------------------
 	 */
-	ldcopr	r0, SCTLR
-	ldr	r1, =(SCTLR_RES1 | SCTLR_A_BIT | SCTLR_I_BIT)
-	orr	r0, r0, r1
-	stcopr	r0, SCTLR
-	isb
-
-	/* Set the vector tables */
-	ldr	r0, =sp_min_vector_table
-	stcopr	r0, VBAR
-	stcopr	r0, MVBAR
-	isb
+	el3_entrypoint_common					\
+		_set_endian=1					\
+		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
+		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
+		_init_memory=1					\
+		_init_c_runtime=1				\
+		_exception_vectors=sp_min_vector_table
 
-	/*
-	 * Enable the SIF bit to disable instruction fetches
-	 * from Non-secure memory.
+	/* ---------------------------------------------------------------------
+	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
+	 * to run so there's no argument to relay from a previous bootloader.
+	 * Zero the arguments passed to the platform layer to reflect that.
+	 * ---------------------------------------------------------------------
 	 */
-	ldcopr	r0, SCR
-	orr	r0, r0, #SCR_SIF_BIT
-	stcopr	r0, SCR
+	mov	r0, #0
+	mov	r1, #0
+#endif /* RESET_TO_SP_MIN */
 
-	/*
-	 * Enable the SError interrupt now that the exception vectors have been
-	 * setup.
-	 */
-	cpsie   a
-	isb
-
-	/* Enable access to Advanced SIMD registers */
-	ldcopr	r0, NSACR
-	bic	r0, r0, #NSASEDIS_BIT
-	orr	r0, r0, #(NASCR_CP10_BIT | NASCR_CP11_BIT)
-	stcopr	r0, NSACR
-	isb
-
-	/*
-	 * Enable access to Advanced SIMD, Floating point and to the Trace
-	 * functionality as well.
-	 */
-	ldcopr	r0, CPACR
-	bic	r0, r0, #ASEDIS_BIT
-	bic	r0, r0, #TRCDIS_BIT
-	orr	r0, r0, #CPACR_ENABLE_FP_ACCESS
-	stcopr	r0, CPACR
-	isb
-
-	vmrs	r0, FPEXC
-	orr	r0, r0, #FPEXC_EN_BIT
-	vmsr	FPEXC, r0
-
-	/* Detect whether Warm or Cold boot */
-	bl	plat_get_my_entrypoint
-	cmp	r0, #0
-	/* If warm boot detected, jump to warm boot entry */
-	bxne	r0
-
-	/* Setup C runtime stack */
-	bl	plat_set_my_stack
-
-	/* Perform platform specific memory initialization */
-	bl	platform_mem_init
-
-	/* Initialize the C Runtime Environment */
-
-	/*
-	 * Invalidate the RW memory used by SP_MIN image. This includes
-	 * the data and NOBITS sections. This is done to safeguard against
-	 * possible corruption of this memory by dirty cache lines in a system
-	 * cache as a result of use by an earlier boot loader stage.
-	 */
-	ldr	r0, =__RW_START__
-	ldr	r1, =__RW_END__
-	sub	r1, r1, r0
-	bl	inv_dcache_range
-
-	ldr	r0, =__BSS_START__
-	ldr	r1, =__BSS_SIZE__
-	bl	zeromem
-
-#if USE_COHERENT_MEM
-	ldr	r0, =__COHERENT_RAM_START__
-	ldr	r1, =__COHERENT_RAM_UNALIGNED_SIZE__
-	bl	zeromem
-#endif
-
-	/* Perform platform specific early arch. setup */
 	bl	sp_min_early_platform_setup
 	bl	sp_min_plat_arch_setup
 
@@ -270,13 +157,76 @@
 	b	sp_min_exit
 endfunc sp_min_entrypoint
 
+
+/*
+ * SMC handling function for SP_MIN.
+ */
+func handle_smc
+	smcc_save_gp_mode_regs
+
+	/* r0 points to smc_context */
+	mov	r2, r0				/* handle */
+	ldcopr	r0, SCR
+
+	/* Save SCR in stack */
+	push	{r0}
+	and	r3, r0, #SCR_NS_BIT		/* flags */
+
+	/* Switch to Secure Mode */
+	bic	r0, #SCR_NS_BIT
+	stcopr	r0, SCR
+	isb
+	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
+	/* Check whether an SMC64 is issued */
+	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
+	beq	1f	/* SMC32 is detected */
+	mov	r0, #SMC_UNK
+	str	r0, [r2, #SMC_CTX_GPREG_R0]
+	mov	r0, r2
+	b	2f	/* Skip handling the SMC */
+1:
+	mov	r1, #0				/* cookie */
+	bl	handle_runtime_svc
+2:
+	/* r0 points to smc context */
+
+	/* Restore SCR from stack */
+	pop	{r1}
+	stcopr	r1, SCR
+	isb
+
+	b	sp_min_exit
+endfunc handle_smc
+
+
 /*
  * The Warm boot entrypoint for SP_MIN.
  */
 func sp_min_warm_entrypoint
-
-	/* Setup C runtime stack */
-	bl	plat_set_my_stack
+	/*
+	 * On the warm boot path, most of the EL3 initialisations performed by
+	 * 'el3_entrypoint_common' must be skipped:
+	 *
+	 *  - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
+	 *    programming the reset address do we need to set the CPU endianness.
+	 *    In other cases, we assume this has been taken care of by the
+	 *    entrypoint code.
+	 *
+	 *  - No need to determine the type of boot, we know it is a warm boot.
+	 *
+	 *  - Do not try to distinguish between primary and secondary CPUs, this
+	 *    notion only exists for a cold boot.
+	 *
+	 *  - No need to initialise the memory or the C runtime environment,
+	 *    it has been done once and for all on the cold boot path.
+	 */
+	el3_entrypoint_common					\
+		_set_endian=PROGRAMMABLE_RESET_ADDRESS		\
+		_warm_boot_mailbox=0				\
+		_secondary_cold_boot=0				\
+		_init_memory=0					\
+		_init_c_runtime=0				\
+		_exception_vectors=sp_min_vector_table
 
 	/* --------------------------------------------
 	 * Enable the MMU with the DCache disabled. It
diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S
index b158db1..e0e23e8 100644
--- a/bl32/sp_min/sp_min.ld.S
+++ b/bl32/sp_min/sp_min.ld.S
@@ -50,6 +50,7 @@
         __TEXT_START__ = .;
         *entrypoint.o(.text*)
         *(.text*)
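+        /* The exception vectors are emitted into the '.vectors' section */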
+        *(.vectors)
         . = NEXT(4096);
         __TEXT_END__ = .;
     } >RAM
@@ -98,6 +99,7 @@
         KEEP(*(cpu_ops))
         __CPU_OPS_END__ = .;
 
+        /* Keep the exception vectors ('.vectors' section) with the read-only data */
+        *(.vectors)
         __RO_END_UNALIGNED__ = .;
 
         /*