armv8/fsl-lsch3: Release secondary cores from boot hold off with Boot Page

Secondary cores need to be released from holdoff via the boot release
registers. With the GPP bootrom, they can then boot from main memory
directly. An individual spin table entry is used for each core. The
spin table and the boot page are reserved in the device tree so the OS
won't overwrite them.
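
For illustration only (not part of this patch), a rough C sketch of the
spin table hand-off this sets up; the struct and release_core() are
hypothetical names, only the field offsets (0: entry address, 8: status,
16: MPIDR/LPID) and the 64-byte element size mirror what
secondary_boot_func stores:

#include <stdint.h>

/* One spin table element, padded to 64 bytes (SPIN_TABLE_ELEM_SIZE) */
struct spin_table_entry {
	uint64_t entry_addr;	/* offset 0: jump target, 0 = keep spinning */
	uint64_t status;	/* offset 8: core writes 1 once it is parked */
	uint64_t lpid;		/* offset 16: MPIDR[15:0] saved by the core */
	uint64_t pad[5];	/* pad the element out to 64 bytes */
};

/* Hypothetical release sequence: publish the entry point, make the
 * store visible, then wake the parked core with SEV.
 */
static inline void release_core(volatile struct spin_table_entry *e,
				uint64_t entry)
{
	e->entry_addr = entry;
	__asm__ volatile("dsb sy" ::: "memory");
	__asm__ volatile("sev");
}

Each parked core sits in wfe until woken, then reads entry_addr and
branches there (byte-swapping the value first if it runs big endian).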

Signed-off-by: York Sun <yorksun@freescale.com>
Signed-off-by: Arnab Basu <arnab.basu@freescale.com>
diff --git a/arch/arm/cpu/armv8/fsl-lsch3/lowlevel.S b/arch/arm/cpu/armv8/fsl-lsch3/lowlevel.S
index ad32b6c..2a88aab 100644
--- a/arch/arm/cpu/armv8/fsl-lsch3/lowlevel.S
+++ b/arch/arm/cpu/armv8/fsl-lsch3/lowlevel.S
@@ -8,7 +8,9 @@
 
 #include <config.h>
 #include <linux/linkage.h>
+#include <asm/gic.h>
 #include <asm/macro.h>
+#include "mp.h"
 
 ENTRY(lowlevel_init)
 	mov	x29, lr			/* Save LR */
@@ -35,31 +37,114 @@
 #endif
 #endif
 
-	branch_if_master x0, x1, 1f
+	branch_if_master x0, x1, 2f
 
+	ldr	x0, =secondary_boot_func
+	blr	x0
+2:
+	mov	lr, x29			/* Restore LR */
+	ret
+ENDPROC(lowlevel_init)
+
+	/* Keep literals not used by the secondary boot code outside it */
+	.ltorg
+
+	/* Using 64 bit alignment since the spin table is accessed as data */
+	.align 4
+	.global secondary_boot_code
+	/* Secondary Boot Code starts here */
+secondary_boot_code:
+	.global __spin_table
+__spin_table:
+	.space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE
+
+	.align 2
+ENTRY(secondary_boot_func)
 	/*
-	 * Slave should wait for master clearing spin table.
-	 * This sync prevent salves observing incorrect
-	 * value of spin table and jumping to wrong place.
+	 * MPIDR_EL1 Fields:
+	 * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
+	 * MPIDR[7:2] = AFF0_RES
+	 * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
+	 * MPIDR[23:16] = AFF2_CLUSTERID
+	 * MPIDR[24] = MT
+	 * MPIDR[29:25] = RES0
+	 * MPIDR[30] = U
+	 * MPIDR[31] = ME
+	 * MPIDR[39:32] = AFF3
+	 *
+	 * Linear Processor ID (LPID) calculation from MPIDR_EL1:
+	 * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
+	 * until AFF2_CLUSTERID and AFF3 have non-zero values)
+	 *
+	 * LPID = (MPIDR[15:8] << 2) | MPIDR[1:0]
 	 */
-#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
-#ifdef CONFIG_GICV2
-	ldr	x0, =GICC_BASE
-#endif
-	bl	gic_wait_for_interrupt
-#endif
-
+	mrs	x0, mpidr_el1
+	ubfm	x1, x0, #8, #15
+	ubfm	x2, x0, #0, #1
+	orr	x10, x2, x1, lsl #2	/* x10 has LPID */
+	ubfm	x9, x0, #0, #15		/* x9 contains MPIDR[15:0] */
 	/*
-	 * All processors will enter EL2 and optionally EL1.
+	 * offset of the spin table element for this core from start of spin
+	 * table (each elem is padded to 64 bytes)
 	 */
-	bl	armv8_switch_to_el2
+	lsl	x1, x10, #6
+	ldr	x0, =__spin_table
+	/* physical address of this cpu's spin table element */
+	add	x11, x1, x0
+
+	str	x9, [x11, #16]	/* LPID */
+	mov	x4, #1
+	str	x4, [x11, #8]	/* STATUS */
+	dsb	sy
+#if defined(CONFIG_GICV3)
+	gic_wait_for_interrupt_m x0
+#elif defined(CONFIG_GICV2)
+	ldr	x0, =GICC_BASE
+	gic_wait_for_interrupt_m x0, w1
+#endif
+
+	bl	secondary_switch_to_el2
 #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
-	bl	armv8_switch_to_el1
+	bl	secondary_switch_to_el1
 #endif
-	b	2f
 
-1:
-2:
-	mov	lr, x29			/* Restore LR */
-	ret
-ENDPROC(lowlevel_init)
+slave_cpu:
+	wfe
+	ldr	x0, [x11]
+	cbz	x0, slave_cpu
+#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
+	mrs	x1, sctlr_el2
+#else
+	mrs	x1, sctlr_el1
+#endif
+	tbz	x1, #25, cpu_is_le
+	rev	x0, x0			/* BE to LE conversion */
+cpu_is_le:
+	br	x0			/* branch to the given address */
+ENDPROC(secondary_boot_func)
+
+ENTRY(secondary_switch_to_el2)
+	switch_el x0, 1f, 0f, 0f
+0:	ret
+1:	armv8_switch_to_el2_m x0
+ENDPROC(secondary_switch_to_el2)
+
+ENTRY(secondary_switch_to_el1)
+	switch_el x0, 0f, 1f, 0f
+0:	ret
+1:	armv8_switch_to_el1_m x0, x1
+ENDPROC(secondary_switch_to_el1)
+
+	/* Ensure that the literals used by the secondary boot code are
+	 * assembled within it (this is required so that we can protect
+	 * this area with a single memreserve region).
+	 */
+	.ltorg
+
+	/* 64 bit alignment for elements accessed as data */
+	.align 4
+	.globl __secondary_boot_code_size
+	.type __secondary_boot_code_size, %object
+	/* Secondary Boot Code ends here */
+__secondary_boot_code_size:
+	.quad .-secondary_boot_code