powerpc: change 86xx SMP boot method

Put the boot page for the secondary cpus into RAM and use
BPTR to point the cores at it.  This is a step towards converting
to the ePAPR boot methodology.  The code also deals properly
with more than 4GB of RAM.

Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
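---
For reference, the boot page placement rule shared by ft_cpu_setup(),
cpu_mp_lmb_reserve() and setup_mp() below, with a worked example
(an illustrative sketch, not part of the patch itself):

	/* Put the 4KB boot page 1MB below the top of RAM, but never
	 * above the 4GB boundary (bootpg is carried in a u32). */
	u32 bootpg;

	if (gd->ram_size > 0xfffff000)
		bootpg = 0xfff00000;		/* 4GB - 1MB */
	else
		bootpg = gd->ram_size - (1024 * 1024);

	/* e.g. on a 512MB board: bootpg = 0x20000000 - 0x100000
	 * = 0x1ff00000, so setup_mp() writes BPTR =
	 * 0x80000000 | (0x1ff00000 >> 12) = 0x8001ff00. */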
diff --git a/cpu/mpc86xx/Makefile b/cpu/mpc86xx/Makefile
index a9767ad..34a9755 100644
--- a/cpu/mpc86xx/Makefile
+++ b/cpu/mpc86xx/Makefile
@@ -31,6 +31,10 @@
 START	= start.o
 SOBJS	= cache.o
 
+ifneq ($(CONFIG_NUM_CPUS),1)
+COBJS-y	+= mp.o
+SOBJS	+= release.o
+endif
 COBJS-y	+= traps.o
 COBJS-y	+= cpu.o
 COBJS-y	+= cpu_init.o
diff --git a/cpu/mpc86xx/cpu_init.c b/cpu/mpc86xx/cpu_init.c
index 4ab88f0..06f179c 100644
--- a/cpu/mpc86xx/cpu_init.c
+++ b/cpu/mpc86xx/cpu_init.c
@@ -31,6 +31,7 @@
 #include <mpc86xx.h>
 #include <asm/mmu.h>
 #include <asm/fsl_law.h>
+#include "mp.h"
 
 DECLARE_GLOBAL_DATA_PTR;
 
@@ -121,6 +122,9 @@
  */
 int cpu_init_r(void)
 {
+#if (CONFIG_NUM_CPUS > 1)
+	setup_mp();
+#endif
 	return 0;
 }
 
diff --git a/cpu/mpc86xx/fdt.c b/cpu/mpc86xx/fdt.c
index 1fef94f5..3adfad9 100644
--- a/cpu/mpc86xx/fdt.c
+++ b/cpu/mpc86xx/fdt.c
@@ -9,9 +9,17 @@
 #include <common.h>
 #include <libfdt.h>
 #include <fdt_support.h>
+#include "mp.h"
+
+DECLARE_GLOBAL_DATA_PTR;
 
 void ft_cpu_setup(void *blob, bd_t *bd)
 {
+#if (CONFIG_NUM_CPUS > 1)
+	int off;
+	u32 bootpg;
+#endif
+
 	do_fixup_by_prop_u32(blob, "device_type", "cpu", 4,
 			     "timebase-frequency", bd->bi_busfreq / 4, 1);
 	do_fixup_by_prop_u32(blob, "device_type", "cpu", 4,
@@ -32,4 +40,17 @@
 	do_fixup_by_compat_u32(blob, "ns16550",
 			       "clock-frequency", CONFIG_SYS_NS16550_CLK, 1);
 #endif
+
+#if (CONFIG_NUM_CPUS > 1)
+	/* If we have 4GB or more of memory, put the boot page at 4GB - 1MB */
+	if (gd->ram_size > 0xfffff000)
+		bootpg = 0xfff00000;
+	else
+		bootpg = gd->ram_size - (1024 * 1024);
+
+	/* Reserve the boot page so OSes don't use it */
+	off = fdt_add_mem_rsv(blob, bootpg, (u64)4096);
+	if (off < 0)
+		printf("%s: %s\n", __FUNCTION__, fdt_strerror(off));
+#endif
 }
diff --git a/cpu/mpc86xx/mp.c b/cpu/mpc86xx/mp.c
new file mode 100644
index 0000000..5014401
--- /dev/null
+++ b/cpu/mpc86xx/mp.c
@@ -0,0 +1,69 @@
+#include <common.h>
+#include <asm/processor.h>
+#include <asm/mmu.h>
+#include <ioports.h>
+#include <lmb.h>
+#include <asm/io.h>
+#include "mp.h"
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#if (CONFIG_NUM_CPUS > 1)
+void cpu_mp_lmb_reserve(struct lmb *lmb)
+{
+	u32 bootpg;
+
+	/* If we have 4GB or more of memory, put the boot page at 4GB - 1MB */
+	if ((u64)gd->ram_size > 0xfffff000)
+		bootpg = 0xfff00000;
+	else
+		bootpg = gd->ram_size - (1024 * 1024);
+
+	/* Tell U-Boot we stole a page */
+	lmb_reserve(lmb, bootpg, 4096);
+}
+
+/*
+ * Copy the code for other cpus to execute into an
+ * aligned location accessible via BPTR
+ */
+void setup_mp(void)
+{
+	extern ulong __secondary_start_page;
+	ulong fixup = (ulong)&__secondary_start_page;
+	u32 bootpg;
+	u32 bootpg_va;
+
+	/*
+	 * If we have 4GB or more of memory, put the boot page at 4GB - 1MB.
+	 * Otherwise, put it at the very end of RAM.
+	 */
+	if (gd->ram_size > 0xfffff000)
+		bootpg = 0xfff00000;
+	else
+		bootpg = gd->ram_size - (1024 * 1024);
+
+	if (bootpg >= CONFIG_SYS_MAX_DDR_BAT_SIZE) {
+		/* We're not covered by the DDR mapping; set up a temporary BAT */
+		write_bat(DBAT7, CONFIG_SYS_SCRATCH_VA | BATU_BL_128K |
+			  BATU_VS | BATU_VP,
+			  bootpg | BATL_PP_RW | BATL_MEMCOHERENCE);
+		bootpg_va = CONFIG_SYS_SCRATCH_VA;
+	} else {
+		bootpg_va = bootpg;
+	}
+
+	memcpy((void *)bootpg_va, (void *)fixup, 4096);
+	flush_cache(bootpg_va, 4096);
+
+	/* remove the temporary BAT mapping */
+	if (bootpg >= CONFIG_SYS_MAX_DDR_BAT_SIZE)
+		write_bat(DBAT7, 0, 0);
+
+	/* If the physical location of bootpg is not 0xfff00000, set BPTR */
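+	/* (BPTR's MSB enables the mapping; the low bits hold bootpg >> 12) */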
+	if (bootpg != 0xfff00000)
+		out_be32((uint *)(CONFIG_SYS_CCSRBAR + 0x20), 0x80000000 |
+			 (bootpg >> 12));
+}
+#endif
diff --git a/cpu/mpc86xx/mp.h b/cpu/mpc86xx/mp.h
new file mode 100644
index 0000000..886e0c8
--- /dev/null
+++ b/cpu/mpc86xx/mp.h
@@ -0,0 +1,9 @@
+#ifndef __MPC86XX_MP_H_
+#define __MPC86XX_MP_H_
+
+struct lmb;
+
+void setup_mp(void);
+void cpu_mp_lmb_reserve(struct lmb *lmb);
+
+#endif
diff --git a/cpu/mpc86xx/release.S b/cpu/mpc86xx/release.S
new file mode 100644
index 0000000..b524e50
--- /dev/null
+++ b/cpu/mpc86xx/release.S
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2004, 2007, 2008 Freescale Semiconductor.
+ * Srikanth Srinivasan <srikanth.srinivaan@freescale.com>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+#include <config.h>
+#include <mpc86xx.h>
+#include <version.h>
+
+#include <ppc_asm.tmpl>
+#include <ppc_defs.h>
+
+#include <asm/cache.h>
+#include <asm/mmu.h>
+
+/* If this is a multi-cpu system then we need to handle the
+ * secondary cpus.  The assumption is that they are being
+ * held in boot holdoff mode until the 1st cpu releases them
+ * from Linux.  We do some basic cpu init and then hand them
+ * off to the Linux reset vector.
+ * Sri:  Much of this initialization is not required.  Linux
+ * rewrites the BATs and SPRs and also enables the L1 cache.
+ *
+ * Core 0 must copy this page to a 1M-aligned region and set
+ * BPTR to point to it.
+ */
+#if (CONFIG_NUM_CPUS > 1)
+	.align 12
+.globl __secondary_start_page
+__secondary_start_page:
+	.space 0x100	/* skip to the reset vector location at offset 0x100 */
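+	/* MSSCR0[ID] (bit 0x0020) says which core we are; stash it in PIR */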
+	mfspr	r0, MSSCR0
+	andi.	r0, r0, 0x0020
+	rlwinm	r0, r0, 27, 31, 31
+	mtspr	PIR, r0
+
+	/* Invalidate BATs */
+	li	r0, 0
+	mtspr	IBAT0U, r0
+	mtspr	IBAT1U, r0
+	mtspr	IBAT2U, r0
+	mtspr	IBAT3U, r0
+	mtspr	IBAT4U, r0
+	mtspr	IBAT5U, r0
+	mtspr	IBAT6U, r0
+	mtspr	IBAT7U, r0
+	isync
+	mtspr	DBAT0U, r0
+	mtspr	DBAT1U, r0
+	mtspr	DBAT2U, r0
+	mtspr	DBAT3U, r0
+	mtspr	DBAT4U, r0
+	mtspr	DBAT5U, r0
+	mtspr	DBAT6U, r0
+	mtspr	DBAT7U, r0
+	isync
+	sync
+
+	/* enable extended addressing (read-modify-write preserves HID0) */
+	mfspr	r0, HID0
+	oris	r0, r0, (HID0_HIGH_BAT_EN | HID0_XBSEN | HID0_XAEN)@h
+	ori	r0, r0, (HID0_HIGH_BAT_EN | HID0_XBSEN | HID0_XAEN)@l
+	mtspr	HID0, r0
+	sync
+	isync
+
+#ifdef CONFIG_SYS_L2
+	/* init the L2 cache */
+	addis	r3, r0, L2_INIT@h
+	ori	r3, r3, L2_INIT@l
+	sync
+	mtspr	l2cr, r3
+#ifdef CONFIG_ALTIVEC
+	dssall
+#endif
+	/* invalidate the L2 cache (disabling it first if it was enabled) */
+	mfspr	r3, l2cr
+	rlwinm.	r3, r3, 0, 0, 0
+	beq	1f
+
+	mfspr	r3, l2cr
+	rlwinm	r3, r3, 0, 1, 31
+
+#ifdef	CONFIG_ALTIVEC
+	dssall
+#endif
+	sync
+	mtspr	l2cr, r3
+	sync
+1:	mfspr	r3, l2cr
+	oris	r3, r3, L2CR_L2I@h
+	mtspr	l2cr, r3
+
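+	/* poll until hardware clears L2CR[L2I]; the invalidate is then done */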
+invl2:
+	mfspr	r3, l2cr
+	andis.	r3, r3, L2CR_L2I@h
+	bne	invl2
+	sync
+#endif
+
+	/* enable and invalidate the data cache */
+	mfspr	r3, HID0
+	li	r5, HID0_DCFI|HID0_DLOCK
+	andc	r3, r3, r5
+	mtspr	HID0, r3		/* no invalidate, unlock */
+	ori	r3, r3, HID0_DCE
+	ori	r5, r3, HID0_DCFI
+	mtspr	HID0, r5		/* enable + invalidate */
+	mtspr	HID0, r3		/* enable */
+	sync
+#ifdef CONFIG_SYS_L2
+	sync
+	lis	r3, L2_ENABLE@h
+	ori	r3, r3, L2_ENABLE@l
+	mtspr	l2cr, r3
+	isync
+	sync
+#endif
+
+	/* enable and invalidate the instruction cache */
+	mfspr	r3, HID0
+	li	r5, HID0_ICFI|HID0_ILOCK
+	andc	r3, r3, r5
+	ori	r3, r3, HID0_ICE
+	ori	r5, r3, HID0_ICFI
+	mtspr	HID0, r5
+	mtspr	HID0, r3
+	isync
+	sync
+
+	/* TBEN in HID0 */
+	mfspr	r4, HID0
+	oris	r4, r4, 0x0400
+	mtspr	HID0, r4
+	sync
+	isync
+
+	/* MCP|SYNCBE|ABE in HID1 */
+	mfspr	r4, HID1
+	oris	r4, r4, 0x8000
+	ori	r4, r4, 0x0C00
+	mtspr	HID1, r4
+	sync
+	isync
+
+	lis	r3, CONFIG_LINUX_RESET_VEC@h
+	ori	r3, r3, CONFIG_LINUX_RESET_VEC@l
+	mtlr	r3
+	blr
+
+	/* never returns; the core is now running Linux */
+#endif
diff --git a/cpu/mpc86xx/start.S b/cpu/mpc86xx/start.S
index b1a23b4..48f8c5a 100644
--- a/cpu/mpc86xx/start.S
+++ b/cpu/mpc86xx/start.S
@@ -179,20 +179,10 @@
 
 boot_cold:
 boot_warm:
-
-	/* if this is a multi-core system we need to check which cpu
-	 * this is, if it is not cpu 0 send the cpu to the linux reset
-	 * vector */
-#if (CONFIG_NUM_CPUS > 1)
-	mfspr	r0, MSSCR0
-	andi.	r0, r0, 0x0020
-	rlwinm	r0,r0,27,31,31
-	mtspr	PIR, r0
-	beq	1f
-
-	bl	secondary_cpu_setup
-#endif
-
+	/*
+	 * NOTE: Only CPU 0 will ever come here.  Other cores go to an
+	 * address specified by BPTR.
+	 */
 1:
 #ifdef CONFIG_SYS_RAMBOOT
 	/* disable everything */
@@ -976,65 +966,6 @@
 	sync
 	blr
 #endif
-#endif
-
-/* If this is a multi-cpu system then we need to handle the
- * 2nd cpu.  The assumption is that the 2nd cpu is being
- * held in boot holdoff mode until the 1st cpu unlocks it
- * from Linux.	We'll do some basic cpu init and then pass
- * it to the Linux Reset Vector.
- * Sri:	 Much of this initialization is not required. Linux
- * rewrites the bats, and the sprs and also enables the L1 cache.
- */
-#if (CONFIG_NUM_CPUS > 1)
-.globl secondary_cpu_setup
-secondary_cpu_setup:
-	/* Do only core setup on all cores except cpu0 */
-	bl	invalidate_bats
-	sync
-	bl	enable_ext_addr
-
-#ifdef CONFIG_SYS_L2
-	/* init the L2 cache */
-	addis	r3, r0, L2_INIT@h
-	ori	r3, r3, L2_INIT@l
-	sync
-	mtspr	l2cr, r3
-#ifdef CONFIG_ALTIVEC
-	dssall
-#endif
-	/* invalidate the L2 cache */
-	bl	l2cache_invalidate
-	sync
 #endif
 
-	/* enable and invalidate the data cache */
-	bl	dcache_enable
-	sync
-
-	/* enable and invalidate the instruction cache*/
-	bl	icache_enable
-	sync
-
-	/* TBEN in HID0 */
-	mfspr	r4, HID0
-	oris	r4, r4, 0x0400
-	mtspr	HID0, r4
-	sync
-	isync
-
-	/* MCP|SYNCBE|ABE in HID1 */
-	mfspr	r4, HID1
-	oris	r4, r4, 0x8000
-	ori	r4, r4, 0x0C00
-	mtspr	HID1, r4
-	sync
-	isync
-
-	lis	r3, CONFIG_LINUX_RESET_VEC@h
-	ori	r3, r3, CONFIG_LINUX_RESET_VEC@l
-	mtlr	r3
-	blr
 
-	/* Never Returns, Running in Linux Now */
-#endif