arm64 patch: gicv3 support

This patch adds GICv3 support to the U-Boot ARMv8 platform.

Changes for v2:
  - rename arch/arm/cpu/armv8/gic.S to arch/arm/lib/gic_64.S
  - move smp_kick_all_cpus() from gic.S to start.S, as it may be
    implementation dependent.
  - Each core initializes its own Redistributor instead of the master
    initializing all Redistributors. This was advised by Arnab Basu
    <arnab.basu@freescale.com>.

Signed-off-by: David Feng <fenghua@phytium.com.cn>
diff --git a/arch/arm/cpu/armv8/Makefile b/arch/arm/cpu/armv8/Makefile
index b6eb6de..7d93f59 100644
--- a/arch/arm/cpu/armv8/Makefile
+++ b/arch/arm/cpu/armv8/Makefile
@@ -13,5 +13,4 @@
 obj-y	+= exceptions.o
 obj-y	+= cache.o
 obj-y	+= tlb.o
-obj-y	+= gic.o
 obj-y	+= transition.o
diff --git a/arch/arm/cpu/armv8/gic.S b/arch/arm/cpu/armv8/gic.S
deleted file mode 100644
index 599aa8f..0000000
--- a/arch/arm/cpu/armv8/gic.S
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * GIC Initialization Routines.
- *
- * (C) Copyright 2013
- * David Feng <fenghua@phytium.com.cn>
- *
- * SPDX-License-Identifier:	GPL-2.0+
- */
-
-#include <asm-offsets.h>
-#include <config.h>
-#include <linux/linkage.h>
-#include <asm/macro.h>
-#include <asm/gic.h>
-
-
-/*************************************************************************
- *
- * void gic_init(void) __attribute__((weak));
- *
- * Currently, this routine only initialize secure copy of GIC
- * with Security Extensions at EL3.
- *
- *************************************************************************/
-WEAK(gic_init)
-	branch_if_slave	x0, 2f
-
-	/* Initialize Distributor and SPIs */
-	ldr	x1, =GICD_BASE
-	mov	w0, #0x3		/* EnableGrp0 | EnableGrp1 */
-	str	w0, [x1, GICD_CTLR]	/* Secure GICD_CTLR */
-	ldr	w0, [x1, GICD_TYPER]
-	and	w2, w0, #0x1f		/* ITLinesNumber */
-	cbz	w2, 2f			/* No SPIs */
-	add	x1, x1, (GICD_IGROUPRn + 4)
-	mov	w0, #~0			/* Config SPIs as Grp1 */
-1:	str	w0, [x1], #0x4
-	sub	w2, w2, #0x1
-	cbnz	w2, 1b
-
-	/* Initialize SGIs and PPIs */
-2:	ldr	x1, =GICD_BASE
-	mov	w0, #~0			/* Config SGIs and PPIs as Grp1 */
-	str	w0, [x1, GICD_IGROUPRn]	/* GICD_IGROUPR0 */
-	mov	w0, #0x1		/* Enable SGI 0 */
-	str	w0, [x1, GICD_ISENABLERn]
-
-	/* Initialize Cpu Interface */
-	ldr	x1, =GICC_BASE
-	mov	w0, #0x1e7		/* Disable IRQ/FIQ Bypass & */
-					/* Enable Ack Group1 Interrupt & */
-					/* EnableGrp0 & EnableGrp1 */
-	str	w0, [x1, GICC_CTLR]	/* Secure GICC_CTLR */
-
-	mov	w0, #0x1 << 7		/* Non-Secure access to GICC_PMR */
-	str	w0, [x1, GICC_PMR]
-
-	ret
-ENDPROC(gic_init)
-
-
-/*************************************************************************
- *
- * void gic_send_sgi(u64 sgi) __attribute__((weak));
- *
- *************************************************************************/
-WEAK(gic_send_sgi)
-	ldr	x1, =GICD_BASE
-	mov	w2, #0x8000
-	movk	w2, #0x100, lsl #16
-	orr	w2, w2, w0
-	str	w2, [x1, GICD_SGIR]
-	ret
-ENDPROC(gic_send_sgi)
-
-
-/*************************************************************************
- *
- * void wait_for_wakeup(void) __attribute__((weak));
- *
- * Wait for SGI 0 from master.
- *
- *************************************************************************/
-WEAK(wait_for_wakeup)
-	ldr	x1, =GICC_BASE
-0:	wfi
-	ldr	w0, [x1, GICC_AIAR]
-	str	w0, [x1, GICC_AEOIR]
-	cbnz	w0, 0b
-	ret
-ENDPROC(wait_for_wakeup)
-
-
-/*************************************************************************
- *
- * void smp_kick_all_cpus(void) __attribute__((weak));
- *
- *************************************************************************/
-WEAK(smp_kick_all_cpus)
-	/* Kick secondary cpus up by SGI 0 interrupt */
-	mov	x0, xzr			/* SGI 0 */
-	mov	x29, lr			/* Save LR */
-	bl	gic_send_sgi
-	mov	lr, x29			/* Restore LR */
-	ret
-ENDPROC(smp_kick_all_cpus)
diff --git a/arch/arm/cpu/armv8/start.S b/arch/arm/cpu/armv8/start.S
index 4f95289..33d3f36 100644
--- a/arch/arm/cpu/armv8/start.S
+++ b/arch/arm/cpu/armv8/start.S
@@ -50,7 +50,10 @@
 	 */
 	adr	x0, vectors
 	switch_el x1, 3f, 2f, 1f
-3:	msr	vbar_el3, x0
+3:	msr	vbar_el3, x0			/* Set vector base before x0 is clobbered */
+	mrs	x0, scr_el3
+	orr	x0, x0, #0xf			/* SCR_EL3.NS|IRQ|FIQ|EA */
+	msr	scr_el3, x0
 	msr	cptr_el3, xzr			/* Enable FP/SIMD */
 	ldr	x0, =COUNTER_FREQUENCY
 	msr	cntfrq_el0, x0			/* Initialize CNTFRQ */
@@ -95,32 +98,61 @@
 /*-----------------------------------------------------------------------*/
 
 WEAK(lowlevel_init)
-	/* Initialize GIC Secure Bank Status */
 	mov	x29, lr			/* Save LR */
-	bl	gic_init
 
-	branch_if_master x0, x1, 1f
+#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
+	branch_if_slave x0, 1f
+	ldr	x0, =GICD_BASE
+	bl	gic_init_secure
+1:
+#if defined(CONFIG_GICV3)
+	ldr	x0, =GICR_BASE
+	bl	gic_init_secure_percpu
+#elif defined(CONFIG_GICV2)
+	ldr	x0, =GICD_BASE
+	ldr	x1, =GICC_BASE
+	bl	gic_init_secure_percpu
+#endif
+#endif
+
+	branch_if_master x0, x1, 2f
 
 	/*
 	 * Slave should wait for master clearing spin table.
 	 * This sync prevent salves observing incorrect
 	 * value of spin table and jumping to wrong place.
 	 */
-	bl	wait_for_wakeup
+#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
+#ifdef CONFIG_GICV2
+	ldr	x0, =GICC_BASE
+#endif
+	bl	gic_wait_for_interrupt
+#endif
 
 	/*
-	 * All processors will enter EL2 and optionally EL1.
+	 * All slaves will enter EL2 and optionally EL1.
 	 */
 	bl	armv8_switch_to_el2
 #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
 	bl	armv8_switch_to_el1
 #endif
 
-1:
+2:
 	mov	lr, x29			/* Restore LR */
 	ret
 ENDPROC(lowlevel_init)
 
+WEAK(smp_kick_all_cpus)
+	/* Kick secondary cpus up by SGI 0 interrupt */
+	mov	x29, lr			/* Save LR */
+#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
+	ldr	x0, =GICD_BASE
+	bl	gic_kick_secondary_cpus
+#endif
+	mov	lr, x29			/* Restore LR */
+	ret
+ENDPROC(smp_kick_all_cpus)
+
 /*-----------------------------------------------------------------------*/
 
 ENTRY(c_runtime_cpu_setup)