arm64: core support

Relocation code based on a patch by Scott Wood, which is:
Signed-off-by: Scott Wood <scottwood@freescale.com>

Signed-off-by: David Feng <fenghua@phytium.com.cn>
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 760345f..4178f8c 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -1,6 +1,86 @@
 #ifndef __ASM_ARM_SYSTEM_H
 #define __ASM_ARM_SYSTEM_H
 
+#ifdef CONFIG_ARM64
+
+/*
+ * SCTLR_EL1/SCTLR_EL2/SCTLR_EL3 bit definitions
+ */
+#define CR_M		(1 << 0)	/* MMU enable			*/
+#define CR_A		(1 << 1)	/* Alignment abort enable	*/
+#define CR_C		(1 << 2)	/* Dcache enable		*/
+#define CR_SA		(1 << 3)	/* Stack alignment check enable	*/
+#define CR_I		(1 << 12)	/* Icache enable		*/
+#define CR_WXN		(1 << 19)	/* Write permission implies XN	*/
+#define CR_EE		(1 << 25)	/* Exception (Big) Endian	*/
+
+#define PGTABLE_SIZE	(0x10000)
+
+#ifndef __ASSEMBLY__
+
+#define isb()				\
+	({asm volatile(			\
+	"isb" : : : "memory");		\
+	})
+
+#define wfi()				\
+	({asm volatile(			\
+	"wfi" : : : "memory");		\
+	})
+
+static inline unsigned int current_el(void)
+{
+	unsigned int el;
+	asm volatile("mrs %0, CurrentEL" : "=r" (el) : : "cc");
+	return el >> 2;
+}
+
+static inline unsigned int get_sctlr(void)
+{
+	unsigned int el, val;
+
+	el = current_el();
+	if (el == 1)
+		asm volatile("mrs %0, sctlr_el1" : "=r" (val) : : "cc");
+	else if (el == 2)
+		asm volatile("mrs %0, sctlr_el2" : "=r" (val) : : "cc");
+	else
+		asm volatile("mrs %0, sctlr_el3" : "=r" (val) : : "cc");
+
+	return val;
+}
+
+static inline void set_sctlr(unsigned int val)
+{
+	unsigned int el;
+
+	el = current_el();
+	if (el == 1)
+		asm volatile("msr sctlr_el1, %0" : : "r" (val) : "cc");
+	else if (el == 2)
+		asm volatile("msr sctlr_el2, %0" : : "r" (val) : "cc");
+	else
+		asm volatile("msr sctlr_el3, %0" : : "r" (val) : "cc");
+
+	asm volatile("isb");
+}
+
+void __asm_flush_dcache_all(void);
+void __asm_flush_dcache_range(u64 start, u64 end);
+void __asm_invalidate_tlb_all(void);
+void __asm_invalidate_icache_all(void);
+
+void armv8_switch_to_el2(void);
+void armv8_switch_to_el1(void);
+void gic_init(void);
+void gic_send_sgi(unsigned long sgino);
+void wait_for_wakeup(void);
+void smp_kick_all_cpus(void);
+
+#endif	/* __ASSEMBLY__ */
+
+#else /* CONFIG_ARM64 */
+
 #ifdef __KERNEL__
 
 #define CPU_ARCH_UNKNOWN	0
@@ -45,6 +125,8 @@
 #define CR_AFE	(1 << 29)	/* Access flag enable			*/
 #define CR_TE	(1 << 30)	/* Thumb exception enable		*/
 
+#define PGTABLE_SIZE		(4096 * 4)
+
 /*
  * This is used to ensure the compiler did actually allocate the register we
  * asked it for some inline assembly sequences.  Apparently we can't trust
@@ -132,4 +214,6 @@
 
 #endif /* __KERNEL__ */
 
+#endif /* CONFIG_ARM64 */
+
 #endif
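
(Illustrative sketch, not part of the patch.)  A consumer of the accessors
above could switch the instruction cache on and off as follows; the function
names are placeholders standing in for cache code elsewhere in the series,
not something this hunk adds, and the sketch assumes only the CR_I bit and
the helpers declared in this header.  Because get_sctlr()/set_sctlr()
dispatch on current_el(), the same code works unchanged at EL1, EL2 or EL3.

#include <common.h>
#include <asm/system.h>

void icache_enable(void)
{
	/* Drop any stale lines before turning the I-cache on */
	__asm_invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}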