Per-cpu data cache restructuring

This patch prepares the per-cpu pointer cache for wider use by:
* renaming the structure to cpu_data and placing it in a new header
* providing accessors for the current CPU and for other CPUs
* splitting the initialization of the TPIDR pointer from the
  initialization of the cpu_data content
* moving the crash stack initialization into the new crash reporting init function
* setting the TPIDR pointer very early during boot
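
For illustration only (not part of this patch), a minimal sketch of how a
BL31 component could use the new accessors once init_cpu_data_ptr() has
programmed TPIDR_EL3; the function and variable names here are hypothetical:

    #include <cpu_data.h>

    static void example_usage(uint64_t stack_top)
    {
        /* store the crash stack address for the calling CPU */
        set_cpu_data(crash_stack, stack_top);

        /* read it back, for this CPU or for another CPU by linear index */
        uint64_t own = get_cpu_data(crash_stack);
        uint64_t cpu0 = get_cpu_data_by_index(0, crash_stack);
        (void)own; (void)cpu0;
    }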

Change-Id: Icef9004ff88f8eb241d48c14be3158087d7e49a3
diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
index 3c9042b..2e7476a 100644
--- a/bl31/aarch64/bl31_entrypoint.S
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -131,6 +131,15 @@
 	bl	zeromem16
 
 	/* ---------------------------------------------
+	 * Initialise cpu_data and crash reporting
+	 * ---------------------------------------------
+	 */
+	bl	init_cpu_data_ptr
+#if CRASH_REPORTING
+	bl	init_crash_reporting
+#endif
+
+	/* ---------------------------------------------
 	 * Use SP_EL0 for the C runtime stack.
 	 * ---------------------------------------------
 	 */
diff --git a/bl31/aarch64/cpu_data.S b/bl31/aarch64/cpu_data.S
new file mode 100644
index 0000000..feb51d6
--- /dev/null
+++ b/bl31/aarch64/cpu_data.S
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+#include <cpu_data.h>
+
+.globl	init_cpu_data_ptr
+.globl	_cpu_data_by_mpidr
+.globl	_cpu_data_by_index
+
+/* -----------------------------------------------------------------
+ * void init_cpu_data_ptr(void)
+ *
+ * Initialise the TPIDR_EL3 register to refer to the cpu_data_t
+ * for the calling CPU. This must be called before using the cpu_data accessors.
+ *
+ * This can be called without a valid stack.
+ * clobbers: x0, x1, x9, x10
+ * -----------------------------------------------------------------
+ */
+func init_cpu_data_ptr
+	mov	x10, x30
+	mrs	x0, mpidr_el1
+	bl	_cpu_data_by_mpidr
+	msr	tpidr_el3, x0
+	ret	x10
+
+
+/* -----------------------------------------------------------------
+ * cpu_data_t *_cpu_data_by_mpidr(uint64_t mpidr)
+ *
+ * Return the cpu_data structure for the CPU with given MPIDR
+ *
+ * This can be called without a valid stack. It assumes that
+ * platform_get_core_pos() does not clobber register x9.
+ * clobbers: x0, x1, x9
+ * -----------------------------------------------------------------
+ */
+func _cpu_data_by_mpidr
+	mov	x9, x30
+	bl	platform_get_core_pos
+	mov	x30, x9
+	b	_cpu_data_by_index
+
+
+/* -----------------------------------------------------------------
+ * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index)
+ *
+ * Return the cpu_data structure for the CPU with given linear index
+ *
+ * This can be called without a valid stack.
+ * clobbers: x0, x1
+ * -----------------------------------------------------------------
+ */
+func _cpu_data_by_index
+	adr	x1, percpu_data
+	add	x0, x1, x0, LSL #CPU_DATA_LOG2SIZE
+	ret
diff --git a/bl31/aarch64/crash_reporting.S b/bl31/aarch64/crash_reporting.S
index 21f74a4..1118e96 100644
--- a/bl31/aarch64/crash_reporting.S
+++ b/bl31/aarch64/crash_reporting.S
@@ -30,12 +30,13 @@
 #include <arch.h>
 #include <asm_macros.S>
 #include <context.h>
+#include <cpu_data.h>
 #include <plat_macros.S>
 #include <platform_def.h>
 
-	.globl	get_crash_stack
 	.globl	dump_state_and_die
 	.globl	dump_intr_state_and_die
+	.globl  init_crash_reporting
 
 #if CRASH_REPORTING
 	/* ------------------------------------------------------
@@ -232,7 +233,7 @@
 	/* Check if tpidr is initialized */
 	cbz	x0, infinite_loop
 
-	ldr	x0, [x0, #PTR_CACHE_CRASH_STACK_OFFSET]
+	ldr	x0, [x0, #CPU_DATA_CRASH_STACK_OFFSET]
 	/* store the x30 and sp to stack */
 	str	x30, [x0, #-(REG_SIZE)]!
 	mov	x30, sp
@@ -281,19 +282,31 @@
 #define PCPU_CRASH_STACK_SIZE	0x140
 
 	/* -----------------------------------------------------
-	 * void get_crash_stack (uint64_t mpidr) : This
-	 * function is used to allocate a small stack for
-	 * reporting unhandled exceptions
+	 * Per-cpu crash stacks in normal memory.
 	 * -----------------------------------------------------
 	 */
-func get_crash_stack
-	mov	x10, x30 // lr
-	get_mp_stack pcpu_crash_stack, PCPU_CRASH_STACK_SIZE
-	ret	x10
+declare_stack pcpu_crash_stack, tzfw_normal_stacks, \
+		PCPU_CRASH_STACK_SIZE, PLATFORM_CORE_COUNT
 
 	/* -----------------------------------------------------
-	 * Per-cpu crash stacks in normal memory.
+	 * Provides each CPU with a small stack for reporting
+	 * unhandled exceptions, and stores the stack's address
+	 * in that CPU's cpu_data.
+	 *
+	 * This can be called without a runtime stack
+	 * clobbers: x0 - x4
 	 * -----------------------------------------------------
 	 */
-declare_stack pcpu_crash_stack, tzfw_normal_stacks, \
-		PCPU_CRASH_STACK_SIZE, PLATFORM_CORE_COUNT
+func init_crash_reporting
+	mov	x4, x30				// preserve lr, no stack available
+	mov	x2, #0				// start at linear CPU index 0
+	adr	x3, pcpu_crash_stack		// base of the crash stack area
+init_crash_loop:
+	mov	x0, x2
+	bl	_cpu_data_by_index		// x0 = cpu_data for this index
+	add	x3, x3, #PCPU_CRASH_STACK_SIZE	// top of this CPU's crash stack
+	str	x3, [x0, #CPU_DATA_CRASH_STACK_OFFSET]
+	add	x2, x2, #1
+	cmp	x2, #PLATFORM_CORE_COUNT
+	b.lo	init_crash_loop
+	ret	x4
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 99fc357..4602e41 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -30,11 +30,13 @@
 
 BL31_SOURCES		+=	bl31/bl31_main.c				\
 				bl31/context_mgmt.c				\
+				bl31/cpu_data_array.c				\
 				bl31/runtime_svc.c				\
 				bl31/interrupt_mgmt.c				\
 				bl31/aarch64/bl31_arch_setup.c			\
 				bl31/aarch64/bl31_entrypoint.S			\
 				bl31/aarch64/context.S				\
+				bl31/aarch64/cpu_data.S				\
 				bl31/aarch64/runtime_exceptions.S		\
 				bl31/aarch64/crash_reporting.S	\
 				common/aarch64/early_exceptions.S		\
diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c
index 6765e60..5bb11ba 100644
--- a/bl31/bl31_main.c
+++ b/bl31/bl31_main.c
@@ -97,7 +97,6 @@
 	 */
 	assert(cm_get_context(NON_SECURE));
 	cm_set_next_eret_context(NON_SECURE);
-	cm_init_pcpu_ptr_cache();
 	write_vbar_el3((uint64_t) runtime_exceptions);
 	isb();
 	next_image_type = NON_SECURE;
diff --git a/bl31/context_mgmt.c b/bl31/context_mgmt.c
index 122a0d4..59be748 100644
--- a/bl31/context_mgmt.c
+++ b/bl31/context_mgmt.c
@@ -51,9 +51,6 @@
 
 static context_info_t cm_context_info[PLATFORM_CORE_COUNT];
 
-/* The per_cpu_ptr_cache_t space allocation */
-static per_cpu_ptr_cache_t per_cpu_ptr_cache_space[PLATFORM_CORE_COUNT];
-
 /*******************************************************************************
  * Context management library initialisation routine. This library is used by
  * runtime services to share pointers to 'cpu_context' structures for the secure
@@ -295,34 +292,3 @@
 			 "msr	spsel, #0\n"
 			 : : "r" (ctx));
 }
-
-/************************************************************************
- * The following function is used to populate the per cpu pointer cache.
- * The pointer will be stored in the tpidr_el3 register.
- *************************************************************************/
-void cm_init_pcpu_ptr_cache()
-{
-	unsigned long mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
-	per_cpu_ptr_cache_t *pcpu_ptr_cache;
-
-	pcpu_ptr_cache = &per_cpu_ptr_cache_space[linear_id];
-	assert(pcpu_ptr_cache);
-#if CRASH_REPORTING
-	pcpu_ptr_cache->crash_stack = get_crash_stack(mpidr);
-#endif
-
-	cm_set_pcpu_ptr_cache(pcpu_ptr_cache);
-}
-
-
-void cm_set_pcpu_ptr_cache(const void *pcpu_ptr)
-{
-	write_tpidr_el3((unsigned long)pcpu_ptr);
-}
-
-void *cm_get_pcpu_ptr_cache(void)
-{
-	return (void *)read_tpidr_el3();
-}
-
diff --git a/bl31/cpu_data_array.c b/bl31/cpu_data_array.c
new file mode 100644
index 0000000..b0042a1
--- /dev/null
+++ b/bl31/cpu_data_array.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <cassert.h>
+#include <cpu_data.h>
+#include <platform_def.h>
+
+/* verify assembler offsets match data structures */
+CASSERT(CPU_DATA_CRASH_STACK_OFFSET == __builtin_offsetof
+	(cpu_data_t, crash_stack),
+	assert_cpu_data_crash_stack_offset_mismatch);
+
+CASSERT((1 << CPU_DATA_LOG2SIZE) == sizeof(cpu_data_t),
+	assert_cpu_data_log2size_mismatch);
+
+/* The per-cpu data array, one cpu_data_t entry per CPU */
+cpu_data_t percpu_data[PLATFORM_CORE_COUNT];
diff --git a/include/bl31/context.h b/include/bl31/context.h
index 16cc744..b889f68 100644
--- a/include/bl31/context.h
+++ b/include/bl31/context.h
@@ -185,11 +185,6 @@
 #define CTX_FP_FPCR		0x208
 #define CTX_FPREGS_END		0x210
 
-/******************************************************************************
- * Offsets for the per cpu cache implementation
- ******************************************************************************/
-#define PTR_CACHE_CRASH_STACK_OFFSET 0x0
-
 #ifndef __ASSEMBLY__
 
 #include <cassert.h>
@@ -330,17 +325,6 @@
 void fpregs_context_save(fp_regs_t *regs);
 void fpregs_context_restore(fp_regs_t *regs);
 
-
-/* Per-CPU pointer cache of recently used pointers and also the crash stack
- * TODO: Add other commonly used variables to this (tf_issues#90)
- */
-typedef struct per_cpu_ptr_cache {
-	uint64_t crash_stack;
-} per_cpu_ptr_cache_t;
-
-CASSERT(PTR_CACHE_CRASH_STACK_OFFSET == __builtin_offsetof\
-	(per_cpu_ptr_cache_t, crash_stack), \
-	assert_per_cpu_ptr_cache_crash_stack_offset_mismatch);
 
 #undef CTX_SYSREG_ALL
 #undef CTX_FP_ALL
diff --git a/include/bl31/context_mgmt.h b/include/bl31/context_mgmt.h
index 86bbc58..ca9d9fa 100644
--- a/include/bl31/context_mgmt.h
+++ b/include/bl31/context_mgmt.h
@@ -54,8 +54,6 @@
 			  uint32_t bit_pos,
 			  uint32_t value);
 void cm_set_next_eret_context(uint32_t security_state);
-void cm_init_pcpu_ptr_cache();
-void cm_set_pcpu_ptr_cache(const void *pcpu_ptr);
-void *cm_get_pcpu_ptr_cache(void);
 uint32_t cm_get_scr_el3(uint32_t security_state);
+
 #endif /* __CM_H__ */
diff --git a/include/bl31/cpu_data.h b/include/bl31/cpu_data.h
new file mode 100644
index 0000000..2d256e4
--- /dev/null
+++ b/include/bl31/cpu_data.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CPU_DATA_H__
+#define __CPU_DATA_H__
+
+/* Offsets for the cpu_data structure */
+#define CPU_DATA_CRASH_STACK_OFFSET	0x0
+#define CPU_DATA_LOG2SIZE		6
+
+#ifndef __ASSEMBLY__
+
+#include <arch_helpers.h>
+#include <platform_def.h>
+#include <stdint.h>
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * Cache of frequently used per-cpu data:
+ *   Address of the crash stack
+ * Each entry is aligned to the cache writeback granule so that different
+ * CPUs can update their own entries concurrently without false sharing.
+ *
+ * TODO: Add other commonly used variables to this (tf_issues#90)
+ *
+ * Components that keep per-cpu members in this structure should not use
+ * the data structure or the _cpu_data accessors directly; they should use
+ * the get/set_cpu_data member access macros below instead.
+ ******************************************************************************/
+
+typedef struct cpu_data {
+	uint64_t crash_stack;
+} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;
+
+struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);
+struct cpu_data *_cpu_data_by_mpidr(uint64_t mpidr);
+
+/* Return the cpu_data structure for the current CPU. */
+static inline struct cpu_data *_cpu_data(void)
+{
+	return (cpu_data_t *)read_tpidr_el3();
+}
+
+
+/**************************************************************************
+ * APIs for initialising and accessing per-cpu data
+ *************************************************************************/
+
+void init_cpu_data_ptr(void);
+
+#define get_cpu_data(_m)		   _cpu_data()->_m
+#define set_cpu_data(_m, _v)		   _cpu_data()->_m = _v
+#define get_cpu_data_by_index(_ix, _m)	   _cpu_data_by_index(_ix)->_m
+#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = _v
+#define get_cpu_data_by_mpidr(_id, _m)	   _cpu_data_by_mpidr(_id)->_m
+#define set_cpu_data_by_mpidr(_id, _m, _v) _cpu_data_by_mpidr(_id)->_m = _v
+
+
+#endif /* __ASSEMBLY__ */
+#endif /* __CPU_DATA_H__ */
diff --git a/include/bl31/runtime_svc.h b/include/bl31/runtime_svc.h
index d7d88d4..3b84f06 100644
--- a/include/bl31/runtime_svc.h
+++ b/include/bl31/runtime_svc.h
@@ -267,7 +267,8 @@
 void runtime_svc_init();
 extern uint64_t __RT_SVC_DESCS_START__;
 extern uint64_t __RT_SVC_DESCS_END__;
-uint64_t get_crash_stack(uint64_t mpidr);
+void init_crash_reporting(void);
 void runtime_exceptions(void);
+
 #endif /*__ASSEMBLY__*/
 #endif /* __RUNTIME_SVC_H__ */
diff --git a/services/std_svc/psci/psci_afflvl_on.c b/services/std_svc/psci/psci_afflvl_on.c
index 443e6af..1c7a877 100644
--- a/services/std_svc/psci/psci_afflvl_on.c
+++ b/services/std_svc/psci/psci_afflvl_on.c
@@ -380,7 +380,6 @@
 	 */
 	assert(cm_get_context(NON_SECURE));
 	cm_set_next_eret_context(NON_SECURE);
-	cm_init_pcpu_ptr_cache();
 	write_vbar_el3((uint64_t) runtime_exceptions);
 
 	/*
diff --git a/services/std_svc/psci/psci_afflvl_suspend.c b/services/std_svc/psci/psci_afflvl_suspend.c
index a986e5c..3a1a419 100644
--- a/services/std_svc/psci/psci_afflvl_suspend.c
+++ b/services/std_svc/psci/psci_afflvl_suspend.c
@@ -497,7 +497,6 @@
 	 * set on this cpu prior to suspension.
 	 */
 	cm_set_next_eret_context(NON_SECURE);
-	cm_init_pcpu_ptr_cache();
 	write_vbar_el3((uint64_t) runtime_exceptions);
 
 	/*
diff --git a/services/std_svc/psci/psci_entry.S b/services/std_svc/psci/psci_entry.S
index bc8d900..037673d 100644
--- a/services/std_svc/psci/psci_entry.S
+++ b/services/std_svc/psci/psci_entry.S
@@ -61,6 +61,12 @@
 	adr	x22, psci_afflvl_power_on_finish
 
 	/* ---------------------------------------------
+	 * Initialise the cpu_data pointer for this CPU
+	 * ---------------------------------------------
+	 */
+	bl	init_cpu_data_ptr
+
+	/* ---------------------------------------------
 	 * Exceptions should not occur at this point.
 	 * Set VBAR in order to handle and report any
 	 * that do occur