Merge pull request #143 from athoelke/at/remove-nsram
Remove NSRAM from FVP memory map
diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
index 3c9042b..e4dfea4 100644
--- a/bl31/aarch64/bl31_entrypoint.S
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -72,11 +72,13 @@
isb
/* ---------------------------------------------
- * Set the exception vector to something sane.
+ * Set the exception vector and zero tpidr_el3
+ * until the crash reporting is set up
* ---------------------------------------------
*/
- adr x1, early_exceptions
+ adr x1, runtime_exceptions
msr vbar_el3, x1
+ msr tpidr_el3, xzr
/* ---------------------------------------------------------------------
* The initial state of the Architectural feature trap register
@@ -131,6 +133,15 @@
bl zeromem16
/* ---------------------------------------------
+ * Initialise cpu_data and crash reporting
+ * ---------------------------------------------
+ */
+#if CRASH_REPORTING
+ bl init_crash_reporting
+#endif
+ bl init_cpu_data_ptr
+
+ /* ---------------------------------------------
* Use SP_EL0 for the C runtime stack.
* ---------------------------------------------
*/
diff --git a/bl31/aarch64/cpu_data.S b/bl31/aarch64/cpu_data.S
new file mode 100644
index 0000000..feb51d6
--- /dev/null
+++ b/bl31/aarch64/cpu_data.S
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+#include <cpu_data.h>
+
+.globl init_cpu_data_ptr
+.globl _cpu_data_by_mpidr
+.globl _cpu_data_by_index
+
+/* -----------------------------------------------------------------
+ * void init_cpu_data_ptr(void)
+ *
+ * Initialise the TPIDR_EL3 register to refer to the cpu_data_t
+ * for the calling CPU. This must be called before get_cpu_data() is used.
+ *
+ * This can be called without a valid stack.
+ * clobbers: x0, x1, x9, x10
+ * -----------------------------------------------------------------
+ */
+func init_cpu_data_ptr
+ mov x10, x30
+ mrs x0, mpidr_el1
+ bl _cpu_data_by_mpidr
+ msr tpidr_el3, x0
+ ret x10
+
+
+/* -----------------------------------------------------------------
+ * cpu_data_t *_cpu_data_by_mpidr(uint64_t mpidr)
+ *
+ * Return the cpu_data structure for the CPU with the given MPIDR.
+ *
+ * This can be called without a valid stack. It assumes that
+ * platform_get_core_pos() does not clobber register x9.
+ * clobbers: x0, x1, x9
+ * -----------------------------------------------------------------
+ */
+func _cpu_data_by_mpidr
+ mov x9, x30
+ bl platform_get_core_pos
+ mov x30, x9
+ b _cpu_data_by_index
+
+
+/* -----------------------------------------------------------------
+ * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index)
+ *
+ * Return the cpu_data structure for the CPU with the given linear index.
+ *
+ * This can be called without a valid stack.
+ * clobbers: x0, x1
+ * -----------------------------------------------------------------
+ */
+func _cpu_data_by_index
+ adr x1, percpu_data
+ add x0, x1, x0, LSL #CPU_DATA_LOG2SIZE
+ ret
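
For reference, the two lookup helpers above reduce to simple array indexing into the percpu_data array introduced later in this patch. A minimal C sketch of the same computation (illustrative only; cpu_data_by_index_c is a hypothetical name and is not part of the patch):

#include <cpu_data.h>
#include <platform_def.h>

extern cpu_data_t percpu_data[PLATFORM_CORE_COUNT];

/* Equivalent of the scaled add above: the index is multiplied by
 * sizeof(cpu_data_t), which a CASSERT in cpu_data_array.c guarantees
 * equals 1 << CPU_DATA_LOG2SIZE. */
static cpu_data_t *cpu_data_by_index_c(uint32_t cpu_index)
{
	return &percpu_data[cpu_index];
}
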
diff --git a/bl31/aarch64/crash_reporting.S b/bl31/aarch64/crash_reporting.S
index 21f74a4..1118e96 100644
--- a/bl31/aarch64/crash_reporting.S
+++ b/bl31/aarch64/crash_reporting.S
@@ -30,12 +30,13 @@
#include <arch.h>
#include <asm_macros.S>
#include <context.h>
+#include <cpu_data.h>
#include <plat_macros.S>
#include <platform_def.h>
- .globl get_crash_stack
.globl dump_state_and_die
.globl dump_intr_state_and_die
+ .globl init_crash_reporting
#if CRASH_REPORTING
/* ------------------------------------------------------
@@ -232,7 +233,7 @@
/* Check if tpidr is initialized */
cbz x0, infinite_loop
- ldr x0, [x0, #PTR_CACHE_CRASH_STACK_OFFSET]
+ ldr x0, [x0, #CPU_DATA_CRASH_STACK_OFFSET]
/* store the x30 and sp to stack */
str x30, [x0, #-(REG_SIZE)]!
mov x30, sp
@@ -281,19 +282,31 @@
#define PCPU_CRASH_STACK_SIZE 0x140
/* -----------------------------------------------------
- * void get_crash_stack (uint64_t mpidr) : This
- * function is used to allocate a small stack for
- * reporting unhandled exceptions
+ * Per-cpu crash stacks in normal memory.
* -----------------------------------------------------
*/
-func get_crash_stack
- mov x10, x30 // lr
- get_mp_stack pcpu_crash_stack, PCPU_CRASH_STACK_SIZE
- ret x10
+declare_stack pcpu_crash_stack, tzfw_normal_stacks, \
+ PCPU_CRASH_STACK_SIZE, PLATFORM_CORE_COUNT
/* -----------------------------------------------------
- * Per-cpu crash stacks in normal memory.
+ * Provides each CPU with a small stack for reporting
+ * unhandled exceptions, and stores the stack address
+ * in cpu_data
+ *
+ * This can be called without a runtime stack
+ * clobbers: x0 - x4
* -----------------------------------------------------
*/
-declare_stack pcpu_crash_stack, tzfw_normal_stacks, \
- PCPU_CRASH_STACK_SIZE, PLATFORM_CORE_COUNT
+func init_crash_reporting
+ mov x4, x30
+ mov x2, #0
+ adr x3, pcpu_crash_stack
+init_crash_loop:
+ mov x0, x2
+ bl _cpu_data_by_index
+ add x3, x3, #PCPU_CRASH_STACK_SIZE
+ str x3, [x0, #CPU_DATA_CRASH_STACK_OFFSET]
+ add x2, x2, #1
+ cmp x2, #PLATFORM_CORE_COUNT
+ b.lo init_crash_loop
+ ret x4
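
The init_crash_reporting loop above walks the linear CPU indices and records the top of each per-cpu crash stack in cpu_data (stacks grow downwards, hence the add before each store). A hedged C sketch of the same logic using the accessor macros from cpu_data.h (init_crash_reporting_c is a hypothetical name; the stack size mirrors the asm definition):

#include <stdint.h>
#include <cpu_data.h>
#include <platform_def.h>

#define PCPU_CRASH_STACK_SIZE	0x140	/* mirrors the definition above */

extern unsigned char pcpu_crash_stack[];	/* emitted by declare_stack above */

static void init_crash_reporting_c(void)
{
	for (uint32_t ix = 0; ix < PLATFORM_CORE_COUNT; ix++) {
		/* Store the top of each stack: base + (ix + 1) * size,
		 * matching the add before each str in the loop above. */
		uint64_t top = (uint64_t)pcpu_crash_stack +
			       (ix + 1) * PCPU_CRASH_STACK_SIZE;
		set_cpu_data_by_index(ix, crash_stack, top);
	}
}
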
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 99fc357..5555c31 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -30,14 +30,15 @@
BL31_SOURCES += bl31/bl31_main.c \
bl31/context_mgmt.c \
+ bl31/cpu_data_array.c \
bl31/runtime_svc.c \
bl31/interrupt_mgmt.c \
bl31/aarch64/bl31_arch_setup.c \
bl31/aarch64/bl31_entrypoint.S \
bl31/aarch64/context.S \
+ bl31/aarch64/cpu_data.S \
bl31/aarch64/runtime_exceptions.S \
- bl31/aarch64/crash_reporting.S \
- common/aarch64/early_exceptions.S \
+ bl31/aarch64/crash_reporting.S \
lib/aarch64/cpu_helpers.S \
lib/locks/bakery/bakery_lock.c \
lib/locks/exclusive/spinlock.S \
diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c
index 6765e60..6f88e65 100644
--- a/bl31/bl31_main.c
+++ b/bl31/bl31_main.c
@@ -71,7 +71,6 @@
******************************************************************************/
void bl31_main(void)
{
-
/* Perform remaining generic architectural setup from EL3 */
bl31_arch_setup();
@@ -89,17 +88,7 @@
/* Clean caches before re-entering normal world */
dcsw_op_all(DCCSW);
- /*
- * Use the more complex exception vectors now that context
- * management is setup. SP_EL3 should point to a 'cpu_context'
- * structure which has an exception stack allocated. The PSCI
- * service should have set the context.
- */
- assert(cm_get_context(NON_SECURE));
- cm_set_next_eret_context(NON_SECURE);
- cm_init_pcpu_ptr_cache();
- write_vbar_el3((uint64_t) runtime_exceptions);
- isb();
+ /* By default run the non-secure BL3-3 image next */
next_image_type = NON_SECURE;
/*
diff --git a/bl31/context_mgmt.c b/bl31/context_mgmt.c
index 122a0d4..67a6e03 100644
--- a/bl31/context_mgmt.c
+++ b/bl31/context_mgmt.c
@@ -35,24 +35,12 @@
#include <bl31.h>
#include <context.h>
#include <context_mgmt.h>
+#include <cpu_data.h>
#include <interrupt_mgmt.h>
#include <platform.h>
#include <platform_def.h>
#include <runtime_svc.h>
-/*******************************************************************************
- * Data structure which holds the pointers to non-secure and secure security
- * state contexts for each cpu. It is aligned to the cache line boundary to
- * allow efficient concurrent manipulation of these pointers on different cpus
- ******************************************************************************/
-typedef struct {
- void *ptr[2];
-} __aligned (CACHE_WRITEBACK_GRANULE) context_info_t;
-
-static context_info_t cm_context_info[PLATFORM_CORE_COUNT];
-
-/* The per_cpu_ptr_cache_t space allocation */
-static per_cpu_ptr_cache_t per_cpu_ptr_cache_space[PLATFORM_CORE_COUNT];
/*******************************************************************************
* Context management library initialisation routine. This library is used by
@@ -82,25 +70,9 @@
******************************************************************************/
void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state)
{
- uint32_t linear_id = platform_get_core_pos(mpidr);
-
- assert(security_state <= NON_SECURE);
-
- return cm_context_info[linear_id].ptr[security_state];
-}
-
-/*******************************************************************************
- * This function returns a pointer to the most recent 'cpu_context' structure
- * for the calling CPU that was set as the context for the specified security
- * state. NULL is returned if no such structure has been specified.
- ******************************************************************************/
-void *cm_get_context(uint32_t security_state)
-{
- uint32_t linear_id = platform_get_core_pos(read_mpidr());
-
assert(security_state <= NON_SECURE);
- return cm_context_info[linear_id].ptr[security_state];
+ return get_cpu_data_by_mpidr(mpidr, cpu_context[security_state]);
}
/*******************************************************************************
@@ -109,23 +81,12 @@
******************************************************************************/
void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_state)
{
- uint32_t linear_id = platform_get_core_pos(mpidr);
-
assert(security_state <= NON_SECURE);
- cm_context_info[linear_id].ptr[security_state] = context;
+ set_cpu_data_by_mpidr(mpidr, cpu_context[security_state], context);
}
/*******************************************************************************
- * This function sets the pointer to the current 'cpu_context' structure for the
- * specified security state for the calling CPU
- ******************************************************************************/
-void cm_set_context(void *context, uint32_t security_state)
-{
- cm_set_context_by_mpidr(read_mpidr(), context, security_state);
-}
-
-/*******************************************************************************
* The next four functions are used by runtime services to save and restore EL3
* and EL1 contexts on the 'cpu_context' structure for the specified security
* state.
@@ -295,34 +256,3 @@
"msr spsel, #0\n"
: : "r" (ctx));
}
-
-/************************************************************************
- * The following function is used to populate the per cpu pointer cache.
- * The pointer will be stored in the tpidr_el3 register.
- *************************************************************************/
-void cm_init_pcpu_ptr_cache()
-{
- unsigned long mpidr = read_mpidr();
- uint32_t linear_id = platform_get_core_pos(mpidr);
- per_cpu_ptr_cache_t *pcpu_ptr_cache;
-
- pcpu_ptr_cache = &per_cpu_ptr_cache_space[linear_id];
- assert(pcpu_ptr_cache);
-#if CRASH_REPORTING
- pcpu_ptr_cache->crash_stack = get_crash_stack(mpidr);
-#endif
-
- cm_set_pcpu_ptr_cache(pcpu_ptr_cache);
-}
-
-
-void cm_set_pcpu_ptr_cache(const void *pcpu_ptr)
-{
- write_tpidr_el3((unsigned long)pcpu_ptr);
-}
-
-void *cm_get_pcpu_ptr_cache(void)
-{
- return (void *)read_tpidr_el3();
-}
-
diff --git a/bl31/cpu_data_array.c b/bl31/cpu_data_array.c
new file mode 100644
index 0000000..b0042a1
--- /dev/null
+++ b/bl31/cpu_data_array.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <cassert.h>
+#include <cpu_data.h>
+#include <platform_def.h>
+
+/* verify assembler offsets match data structures */
+CASSERT(CPU_DATA_CRASH_STACK_OFFSET == __builtin_offsetof
+ (cpu_data_t, crash_stack),
+ assert_cpu_data_crash_stack_offset_mismatch);
+
+CASSERT((1 << CPU_DATA_LOG2SIZE) == sizeof(cpu_data_t),
+ assert_cpu_data_log2size_mismatch);
+
+/* The cpu_data_t space allocation */
+cpu_data_t percpu_data[PLATFORM_CORE_COUNT];
diff --git a/drivers/arm/pl011/pl011_console.c b/drivers/arm/pl011/pl011_console.c
index a26c00e..81897ca 100644
--- a/drivers/arm/pl011/pl011_console.c
+++ b/drivers/arm/pl011/pl011_console.c
@@ -71,7 +71,12 @@
int console_putc(int c)
{
- assert(uart_base);
+ /* If the console has not been initialized then return an error
+ * code. Asserting here would result in recursion (the assert
+ * failure path prints through the console) and stack exhaustion.
+ */
+ if (!uart_base)
+ return -1;
if (c == '\n') {
WAIT_UNTIL_UART_FREE(uart_base);
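
With this change, very early callers can treat console output as best-effort rather than fatal. A hedged usage sketch (not part of the patch; early_log_char is a hypothetical helper and the prototype is declared locally to keep the sketch self-contained):

int console_putc(int c);

static void early_log_char(int c)
{
	/* Before console_init() has run, uart_base is still zero, so the
	 * call reports failure instead of recursing through assert(). */
	if (console_putc(c) < 0) {
		/* Console not ready yet; drop the character. */
	}
}
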
diff --git a/include/bl31/context.h b/include/bl31/context.h
index 16cc744..c0230b8 100644
--- a/include/bl31/context.h
+++ b/include/bl31/context.h
@@ -185,14 +185,10 @@
#define CTX_FP_FPCR 0x208
#define CTX_FPREGS_END 0x210
-/******************************************************************************
- * Offsets for the per cpu cache implementation
- ******************************************************************************/
-#define PTR_CACHE_CRASH_STACK_OFFSET 0x0
-
#ifndef __ASSEMBLY__
#include <cassert.h>
+#include <platform_def.h> /* for CACHE_WRITEBACK_GRANULE */
#include <stdint.h>
/*
@@ -330,17 +326,6 @@
void fpregs_context_save(fp_regs_t *regs);
void fpregs_context_restore(fp_regs_t *regs);
-
-/* Per-CPU pointer cache of recently used pointers and also the crash stack
- * TODO: Add other commonly used variables to this (tf_issues#90)
- */
-typedef struct per_cpu_ptr_cache {
- uint64_t crash_stack;
-} per_cpu_ptr_cache_t;
-
-CASSERT(PTR_CACHE_CRASH_STACK_OFFSET == __builtin_offsetof\
- (per_cpu_ptr_cache_t, crash_stack), \
- assert_per_cpu_ptr_cache_crash_stack_offset_mismatch);
#undef CTX_SYSREG_ALL
#undef CTX_FP_ALL
diff --git a/include/bl31/context_mgmt.h b/include/bl31/context_mgmt.h
index 86bbc58..ade2fa1 100644
--- a/include/bl31/context_mgmt.h
+++ b/include/bl31/context_mgmt.h
@@ -31,6 +31,7 @@
#ifndef __CM_H__
#define __CM_H__
+#include <cpu_data.h>
#include <stdint.h>
/*******************************************************************************
@@ -38,11 +39,11 @@
******************************************************************************/
void cm_init(void);
void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state);
-void *cm_get_context(uint32_t security_state);
+static inline void *cm_get_context(uint32_t security_state);
void cm_set_context_by_mpidr(uint64_t mpidr,
void *context,
uint32_t security_state);
-void cm_set_context(void *context, uint32_t security_state);
+static inline void cm_set_context(void *context, uint32_t security_state);
void cm_el3_sysregs_context_save(uint32_t security_state);
void cm_el3_sysregs_context_restore(uint32_t security_state);
void cm_el1_sysregs_context_save(uint32_t security_state);
@@ -54,8 +55,32 @@
uint32_t bit_pos,
uint32_t value);
void cm_set_next_eret_context(uint32_t security_state);
-void cm_init_pcpu_ptr_cache();
-void cm_set_pcpu_ptr_cache(const void *pcpu_ptr);
-void *cm_get_pcpu_ptr_cache(void);
uint32_t cm_get_scr_el3(uint32_t security_state);
+
+/* Inline definitions */
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the calling CPU that was set as the context for the specified security
+ * state. NULL is returned if no such structure has been specified.
+ ******************************************************************************/
+void *cm_get_context(uint32_t security_state)
+{
+ assert(security_state <= NON_SECURE);
+
+ return get_cpu_data(cpu_context[security_state]);
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the calling CPU
+ ******************************************************************************/
+void cm_set_context(void *context, uint32_t security_state)
+{
+ assert(security_state <= NON_SECURE);
+
+ set_cpu_data(cpu_context[security_state], context);
+}
+
+
#endif /* __CM_H__ */
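
A hedged usage sketch of the now-inline context accessors (illustrative only; context_example is a hypothetical function, NON_SECURE is the existing security-state constant already used in this header, and the cpu_context_t is assumed to be allocated by the caller, as the PSCI and SPD code already does):

#include <assert.h>
#include <context.h>
#include <context_mgmt.h>

static void context_example(cpu_context_t *ns_ctx)
{
	/* Record the non-secure context for the calling CPU ... */
	cm_set_context(ns_ctx, NON_SECURE);

	/* ... and read it back via the cpu_data referenced by tpidr_el3. */
	assert(cm_get_context(NON_SECURE) == ns_ctx);
}
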
diff --git a/include/bl31/cpu_data.h b/include/bl31/cpu_data.h
new file mode 100644
index 0000000..5f45f14
--- /dev/null
+++ b/include/bl31/cpu_data.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CPU_DATA_H__
+#define __CPU_DATA_H__
+
+/* Offsets for the cpu_data structure */
+#define CPU_DATA_CRASH_STACK_OFFSET 0x10
+#define CPU_DATA_LOG2SIZE 6
+
+#ifndef __ASSEMBLY__
+
+#include <arch_helpers.h>
+#include <platform_def.h>
+#include <stdint.h>
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * Cache of frequently used per-cpu data:
+ * Pointers to non-secure and secure security state contexts
+ * Address of the crash stack
+ * It is aligned to the cache line boundary to allow efficient concurrent
+ * manipulation of these pointers on different cpus
+ *
+ * TODO: Add other commonly used variables to this (tf_issues#90)
+ *
+ * The data structure and the _cpu_data accessors should not be used directly
+ * by components that have per-cpu members. The member access macros should be
+ * used for this.
+ ******************************************************************************/
+
+typedef struct cpu_data {
+ void *cpu_context[2];
+ uint64_t crash_stack;
+} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;
+
+struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);
+struct cpu_data *_cpu_data_by_mpidr(uint64_t mpidr);
+
+/* Return the cpu_data structure for the current CPU. */
+static inline struct cpu_data *_cpu_data(void)
+{
+ return (cpu_data_t *)read_tpidr_el3();
+}
+
+
+/**************************************************************************
+ * APIs for initialising and accessing per-cpu data
+ *************************************************************************/
+
+void init_cpu_data_ptr(void);
+
+#define get_cpu_data(_m) _cpu_data()->_m
+#define set_cpu_data(_m, _v) _cpu_data()->_m = _v
+#define get_cpu_data_by_index(_ix, _m) _cpu_data_by_index(_ix)->_m
+#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = _v
+#define get_cpu_data_by_mpidr(_id, _m) _cpu_data_by_mpidr(_id)->_m
+#define set_cpu_data_by_mpidr(_id, _m, _v) _cpu_data_by_mpidr(_id)->_m = _v
+
+
+#endif /* __ASSEMBLY__ */
+#endif /* __CPU_DATA_H__ */
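
The member-access macros above expand to direct dereferences of the per-cpu structure located through TPIDR_EL3 (or through the lookup helpers for the by-index/by-mpidr variants). A hedged example of the expansion (illustrative only; cpu_data_macro_example is a hypothetical name):

#include <cpu_data.h>

static void cpu_data_macro_example(void)
{
	/* get_cpu_data(crash_stack) expands to _cpu_data()->crash_stack,
	 * i.e. a single load relative to tpidr_el3. */
	uint64_t top = get_cpu_data(crash_stack);

	/* set_cpu_data_by_index(1, crash_stack, v) expands to
	 * _cpu_data_by_index(1)->crash_stack = v. */
	set_cpu_data_by_index(1, crash_stack, top);
}
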
diff --git a/include/bl31/runtime_svc.h b/include/bl31/runtime_svc.h
index d7d88d4..f3543d4 100644
--- a/include/bl31/runtime_svc.h
+++ b/include/bl31/runtime_svc.h
@@ -267,7 +267,7 @@
void runtime_svc_init();
extern uint64_t __RT_SVC_DESCS_START__;
extern uint64_t __RT_SVC_DESCS_END__;
-uint64_t get_crash_stack(uint64_t mpidr);
-void runtime_exceptions(void);
+void init_crash_reporting(void);
+
#endif /*__ASSEMBLY__*/
#endif /* __RUNTIME_SVC_H__ */
diff --git a/plat/fvp/fvp_def.h b/plat/fvp/fvp_def.h
index 04ba611..59dcc90 100644
--- a/plat/fvp/fvp_def.h
+++ b/plat/fvp/fvp_def.h
@@ -137,7 +137,7 @@
#define SYS_LED_EC_MASK 0x1f
/* V2M sysid register bits */
-#define SYS_ID_REV_SHIFT 27
+#define SYS_ID_REV_SHIFT 28
#define SYS_ID_HBI_SHIFT 16
#define SYS_ID_BLD_SHIFT 12
#define SYS_ID_ARCH_SHIFT 8
diff --git a/plat/fvp/fvp_pm.c b/plat/fvp/fvp_pm.c
index d702643..03f06e7 100644
--- a/plat/fvp/fvp_pm.c
+++ b/plat/fvp/fvp_pm.c
@@ -290,7 +290,7 @@
int rc = PSCI_E_SUCCESS;
unsigned long linear_id, cpu_setup;
mailbox_t *fvp_mboxes;
- unsigned int gicd_base, gicc_base, reg_val, ectlr;
+ unsigned int gicd_base, gicc_base, ectlr;
switch (afflvl) {
@@ -354,17 +354,6 @@
/* TODO: This setup is needed only after a cold boot */
gic_pcpu_distif_setup(gicd_base);
- /* Allow access to the System counter timer module */
- reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT);
- reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT);
- reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT);
- mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(0), reg_val);
- mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(1), reg_val);
-
- reg_val = (1 << CNTNSAR_NS_SHIFT(0)) |
- (1 << CNTNSAR_NS_SHIFT(1));
- mmio_write_32(SYS_TIMCTL_BASE + CNTNSAR, reg_val);
-
break;
default:
diff --git a/services/std_svc/psci/psci_afflvl_on.c b/services/std_svc/psci/psci_afflvl_on.c
index 443e6af..e4d8f1f 100644
--- a/services/std_svc/psci/psci_afflvl_on.c
+++ b/services/std_svc/psci/psci_afflvl_on.c
@@ -373,17 +373,6 @@
bl31_arch_setup();
/*
- * Use the more complex exception vectors to enable SPD
- * initialisation. SP_EL3 should point to a 'cpu_context'
- * structure. The calling cpu should have set the
- * context already
- */
- assert(cm_get_context(NON_SECURE));
- cm_set_next_eret_context(NON_SECURE);
- cm_init_pcpu_ptr_cache();
- write_vbar_el3((uint64_t) runtime_exceptions);
-
- /*
* Call the cpu on finish handler registered by the Secure Payload
* Dispatcher to let it do any bookeeping. If the handler encounters an
* error, it's expected to assert within
diff --git a/services/std_svc/psci/psci_afflvl_suspend.c b/services/std_svc/psci/psci_afflvl_suspend.c
index a986e5c..9934310 100644
--- a/services/std_svc/psci/psci_afflvl_suspend.c
+++ b/services/std_svc/psci/psci_afflvl_suspend.c
@@ -491,16 +491,6 @@
rc = PSCI_E_SUCCESS;
/*
- * Use the more complex exception vectors to enable SPD
- * initialisation. SP_EL3 should point to a 'cpu_context'
- * structure. The non-secure context should have been
- * set on this cpu prior to suspension.
- */
- cm_set_next_eret_context(NON_SECURE);
- cm_init_pcpu_ptr_cache();
- write_vbar_el3((uint64_t) runtime_exceptions);
-
- /*
* Call the cpu suspend finish handler registered by the Secure Payload
* Dispatcher to let it do any bookeeping. If the handler encounters an
* error, it's expected to assert within
diff --git a/services/std_svc/psci/psci_entry.S b/services/std_svc/psci/psci_entry.S
index bc8d900..5628d79 100644
--- a/services/std_svc/psci/psci_entry.S
+++ b/services/std_svc/psci/psci_entry.S
@@ -61,12 +61,16 @@
adr x22, psci_afflvl_power_on_finish
/* ---------------------------------------------
- * Exceptions should not occur at this point.
- * Set VBAR in order to handle and report any
- * that do occur
+ * Initialise the per-cpu data pointer for this CPU
* ---------------------------------------------
*/
- adr x0, early_exceptions
+ bl init_cpu_data_ptr
+
+ /* ---------------------------------------------
+ * Set the exception vectors
+ * ---------------------------------------------
+ */
+ adr x0, runtime_exceptions
msr vbar_el3, x0
isb
diff --git a/services/std_svc/psci/psci_main.c b/services/std_svc/psci/psci_main.c
index c0866fb..2d7b018 100644
--- a/services/std_svc/psci/psci_main.c
+++ b/services/std_svc/psci/psci_main.c
@@ -221,50 +221,68 @@
void *handle,
uint64_t flags)
{
- uint64_t rc;
+ if (is_caller_secure(flags))
+ SMC_RET1(handle, SMC_UNK);
- switch (smc_fid) {
- case PSCI_VERSION:
- rc = psci_version();
- break;
+ if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
+ /* 32-bit PSCI function, clear top parameter bits */
- case PSCI_CPU_OFF:
- rc = __psci_cpu_off();
- break;
+ x1 = (uint32_t)x1;
+ x2 = (uint32_t)x2;
+ x3 = (uint32_t)x3;
- case PSCI_CPU_SUSPEND_AARCH64:
- case PSCI_CPU_SUSPEND_AARCH32:
- rc = __psci_cpu_suspend(x1, x2, x3);
- break;
+ switch (smc_fid) {
+ case PSCI_VERSION:
+ SMC_RET1(handle, psci_version());
- case PSCI_CPU_ON_AARCH64:
- case PSCI_CPU_ON_AARCH32:
- rc = psci_cpu_on(x1, x2, x3);
- break;
+ case PSCI_CPU_OFF:
+ SMC_RET1(handle, __psci_cpu_off());
- case PSCI_AFFINITY_INFO_AARCH32:
- case PSCI_AFFINITY_INFO_AARCH64:
- rc = psci_affinity_info(x1, x2);
- break;
+ case PSCI_CPU_SUSPEND_AARCH32:
+ SMC_RET1(handle, __psci_cpu_suspend(x1, x2, x3));
- case PSCI_MIG_AARCH32:
- case PSCI_MIG_AARCH64:
- rc = psci_migrate(x1);
- break;
+ case PSCI_CPU_ON_AARCH32:
+ SMC_RET1(handle, psci_cpu_on(x1, x2, x3));
- case PSCI_MIG_INFO_TYPE:
- rc = psci_migrate_info_type();
- break;
+ case PSCI_AFFINITY_INFO_AARCH32:
+ SMC_RET1(handle, psci_affinity_info(x1, x2));
- case PSCI_MIG_INFO_UP_CPU_AARCH32:
- case PSCI_MIG_INFO_UP_CPU_AARCH64:
- rc = psci_migrate_info_up_cpu();
- break;
+ case PSCI_MIG_AARCH32:
+ SMC_RET1(handle, psci_migrate(x1));
- default:
- rc = SMC_UNK;
- WARN("Unimplemented PSCI Call: 0x%x \n", smc_fid);
+ case PSCI_MIG_INFO_TYPE:
+ SMC_RET1(handle, psci_migrate_info_type());
+
+ case PSCI_MIG_INFO_UP_CPU_AARCH32:
+ SMC_RET1(handle, psci_migrate_info_up_cpu());
+
+ default:
+ break;
+ }
+ } else {
+ /* 64-bit PSCI function */
+
+ switch (smc_fid) {
+ case PSCI_CPU_SUSPEND_AARCH64:
+ SMC_RET1(handle, __psci_cpu_suspend(x1, x2, x3));
+
+ case PSCI_CPU_ON_AARCH64:
+ SMC_RET1(handle, psci_cpu_on(x1, x2, x3));
+
+ case PSCI_AFFINITY_INFO_AARCH64:
+ SMC_RET1(handle, psci_affinity_info(x1, x2));
+
+ case PSCI_MIG_AARCH64:
+ SMC_RET1(handle, psci_migrate(x1));
+
+ case PSCI_MIG_INFO_UP_CPU_AARCH64:
+ SMC_RET1(handle, psci_migrate_info_up_cpu());
+
+ default:
+ break;
+ }
}
- SMC_RET1(handle, rc);
+ WARN("Unimplemented PSCI Call: 0x%x \n", smc_fid);
+ SMC_RET1(handle, SMC_UNK);
}
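
The restructured handler dispatches SMC32 and SMC64 function IDs separately and truncates the parameters of 32-bit calls, since an AArch64 caller may leave stale data in the upper halves of x1-x3. A hedged sketch of the truncation the (uint32_t) casts perform (illustrative only; smc32_param is a hypothetical helper):

#include <stdint.h>

/* Zero-extend a parameter of an SMC32 call, as the casts in the 32-bit
 * branch above do. */
static uint64_t smc32_param(uint64_t x)
{
	return (uint32_t)x;	/* e.g. 0xffffffff80000001 -> 0x0000000080000001 */
}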