Simplify management of SCTLR_EL3 and SCTLR_EL1
This patch reworks the manner in which the M, A, C, SA, I, WXN & EE bits
of SCTLR_EL3 & SCTLR_EL1 are managed. The EE bit is cleared immediately
after reset in EL3. The I, A and SA bits are set next in EL3 and
immediately upon entry into S-EL1. These bits are no longer managed in
the blX_arch_setup() functions and no longer need to be saved and
restored. The M, WXN and optionally the C bit are set in the
enable_mmu_elX() function, on both the cold and warm boot paths.
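
As a rough illustration, the intended split can be sketched in C (the
helper name early_el3_sctlr_setup() is illustrative only; the real
early setup is performed in assembly in the BL entrypoints and in
psci_aff_common_finish_entry):

    #include <arch.h>
    #include <arch_helpers.h>

    static void early_el3_sctlr_setup(void)
    {
            unsigned long sctlr = read_sctlr_el3();

            /* Clear EE so that data accesses at EL3 are little-endian */
            sctlr &= ~SCTLR_EE_BIT;
            /* Enable the I-cache, alignment and SP alignment checks */
            sctlr |= SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT;
            write_sctlr_el3(sctlr);
            isb();
    }

The enable_mmu_elX() function then only needs to OR in SCTLR_M_BIT and
SCTLR_WXN_BIT (clearing SCTLR_C_BIT when the DISABLE_DCACHE flag is
passed), and does so on both the cold and warm boot paths.
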
Fixes ARM-software/tf-issues#226
Change-Id: Ie894d1a07b8697c116960d858cd138c50bc7a069
diff --git a/bl1/aarch64/bl1_arch_setup.c b/bl1/aarch64/bl1_arch_setup.c
index cf69ac7..eeaa24a 100644
--- a/bl1/aarch64/bl1_arch_setup.c
+++ b/bl1/aarch64/bl1_arch_setup.c
@@ -37,20 +37,11 @@
******************************************************************************/
void bl1_arch_setup(void)
{
- unsigned long tmp_reg = 0;
-
- /* Enable alignment checks */
- tmp_reg = read_sctlr_el3();
- tmp_reg |= (SCTLR_A_BIT | SCTLR_SA_BIT);
- write_sctlr_el3(tmp_reg);
- isb();
-
/*
* Set the next EL to be AArch64, route external abort and SError
* interrupts to EL3
*/
- tmp_reg = SCR_RES1_BITS | SCR_RW_BIT | SCR_EA_BIT;
- write_scr(tmp_reg);
+ write_scr_el3(SCR_RES1_BITS | SCR_RW_BIT | SCR_EA_BIT);
/*
* Enable SError and Debug exceptions
diff --git a/bl1/aarch64/bl1_entrypoint.S b/bl1/aarch64/bl1_entrypoint.S
index ac6d913..dd7d78f 100644
--- a/bl1/aarch64/bl1_entrypoint.S
+++ b/bl1/aarch64/bl1_entrypoint.S
@@ -44,7 +44,7 @@
func bl1_entrypoint
/* ---------------------------------------------
* Set the CPU endianness before doing anything
- * that might involve memory reads or writes
+ * that might involve memory reads or writes.
* ---------------------------------------------
*/
mrs x0, sctlr_el3
@@ -59,12 +59,14 @@
*/
bl cpu_reset_handler
- /* -------------------------------
- * Enable the instruction cache.
- * -------------------------------
+ /* ---------------------------------------------
+ * Enable the instruction cache, stack pointer
+ * and data access alignment checks
+ * ---------------------------------------------
*/
+ mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
mrs x0, sctlr_el3
- orr x0, x0, #SCTLR_I_BIT
+ orr x0, x0, x1
msr sctlr_el3, x0
isb
diff --git a/bl2/aarch64/bl2_entrypoint.S b/bl2/aarch64/bl2_entrypoint.S
index c615baf..6fcd040 100644
--- a/bl2/aarch64/bl2_entrypoint.S
+++ b/bl2/aarch64/bl2_entrypoint.S
@@ -65,11 +65,13 @@
msr vbar_el1, x0
/* ---------------------------------------------
- * Enable the instruction cache.
+ * Enable the instruction cache, stack pointer
+ * and data access alignment checks
* ---------------------------------------------
*/
+ mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
mrs x0, sctlr_el1
- orr x0, x0, #SCTLR_I_BIT
+ orr x0, x0, x1
msr sctlr_el1, x0
isb
diff --git a/bl31/aarch64/bl31_arch_setup.c b/bl31/aarch64/bl31_arch_setup.c
index e0382b3..f67881e 100644
--- a/bl31/aarch64/bl31_arch_setup.c
+++ b/bl31/aarch64/bl31_arch_setup.c
@@ -42,21 +42,12 @@
******************************************************************************/
void bl31_arch_setup(void)
{
- unsigned long tmp_reg = 0;
- uint64_t counter_freq;
-
- /* Enable alignment checks */
- tmp_reg = read_sctlr_el3();
- tmp_reg |= (SCTLR_A_BIT | SCTLR_SA_BIT);
- write_sctlr_el3(tmp_reg);
-
/*
* Route external abort and SError interrupts to EL3
* other SCR bits will be configured before exiting to a lower exception
* level
*/
- tmp_reg = SCR_RES1_BITS | SCR_EA_BIT;
- write_scr(tmp_reg);
+ write_scr_el3(SCR_RES1_BITS | SCR_EA_BIT);
/*
* Enable SError and Debug exceptions
@@ -65,6 +56,5 @@
enable_debug_exceptions();
/* Program the counter frequency */
- counter_freq = plat_get_syscnt_freq();
- write_cntfrq_el0(counter_freq);
+ write_cntfrq_el0(plat_get_syscnt_freq());
}
diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
index 1023983..69d2243 100644
--- a/bl31/aarch64/bl31_entrypoint.S
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -52,6 +52,15 @@
mov x20, x0
mov x21, x1
#else
+ /* ---------------------------------------------
+ * Set the CPU endianness before doing anything
+ * that might involve memory reads or writes.
+ * ---------------------------------------------
+ */
+ mrs x0, sctlr_el3
+ bic x0, x0, #SCTLR_EE_BIT
+ msr sctlr_el3, x0
+ isb
/* -----------------------------------------------------
* Perform any processor specific actions upon reset
@@ -61,14 +70,15 @@
*/
bl cpu_reset_handler
#endif
-
/* ---------------------------------------------
- * Enable the instruction cache.
+ * Enable the instruction cache, stack pointer
+ * and data access alignment checks
* ---------------------------------------------
*/
- mrs x1, sctlr_el3
- orr x1, x1, #SCTLR_I_BIT
- msr sctlr_el3, x1
+ mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+ mrs x0, sctlr_el3
+ orr x0, x0, x1
+ msr sctlr_el3, x0
isb
/* ---------------------------------------------
diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S
index 7a1797e..91b6128 100644
--- a/bl32/tsp/aarch64/tsp_entrypoint.S
+++ b/bl32/tsp/aarch64/tsp_entrypoint.S
@@ -89,11 +89,13 @@
msr vbar_el1, x0
/* ---------------------------------------------
- * Enable the instruction cache.
+ * Enable the instruction cache, stack pointer
+ * and data access alignment checks
* ---------------------------------------------
*/
+ mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
mrs x0, sctlr_el1
- orr x0, x0, #SCTLR_I_BIT
+ orr x0, x0, x1
msr sctlr_el1, x0
isb
@@ -196,11 +198,13 @@
msr vbar_el1, x0
/* ---------------------------------------------
- * Enable the instruction cache.
+ * Enable the instruction cache, stack pointer
+ * and data access alignment checks
* ---------------------------------------------
*/
+ mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
mrs x0, sctlr_el1
- orr x0, x0, #SCTLR_I_BIT
+ orr x0, x0, x1
msr sctlr_el1, x0
isb
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index ff91efc..0427208 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -129,11 +129,8 @@
#define SCTLR_A_BIT (1 << 1)
#define SCTLR_C_BIT (1 << 2)
#define SCTLR_SA_BIT (1 << 3)
-#define SCTLR_B_BIT (1 << 7)
-#define SCTLR_Z_BIT (1 << 11)
#define SCTLR_I_BIT (1 << 12)
#define SCTLR_WXN_BIT (1 << 19)
-#define SCTLR_EXCEPTION_BITS (0x3 << 6)
#define SCTLR_EE_BIT (1 << 25)
/* CPUECTLR definitions */
diff --git a/lib/aarch64/xlat_tables.c b/lib/aarch64/xlat_tables.c
index d494112..ddc9ba8 100644
--- a/lib/aarch64/xlat_tables.c
+++ b/lib/aarch64/xlat_tables.c
@@ -329,8 +329,7 @@
isb(); \
\
sctlr = read_sctlr_el##_el(); \
- sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT; \
- sctlr |= SCTLR_A_BIT; \
+ sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT; \
\
if (flags & DISABLE_DCACHE) \
sctlr &= ~SCTLR_C_BIT; \
diff --git a/services/std_svc/psci/psci_entry.S b/services/std_svc/psci/psci_entry.S
index 192b638..e9ad130 100644
--- a/services/std_svc/psci/psci_entry.S
+++ b/services/std_svc/psci/psci_entry.S
@@ -54,6 +54,25 @@
adr x23, psci_afflvl_suspend_finishers
psci_aff_common_finish_entry:
+#if !RESET_TO_BL31
+ /* ---------------------------------------------
+ * Enable the instruction cache, stack pointer
+ * and data access alignment checks. Also, set
+	 * the EL3 exception endianness to little-endian.
+ * It can be assumed that BL3-1 entrypoint code
+ * will do this when RESET_TO_BL31 is set. The
+ * same assumption cannot be made when another
+ * boot loader executes before BL3-1 in the warm
+ * boot path e.g. BL1.
+ * ---------------------------------------------
+ */
+ mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+ mrs x0, sctlr_el3
+ orr x0, x0, x1
+ msr sctlr_el3, x0
+ isb
+#endif
+
/* ---------------------------------------------
* Initialise the pcpu cache pointer for the CPU
* ---------------------------------------------