Merge pull request #1404 from soby-mathew/sm/bl_layout_change
Rename CVE-2017-5715 workaround sources and add mitigation for CVE-2018-3639
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 0e47ddf..a6c0a9a 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -61,8 +61,8 @@
endif
ifeq (${WORKAROUND_CVE_2017_5715},1)
-BL31_SOURCES += lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S \
- lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
+BL31_SOURCES += lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S \
+ lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
endif
BL31_LINKERFILE := bl31/bl31.ld.S
diff --git a/bl32/sp_min/sp_min.mk b/bl32/sp_min/sp_min.mk
index 193b1d5..6233299 100644
--- a/bl32/sp_min/sp_min.mk
+++ b/bl32/sp_min/sp_min.mk
@@ -29,8 +29,8 @@
endif
ifeq (${WORKAROUND_CVE_2017_5715},1)
-BL32_SOURCES += bl32/sp_min/workaround_cve_2017_5715_bpiall.S \
- bl32/sp_min/workaround_cve_2017_5715_icache_inv.S
+BL32_SOURCES += bl32/sp_min/wa_cve_2017_5715_bpiall.S \
+ bl32/sp_min/wa_cve_2017_5715_icache_inv.S
endif
BL32_LINKERFILE := bl32/sp_min/sp_min.ld.S
diff --git a/bl32/sp_min/workaround_cve_2017_5715_bpiall.S b/bl32/sp_min/wa_cve_2017_5715_bpiall.S
similarity index 94%
rename from bl32/sp_min/workaround_cve_2017_5715_bpiall.S
rename to bl32/sp_min/wa_cve_2017_5715_bpiall.S
index 5387cef..385f3d4 100644
--- a/bl32/sp_min/workaround_cve_2017_5715_bpiall.S
+++ b/bl32/sp_min/wa_cve_2017_5715_bpiall.S
@@ -6,9 +6,9 @@
#include <asm_macros.S>
- .globl workaround_bpiall_runtime_exceptions
+ .globl wa_cve_2017_5715_bpiall_vbar
-vector_base workaround_bpiall_runtime_exceptions
+vector_base wa_cve_2017_5715_bpiall_vbar
/* We encode the exception entry in the bottom 3 bits of SP */
add sp, sp, #1 /* Reset: 0b111 */
add sp, sp, #1 /* Undef: 0b110 */
diff --git a/bl32/sp_min/workaround_cve_2017_5715_icache_inv.S b/bl32/sp_min/wa_cve_2017_5715_icache_inv.S
similarity index 94%
rename from bl32/sp_min/workaround_cve_2017_5715_icache_inv.S
rename to bl32/sp_min/wa_cve_2017_5715_icache_inv.S
index 9102b02..d0a4625 100644
--- a/bl32/sp_min/workaround_cve_2017_5715_icache_inv.S
+++ b/bl32/sp_min/wa_cve_2017_5715_icache_inv.S
@@ -6,9 +6,9 @@
#include <asm_macros.S>
- .globl workaround_icache_inv_runtime_exceptions
+ .globl wa_cve_2017_5715_icache_inv_vbar
-vector_base workaround_icache_inv_runtime_exceptions
+vector_base wa_cve_2017_5715_icache_inv_vbar
/* We encode the exception entry in the bottom 3 bits of SP */
add sp, sp, #1 /* Reset: 0b111 */
add sp, sp, #1 /* Undef: 0b110 */
diff --git a/docs/cpu-specific-build-macros.rst b/docs/cpu-specific-build-macros.rst
index 65f6adb..c11f640 100644
--- a/docs/cpu-specific-build-macros.rst
+++ b/docs/cpu-specific-build-macros.rst
@@ -24,6 +24,17 @@
with the recommendation in the spec regarding workaround discovery.
Defaults to 1.
+- ``WORKAROUND_CVE_2018_3639``: Enables the security workaround for
+ `CVE-2018-3639`_. Defaults to 1. The TF-A project recommends keeping
+ the default value of 1 even on platforms that are unaffected by
+ CVE-2018-3639, in order to comply with the recommendation in the spec
+ regarding workaround discovery.
+
+- ``DYNAMIC_WORKAROUND_CVE_2018_3639``: Enables dynamic mitigation for
+ `CVE-2018-3639`_. This build option should be set to 1 if the target
+ platform contains at least one CPU that requires dynamic mitigation.
+ Defaults to 0.
+
CPU Errata Workarounds
----------------------
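As an aside on how these two options interact: `cpu-ops.mk` (later in this patch) rejects the inconsistent combination at build time. The make-level assertion is roughly equivalent to the following C-preprocessor sketch, shown for illustration only; the real check lives in the makefile:

```c
/*
 * Illustrative only: mirrors the make-level assertion added in
 * lib/cpus/cpu-ops.mk. Dynamic mitigation builds on top of the
 * static workaround, so enabling it alone is rejected.
 */
#if DYNAMIC_WORKAROUND_CVE_2018_3639 && !WORKAROUND_CVE_2018_3639
#error "WORKAROUND_CVE_2018_3639 must be 1 if DYNAMIC_WORKAROUND_CVE_2018_3639 is 1"
#endif
```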
diff --git a/include/lib/cpus/aarch32/cortex_a57.h b/include/lib/cpus/aarch32/cortex_a57.h
index 3fac9c7..18cabe1 100644
--- a/include/lib/cpus/aarch32/cortex_a57.h
+++ b/include/lib/cpus/aarch32/cortex_a57.h
@@ -44,6 +44,7 @@
#define CORTEX_A57_CPUACTLR p15, 0, c15
#define CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_DMB (ULL(1) << 59)
+#define CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_STORE (ULL(1) << 55)
#define CORTEX_A57_CPUACTLR_GRE_NGRE_AS_NGNRE (ULL(1) << 54)
#define CORTEX_A57_CPUACTLR_DIS_OVERREAD (ULL(1) << 52)
#define CORTEX_A57_CPUACTLR_NO_ALLOC_WBWA (ULL(1) << 49)
diff --git a/include/lib/cpus/aarch32/cortex_a72.h b/include/lib/cpus/aarch32/cortex_a72.h
index f7da1f0..0331ace 100644
--- a/include/lib/cpus/aarch32/cortex_a72.h
+++ b/include/lib/cpus/aarch32/cortex_a72.h
@@ -32,6 +32,7 @@
#define CORTEX_A72_CPUACTLR p15, 0, c15
#define CORTEX_A72_CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH (ULL(1) << 56)
+#define CORTEX_A72_CPUACTLR_DIS_LOAD_PASS_STORE (ULL(1) << 55)
#define CORTEX_A72_CPUACTLR_NO_ALLOC_WBWA (ULL(1) << 49)
#define CORTEX_A72_CPUACTLR_DCC_AS_DCCI (ULL(1) << 44)
#define CORTEX_A72_CPUACTLR_DIS_INSTR_PREFETCH (ULL(1) << 32)
diff --git a/include/lib/cpus/aarch64/cortex_a57.h b/include/lib/cpus/aarch64/cortex_a57.h
index 6c45c06..83ec934 100644
--- a/include/lib/cpus/aarch64/cortex_a57.h
+++ b/include/lib/cpus/aarch64/cortex_a57.h
@@ -44,6 +44,7 @@
#define CORTEX_A57_CPUACTLR_EL1 S3_1_C15_C2_0
#define CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_DMB (ULL(1) << 59)
+#define CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_STORE (ULL(1) << 55)
#define CORTEX_A57_CPUACTLR_EL1_GRE_NGRE_AS_NGNRE (ULL(1) << 54)
#define CORTEX_A57_CPUACTLR_EL1_DIS_OVERREAD (ULL(1) << 52)
#define CORTEX_A57_CPUACTLR_EL1_NO_ALLOC_WBWA (ULL(1) << 49)
diff --git a/include/lib/cpus/aarch64/cortex_a72.h b/include/lib/cpus/aarch64/cortex_a72.h
index 6fbb707..9f18470 100644
--- a/include/lib/cpus/aarch64/cortex_a72.h
+++ b/include/lib/cpus/aarch64/cortex_a72.h
@@ -32,6 +32,7 @@
#define CORTEX_A72_CPUACTLR_EL1 S3_1_C15_C2_0
#define CORTEX_A72_CPUACTLR_EL1_DISABLE_L1_DCACHE_HW_PFTCH (ULL(1) << 56)
+#define CORTEX_A72_CPUACTLR_EL1_DIS_LOAD_PASS_STORE (ULL(1) << 55)
#define CORTEX_A72_CPUACTLR_EL1_NO_ALLOC_WBWA (ULL(1) << 49)
#define CORTEX_A72_CPUACTLR_EL1_DCC_AS_DCCI (ULL(1) << 44)
#define CORTEX_A72_CPUACTLR_EL1_DIS_INSTR_PREFETCH (ULL(1) << 32)
diff --git a/include/lib/cpus/aarch64/cortex_a73.h b/include/lib/cpus/aarch64/cortex_a73.h
index faff5fe..4db0cae 100644
--- a/include/lib/cpus/aarch64/cortex_a73.h
+++ b/include/lib/cpus/aarch64/cortex_a73.h
@@ -22,4 +22,11 @@
******************************************************************************/
#define CORTEX_A73_L2MERRSR_EL1 S3_1_C15_C2_3 /* Instruction def. */
+/*******************************************************************************
+ * CPU implementation defined register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A73_IMP_DEF_REG1 S3_0_C15_C0_0
+
+#define CORTEX_A73_IMP_DEF_REG1_DISABLE_LOAD_PASS_STORE (1 << 3)
+
#endif /* __CORTEX_A73_H__ */
diff --git a/include/lib/cpus/aarch64/cortex_a75.h b/include/lib/cpus/aarch64/cortex_a75.h
index 20f0251..493c7d4 100644
--- a/include/lib/cpus/aarch64/cortex_a75.h
+++ b/include/lib/cpus/aarch64/cortex_a75.h
@@ -16,6 +16,13 @@
#define CORTEX_A75_CPUPWRCTLR_EL1 S3_0_C15_C2_7
#define CORTEX_A75_CPUECTLR_EL1 S3_0_C15_C1_4
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A75_CPUACTLR_EL1 S3_0_C15_C1_0
+
+#define CORTEX_A75_CPUACTLR_EL1_DISABLE_LOAD_PASS_STORE (1 << 35)
+
/* Definitions of register field mask in CORTEX_A75_CPUPWRCTLR_EL1 */
#define CORTEX_A75_CORE_PWRDN_EN_MASK 0x1
diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S
index bfe2449..cd8f3e8 100644
--- a/include/lib/cpus/aarch64/cpu_macros.S
+++ b/include/lib/cpus/aarch64/cpu_macros.S
@@ -18,6 +18,9 @@
/* Special constant to specify that CPU has no reset function */
#define CPU_NO_RESET_FUNC 0
+#define CPU_NO_EXTRA1_FUNC 0
+#define CPU_NO_EXTRA2_FUNC 0
+
/* Word size for 64-bit CPUs */
#define CPU_WORD_SIZE 8
@@ -48,6 +51,8 @@
#endif
CPU_EXTRA1_FUNC:
.space 8
+CPU_EXTRA2_FUNC:
+ .space 8
#ifdef IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
CPU_PWR_DWN_OPS: /* cpu_ops power down functions */
.space (8 * CPU_MAX_PWR_DWN_OPS)
@@ -119,6 +124,10 @@
* This is a placeholder for future per CPU operations. Currently,
* some CPUs use this entry to set a test function to determine if
* the workaround for CVE-2017-5715 needs to be applied or not.
+ * _extra2:
+ * This is a placeholder for future per CPU operations. Currently,
+ * some CPUs use this entry to set a function to disable the
+ * workaround for CVE-2018-3639.
* _power_down_ops:
* Comma-separated list of functions to perform power-down
* operations on the CPU. At least one, and up to
@@ -129,7 +138,7 @@
* used to handle power down at subsequent levels
*/
.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
- _extra1:req, _power_down_ops:vararg
+ _extra1:req, _extra2:req, _power_down_ops:vararg
.section cpu_ops, "a"
.align 3
.type cpu_ops_\_name, %object
@@ -138,6 +147,7 @@
.quad \_resetfunc
#endif
.quad \_extra1
+ .quad \_extra2
#ifdef IMAGE_BL31
1:
/* Insert list of functions */
@@ -196,14 +206,15 @@
.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
_power_down_ops:vararg
- declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, \
+ declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, \
\_power_down_ops
.endm
- .macro declare_cpu_ops_workaround_cve_2017_5715 _name:req, _midr:req, \
- _resetfunc:req, _extra1:req, _power_down_ops:vararg
+ .macro declare_cpu_ops_wa _name:req, _midr:req, \
+ _resetfunc:req, _extra1:req, _extra2:req, \
+ _power_down_ops:vararg
declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
- \_extra1, \_power_down_ops
+ \_extra1, \_extra2, \_power_down_ops
.endm
#if REPORT_ERRATA
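To visualize what `declare_cpu_ops_base` now emits, here is an approximate C mirror of one `cpu_ops` entry with the new `_extra2` slot. The struct and field names are illustrative only; the real layout is defined purely by the assembler offsets above:

```c
#include <stdint.h>

#ifndef CPU_MAX_PWR_DWN_OPS
#define CPU_MAX_PWR_DWN_OPS 2	/* assumption for this sketch */
#endif

/* Approximate C view of one cpu_ops entry; names are illustrative only. */
struct cpu_ops_sketch {
	uint64_t midr;		/* MIDR used to match this CPU */
	uint64_t reset_func;	/* present only in images with a reset path */
	uint64_t extra1;	/* CVE-2017-5715 "is the workaround needed?" check */
	uint64_t extra2;	/* new: CVE-2018-3639 mitigation-disable function */
	uint64_t pwr_dwn_ops[CPU_MAX_PWR_DWN_OPS];	/* BL31 only */
	/* errata-reporting fields follow when REPORT_ERRATA=1 */
};
```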
diff --git a/include/lib/cpus/wa_cve_2017_5715.h b/include/lib/cpus/wa_cve_2017_5715.h
new file mode 100644
index 0000000..0a65a56
--- /dev/null
+++ b/include/lib/cpus/wa_cve_2017_5715.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __WA_CVE_2017_5715_H__
+#define __WA_CVE_2017_5715_H__
+
+int check_wa_cve_2017_5715(void);
+
+#endif /* __WA_CVE_2017_5715_H__ */
diff --git a/include/lib/cpus/wa_cve_2018_3639.h b/include/lib/cpus/wa_cve_2018_3639.h
new file mode 100644
index 0000000..36546f7
--- /dev/null
+++ b/include/lib/cpus/wa_cve_2018_3639.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __WA_CVE_2018_3639_H__
+#define __WA_CVE_2018_3639_H__
+
+void *wa_cve_2018_3639_get_disable_ptr(void);
+
+#endif /* __WA_CVE_2018_3639_H__ */
diff --git a/include/lib/cpus/workaround_cve_2017_5715.h b/include/lib/cpus/workaround_cve_2017_5715.h
deleted file mode 100644
index e837a67..0000000
--- a/include/lib/cpus/workaround_cve_2017_5715.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef __WORKAROUND_CVE_2017_5715_H__
-#define __WORKAROUND_CVE_2017_5715_H__
-
-int check_workaround_cve_2017_5715(void);
-
-#endif /* __WORKAROUND_CVE_2017_5715_H__ */
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
index cdd74a3..a4f3ea1 100644
--- a/include/lib/el3_runtime/aarch64/context.h
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -128,8 +128,8 @@
* Constants that allow assembler code to access members of and the 'fp_regs'
* structure at their correct offsets.
******************************************************************************/
-#if CTX_INCLUDE_FPREGS
#define CTX_FPREGS_OFFSET (CTX_SYSREGS_OFFSET + CTX_SYSREGS_END)
+#if CTX_INCLUDE_FPREGS
#define CTX_FP_Q0 U(0x0)
#define CTX_FP_Q1 U(0x10)
#define CTX_FP_Q2 U(0x20)
@@ -170,8 +170,14 @@
#else
#define CTX_FPREGS_END U(0x210) /* Align to the next 16 byte boundary */
#endif
+#else
+#define CTX_FPREGS_END U(0)
#endif
+#define CTX_CVE_2018_3639_OFFSET (CTX_FPREGS_OFFSET + CTX_FPREGS_END)
+#define CTX_CVE_2018_3639_DISABLE U(0)
+#define CTX_CVE_2018_3639_END U(0x10) /* Align to the next 16 byte boundary */
+
#ifndef __ASSEMBLY__
#include <cassert.h>
@@ -195,6 +201,7 @@
#define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT)
#endif
#define CTX_EL3STATE_ALL (CTX_EL3STATE_END >> DWORD_SHIFT)
+#define CTX_CVE_2018_3639_ALL (CTX_CVE_2018_3639_END >> DWORD_SHIFT)
/*
* AArch64 general purpose register context structure. Usually x0-x18,
@@ -227,6 +234,9 @@
*/
DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL);
+/* Function pointer used by CVE-2018-3639 dynamic mitigation */
+DEFINE_REG_STRUCT(cve_2018_3639, CTX_CVE_2018_3639_ALL);
+
/*
* Macros to access members of any of the above structures using their
* offsets
@@ -251,6 +261,7 @@
#if CTX_INCLUDE_FPREGS
fp_regs_t fpregs_ctx;
#endif
+ cve_2018_3639_t cve_2018_3639_ctx;
} cpu_context_t;
/* Macros to access members of the 'cpu_context_t' structure */
@@ -276,6 +287,8 @@
#endif
CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx), \
assert_core_context_el3state_offset_mismatch);
+CASSERT(CTX_CVE_2018_3639_OFFSET == __builtin_offsetof(cpu_context_t, cve_2018_3639_ctx), \
+ assert_core_context_cve_2018_3639_offset_mismatch);
/*
* Helper macro to set the general purpose registers that correspond to
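A short sketch of how C code could seed the new per-context word, using the `write_ctx_reg()` helper this header already provides. `get_cve_2018_3639_ctx()` is a hypothetical accessor named after the existing `get_el3state_ctx()` pattern and is not part of this patch:

```c
/* Hypothetical accessor, following the get_el3state_ctx(h) pattern. */
#define get_cve_2018_3639_ctx(h) (&((cpu_context_t *) h)->cve_2018_3639_ctx)

static void seed_cve_2018_3639_state(cpu_context_t *ctx, void (*disable_fn)(void))
{
	/* A value of 0 means "nothing to restore"; el3_exit then skips the call. */
	write_ctx_reg(get_cve_2018_3639_ctx(ctx), CTX_CVE_2018_3639_DISABLE,
		      (uint64_t) disable_fn);
}
```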
diff --git a/include/services/arm_arch_svc.h b/include/services/arm_arch_svc.h
index 2961601..0d2f477 100644
--- a/include/services/arm_arch_svc.h
+++ b/include/services/arm_arch_svc.h
@@ -10,5 +10,8 @@
#define SMCCC_VERSION U(0x80000000)
#define SMCCC_ARCH_FEATURES U(0x80000001)
#define SMCCC_ARCH_WORKAROUND_1 U(0x80008000)
+#define SMCCC_ARCH_WORKAROUND_2 U(0x80007FFF)
+
+#define SMCCC_ARCH_NOT_REQUIRED -2
#endif /* __ARM_ARCH_SVC_H__ */
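For reference, the discovery contract these constants support (as implemented by `arm_arch_svc_setup.c` later in this patch) can be summarized with an illustrative enum; the enumerator names are not part of the patch:

```c
/* Illustrative summary of SMCCC_ARCH_FEATURES(SMCCC_ARCH_WORKAROUND_2) results. */
enum wa2_feature_result {
	WA2_DYNAMIC_MITIGATION_REQUIRED = 0,	/* caller must use SMCCC_ARCH_WORKAROUND_2 */
	WA2_SAFE_BUT_NOT_NEEDED = 1,		/* this PE is unaffected or statically mitigated */
	WA2_NOT_REQUIRED = SMCCC_ARCH_NOT_REQUIRED,	/* -2: unaffected or permanently mitigated */
};
```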
diff --git a/lib/cpus/aarch32/cortex_a57.S b/lib/cpus/aarch32/cortex_a57.S
index f446bff..dff86be 100644
--- a/lib/cpus/aarch32/cortex_a57.S
+++ b/lib/cpus/aarch32/cortex_a57.S
@@ -337,6 +337,15 @@
bx lr
endfunc check_errata_cve_2017_5715
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+ mov r0, #ERRATA_APPLIES
+#else
+ mov r0, #ERRATA_MISSING
+#endif
+ bx lr
+endfunc check_errata_cve_2018_3639
+
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A57.
* Shall clobber: r0-r6
@@ -392,6 +401,14 @@
bl errata_a57_859972_wa
#endif
+#if WORKAROUND_CVE_2018_3639
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_STORE
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ isb
+ dsb sy
+#endif
+
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
@@ -525,6 +542,7 @@
report_errata ERRATA_A57_833471, cortex_a57, 833471
report_errata ERRATA_A57_859972, cortex_a57, 859972
report_errata WORKAROUND_CVE_2017_5715, cortex_a57, cve_2017_5715
+ report_errata WORKAROUND_CVE_2018_3639, cortex_a57, cve_2018_3639
pop {r12, lr}
bx lr
diff --git a/lib/cpus/aarch32/cortex_a72.S b/lib/cpus/aarch32/cortex_a72.S
index 56e91f5..3bc3388 100644
--- a/lib/cpus/aarch32/cortex_a72.S
+++ b/lib/cpus/aarch32/cortex_a72.S
@@ -92,6 +92,15 @@
bx lr
endfunc check_errata_cve_2017_5715
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+ mov r0, #ERRATA_APPLIES
+#else
+ mov r0, #ERRATA_MISSING
+#endif
+ bx lr
+endfunc check_errata_cve_2018_3639
+
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A72.
* -------------------------------------------------
@@ -105,6 +114,15 @@
mov r0, r4
bl errata_a72_859971_wa
#endif
+
+#if WORKAROUND_CVE_2018_3639
+ ldcopr16 r0, r1, CORTEX_A72_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A72_CPUACTLR_DIS_LOAD_PASS_STORE
+ stcopr16 r0, r1, CORTEX_A72_CPUACTLR
+ isb
+ dsb sy
+#endif
+
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
@@ -241,6 +259,7 @@
*/
report_errata ERRATA_A72_859971, cortex_a72, 859971
report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715
+ report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639
pop {r12, lr}
bx lr
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index 4d072e1..07fadd1 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -337,6 +337,15 @@
ret
endfunc check_errata_cve_2017_5715
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2018_3639
+
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A57.
* Shall clobber: x0-x19
@@ -393,10 +402,18 @@
#endif
#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
- adr x0, workaround_mmu_runtime_exceptions
+ adr x0, wa_cve_2017_5715_mmu_vbar
msr vbar_el3, x0
#endif
+#if WORKAROUND_CVE_2018_3639
+ mrs x0, CORTEX_A57_CPUACTLR_EL1
+ orr x0, x0, #CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_STORE
+ msr CORTEX_A57_CPUACTLR_EL1, x0
+ isb
+ dsb sy
+#endif
+
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
@@ -528,6 +545,7 @@
report_errata ERRATA_A57_833471, cortex_a57, 833471
report_errata ERRATA_A57_859972, cortex_a57, 859972
report_errata WORKAROUND_CVE_2017_5715, cortex_a57, cve_2017_5715
+ report_errata WORKAROUND_CVE_2018_3639, cortex_a57, cve_2018_3639
ldp x8, x30, [sp], #16
ret
@@ -555,8 +573,9 @@
ret
endfunc cortex_a57_cpu_reg_dump
-declare_cpu_ops_workaround_cve_2017_5715 cortex_a57, CORTEX_A57_MIDR, \
+declare_cpu_ops_wa cortex_a57, CORTEX_A57_MIDR, \
cortex_a57_reset_func, \
check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
cortex_a57_core_pwr_dwn, \
cortex_a57_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index 29fa77b..bb9381d 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -110,6 +110,15 @@
ret
endfunc check_errata_cve_2017_5715
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2018_3639
+
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A72.
* -------------------------------------------------
@@ -126,11 +135,19 @@
#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
cpu_check_csv2 x0, 1f
- adr x0, workaround_mmu_runtime_exceptions
+ adr x0, wa_cve_2017_5715_mmu_vbar
msr vbar_el3, x0
1:
#endif
+#if WORKAROUND_CVE_2018_3639
+ mrs x0, CORTEX_A72_CPUACTLR_EL1
+ orr x0, x0, #CORTEX_A72_CPUACTLR_EL1_DIS_LOAD_PASS_STORE
+ msr CORTEX_A72_CPUACTLR_EL1, x0
+ isb
+ dsb sy
+#endif
+
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
@@ -265,6 +282,7 @@
*/
report_errata ERRATA_A72_859971, cortex_a72, 859971
report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715
+ report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639
ldp x8, x30, [sp], #16
ret
@@ -292,8 +310,9 @@
ret
endfunc cortex_a72_cpu_reg_dump
-declare_cpu_ops_workaround_cve_2017_5715 cortex_a72, CORTEX_A72_MIDR, \
+declare_cpu_ops_wa cortex_a72, CORTEX_A72_MIDR, \
cortex_a72_reset_func, \
check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
cortex_a72_core_pwr_dwn, \
cortex_a72_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
index 0a961ea..d595f12 100644
--- a/lib/cpus/aarch64/cortex_a73.S
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -38,11 +38,18 @@
func cortex_a73_reset_func
#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
cpu_check_csv2 x0, 1f
- adr x0, workaround_bpiall_vbar0_runtime_exceptions
+ adr x0, wa_cve_2017_5715_bpiall_vbar
msr vbar_el3, x0
1:
#endif
+#if WORKAROUND_CVE_2018_3639
+ mrs x0, CORTEX_A73_IMP_DEF_REG1
+ orr x0, x0, #CORTEX_A73_IMP_DEF_REG1_DISABLE_LOAD_PASS_STORE
+ msr CORTEX_A73_IMP_DEF_REG1, x0
+ isb
+#endif
+
/* ---------------------------------------------
* Enable the SMP bit.
* Clobbers : x0
@@ -129,6 +136,15 @@
ret
endfunc check_errata_cve_2017_5715
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2018_3639
+
#if REPORT_ERRATA
/*
* Errata printing function for Cortex A75. Must follow AAPCS.
@@ -144,6 +160,7 @@
* checking functions of each errata.
*/
report_errata WORKAROUND_CVE_2017_5715, cortex_a73, cve_2017_5715
+ report_errata WORKAROUND_CVE_2018_3639, cortex_a73, cve_2018_3639
ldp x8, x30, [sp], #16
ret
@@ -170,8 +187,9 @@
ret
endfunc cortex_a73_cpu_reg_dump
-declare_cpu_ops_workaround_cve_2017_5715 cortex_a73, CORTEX_A73_MIDR, \
+declare_cpu_ops_wa cortex_a73, CORTEX_A73_MIDR, \
cortex_a73_reset_func, \
check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
cortex_a73_core_pwr_dwn, \
cortex_a73_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
index 288f5af..20ec32c 100644
--- a/lib/cpus/aarch64/cortex_a75.S
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -13,11 +13,18 @@
func cortex_a75_reset_func
#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
cpu_check_csv2 x0, 1f
- adr x0, workaround_bpiall_vbar0_runtime_exceptions
+ adr x0, wa_cve_2017_5715_bpiall_vbar
msr vbar_el3, x0
1:
#endif
+#if WORKAROUND_CVE_2018_3639
+ mrs x0, CORTEX_A75_CPUACTLR_EL1
+ orr x0, x0, #CORTEX_A75_CPUACTLR_EL1_DISABLE_LOAD_PASS_STORE
+ msr CORTEX_A75_CPUACTLR_EL1, x0
+ isb
+#endif
+
#if ENABLE_AMU
/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
mrs x0, actlr_el3
@@ -57,6 +64,15 @@
ret
endfunc check_errata_cve_2017_5715
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2018_3639
+
/* ---------------------------------------------
* HW will do the cache maintenance while powering down
* ---------------------------------------------
@@ -88,6 +104,7 @@
* checking functions of each errata.
*/
report_errata WORKAROUND_CVE_2017_5715, cortex_a75, cve_2017_5715
+ report_errata WORKAROUND_CVE_2018_3639, cortex_a75, cve_2018_3639
ldp x8, x30, [sp], #16
ret
@@ -113,7 +130,8 @@
ret
endfunc cortex_a75_cpu_reg_dump
-declare_cpu_ops_workaround_cve_2017_5715 cortex_a75, CORTEX_A75_MIDR, \
+declare_cpu_ops_wa cortex_a75, CORTEX_A75_MIDR, \
cortex_a75_reset_func, \
check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
cortex_a75_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index 9f13ed2..69ece8f 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -285,7 +285,7 @@
#endif
/*
- * int check_workaround_cve_2017_5715(void);
+ * int check_wa_cve_2017_5715(void);
*
* This function returns:
* - ERRATA_APPLIES when firmware mitigation is required.
@@ -296,8 +296,8 @@
* NOTE: Must be called only after cpu_ops have been initialized
* in per-CPU data.
*/
- .globl check_workaround_cve_2017_5715
-func check_workaround_cve_2017_5715
+ .globl check_wa_cve_2017_5715
+func check_wa_cve_2017_5715
mrs x0, tpidr_el3
#if ENABLE_ASSERTIONS
cmp x0, #0
@@ -315,4 +315,28 @@
1:
mov x0, #ERRATA_NOT_APPLIES
ret
-endfunc check_workaround_cve_2017_5715
+endfunc check_wa_cve_2017_5715
+
+/*
+ * void *wa_cve_2018_3639_get_disable_ptr(void);
+ *
+ * Returns a function pointer which is used to disable mitigation
+ * for CVE-2018-3639.
+ * The function pointer is only returned on cores that employ
+ * dynamic mitigation. If the core uses static mitigation or is
+ * unaffected by CVE-2018-3639 this function returns NULL.
+ *
+ * NOTE: Must be called only after cpu_ops have been initialized
+ * in per-CPU data.
+ */
+ .globl wa_cve_2018_3639_get_disable_ptr
+func wa_cve_2018_3639_get_disable_ptr
+ mrs x0, tpidr_el3
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr x0, [x0, #CPU_DATA_CPU_OPS_PTR]
+ ldr x0, [x0, #CPU_EXTRA2_FUNC]
+ ret
+endfunc wa_cve_2018_3639_get_disable_ptr
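A minimal C sketch of consuming this getter, e.g. from BL31 context-management code; the surrounding function is hypothetical, and only `wa_cve_2018_3639_get_disable_ptr()` itself is defined by this patch:

```c
#include <wa_cve_2018_3639.h>

static void run_ssbd_disable_if_dynamic(void)
{
	void *fn = wa_cve_2018_3639_get_disable_ptr();

	/* NULL: this CPU is unaffected or statically mitigated; nothing to do. */
	if (fn != NULL)
		((void (*)(void)) fn)();	/* the same call el3_exit makes via "blr x17" */
}
```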
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
similarity index 67%
rename from lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
rename to lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
index cd82497..8437155 100644
--- a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
+++ b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
@@ -9,13 +9,13 @@
#include <asm_macros.S>
#include <context.h>
- .globl workaround_bpiall_vbar0_runtime_exceptions
+ .globl wa_cve_2017_5715_bpiall_vbar
#define EMIT_BPIALL 0xee070fd5
#define EMIT_SMC 0xe1600070
#define ESR_EL3_A64_SMC0 0x5e000000
- .macro enter_workaround _from_vector
+ .macro apply_cve_2017_5715_wa _from_vector
/*
* Save register state to enable a call to AArch32 S-EL1 and return
* Identify the original calling vector in w2 (==_from_vector)
@@ -66,7 +66,7 @@
movz w8, SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE, SPSR_AIF_MASK)
/* Switch EL3 exception vectors while the workaround is executing. */
- adr x9, workaround_bpiall_vbar1_runtime_exceptions
+ adr x9, wa_cve_2017_5715_bpiall_ret_vbar
/* Setup SCTLR_EL1 with MMU off and I$ on */
ldr x10, stub_sel1_sctlr
@@ -93,13 +93,13 @@
* is not enabled, the existing runtime exception vector table is used.
* ---------------------------------------------------------------------
*/
-vector_base workaround_bpiall_vbar0_runtime_exceptions
+vector_base wa_cve_2017_5715_bpiall_vbar
/* ---------------------------------------------------------------------
* Current EL with SP_EL0 : 0x0 - 0x200
* ---------------------------------------------------------------------
*/
-vector_entry workaround_bpiall_vbar0_sync_exception_sp_el0
+vector_entry bpiall_sync_exception_sp_el0
b sync_exception_sp_el0
nop /* to force 8 byte alignment for the following stub */
@@ -114,79 +114,79 @@
.word EMIT_BPIALL
.word EMIT_SMC
- check_vector_size workaround_bpiall_vbar0_sync_exception_sp_el0
+ check_vector_size bpiall_sync_exception_sp_el0
-vector_entry workaround_bpiall_vbar0_irq_sp_el0
+vector_entry bpiall_irq_sp_el0
b irq_sp_el0
- check_vector_size workaround_bpiall_vbar0_irq_sp_el0
+ check_vector_size bpiall_irq_sp_el0
-vector_entry workaround_bpiall_vbar0_fiq_sp_el0
+vector_entry bpiall_fiq_sp_el0
b fiq_sp_el0
- check_vector_size workaround_bpiall_vbar0_fiq_sp_el0
+ check_vector_size bpiall_fiq_sp_el0
-vector_entry workaround_bpiall_vbar0_serror_sp_el0
+vector_entry bpiall_serror_sp_el0
b serror_sp_el0
- check_vector_size workaround_bpiall_vbar0_serror_sp_el0
+ check_vector_size bpiall_serror_sp_el0
/* ---------------------------------------------------------------------
* Current EL with SP_ELx: 0x200 - 0x400
* ---------------------------------------------------------------------
*/
-vector_entry workaround_bpiall_vbar0_sync_exception_sp_elx
+vector_entry bpiall_sync_exception_sp_elx
b sync_exception_sp_elx
- check_vector_size workaround_bpiall_vbar0_sync_exception_sp_elx
+ check_vector_size bpiall_sync_exception_sp_elx
-vector_entry workaround_bpiall_vbar0_irq_sp_elx
+vector_entry bpiall_irq_sp_elx
b irq_sp_elx
- check_vector_size workaround_bpiall_vbar0_irq_sp_elx
+ check_vector_size bpiall_irq_sp_elx
-vector_entry workaround_bpiall_vbar0_fiq_sp_elx
+vector_entry bpiall_fiq_sp_elx
b fiq_sp_elx
- check_vector_size workaround_bpiall_vbar0_fiq_sp_elx
+ check_vector_size bpiall_fiq_sp_elx
-vector_entry workaround_bpiall_vbar0_serror_sp_elx
+vector_entry bpiall_serror_sp_elx
b serror_sp_elx
- check_vector_size workaround_bpiall_vbar0_serror_sp_elx
+ check_vector_size bpiall_serror_sp_elx
/* ---------------------------------------------------------------------
* Lower EL using AArch64 : 0x400 - 0x600
* ---------------------------------------------------------------------
*/
-vector_entry workaround_bpiall_vbar0_sync_exception_aarch64
- enter_workaround 1
- check_vector_size workaround_bpiall_vbar0_sync_exception_aarch64
+vector_entry bpiall_sync_exception_aarch64
+ apply_cve_2017_5715_wa 1
+ check_vector_size bpiall_sync_exception_aarch64
-vector_entry workaround_bpiall_vbar0_irq_aarch64
- enter_workaround 2
- check_vector_size workaround_bpiall_vbar0_irq_aarch64
+vector_entry bpiall_irq_aarch64
+ apply_cve_2017_5715_wa 2
+ check_vector_size bpiall_irq_aarch64
-vector_entry workaround_bpiall_vbar0_fiq_aarch64
- enter_workaround 4
- check_vector_size workaround_bpiall_vbar0_fiq_aarch64
+vector_entry bpiall_fiq_aarch64
+ apply_cve_2017_5715_wa 4
+ check_vector_size bpiall_fiq_aarch64
-vector_entry workaround_bpiall_vbar0_serror_aarch64
- enter_workaround 8
- check_vector_size workaround_bpiall_vbar0_serror_aarch64
+vector_entry bpiall_serror_aarch64
+ apply_cve_2017_5715_wa 8
+ check_vector_size bpiall_serror_aarch64
/* ---------------------------------------------------------------------
* Lower EL using AArch32 : 0x600 - 0x800
* ---------------------------------------------------------------------
*/
-vector_entry workaround_bpiall_vbar0_sync_exception_aarch32
- enter_workaround 1
- check_vector_size workaround_bpiall_vbar0_sync_exception_aarch32
+vector_entry bpiall_sync_exception_aarch32
+ apply_cve_2017_5715_wa 1
+ check_vector_size bpiall_sync_exception_aarch32
-vector_entry workaround_bpiall_vbar0_irq_aarch32
- enter_workaround 2
- check_vector_size workaround_bpiall_vbar0_irq_aarch32
+vector_entry bpiall_irq_aarch32
+ apply_cve_2017_5715_wa 2
+ check_vector_size bpiall_irq_aarch32
-vector_entry workaround_bpiall_vbar0_fiq_aarch32
- enter_workaround 4
- check_vector_size workaround_bpiall_vbar0_fiq_aarch32
+vector_entry bpiall_fiq_aarch32
+ apply_cve_2017_5715_wa 4
+ check_vector_size bpiall_fiq_aarch32
-vector_entry workaround_bpiall_vbar0_serror_aarch32
- enter_workaround 8
- check_vector_size workaround_bpiall_vbar0_serror_aarch32
+vector_entry bpiall_serror_aarch32
+ apply_cve_2017_5715_wa 8
+ check_vector_size bpiall_serror_aarch32
/* ---------------------------------------------------------------------
* This vector table is used while the workaround is executing. It
@@ -195,73 +195,73 @@
* EL3 state before proceeding with the normal runtime exception vector.
* ---------------------------------------------------------------------
*/
-vector_base workaround_bpiall_vbar1_runtime_exceptions
+vector_base wa_cve_2017_5715_bpiall_ret_vbar
/* ---------------------------------------------------------------------
* Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED)
* ---------------------------------------------------------------------
*/
-vector_entry workaround_bpiall_vbar1_sync_exception_sp_el0
+vector_entry bpiall_ret_sync_exception_sp_el0
b report_unhandled_exception
- check_vector_size workaround_bpiall_vbar1_sync_exception_sp_el0
+ check_vector_size bpiall_ret_sync_exception_sp_el0
-vector_entry workaround_bpiall_vbar1_irq_sp_el0
+vector_entry bpiall_ret_irq_sp_el0
b report_unhandled_interrupt
- check_vector_size workaround_bpiall_vbar1_irq_sp_el0
+ check_vector_size bpiall_ret_irq_sp_el0
-vector_entry workaround_bpiall_vbar1_fiq_sp_el0
+vector_entry bpiall_ret_fiq_sp_el0
b report_unhandled_interrupt
- check_vector_size workaround_bpiall_vbar1_fiq_sp_el0
+ check_vector_size bpiall_ret_fiq_sp_el0
-vector_entry workaround_bpiall_vbar1_serror_sp_el0
+vector_entry bpiall_ret_serror_sp_el0
b report_unhandled_exception
- check_vector_size workaround_bpiall_vbar1_serror_sp_el0
+ check_vector_size bpiall_ret_serror_sp_el0
/* ---------------------------------------------------------------------
* Current EL with SP_ELx: 0x200 - 0x400 (UNUSED)
* ---------------------------------------------------------------------
*/
-vector_entry workaround_bpiall_vbar1_sync_exception_sp_elx
+vector_entry bpiall_ret_sync_exception_sp_elx
b report_unhandled_exception
- check_vector_size workaround_bpiall_vbar1_sync_exception_sp_elx
+ check_vector_size bpiall_ret_sync_exception_sp_elx
-vector_entry workaround_bpiall_vbar1_irq_sp_elx
+vector_entry bpiall_ret_irq_sp_elx
b report_unhandled_interrupt
- check_vector_size workaround_bpiall_vbar1_irq_sp_elx
+ check_vector_size bpiall_ret_irq_sp_elx
-vector_entry workaround_bpiall_vbar1_fiq_sp_elx
+vector_entry bpiall_ret_fiq_sp_elx
b report_unhandled_interrupt
- check_vector_size workaround_bpiall_vbar1_fiq_sp_elx
+ check_vector_size bpiall_ret_fiq_sp_elx
-vector_entry workaround_bpiall_vbar1_serror_sp_elx
+vector_entry bpiall_ret_serror_sp_elx
b report_unhandled_exception
- check_vector_size workaround_bpiall_vbar1_serror_sp_elx
+ check_vector_size bpiall_ret_serror_sp_elx
/* ---------------------------------------------------------------------
* Lower EL using AArch64 : 0x400 - 0x600 (UNUSED)
* ---------------------------------------------------------------------
*/
-vector_entry workaround_bpiall_vbar1_sync_exception_aarch64
+vector_entry bpiall_ret_sync_exception_aarch64
b report_unhandled_exception
- check_vector_size workaround_bpiall_vbar1_sync_exception_aarch64
+ check_vector_size bpiall_ret_sync_exception_aarch64
-vector_entry workaround_bpiall_vbar1_irq_aarch64
+vector_entry bpiall_ret_irq_aarch64
b report_unhandled_interrupt
- check_vector_size workaround_bpiall_vbar1_irq_aarch64
+ check_vector_size bpiall_ret_irq_aarch64
-vector_entry workaround_bpiall_vbar1_fiq_aarch64
+vector_entry bpiall_ret_fiq_aarch64
b report_unhandled_interrupt
- check_vector_size workaround_bpiall_vbar1_fiq_aarch64
+ check_vector_size bpiall_ret_fiq_aarch64
-vector_entry workaround_bpiall_vbar1_serror_aarch64
+vector_entry bpiall_ret_serror_aarch64
b report_unhandled_exception
- check_vector_size workaround_bpiall_vbar1_serror_aarch64
+ check_vector_size bpiall_ret_serror_aarch64
/* ---------------------------------------------------------------------
* Lower EL using AArch32 : 0x600 - 0x800
* ---------------------------------------------------------------------
*/
-vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
+vector_entry bpiall_ret_sync_exception_aarch32
/*
* w2 indicates which SEL1 stub was run and thus which original vector was used
* w3-w6 contain saved system register state (esr_el3 in w3)
@@ -281,7 +281,7 @@
* to workaround entry table in preparation for subsequent
* Sync/IRQ/FIQ/SError exceptions.
*/
- adr x0, workaround_bpiall_vbar0_runtime_exceptions
+ adr x0, wa_cve_2017_5715_bpiall_vbar
msr vbar_el3, x0
/*
@@ -324,34 +324,34 @@
1:
ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
b sync_exception_aarch64
- check_vector_size workaround_bpiall_vbar1_sync_exception_aarch32
+ check_vector_size bpiall_ret_sync_exception_aarch32
-vector_entry workaround_bpiall_vbar1_irq_aarch32
+vector_entry bpiall_ret_irq_aarch32
b report_unhandled_interrupt
/*
* Post-workaround fan-out for non-sync exceptions
*/
workaround_not_sync:
- tbnz w2, #3, workaround_bpiall_vbar1_serror
- tbnz w2, #2, workaround_bpiall_vbar1_fiq
+ tbnz w2, #3, bpiall_ret_serror
+ tbnz w2, #2, bpiall_ret_fiq
/* IRQ */
ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
b irq_aarch64
-workaround_bpiall_vbar1_fiq:
+bpiall_ret_fiq:
ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
b fiq_aarch64
-workaround_bpiall_vbar1_serror:
+bpiall_ret_serror:
ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
b serror_aarch64
- check_vector_size workaround_bpiall_vbar1_irq_aarch32
+ check_vector_size bpiall_ret_irq_aarch32
-vector_entry workaround_bpiall_vbar1_fiq_aarch32
+vector_entry bpiall_ret_fiq_aarch32
b report_unhandled_interrupt
- check_vector_size workaround_bpiall_vbar1_fiq_aarch32
+ check_vector_size bpiall_ret_fiq_aarch32
-vector_entry workaround_bpiall_vbar1_serror_aarch32
+vector_entry bpiall_ret_serror_aarch32
b report_unhandled_exception
- check_vector_size workaround_bpiall_vbar1_serror_aarch32
+ check_vector_size bpiall_ret_serror_aarch32
diff --git a/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S b/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
new file mode 100644
index 0000000..039e373
--- /dev/null
+++ b/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arm_arch_svc.h>
+#include <asm_macros.S>
+#include <context.h>
+
+ .globl wa_cve_2017_5715_mmu_vbar
+
+#define ESR_EL3_A64_SMC0 0x5e000000
+
+vector_base wa_cve_2017_5715_mmu_vbar
+
+ .macro apply_cve_2017_5715_wa _is_sync_exception
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ mrs x1, sctlr_el3
+ /* Disable MMU */
+ bic x1, x1, #SCTLR_M_BIT
+ msr sctlr_el3, x1
+ isb
+ /* Enable MMU */
+ orr x1, x1, #SCTLR_M_BIT
+ msr sctlr_el3, x1
+ /*
+ * Defer ISB to avoid synchronizing twice in case we hit
+ * the workaround SMC call which will implicitly synchronize
+ * because of the ERET instruction.
+ */
+
+ /*
+ * Ensure SMC is coming from A64 state on #0
+ * with W0 = SMCCC_ARCH_WORKAROUND_1
+ *
+ * This sequence evaluates as:
+ * (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
+ * allowing use of a single branch operation
+ */
+ .if \_is_sync_exception
+ orr w1, wzr, #SMCCC_ARCH_WORKAROUND_1
+ cmp w0, w1
+ mrs x0, esr_el3
+ mov_imm w1, ESR_EL3_A64_SMC0
+ ccmp w0, w1, #0, eq
+ /* Static predictor will predict a fall through */
+ bne 1f
+ eret
+1:
+ .endif
+
+ /*
+ * Synchronize now to enable the MMU. This is required
+ * to ensure the load pair below reads the data stored earlier.
+ */
+ isb
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ .endm
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry mmu_sync_exception_sp_el0
+ b sync_exception_sp_el0
+ check_vector_size mmu_sync_exception_sp_el0
+
+vector_entry mmu_irq_sp_el0
+ b irq_sp_el0
+ check_vector_size mmu_irq_sp_el0
+
+vector_entry mmu_fiq_sp_el0
+ b fiq_sp_el0
+ check_vector_size mmu_fiq_sp_el0
+
+vector_entry mmu_serror_sp_el0
+ b serror_sp_el0
+ check_vector_size mmu_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry mmu_sync_exception_sp_elx
+ b sync_exception_sp_elx
+ check_vector_size mmu_sync_exception_sp_elx
+
+vector_entry mmu_irq_sp_elx
+ b irq_sp_elx
+ check_vector_size mmu_irq_sp_elx
+
+vector_entry mmu_fiq_sp_elx
+ b fiq_sp_elx
+ check_vector_size mmu_fiq_sp_elx
+
+vector_entry mmu_serror_sp_elx
+ b serror_sp_elx
+ check_vector_size mmu_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry mmu_sync_exception_aarch64
+ apply_cve_2017_5715_wa _is_sync_exception=1
+ b sync_exception_aarch64
+ check_vector_size mmu_sync_exception_aarch64
+
+vector_entry mmu_irq_aarch64
+ apply_cve_2017_5715_wa _is_sync_exception=0
+ b irq_aarch64
+ check_vector_size mmu_irq_aarch64
+
+vector_entry mmu_fiq_aarch64
+ apply_cve_2017_5715_wa _is_sync_exception=0
+ b fiq_aarch64
+ check_vector_size mmu_fiq_aarch64
+
+vector_entry mmu_serror_aarch64
+ apply_cve_2017_5715_wa _is_sync_exception=0
+ b serror_aarch64
+ check_vector_size mmu_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry mmu_sync_exception_aarch32
+ apply_cve_2017_5715_wa _is_sync_exception=1
+ b sync_exception_aarch32
+ check_vector_size mmu_sync_exception_aarch32
+
+vector_entry mmu_irq_aarch32
+ apply_cve_2017_5715_wa _is_sync_exception=0
+ b irq_aarch32
+ check_vector_size mmu_irq_aarch32
+
+vector_entry mmu_fiq_aarch32
+ apply_cve_2017_5715_wa _is_sync_exception=0
+ b fiq_aarch32
+ check_vector_size mmu_fiq_aarch32
+
+vector_entry mmu_serror_aarch32
+ apply_cve_2017_5715_wa _is_sync_exception=0
+ b serror_aarch32
+ check_vector_size mmu_serror_aarch32
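The vector table above exists largely to fast-path SMCCC_ARCH_WORKAROUND_1 calls (the cmp/ccmp/eret sequence in `apply_cve_2017_5715_wa`). From the normal world, a kernel would typically invoke it on context switch, roughly as below; `smccc()` is a hypothetical SMC trampoline, not part of this patch:

```c
#include <stdint.h>

#define SMCCC_ARCH_WORKAROUND_1 0x80008000U

extern void smccc(uint32_t fid);	/* hypothetical: issues the SMC, no result needed */

static inline void harden_branch_predictor(void)
{
	/* Lands on the eret fast path in wa_cve_2017_5715_mmu_vbar above. */
	smccc(SMCCC_ARCH_WORKAROUND_1);
}
```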
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
deleted file mode 100644
index b24b620..0000000
--- a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch.h>
-#include <arm_arch_svc.h>
-#include <asm_macros.S>
-#include <context.h>
-
- .globl workaround_mmu_runtime_exceptions
-
-#define ESR_EL3_A64_SMC0 0x5e000000
-
-vector_base workaround_mmu_runtime_exceptions
-
- .macro apply_workaround _is_sync_exception
- stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
- mrs x1, sctlr_el3
- /* Disable MMU */
- bic x1, x1, #SCTLR_M_BIT
- msr sctlr_el3, x1
- isb
- /* Enable MMU */
- orr x1, x1, #SCTLR_M_BIT
- msr sctlr_el3, x1
- /*
- * Defer ISB to avoid synchronizing twice in case we hit
- * the workaround SMC call which will implicitly synchronize
- * because of the ERET instruction.
- */
-
- /*
- * Ensure SMC is coming from A64 state on #0
- * with W0 = SMCCC_ARCH_WORKAROUND_1
- *
- * This sequence evaluates as:
- * (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
- * allowing use of a single branch operation
- */
- .if \_is_sync_exception
- orr w1, wzr, #SMCCC_ARCH_WORKAROUND_1
- cmp w0, w1
- mrs x0, esr_el3
- mov_imm w1, ESR_EL3_A64_SMC0
- ccmp w0, w1, #0, eq
- /* Static predictor will predict a fall through */
- bne 1f
- eret
-1:
- .endif
-
- /*
- * Synchronize now to enable the MMU. This is required
- * to ensure the load pair below reads the data stored earlier.
- */
- isb
- ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
- .endm
-
- /* ---------------------------------------------------------------------
- * Current EL with SP_EL0 : 0x0 - 0x200
- * ---------------------------------------------------------------------
- */
-vector_entry workaround_mmu_sync_exception_sp_el0
- b sync_exception_sp_el0
- check_vector_size workaround_mmu_sync_exception_sp_el0
-
-vector_entry workaround_mmu_irq_sp_el0
- b irq_sp_el0
- check_vector_size workaround_mmu_irq_sp_el0
-
-vector_entry workaround_mmu_fiq_sp_el0
- b fiq_sp_el0
- check_vector_size workaround_mmu_fiq_sp_el0
-
-vector_entry workaround_mmu_serror_sp_el0
- b serror_sp_el0
- check_vector_size workaround_mmu_serror_sp_el0
-
- /* ---------------------------------------------------------------------
- * Current EL with SP_ELx: 0x200 - 0x400
- * ---------------------------------------------------------------------
- */
-vector_entry workaround_mmu_sync_exception_sp_elx
- b sync_exception_sp_elx
- check_vector_size workaround_mmu_sync_exception_sp_elx
-
-vector_entry workaround_mmu_irq_sp_elx
- b irq_sp_elx
- check_vector_size workaround_mmu_irq_sp_elx
-
-vector_entry workaround_mmu_fiq_sp_elx
- b fiq_sp_elx
- check_vector_size workaround_mmu_fiq_sp_elx
-
-vector_entry workaround_mmu_serror_sp_elx
- b serror_sp_elx
- check_vector_size workaround_mmu_serror_sp_elx
-
- /* ---------------------------------------------------------------------
- * Lower EL using AArch64 : 0x400 - 0x600
- * ---------------------------------------------------------------------
- */
-vector_entry workaround_mmu_sync_exception_aarch64
- apply_workaround _is_sync_exception=1
- b sync_exception_aarch64
- check_vector_size workaround_mmu_sync_exception_aarch64
-
-vector_entry workaround_mmu_irq_aarch64
- apply_workaround _is_sync_exception=0
- b irq_aarch64
- check_vector_size workaround_mmu_irq_aarch64
-
-vector_entry workaround_mmu_fiq_aarch64
- apply_workaround _is_sync_exception=0
- b fiq_aarch64
- check_vector_size workaround_mmu_fiq_aarch64
-
-vector_entry workaround_mmu_serror_aarch64
- apply_workaround _is_sync_exception=0
- b serror_aarch64
- check_vector_size workaround_mmu_serror_aarch64
-
- /* ---------------------------------------------------------------------
- * Lower EL using AArch32 : 0x600 - 0x800
- * ---------------------------------------------------------------------
- */
-vector_entry workaround_mmu_sync_exception_aarch32
- apply_workaround _is_sync_exception=1
- b sync_exception_aarch32
- check_vector_size workaround_mmu_sync_exception_aarch32
-
-vector_entry workaround_mmu_irq_aarch32
- apply_workaround _is_sync_exception=0
- b irq_aarch32
- check_vector_size workaround_mmu_irq_aarch32
-
-vector_entry workaround_mmu_fiq_aarch32
- apply_workaround _is_sync_exception=0
- b fiq_aarch32
- check_vector_size workaround_mmu_fiq_aarch32
-
-vector_entry workaround_mmu_serror_aarch32
- apply_workaround _is_sync_exception=0
- b serror_aarch32
- check_vector_size workaround_mmu_serror_aarch32
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 3ba8c1f..434c13e 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -17,6 +17,8 @@
A57_DISABLE_NON_TEMPORAL_HINT ?=1
WORKAROUND_CVE_2017_5715 ?=1
+WORKAROUND_CVE_2018_3639 ?=1
+DYNAMIC_WORKAROUND_CVE_2018_3639 ?=0
# Process SKIP_A57_L1_FLUSH_PWR_DWN flag
$(eval $(call assert_boolean,SKIP_A57_L1_FLUSH_PWR_DWN))
@@ -34,6 +36,19 @@
$(eval $(call assert_boolean,WORKAROUND_CVE_2017_5715))
$(eval $(call add_define,WORKAROUND_CVE_2017_5715))
+# Process WORKAROUND_CVE_2018_3639 flag
+$(eval $(call assert_boolean,WORKAROUND_CVE_2018_3639))
+$(eval $(call add_define,WORKAROUND_CVE_2018_3639))
+
+$(eval $(call assert_boolean,DYNAMIC_WORKAROUND_CVE_2018_3639))
+$(eval $(call add_define,DYNAMIC_WORKAROUND_CVE_2018_3639))
+
+ifneq (${DYNAMIC_WORKAROUND_CVE_2018_3639},0)
+ ifeq (${WORKAROUND_CVE_2018_3639},0)
+ $(error "Error: WORKAROUND_CVE_2018_3639 must be 1 if DYNAMIC_WORKAROUND_CVE_2018_3639 is 1")
+ endif
+endif
+
# CPU Errata Build flags.
# These should be enabled by the platform if the erratum workaround needs to be
# applied.
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 121ca4d..707e6db 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -404,6 +404,15 @@
msr spsr_el3, x16
msr elr_el3, x17
+#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
+ /* Restore mitigation state as it was on entry to EL3 */
+ ldr x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
+ cmp x17, xzr
+ beq 1f
+ blr x17
+1:
+#endif
+
/* Restore saved general purpose registers and return */
b restore_gp_registers_eret
endfunc el3_exit
diff --git a/services/arm_arch_svc/arm_arch_svc_setup.c b/services/arm_arch_svc/arm_arch_svc_setup.c
index eb736c0..45c4704 100644
--- a/services/arm_arch_svc/arm_arch_svc_setup.c
+++ b/services/arm_arch_svc/arm_arch_svc_setup.c
@@ -10,7 +10,8 @@
#include <runtime_svc.h>
#include <smccc.h>
#include <smccc_helpers.h>
-#include <workaround_cve_2017_5715.h>
+#include <wa_cve_2017_5715.h>
+#include <wa_cve_2018_3639.h>
static int32_t smccc_version(void)
{
@@ -25,10 +26,31 @@
return SMC_OK;
#if WORKAROUND_CVE_2017_5715
case SMCCC_ARCH_WORKAROUND_1:
- if (check_workaround_cve_2017_5715() == ERRATA_NOT_APPLIES)
+ if (check_wa_cve_2017_5715() == ERRATA_NOT_APPLIES)
return 1;
return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
#endif
+#if WORKAROUND_CVE_2018_3639
+ case SMCCC_ARCH_WORKAROUND_2:
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ /*
+ * On a platform where at least one CPU requires
+ * dynamic mitigation but others are either unaffected
+ * or permanently mitigated, report the latter as not
+ * needing dynamic mitigation.
+ */
+ if (wa_cve_2018_3639_get_disable_ptr() == NULL)
+ return 1;
+ /*
+ * If we get here, this CPU requires dynamic mitigation
+ * so report it as such.
+ */
+ return 0;
+#else
+ /* Either the CPUs are unaffected or permanently mitigated */
+ return SMCCC_ARCH_NOT_REQUIRED;
+#endif
+#endif
default:
return SMC_UNK;
}
@@ -60,6 +82,16 @@
*/
SMC_RET0(handle);
#endif
+#if WORKAROUND_CVE_2018_3639
+ case SMCCC_ARCH_WORKAROUND_2:
+ /*
+ * The workaround has already been applied on affected PEs
+ * requiring dynamic mitigation during entry to EL3.
+ * On unaffected or statically mitigated PEs, this function
+ * has no effect.
+ */
+ SMC_RET0(handle);
+#endif
default:
WARN("Unimplemented Arm Architecture Service Call: 0x%x \n",
smc_fid);
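Putting the two handlers together: a normal-world caller first probes via SMCCC_ARCH_FEATURES and then toggles the mitigation around vulnerable code. Below is a hedged sketch assuming a hypothetical `smccc()` helper and the SMCCC v1.1 convention that the first argument of SMCCC_ARCH_WORKAROUND_2 set to 1 requests the mitigation enabled; this patch's runtime handler simply returns, since the mitigation state is handled on entry to EL3:

```c
#include <stdint.h>

#define SMCCC_ARCH_FEATURES	0x80000001U
#define SMCCC_ARCH_WORKAROUND_2	0x80007FFFU

extern int64_t smccc(uint32_t fid, uint64_t arg);	/* hypothetical SMC helper */

static int wa2_dynamic;		/* probed once at boot */

void wa2_probe(void)
{
	/* 0 => this PE relies on dynamic mitigation via SMCCC_ARCH_WORKAROUND_2 */
	wa2_dynamic = (smccc(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_2) == 0);
}

void wa2_set_mitigation(int enable)
{
	if (wa2_dynamic)
		smccc(SMCCC_ARCH_WORKAROUND_2, enable ? 1U : 0U);
}
```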