Merge "feat(tc): enable stack protector" into integration
diff --git a/docs/design/cpu-specific-build-macros.rst b/docs/design/cpu-specific-build-macros.rst
index 369ec6f..17b2954 100644
--- a/docs/design/cpu-specific-build-macros.rst
+++ b/docs/design/cpu-specific-build-macros.rst
@@ -38,6 +38,10 @@
in EL3 FW. This build option should be set to 1 if the target platform contains
at least 1 CPU that requires this mitigation. Defaults to 1.
+- ``WORKAROUND_CVE_2024_7881``: Enables mitigation for `CVE-2024-7881`.
+ This build option should be set to 1 if the target platform contains at
+ least 1 CPU that requires this mitigation. Defaults to 1.
+
.. _arm_cpu_macros_errata_workarounds:
CPU Errata Workarounds
@@ -1055,7 +1059,7 @@
--------------
-*Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.*
+*Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.*
.. _CVE-2017-5715: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-5715
.. _CVE-2018-3639: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-3639
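
Note: as with the other CVE workaround options, the make flag is expected to surface as a preprocessor define that gates both the per-CPU workaround registration and the new SMCCC handler further down in this patch. A minimal C-level sketch of that gating (illustrative only; the define name is the one added to cpu-ops.mk below, the surrounding macro is hypothetical):

    /* Hedged sketch: how code elsewhere in this patch keys off the flag. */
    #if WORKAROUND_CVE_2024_7881
    /* Mitigation compiled in: affected cores register a check function in
     * the extra4 slot and SMCCC_ARCH_WORKAROUND_4 is advertised. */
    #define CVE_2024_7881_MITIGATION_BUILT	1
    #else
    #define CVE_2024_7881_MITIGATION_BUILT	0
    #endif
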
diff --git a/include/lib/cpus/aarch64/cortex_x4.h b/include/lib/cpus/aarch64/cortex_x4.h
index f701216..116f9a0 100644
--- a/include/lib/cpus/aarch64/cortex_x4.h
+++ b/include/lib/cpus/aarch64/cortex_x4.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -36,6 +36,11 @@
#define CORTEX_X4_CPUACTLR5_EL1 S3_0_C15_C8_0
#define CORTEX_X4_CPUACTLR5_EL1_BIT_14 (ULL(1) << 14)
+/*******************************************************************************
+ * CPU Auxiliary control register 6 specific definitions
+ ******************************************************************************/
+#define CORTEX_X4_CPUACTLR6_EL1 S3_0_C15_C8_1
+
#ifndef __ASSEMBLER__
#if ERRATA_X4_2726228
long check_erratum_cortex_x4_2726228(long cpu_rev);
diff --git a/include/lib/cpus/aarch64/cortex_x925.h b/include/lib/cpus/aarch64/cortex_x925.h
index b0d0ca4..ecbbb59 100644
--- a/include/lib/cpus/aarch64/cortex_x925.h
+++ b/include/lib/cpus/aarch64/cortex_x925.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2023-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -21,4 +21,9 @@
#define CORTEX_X925_CPUPWRCTLR_EL1 S3_0_C15_C2_7
#define CORTEX_X925_CPUPWRCTLR_EL1_CORE_PWRDN_BIT U(1)
+/*******************************************************************************
+ * CPU Auxiliary control register 6 specific definitions
+ ******************************************************************************/
+#define CORTEX_X925_CPUACTLR6_EL1 S3_0_C15_C8_1
+
#endif /* CORTEX_X925_H */
diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S
index 5e92934..0ce9c3c 100644
--- a/include/lib/cpus/aarch64/cpu_macros.S
+++ b/include/lib/cpus/aarch64/cpu_macros.S
@@ -63,6 +63,10 @@
* This is a placeholder for future per CPU operations. Currently,
* some CPUs use this entry to set a test function to determine if
* the workaround for CVE-2022-23960 needs to be applied or not.
+ * _extra4:
+ * This is a placeholder for future per CPU operations. Currently,
+ * some CPUs use this entry to set a test function to determine if
+ * the workaround for CVE-2024-7881 needs to be applied or not.
* _e_handler:
* This is a placeholder for future per CPU exception handlers.
* _power_down_ops:
@@ -75,7 +79,8 @@
* used to handle power down at subsequent levels
*/
.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
- _extra1:req, _extra2:req, _extra3:req, _e_handler:req, _power_down_ops:vararg
+ _extra1:req, _extra2:req, _extra3:req, _extra4:req, \
+ _e_handler:req, _power_down_ops:vararg
.section .cpu_ops, "a"
.align 3
.type cpu_ops_\_name, %object
@@ -86,6 +91,7 @@
.quad \_extra1
.quad \_extra2
.quad \_extra3
+ .quad \_extra4
.quad \_e_handler
#ifdef IMAGE_BL31
/* Insert list of functions */
@@ -148,21 +154,28 @@
.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
_power_down_ops:vararg
- declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, \
+ declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, 0, \
\_power_down_ops
.endm
.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
_e_handler:req, _power_down_ops:vararg
declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
- 0, 0, 0, \_e_handler, \_power_down_ops
+ 0, 0, 0, 0, \_e_handler, \_power_down_ops
.endm
.macro declare_cpu_ops_wa _name:req, _midr:req, \
_resetfunc:req, _extra1:req, _extra2:req, \
_extra3:req, _power_down_ops:vararg
declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
- \_extra1, \_extra2, \_extra3, 0, \_power_down_ops
+ \_extra1, \_extra2, \_extra3, 0, 0, \_power_down_ops
+ .endm
+
+ .macro declare_cpu_ops_wa_4 _name:req, _midr:req, \
+ _resetfunc:req, _extra1:req, _extra2:req, \
+ _extra3:req, _extra4:req, _power_down_ops:vararg
+ declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
+ \_extra1, \_extra2, \_extra3, \_extra4, 0, \_power_down_ops
.endm
/*
diff --git a/include/lib/cpus/aarch64/neoverse_v2.h b/include/lib/cpus/aarch64/neoverse_v2.h
index 1171e95..427cafa 100644
--- a/include/lib/cpus/aarch64/neoverse_v2.h
+++ b/include/lib/cpus/aarch64/neoverse_v2.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -58,4 +58,9 @@
#define NEOVERSE_V2_CPUACTLR5_EL1_BIT_56 (ULL(1) << 56)
#define NEOVERSE_V2_CPUACTLR5_EL1_BIT_55 (ULL(1) << 55)
+/*******************************************************************************
+ * CPU Auxiliary control register 6 specific definitions
+ ******************************************************************************/
+#define NEOVERSE_V2_CPUACTLR6_EL1 S3_0_C15_C8_1
+
#endif /* NEOVERSE_V2_H */
diff --git a/include/lib/cpus/aarch64/neoverse_v3.h b/include/lib/cpus/aarch64/neoverse_v3.h
index e5f75ba..a31bdd3 100644
--- a/include/lib/cpus/aarch64/neoverse_v3.h
+++ b/include/lib/cpus/aarch64/neoverse_v3.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -22,7 +22,12 @@
/*******************************************************************************
* CPU Power Control register specific definitions
******************************************************************************/
-#define NEOVERSE_V3_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define NEOVERSE_V3_CPUPWRCTLR_EL1 S3_0_C15_C2_7
#define NEOVERSE_V3_CPUPWRCTLR_EL1_CORE_PWRDN_BIT U(1)
+/*******************************************************************************
+ * CPU Auxiliary control register 6 specific definitions
+ ******************************************************************************/
+#define NEOVERSE_V3_CPUACTLR6_EL1 S3_0_C15_C8_1
+
#endif /* NEOVERSE_V3_H */
diff --git a/include/lib/cpus/cpu_ops.h b/include/lib/cpus/cpu_ops.h
index 0084189..c1bdf8d 100644
--- a/include/lib/cpus/cpu_ops.h
+++ b/include/lib/cpus/cpu_ops.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2023-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -28,6 +28,7 @@
#define CPU_NO_EXTRA1_FUNC 0
#define CPU_NO_EXTRA2_FUNC 0
#define CPU_NO_EXTRA3_FUNC 0
+#define CPU_NO_EXTRA4_FUNC 0
#endif /* __aarch64__ */
@@ -45,6 +46,7 @@
#define CPU_EXTRA1_FUNC_SIZE CPU_WORD_SIZE
#define CPU_EXTRA2_FUNC_SIZE CPU_WORD_SIZE
#define CPU_EXTRA3_FUNC_SIZE CPU_WORD_SIZE
+#define CPU_EXTRA4_FUNC_SIZE CPU_WORD_SIZE
#define CPU_E_HANDLER_FUNC_SIZE CPU_WORD_SIZE
/* The power down core and cluster is needed only in BL31 and BL32 */
#if defined(IMAGE_BL31) || defined(IMAGE_BL32)
@@ -89,7 +91,8 @@
#define CPU_EXTRA1_FUNC CPU_RESET_FUNC + CPU_RESET_FUNC_SIZE
#define CPU_EXTRA2_FUNC CPU_EXTRA1_FUNC + CPU_EXTRA1_FUNC_SIZE
#define CPU_EXTRA3_FUNC CPU_EXTRA2_FUNC + CPU_EXTRA2_FUNC_SIZE
-#define CPU_E_HANDLER_FUNC CPU_EXTRA3_FUNC + CPU_EXTRA3_FUNC_SIZE
+#define CPU_EXTRA4_FUNC CPU_EXTRA3_FUNC + CPU_EXTRA3_FUNC_SIZE
+#define CPU_E_HANDLER_FUNC CPU_EXTRA4_FUNC + CPU_EXTRA4_FUNC_SIZE
#define CPU_PWR_DWN_OPS CPU_E_HANDLER_FUNC + CPU_E_HANDLER_FUNC_SIZE
#else
#define CPU_PWR_DWN_OPS CPU_RESET_FUNC + CPU_RESET_FUNC_SIZE
@@ -119,6 +122,7 @@
void (*extra1_func)(void);
void (*extra2_func)(void);
void (*extra3_func)(void);
+ void (*extra4_func)(void);
void (*e_handler_func)(long es);
#endif /* __aarch64__ */
#if (defined(IMAGE_BL31) || defined(IMAGE_BL32)) && CPU_MAX_PWR_DWN_OPS
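
Note: the assembly-side byte offsets and the C view of the ops table must stay in lockstep. A hedged compile-time check one could add for the new slot (CASSERT and offsetof are standard TF-A/C idioms, but this exact assert is illustrative and not part of the patch; the structure tag is assumed to be struct cpu_ops as elsewhere in cpu_ops.h):

    #include <stddef.h>
    #include <lib/cassert.h>
    #include <lib/cpus/cpu_ops.h>

    /* Illustrative only: extra4_func must sit at the CPU_EXTRA4_FUNC byte
     * offset that cpu_helpers.S uses when it dereferences the ops table. */
    CASSERT(offsetof(struct cpu_ops, extra4_func) == CPU_EXTRA4_FUNC,
            assert_cpu_extra4_func_offset_mismatch);
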
diff --git a/include/lib/cpus/errata.h b/include/lib/cpus/errata.h
index 2c31515..a2f2fc6 100644
--- a/include/lib/cpus/errata.h
+++ b/include/lib/cpus/errata.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -48,6 +48,8 @@
unsigned int check_if_affected_core(void);
#endif
+int check_wa_cve_2024_7881(void);
+
/*
* NOTE that this structure will be different on AArch32 and AArch64. The
* uintptr_t will reflect the change and the alignment will be correct in both.
diff --git a/include/services/arm_arch_svc.h b/include/services/arm_arch_svc.h
index c2b1f41..ed9bc95 100644
--- a/include/services/arm_arch_svc.h
+++ b/include/services/arm_arch_svc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -14,6 +14,7 @@
#define SMCCC_ARCH_WORKAROUND_2 U(0x80007FFF)
#define SMCCC_ARCH_WORKAROUND_3 U(0x80003FFF)
#define SMCCC_ARCH_FEATURE_AVAILABILITY U(0x80000003)
+#define SMCCC_ARCH_WORKAROUND_4 U(0x80000004)
#define SMCCC_GET_SOC_VERSION U(0)
#define SMCCC_GET_SOC_REVISION U(1)
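
Note: for a lower-EL caller the flow mirrors the existing workaround IDs: probe the new ID through SMCCC_ARCH_FEATURES and, if it is implemented, the call itself is harmless. A hedged sketch, assuming a hypothetical smc_call() helper in the caller's environment (0x8000_0001 is the SMCCC_ARCH_FEATURES function ID from the SMC Calling Convention):

    #define SMCCC_ARCH_FEATURES_FID		0x80000001U	/* per the SMCCC spec */
    #define SMCCC_ARCH_WORKAROUND_4_FID	0x80000004U

    /* Hypothetical helper: issues the SMC and returns x0. */
    extern long smc_call(unsigned long fid, unsigned long arg0);

    static void probe_and_invoke_wa4(void)
    {
    	/* A non-negative return means EL3 implements the call and the
    	 * mitigation is required on this PE; -1 means not supported. */
    	if (smc_call(SMCCC_ARCH_FEATURES_FID, SMCCC_ARCH_WORKAROUND_4_FID) >= 0) {
    		/* On this firmware the CPUACTLR6_EL1 bit was already set
    		 * at reset, so the call is effectively a no-op but safe. */
    		(void)smc_call(SMCCC_ARCH_WORKAROUND_4_FID, 0UL);
    	}
    }
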
diff --git a/lib/cpus/aarch64/cortex_x3.S b/lib/cpus/aarch64/cortex_x3.S
index 6becf7b..4a0212e 100644
--- a/lib/cpus/aarch64/cortex_x3.S
+++ b/lib/cpus/aarch64/cortex_x3.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -111,6 +111,17 @@
check_erratum_chosen cortex_x3, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+workaround_reset_start cortex_x3, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
+ /* ---------------------------------
+ * Sets BIT41 of CPUACTLR6_EL1 which
+ * disables L1 Data cache prefetcher
+ * ---------------------------------
+ */
+ sysreg_bit_set CORTEX_X3_CPUACTLR6_EL1, BIT(41)
+workaround_reset_end cortex_x3, CVE(2024, 7881)
+
+check_erratum_chosen cortex_x3, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
+
cpu_reset_func_start cortex_x3
/* Disable speculative loads */
msr SSBS, xzr
@@ -151,6 +162,10 @@
ret
endfunc cortex_x3_cpu_reg_dump
-declare_cpu_ops cortex_x3, CORTEX_X3_MIDR, \
+declare_cpu_ops_wa_4 cortex_x3, CORTEX_X3_MIDR, \
cortex_x3_reset_func, \
+ CPU_NO_EXTRA1_FUNC, \
+ CPU_NO_EXTRA2_FUNC, \
+ CPU_NO_EXTRA3_FUNC, \
+ check_erratum_cortex_x3_7881, \
cortex_x3_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_x4.S b/lib/cpus/aarch64/cortex_x4.S
index 81704da..5765828 100644
--- a/lib/cpus/aarch64/cortex_x4.S
+++ b/lib/cpus/aarch64/cortex_x4.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -108,6 +108,17 @@
check_erratum_chosen cortex_x4, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+workaround_reset_start cortex_x4, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
+ /* ---------------------------------
+ * Sets BIT41 of CPUACTLR6_EL1 which
+ * disables L1 Data cache prefetcher
+ * ---------------------------------
+ */
+ sysreg_bit_set CORTEX_X4_CPUACTLR6_EL1, BIT(41)
+workaround_reset_end cortex_x4, CVE(2024, 7881)
+
+check_erratum_chosen cortex_x4, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
+
cpu_reset_func_start cortex_x4
/* Disable speculative loads */
msr SSBS, xzr
@@ -149,6 +160,10 @@
ret
endfunc cortex_x4_cpu_reg_dump
-declare_cpu_ops cortex_x4, CORTEX_X4_MIDR, \
+declare_cpu_ops_wa_4 cortex_x4, CORTEX_X4_MIDR, \
cortex_x4_reset_func, \
+ CPU_NO_EXTRA1_FUNC, \
+ CPU_NO_EXTRA2_FUNC, \
+ CPU_NO_EXTRA3_FUNC, \
+ check_erratum_cortex_x4_7881, \
cortex_x4_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_x925.S b/lib/cpus/aarch64/cortex_x925.S
index 3a31664..5b6632a 100644
--- a/lib/cpus/aarch64/cortex_x925.S
+++ b/lib/cpus/aarch64/cortex_x925.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2023-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -28,6 +28,17 @@
check_erratum_ls cortex_x925, CVE(2024, 5660), CPU_REV(0, 1)
+workaround_reset_start cortex_x925, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
+ /* ---------------------------------
+ * Sets BIT41 of CPUACTLR6_EL1 which
+ * disables L1 Data cache prefetcher
+ * ---------------------------------
+ */
+ sysreg_bit_set CORTEX_X925_CPUACTLR6_EL1, BIT(41)
+workaround_reset_end cortex_x925, CVE(2024, 7881)
+
+check_erratum_chosen cortex_x925, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
+
cpu_reset_func_start cortex_x925
/* Disable speculative loads */
msr SSBS, xzr
@@ -66,6 +77,10 @@
ret
endfunc cortex_x925_cpu_reg_dump
-declare_cpu_ops cortex_x925, CORTEX_X925_MIDR, \
+declare_cpu_ops_wa_4 cortex_x925, CORTEX_X925_MIDR, \
cortex_x925_reset_func, \
+ CPU_NO_EXTRA1_FUNC, \
+ CPU_NO_EXTRA2_FUNC, \
+ CPU_NO_EXTRA3_FUNC, \
+ check_erratum_cortex_x925_7881, \
cortex_x925_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index 3aa4f15..0f9a3b8 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -327,6 +327,43 @@
endfunc check_wa_cve_2017_5715
/*
+ * int check_wa_cve_2024_7881(void);
+ *
+ * This function returns:
+ * - ERRATA_APPLIES when firmware mitigation is required.
+ * - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
+ * - ERRATA_MISSING when firmware mitigation would be required but
+ * is not compiled in.
+ *
+ * NOTE: Must be called only after cpu_ops have been initialized
+ * in per-CPU data.
+ */
+.globl check_wa_cve_2024_7881
+func check_wa_cve_2024_7881
+ mrs x0, tpidr_el3
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr x0, [x0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr x0, [x0, #CPU_EXTRA4_FUNC]
+ /*
+ * If the reserved function pointer is NULL, this CPU
+ * is unaffected by CVE-2024-7881 so bail out.
+ */
+ cmp x0, #CPU_NO_EXTRA4_FUNC
+ beq 1f
+ br x0
+1:
+ mov x0, #ERRATA_NOT_APPLIES
+ ret
+endfunc check_wa_cve_2024_7881
+
+/*
* void *wa_cve_2018_3639_get_disable_ptr(void);
*
* Returns a function pointer which is used to disable mitigation
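
Note: for readers who prefer C, the dispatch above reduces to the following hedged sketch. The real implementation stays in assembly; _cpu_data(), cpu_ops_ptr and extra4_func are the names used by TF-A's cpu_data.h and cpu_ops.h, and the cast of the void-returning slot to a status-returning check reflects how the extra slots are conventionally used:

    #include <lib/cpus/cpu_ops.h>
    #include <lib/cpus/errata.h>
    #include <lib/el3_runtime/cpu_data.h>

    /* Hedged C rendering of check_wa_cve_2024_7881 (illustration only). */
    static int check_wa_cve_2024_7881_sketch(void)
    {
    	/* tpidr_el3 holds this core's cpu_data, which caches cpu_ops_ptr. */
    	struct cpu_ops *ops = (struct cpu_ops *)_cpu_data()->cpu_ops_ptr;

    	/* A NULL extra4 slot means this CPU did not register a check. */
    	if (ops->extra4_func == NULL)
    		return ERRATA_NOT_APPLIES;

    	/* extra4 points at the CPU's check_erratum_*_7881 chosen-check. */
    	return ((int (*)(void))ops->extra4_func)();
    }
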
diff --git a/lib/cpus/aarch64/neoverse_v2.S b/lib/cpus/aarch64/neoverse_v2.S
index f56a5e8..b43f6dd 100644
--- a/lib/cpus/aarch64/neoverse_v2.S
+++ b/lib/cpus/aarch64/neoverse_v2.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -97,6 +97,17 @@
wa_cve_2022_23960_bhb_vector_table NEOVERSE_V2_BHB_LOOP_COUNT, neoverse_v2
#endif /* WORKAROUND_CVE_2022_23960 */
+workaround_reset_start neoverse_v2, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
+ /* ---------------------------------
+ * Sets BIT41 of CPUACTLR6_EL1 which
+ * disables L1 Data cache prefetcher
+ * ---------------------------------
+ */
+ sysreg_bit_set NEOVERSE_V2_CPUACTLR6_EL1, BIT(41)
+workaround_reset_end neoverse_v2, CVE(2024, 7881)
+
+check_erratum_chosen neoverse_v2, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
+
/* ----------------------------------------------------
* HW will do the cache maintenance while powering down
* ----------------------------------------------------
@@ -142,6 +153,10 @@
ret
endfunc neoverse_v2_cpu_reg_dump
-declare_cpu_ops neoverse_v2, NEOVERSE_V2_MIDR, \
+declare_cpu_ops_wa_4 neoverse_v2, NEOVERSE_V2_MIDR, \
neoverse_v2_reset_func, \
+ CPU_NO_EXTRA1_FUNC, \
+ CPU_NO_EXTRA2_FUNC, \
+ CPU_NO_EXTRA3_FUNC, \
+ check_erratum_neoverse_v2_7881, \
neoverse_v2_core_pwr_dwn
diff --git a/lib/cpus/aarch64/neoverse_v3.S b/lib/cpus/aarch64/neoverse_v3.S
index 4346d7d..69b6627 100644
--- a/lib/cpus/aarch64/neoverse_v3.S
+++ b/lib/cpus/aarch64/neoverse_v3.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -46,6 +46,17 @@
check_erratum_chosen neoverse_v3, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+workaround_reset_start neoverse_v3, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
+ /* ---------------------------------
+ * Sets BIT41 of CPUACTLR6_EL1 which
+ * disables L1 Data cache prefetcher
+ * ---------------------------------
+ */
+ sysreg_bit_set NEOVERSE_V3_CPUACTLR6_EL1, BIT(41)
+workaround_reset_end neoverse_v3, CVE(2024, 7881)
+
+check_erratum_chosen neoverse_v3, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
+
/* ---------------------------------------------
* HW will do the cache maintenance while powering down
* ---------------------------------------------
@@ -90,6 +101,10 @@
neoverse_v3_reset_func, \
neoverse_v3_core_pwr_dwn
-declare_cpu_ops neoverse_v3, NEOVERSE_V3_MIDR, \
+declare_cpu_ops_wa_4 neoverse_v3, NEOVERSE_V3_MIDR, \
neoverse_v3_reset_func, \
+ CPU_NO_EXTRA1_FUNC, \
+ CPU_NO_EXTRA2_FUNC, \
+ CPU_NO_EXTRA3_FUNC, \
+ check_erratum_neoverse_v3_7881, \
neoverse_v3_core_pwr_dwn
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index d532460..1984689 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
+# Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
# Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
@@ -32,6 +32,8 @@
CPU_FLAG_LIST += DYNAMIC_WORKAROUND_CVE_2018_3639
WORKAROUND_CVE_2022_23960 ?=1
CPU_FLAG_LIST += WORKAROUND_CVE_2022_23960
+WORKAROUND_CVE_2024_7881 ?=1
+CPU_FLAG_LIST += WORKAROUND_CVE_2024_7881
# Flag to disable Hardware page aggregation(HPA).
# This flag is enabled by default.
diff --git a/plat/mediatek/drivers/apusys/mt8196/apusys_power.c b/plat/mediatek/drivers/apusys/mt8196/apusys_power.c
index 4262d63..dcf6423 100644
--- a/plat/mediatek/drivers/apusys/mt8196/apusys_power.c
+++ b/plat/mediatek/drivers/apusys/mt8196/apusys_power.c
@@ -1,16 +1,12 @@
/*
- * Copyright (c) 2024, MediaTek Inc. All rights reserved.
+ * Copyright (c) 2024-2025, MediaTek Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <inttypes.h>
-#define SPMI_ENABLE (0)
-
-#if SPMI_ENABLE
#include <include/drivers/spmi_api.h>
-#endif
#include <common/debug.h>
#include <drivers/delay_timer.h>
@@ -259,16 +255,13 @@
uint32_t en_set_offset = BUCK_VAPU_PMIC_REG_EN_SET_ADDR;
uint32_t en_clr_offset = BUCK_VAPU_PMIC_REG_EN_CLR_ADDR;
uint32_t en_shift = BUCK_VAPU_PMIC_REG_EN_SHIFT;
-#if SPMI_ENABLE
struct spmi_device *vsram_sdev;
-#endif
unsigned char vsram = 0;
mmio_write_32(APUSYS_PCU + APU_PCUTOP_CTRL_SET, AUTO_BUCK_EN);
mmio_write_32((APUSYS_PCU + APU_PCU_BUCK_STEP_SEL), BUCK_STEP_SEL_VAL);
-#if SPMI_ENABLE
vsram_sdev = get_spmi_device(SPMI_MASTER_1, SPMI_SLAVE_4);
if (!vsram_sdev) {
ERROR("[APUPW] VSRAM BUCK4 get device fail\n");
@@ -279,7 +272,6 @@
ERROR("[APUPW] VSRAM BUCK4 read fail\n");
return -1;
}
-#endif
mmio_write_32(APUSYS_PCU + APU_PCU_BUCK_ON_DAT0_L,
(BUCK_VAPU_PMIC_REG_VOSEL_ADDR << PMIC_OFF_ADDR_OFF) | vsram);
diff --git a/plat/mediatek/drivers/apusys/mt8196/apusys_security_ctrl_plat.c b/plat/mediatek/drivers/apusys/mt8196/apusys_security_ctrl_plat.c
index fbd2aa0..a0d21c6 100644
--- a/plat/mediatek/drivers/apusys/mt8196/apusys_security_ctrl_plat.c
+++ b/plat/mediatek/drivers/apusys/mt8196/apusys_security_ctrl_plat.c
@@ -1,14 +1,13 @@
/*
- * Copyright (c) 2024, MediaTek Inc. All rights reserved.
+ * Copyright (c) 2024-2025, MediaTek Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#define ENABLE_SMPU_PROTECT (0)
+#define ENABLE_SMPU_PROTECT (1)
#if ENABLE_SMPU_PROTECT
#include "emi.h"
-#include "mt_emi.h"
#endif
#include <common/debug.h>
diff --git a/plat/mediatek/drivers/apusys/mt8196/rules.mk b/plat/mediatek/drivers/apusys/mt8196/rules.mk
index 4ffaf73..aeb6d3d 100644
--- a/plat/mediatek/drivers/apusys/mt8196/rules.mk
+++ b/plat/mediatek/drivers/apusys/mt8196/rules.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2024, MediaTek Inc. All rights reserved.
+# Copyright (c) 2024-2025, MediaTek Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -10,7 +10,6 @@
ifeq (${CONFIG_MTK_APUSYS_EMI_SUPPORT}, y)
PLAT_INCLUDES += -I${MTK_PLAT}/drivers/emi/common
-PLAT_INCLUDES += -I${MTK_PLAT}/drivers/emi/${MTK_SOC}
endif
LOCAL_SRCS-y := ${LOCAL_DIR}/apusys_ammu.c
diff --git a/plat/mediatek/drivers/mminfra/mminfra_stub.c b/plat/mediatek/drivers/mminfra/mminfra_stub.c
new file mode 100644
index 0000000..dc37280
--- /dev/null
+++ b/plat/mediatek/drivers/mminfra/mminfra_stub.c
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2025, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <drivers/mminfra_public.h>
+
+int mminfra_get_if_in_use(void)
+{
+ return 0;
+}
+
+int mminfra_put(void)
+{
+ return 0;
+}
diff --git a/plat/mediatek/drivers/mminfra/rules.mk b/plat/mediatek/drivers/mminfra/rules.mk
new file mode 100644
index 0000000..f3a6822
--- /dev/null
+++ b/plat/mediatek/drivers/mminfra/rules.mk
@@ -0,0 +1,17 @@
+#
+# Copyright (c) 2025, MediaTek Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+LOCAL_DIR := $(call GET_LOCAL_DIR)
+
+MODULE := mminfra
+
+PLAT_INCLUDES += -I${MTK_PLAT}/include/drivers/
+
+ifeq ($(MTKLIB_PATH),)
+LOCAL_SRCS-y := ${LOCAL_DIR}/mminfra_stub.c
+endif
+
+$(eval $(call MAKE_MODULE,$(MODULE),$(LOCAL_SRCS-y),$(MTK_BL)))
diff --git a/plat/mediatek/include/drivers/mminfra_public.h b/plat/mediatek/include/drivers/mminfra_public.h
new file mode 100644
index 0000000..14ab361
--- /dev/null
+++ b/plat/mediatek/include/drivers/mminfra_public.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2025, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef MMINFRA_PUBLIC_H
+#define MMINFRA_PUBLIC_H
+
+#define MMINFRA_RET_ERR (-1)
+#define MMINFRA_RET_POWER_OFF 0
+#define MMINFRA_RET_POWER_ON 1
+
+int mminfra_get_if_in_use(void);
+int mminfra_put(void);
+
+#endif
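
Note: a hedged usage sketch for the new interface. The power semantics below are inferred from the return-value names and the stub (which always reports the block as powered off), so treat the get/put pairing as an assumption rather than a documented contract:

    #include <drivers/mminfra_public.h>

    static int touch_mm_registers(void)
    {
    	int ret = mminfra_get_if_in_use();

    	if (ret == MMINFRA_RET_ERR)
    		return -1;

    	if (ret == MMINFRA_RET_POWER_ON) {
    		/* ... MM infra registers may be accessed here ... */
    		mminfra_put();	/* release the reference taken above */
    	}

    	return 0;
    }
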
diff --git a/plat/mediatek/mt8196/include/platform_def.h b/plat/mediatek/mt8196/include/platform_def.h
index 4752e5b..72d83e6 100644
--- a/plat/mediatek/mt8196/include/platform_def.h
+++ b/plat/mediatek/mt8196/include/platform_def.h
@@ -148,6 +148,15 @@
GIC_INTR_CFG_LEVEL)
/*******************************************************************************
+ * CIRQ related constants
+ ******************************************************************************/
+#define SYS_CIRQ_BASE (IO_PHYS + 0x1CB000)
+#define MD_WDT_IRQ_BIT_ID (397)
+#define CIRQ_REG_NUM (26)
+#define CIRQ_SPI_START (128)
+#define CIRQ_IRQ_NUM (831)
+
+/*******************************************************************************
* MM IOMMU & SMI related constants
******************************************************************************/
#define SMI_LARB_0_BASE (IO_PHYS + 0x0c022000)
diff --git a/plat/mediatek/mt8196/plat_config.mk b/plat/mediatek/mt8196/plat_config.mk
index 773407a..0c87db9 100644
--- a/plat/mediatek/mt8196/plat_config.mk
+++ b/plat/mediatek/mt8196/plat_config.mk
@@ -27,7 +27,7 @@
CONFIG_ARCH_ARM_V9 := y
CONFIG_MTK_APUSYS_CE_SUPPORT := y
-CONFIG_MTK_APUSYS_EMI_SUPPORT := n
+CONFIG_MTK_APUSYS_EMI_SUPPORT := y
CONFIG_MTK_APUSYS_LOGTOP_SUPPORT := y
CONFIG_MTK_APUSYS_RV_APUMMU_SUPPORT := y
CONFIG_MTK_APUSYS_RV_COREDUMP_WA_SUPPORT := y
diff --git a/plat/mediatek/mt8196/platform.mk b/plat/mediatek/mt8196/platform.mk
index 09c6715..3e1165e 100644
--- a/plat/mediatek/mt8196/platform.mk
+++ b/plat/mediatek/mt8196/platform.mk
@@ -29,10 +29,12 @@
MODULES-y += $(MTK_PLAT)/lib/mtk_init
MODULES-y += $(MTK_PLAT)/lib/pm
MODULES-y += $(MTK_PLAT)/drivers/apusys
+MODULES-y += $(MTK_PLAT)/drivers/cirq
MODULES-y += $(MTK_PLAT)/drivers/dp
MODULES-y += $(MTK_PLAT)/drivers/emi
MODULES-y += $(MTK_PLAT)/drivers/gicv3
MODULES-y += $(MTK_PLAT)/drivers/mcusys
+MODULES-y += $(MTK_PLAT)/drivers/mminfra
MODULES-y += $(MTK_PLAT)/drivers/spm
MODULES-y += $(MTK_PLAT)/drivers/timer
MODULES-y += $(MTK_PLAT)/drivers/vcp
diff --git a/services/arm_arch_svc/arm_arch_svc_setup.c b/services/arm_arch_svc/arm_arch_svc_setup.c
index 6acd1b6..46333af 100644
--- a/services/arm_arch_svc/arm_arch_svc_setup.c
+++ b/services/arm_arch_svc/arm_arch_svc_setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -99,6 +99,14 @@
return SMC_ARCH_CALL_SUCCESS;
#endif /* ARCH_FEATURE_AVAILABILITY */
+#if WORKAROUND_CVE_2024_7881
+ case SMCCC_ARCH_WORKAROUND_4:
+ if (check_wa_cve_2024_7881() != ERRATA_APPLIES) {
+ return SMC_ARCH_CALL_NOT_SUPPORTED;
+ }
+ return 0;
+#endif /* WORKAROUND_CVE_2024_7881 */
+
#endif /* __aarch64__ */
/* Fallthrough */
@@ -254,6 +262,15 @@
*/
SMC_RET0(handle);
#endif
+#if WORKAROUND_CVE_2024_7881
+ case SMCCC_ARCH_WORKAROUND_4:
+ /*
+ * The workaround has already been applied on affected PEs
+ * during cold boot. This call has no effect, whether or not the
+ * PE is affected.
+ */
+ SMC_RET0(handle);
+#endif /* WORKAROUND_CVE_2024_7881 */
#endif /* __aarch64__ */
#if ARCH_FEATURE_AVAILABILITY
/* return is 64 bit so only reply on SMC64 requests */