Merge "feat(mt8186): add DFD control in SiP service" into integration
diff --git a/docs/design/cpu-specific-build-macros.rst b/docs/design/cpu-specific-build-macros.rst
index 9401811..b7d1168 100644
--- a/docs/design/cpu-specific-build-macros.rst
+++ b/docs/design/cpu-specific-build-macros.rst
@@ -29,6 +29,10 @@
    platform contains at least 1 CPU that requires dynamic mitigation.
    Defaults to 0.
 
+-  ``WORKAROUND_CVE_2022_23960``: Enables mitigation for `CVE-2022-23960`_.
+   This build option should be set to 1 if the target platform contains at
+   least 1 CPU that requires this mitigation. Defaults to 1.
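+
+   For example, the mitigation can be enabled explicitly when building
+   (an illustrative invocation; the platform chosen here is arbitrary):
+
+   .. code:: shell
+
+      # Illustrative; any supported PLAT value works the same way.
+      make PLAT=fvp WORKAROUND_CVE_2022_23960=1 all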
+
 .. _arm_cpu_macros_errata_workarounds:
 
 CPU Errata Workarounds
@@ -585,6 +589,7 @@
 
 .. _CVE-2017-5715: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-5715
 .. _CVE-2018-3639: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-3639
+.. _CVE-2022-23960: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23960
 .. _Cortex-A53 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm048406/index.html
 .. _Cortex-A57 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm049219/index.html
 .. _Cortex-A72 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm012079/index.html
diff --git a/include/lib/cpus/aarch64/cortex_a710.h b/include/lib/cpus/aarch64/cortex_a710.h
index ec62421..09614ee 100644
--- a/include/lib/cpus/aarch64/cortex_a710.h
+++ b/include/lib/cpus/aarch64/cortex_a710.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,9 @@
 
 #define CORTEX_A710_MIDR					U(0x410FD470)
 
+/* Cortex-A710 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A710_BHB_LOOP_COUNT				U(32)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_a76.h b/include/lib/cpus/aarch64/cortex_a76.h
index a61825f..74fb6e9 100644
--- a/include/lib/cpus/aarch64/cortex_a76.h
+++ b/include/lib/cpus/aarch64/cortex_a76.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,38 +10,41 @@
 #include <lib/utils_def.h>
 
 /* Cortex-A76 MIDR for revision 0 */
-#define CORTEX_A76_MIDR		U(0x410fd0b0)
+#define CORTEX_A76_MIDR						U(0x410fd0b0)
+
+/* Cortex-A76 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A76_BHB_LOOP_COUNT				U(24)
 
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
-#define CORTEX_A76_CPUPWRCTLR_EL1	S3_0_C15_C2_7
-#define CORTEX_A76_CPUECTLR_EL1		S3_0_C15_C1_4
+#define CORTEX_A76_CPUPWRCTLR_EL1				S3_0_C15_C2_7
+#define CORTEX_A76_CPUECTLR_EL1					S3_0_C15_C1_4
 
-#define CORTEX_A76_CPUECTLR_EL1_WS_THR_L2	(ULL(3) << 24)
-#define CORTEX_A76_CPUECTLR_EL1_BIT_51		(ULL(1) << 51)
+#define CORTEX_A76_CPUECTLR_EL1_WS_THR_L2			(ULL(3) << 24)
+#define CORTEX_A76_CPUECTLR_EL1_BIT_51				(ULL(1) << 51)
 
 /*******************************************************************************
  * CPU Auxiliary Control register specific definitions.
  ******************************************************************************/
-#define CORTEX_A76_CPUACTLR_EL1		S3_0_C15_C1_0
+#define CORTEX_A76_CPUACTLR_EL1					S3_0_C15_C1_0
 
 #define CORTEX_A76_CPUACTLR_EL1_DISABLE_STATIC_PREDICTION	(ULL(1) << 6)
 
-#define CORTEX_A76_CPUACTLR_EL1_BIT_13	(ULL(1) << 13)
+#define CORTEX_A76_CPUACTLR_EL1_BIT_13				(ULL(1) << 13)
 
-#define CORTEX_A76_CPUACTLR2_EL1	S3_0_C15_C1_1
+#define CORTEX_A76_CPUACTLR2_EL1				S3_0_C15_C1_1
 
-#define CORTEX_A76_CPUACTLR2_EL1_BIT_2	(ULL(1) << 2)
+#define CORTEX_A76_CPUACTLR2_EL1_BIT_2				(ULL(1) << 2)
 
 #define CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE	(ULL(1) << 16)
 
-#define CORTEX_A76_CPUACTLR3_EL1	S3_0_C15_C1_2
+#define CORTEX_A76_CPUACTLR3_EL1				S3_0_C15_C1_2
 
-#define CORTEX_A76_CPUACTLR3_EL1_BIT_10	(ULL(1) << 10)
+#define CORTEX_A76_CPUACTLR3_EL1_BIT_10				(ULL(1) << 10)
 
 
 /* Definitions of register field mask in CORTEX_A76_CPUPWRCTLR_EL1 */
-#define CORTEX_A76_CORE_PWRDN_EN_MASK	U(0x1)
+#define CORTEX_A76_CORE_PWRDN_EN_MASK				U(0x1)
 
 #endif /* CORTEX_A76_H */
diff --git a/include/lib/cpus/aarch64/cortex_a77.h b/include/lib/cpus/aarch64/cortex_a77.h
index 5753e90..4a87168 100644
--- a/include/lib/cpus/aarch64/cortex_a77.h
+++ b/include/lib/cpus/aarch64/cortex_a77.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -12,6 +12,9 @@
 /* Cortex-A77 MIDR */
 #define CORTEX_A77_MIDR					U(0x410FD0D0)
 
+/* Cortex-A77 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A77_BHB_LOOP_COUNT			U(24)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_a78.h b/include/lib/cpus/aarch64/cortex_a78.h
index 42b0833..f3cb39f 100644
--- a/include/lib/cpus/aarch64/cortex_a78.h
+++ b/include/lib/cpus/aarch64/cortex_a78.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, ARM Limited. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -11,6 +11,9 @@
 
 #define CORTEX_A78_MIDR					U(0x410FD410)
 
+/* Cortex-A78 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A78_BHB_LOOP_COUNT			U(32)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_x2.h b/include/lib/cpus/aarch64/cortex_x2.h
index e3d0fa9..62530e2 100644
--- a/include/lib/cpus/aarch64/cortex_x2.h
+++ b/include/lib/cpus/aarch64/cortex_x2.h
@@ -9,6 +9,9 @@
 
 #define CORTEX_X2_MIDR						U(0x410FD480)
 
+/* Cortex-X2 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_X2_BHB_LOOP_COUNT				U(32)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/neoverse_n1.h b/include/lib/cpus/aarch64/neoverse_n1.h
index b50befa..b6b8d8d 100644
--- a/include/lib/cpus/aarch64/neoverse_n1.h
+++ b/include/lib/cpus/aarch64/neoverse_n1.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,58 +10,61 @@
 #include <lib/utils_def.h>
 
 /* Neoverse N1 MIDR for revision 0 */
-#define NEOVERSE_N1_MIDR		U(0x410fd0c0)
+#define NEOVERSE_N1_MIDR				U(0x410fd0c0)
+
+/* Neoverse N1 loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_N1_BHB_LOOP_COUNT			U(24)
 
 /* Exception Syndrome register EC code for IC Trap */
-#define NEOVERSE_N1_EC_IC_TRAP		U(0x1f)
+#define NEOVERSE_N1_EC_IC_TRAP				U(0x1f)
 
 /*******************************************************************************
  * CPU Power Control register specific definitions.
  ******************************************************************************/
-#define NEOVERSE_N1_CPUPWRCTLR_EL1	S3_0_C15_C2_7
+#define NEOVERSE_N1_CPUPWRCTLR_EL1			S3_0_C15_C2_7
 
 /* Definitions of register field mask in NEOVERSE_N1_CPUPWRCTLR_EL1 */
-#define NEOVERSE_N1_CORE_PWRDN_EN_MASK	U(0x1)
+#define NEOVERSE_N1_CORE_PWRDN_EN_MASK			U(0x1)
 
-#define NEOVERSE_N1_ACTLR_AMEN_BIT	(U(1) << 4)
+#define NEOVERSE_N1_ACTLR_AMEN_BIT			(U(1) << 4)
 
-#define NEOVERSE_N1_AMU_NR_COUNTERS	U(5)
-#define NEOVERSE_N1_AMU_GROUP0_MASK	U(0x1f)
+#define NEOVERSE_N1_AMU_NR_COUNTERS			U(5)
+#define NEOVERSE_N1_AMU_GROUP0_MASK			U(0x1f)
 
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
-#define NEOVERSE_N1_CPUECTLR_EL1	S3_0_C15_C1_4
+#define NEOVERSE_N1_CPUECTLR_EL1			S3_0_C15_C1_4
 
-#define NEOVERSE_N1_WS_THR_L2_MASK	(ULL(3) << 24)
+#define NEOVERSE_N1_WS_THR_L2_MASK			(ULL(3) << 24)
 #define NEOVERSE_N1_CPUECTLR_EL1_MM_TLBPF_DIS_BIT	(ULL(1) << 51)
 #define NEOVERSE_N1_CPUECTLR_EL1_EXTLLC_BIT		(ULL(1) << 0)
 
 /*******************************************************************************
  * CPU Auxiliary Control register specific definitions.
  ******************************************************************************/
-#define NEOVERSE_N1_CPUACTLR_EL1	S3_0_C15_C1_0
+#define NEOVERSE_N1_CPUACTLR_EL1			S3_0_C15_C1_0
 
-#define NEOVERSE_N1_CPUACTLR_EL1_BIT_6	(ULL(1) << 6)
-#define NEOVERSE_N1_CPUACTLR_EL1_BIT_13	(ULL(1) << 13)
+#define NEOVERSE_N1_CPUACTLR_EL1_BIT_6			(ULL(1) << 6)
+#define NEOVERSE_N1_CPUACTLR_EL1_BIT_13			(ULL(1) << 13)
 
-#define NEOVERSE_N1_CPUACTLR2_EL1	S3_0_C15_C1_1
+#define NEOVERSE_N1_CPUACTLR2_EL1			S3_0_C15_C1_1
 
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_0		(ULL(1) << 0)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_2		(ULL(1) << 2)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_11	(ULL(1) << 11)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_15	(ULL(1) << 15)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_16	(ULL(1) << 16)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_59	(ULL(1) << 59)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_0			(ULL(1) << 0)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_2			(ULL(1) << 2)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_11		(ULL(1) << 11)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_15		(ULL(1) << 15)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_16		(ULL(1) << 16)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_59		(ULL(1) << 59)
 
-#define NEOVERSE_N1_CPUACTLR3_EL1	S3_0_C15_C1_2
+#define NEOVERSE_N1_CPUACTLR3_EL1			S3_0_C15_C1_2
 
-#define NEOVERSE_N1_CPUACTLR3_EL1_BIT_10	(ULL(1) << 10)
+#define NEOVERSE_N1_CPUACTLR3_EL1_BIT_10		(ULL(1) << 10)
 
 /* Instruction patching registers */
-#define CPUPSELR_EL3	S3_6_C15_C8_0
-#define CPUPCR_EL3	S3_6_C15_C8_1
-#define CPUPOR_EL3	S3_6_C15_C8_2
-#define CPUPMR_EL3	S3_6_C15_C8_3
+#define CPUPSELR_EL3					S3_6_C15_C8_0
+#define CPUPCR_EL3					S3_6_C15_C8_1
+#define CPUPOR_EL3					S3_6_C15_C8_2
+#define CPUPMR_EL3					S3_6_C15_C8_3
 
 #endif /* NEOVERSE_N1_H */
diff --git a/include/lib/cpus/aarch64/neoverse_n2.h b/include/lib/cpus/aarch64/neoverse_n2.h
index a1e676e..0452b39 100644
--- a/include/lib/cpus/aarch64/neoverse_n2.h
+++ b/include/lib/cpus/aarch64/neoverse_n2.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,9 @@
 /* Neoverse N2 ID register for revision r0p0 */
 #define NEOVERSE_N2_MIDR				U(0x410FD490)
 
+/* Neoverse N2 loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_N2_BHB_LOOP_COUNT			U(32)
+
 /*******************************************************************************
  * CPU Power control register
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/neoverse_v1.h b/include/lib/cpus/aarch64/neoverse_v1.h
index e43c907..a904c04 100644
--- a/include/lib/cpus/aarch64/neoverse_v1.h
+++ b/include/lib/cpus/aarch64/neoverse_v1.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, ARM Limited. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,9 @@
 
 #define NEOVERSE_V1_MIDR					U(0x410FD400)
 
+/* Neoverse V1 loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_V1_BHB_LOOP_COUNT				U(32)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
diff --git a/lib/cpus/aarch64/cortex_a710.S b/lib/cpus/aarch64/cortex_a710.S
index 4d5d949..aea62ae 100644
--- a/lib/cpus/aarch64/cortex_a710.S
+++ b/lib/cpus/aarch64/cortex_a710.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <cortex_a710.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
 #error "Cortex A710 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
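+	/*
+	 * Instantiate a CPU-specific copy of the runtime exception vectors
+	 * that applies the BHB workaround on entry from lower ELs.
+	 */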
+	wa_cve_2022_23960_bhb_vector_table CORTEX_A710_BHB_LOOP_COUNT, cortex_a710
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 /* --------------------------------------------------
  * Errata Workaround for Cortex-A710 Erratum 1987031.
  * This applies to revision r0p0, r1p0 and r2p0 of Cortex-A710. It is still
@@ -305,6 +310,15 @@
 	b       cpu_rev_var_ls
 endfunc check_errata_2282622
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ----------------------------------------------------
@@ -344,6 +358,7 @@
 	report_errata ERRATA_A710_2267065, cortex_a710, 2267065
 	report_errata ERRATA_A710_2136059, cortex_a710, 2136059
 	report_errata ERRATA_A710_2282622, cortex_a710, 2282622
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a710, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -404,6 +419,15 @@
 	bl	errata_a710_2282622_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex-A710 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_a710
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
 	isb
 	ret	x19
 endfunc cortex_a710_reset_func
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
index 4f7f4bb..114d0f5 100644
--- a/lib/cpus/aarch64/cortex_a76.S
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -7,11 +7,11 @@
 #include <arch.h>
 #include <asm_macros.S>
 #include <common/bl_common.h>
-#include <context.h>
 #include <cortex_a76.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
 #include <services/arm_arch_svc.h>
+#include "wa_cve_2022_23960_bhb.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -35,59 +35,17 @@
 	 *
 	 * The macro saves x2-x3 to the context. In the fast path
 	 * x0-x3 registers do not need to be restored as the calling
-	 * context will have saved them.
+	 * context will have saved them. The macro also saves
+	 * x29-x30 to the context in the sync_exception path.
 	 */
 	.macro apply_cve_2018_3639_wa _is_sync_exception _esr_el3_val
 	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
-
 	.if \_is_sync_exception
-		/*
-		 * Ensure SMC is coming from A64/A32 state on #0
-		 * with W0 = SMCCC_ARCH_WORKAROUND_2
-		 *
-		 * This sequence evaluates as:
-		 *    (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
-		 * allowing use of a single branch operation
-		 */
-		orr	w2, wzr, #SMCCC_ARCH_WORKAROUND_2
-		cmp	x0, x2
-		mrs	x3, esr_el3
-		mov_imm	w2, \_esr_el3_val
-		ccmp	w2, w3, #0, eq
-		/*
-		 * Static predictor will predict a fall-through, optimizing
-		 * the `SMCCC_ARCH_WORKAROUND_2` fast path.
-		 */
-		bne	1f
-
-		/*
-		 * The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
-		 * fast path.
-		 */
-		cmp	x1, xzr /* enable/disable check */
-
-		/*
-		 * When the calling context wants mitigation disabled,
-		 * we program the mitigation disable function in the
-		 * CPU context, which gets invoked on subsequent exits from
-		 * EL3 via the `el3_exit` function. Otherwise NULL is
-		 * programmed in the CPU context, which results in caller's
-		 * inheriting the EL3 mitigation state (enabled) on subsequent
-		 * `el3_exit`.
-		 */
-		mov	x0, xzr
-		adr	x1, cortex_a76_disable_wa_cve_2018_3639
-		csel	x1, x1, x0, eq
-		str	x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
-
-		mrs	x2, CORTEX_A76_CPUACTLR2_EL1
-		orr	x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
-		bic	x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
-		csel	x3, x3, x1, eq
-		msr	CORTEX_A76_CPUACTLR2_EL1, x3
-		exception_return /* exception_return contains ISB */
+	stp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+	mov_imm	w2, \_esr_el3_val
+	bl	apply_cve_2018_3639_sync_wa
+	ldp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
 	.endif
-1:
 	/*
 	 * Always enable v4 mitigation during EL3 execution. This is not
 	 * required for the fast path above because it does not perform any
@@ -105,8 +63,10 @@
 	 */
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
 	.endm
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
 
-vector_base cortex_a76_wa_cve_2018_3639_a76_vbar
+#if DYNAMIC_WORKAROUND_CVE_2018_3639 || WORKAROUND_CVE_2022_23960
+vector_base cortex_a76_wa_cve_vbar
 
 	/* ---------------------------------------------------------------------
 	 * Current EL with SP_EL0 : 0x0 - 0x200
@@ -153,22 +113,54 @@
 	 * ---------------------------------------------------------------------
 	 */
 vector_entry cortex_a76_sync_exception_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	sync_exception_aarch64
 end_vector_entry cortex_a76_sync_exception_aarch64
 
 vector_entry cortex_a76_irq_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	irq_aarch64
 end_vector_entry cortex_a76_irq_aarch64
 
 vector_entry cortex_a76_fiq_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	fiq_aarch64
 end_vector_entry cortex_a76_fiq_aarch64
 
 vector_entry cortex_a76_serror_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	serror_aarch64
 end_vector_entry cortex_a76_serror_aarch64
 
@@ -177,24 +169,130 @@
 	 * ---------------------------------------------------------------------
 	 */
 vector_entry cortex_a76_sync_exception_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	sync_exception_aarch32
 end_vector_entry cortex_a76_sync_exception_aarch32
 
 vector_entry cortex_a76_irq_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	irq_aarch32
 end_vector_entry cortex_a76_irq_aarch32
 
 vector_entry cortex_a76_fiq_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	fiq_aarch32
 end_vector_entry cortex_a76_fiq_aarch32
 
 vector_entry cortex_a76_serror_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	serror_aarch32
 end_vector_entry cortex_a76_serror_aarch32
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 || WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+	/*
+	 * -----------------------------------------------------------------
+	 * This function applies the mitigation for CVE-2018-3639
+	 * specifically for sync exceptions. It implements a fast path
+	 * where `SMCCC_ARCH_WORKAROUND_2` SMC calls from a lower EL
+	 * running in AArch64 take the fast path and return early.
+	 *
+	 * In the fast path the x0-x3 registers do not need to be restored,
+	 * as the calling context will have saved them.
+	 *
+	 * The caller must pass the expected esr_el3 value to compare in w2,
+	 * and must save and restore x29-x30 from the context around the
+	 * call before jumping to the main runtime vector table entry.
+	 *
+	 * Shall clobber: x0-x3, x30
+	 * -----------------------------------------------------------------
+	 */
+func apply_cve_2018_3639_sync_wa
+	/*
+	 * Ensure SMC is coming from A64/A32 state on #0
+	 * with W0 = SMCCC_ARCH_WORKAROUND_2
+	 *
+	 * This sequence evaluates as:
+	 *    (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
+	 * allowing use of a single branch operation.
+	 * W2 is populated by the caller with the expected esr_el3 value.
+	 */
+	orr	w3, wzr, #SMCCC_ARCH_WORKAROUND_2
+	cmp	x0, x3
+	mrs	x3, esr_el3
+
+	ccmp	w2, w3, #0, eq
+	/*
+	 * Static predictor will predict a fall-through, optimizing
+	 * the `SMCCC_ARCH_WORKAROUND_2` fast path.
+	 */
+	bne	1f
+
+	/*
+	 * The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
+	 * fast path.
+	 */
+	cmp	x1, xzr /* enable/disable check */
+
+	/*
+	 * When the calling context wants mitigation disabled,
+	 * we program the mitigation disable function in the
+	 * CPU context, which gets invoked on subsequent exits from
+	 * EL3 via the `el3_exit` function. Otherwise NULL is
+	 * programmed in the CPU context, which results in caller's
+	 * inheriting the EL3 mitigation state (enabled) on subsequent
+	 * `el3_exit`.
+	 */
+	mov	x0, xzr
+	adr	x1, cortex_a76_disable_wa_cve_2018_3639
+	csel	x1, x1, x0, eq
+	str	x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
+
+	mrs	x2, CORTEX_A76_CPUACTLR2_EL1
+	orr	x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+	bic	x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+	csel	x3, x3, x1, eq
+	msr	CORTEX_A76_CPUACTLR2_EL1, x3
+	ldp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+	/*
+	 * `SMCCC_ARCH_WORKAROUND_2` fast path return to lower EL.
+	 */
+	exception_return /* exception_return contains ISB */
+1:
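+	/* Not the fast path: return to the caller's vector entry */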
+	ret
+endfunc apply_cve_2018_3639_sync_wa
 #endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
 
 	/* --------------------------------------------------
@@ -519,6 +617,15 @@
 #endif
 endfunc check_errata_1165522
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif /* WORKAROUND_CVE_2022_23960 */
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A76.
 	 * Shall clobber: x0-x19
@@ -590,16 +697,31 @@
 	 * The Cortex-A76 generic vectors are overwritten to use the vectors
 	 * defined above. This is required in order to apply mitigation
 	 * against CVE-2018-3639 on exception entry from lower ELs.
+	 * If this vector table is used, skip overriding it again for
+	 * CVE-2022-23960, as both mitigations use the same vbar.
 	 */
-	adr	x0, cortex_a76_wa_cve_2018_3639_a76_vbar
+	adr	x0, cortex_a76_wa_cve_vbar
 	msr	vbar_el3, x0
 	isb
+	b	2f
 #endif /* IMAGE_BL31 */
 
 1:
 #endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
 #endif /* WORKAROUND_CVE_2018_3639 */
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex-A76 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs. This will be bypassed
+	 * if DYNAMIC_WORKAROUND_CVE_2018_3639 has overridden the vectors.
+	 */
+	adr	x0, cortex_a76_wa_cve_vbar
+	msr	vbar_el3, x0
+	isb
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+2:
+
 #if ERRATA_DSU_798953
 	bl	errata_dsu_798953_wa
 #endif
@@ -656,6 +778,7 @@
 	report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
 	report_errata ERRATA_DSU_798953, cortex_a76, dsu_798953
 	report_errata ERRATA_DSU_936184, cortex_a76, dsu_936184
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a76, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/cortex_a77.S b/lib/cpus/aarch64/cortex_a77.S
index 8c8f4d3..e7365e2 100644
--- a/lib/cpus/aarch64/cortex_a77.S
+++ b/lib/cpus/aarch64/cortex_a77.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <cortex_a77.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
 #error "Cortex-A77 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table CORTEX_A77_BHB_LOOP_COUNT, cortex_a77
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 	/* --------------------------------------------------
 	 * Errata Workaround for Cortex A77 Errata #1508412.
 	 * This applies only to revision <= r1p0 of Cortex A77.
@@ -194,6 +199,15 @@
 	b	cpu_rev_var_ls
 endfunc check_errata_1791578
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A77.
 	 * Shall clobber: x0-x19
@@ -224,6 +238,16 @@
 	bl	errata_a77_1791578_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex-A77 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_a77
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+	isb
 	ret	x19
 endfunc cortex_a77_reset_func
 
@@ -261,6 +285,7 @@
 	report_errata ERRATA_A77_1925769, cortex_a77, 1925769
 	report_errata ERRATA_A77_1946167, cortex_a77, 1946167
 	report_errata ERRATA_A77_1791578, cortex_a77, 1791578
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a77, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/cortex_a78.S b/lib/cpus/aarch64/cortex_a78.S
index a1288ba..1a6f848 100644
--- a/lib/cpus/aarch64/cortex_a78.S
+++ b/lib/cpus/aarch64/cortex_a78.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, ARM Limited. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,12 +10,16 @@
 #include <cortex_a78.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
 #error "cortex_a78 must be compiled with HW_ASSISTED_COHERENCY enabled"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table CORTEX_A78_BHB_LOOP_COUNT, cortex_a78
+#endif /* WORKAROUND_CVE_2022_23960 */
 
 /* --------------------------------------------------
  * Errata Workaround for A78 Erratum 1688305.
@@ -263,6 +267,15 @@
 	b	cpu_rev_var_range
 endfunc check_errata_2242635
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A78
 	 * -------------------------------------------------
@@ -327,6 +340,15 @@
 	msr	CPUAMCNTENSET1_EL0, x0
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex-A78 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_a78
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
 	isb
 	ret	x19
 endfunc cortex_a78_reset_func
@@ -368,6 +390,7 @@
 	report_errata ERRATA_A78_1952683, cortex_a78, 1952683
 	report_errata ERRATA_A78_2132060, cortex_a78, 2132060
 	report_errata ERRATA_A78_2242635, cortex_a78, 2242635
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a78, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/cortex_x2.S b/lib/cpus/aarch64/cortex_x2.S
index 2ecfbbb..9586a5b 100644
--- a/lib/cpus/aarch64/cortex_x2.S
+++ b/lib/cpus/aarch64/cortex_x2.S
@@ -10,6 +10,7 @@
 #include <cortex_x2.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
 #error "Cortex X2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table CORTEX_X2_BHB_LOOP_COUNT, cortex_x2
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 	/* --------------------------------------------------
 	 * Errata Workaround for Cortex X2 Errata #2002765.
 	 * This applies to revisions r0p0, r1p0, and r2p0 and
@@ -222,6 +227,16 @@
 	mov	x1, #0x20
 	b	cpu_rev_var_ls
 endfunc check_errata_2216384
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ----------------------------------------------------
@@ -258,6 +273,7 @@
 	report_errata ERRATA_X2_2017096, cortex_x2, 2017096
 	report_errata ERRATA_X2_2081180, cortex_x2, 2081180
 	report_errata ERRATA_X2_2216384, cortex_x2, 2216384
+	report_errata WORKAROUND_CVE_2022_23960, cortex_x2, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -305,6 +321,16 @@
 	bl	errata_x2_2216384_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex-X2 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_x2
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+	isb
 	ret x19
 endfunc cortex_x2_reset_func
 
diff --git a/lib/cpus/aarch64/neoverse_n1.S b/lib/cpus/aarch64/neoverse_n1.S
index 9c97cf6..b75b0c1 100644
--- a/lib/cpus/aarch64/neoverse_n1.S
+++ b/lib/cpus/aarch64/neoverse_n1.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -8,8 +8,8 @@
 #include <asm_macros.S>
 #include <cpuamu.h>
 #include <cpu_macros.S>
-#include <context.h>
 #include <neoverse_n1.h>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -23,6 +23,10 @@
 
 	.global neoverse_n1_errata_ic_trap_handler
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table NEOVERSE_N1_BHB_LOOP_COUNT, neoverse_n1
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 /* --------------------------------------------------
  * Errata Workaround for Neoverse N1 Erratum 1043202.
  * This applies to revision r0p0 and r1p0 of Neoverse N1.
@@ -464,6 +468,15 @@
 	b	cpu_rev_var_range
 endfunc check_errata_1946160
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 func neoverse_n1_reset_func
 	mov	x19, x30
 
@@ -575,6 +588,15 @@
 	bl	errata_dsu_936184_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Neoverse-N1 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_neoverse_n1
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
 	isb
 	ret	x19
 endfunc neoverse_n1_reset_func
@@ -624,6 +646,7 @@
 	report_errata ERRATA_N1_1868343, neoverse_n1, 1868343
 	report_errata ERRATA_N1_1946160, neoverse_n1, 1946160
 	report_errata ERRATA_DSU_936184, neoverse_n1, dsu_936184
+	report_errata WORKAROUND_CVE_2022_23960, neoverse_n1, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S
index 621aded..b93f2a6 100644
--- a/lib/cpus/aarch64/neoverse_n2.S
+++ b/lib/cpus/aarch64/neoverse_n2.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -8,6 +8,7 @@
 #include <asm_macros.S>
 #include <cpu_macros.S>
 #include <neoverse_n2.h>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -19,6 +20,10 @@
 #error "Neoverse-N2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table NEOVERSE_N2_BHB_LOOP_COUNT, neoverse_n2
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 /* --------------------------------------------------
  * Errata Workaround for Neoverse N2 Erratum 2002655.
  * This applies to revision r0p0 of Neoverse N2. it is still open.
@@ -333,6 +338,15 @@
 	b	cpu_rev_var_ls
 endfunc check_errata_2280757
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* -------------------------------------------
 	 * The CPU Ops reset function for Neoverse N2.
 	 * -------------------------------------------
@@ -428,6 +442,15 @@
 	bl	errata_n2_2002655_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Neoverse-N2 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_neoverse_n2
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
 	isb
 	ret	x19
 endfunc neoverse_n2_reset_func
@@ -469,6 +492,7 @@
 	report_errata ERRATA_N2_2138958, neoverse_n2, 2138958
 	report_errata ERRATA_N2_2242400, neoverse_n2, 2242400
 	report_errata ERRATA_N2_2280757, neoverse_n2, 2280757
+	report_errata WORKAROUND_CVE_2022_23960, neoverse_n2, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/neoverse_v1.S b/lib/cpus/aarch64/neoverse_v1.S
index 62a7a30..6adb3a8 100644
--- a/lib/cpus/aarch64/neoverse_v1.S
+++ b/lib/cpus/aarch64/neoverse_v1.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <neoverse_v1.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
 #error "Neoverse-V1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table NEOVERSE_V1_BHB_LOOP_COUNT, neoverse_v1
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 	/* --------------------------------------------------
 	 * Errata Workaround for Neoverse V1 Errata #1774420.
 	 * This applies to revisions r0p0 and r1p0, fixed in r1p1.
@@ -325,6 +330,15 @@
 	b	cpu_rev_var_range
 endfunc check_errata_2216392
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* ---------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ---------------------------------------------
@@ -364,6 +378,7 @@
 	report_errata ERRATA_V1_2139242, neoverse_v1, 2139242
 	report_errata ERRATA_V1_2108267, neoverse_v1, 2108267
 	report_errata ERRATA_V1_2216392, neoverse_v1, 2216392
+	report_errata WORKAROUND_CVE_2022_23960, neoverse_v1, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -422,6 +437,16 @@
 	bl	errata_neoverse_v1_2216392_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Neoverse-V1 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_neoverse_v1
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+	isb
 	ret	x19
 endfunc neoverse_v1_reset_func
 
diff --git a/lib/cpus/aarch64/wa_cve_2022_23960_bhb.S b/lib/cpus/aarch64/wa_cve_2022_23960_bhb.S
new file mode 100644
index 0000000..e0e41cc
--- /dev/null
+++ b/lib/cpus/aarch64/wa_cve_2022_23960_bhb.S
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <context.h>
+
+#if WORKAROUND_CVE_2022_23960
+	/*
+	 * This macro applies the mitigation for CVE-2022-23960.
+	 * The macro saves x2-x3 to the CPU context.
+	 * SP should point to the CPU context.
+	 */
+	.macro	apply_cve_2022_23960_bhb_wa _bhb_loop_count
+	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+
+	/* Set the CPU-specific BHB loop count */
+	mov	x2, \_bhb_loop_count
+
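+	/*
+	 * The loop below executes a sequence of taken branches
+	 * _bhb_loop_count times, overwriting the branch history so that
+	 * entries trained from lower ELs cannot influence speculation at
+	 * this exception level.
+	 */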
+1:
+	/* b pc+4 part of the workaround */
+	b	2f
+2:
+	subs	x2, x2, #1
+	bne	1b
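+	/*
+	 * Barriers ensure the branch sequence above completes before
+	 * execution continues past the workaround.
+	 */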
+	dsb	sy
+	isb
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	.endm
+#endif /* WORKAROUND_CVE_2022_23960 */
diff --git a/lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S b/lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S
new file mode 100644
index 0000000..220fa11
--- /dev/null
+++ b/lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <services/arm_arch_svc.h>
+#include "wa_cve_2022_23960_bhb.S"
+
+	/*
+	 * This macro instantiates a per-CPU copy of the runtime vector
+	 * table for CPUs that need the CVE-2022-23960 mitigation. Only the
+	 * lower-EL entries apply the BHB workaround; the current-EL
+	 * entries branch straight to the generic handlers.
+	 */
+	.macro wa_cve_2022_23960_bhb_vector_table _bhb_loop_count, _cpu
+
+	.globl	wa_cve_vbar_\_cpu
+
+vector_base wa_cve_vbar_\_cpu
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_EL0 : 0x0 - 0x200
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry bhb_sync_exception_sp_el0_\_cpu
+	b	sync_exception_sp_el0
+end_vector_entry bhb_sync_exception_sp_el0_\_cpu
+
+vector_entry bhb_irq_sp_el0_\_cpu
+	b	irq_sp_el0
+end_vector_entry bhb_irq_sp_el0_\_cpu
+
+vector_entry bhb_fiq_sp_el0_\_cpu
+	b	fiq_sp_el0
+end_vector_entry bhb_fiq_sp_el0_\_cpu
+
+vector_entry bhb_serror_sp_el0_\_cpu
+	b	serror_sp_el0
+end_vector_entry bhb_serror_sp_el0_\_cpu
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_ELx: 0x200 - 0x400
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry bhb_sync_exception_sp_elx_\_cpu
+	b	sync_exception_sp_elx
+end_vector_entry bhb_sync_exception_sp_elx_\_cpu
+
+vector_entry bhb_irq_sp_elx_\_cpu
+	b	irq_sp_elx
+end_vector_entry bhb_irq_sp_elx_\_cpu
+
+vector_entry bhb_fiq_sp_elx_\_cpu
+	b	fiq_sp_elx
+end_vector_entry bhb_fiq_sp_elx_\_cpu
+
+vector_entry bhb_serror_sp_elx_\_cpu
+	b	serror_sp_elx
+end_vector_entry bhb_serror_sp_elx_\_cpu
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch64 : 0x400 - 0x600
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry bhb_sync_exception_aarch64_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	sync_exception_aarch64
+end_vector_entry bhb_sync_exception_aarch64_\_cpu
+
+vector_entry bhb_irq_aarch64_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	irq_aarch64
+end_vector_entry bhb_irq_aarch64_\_cpu
+
+vector_entry bhb_fiq_aarch64_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	fiq_aarch64
+end_vector_entry bhb_fiq_aarch64_\_cpu
+
+vector_entry bhb_serror_aarch64_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	serror_aarch64
+end_vector_entry bhb_serror_aarch64_\_cpu
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch32 : 0x600 - 0x800
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry bhb_sync_exception_aarch32_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	sync_exception_aarch32
+end_vector_entry bhb_sync_exception_aarch32_\_cpu
+
+vector_entry bhb_irq_aarch32_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	irq_aarch32
+end_vector_entry bhb_irq_aarch32_\_cpu
+
+vector_entry bhb_fiq_aarch32_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	fiq_aarch32
+end_vector_entry bhb_fiq_aarch32_\_cpu
+
+vector_entry bhb_serror_aarch32_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	serror_aarch32
+end_vector_entry bhb_serror_aarch32_\_cpu
+	.endm
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index e812c07..c7630fb 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -24,6 +24,7 @@
 WORKAROUND_CVE_2017_5715		?=1
 WORKAROUND_CVE_2018_3639		?=1
 DYNAMIC_WORKAROUND_CVE_2018_3639	?=0
+WORKAROUND_CVE_2022_23960		?=1
 
 # Flags to indicate internal or external Last level cache
 # By default internal
@@ -56,6 +57,10 @@
 $(eval $(call assert_boolean,DYNAMIC_WORKAROUND_CVE_2018_3639))
 $(eval $(call add_define,DYNAMIC_WORKAROUND_CVE_2018_3639))
 
+# Process WORKAROUND_CVE_2022_23960 flag
+$(eval $(call assert_boolean,WORKAROUND_CVE_2022_23960))
+$(eval $(call add_define,WORKAROUND_CVE_2022_23960))
+
 $(eval $(call assert_boolean,NEOVERSE_Nx_EXTERNAL_LLC))
 $(eval $(call add_define,NEOVERSE_Nx_EXTERNAL_LLC))
 
diff --git a/plat/arm/css/sgi/include/sgi_base_platform_def.h b/plat/arm/css/sgi/include/sgi_base_platform_def.h
index 93609b9..c9c8c04 100644
--- a/plat/arm/css/sgi/include/sgi_base_platform_def.h
+++ b/plat/arm/css/sgi/include/sgi_base_platform_def.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2022, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -35,8 +35,8 @@
 # if SPM_MM
 #  define PLAT_ARM_MMAP_ENTRIES		(9  + ((CSS_SGI_CHIP_COUNT - 1) * 3))
 #  define MAX_XLAT_TABLES		(7  + ((CSS_SGI_CHIP_COUNT - 1) * 3))
-#  define PLAT_SP_IMAGE_MMAP_REGIONS	10
-#  define PLAT_SP_IMAGE_MAX_XLAT_TABLES	12
+#  define PLAT_SP_IMAGE_MMAP_REGIONS	9
+#  define PLAT_SP_IMAGE_MAX_XLAT_TABLES	11
 # else
 #  define PLAT_ARM_MMAP_ENTRIES		(5 + ((CSS_SGI_CHIP_COUNT - 1) * 3))
 #  define MAX_XLAT_TABLES		(6 + ((CSS_SGI_CHIP_COUNT - 1) * 3))
@@ -130,21 +130,6 @@
 # define PLATFORM_STACK_SIZE 0x440
 #endif
 
-/* PL011 UART related constants */
-#define SOC_CSS_SEC_UART_BASE			UL(0x2A410000)
-#define SOC_CSS_NSEC_UART_BASE			UL(0x2A400000)
-#define SOC_CSS_UART_SIZE			UL(0x10000)
-#define SOC_CSS_UART_CLK_IN_HZ			UL(7372800)
-
-/* UART related constants */
-#define PLAT_ARM_BOOT_UART_BASE			SOC_CSS_SEC_UART_BASE
-#define PLAT_ARM_BOOT_UART_CLK_IN_HZ		SOC_CSS_UART_CLK_IN_HZ
-
-#define PLAT_ARM_RUN_UART_BASE			SOC_CSS_SEC_UART_BASE
-#define PLAT_ARM_RUN_UART_CLK_IN_HZ		SOC_CSS_UART_CLK_IN_HZ
-
-#define PLAT_ARM_CRASH_UART_BASE		SOC_CSS_SEC_UART_BASE
-#define PLAT_ARM_CRASH_UART_CLK_IN_HZ		SOC_CSS_UART_CLK_IN_HZ
 
 #define PLAT_ARM_NSTIMER_FRAME_ID	0
 
@@ -273,18 +258,4 @@
 		CSS_SGI_REMOTE_CHIP_MEM_OFFSET(n) + ARM_DRAM2_END,	\
 		ARM_TZC_NS_DRAM_S_ACCESS, PLAT_ARM_TZC_NS_DEV_ACCESS}
 
-#if SPM_MM
-
-/*
- * Stand-alone MM logs would be routed via secure UART. Define page table
- * entry for secure UART which would be common to all platforms.
- */
-#define SOC_PLATFORM_SECURE_UART	MAP_REGION_FLAT(		\
-					SOC_CSS_SEC_UART_BASE,		\
-					SOC_CSS_UART_SIZE,		\
-					MT_DEVICE | MT_RW | 		\
-					MT_SECURE | MT_USER)
-
-#endif
-
 #endif /* SGI_BASE_PLATFORM_DEF_H */
diff --git a/plat/arm/css/sgi/include/sgi_soc_css_def.h b/plat/arm/css/sgi/include/sgi_soc_css_def.h
deleted file mode 100644
index f78b45a..0000000
--- a/plat/arm/css/sgi/include/sgi_soc_css_def.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef SGI_SOC_CSS_DEF_H
-#define SGI_SOC_CSS_DEF_H
-
-#include <lib/utils_def.h>
-#include <plat/arm/board/common/v2m_def.h>
-#include <plat/arm/soc/common/soc_css_def.h>
-#include <plat/common/common_def.h>
-
-/*
- * Definitions common to all ARM CSSv1-based development platforms
- */
-
-/* Platform ID address */
-#define BOARD_CSS_PLAT_ID_REG_ADDR		UL(0x7ffe00e0)
-
-/* Platform ID related accessors */
-#define BOARD_CSS_PLAT_ID_REG_ID_MASK		0x0f
-#define BOARD_CSS_PLAT_ID_REG_ID_SHIFT		0x0
-#define BOARD_CSS_PLAT_TYPE_EMULATOR		0x02
-
-#ifndef __ASSEMBLER__
-
-#include <lib/mmio.h>
-
-#define BOARD_CSS_GET_PLAT_TYPE(addr)					\
-	((mmio_read_32(addr) & BOARD_CSS_PLAT_ID_REG_ID_MASK)		\
-	>> BOARD_CSS_PLAT_ID_REG_ID_SHIFT)
-
-#endif /* __ASSEMBLER__ */
-
-#define MAX_IO_DEVICES			3
-#define MAX_IO_HANDLES			4
-
-/* Reserve the last block of flash for PSCI MEM PROTECT flag */
-#define PLAT_ARM_FLASH_IMAGE_BASE	V2M_FLASH0_BASE
-#define PLAT_ARM_FLASH_IMAGE_MAX_SIZE	(V2M_FLASH0_SIZE - V2M_FLASH_BLOCK_SIZE)
-
-#define PLAT_ARM_NVM_BASE		V2M_FLASH0_BASE
-#define PLAT_ARM_NVM_SIZE		(V2M_FLASH0_SIZE - V2M_FLASH_BLOCK_SIZE)
-
-#endif /* SGI_SOC_CSS_DEF_H */
diff --git a/plat/arm/css/sgi/include/sgi_soc_css_def_v2.h b/plat/arm/css/sgi/include/sgi_soc_css_def_v2.h
index acf31eb..639b687 100644
--- a/plat/arm/css/sgi/include/sgi_soc_css_def_v2.h
+++ b/plat/arm/css/sgi/include/sgi_soc_css_def_v2.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -24,10 +24,17 @@
 
 #define SOC_CSS_PCIE_CONTROL_BASE	UL(0x0ef20000)
 
+/* PL011 UART related constants */
+#define SOC_CSS_UART1_BASE		UL(0x0ef80000)
+#define SOC_CSS_UART0_BASE		UL(0x0ef70000)
+
 /* Memory controller */
 #define SOC_MEMCNTRL_BASE		UL(0x10000000)
 #define SOC_MEMCNTRL_SIZE		UL(0x10000000)
 
+#define SOC_CSS_UART0_CLK_IN_HZ		UL(7372800)
+#define SOC_CSS_UART1_CLK_IN_HZ		UL(7372800)
+
 /* SoC NIC-400 Global Programmers View (GPV) */
 #define SOC_CSS_NIC400_BASE		UL(0x0ED00000)
 
@@ -199,4 +206,17 @@
 #define PLAT_ARM_NVM_BASE		V2M_FLASH0_BASE
 #define PLAT_ARM_NVM_SIZE		(V2M_FLASH0_SIZE - V2M_FLASH_BLOCK_SIZE)
 
+/* UART related constants */
+#define PLAT_ARM_BOOT_UART_BASE			SOC_CSS_UART0_BASE
+#define PLAT_ARM_BOOT_UART_CLK_IN_HZ		SOC_CSS_UART0_CLK_IN_HZ
+
+#define PLAT_ARM_RUN_UART_BASE			SOC_CSS_UART1_BASE
+#define PLAT_ARM_RUN_UART_CLK_IN_HZ		SOC_CSS_UART1_CLK_IN_HZ
+
+#define PLAT_ARM_SP_MIN_RUN_UART_BASE		SOC_CSS_UART1_BASE
+#define PLAT_ARM_SP_MIN_RUN_UART_CLK_IN_HZ	SOC_CSS_UART1_CLK_IN_HZ
+
+#define PLAT_ARM_CRASH_UART_BASE		PLAT_ARM_RUN_UART_BASE
+#define PLAT_ARM_CRASH_UART_CLK_IN_HZ		PLAT_ARM_RUN_UART_CLK_IN_HZ
+
 #endif /* SGI_SOC_CSS_DEF_V2_H */
diff --git a/plat/arm/css/sgi/include/sgi_soc_platform_def.h b/plat/arm/css/sgi/include/sgi_soc_platform_def.h
index 3b8d9c6..405d62f 100644
--- a/plat/arm/css/sgi/include/sgi_soc_platform_def.h
+++ b/plat/arm/css/sgi/include/sgi_soc_platform_def.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -7,10 +7,10 @@
 #ifndef SGI_SOC_PLATFORM_DEF_H
 #define SGI_SOC_PLATFORM_DEF_H
 
+#include <sgi_base_platform_def.h>
+#include <plat/arm/board/common/board_css_def.h>
 #include <plat/arm/board/common/v2m_def.h>
 #include <plat/arm/soc/common/soc_css_def.h>
-#include <sgi_base_platform_def.h>
-#include <sgi_soc_css_def.h>
 
 /* Map the System registers to access from S-EL0 */
 #define CSS_SYSTEMREG_DEVICE_BASE	(0x1C010000)
diff --git a/plat/arm/css/sgi/sgi_plat.c b/plat/arm/css/sgi/sgi_plat.c
index a0199c3..20c52e9 100644
--- a/plat/arm/css/sgi/sgi_plat.c
+++ b/plat/arm/css/sgi/sgi_plat.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2022, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -89,7 +89,6 @@
 const mmap_region_t plat_arm_secure_partition_mmap[] = {
 	PLAT_ARM_SECURE_MAP_SYSTEMREG,
 	PLAT_ARM_SECURE_MAP_NOR2,
-	SOC_PLATFORM_SECURE_UART,
 	PLAT_ARM_SECURE_MAP_DEVICE,
 	ARM_SP_IMAGE_MMAP,
 	ARM_SP_IMAGE_NS_BUF_MMAP,
diff --git a/plat/arm/css/sgi/sgi_plat_v2.c b/plat/arm/css/sgi/sgi_plat_v2.c
index cef5345..1a2a966 100644
--- a/plat/arm/css/sgi/sgi_plat_v2.c
+++ b/plat/arm/css/sgi/sgi_plat_v2.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -83,7 +83,6 @@
 const mmap_region_t plat_arm_secure_partition_mmap[] = {
 	PLAT_ARM_SECURE_MAP_SYSTEMREG,
 	PLAT_ARM_SECURE_MAP_NOR2,
-	SOC_PLATFORM_SECURE_UART,
 	SOC_PLATFORM_PERIPH_MAP_DEVICE_USER,
 	ARM_SP_IMAGE_MMAP,
 	ARM_SP_IMAGE_NS_BUF_MMAP,
diff --git a/plat/marvell/armada/a3k/common/cm3_system_reset.c b/plat/marvell/armada/a3k/common/cm3_system_reset.c
index 548ff51..f105d59 100644
--- a/plat/marvell/armada/a3k/common/cm3_system_reset.c
+++ b/plat/marvell/armada/a3k/common/cm3_system_reset.c
@@ -58,5 +58,5 @@
 	}
 
 	/* If we reach here, the command is not implemented. */
-	ERROR("System reset command not implemented in WTMI firmware!\n");
+	WARN("System reset command not implemented in WTMI firmware!\n");
 }
diff --git a/plat/st/common/bl2_stm32_io_storage.c b/plat/st/common/bl2_stm32_io_storage.c
index 2d68a50..4391195 100644
--- a/plat/st/common/bl2_stm32_io_storage.c
+++ b/plat/st/common/bl2_stm32_io_storage.c
@@ -379,19 +379,21 @@
 		stm32_sdmmc2_mmc_get_device_size();
 
 #if STM32MP_EMMC_BOOT
-	magic = get_boot_part_ssbl_header();
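+	/* The boot-partition SSBL lookup only applies to eMMC devices */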
+	if (mmc_dev_type == MMC_IS_EMMC) {
+		magic = get_boot_part_ssbl_header();
 
-	if (magic == BOOT_API_IMAGE_HEADER_MAGIC_NB) {
-		VERBOSE("%s, header found, jump to emmc load\n", __func__);
-		idx = IMG_IDX_BL33;
-		part = &stm32image_dev_info_spec.part_info[idx];
-		part->part_offset = PLAT_EMMC_BOOT_SSBL_OFFSET;
-		part->bkp_offset = 0U;
-		mmc_device_spec.use_boot_part = true;
+		if (magic == BOOT_API_IMAGE_HEADER_MAGIC_NB) {
+			VERBOSE("%s, header found, jump to emmc load\n", __func__);
+			idx = IMG_IDX_BL33;
+			part = &stm32image_dev_info_spec.part_info[idx];
+			part->part_offset = PLAT_EMMC_BOOT_SSBL_OFFSET;
+			part->bkp_offset = 0U;
+			mmc_device_spec.use_boot_part = true;
 
-		goto emmc_boot;
-	} else {
-		WARN("%s: Can't find STM32 header on a boot partition\n", __func__);
+			goto emmc_boot;
+		} else {
+			WARN("%s: Can't find STM32 header on a boot partition\n", __func__);
+		}
 	}
 #endif