Merge changes I1517b69c,Ie01f36ff into integration

* changes:
  fix(security): workaround for CVE-2022-23960
  refactor(cpus): add a CPU_EXTRA3_FUNC hook for SMCCC_ARCH_WORKAROUND_3
diff --git a/docs/design/cpu-specific-build-macros.rst b/docs/design/cpu-specific-build-macros.rst
index 9401811..b7d1168 100644
--- a/docs/design/cpu-specific-build-macros.rst
+++ b/docs/design/cpu-specific-build-macros.rst
@@ -29,6 +29,10 @@
    platform contains at least 1 CPU that requires dynamic mitigation.
    Defaults to 0.
 
+-  ``WORKAROUND_CVE_2022_23960``: Enables mitigation for `CVE-2022-23960`_.
+   This build option should be set to 1 if the target platform contains at
+   least 1 CPU that requires this mitigation. Defaults to 1.
+
 .. _arm_cpu_macros_errata_workarounds:
 
 CPU Errata Workarounds
@@ -585,6 +589,7 @@
 
 .. _CVE-2017-5715: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-5715
 .. _CVE-2018-3639: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-3639
+.. _CVE-2022-23960: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23960
 .. _Cortex-A53 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm048406/index.html
 .. _Cortex-A57 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm049219/index.html
 .. _Cortex-A72 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm012079/index.html
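
For context, a caller at a lower exception level is expected to discover
SMCCC_ARCH_WORKAROUND_3 via SMCCC_ARCH_FEATURES before invoking it. A minimal
C sketch of that calling convention follows; the smc64() wrapper is an
assumed platform shim, SMCCC_ARCH_FEATURES is defined by the SMCCC
specification, and only the SMCCC_ARCH_WORKAROUND_3 function ID (0x80003FFF)
comes from this patch:

#include <stdint.h>

#define SMCCC_ARCH_FEATURES		0x80000001U	/* SMCCC v1.1 */
#define SMCCC_ARCH_WORKAROUND_3		0x80003FFFU	/* added below */

/* Assumed SMC shim: issues SMC #0 with the given function ID and argument. */
extern int64_t smc64(uint32_t fid, uint64_t a1);

static void mitigate_bhb_before_untrusted_entry(void)
{
	/*
	 * A negative SMCCC_ARCH_FEATURES result means the workaround is not
	 * implemented (or not required) on this CPU.
	 */
	if (smc64(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_3) >= 0)
		(void)smc64(SMCCC_ARCH_WORKAROUND_3, 0);
}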
diff --git a/include/lib/cpus/aarch64/cortex_a710.h b/include/lib/cpus/aarch64/cortex_a710.h
index ec62421..09614ee 100644
--- a/include/lib/cpus/aarch64/cortex_a710.h
+++ b/include/lib/cpus/aarch64/cortex_a710.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,9 @@
 
 #define CORTEX_A710_MIDR					U(0x410FD470)
 
+/* Cortex-A710 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A710_BHB_LOOP_COUNT				U(32)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_a72.h b/include/lib/cpus/aarch64/cortex_a72.h
index 28b440e..1777645 100644
--- a/include/lib/cpus/aarch64/cortex_a72.h
+++ b/include/lib/cpus/aarch64/cortex_a72.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -12,6 +12,9 @@
 /* Cortex-A72 midr for revision 0 */
 #define CORTEX_A72_MIDR 				U(0x410FD080)
 
+/* Cortex-A72 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A72_BHB_LOOP_COUNT			U(8)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_a76.h b/include/lib/cpus/aarch64/cortex_a76.h
index a61825f..74fb6e9 100644
--- a/include/lib/cpus/aarch64/cortex_a76.h
+++ b/include/lib/cpus/aarch64/cortex_a76.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,38 +10,41 @@
 #include <lib/utils_def.h>
 
 /* Cortex-A76 MIDR for revision 0 */
-#define CORTEX_A76_MIDR		U(0x410fd0b0)
+#define CORTEX_A76_MIDR						U(0x410fd0b0)
+
+/* Cortex-A76 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A76_BHB_LOOP_COUNT				U(24)
 
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
-#define CORTEX_A76_CPUPWRCTLR_EL1	S3_0_C15_C2_7
-#define CORTEX_A76_CPUECTLR_EL1		S3_0_C15_C1_4
+#define CORTEX_A76_CPUPWRCTLR_EL1				S3_0_C15_C2_7
+#define CORTEX_A76_CPUECTLR_EL1					S3_0_C15_C1_4
 
-#define CORTEX_A76_CPUECTLR_EL1_WS_THR_L2	(ULL(3) << 24)
-#define CORTEX_A76_CPUECTLR_EL1_BIT_51		(ULL(1) << 51)
+#define CORTEX_A76_CPUECTLR_EL1_WS_THR_L2			(ULL(3) << 24)
+#define CORTEX_A76_CPUECTLR_EL1_BIT_51				(ULL(1) << 51)
 
 /*******************************************************************************
  * CPU Auxiliary Control register specific definitions.
  ******************************************************************************/
-#define CORTEX_A76_CPUACTLR_EL1		S3_0_C15_C1_0
+#define CORTEX_A76_CPUACTLR_EL1					S3_0_C15_C1_0
 
 #define CORTEX_A76_CPUACTLR_EL1_DISABLE_STATIC_PREDICTION	(ULL(1) << 6)
 
-#define CORTEX_A76_CPUACTLR_EL1_BIT_13	(ULL(1) << 13)
+#define CORTEX_A76_CPUACTLR_EL1_BIT_13				(ULL(1) << 13)
 
-#define CORTEX_A76_CPUACTLR2_EL1	S3_0_C15_C1_1
+#define CORTEX_A76_CPUACTLR2_EL1				S3_0_C15_C1_1
 
-#define CORTEX_A76_CPUACTLR2_EL1_BIT_2	(ULL(1) << 2)
+#define CORTEX_A76_CPUACTLR2_EL1_BIT_2				(ULL(1) << 2)
 
 #define CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE	(ULL(1) << 16)
 
-#define CORTEX_A76_CPUACTLR3_EL1	S3_0_C15_C1_2
+#define CORTEX_A76_CPUACTLR3_EL1				S3_0_C15_C1_2
 
-#define CORTEX_A76_CPUACTLR3_EL1_BIT_10	(ULL(1) << 10)
+#define CORTEX_A76_CPUACTLR3_EL1_BIT_10				(ULL(1) << 10)
 
 
 /* Definitions of register field mask in CORTEX_A76_CPUPWRCTLR_EL1 */
-#define CORTEX_A76_CORE_PWRDN_EN_MASK	U(0x1)
+#define CORTEX_A76_CORE_PWRDN_EN_MASK				U(0x1)
 
 #endif /* CORTEX_A76_H */
diff --git a/include/lib/cpus/aarch64/cortex_a77.h b/include/lib/cpus/aarch64/cortex_a77.h
index 5753e90..4a87168 100644
--- a/include/lib/cpus/aarch64/cortex_a77.h
+++ b/include/lib/cpus/aarch64/cortex_a77.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -12,6 +12,9 @@
 /* Cortex-A77 MIDR */
 #define CORTEX_A77_MIDR					U(0x410FD0D0)
 
+/* Cortex-A77 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A77_BHB_LOOP_COUNT			U(24)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_a78.h b/include/lib/cpus/aarch64/cortex_a78.h
index 42b0833..f3cb39f 100644
--- a/include/lib/cpus/aarch64/cortex_a78.h
+++ b/include/lib/cpus/aarch64/cortex_a78.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, ARM Limited. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -11,6 +11,9 @@
 
 #define CORTEX_A78_MIDR					U(0x410FD410)
 
+/* Cortex-A78 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A78_BHB_LOOP_COUNT			U(32)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_x2.h b/include/lib/cpus/aarch64/cortex_x2.h
index e3d0fa9..62530e2 100644
--- a/include/lib/cpus/aarch64/cortex_x2.h
+++ b/include/lib/cpus/aarch64/cortex_x2.h
@@ -9,6 +9,9 @@
 
 #define CORTEX_X2_MIDR						U(0x410FD480)
 
+/* Cortex-X2 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_X2_BHB_LOOP_COUNT			U(32)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S
index 92891ce..92e65ae 100644
--- a/include/lib/cpus/aarch64/cpu_macros.S
+++ b/include/lib/cpus/aarch64/cpu_macros.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -21,6 +21,7 @@
 
 #define CPU_NO_EXTRA1_FUNC		0
 #define CPU_NO_EXTRA2_FUNC		0
+#define CPU_NO_EXTRA3_FUNC		0
 
 /* Word size for 64-bit CPUs */
 #define CPU_WORD_SIZE			8
@@ -39,6 +40,7 @@
 	.equ	CPU_MIDR_SIZE, CPU_WORD_SIZE
 	.equ	CPU_EXTRA1_FUNC_SIZE, CPU_WORD_SIZE
 	.equ	CPU_EXTRA2_FUNC_SIZE, CPU_WORD_SIZE
+	.equ	CPU_EXTRA3_FUNC_SIZE, CPU_WORD_SIZE
 	.equ	CPU_E_HANDLER_FUNC_SIZE, CPU_WORD_SIZE
 	.equ	CPU_RESET_FUNC_SIZE, CPU_WORD_SIZE
 	.equ	CPU_PWR_DWN_OPS_SIZE, CPU_WORD_SIZE * CPU_MAX_PWR_DWN_OPS
@@ -80,7 +82,8 @@
 	.equ	CPU_RESET_FUNC, CPU_MIDR + CPU_MIDR_SIZE
 	.equ	CPU_EXTRA1_FUNC, CPU_RESET_FUNC + CPU_RESET_FUNC_SIZE
 	.equ	CPU_EXTRA2_FUNC, CPU_EXTRA1_FUNC + CPU_EXTRA1_FUNC_SIZE
-	.equ	CPU_E_HANDLER_FUNC, CPU_EXTRA2_FUNC + CPU_EXTRA2_FUNC_SIZE
+	.equ	CPU_EXTRA3_FUNC, CPU_EXTRA2_FUNC + CPU_EXTRA2_FUNC_SIZE
+	.equ	CPU_E_HANDLER_FUNC, CPU_EXTRA3_FUNC + CPU_EXTRA3_FUNC_SIZE
 	.equ	CPU_PWR_DWN_OPS, CPU_E_HANDLER_FUNC + CPU_E_HANDLER_FUNC_SIZE
 	.equ	CPU_ERRATA_FUNC, CPU_PWR_DWN_OPS + CPU_PWR_DWN_OPS_SIZE
 	.equ	CPU_ERRATA_LOCK, CPU_ERRATA_FUNC + CPU_ERRATA_FUNC_SIZE
@@ -134,9 +137,13 @@
 	 *	some CPUs use this entry to set a test function to determine if
 	 *	the workaround for CVE-2017-5715 needs to be applied or not.
 	 * _extra2:
-	 *	This is a placeholder for future per CPU operations.  Currently
+	 *	This is a placeholder for future per CPU operations. Currently
 	 *	some CPUs use this entry to set a function to disable the
 	 *	workaround for CVE-2018-3639.
+	 * _extra3:
+	 *	This is a placeholder for future per CPU operations. Currently,
+	 *	some CPUs use this entry to set a test function to determine if
+	 *	the workaround for CVE-2022-23960 needs to be applied or not.
 	 * _e_handler:
 	 *	This is a placeholder for future per CPU exception handlers.
 	 * _power_down_ops:
@@ -149,7 +156,7 @@
 	 *	used to handle power down at subsequent levels
 	 */
 	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
-		_extra1:req, _extra2:req, _e_handler:req, _power_down_ops:vararg
+		_extra1:req, _extra2:req, _extra3:req, _e_handler:req, _power_down_ops:vararg
 	.section cpu_ops, "a"
 	.align 3
 	.type cpu_ops_\_name, %object
@@ -159,6 +166,7 @@
 #endif
 	.quad \_extra1
 	.quad \_extra2
+	.quad \_extra3
 	.quad \_e_handler
 #ifdef IMAGE_BL31
 	/* Insert list of functions */
@@ -204,21 +212,21 @@
 
 	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
 		_power_down_ops:vararg
-		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, \
+		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, \
 			\_power_down_ops
 	.endm
 
 	.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
 		_e_handler:req, _power_down_ops:vararg
 		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
-			0, 0, \_e_handler, \_power_down_ops
+			0, 0, 0, \_e_handler, \_power_down_ops
 	.endm
 
 	.macro declare_cpu_ops_wa _name:req, _midr:req, \
 		_resetfunc:req, _extra1:req, _extra2:req, \
-		_power_down_ops:vararg
+		_extra3:req, _power_down_ops:vararg
 		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
-			\_extra1, \_extra2, 0, \_power_down_ops
+			\_extra1, \_extra2, \_extra3, 0, \_power_down_ops
 	.endm
 
 #if REPORT_ERRATA
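
Read back as a C structure, the cpu_ops layout described by the .equ offsets
above looks roughly like the sketch below. This is purely illustrative: the
real object is assembled field by field by declare_cpu_ops_base, and field
presence varies with build options such as IMAGE_BL31 and REPORT_ERRATA.

#define CPU_MAX_PWR_DWN_OPS	2	/* assumed: core and cluster levels */

/* Illustrative BL31 view of the layout; not a definition in the sources. */
struct cpu_ops_sketch {
	unsigned long midr;
	void (*reset_func)(void);
	void *extra1;		/* e.g. CVE-2017-5715 test hook */
	void *extra2;		/* e.g. CVE-2018-3639 disable hook */
	void *extra3;		/* new: CVE-2022-23960 test hook */
	void *e_handler;	/* per-CPU exception handler placeholder */
	void (*pwr_dwn_ops[CPU_MAX_PWR_DWN_OPS])(void);
	/* errata reporting fields follow when REPORT_ERRATA=1 */
};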
diff --git a/include/lib/cpus/aarch64/neoverse_n1.h b/include/lib/cpus/aarch64/neoverse_n1.h
index b50befa..b6b8d8d 100644
--- a/include/lib/cpus/aarch64/neoverse_n1.h
+++ b/include/lib/cpus/aarch64/neoverse_n1.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,58 +10,61 @@
 #include <lib/utils_def.h>
 
 /* Neoverse N1 MIDR for revision 0 */
-#define NEOVERSE_N1_MIDR		U(0x410fd0c0)
+#define NEOVERSE_N1_MIDR				U(0x410fd0c0)
+
+/* Neoverse N1 loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_N1_BHB_LOOP_COUNT			U(24)
 
 /* Exception Syndrome register EC code for IC Trap */
-#define NEOVERSE_N1_EC_IC_TRAP		U(0x1f)
+#define NEOVERSE_N1_EC_IC_TRAP				U(0x1f)
 
 /*******************************************************************************
  * CPU Power Control register specific definitions.
  ******************************************************************************/
-#define NEOVERSE_N1_CPUPWRCTLR_EL1	S3_0_C15_C2_7
+#define NEOVERSE_N1_CPUPWRCTLR_EL1			S3_0_C15_C2_7
 
 /* Definitions of register field mask in NEOVERSE_N1_CPUPWRCTLR_EL1 */
-#define NEOVERSE_N1_CORE_PWRDN_EN_MASK	U(0x1)
+#define NEOVERSE_N1_CORE_PWRDN_EN_MASK			U(0x1)
 
-#define NEOVERSE_N1_ACTLR_AMEN_BIT	(U(1) << 4)
+#define NEOVERSE_N1_ACTLR_AMEN_BIT			(U(1) << 4)
 
-#define NEOVERSE_N1_AMU_NR_COUNTERS	U(5)
-#define NEOVERSE_N1_AMU_GROUP0_MASK	U(0x1f)
+#define NEOVERSE_N1_AMU_NR_COUNTERS			U(5)
+#define NEOVERSE_N1_AMU_GROUP0_MASK			U(0x1f)
 
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
-#define NEOVERSE_N1_CPUECTLR_EL1	S3_0_C15_C1_4
+#define NEOVERSE_N1_CPUECTLR_EL1			S3_0_C15_C1_4
 
-#define NEOVERSE_N1_WS_THR_L2_MASK	(ULL(3) << 24)
+#define NEOVERSE_N1_WS_THR_L2_MASK			(ULL(3) << 24)
 #define NEOVERSE_N1_CPUECTLR_EL1_MM_TLBPF_DIS_BIT	(ULL(1) << 51)
 #define NEOVERSE_N1_CPUECTLR_EL1_EXTLLC_BIT		(ULL(1) << 0)
 
 /*******************************************************************************
  * CPU Auxiliary Control register specific definitions.
  ******************************************************************************/
-#define NEOVERSE_N1_CPUACTLR_EL1	S3_0_C15_C1_0
+#define NEOVERSE_N1_CPUACTLR_EL1			S3_0_C15_C1_0
 
-#define NEOVERSE_N1_CPUACTLR_EL1_BIT_6	(ULL(1) << 6)
-#define NEOVERSE_N1_CPUACTLR_EL1_BIT_13	(ULL(1) << 13)
+#define NEOVERSE_N1_CPUACTLR_EL1_BIT_6			(ULL(1) << 6)
+#define NEOVERSE_N1_CPUACTLR_EL1_BIT_13			(ULL(1) << 13)
 
-#define NEOVERSE_N1_CPUACTLR2_EL1	S3_0_C15_C1_1
+#define NEOVERSE_N1_CPUACTLR2_EL1			S3_0_C15_C1_1
 
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_0		(ULL(1) << 0)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_2		(ULL(1) << 2)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_11	(ULL(1) << 11)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_15	(ULL(1) << 15)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_16	(ULL(1) << 16)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_59	(ULL(1) << 59)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_0			(ULL(1) << 0)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_2			(ULL(1) << 2)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_11		(ULL(1) << 11)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_15		(ULL(1) << 15)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_16		(ULL(1) << 16)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_59		(ULL(1) << 59)
 
-#define NEOVERSE_N1_CPUACTLR3_EL1	S3_0_C15_C1_2
+#define NEOVERSE_N1_CPUACTLR3_EL1			S3_0_C15_C1_2
 
-#define NEOVERSE_N1_CPUACTLR3_EL1_BIT_10	(ULL(1) << 10)
+#define NEOVERSE_N1_CPUACTLR3_EL1_BIT_10		(ULL(1) << 10)
 
 /* Instruction patching registers */
-#define CPUPSELR_EL3	S3_6_C15_C8_0
-#define CPUPCR_EL3	S3_6_C15_C8_1
-#define CPUPOR_EL3	S3_6_C15_C8_2
-#define CPUPMR_EL3	S3_6_C15_C8_3
+#define CPUPSELR_EL3					S3_6_C15_C8_0
+#define CPUPCR_EL3					S3_6_C15_C8_1
+#define CPUPOR_EL3					S3_6_C15_C8_2
+#define CPUPMR_EL3					S3_6_C15_C8_3
 
 #endif /* NEOVERSE_N1_H */
diff --git a/include/lib/cpus/aarch64/neoverse_n2.h b/include/lib/cpus/aarch64/neoverse_n2.h
index a1e676e..0452b39 100644
--- a/include/lib/cpus/aarch64/neoverse_n2.h
+++ b/include/lib/cpus/aarch64/neoverse_n2.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,9 @@
 /* Neoverse N2 ID register for revision r0p0 */
 #define NEOVERSE_N2_MIDR				U(0x410FD490)
 
+/* Neoverse N2 loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_N2_BHB_LOOP_COUNT			U(32)
+
 /*******************************************************************************
  * CPU Power control register
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/neoverse_v1.h b/include/lib/cpus/aarch64/neoverse_v1.h
index e43c907..a904c04 100644
--- a/include/lib/cpus/aarch64/neoverse_v1.h
+++ b/include/lib/cpus/aarch64/neoverse_v1.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, ARM Limited. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,9 @@
 
 #define NEOVERSE_V1_MIDR					U(0x410FD400)
 
+/* Neoverse V1 loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_V1_BHB_LOOP_COUNT				U(32)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
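
The per-CPU loop counts introduced by the header changes above can be read
off as a table; the C sketch below is purely illustrative and simply pairs
the MIDR and BHB loop-count values defined in the respective headers:

struct bhb_loop_count {
	unsigned int midr;	/* CPU MIDR from the header */
	unsigned int count;	/* CVE-2022-23960 BHB loop count */
};

static const struct bhb_loop_count wa3_loop_counts[] = {
	{ 0x410FD080U,  8U },	/* Cortex-A72 */
	{ 0x410FD0B0U, 24U },	/* Cortex-A76 */
	{ 0x410FD0D0U, 24U },	/* Cortex-A77 */
	{ 0x410FD410U, 32U },	/* Cortex-A78 */
	{ 0x410FD470U, 32U },	/* Cortex-A710 */
	{ 0x410FD480U, 32U },	/* Cortex-X2 */
	{ 0x410FD0C0U, 24U },	/* Neoverse N1 */
	{ 0x410FD490U, 32U },	/* Neoverse N2 */
	{ 0x410FD400U, 32U },	/* Neoverse V1 */
};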
diff --git a/include/lib/cpus/wa_cve_2022_23960.h b/include/lib/cpus/wa_cve_2022_23960.h
new file mode 100644
index 0000000..35b3fd8
--- /dev/null
+++ b/include/lib/cpus/wa_cve_2022_23960.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef WA_CVE_2022_23960_H
+#define WA_CVE_2022_23960_H
+
+int check_smccc_arch_wa3_applies(void);
+
+#endif /* WA_CVE_2022_23960_H */
diff --git a/include/services/arm_arch_svc.h b/include/services/arm_arch_svc.h
index 5bbd8bb..645b388 100644
--- a/include/services/arm_arch_svc.h
+++ b/include/services/arm_arch_svc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -12,6 +12,7 @@
 #define SMCCC_ARCH_SOC_ID		U(0x80000002)
 #define SMCCC_ARCH_WORKAROUND_1		U(0x80008000)
 #define SMCCC_ARCH_WORKAROUND_2		U(0x80007FFF)
+#define SMCCC_ARCH_WORKAROUND_3		U(0x80003FFF)
 
 #define SMCCC_GET_SOC_VERSION		U(0)
 #define SMCCC_GET_SOC_REVISION		U(1)
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index 8ef0f92..3766ec7 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2022, ARM Limited and Contributors. All rights reserved.
  * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
@@ -470,7 +470,12 @@
 	bl	errata_a57_859972_wa
 #endif
 
-#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+#if IMAGE_BL31 && (WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960)
+	/* ---------------------------------------------------------------
+	 * Override the vector table and enable the existing workaround
+	 * if either of the build flags is enabled.
+	 * ---------------------------------------------------------------
+	 */
 	adr	x0, wa_cve_2017_5715_mmu_vbar
 	msr	vbar_el3, x0
 	/* isb will be performed before returning from this function */
@@ -506,6 +511,20 @@
 	ret	x19
 endfunc cortex_a57_reset_func
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
+func check_smccc_arch_workaround_3
+	mov	x0, #ERRATA_APPLIES
+	ret
+endfunc check_smccc_arch_workaround_3
+
 	/* ----------------------------------------------------
 	 * The CPU Ops core power down function for Cortex-A57.
 	 * ----------------------------------------------------
@@ -630,6 +649,7 @@
 	report_errata ERRATA_A57_1319537, cortex_a57, 1319537
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a57, cve_2017_5715
 	report_errata WORKAROUND_CVE_2018_3639, cortex_a57, cve_2018_3639
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a57, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -661,5 +681,6 @@
 	cortex_a57_reset_func, \
 	check_errata_cve_2017_5715, \
 	CPU_NO_EXTRA2_FUNC, \
+	check_smccc_arch_workaround_3, \
 	cortex_a57_core_pwr_dwn, \
 	cortex_a57_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a710.S b/lib/cpus/aarch64/cortex_a710.S
index 4d5d949..aea62ae 100644
--- a/lib/cpus/aarch64/cortex_a710.S
+++ b/lib/cpus/aarch64/cortex_a710.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <cortex_a710.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
 #error "Cortex A710 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table CORTEX_A710_BHB_LOOP_COUNT, cortex_a710
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 /* --------------------------------------------------
  * Errata Workaround for Cortex-A710 Erratum 1987031.
  * This applies to revision r0p0, r1p0 and r2p0 of Cortex-A710. It is still
@@ -305,6 +310,15 @@
 	b       cpu_rev_var_ls
 endfunc check_errata_2282622
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ----------------------------------------------------
@@ -344,6 +358,7 @@
 	report_errata ERRATA_A710_2267065, cortex_a710, 2267065
 	report_errata ERRATA_A710_2136059, cortex_a710, 2136059
 	report_errata ERRATA_A710_2282622, cortex_a710, 2282622
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a710, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -404,6 +419,15 @@
 	bl	errata_a710_2282622_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex-A710 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_a710
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
 	isb
 	ret	x19
 endfunc cortex_a710_reset_func
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index aff6072..de2d36e 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,11 @@
 #include <cortex_a72.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table CORTEX_A72_BHB_LOOP_COUNT, cortex_a72
+#endif /* WORKAROUND_CVE_2022_23960 */
 
 	/* ---------------------------------------------
 	 * Disable L1 data cache and unified L2 cache
@@ -133,6 +138,24 @@
 	ret
 endfunc check_errata_1319367
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
+func check_smccc_arch_workaround_3
+	cpu_check_csv2	x0, 1f
+	mov	x0, #ERRATA_APPLIES
+	ret
+1:
+	mov	x0, #ERRATA_NOT_APPLIES
+	ret
+endfunc check_smccc_arch_workaround_3
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A72.
 	 * -------------------------------------------------
@@ -147,13 +170,28 @@
 	bl	errata_a72_859971_wa
 #endif
 
-#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+#if IMAGE_BL31 && (WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960)
 	cpu_check_csv2	x0, 1f
 	adr	x0, wa_cve_2017_5715_mmu_vbar
 	msr	vbar_el3, x0
 	/* isb will be performed before returning from this function */
+
+	/* Skip the CVE-2022-23960 mitigation if the CVE-2017-5715 mitigation is applied */
+	b	2f
 1:
-#endif
+#if WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex-A72 generic vectors are overridden to apply the
+	 * mitigation on exception entry from lower ELs for revisions >= r1p0,
+	 * which have CSV2 implemented.
+	 */
+	adr	x0, wa_cve_vbar_cortex_a72
+	msr	vbar_el3, x0
+
+	/* isb will be performed before returning from this function */
+#endif /* WORKAROUND_CVE_2022_23960 */
+2:
+#endif /* IMAGE_BL31 && (WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960) */
 
 #if WORKAROUND_CVE_2018_3639
 	mrs	x0, CORTEX_A72_CPUACTLR_EL1
@@ -299,6 +337,7 @@
 	report_errata ERRATA_A72_1319367, cortex_a72, 1319367
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715
 	report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a72, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -330,5 +369,6 @@
 	cortex_a72_reset_func, \
 	check_errata_cve_2017_5715, \
 	CPU_NO_EXTRA2_FUNC, \
+	check_smccc_arch_workaround_3, \
 	cortex_a72_core_pwr_dwn, \
 	cortex_a72_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
index 5c8a887..edcd1f5 100644
--- a/lib/cpus/aarch64/cortex_a73.S
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -111,13 +111,21 @@
 	bl	errata_a73_855423_wa
 #endif
 
-#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+#if IMAGE_BL31 && (WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960)
 	cpu_check_csv2	x0, 1f
 	adr	x0, wa_cve_2017_5715_bpiall_vbar
 	msr	vbar_el3, x0
-	/* isb will be performed before returning from this function */
+	isb
+	/* Skip installing the vector table again for CVE-2022-23960 */
+	b	2f
 1:
+#if WORKAROUND_CVE_2022_23960
+	adr	x0, wa_cve_2017_5715_bpiall_vbar
+	msr	vbar_el3, x0
+	isb
 #endif
+2:
+#endif /* IMAGE_BL31 && (WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960) */
 
 #if WORKAROUND_CVE_2018_3639
 	mrs	x0, CORTEX_A73_IMP_DEF_REG1
@@ -221,6 +229,28 @@
 	ret
 endfunc check_errata_cve_2018_3639
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960
+	cpu_check_csv2	x0, 1f
+	mov	x0, #ERRATA_APPLIES
+	ret
+ 1:
+# if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+# else
+	mov	x0, #ERRATA_MISSING
+# endif /* WORKAROUND_CVE_2022_23960 */
+	ret
+#endif /* WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960 */
+	mov	x0, #ERRATA_MISSING
+	ret
+endfunc check_errata_cve_2022_23960
+
+func check_smccc_arch_workaround_3
+	mov	x0, #ERRATA_APPLIES
+	ret
+endfunc check_smccc_arch_workaround_3
+
 #if REPORT_ERRATA
 /*
  * Errata printing function for Cortex A75. Must follow AAPCS.
@@ -239,6 +269,7 @@
 	report_errata ERRATA_A73_855423, cortex_a73, 855423
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a73, cve_2017_5715
 	report_errata WORKAROUND_CVE_2018_3639, cortex_a73, cve_2018_3639
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a73, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -269,5 +300,6 @@
 	cortex_a73_reset_func, \
 	check_errata_cve_2017_5715, \
 	CPU_NO_EXTRA2_FUNC, \
+	check_smccc_arch_workaround_3, \
 	cortex_a73_core_pwr_dwn, \
 	cortex_a73_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
index 657457e..d561be4 100644
--- a/lib/cpus/aarch64/cortex_a75.S
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -90,13 +90,21 @@
 	bl	errata_a75_790748_wa
 #endif
 
-#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+#if IMAGE_BL31 && (WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960)
 	cpu_check_csv2	x0, 1f
 	adr	x0, wa_cve_2017_5715_bpiall_vbar
 	msr	vbar_el3, x0
 	isb
+	/* Skip installing the vector table again for CVE-2022-23960 */
+	b	2f
 1:
+#if WORKAROUND_CVE_2022_23960
+	adr	x0, wa_cve_2017_5715_bpiall_vbar
+	msr	vbar_el3, x0
+	isb
 #endif
+2:
+#endif /* IMAGE_BL31 && (WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960) */
 
 #if WORKAROUND_CVE_2018_3639
 	mrs	x0, CORTEX_A75_CPUACTLR_EL1
@@ -161,6 +169,28 @@
 	ret
 endfunc check_errata_cve_2018_3639
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960
+	cpu_check_csv2	x0, 1f
+	mov	x0, #ERRATA_APPLIES
+	ret
+1:
+# if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+# else
+	mov	x0, #ERRATA_MISSING
+# endif /* WORKAROUND_CVE_2022_23960 */
+	ret
+#endif /* WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960 */
+	mov	x0, #ERRATA_MISSING
+	ret
+endfunc check_errata_cve_2022_23960
+
+func check_smccc_arch_workaround_3
+	mov	x0, #ERRATA_APPLIES
+	ret
+endfunc check_smccc_arch_workaround_3
+
 	/* ---------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ---------------------------------------------
@@ -197,6 +227,7 @@
 	report_errata WORKAROUND_CVE_2018_3639, cortex_a75, cve_2018_3639
 	report_errata ERRATA_DSU_798953, cortex_a75, dsu_798953
 	report_errata ERRATA_DSU_936184, cortex_a75, dsu_936184
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a75, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -226,4 +257,5 @@
 	cortex_a75_reset_func, \
 	check_errata_cve_2017_5715, \
 	CPU_NO_EXTRA2_FUNC, \
+	check_smccc_arch_workaround_3, \
 	cortex_a75_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
index 4f7f4bb..50bd8cd 100644
--- a/lib/cpus/aarch64/cortex_a76.S
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -7,11 +7,11 @@
 #include <arch.h>
 #include <asm_macros.S>
 #include <common/bl_common.h>
-#include <context.h>
 #include <cortex_a76.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
 #include <services/arm_arch_svc.h>
+#include "wa_cve_2022_23960_bhb.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -35,59 +35,17 @@
 	 *
 	 * The macro saves x2-x3 to the context. In the fast path
 	 * x0-x3 registers do not need to be restored as the calling
-	 * context will have saved them.
+	 * context will have saved them. The macro also saves
+	 * x29-x30 to the context in the sync_exception path.
 	 */
 	.macro apply_cve_2018_3639_wa _is_sync_exception _esr_el3_val
 	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
-
 	.if \_is_sync_exception
-		/*
-		 * Ensure SMC is coming from A64/A32 state on #0
-		 * with W0 = SMCCC_ARCH_WORKAROUND_2
-		 *
-		 * This sequence evaluates as:
-		 *    (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
-		 * allowing use of a single branch operation
-		 */
-		orr	w2, wzr, #SMCCC_ARCH_WORKAROUND_2
-		cmp	x0, x2
-		mrs	x3, esr_el3
-		mov_imm	w2, \_esr_el3_val
-		ccmp	w2, w3, #0, eq
-		/*
-		 * Static predictor will predict a fall-through, optimizing
-		 * the `SMCCC_ARCH_WORKAROUND_2` fast path.
-		 */
-		bne	1f
-
-		/*
-		 * The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
-		 * fast path.
-		 */
-		cmp	x1, xzr /* enable/disable check */
-
-		/*
-		 * When the calling context wants mitigation disabled,
-		 * we program the mitigation disable function in the
-		 * CPU context, which gets invoked on subsequent exits from
-		 * EL3 via the `el3_exit` function. Otherwise NULL is
-		 * programmed in the CPU context, which results in caller's
-		 * inheriting the EL3 mitigation state (enabled) on subsequent
-		 * `el3_exit`.
-		 */
-		mov	x0, xzr
-		adr	x1, cortex_a76_disable_wa_cve_2018_3639
-		csel	x1, x1, x0, eq
-		str	x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
-
-		mrs	x2, CORTEX_A76_CPUACTLR2_EL1
-		orr	x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
-		bic	x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
-		csel	x3, x3, x1, eq
-		msr	CORTEX_A76_CPUACTLR2_EL1, x3
-		exception_return /* exception_return contains ISB */
+	stp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+	mov_imm	w2, \_esr_el3_val
+	bl	apply_cve_2018_3639_sync_wa
+	ldp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
 	.endif
-1:
 	/*
 	 * Always enable v4 mitigation during EL3 execution. This is not
 	 * required for the fast path above because it does not perform any
@@ -105,8 +63,10 @@
 	 */
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
 	.endm
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
 
-vector_base cortex_a76_wa_cve_2018_3639_a76_vbar
+#if DYNAMIC_WORKAROUND_CVE_2018_3639 || WORKAROUND_CVE_2022_23960
+vector_base cortex_a76_wa_cve_vbar
 
 	/* ---------------------------------------------------------------------
 	 * Current EL with SP_EL0 : 0x0 - 0x200
@@ -153,22 +113,54 @@
 	 * ---------------------------------------------------------------------
 	 */
 vector_entry cortex_a76_sync_exception_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	sync_exception_aarch64
 end_vector_entry cortex_a76_sync_exception_aarch64
 
 vector_entry cortex_a76_irq_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	irq_aarch64
 end_vector_entry cortex_a76_irq_aarch64
 
 vector_entry cortex_a76_fiq_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	fiq_aarch64
 end_vector_entry cortex_a76_fiq_aarch64
 
 vector_entry cortex_a76_serror_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	serror_aarch64
 end_vector_entry cortex_a76_serror_aarch64
 
@@ -177,24 +169,130 @@
 	 * ---------------------------------------------------------------------
 	 */
 vector_entry cortex_a76_sync_exception_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	sync_exception_aarch32
 end_vector_entry cortex_a76_sync_exception_aarch32
 
 vector_entry cortex_a76_irq_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	irq_aarch32
 end_vector_entry cortex_a76_irq_aarch32
 
 vector_entry cortex_a76_fiq_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	fiq_aarch32
 end_vector_entry cortex_a76_fiq_aarch32
 
 vector_entry cortex_a76_serror_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	serror_aarch32
 end_vector_entry cortex_a76_serror_aarch32
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 || WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+	/*
+	 * -----------------------------------------------------------------
+	 * This function applies the mitigation for CVE-2018-3639
+	 * specifically for sync exceptions. It implements a fast path
+	 * where `SMCCC_ARCH_WORKAROUND_2` SMC calls from a lower EL
+	 * running in AArch64 take the fast path and return early.
+	 *
+	 * In the fast path the x0-x3 registers do not need to be restored as
+	 * the calling context will have saved them.
+	 *
+	 * The caller must pass the expected ESR_EL3 value in w2, and must
+	 * save/restore x29-x30 to/from the context outside this function,
+	 * before jumping to the main runtime vector table entry.
+	 *
+	 * Shall clobber: x0-x3, x30
+	 * -----------------------------------------------------------------
+	 */
+func apply_cve_2018_3639_sync_wa
+	/*
+	 * Ensure SMC is coming from A64/A32 state on #0
+	 * with W0 = SMCCC_ARCH_WORKAROUND_2
+	 *
+	 * This sequence evaluates as:
+	 *    (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
+	 * allowing use of a single branch operation.
+	 * W2 holds the expected ESR_EL3 value, populated outside this function.
+	 */
+	orr	w3, wzr, #SMCCC_ARCH_WORKAROUND_2
+	cmp	x0, x3
+	mrs	x3, esr_el3
+
+	ccmp	w2, w3, #0, eq
+	/*
+	 * Static predictor will predict a fall-through, optimizing
+	 * the `SMCCC_ARCH_WORKAROUND_2` fast path.
+	 */
+	bne	1f
+
+	/*
+	 * The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
+	 * fast path.
+	 */
+	cmp	x1, xzr /* enable/disable check */
+
+	/*
+	 * When the calling context wants mitigation disabled,
+	 * we program the mitigation disable function in the
+	 * CPU context, which gets invoked on subsequent exits from
+	 * EL3 via the `el3_exit` function. Otherwise NULL is
+	 * programmed in the CPU context, which results in caller's
+	 * inheriting the EL3 mitigation state (enabled) on subsequent
+	 * `el3_exit`.
+	 */
+	mov	x0, xzr
+	adr	x1, cortex_a76_disable_wa_cve_2018_3639
+	csel	x1, x1, x0, eq
+	str	x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
+
+	mrs	x2, CORTEX_A76_CPUACTLR2_EL1
+	orr	x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+	bic	x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+	csel	x3, x3, x1, eq
+	msr	CORTEX_A76_CPUACTLR2_EL1, x3
+	ldp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+	/*
+	 * `SMCCC_ARCH_WORKAROUND_2` fast path return to the lower EL.
+	 */
+	exception_return /* exception_return contains ISB */
+1:
+	ret
+endfunc apply_cve_2018_3639_sync_wa
 #endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
 
 	/* --------------------------------------------------
@@ -519,6 +617,15 @@
 #endif
 endfunc check_errata_1165522
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif /* WORKAROUND_CVE_2022_23960 */
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A76.
 	 * Shall clobber: x0-x19
@@ -590,16 +697,31 @@
 	 * The Cortex-A76 generic vectors are overwritten to use the vectors
 	 * defined above. This is required in order to apply mitigation
 	 * against CVE-2018-3639 on exception entry from lower ELs.
+	 * If this vector table is installed, skip overriding it again for
+	 * CVE-2022-23960 as both mitigations use the same vbar.
 	 */
-	adr	x0, cortex_a76_wa_cve_2018_3639_a76_vbar
+	adr	x0, cortex_a76_wa_cve_vbar
 	msr	vbar_el3, x0
 	isb
+	b	2f
 #endif /* IMAGE_BL31 */
 
 1:
 #endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
 #endif /* WORKAROUND_CVE_2018_3639 */
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex-A76 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs. This will be bypassed
+	 * if DYNAMIC_WORKAROUND_CVE_2018_3639 has overridden the vectors.
+	 */
+	adr	x0, cortex_a76_wa_cve_vbar
+	msr	vbar_el3, x0
+	isb
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+2:
+
 #if ERRATA_DSU_798953
 	bl	errata_dsu_798953_wa
 #endif
@@ -656,6 +778,7 @@
 	report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
 	report_errata ERRATA_DSU_798953, cortex_a76, dsu_798953
 	report_errata ERRATA_DSU_936184, cortex_a76, dsu_936184
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a76, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -685,4 +808,5 @@
 	cortex_a76_reset_func, \
 	CPU_NO_EXTRA1_FUNC, \
 	cortex_a76_disable_wa_cve_2018_3639, \
+	CPU_NO_EXTRA3_FUNC, \
 	cortex_a76_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a77.S b/lib/cpus/aarch64/cortex_a77.S
index 8c8f4d3..e7365e2 100644
--- a/lib/cpus/aarch64/cortex_a77.S
+++ b/lib/cpus/aarch64/cortex_a77.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <cortex_a77.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
 #error "Cortex-A77 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table CORTEX_A77_BHB_LOOP_COUNT, cortex_a77
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 	/* --------------------------------------------------
 	 * Errata Workaround for Cortex A77 Errata #1508412.
 	 * This applies only to revision <= r1p0 of Cortex A77.
@@ -194,6 +199,15 @@
 	b	cpu_rev_var_ls
 endfunc check_errata_1791578
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A77.
 	 * Shall clobber: x0-x19
@@ -224,6 +238,16 @@
 	bl	errata_a77_1791578_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex-A77 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_a77
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+	isb
 	ret	x19
 endfunc cortex_a77_reset_func
 
@@ -261,6 +285,7 @@
 	report_errata ERRATA_A77_1925769, cortex_a77, 1925769
 	report_errata ERRATA_A77_1946167, cortex_a77, 1946167
 	report_errata ERRATA_A77_1791578, cortex_a77, 1791578
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a77, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/cortex_a78.S b/lib/cpus/aarch64/cortex_a78.S
index a1288ba..1a6f848 100644
--- a/lib/cpus/aarch64/cortex_a78.S
+++ b/lib/cpus/aarch64/cortex_a78.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, ARM Limited. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,12 +10,16 @@
 #include <cortex_a78.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
 #error "cortex_a78 must be compiled with HW_ASSISTED_COHERENCY enabled"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table CORTEX_A78_BHB_LOOP_COUNT, cortex_a78
+#endif /* WORKAROUND_CVE_2022_23960 */
 
 /* --------------------------------------------------
  * Errata Workaround for A78 Erratum 1688305.
@@ -263,6 +267,15 @@
 	b	cpu_rev_var_range
 endfunc check_errata_2242635
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A78
 	 * -------------------------------------------------
@@ -327,6 +340,15 @@
 	msr	CPUAMCNTENSET1_EL0, x0
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex-A78 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_a78
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
 	isb
 	ret	x19
 endfunc cortex_a78_reset_func
@@ -368,6 +390,7 @@
 	report_errata ERRATA_A78_1952683, cortex_a78, 1952683
 	report_errata ERRATA_A78_2132060, cortex_a78, 2132060
 	report_errata ERRATA_A78_2242635, cortex_a78, 2242635
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a78, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/cortex_x2.S b/lib/cpus/aarch64/cortex_x2.S
index 2ecfbbb..9586a5b 100644
--- a/lib/cpus/aarch64/cortex_x2.S
+++ b/lib/cpus/aarch64/cortex_x2.S
@@ -10,6 +10,7 @@
 #include <cortex_x2.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
 #error "Cortex X2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table CORTEX_X2_BHB_LOOP_COUNT, cortex_x2
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 	/* --------------------------------------------------
 	 * Errata Workaround for Cortex X2 Errata #2002765.
 	 * This applies to revisions r0p0, r1p0, and r2p0 and
@@ -222,6 +227,16 @@
 	mov	x1, #0x20
 	b	cpu_rev_var_ls
 endfunc check_errata_2216384
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ----------------------------------------------------
@@ -258,6 +273,7 @@
 	report_errata ERRATA_X2_2017096, cortex_x2, 2017096
 	report_errata ERRATA_X2_2081180, cortex_x2, 2081180
 	report_errata ERRATA_X2_2216384, cortex_x2, 2216384
+	report_errata WORKAROUND_CVE_2022_23960, cortex_x2, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -305,6 +321,16 @@
 	bl	errata_x2_2216384_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex-X2 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_x2
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+	isb
 	ret x19
 endfunc cortex_x2_reset_func
 
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index bd8f85f..2385627 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2021, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2022, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -381,7 +381,7 @@
 	 * If the reserved function pointer is NULL, this CPU
 	 * is unaffected by CVE-2017-5715 so bail out.
 	 */
-	cmp	x0, #0
+	cmp	x0, #CPU_NO_EXTRA1_FUNC
 	beq	1f
 	br	x0
 1:
@@ -416,3 +416,41 @@
 	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
 	ret
 endfunc wa_cve_2018_3639_get_disable_ptr
+
+/*
+ * int check_smccc_arch_wa3_applies(void);
+ *
+ * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
+ * CVE-2022-23960 for this CPU. It returns:
+ *  - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
+ *    the CVE.
+ *  - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
+ *    mitigate the CVE.
+ *
+ * NOTE: Must be called only after cpu_ops have been initialized
+ *       in per-CPU data.
+ */
+	.globl	check_smccc_arch_wa3_applies
+func check_smccc_arch_wa3_applies
+	mrs	x0, tpidr_el3
+#if ENABLE_ASSERTIONS
+	cmp	x0, #0
+	ASM_ASSERT(ne)
+#endif
+	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+	cmp	x0, #0
+	ASM_ASSERT(ne)
+#endif
+	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
+	/*
+	 * If the reserved function pointer is NULL, this CPU
+	 * is unaffected by CVE-2022-23960 so bail out.
+	 */
+	cmp	x0, #CPU_NO_EXTRA3_FUNC
+	beq	1f
+	br	x0
+1:
+	mov	x0, #ERRATA_NOT_APPLIES
+	ret
+endfunc check_smccc_arch_wa3_applies
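
A plausible consumer of check_smccc_arch_wa3_applies() is the SMCCC
dispatcher answering SMCCC_ARCH_FEATURES queries for SMCCC_ARCH_WORKAROUND_3.
The C sketch below is an assumption about that wiring (the dispatcher itself
is outside this diff); ERRATA_APPLIES comes from the existing errata
framework:

#include <stdint.h>
#include <lib/cpus/wa_cve_2022_23960.h>	/* header added by this patch */

/* Hypothetical SMCCC_ARCH_FEATURES handling for WORKAROUND_3. */
static int32_t wa3_arch_features(void)
{
	if (check_smccc_arch_wa3_applies() == ERRATA_APPLIES)
		return 0;	/* implemented: callers may invoke it */

	return -1;		/* SMC_UNK: not needed on this CPU */
}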
diff --git a/lib/cpus/aarch64/neoverse_n1.S b/lib/cpus/aarch64/neoverse_n1.S
index 9c97cf6..b75b0c1 100644
--- a/lib/cpus/aarch64/neoverse_n1.S
+++ b/lib/cpus/aarch64/neoverse_n1.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -8,8 +8,8 @@
 #include <asm_macros.S>
 #include <cpuamu.h>
 #include <cpu_macros.S>
-#include <context.h>
 #include <neoverse_n1.h>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -23,6 +23,10 @@
 
 	.global neoverse_n1_errata_ic_trap_handler
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table NEOVERSE_N1_BHB_LOOP_COUNT, neoverse_n1
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 /* --------------------------------------------------
  * Errata Workaround for Neoverse N1 Erratum 1043202.
  * This applies to revision r0p0 and r1p0 of Neoverse N1.
@@ -464,6 +468,15 @@
 	b	cpu_rev_var_range
 endfunc check_errata_1946160
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 func neoverse_n1_reset_func
 	mov	x19, x30
 
@@ -575,6 +588,15 @@
 	bl	errata_dsu_936184_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Neoverse-N1 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_neoverse_n1
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
 	isb
 	ret	x19
 endfunc neoverse_n1_reset_func
@@ -624,6 +646,7 @@
 	report_errata ERRATA_N1_1868343, neoverse_n1, 1868343
 	report_errata ERRATA_N1_1946160, neoverse_n1, 1946160
 	report_errata ERRATA_DSU_936184, neoverse_n1, dsu_936184
+	report_errata WORKAROUND_CVE_2022_23960, neoverse_n1, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S
index 621aded..b93f2a6 100644
--- a/lib/cpus/aarch64/neoverse_n2.S
+++ b/lib/cpus/aarch64/neoverse_n2.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -8,6 +8,7 @@
 #include <asm_macros.S>
 #include <cpu_macros.S>
 #include <neoverse_n2.h>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -19,6 +20,10 @@
 #error "Neoverse-N2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table NEOVERSE_N2_BHB_LOOP_COUNT, neoverse_n2
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 /* --------------------------------------------------
  * Errata Workaround for Neoverse N2 Erratum 2002655.
  * This applies to revision r0p0 of Neoverse N2. it is still open.
@@ -333,6 +338,15 @@
 	b	cpu_rev_var_ls
 endfunc check_errata_2280757
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* -------------------------------------------
 	 * The CPU Ops reset function for Neoverse N2.
 	 * -------------------------------------------
@@ -428,6 +442,15 @@
 	bl	errata_n2_2002655_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Neoverse-N2 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_neoverse_n2
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
 	isb
 	ret	x19
 endfunc neoverse_n2_reset_func
@@ -469,6 +492,7 @@
 	report_errata ERRATA_N2_2138958, neoverse_n2, 2138958
 	report_errata ERRATA_N2_2242400, neoverse_n2, 2242400
 	report_errata ERRATA_N2_2280757, neoverse_n2, 2280757
+	report_errata WORKAROUND_CVE_2022_23960, neoverse_n2, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/neoverse_v1.S b/lib/cpus/aarch64/neoverse_v1.S
index 62a7a30..6adb3a8 100644
--- a/lib/cpus/aarch64/neoverse_v1.S
+++ b/lib/cpus/aarch64/neoverse_v1.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <neoverse_v1.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
 #error "Neoverse-V1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table NEOVERSE_V1_BHB_LOOP_COUNT, neoverse_v1
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 	/* --------------------------------------------------
 	 * Errata Workaround for Neoverse V1 Errata #1774420.
 	 * This applies to revisions r0p0 and r1p0, fixed in r1p1.
@@ -325,6 +330,15 @@
 	b	cpu_rev_var_range
 endfunc check_errata_2216392
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* ---------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ---------------------------------------------
@@ -364,6 +378,7 @@
 	report_errata ERRATA_V1_2139242, neoverse_v1, 2139242
 	report_errata ERRATA_V1_2108267, neoverse_v1, 2108267
 	report_errata ERRATA_V1_2216392, neoverse_v1, 2216392
+	report_errata WORKAROUND_CVE_2022_23960, neoverse_v1, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -422,6 +437,16 @@
 	bl	errata_neoverse_v1_2216392_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Neoverse-V1 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_neoverse_v1
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+	isb
 	ret	x19
 endfunc neoverse_v1_reset_func
 
diff --git a/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
index c9a9544..0222818 100644
--- a/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
+++ b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -308,22 +308,25 @@
 
 	/*
 	 * Check if SMC is coming from A64 state on #0
-	 * with W0 = SMCCC_ARCH_WORKAROUND_1
+	 * with W0 = SMCCC_ARCH_WORKAROUND_1 or W0 = SMCCC_ARCH_WORKAROUND_3
 	 *
 	 * This sequence evaluates as:
-	 *    (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
+	 *    (W0==SMCCC_ARCH_WORKAROUND_1) || (W0==SMCCC_ARCH_WORKAROUND_3) ?
+	 *    (ESR_EL3==SMC#0) : (NE)
 	 * allowing use of a single branch operation
 	 */
 	orr	w2, wzr, #SMCCC_ARCH_WORKAROUND_1
 	cmp	w0, w2
+	orr	w2, wzr, #SMCCC_ARCH_WORKAROUND_3
+	ccmp	w0, w2, #4, ne
 	mov_imm	w2, ESR_EL3_A64_SMC0
 	ccmp	w3, w2, #0, eq
 	/* Static predictor will predict a fall through */
 	bne	1f
 	eret
 1:
-	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
-	b	sync_exception_aarch64
+	/* Restore x2 and x3 and continue sync exception handling */
+	b	bpiall_ret_sync_exception_aarch32_tail
 end_vector_entry bpiall_ret_sync_exception_aarch32
 
 vector_entry bpiall_ret_irq_aarch32
@@ -355,3 +358,11 @@
 vector_entry bpiall_ret_serror_aarch32
 	b	report_unhandled_exception
 end_vector_entry bpiall_ret_serror_aarch32
+
+	/*
+	 * Part of bpiall_ret_sync_exception_aarch32 to save vector space
+	 */
+func bpiall_ret_sync_exception_aarch32_tail
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	b	sync_exception_aarch64
+endfunc bpiall_ret_sync_exception_aarch32_tail
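For readers following the new cmp/ccmp sequence above: it folds the two
function-ID checks and the ESR check into a single conditional branch. A
minimal C model of the predicate it computes, with illustrative stand-in
values (the real constants live in arm_arch_svc.h and the ESR_EL3 encoding):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the real definitions. */
#define SMCCC_ARCH_WORKAROUND_1	0x80008000U
#define SMCCC_ARCH_WORKAROUND_3	0x80003FFFU
#define ESR_EL3_A64_SMC0	0x5E000000U

/*
 * True when the fast-path ERET is taken: the call is one of the two
 * workaround SMCs, issued via SMC #0 from AArch64 state.
 */
static bool is_workaround_smc(uint32_t w0, uint32_t esr_el3)
{
	return ((w0 == SMCCC_ARCH_WORKAROUND_1) ||
		(w0 == SMCCC_ARCH_WORKAROUND_3)) &&
	       (esr_el3 == ESR_EL3_A64_SMC0);
}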
diff --git a/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S b/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
index 5134ee3..ed0a549 100644
--- a/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
+++ b/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -34,15 +34,18 @@
 
 	/*
 	 * Ensure SMC is coming from A64/A32 state on #0
-	 * with W0 = SMCCC_ARCH_WORKAROUND_1
+	 * with W0 = SMCCC_ARCH_WORKAROUND_1 or W0 = SMCCC_ARCH_WORKAROUND_3
 	 *
 	 * This sequence evaluates as:
-	 *    (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
+	 *    (W0==SMCCC_ARCH_WORKAROUND_1) || (W0==SMCCC_ARCH_WORKAROUND_3) ?
+	 *    (ESR_EL3==SMC#0) : (NE)
 	 * allowing use of a single branch operation
 	 */
 	.if \_is_sync_exception
 		orr	w1, wzr, #SMCCC_ARCH_WORKAROUND_1
 		cmp	w0, w1
+		orr	w1, wzr, #SMCCC_ARCH_WORKAROUND_3
+		ccmp	w0, w1, #4, ne
 		mrs	x0, esr_el3
 		mov_imm	w1, \_esr_el3_val
 		ccmp	w0, w1, #0, eq
diff --git a/lib/cpus/aarch64/wa_cve_2022_23960_bhb.S b/lib/cpus/aarch64/wa_cve_2022_23960_bhb.S
new file mode 100644
index 0000000..e0e41cc
--- /dev/null
+++ b/lib/cpus/aarch64/wa_cve_2022_23960_bhb.S
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <context.h>
+
+#if WORKAROUND_CVE_2022_23960
+	/*
+	 * This macro applies the mitigation for CVE-2022-23960.
+	 * The macro saves x2-x3 to the CPU context.
+	 * SP should point to the CPU context.
+	 */
+	.macro	apply_cve_2022_23960_bhb_wa _bhb_loop_count
+	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+
+	/* CVE-BHB-NUM: CPU-specific loop count for the workaround */
+	mov	x2, \_bhb_loop_count
+
+1:
+	/* b pc+4: a taken branch that is part of the workaround */
+	b	2f
+2:
+	subs	x2, x2, #1
+	bne	1b
+	dsb	sy
+	isb
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	.endm
+#endif /* WORKAROUND_CVE_2022_23960 */
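The macro above is the heart of the mitigation: a counted loop of taken
branches that overwrites the branch history, followed by dsb sy and isb.
A hedged C sketch of the control flow, for illustration only; the
protective effect depends on the literal instruction sequence, so the
real workaround must stay in assembly:

/*
 * Conceptual model of apply_cve_2022_23960_bhb_wa: run _bhb_loop_count
 * iterations, each containing a taken branch, then synchronise.
 */
static inline void bhb_loop_model(unsigned int bhb_loop_count)
{
	unsigned int i;

	for (i = bhb_loop_count; i != 0U; i--) {
		/* models "b 2f; 2:", a branch to the next instruction */
		__asm__ volatile("" ::: "memory");	/* keep the loop */
	}

	/* matching "dsb sy; isb" at the end of the macro */
	__asm__ volatile("dsb sy; isb" ::: "memory");
}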
diff --git a/lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S b/lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S
new file mode 100644
index 0000000..220fa11
--- /dev/null
+++ b/lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <services/arm_arch_svc.h>
+#include "wa_cve_2022_23960_bhb.S"
+
+	/*
+	 * This macro instantiates a per-CPU copy of the vector table used
+	 * in the mitigation for CVE-2022-23960.
+	 */
+	.macro wa_cve_2022_23960_bhb_vector_table _bhb_loop_count, _cpu
+
+	.globl	wa_cve_vbar_\_cpu
+
+vector_base wa_cve_vbar_\_cpu
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_EL0 : 0x0 - 0x200
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry bhb_sync_exception_sp_el0_\_cpu
+	b	sync_exception_sp_el0
+end_vector_entry bhb_sync_exception_sp_el0_\_cpu
+
+vector_entry bhb_irq_sp_el0_\_cpu
+	b	irq_sp_el0
+end_vector_entry bhb_irq_sp_el0_\_cpu
+
+vector_entry bhb_fiq_sp_el0_\_cpu
+	b	fiq_sp_el0
+end_vector_entry bhb_fiq_sp_el0_\_cpu
+
+vector_entry bhb_serror_sp_el0_\_cpu
+	b	serror_sp_el0
+end_vector_entry bhb_serror_sp_el0_\_cpu
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_ELx: 0x200 - 0x400
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry bhb_sync_exception_sp_elx_\_cpu
+	b	sync_exception_sp_elx
+end_vector_entry bhb_sync_exception_sp_elx_\_cpu
+
+vector_entry bhb_irq_sp_elx_\_cpu
+	b	irq_sp_elx
+end_vector_entry bhb_irq_sp_elx_\_cpu
+
+vector_entry bhb_fiq_sp_elx_\_cpu
+	b	fiq_sp_elx
+end_vector_entry bhb_fiq_sp_elx_\_cpu
+
+vector_entry bhb_serror_sp_elx_\_cpu
+	b	serror_sp_elx
+end_vector_entry bhb_serror_sp_elx_\_cpu
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch64 : 0x400 - 0x600
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry bhb_sync_exception_aarch64_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	sync_exception_aarch64
+end_vector_entry bhb_sync_exception_aarch64_\_cpu
+
+vector_entry bhb_irq_aarch64_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	irq_aarch64
+end_vector_entry bhb_irq_aarch64_\_cpu
+
+vector_entry bhb_fiq_aarch64_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	fiq_aarch64
+end_vector_entry bhb_fiq_aarch64_\_cpu
+
+vector_entry bhb_serror_aarch64_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	serror_aarch64
+end_vector_entry bhb_serror_aarch64_\_cpu
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch32 : 0x600 - 0x800
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry bhb_sync_exception_aarch32_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	sync_exception_aarch32
+end_vector_entry bhb_sync_exception_aarch32_\_cpu
+
+vector_entry bhb_irq_aarch32_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	irq_aarch32
+end_vector_entry bhb_irq_aarch32_\_cpu
+
+vector_entry bhb_fiq_aarch32_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	fiq_aarch32
+end_vector_entry bhb_fiq_aarch32_\_cpu
+
+vector_entry bhb_serror_aarch32_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	serror_aarch32
+end_vector_entry bhb_serror_aarch32_\_cpu
+	.endm
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index e812c07..c7630fb 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -24,6 +24,7 @@
 WORKAROUND_CVE_2017_5715		?=1
 WORKAROUND_CVE_2018_3639		?=1
 DYNAMIC_WORKAROUND_CVE_2018_3639	?=0
+WORKAROUND_CVE_2022_23960		?=1
 
 # Flags to indicate internal or external Last level cache
 # By default internal
@@ -56,6 +57,10 @@
 $(eval $(call assert_boolean,DYNAMIC_WORKAROUND_CVE_2018_3639))
 $(eval $(call add_define,DYNAMIC_WORKAROUND_CVE_2018_3639))
 
+# Process WORKAROUND_CVE_2022_23960 flag
+$(eval $(call assert_boolean,WORKAROUND_CVE_2022_23960))
+$(eval $(call add_define,WORKAROUND_CVE_2022_23960))
+
 $(eval $(call assert_boolean,NEOVERSE_Nx_EXTERNAL_LLC))
 $(eval $(call add_define,NEOVERSE_Nx_EXTERNAL_LLC))
 
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index a24a2e5..acac886 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -308,14 +308,6 @@
 # Enable dynamic mitigation support by default
 DYNAMIC_WORKAROUND_CVE_2018_3639	:=	1
 
-# Enable reclaiming of BL31 initialisation code for secondary cores
-# stacks for FVP. However, don't enable reclaiming for clang.
-ifneq (${RESET_TO_BL31},1)
-ifeq ($(findstring clang,$(notdir $(CC))),)
-RECLAIM_INIT_CODE	:=	1
-endif
-endif
-
 ifeq (${ENABLE_AMU},1)
 BL31_SOURCES		+=	lib/cpus/aarch64/cpuamu.c		\
 				lib/cpus/aarch64/cpuamu_helpers.S
diff --git a/plat/arm/css/sgi/include/sgi_base_platform_def.h b/plat/arm/css/sgi/include/sgi_base_platform_def.h
index 93609b9..c9c8c04 100644
--- a/plat/arm/css/sgi/include/sgi_base_platform_def.h
+++ b/plat/arm/css/sgi/include/sgi_base_platform_def.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2022, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -35,8 +35,8 @@
 # if SPM_MM
 #  define PLAT_ARM_MMAP_ENTRIES		(9  + ((CSS_SGI_CHIP_COUNT - 1) * 3))
 #  define MAX_XLAT_TABLES		(7  + ((CSS_SGI_CHIP_COUNT - 1) * 3))
-#  define PLAT_SP_IMAGE_MMAP_REGIONS	10
-#  define PLAT_SP_IMAGE_MAX_XLAT_TABLES	12
+#  define PLAT_SP_IMAGE_MMAP_REGIONS	9
+#  define PLAT_SP_IMAGE_MAX_XLAT_TABLES	11
 # else
 #  define PLAT_ARM_MMAP_ENTRIES		(5 + ((CSS_SGI_CHIP_COUNT - 1) * 3))
 #  define MAX_XLAT_TABLES		(6 + ((CSS_SGI_CHIP_COUNT - 1) * 3))
@@ -130,21 +130,6 @@
 # define PLATFORM_STACK_SIZE 0x440
 #endif
 
-/* PL011 UART related constants */
-#define SOC_CSS_SEC_UART_BASE			UL(0x2A410000)
-#define SOC_CSS_NSEC_UART_BASE			UL(0x2A400000)
-#define SOC_CSS_UART_SIZE			UL(0x10000)
-#define SOC_CSS_UART_CLK_IN_HZ			UL(7372800)
-
-/* UART related constants */
-#define PLAT_ARM_BOOT_UART_BASE			SOC_CSS_SEC_UART_BASE
-#define PLAT_ARM_BOOT_UART_CLK_IN_HZ		SOC_CSS_UART_CLK_IN_HZ
-
-#define PLAT_ARM_RUN_UART_BASE			SOC_CSS_SEC_UART_BASE
-#define PLAT_ARM_RUN_UART_CLK_IN_HZ		SOC_CSS_UART_CLK_IN_HZ
-
-#define PLAT_ARM_CRASH_UART_BASE		SOC_CSS_SEC_UART_BASE
-#define PLAT_ARM_CRASH_UART_CLK_IN_HZ		SOC_CSS_UART_CLK_IN_HZ
 
 #define PLAT_ARM_NSTIMER_FRAME_ID	0
 
@@ -273,18 +258,4 @@
 		CSS_SGI_REMOTE_CHIP_MEM_OFFSET(n) + ARM_DRAM2_END,	\
 		ARM_TZC_NS_DRAM_S_ACCESS, PLAT_ARM_TZC_NS_DEV_ACCESS}
 
-#if SPM_MM
-
-/*
- * Stand-alone MM logs would be routed via secure UART. Define page table
- * entry for secure UART which would be common to all platforms.
- */
-#define SOC_PLATFORM_SECURE_UART	MAP_REGION_FLAT(		\
-					SOC_CSS_SEC_UART_BASE,		\
-					SOC_CSS_UART_SIZE,		\
-					MT_DEVICE | MT_RW | 		\
-					MT_SECURE | MT_USER)
-
-#endif
-
 #endif /* SGI_BASE_PLATFORM_DEF_H */
diff --git a/plat/arm/css/sgi/include/sgi_soc_css_def.h b/plat/arm/css/sgi/include/sgi_soc_css_def.h
deleted file mode 100644
index f78b45a..0000000
--- a/plat/arm/css/sgi/include/sgi_soc_css_def.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef SGI_SOC_CSS_DEF_H
-#define SGI_SOC_CSS_DEF_H
-
-#include <lib/utils_def.h>
-#include <plat/arm/board/common/v2m_def.h>
-#include <plat/arm/soc/common/soc_css_def.h>
-#include <plat/common/common_def.h>
-
-/*
- * Definitions common to all ARM CSSv1-based development platforms
- */
-
-/* Platform ID address */
-#define BOARD_CSS_PLAT_ID_REG_ADDR		UL(0x7ffe00e0)
-
-/* Platform ID related accessors */
-#define BOARD_CSS_PLAT_ID_REG_ID_MASK		0x0f
-#define BOARD_CSS_PLAT_ID_REG_ID_SHIFT		0x0
-#define BOARD_CSS_PLAT_TYPE_EMULATOR		0x02
-
-#ifndef __ASSEMBLER__
-
-#include <lib/mmio.h>
-
-#define BOARD_CSS_GET_PLAT_TYPE(addr)					\
-	((mmio_read_32(addr) & BOARD_CSS_PLAT_ID_REG_ID_MASK)		\
-	>> BOARD_CSS_PLAT_ID_REG_ID_SHIFT)
-
-#endif /* __ASSEMBLER__ */
-
-#define MAX_IO_DEVICES			3
-#define MAX_IO_HANDLES			4
-
-/* Reserve the last block of flash for PSCI MEM PROTECT flag */
-#define PLAT_ARM_FLASH_IMAGE_BASE	V2M_FLASH0_BASE
-#define PLAT_ARM_FLASH_IMAGE_MAX_SIZE	(V2M_FLASH0_SIZE - V2M_FLASH_BLOCK_SIZE)
-
-#define PLAT_ARM_NVM_BASE		V2M_FLASH0_BASE
-#define PLAT_ARM_NVM_SIZE		(V2M_FLASH0_SIZE - V2M_FLASH_BLOCK_SIZE)
-
-#endif /* SGI_SOC_CSS_DEF_H */
diff --git a/plat/arm/css/sgi/include/sgi_soc_css_def_v2.h b/plat/arm/css/sgi/include/sgi_soc_css_def_v2.h
index acf31eb..639b687 100644
--- a/plat/arm/css/sgi/include/sgi_soc_css_def_v2.h
+++ b/plat/arm/css/sgi/include/sgi_soc_css_def_v2.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -24,10 +24,17 @@
 
 #define SOC_CSS_PCIE_CONTROL_BASE	UL(0x0ef20000)
 
+/* PL011 UART related constants */
+#define SOC_CSS_UART1_BASE		UL(0x0ef80000)
+#define SOC_CSS_UART0_BASE		UL(0x0ef70000)
+
 /* Memory controller */
 #define SOC_MEMCNTRL_BASE		UL(0x10000000)
 #define SOC_MEMCNTRL_SIZE		UL(0x10000000)
 
+#define SOC_CSS_UART0_CLK_IN_HZ		UL(7372800)
+#define SOC_CSS_UART1_CLK_IN_HZ		UL(7372800)
+
 /* SoC NIC-400 Global Programmers View (GPV) */
 #define SOC_CSS_NIC400_BASE		UL(0x0ED00000)
 
@@ -199,4 +206,17 @@
 #define PLAT_ARM_NVM_BASE		V2M_FLASH0_BASE
 #define PLAT_ARM_NVM_SIZE		(V2M_FLASH0_SIZE - V2M_FLASH_BLOCK_SIZE)
 
+/* UART related constants */
+#define PLAT_ARM_BOOT_UART_BASE			SOC_CSS_UART0_BASE
+#define PLAT_ARM_BOOT_UART_CLK_IN_HZ		SOC_CSS_UART0_CLK_IN_HZ
+
+#define PLAT_ARM_RUN_UART_BASE			SOC_CSS_UART1_BASE
+#define PLAT_ARM_RUN_UART_CLK_IN_HZ		SOC_CSS_UART1_CLK_IN_HZ
+
+#define PLAT_ARM_SP_MIN_RUN_UART_BASE		SOC_CSS_UART1_BASE
+#define PLAT_ARM_SP_MIN_RUN_UART_CLK_IN_HZ	SOC_CSS_UART1_CLK_IN_HZ
+
+#define PLAT_ARM_CRASH_UART_BASE		PLAT_ARM_RUN_UART_BASE
+#define PLAT_ARM_CRASH_UART_CLK_IN_HZ		PLAT_ARM_RUN_UART_CLK_IN_HZ
+
 #endif /* SGI_SOC_CSS_DEF_V2_H */
diff --git a/plat/arm/css/sgi/include/sgi_soc_platform_def.h b/plat/arm/css/sgi/include/sgi_soc_platform_def.h
index 3b8d9c6..405d62f 100644
--- a/plat/arm/css/sgi/include/sgi_soc_platform_def.h
+++ b/plat/arm/css/sgi/include/sgi_soc_platform_def.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -7,10 +7,10 @@
 #ifndef SGI_SOC_PLATFORM_DEF_H
 #define SGI_SOC_PLATFORM_DEF_H
 
+#include <sgi_base_platform_def.h>
+#include <plat/arm/board/common/board_css_def.h>
 #include <plat/arm/board/common/v2m_def.h>
 #include <plat/arm/soc/common/soc_css_def.h>
-#include <sgi_base_platform_def.h>
-#include <sgi_soc_css_def.h>
 
 /* Map the System registers to access from S-EL0 */
 #define CSS_SYSTEMREG_DEVICE_BASE	(0x1C010000)
diff --git a/plat/arm/css/sgi/sgi_plat.c b/plat/arm/css/sgi/sgi_plat.c
index a0199c3..20c52e9 100644
--- a/plat/arm/css/sgi/sgi_plat.c
+++ b/plat/arm/css/sgi/sgi_plat.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2022, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -89,7 +89,6 @@
 const mmap_region_t plat_arm_secure_partition_mmap[] = {
 	PLAT_ARM_SECURE_MAP_SYSTEMREG,
 	PLAT_ARM_SECURE_MAP_NOR2,
-	SOC_PLATFORM_SECURE_UART,
 	PLAT_ARM_SECURE_MAP_DEVICE,
 	ARM_SP_IMAGE_MMAP,
 	ARM_SP_IMAGE_NS_BUF_MMAP,
diff --git a/plat/arm/css/sgi/sgi_plat_v2.c b/plat/arm/css/sgi/sgi_plat_v2.c
index cef5345..1a2a966 100644
--- a/plat/arm/css/sgi/sgi_plat_v2.c
+++ b/plat/arm/css/sgi/sgi_plat_v2.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -83,7 +83,6 @@
 const mmap_region_t plat_arm_secure_partition_mmap[] = {
 	PLAT_ARM_SECURE_MAP_SYSTEMREG,
 	PLAT_ARM_SECURE_MAP_NOR2,
-	SOC_PLATFORM_SECURE_UART,
 	SOC_PLATFORM_PERIPH_MAP_DEVICE_USER,
 	ARM_SP_IMAGE_MMAP,
 	ARM_SP_IMAGE_NS_BUF_MMAP,
diff --git a/plat/marvell/armada/a3k/common/cm3_system_reset.c b/plat/marvell/armada/a3k/common/cm3_system_reset.c
index 548ff51..f105d59 100644
--- a/plat/marvell/armada/a3k/common/cm3_system_reset.c
+++ b/plat/marvell/armada/a3k/common/cm3_system_reset.c
@@ -58,5 +58,5 @@
 	}
 
 	/* If we reach here, the command is not implemented. */
-	ERROR("System reset command not implemented in WTMI firmware!\n");
+	WARN("System reset command not implemented in WTMI firmware!\n");
 }
diff --git a/plat/mediatek/mt8186/drivers/dfd/plat_dfd.c b/plat/mediatek/mt8186/drivers/dfd/plat_dfd.c
new file mode 100644
index 0000000..ade0837
--- /dev/null
+++ b/plat/mediatek/mt8186/drivers/dfd/plat_dfd.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2022, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <lib/mmio.h>
+#include <mtk_sip_svc.h>
+#include <plat_dfd.h>
+
+static bool dfd_enabled;
+static uint64_t dfd_base_addr;
+static uint64_t dfd_chain_length;
+static uint64_t dfd_cache_dump;
+
+static void dfd_setup(uint64_t base_addr, uint64_t chain_length,
+		      uint64_t cache_dump)
+{
+	mmio_write_32(MCUSYS_DFD_MAP, base_addr >> 24);
+	mmio_write_32(WDT_DEBUG_CTL, WDT_DEBUG_CTL_VAL_0);
+
+	sync_writel(DFD_INTERNAL_CTL, (BIT(0) | BIT(2)));
+
+	mmio_setbits_32(DFD_INTERNAL_CTL, BIT(13));
+	mmio_setbits_32(DFD_INTERNAL_CTL, BIT(3));
+	mmio_setbits_32(DFD_INTERNAL_CTL, (BIT(19) | BIT(20)));
+	mmio_write_32(DFD_INTERNAL_PWR_ON, (BIT(0) | BIT(1) | BIT(3)));
+	mmio_write_32(DFD_CHAIN_LENGTH0, chain_length);
+	mmio_write_32(DFD_INTERNAL_SHIFT_CLK_RATIO, 0);
+	mmio_write_32(DFD_INTERNAL_TEST_SO_0, DFD_INTERNAL_TEST_SO_0_VAL);
+	mmio_write_32(DFD_INTERNAL_NUM_OF_TEST_SO_GROUP, 1);
+
+	mmio_write_32(DFD_TEST_SI_0, DFD_TEST_SI_0_VAL);
+	mmio_write_32(DFD_TEST_SI_1, DFD_TEST_SI_1_VAL);
+
+	sync_writel(DFD_V30_CTL, 1);
+
+	mmio_write_32(DFD_V30_BASE_ADDR, (base_addr & 0xFFF00000));
+
+	/* setup global variables for suspend and resume */
+	dfd_enabled = true;
+	dfd_base_addr = base_addr;
+	dfd_chain_length = chain_length;
+	dfd_cache_dump = cache_dump;
+
+	if ((cache_dump & DFD_CACHE_DUMP_ENABLE) != 0UL) {
+		mmio_write_32(WDT_DEBUG_CTL, WDT_DEBUG_CTL_VAL_1);
+		sync_writel(DFD_V35_ENALBE, 1);
+		sync_writel(DFD_V35_TAP_NUMBER, DFD_V35_TAP_NUMBER_VAL);
+		sync_writel(DFD_V35_TAP_EN, DFD_V35_TAP_EN_VAL);
+		sync_writel(DFD_V35_SEQ0_0, DFD_V35_SEQ0_0_VAL);
+
+		if (cache_dump & DFD_PARITY_ERR_TRIGGER) {
+			sync_writel(DFD_HW_TRIGGER_MASK, DFD_HW_TRIGGER_MASK_VAL);
+			mmio_setbits_32(DFD_INTERNAL_CTL, BIT(4));
+		}
+	}
+	dsbsy();
+}
+
+void dfd_resume(void)
+{
+	if (dfd_enabled == true) {
+		dfd_setup(dfd_base_addr, dfd_chain_length, dfd_cache_dump);
+	}
+}
+
+uint64_t dfd_smc_dispatcher(uint64_t arg0, uint64_t arg1,
+			    uint64_t arg2, uint64_t arg3)
+{
+	uint64_t ret = 0L;
+
+	switch (arg0) {
+	case PLAT_MTK_DFD_SETUP_MAGIC:
+		INFO("[%s] DFD setup call from kernel\n", __func__);
+		dfd_setup(arg1, arg2, arg3);
+		break;
+	case PLAT_MTK_DFD_READ_MAGIC:
+		/* only allow access within DFD register base + 0x200 */
+		if (arg1 <= 0x200) {
+			ret = mmio_read_32(MISC1_CFG_BASE + arg1);
+		}
+		break;
+	case PLAT_MTK_DFD_WRITE_MAGIC:
+		/* only allow access within DFD register base + 0x200 */
+		if (arg1 <= 0x200) {
+			sync_writel(MISC1_CFG_BASE + arg1, arg2);
+		}
+		break;
+	default:
+		ret = MTK_SIP_E_INVALID_PARAM;
+		break;
+	}
+
+	return ret;
+}
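For context, a hedged sketch of how a non-secure caller could reach
dfd_smc_dispatcher() through the SiP call wired up below in
plat_sip_calls.c. The smc64() helper is a hypothetical stand-in for
whatever SMCCC conduit the caller uses; the constants are taken from
this change:

#include <stdint.h>

/* Hypothetical SMCCC conduit helper: x0 = fid, x1-x3 = arguments. */
extern uint64_t smc64(uint64_t fid, uint64_t a1, uint64_t a2, uint64_t a3);

#define MTK_SIP_KERNEL_DFD_AARCH64	0xC2000205	/* plat_sip_calls.h */
#define PLAT_MTK_DFD_READ_MAGIC		0x99716151	/* plat_dfd.h */

/*
 * Read a DFD register at 'offset'. The dispatcher only honours offsets
 * up to 0x200 and silently returns 0 for anything beyond that.
 */
static uint64_t dfd_read(uint64_t offset)
{
	return smc64(MTK_SIP_KERNEL_DFD_AARCH64,
		     PLAT_MTK_DFD_READ_MAGIC, offset, 0);
}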
diff --git a/plat/mediatek/mt8186/drivers/dfd/plat_dfd.h b/plat/mediatek/mt8186/drivers/dfd/plat_dfd.h
new file mode 100644
index 0000000..1901ec9
--- /dev/null
+++ b/plat/mediatek/mt8186/drivers/dfd/plat_dfd.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2022, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLAT_DFD_H
+#define PLAT_DFD_H
+
+#include <arch_helpers.h>
+#include <lib/mmio.h>
+#include <platform_def.h>
+
+#define sync_writel(addr, val)	do { mmio_write_32((addr), (val)); \
+				dsbsy(); \
+				} while (0)
+
+#define PLAT_MTK_DFD_SETUP_MAGIC		(0x99716150)
+#define PLAT_MTK_DFD_READ_MAGIC			(0x99716151)
+#define PLAT_MTK_DFD_WRITE_MAGIC		(0x99716152)
+
+#define MCU_BIU_BASE				(MCUCFG_BASE)
+#define MISC1_CFG_BASE				(MCU_BIU_BASE + 0xA040)
+
+#define DFD_INTERNAL_CTL			(MISC1_CFG_BASE + 0x00)
+#define DFD_INTERNAL_PWR_ON			(MISC1_CFG_BASE + 0x08)
+#define DFD_CHAIN_LENGTH0			(MISC1_CFG_BASE + 0x0C)
+#define DFD_INTERNAL_SHIFT_CLK_RATIO		(MISC1_CFG_BASE + 0x10)
+#define DFD_INTERNAL_TEST_SO_0			(MISC1_CFG_BASE + 0x28)
+#define DFD_INTERNAL_NUM_OF_TEST_SO_GROUP	(MISC1_CFG_BASE + 0x30)
+#define DFD_V30_CTL				(MISC1_CFG_BASE + 0x48)
+#define DFD_V30_BASE_ADDR			(MISC1_CFG_BASE + 0x4C)
+#define DFD_TEST_SI_0				(MISC1_CFG_BASE + 0x58)
+#define DFD_TEST_SI_1				(MISC1_CFG_BASE + 0x5C)
+#define DFD_HW_TRIGGER_MASK			(MISC1_CFG_BASE + 0xBC)
+
+#define DFD_V35_ENALBE				(MCU_BIU_BASE + 0xA0A8)
+#define DFD_V35_TAP_NUMBER			(MCU_BIU_BASE + 0xA0AC)
+#define DFD_V35_TAP_EN				(MCU_BIU_BASE + 0xA0B0)
+#define DFD_V35_SEQ0_0				(MCU_BIU_BASE + 0xA0C0)
+#define DFD_V35_SEQ0_1				(MCU_BIU_BASE + 0xA0C4)
+
+#define DFD_CACHE_DUMP_ENABLE			(1U)
+#define DFD_PARITY_ERR_TRIGGER			(2U)
+
+#define MCUSYS_DFD_MAP				(0x10001390)
+#define WDT_DEBUG_CTL				(0x10007048)
+
+#define WDT_DEBUG_CTL_VAL_0			(0x950603A0)
+#define DFD_INTERNAL_TEST_SO_0_VAL		(0x3B)
+#define DFD_TEST_SI_0_VAL			(0x108)
+#define DFD_TEST_SI_1_VAL			(0x20200000)
+
+#define WDT_DEBUG_CTL_VAL_1			(0x95063E80)
+#define DFD_V35_TAP_NUMBER_VAL			(0xA)
+#define DFD_V35_TAP_EN_VAL			(0x3FF)
+#define DFD_V35_SEQ0_0_VAL			(0x63668820)
+#define DFD_HW_TRIGGER_MASK_VAL			(0xC)
+
+void dfd_resume(void);
+uint64_t dfd_smc_dispatcher(uint64_t arg0, uint64_t arg1,
+			    uint64_t arg2, uint64_t arg3);
+
+#endif /* PLAT_DFD_H */
diff --git a/plat/mediatek/mt8186/include/plat_sip_calls.h b/plat/mediatek/mt8186/include/plat_sip_calls.h
index 598a5b8..9e3726b 100644
--- a/plat/mediatek/mt8186/include/plat_sip_calls.h
+++ b/plat/mediatek/mt8186/include/plat_sip_calls.h
@@ -10,6 +10,10 @@
 /*******************************************************************************
  * Plat SiP function constants
  ******************************************************************************/
-#define MTK_PLAT_SIP_NUM_CALLS    0
+#define MTK_PLAT_SIP_NUM_CALLS		(2)
+
+/* DFD */
+#define MTK_SIP_KERNEL_DFD_AARCH32	(0x82000205)
+#define MTK_SIP_KERNEL_DFD_AARCH64	(0xC2000205)
 
 #endif /* PLAT_SIP_CALLS_H */
diff --git a/plat/mediatek/mt8186/plat_pm.c b/plat/mediatek/mt8186/plat_pm.c
index 6bc6b9d..e125c99 100644
--- a/plat/mediatek/mt8186/plat_pm.c
+++ b/plat/mediatek/mt8186/plat_pm.c
@@ -13,6 +13,7 @@
 #include <mt_gic_v3.h>
 #include <mtspmc.h>
 #include <plat/common/platform.h>
+#include <plat_dfd.h>
 #include <plat_mtk_lpm.h>
 #include <plat_params.h>
 #include <plat_pm.h>
@@ -164,6 +165,8 @@
 	mt_gic_distif_restore();
 	gic_sgi_restore_all();
 
+	dfd_resume();
+
 	(void)plat_mt_pm_invoke(plat_mt_pm->pwr_mcusys_on_finished, cpu, state);
 }
 
diff --git a/plat/mediatek/mt8186/plat_sip_calls.c b/plat/mediatek/mt8186/plat_sip_calls.c
index 87ba786..cb66218 100644
--- a/plat/mediatek/mt8186/plat_sip_calls.c
+++ b/plat/mediatek/mt8186/plat_sip_calls.c
@@ -8,6 +8,7 @@
 #include <common/runtime_svc.h>
 #include <mt_spm_vcorefs.h>
 #include <mtk_sip_svc.h>
+#include <plat_dfd.h>
 #include "plat_sip_calls.h"
 
 uintptr_t mediatek_plat_sip_handler(uint32_t smc_fid,
@@ -27,6 +28,11 @@
 		ret = spm_vcorefs_args(x1, x2, x3, (uint64_t *)&x4);
 		SMC_RET2(handle, ret, x4);
 		break;
+	case MTK_SIP_KERNEL_DFD_AARCH32:
+	case MTK_SIP_KERNEL_DFD_AARCH64:
+		ret = dfd_smc_dispatcher(x1, x2, x3, x4);
+		SMC_RET1(handle, ret);
+		break;
 	default:
 		ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
 		break;
diff --git a/plat/mediatek/mt8186/platform.mk b/plat/mediatek/mt8186/platform.mk
index 6108a05..b6d9ca8 100644
--- a/plat/mediatek/mt8186/platform.mk
+++ b/plat/mediatek/mt8186/platform.mk
@@ -15,6 +15,7 @@
 		 -I${MTK_PLAT}/common/lpm/                        \
                  -I${MTK_PLAT_SOC}/drivers/spm/                   \
                  -I${MTK_PLAT_SOC}/drivers/dcm/                   \
+                 -I${MTK_PLAT_SOC}/drivers/dfd/                   \
                  -I${MTK_PLAT_SOC}/drivers/emi_mpu/               \
                  -I${MTK_PLAT_SOC}/drivers/gpio/               \
                  -I${MTK_PLAT_SOC}/drivers/mcdi/                  \
@@ -58,6 +59,7 @@
                 ${MTK_PLAT_SOC}/bl31_plat_setup.c                     \
                 ${MTK_PLAT_SOC}/drivers/dcm/mtk_dcm.c                 \
                 ${MTK_PLAT_SOC}/drivers/dcm/mtk_dcm_utils.c           \
+                ${MTK_PLAT_SOC}/drivers/dfd/plat_dfd.c                \
                 ${MTK_PLAT_SOC}/drivers/emi_mpu/emi_mpu.c             \
                 ${MTK_PLAT_SOC}/drivers/gpio/mtgpio.c                 \
                 ${MTK_PLAT_SOC}/drivers/mcdi/mt_cpu_pm.c              \
diff --git a/plat/nxp/common/soc_errata/errata_a050426.c b/plat/nxp/common/soc_errata/errata_a050426.c
index 13a0000..ba4f71f 100644
--- a/plat/nxp/common/soc_errata/errata_a050426.c
+++ b/plat/nxp/common/soc_errata/errata_a050426.c
@@ -1,16 +1,26 @@
 /*
- * Copyright 2021 NXP
+ * Copyright 2021-2022 NXP
  *
  * SPDX-License-Identifier: BSD-3-Clause
  *
  */
 
+#include <common/debug.h>
 #include <mmio.h>
 
 void erratum_a050426(void)
 {
 	uint32_t i, val3, val4;
 
+	/*
+	 * Part of this erratum is implemented in the RCW, and the
+	 * SCRATCHRW5 register is updated to hold the erratum number.
+	 * Validate that the RCW already includes the required changes.
+	 */
+	if (mmio_read_32(0x01e00210) != 0x00050426) {
+		ERROR("%s: Invalid RCW : ERR050426 not implemented\n", __func__);
+	}
+
 	/* Enable BIST to access Internal memory locations */
 	val3 = mmio_read_32(0x700117E60);
 	mmio_write_32(0x700117E60, (val3 | 0x80000001));
@@ -63,7 +73,7 @@
 		mmio_write_32(0x706718000 + (i * 4), 0x55555555);
 		mmio_write_32(0x706718800 + (i * 4), 0x55555555);
 	}
-	mmio_write_32(0x706b0a000 + (i * 4), 0x55555555);
+	mmio_write_32(0x706b0a000, 0x55555555);
 
 	for (i = 0U; i < 4U; i++) {
 		mmio_write_32(0x706b0e000 + (i * 4), 0x55555555);
@@ -79,7 +89,7 @@
 		mmio_write_32(0x706b15000 + (i * 4), 0x55555555);
 		mmio_write_32(0x706b15800 + (i * 4), 0x55555555);
 	}
-	mmio_write_32(0x706e12000 + (i * 4), 0x55555555);
+	mmio_write_32(0x706e12000, 0x55555555);
 
 	for (i = 0U; i < 4U; i++) {
 		mmio_write_32(0x706e14000 + (i * 4), 0x55555555);
@@ -147,62 +157,6 @@
 		mmio_write_32(0x70a209800 + (i * 4), 0x55555555);
 	}
 
-	/* PEX1 Internal Memory.*/
-	for (i = 0U; i < 3U; i++) {
-		mmio_write_32(0x70a508000 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 5U; i++) {
-		mmio_write_32(0x70a520000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70a528000 + (i * 4), 0x55555555);
-	}
-
-	/* PEX2 Internal Memory.*/
-	for (i = 0U; i < 3U; i++) {
-		mmio_write_32(0x70a608000 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 5U; i++) {
-		mmio_write_32(0x70a620000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70a628000 + (i * 4), 0x55555555);
-	}
-
-	/* PEX3 Internal Memory.*/
-	for (i = 0U; i < 5U; i++) {
-		mmio_write_32(0x70a708000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70a728000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70a730000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70a738000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70a748000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70a758000 + (i * 4), 0x55555555);
-	}
-
-	/* PEX4 Internal Memory.*/
-	for (i = 0U; i < 3U; i++) {
-		mmio_write_32(0x70a808000 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 5U; i++) {
-		mmio_write_32(0x70a820000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70a828000 + (i * 4), 0x55555555);
-	}
-
-	/* PEX5 Internal Memory.*/
-	for (i = 0U; i < 5U; i++) {
-		mmio_write_32(0x70aa08000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70aa28000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70aa30000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70aa38000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70aa48000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70aa58000 + (i * 4), 0x55555555);
-	}
-
-	/* PEX6 Internal Memory.*/
-	for (i = 0U; i < 3U; i++) {
-		mmio_write_32(0x70ab08000 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 5U; i++) {
-		mmio_write_32(0x70ab20000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70ab28000 + (i * 4), 0x55555555);
-	}
-
 	/* QDMA Internal Memory.*/
 	for (i = 0U; i < 5U; i++) {
 		mmio_write_32(0x70b008000 + (i * 4), 0x55555555);
@@ -241,174 +195,6 @@
 		mmio_write_32(0x70b029800 + (i * 4), 0x55555555);
 	}
 
-	/* lnx1_e1000#0 Internal Memory.*/
-	for (i = 0U; i < 3U; i++) {
-		mmio_write_32(0x70c00a000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00a200 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00a400 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00a600 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00a800 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00aa00 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00ac00 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00ae00 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00b000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00b200 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00b400 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00b600 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00b800 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00ba00 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00bc00 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00be00 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 5U; i++) {
-		mmio_write_32(0x70c00c000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00c400 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00c800 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00cc00 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00d000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00d400 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00d800 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00dc00 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 3U; i++) {
-		mmio_write_32(0x70c00e000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c00f000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c012000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c012200 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c012400 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c012600 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c012800 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c012a00 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c012c00 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c012e00 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c013000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c013200 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c013400 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c013600 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c013800 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c013a00 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c013c00 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c013e00 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 5U; i++) {
-		mmio_write_32(0x70c014000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c014400 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c014800 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c014c00 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c015000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c015400 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c015800 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c015c00 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 3U; i++) {
-		mmio_write_32(0x70c016000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c017000 + (i * 4), 0x55555555);
-	}
-
-	/* lnx1_xfi Internal Memory.*/
-	for (i = 0U; i < 3U; i++) {
-		mmio_write_32(0x70c108000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c108200 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c10a000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c10a400 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 5U; i++) {
-		mmio_write_32(0x70c10c000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c10c400 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 3U; i++) {
-		mmio_write_32(0x70c10e000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c10e200 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c110000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c110400 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 5U; i++) {
-		mmio_write_32(0x70c112000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c112400 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 3U; i++) {
-		mmio_write_32(0x70c114000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c114200 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c116000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c116400 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 5U; i++) {
-		mmio_write_32(0x70c118000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c118400 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 3U; i++) {
-		mmio_write_32(0x70c11a000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c11a200 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c11c000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c11c400 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 5U; i++) {
-		mmio_write_32(0x70c11e000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c11e400 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 3U; i++) {
-		mmio_write_32(0x70c120000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c120200 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c122000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c122400 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 5U; i++) {
-		mmio_write_32(0x70c124000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c124400 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 3U; i++) {
-		mmio_write_32(0x70c126000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c126200 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c128000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c128400 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 5U; i++) {
-		mmio_write_32(0x70c12a000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c12a400 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 3U; i++) {
-		mmio_write_32(0x70c12c000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c12c200 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c12e000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c12e400 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 5U; i++) {
-		mmio_write_32(0x70c130000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c130400 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 3U; i++) {
-		mmio_write_32(0x70c132000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c132200 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c134000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c134400 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 5U; i++) {
-		mmio_write_32(0x70c136000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c136400 + (i * 4), 0x55555555);
-	}
-
-	/* lnx2_xfi Internal Memory.*/
-	for (i = 0U; i < 3U; i++) {
-		mmio_write_32(0x70c308000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c308200 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c30a000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c30a400 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 5U; i++) {
-		mmio_write_32(0x70c30c000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c30c400 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 3U; i++) {
-		mmio_write_32(0x70c30e000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c30e200 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c310000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c310400 + (i * 4), 0x55555555);
-	}
-	for (i = 0U; i < 5U; i++) {
-		mmio_write_32(0x70c312000 + (i * 4), 0x55555555);
-		mmio_write_32(0x70c312400 + (i * 4), 0x55555555);
-	}
-
 	/* Disable BIST */
 	mmio_write_32(0x700117E60, val3);
 	mmio_write_32(0x700117E90, val4);
diff --git a/plat/st/common/bl2_stm32_io_storage.c b/plat/st/common/bl2_stm32_io_storage.c
index 2d68a50..4391195 100644
--- a/plat/st/common/bl2_stm32_io_storage.c
+++ b/plat/st/common/bl2_stm32_io_storage.c
@@ -379,19 +379,21 @@
 		stm32_sdmmc2_mmc_get_device_size();
 
 #if STM32MP_EMMC_BOOT
-	magic = get_boot_part_ssbl_header();
+	if (mmc_dev_type == MMC_IS_EMMC) {
+		magic = get_boot_part_ssbl_header();
 
-	if (magic == BOOT_API_IMAGE_HEADER_MAGIC_NB) {
-		VERBOSE("%s, header found, jump to emmc load\n", __func__);
-		idx = IMG_IDX_BL33;
-		part = &stm32image_dev_info_spec.part_info[idx];
-		part->part_offset = PLAT_EMMC_BOOT_SSBL_OFFSET;
-		part->bkp_offset = 0U;
-		mmc_device_spec.use_boot_part = true;
+		if (magic == BOOT_API_IMAGE_HEADER_MAGIC_NB) {
+			VERBOSE("%s, header found, jump to emmc load\n", __func__);
+			idx = IMG_IDX_BL33;
+			part = &stm32image_dev_info_spec.part_info[idx];
+			part->part_offset = PLAT_EMMC_BOOT_SSBL_OFFSET;
+			part->bkp_offset = 0U;
+			mmc_device_spec.use_boot_part = true;
 
-		goto emmc_boot;
-	} else {
-		WARN("%s: Can't find STM32 header on a boot partition\n", __func__);
+			goto emmc_boot;
+		} else {
+			WARN("%s: Can't find STM32 header on a boot partition\n", __func__);
+		}
 	}
 #endif
 
diff --git a/plat/st/stm32mp1/platform.mk b/plat/st/stm32mp1/platform.mk
index 0f579a4..a4c40c4 100644
--- a/plat/st/stm32mp1/platform.mk
+++ b/plat/st/stm32mp1/platform.mk
@@ -38,6 +38,7 @@
 
 # Not needed for Cortex-A7
 WORKAROUND_CVE_2017_5715:=	0
+WORKAROUND_CVE_2022_23960:=	0
 
 ifeq (${PSA_FWU_SUPPORT},1)
 ifneq (${STM32MP_USE_STM32IMAGE},1)
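The override above works because cpu-ops.mk assigns the flag with ?=, so a
platform makefile or the build command line (for example,
make PLAT=<plat> WORKAROUND_CVE_2022_23960=0) can switch the mitigation off
for platforms whose cores are not affected.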
diff --git a/plat/xilinx/common/pm_service/pm_ipi.c b/plat/xilinx/common/pm_service/pm_ipi.c
index 7b2c8ec..03a7278 100644
--- a/plat/xilinx/common/pm_service/pm_ipi.c
+++ b/plat/xilinx/common/pm_service/pm_ipi.c
@@ -6,10 +6,8 @@
 
 
 #include <arch_helpers.h>
-
 #include <lib/bakery_lock.h>
 #include <lib/mmio.h>
-
 #include <ipi.h>
 #include <plat_ipi.h>
 #include <plat_private.h>
@@ -17,7 +15,6 @@
 
 #include "pm_ipi.h"
 
-
 #define ERROR_CODE_MASK		0xFFFFU
 
 DEFINE_BAKERY_LOCK(pm_secure_lock);
diff --git a/plat/xilinx/versal/pm_service/pm_client.c b/plat/xilinx/versal/pm_service/pm_client.c
index 77ec20e..4c1d340 100644
--- a/plat/xilinx/versal/pm_service/pm_client.c
+++ b/plat/xilinx/versal/pm_service/pm_client.c
@@ -136,6 +136,7 @@
 			enum pm_device_node_idx node_idx;
 			uint32_t idx, irq, lowest_set = reg & (-reg);
 			enum pm_ret_status ret;
+
 			idx = __builtin_ctz(lowest_set);
 			irq = base_irq + idx;
 
diff --git a/plat/xilinx/zynqmp/platform.mk b/plat/xilinx/zynqmp/platform.mk
index 3d98584..620bf6c 100644
--- a/plat/xilinx/zynqmp/platform.mk
+++ b/plat/xilinx/zynqmp/platform.mk
@@ -45,11 +45,11 @@
 
 
 ifdef ZYNQMP_WDT_RESTART
-$(eval $(call add_define,ZYNQMP_WDT_RESTART))
+    $(eval $(call add_define,ZYNQMP_WDT_RESTART))
 endif
 
 ifdef ZYNQMP_IPI_CRC_CHECK
-  $(warning "ZYNQMP_IPI_CRC_CHECK macro is deprecated...instead please use IPI_CRC_CHECK.")
+    $(warning "ZYNQMP_IPI_CRC_CHECK macro is deprecated...instead please use IPI_CRC_CHECK.")
 endif
 
 ifdef IPI_CRC_CHECK
diff --git a/plat/xilinx/zynqmp/pm_service/pm_api_sys.h b/plat/xilinx/zynqmp/pm_service/pm_api_sys.h
index 4e38c42..48b3877 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_api_sys.h
+++ b/plat/xilinx/zynqmp/pm_service/pm_api_sys.h
@@ -175,7 +175,6 @@
 				   uint32_t key_lo,
 				   uint32_t key_hi,
 				   uint32_t *value);
-
 enum pm_ret_status pm_fpga_read(uint32_t reg_numframes,
 				uint32_t address_low,
 				uint32_t address_high,
@@ -189,15 +188,12 @@
 				      unsigned int mask,
 				      unsigned int value,
 				      unsigned int *out);
-
 enum pm_ret_status pm_pll_set_parameter(enum pm_node_id nid,
-				enum pm_pll_param param_id,
-				unsigned int value);
-
+					enum pm_pll_param param_id,
+					unsigned int value);
 enum pm_ret_status pm_pll_get_parameter(enum pm_node_id nid,
-				enum pm_pll_param param_id,
-				unsigned int *value);
-
+					enum pm_pll_param param_id,
+					unsigned int *value);
 enum pm_ret_status pm_pll_set_mode(enum pm_node_id nid, enum pm_pll_mode mode);
 enum pm_ret_status pm_pll_get_mode(enum pm_node_id nid, enum pm_pll_mode *mode);
 enum pm_ret_status pm_efuse_access(uint32_t address_high,
@@ -205,7 +201,6 @@
 enum pm_ret_status em_set_action(unsigned int *value);
 enum pm_ret_status em_remove_action(unsigned int *value);
 enum pm_ret_status em_send_errors(unsigned int *value);
-
 enum pm_ret_status pm_feature_config(unsigned int ioctl_id,
 				     unsigned int config_id,
 				     unsigned int value,
diff --git a/plat/xilinx/zynqmp/pm_service/pm_defs.h b/plat/xilinx/zynqmp/pm_service/pm_defs.h
index 2baf960..8eb197a 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_defs.h
+++ b/plat/xilinx/zynqmp/pm_service/pm_defs.h
@@ -303,16 +303,16 @@
 };
 
 /**
- * @PM_PLL_PARAM_DIV2:         Enable for divide by 2 function inside the PLL
- * @PM_PLL_PARAM_FBDIV:        Feedback divisor integer portion for the PLL
- * @PM_PLL_PARAM_DATA:         Feedback divisor fractional portion for the PLL
- * @PM_PLL_PARAM_PRE_SRC:      Clock source for PLL input
- * @PM_PLL_PARAM_POST_SRC:     Clock source for PLL Bypass mode
- * @PM_PLL_PARAM_LOCK_DLY:     Lock circuit config settings for lock windowsize
- * @PM_PLL_PARAM_LOCK_CNT:     Lock circuit counter setting
- * @PM_PLL_PARAM_LFHF:         PLL loop filter high frequency capacitor control
- * @PM_PLL_PARAM_CP:           PLL charge pump control
- * @PM_PLL_PARAM_RES:          PLL loop filter resistor control
+ * @PM_PLL_PARAM_DIV2:		Enable for divide by 2 function inside the PLL
+ * @PM_PLL_PARAM_FBDIV:		Feedback divisor integer portion for the PLL
+ * @PM_PLL_PARAM_DATA:		Feedback divisor fractional portion for the PLL
+ * @PM_PLL_PARAM_PRE_SRC:	Clock source for PLL input
+ * @PM_PLL_PARAM_POST_SRC:	Clock source for PLL Bypass mode
+ * @PM_PLL_PARAM_LOCK_DLY:	Lock circuit config settings for lock windowsize
+ * @PM_PLL_PARAM_LOCK_CNT:	Lock circuit counter setting
+ * @PM_PLL_PARAM_LFHF:		PLL loop filter high frequency capacitor control
+ * @PM_PLL_PARAM_CP:		PLL charge pump control
+ * @PM_PLL_PARAM_RES:		PLL loop filter resistor control
  */
 enum pm_pll_param {
 	PM_PLL_PARAM_DIV2,
@@ -329,9 +329,9 @@
 };
 
 /**
- * @PM_PLL_MODE_RESET:         PLL is in reset (not locked)
- * @PM_PLL_MODE_INTEGER:       PLL is locked in integer mode
- * @PM_PLL_MODE_FRACTIONAL:    PLL is locked in fractional mode
+ * @PM_PLL_MODE_RESET:		PLL is in reset (not locked)
+ * @PM_PLL_MODE_INTEGER:	PLL is locked in integer mode
+ * @PM_PLL_MODE_FRACTIONAL:	PLL is locked in fractional mode
  */
 enum pm_pll_mode {
 	PM_PLL_MODE_RESET,
@@ -341,8 +341,8 @@
 };
 
 /**
- * @PM_CLOCK_DIV0_ID:          Clock divider 0
- * @PM_CLOCK_DIV1_ID:          Clock divider 1
+ * @PM_CLOCK_DIV0_ID:		Clock divider 0
+ * @PM_CLOCK_DIV1_ID:		Clock divider 1
  */
 enum pm_clock_div_id {
 	PM_CLOCK_DIV0_ID,
diff --git a/plat/xilinx/zynqmp/pm_service/pm_svc_main.c b/plat/xilinx/zynqmp/pm_service/pm_svc_main.c
index b789da1..d88e5fa 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_svc_main.c
+++ b/plat/xilinx/zynqmp/pm_service/pm_svc_main.c
@@ -34,7 +34,6 @@
 static int active_cores = 0;
 #endif
 
-
 /**
  * pm_context - Structure which contains data for power management
  * @api_version		version of PM API, must match with one on PMU side
@@ -103,7 +102,7 @@
  * action.
  */
 static uint64_t ttc_fiq_handler(uint32_t id, uint32_t flags, void *handle,
-                               void *cookie)
+				void *cookie)
 {
 	INFO("BL31: Got TTC FIQ\n");
 
@@ -136,7 +135,7 @@
  * running CPU calls system restart.
  */
 static uint64_t __unused __dead2 zynqmp_sgi7_irq(uint32_t id, uint32_t flags,
-                                                void *handle, void *cookie)
+						 void *handle, void *cookie)
 {
 	int i;
 	uint32_t value;
diff --git a/plat/xilinx/zynqmp/tsp/tsp_plat_setup.c b/plat/xilinx/zynqmp/tsp/tsp_plat_setup.c
index 5e770f7..352ba82 100644
--- a/plat/xilinx/zynqmp/tsp/tsp_plat_setup.c
+++ b/plat/xilinx/zynqmp/tsp/tsp_plat_setup.c
@@ -8,7 +8,6 @@
 #include <common/debug.h>
 #include <drivers/console.h>
 #include <plat/arm/common/plat_arm.h>
-
 #include <plat_private.h>
 #include <platform_tsp.h>
 
diff --git a/services/arm_arch_svc/arm_arch_svc_setup.c b/services/arm_arch_svc/arm_arch_svc_setup.c
index 5523a1c..46ccd9e 100644
--- a/services/arm_arch_svc/arm_arch_svc_setup.c
+++ b/services/arm_arch_svc/arm_arch_svc_setup.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,7 @@
 #include <lib/cpus/errata_report.h>
 #include <lib/cpus/wa_cve_2017_5715.h>
 #include <lib/cpus/wa_cve_2018_3639.h>
+#include <lib/cpus/wa_cve_2022_23960.h>
 #include <lib/smccc.h>
 #include <services/arm_arch_svc.h>
 #include <smccc_helpers.h>
@@ -74,6 +75,20 @@
 	}
 #endif
 
+#if (WORKAROUND_CVE_2022_23960 || WORKAROUND_CVE_2017_5715)
+	case SMCCC_ARCH_WORKAROUND_3:
+		/*
+		 * SMCCC_ARCH_WORKAROUND_3 should also take into account
+		 * CVE-2017-5715 since this SMC can be used instead of
+		 * SMCCC_ARCH_WORKAROUND_1.
+		 */
+		if ((check_smccc_arch_wa3_applies() == ERRATA_NOT_APPLIES) &&
+		    (check_wa_cve_2017_5715() == ERRATA_NOT_APPLIES)) {
+			return 1;
+		}
+		return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
+#endif
+
 	/* Fallthrough */
 
 	default:
@@ -117,7 +132,7 @@
 	case SMCCC_ARCH_WORKAROUND_1:
 		/*
 		 * The workaround has already been applied on affected PEs
-		 * during entry to EL3.  On unaffected PEs, this function
+		 * during entry to EL3. On unaffected PEs, this function
 		 * has no effect.
 		 */
 		SMC_RET0(handle);
@@ -132,6 +147,15 @@
 		 */
 		SMC_RET0(handle);
 #endif
+#if (WORKAROUND_CVE_2022_23960 || WORKAROUND_CVE_2017_5715)
+	case SMCCC_ARCH_WORKAROUND_3:
+		/*
+		 * The workaround has already been applied on affected PEs
+		 * during entry to EL3. On unaffected PEs, this function
+		 * has no effect.
+		 */
+		SMC_RET0(handle);
+#endif
 	default:
 		WARN("Unimplemented Arm Architecture Service Call: 0x%x \n",
 			smc_fid);
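Per the SMCCC convention, callers are expected to probe SMCCC_ARCH_FEATURES
before invoking SMCCC_ARCH_WORKAROUND_3. With the hunks above, the probe
returns 1 (mitigation not required) only when neither CVE-2022-23960 nor
CVE-2017-5715 applies to the PE, and 0 otherwise. A hedged caller-side
sketch; smc64() is again a hypothetical conduit helper and the constants
are illustrative:

#include <stdint.h>

extern uint64_t smc64(uint64_t fid, uint64_t a1, uint64_t a2, uint64_t a3);

#define SMCCC_ARCH_FEATURES	0x80000001U
#define SMCCC_ARCH_WORKAROUND_3	0x80003FFFU

/* Apply the BHB workaround on this PE if firmware reports it is needed. */
static void maybe_apply_workaround_3(void)
{
	int64_t ret = (int64_t)smc64(SMCCC_ARCH_FEATURES,
				     SMCCC_ARCH_WORKAROUND_3, 0, 0);

	if (ret == 0)
		(void)smc64(SMCCC_ARCH_WORKAROUND_3, 0, 0, 0);
}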