Merge changes from topic "rss/mboot-attest" into integration

* changes:
  docs(maintainers): add PSA, MHU, RSS comms code owners
  feat(plat/arm/fvp): enable RSS backend based measured boot
  feat(lib/psa): mock PSA APIs
  feat(drivers/measured_boot): add RSS backend
  feat(drivers/arm/rss): add RSS communication driver
  feat(lib/psa): add initial attestation API
  feat(lib/psa): add measured boot API
  feat(drivers/arm/mhu): add MHU driver
diff --git a/docs/about/release-information.rst b/docs/about/release-information.rst
index b3553ae..e9eaa80 100644
--- a/docs/about/release-information.rst
+++ b/docs/about/release-information.rst
@@ -48,7 +48,9 @@
 +-----------------+---------------------------+------------------------------+
 | v2.6            | 4th week of Nov '21       | 2nd week of Nov '21          |
 +-----------------+---------------------------+------------------------------+
-| v2.7            | 2nd week of May '22       | 4th week of Apr '22          |
+| v2.7            | 5th week of May '22       | 3rd week of May '22          |
++-----------------+---------------------------+------------------------------+
+| v2.8            | 5th week of Nov '22       | 3rd week of Nov '22          |
 +-----------------+---------------------------+------------------------------+
 
 Removal of Deprecated Interfaces
diff --git a/docs/design/cpu-specific-build-macros.rst b/docs/design/cpu-specific-build-macros.rst
index 3029458..fbd2cbc 100644
--- a/docs/design/cpu-specific-build-macros.rst
+++ b/docs/design/cpu-specific-build-macros.rst
@@ -296,6 +296,14 @@
    CPU. This needs to be enabled for revisions r1p0, r1p1, and r1p2. The issue
    is present in r0p0 but there is no workaround. It is still open.
 
+-  ``ERRATA_A78_2376745``: This applies errata 2376745 workaround to Cortex-A78
+   CPU. This needs to be enabled for revisions r0p0, r1p0, r1p1, and r1p2, and
+   it is still open.
+
+-  ``ERRATA_A78_2395406``: This applies errata 2395406 workaround to Cortex-A78
+   CPU. This needs to be enabled for revisions r0p0, r1p0, r1p1, and r1p2, and
+   it is still open.
+
 For Cortex-A78 AE, the following errata build flags are defined :
 
 - ``ERRATA_A78_AE_1941500`` : This applies errata 1941500 workaround to
@@ -314,6 +322,17 @@
   Cortex-A78 AE CPU. This needs to be enabled for revisions r0p0 and r0p1. This
   erratum is still open.
 
+For Cortex-X1 CPU, the following errata build flags are defined:
+
+-  ``ERRATA_X1_1821534``: This applies errata 1821534 workaround to Cortex-X1
+   CPU. This needs to be enabled only for revision <= r1p0 of the CPU.
+
+-  ``ERRATA_X1_1688305``: This applies errata 1688305 workaround to Cortex-X1
+   CPU. This needs to be enabled only for revision <= r1p0 of the CPU.
+
+-  ``ERRATA_X1_1827429``: This applies errata 1827429 workaround to Cortex-X1
+   CPU. This needs to be enabled only for revision <= r1p0 of the CPU.
+
 For Neoverse N1, the following errata build flags are defined :
 
 -  ``ERRATA_N1_1073348``: This applies errata 1073348 workaround to Neoverse-N1
@@ -433,6 +452,10 @@
    Cortex-A710 CPU. This needs to be enabled for revisions r0p0, r1p0 and r2p0
    of the CPU and is fixed in r2p1.
 
+-  ``ERRATA_A710_2008768``: This applies errata 2008768 workaround to
+   Cortex-A710 CPU. This needs to be enabled for revisions r0p0, r1p0 and r2p0
+   of the CPU and is fixed in r2p1.
+
 For Neoverse N2, the following errata build flags are defined :
 
 -  ``ERRATA_N2_2002655``: This applies errata 2002655 workaround to Neoverse-N2
@@ -553,6 +576,12 @@
    r2p0 it is fixed). However, please note that this workaround results in
    increased DSU power consumption on idle.
 
+-  ``ERRATA_DSU_2313941``: This applies errata 2313941 workaround for the
+   affected DSU configurations. This errata applies to DSUs with revisions
+   r0p0, r1p0, r2p0, r2p1, r3p0 and r3p1 and is still open. However, please
+   note that this workaround results in increased DSU power consumption on
+   idle.
+
 CPU Specific optimizations
 --------------------------
 
diff --git a/docs/plat/xilinx-versal.rst b/docs/plat/xilinx-versal.rst
index 91ad6f1..09a6ee2 100644
--- a/docs/plat/xilinx-versal.rst
+++ b/docs/plat/xilinx-versal.rst
@@ -44,7 +44,7 @@
 *   `VERSAL_PLATFORM`: Select the platform. Options:
     -   `versal_virt`	: Versal Virtual platform
     -   `spp_itr6`	: SPP ITR6
-    -   `emu_it6`	: EMU ITR6
+    -   `emu_itr6`	: EMU ITR6
 
 # PLM->TF-A Parameter Passing
 ------------------------------
diff --git a/drivers/st/spi/stm32_qspi.c b/drivers/st/spi/stm32_qspi.c
index d3c26d9..73aa9ac 100644
--- a/drivers/st/spi/stm32_qspi.c
+++ b/drivers/st/spi/stm32_qspi.c
@@ -1,13 +1,10 @@
 /*
- * Copyright (c) 2019-2021, STMicroelectronics - All Rights Reserved
+ * Copyright (c) 2019-2022, STMicroelectronics - All Rights Reserved
  *
  * SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
  */
 
 #include <inttypes.h>
-#include <libfdt.h>
-
-#include <platform_def.h>
 
 #include <common/debug.h>
 #include <common/fdt_wrappers.h>
@@ -19,6 +16,9 @@
 #include <drivers/st/stm32mp_reset.h>
 #include <lib/mmio.h>
 #include <lib/utils_def.h>
+#include <libfdt.h>
+
+#include <platform_def.h>
 
 /* Timeout for device interface reset */
 #define TIMEOUT_US_1_MS			1000U
@@ -139,10 +139,6 @@
 	int ret = 0;
 	uint64_t timeout;
 
-	if (op->data.nbytes == 0U) {
-		return stm32_qspi_wait_for_not_busy();
-	}
-
 	timeout = timeout_init_us(QSPI_CMD_TIMEOUT_US);
 	while ((mmio_read_32(qspi_base() + QSPI_SR) & QSPI_SR_TCF) == 0U) {
 		if (timeout_elapsed(timeout)) {
@@ -163,6 +159,10 @@
 	/* Clear flags */
 	mmio_write_32(qspi_base() + QSPI_FCR, QSPI_FCR_CTCF | QSPI_FCR_CTEF);
 
+	if (ret == 0) {
+		ret = stm32_qspi_wait_for_not_busy();
+	}
+
 	return ret;
 }
 
@@ -251,11 +251,6 @@
 		op->dummy.buswidth, op->data.buswidth,
 		op->addr.val, op->data.nbytes);
 
-	ret = stm32_qspi_wait_for_not_busy();
-	if (ret != 0) {
-		return ret;
-	}
-
 	addr_max = op->addr.val + op->data.nbytes + 1U;
 
 	if ((op->data.dir == SPI_MEM_DATA_IN) && (op->data.nbytes != 0U)) {
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index bbbc77a..dfb9fe4 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -488,7 +488,8 @@
 #define SCR_HXEn_BIT		(UL(1) << 38)
 #define SCR_ENTP2_SHIFT		U(41)
 #define SCR_ENTP2_BIT		(UL(1) << SCR_ENTP2_SHIFT)
-#define SCR_AMVOFFEN_BIT	(UL(1) << 35)
+#define SCR_AMVOFFEN_SHIFT	U(35)
+#define SCR_AMVOFFEN_BIT	(UL(1) << SCR_AMVOFFEN_SHIFT)
 #define SCR_TWEDEn_BIT		(UL(1) << 29)
 #define SCR_ECVEN_BIT		(UL(1) << 28)
 #define SCR_FGTEN_BIT		(UL(1) << 27)
@@ -1222,7 +1223,8 @@
 #define ERXMISC0_EL1		S3_0_C5_C5_0
 #define ERXMISC1_EL1		S3_0_C5_C5_1
 
-#define ERXCTLR_ED_BIT		(U(1) << 0)
+#define ERXCTLR_ED_SHIFT	U(0)
+#define ERXCTLR_ED_BIT		(U(1) << ERXCTLR_ED_SHIFT)
 #define ERXCTLR_UE_BIT		(U(1) << 4)
 
 #define ERXPFGCTL_UC_BIT	(U(1) << 1)
diff --git a/include/lib/cpus/aarch64/cortex_hunter.h b/include/lib/cpus/aarch64/cortex_hunter.h
index 8b59fd9..24bd217 100644
--- a/include/lib/cpus/aarch64/cortex_hunter.h
+++ b/include/lib/cpus/aarch64/cortex_hunter.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,9 @@
 
 #define CORTEX_HUNTER_MIDR					U(0x410FD810)
 
+/* Cortex Hunter loop count for CVE-2022-23960 mitigation */
+#define CORTEX_HUNTER_BHB_LOOP_COUNT				U(132)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_makalu.h b/include/lib/cpus/aarch64/cortex_makalu.h
index 4e0dc86..ee59657 100644
--- a/include/lib/cpus/aarch64/cortex_makalu.h
+++ b/include/lib/cpus/aarch64/cortex_makalu.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,9 @@
 
 #define CORTEX_MAKALU_MIDR					U(0x410FD4D0)
 
+/* Cortex Makalu loop count for CVE-2022-23960 mitigation */
+#define CORTEX_MAKALU_BHB_LOOP_COUNT				U(38)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_makalu_elp_arm.h b/include/lib/cpus/aarch64/cortex_makalu_elp_arm.h
index a0d788e..9ed5ee3 100644
--- a/include/lib/cpus/aarch64/cortex_makalu_elp_arm.h
+++ b/include/lib/cpus/aarch64/cortex_makalu_elp_arm.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,9 @@
 
 #define CORTEX_MAKALU_ELP_ARM_MIDR				U(0x410FD4E0)
 
+/* Cortex Makalu ELP loop count for CVE-2022-23960 mitigation */
+#define CORTEX_MAKALU_ELP_ARM_BHB_LOOP_COUNT			U(132)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_x1.h b/include/lib/cpus/aarch64/cortex_x1.h
new file mode 100644
index 0000000..e3661a8
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_x1.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2022, Google LLC. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_X1_H
+#define CORTEX_X1_H
+
+/* Cortex-X1 MIDR for r1p0 */
+#define CORTEX_X1_MIDR			U(0x411fd440)
+
+/* Cortex-X1 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_X1_BHB_LOOP_COUNT	U(32)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_X1_CPUECTLR_EL1		S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_X1_ACTLR2_EL1		S3_0_C15_C1_1
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_X1_CPUPWRCTLR_EL1	S3_0_C15_C2_7
+#define CORTEX_X1_CORE_PWRDN_EN_MASK	U(0x1)
+
+#endif /* CORTEX_X1_H */
diff --git a/include/lib/cpus/aarch64/dsu_def.h b/include/lib/cpus/aarch64/dsu_def.h
index 0969acf..577de61 100644
--- a/include/lib/cpus/aarch64/dsu_def.h
+++ b/include/lib/cpus/aarch64/dsu_def.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -32,6 +32,7 @@
 #define CLUSTERACTLR_EL1	S3_0_C15_C3_3
 
 #define CLUSTERACTLR_EL1_DISABLE_CLOCK_GATING	(ULL(1) << 15)
+#define CLUSTERACTLR_EL1_DISABLE_SCLK_GATING	(ULL(3) << 15)
 
 /********************************************************************
  * Masks applied for DSU errata workarounds
diff --git a/include/lib/cpus/aarch64/neoverse_demeter.h b/include/lib/cpus/aarch64/neoverse_demeter.h
index 230ed66..f1afae7 100644
--- a/include/lib/cpus/aarch64/neoverse_demeter.h
+++ b/include/lib/cpus/aarch64/neoverse_demeter.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,9 @@
 
 #define NEOVERSE_DEMETER_MIDR				U(0x410FD4F0)
 
+/* Neoverse Demeter loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_DEMETER_BHB_LOOP_COUNT			U(132)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/neoverse_poseidon.h b/include/lib/cpus/aarch64/neoverse_poseidon.h
index 0a8b1d1..798ecd1 100644
--- a/include/lib/cpus/aarch64/neoverse_poseidon.h
+++ b/include/lib/cpus/aarch64/neoverse_poseidon.h
@@ -10,6 +10,9 @@
 
 #define NEOVERSE_POSEIDON_MIDR                      		U(0x410FD830)
 
+/* Neoverse Poseidon loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_POSEIDON_BHB_LOOP_COUNT			U(132)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
diff --git a/include/services/ffa_svc.h b/include/services/ffa_svc.h
index 2b4a377..0836579 100644
--- a/include/services/ffa_svc.h
+++ b/include/services/ffa_svc.h
@@ -38,6 +38,7 @@
 #define FFA_VERSION_MINOR_SHIFT		0
 #define FFA_VERSION_MINOR_MASK		U(0xFFFF)
 #define FFA_VERSION_BIT31_MASK 		U(0x1u << 31)
+#define FFA_VERSION_MASK		U(0xFFFFFFFF)
 
 
 #define MAKE_FFA_VERSION(major, minor) 	\
diff --git a/lib/cpus/aarch32/cortex_a57.S b/lib/cpus/aarch32/cortex_a57.S
index 2e97abb..18ee1f9 100644
--- a/lib/cpus/aarch32/cortex_a57.S
+++ b/lib/cpus/aarch32/cortex_a57.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -396,6 +396,11 @@
 	bx	lr
 endfunc check_errata_cve_2018_3639
 
+func check_errata_cve_2022_23960
+	mov	r0, #ERRATA_MISSING
+	bx	lr
+endfunc check_errata_cve_2022_23960
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A57.
 	 * Shall clobber: r0-r6
@@ -600,6 +605,7 @@
 	report_errata ERRATA_A57_859972, cortex_a57, 859972
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a57, cve_2017_5715
 	report_errata WORKAROUND_CVE_2018_3639, cortex_a57, cve_2018_3639
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a57, cve_2022_23960
 
 	pop	{r12, lr}
 	bx	lr
diff --git a/lib/cpus/aarch32/cortex_a72.S b/lib/cpus/aarch32/cortex_a72.S
index ff2b0e6..03914b2 100644
--- a/lib/cpus/aarch32/cortex_a72.S
+++ b/lib/cpus/aarch32/cortex_a72.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -101,6 +101,11 @@
 	bx	lr
 endfunc check_errata_cve_2018_3639
 
+func check_errata_cve_2022_23960
+	mov	r0, #ERRATA_MISSING
+	bx	lr
+endfunc check_errata_cve_2022_23960
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A72.
 	 * -------------------------------------------------
@@ -260,6 +265,7 @@
 	report_errata ERRATA_A72_859971, cortex_a72, 859971
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715
 	report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a72, cve_2022_23960
 
 	pop	{r12, lr}
 	bx	lr
diff --git a/lib/cpus/aarch64/cortex_a510.S b/lib/cpus/aarch64/cortex_a510.S
index 34e1082..f444077 100644
--- a/lib/cpus/aarch64/cortex_a510.S
+++ b/lib/cpus/aarch64/cortex_a510.S
@@ -301,6 +301,7 @@
 	report_errata ERRATA_A510_2250311, cortex_a510, 2250311
 	report_errata ERRATA_A510_2218950, cortex_a510, 2218950
 	report_errata ERRATA_A510_2172148, cortex_a510, 2172148
+	report_errata ERRATA_DSU_2313941, cortex_a510, dsu_2313941
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -312,12 +313,15 @@
 
 	/* Disable speculative loads */
 	msr	SSBS, xzr
-	isb
 
 	/* Get the CPU revision and stash it in x18. */
 	bl	cpu_get_rev_var
 	mov	x18, x0
 
+#if ERRATA_DSU_2313941
+	bl	errata_dsu_2313941_wa
+#endif
+
 #if ERRATA_A510_1922240
 	mov	x0, x18
 	bl	errata_cortex_a510_1922240_wa
@@ -353,6 +357,7 @@
 	bl	errata_cortex_a510_2172148_wa
 #endif
 
+	isb
 	ret	x19
 endfunc cortex_a510_reset_func
 
diff --git a/lib/cpus/aarch64/cortex_a710.S b/lib/cpus/aarch64/cortex_a710.S
index aea62ae..5d8e9a6 100644
--- a/lib/cpus/aarch64/cortex_a710.S
+++ b/lib/cpus/aarch64/cortex_a710.S
@@ -310,6 +310,49 @@
 	b       cpu_rev_var_ls
 endfunc check_errata_2282622
 
+/* ---------------------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 2008768.
+ * This applies to revision r0p0, r1p0 and r2p0.
+ * It is fixed in r2p1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x2, x17
+ * ---------------------------------------------------------------
+ */
+func errata_a710_2008768_wa
+	mov     x17, x30
+	bl      check_errata_2008768
+	cbz     x0, 1f
+
+	/* Stash ERRSELR_EL1 in x2 */
+	mrs	x2, ERRSELR_EL1
+
+	/* Select error record 0 and clear ED bit */
+	msr	ERRSELR_EL1, xzr
+	mrs	x1, ERXCTLR_EL1
+	bfi	x1, xzr, #ERXCTLR_ED_SHIFT, #1
+	msr	ERXCTLR_EL1, x1
+
+	/* Select error record 1 and clear ED bit */
+	mov	x0, #1
+	msr	ERRSELR_EL1, x0
+	mrs	x1, ERXCTLR_EL1
+	bfi	x1, xzr, #ERXCTLR_ED_SHIFT, #1
+	msr	ERXCTLR_EL1, x1
+
+	/* Restore ERRSELR_EL1 from x2 */
+	msr	ERRSELR_EL1, x2
+
+1:
+	ret     x17
+endfunc errata_a710_2008768_wa
+
+func check_errata_2008768
+	/* Applies to r0p0, r1p0 and r2p0 */
+	mov     x1, #0x20
+	b       cpu_rev_var_ls
+endfunc check_errata_2008768
+
 func check_errata_cve_2022_23960
 #if WORKAROUND_CVE_2022_23960
 	mov	x0, #ERRATA_APPLIES
@@ -324,6 +367,14 @@
 	 * ----------------------------------------------------
 	 */
 func cortex_a710_core_pwr_dwn
+
+#if ERRATA_A710_2008768
+	mov	x4, x30
+	bl	cpu_get_rev_var
+	bl	errata_a710_2008768_wa
+	mov	x30, x4
+#endif
+
 	/* ---------------------------------------------------
 	 * Enable CPU power down bit in power control register
 	 * ---------------------------------------------------
@@ -358,7 +409,9 @@
 	report_errata ERRATA_A710_2267065, cortex_a710, 2267065
 	report_errata ERRATA_A710_2136059, cortex_a710, 2136059
 	report_errata ERRATA_A710_2282622, cortex_a710, 2282622
+	report_errata ERRATA_A710_2008768, cortex_a710, 2008768
 	report_errata WORKAROUND_CVE_2022_23960, cortex_a710, cve_2022_23960
+	report_errata ERRATA_DSU_2313941, cortex_a710, dsu_2313941
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -374,6 +427,10 @@
 	bl	cpu_get_rev_var
 	mov	x18, x0
 
+#if ERRATA_DSU_2313941
+	bl	errata_dsu_2313941_wa
+#endif
+
 #if ERRATA_A710_1987031
 	mov	x0, x18
 	bl	errata_a710_1987031_wa
diff --git a/lib/cpus/aarch64/cortex_a78.S b/lib/cpus/aarch64/cortex_a78.S
index 1a6f848..be94e91 100644
--- a/lib/cpus/aarch64/cortex_a78.S
+++ b/lib/cpus/aarch64/cortex_a78.S
@@ -267,6 +267,62 @@
 	b	cpu_rev_var_range
 endfunc check_errata_2242635
 
+/* --------------------------------------------------
+ * Errata Workaround for Cortex A78 Errata 2376745.
+ * This applies to revisions r0p0, r1p0, r1p1, and r1p2.
+ * It is still open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * --------------------------------------------------
+ */
+func errata_a78_2376745_wa
+	/* Check revision. */
+	mov	x17, x30
+	bl	check_errata_2376745
+	cbz	x0, 1f
+
+	/* Apply the workaround. */
+	mrs	x1, CORTEX_A78_ACTLR2_EL1
+	orr	x1, x1, #BIT(0)
+	msr	CORTEX_A78_ACTLR2_EL1, x1
+1:
+	ret	x17
+endfunc errata_a78_2376745_wa
+
+func check_errata_2376745
+	/* Applies to r0p0, r1p0, r1p1, and r1p2 */
+	mov	x1, #CPU_REV(1, 2)
+	b	cpu_rev_var_ls
+endfunc check_errata_2376745
+
+/* --------------------------------------------------
+ * Errata Workaround for Cortex A78 Errata 2395406.
+ * This applies to revisions r0p0, r1p0, r1p1, and r1p2.
+ * It is still open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * --------------------------------------------------
+ */
+func errata_a78_2395406_wa
+	/* Check revision. */
+	mov	x17, x30
+	bl	check_errata_2395406
+	cbz	x0, 1f
+
+	/* Apply the workaround. */
+	mrs	x1, CORTEX_A78_ACTLR2_EL1
+	orr	x1, x1, #BIT(40)
+	msr	CORTEX_A78_ACTLR2_EL1, x1
+1:
+	ret	x17
+endfunc errata_a78_2395406_wa
+
+func check_errata_2395406
+	/* Applies to r0p0, r1p0, r1p1, and r1p2 */
+	mov	x1, #CPU_REV(1, 2)
+	b	cpu_rev_var_ls
+endfunc check_errata_2395406
+
 func check_errata_cve_2022_23960
 #if WORKAROUND_CVE_2022_23960
 	mov	x0, #ERRATA_APPLIES
@@ -320,6 +376,16 @@
 	bl	errata_a78_2242635_wa
 #endif
 
+#if ERRATA_A78_2376745
+	mov	x0, x18
+	bl	errata_a78_2376745_wa
+#endif
+
+#if ERRATA_A78_2395406
+	mov	x0, x18
+	bl	errata_a78_2395406_wa
+#endif
+
 #if ENABLE_AMU
 	/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
 	mrs	x0, actlr_el3
@@ -390,6 +456,8 @@
 	report_errata ERRATA_A78_1952683, cortex_a78, 1952683
 	report_errata ERRATA_A78_2132060, cortex_a78, 2132060
 	report_errata ERRATA_A78_2242635, cortex_a78, 2242635
+	report_errata ERRATA_A78_2376745, cortex_a78, 2376745
+	report_errata ERRATA_A78_2395406, cortex_a78, 2395406
 	report_errata WORKAROUND_CVE_2022_23960, cortex_a78, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
diff --git a/lib/cpus/aarch64/cortex_hunter.S b/lib/cpus/aarch64/cortex_hunter.S
index 2ab4296..973637e 100644
--- a/lib/cpus/aarch64/cortex_hunter.S
+++ b/lib/cpus/aarch64/cortex_hunter.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <cortex_hunter.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,9 +22,32 @@
 #error "Cortex Hunter supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table CORTEX_HUNTER_BHB_LOOP_COUNT, cortex_hunter
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 func cortex_hunter_reset_func
 	/* Disable speculative loads */
 	msr	SSBS, xzr
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex Hunter generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_hunter
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
 	isb
 	ret
 endfunc cortex_hunter_reset_func
@@ -49,6 +73,18 @@
  * Errata printing function for Cortex Hunter. Must follow AAPCS.
  */
 func cortex_hunter_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata WORKAROUND_CVE_2022_23960, cortex_hunter, cve_2022_23960
+
+	ldp	x8, x30, [sp], #16
 	ret
 endfunc cortex_hunter_errata_report
 #endif
diff --git a/lib/cpus/aarch64/cortex_makalu.S b/lib/cpus/aarch64/cortex_makalu.S
index 98c7d6d..7603210 100644
--- a/lib/cpus/aarch64/cortex_makalu.S
+++ b/lib/cpus/aarch64/cortex_makalu.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <cortex_makalu.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,9 +22,32 @@
 #error "Cortex Makalu supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table CORTEX_MAKALU_BHB_LOOP_COUNT, cortex_makalu
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov     x0, #ERRATA_APPLIES
+#else
+	mov     x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 func cortex_makalu_reset_func
 	/* Disable speculative loads */
 	msr	SSBS, xzr
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex Makalu generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_makalu
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
 	isb
 	ret
 endfunc cortex_makalu_reset_func
@@ -49,6 +73,18 @@
  * Errata printing function for Cortex Makalu. Must follow AAPCS.
  */
 func cortex_makalu_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata WORKAROUND_CVE_2022_23960, cortex_makalu, cve_2022_23960
+
+	ldp     x8, x30, [sp], #16
 	ret
 endfunc cortex_makalu_errata_report
 #endif
diff --git a/lib/cpus/aarch64/cortex_makalu_elp_arm.S b/lib/cpus/aarch64/cortex_makalu_elp_arm.S
index fbbf205..f4d2df0 100644
--- a/lib/cpus/aarch64/cortex_makalu_elp_arm.S
+++ b/lib/cpus/aarch64/cortex_makalu_elp_arm.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <cortex_makalu_elp_arm.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
 #error "Cortex Makalu ELP supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table CORTEX_MAKALU_ELP_ARM_BHB_LOOP_COUNT, cortex_makalu_elp_arm
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ----------------------------------------------------
@@ -37,22 +42,53 @@
 	ret
 endfunc cortex_makalu_elp_arm_core_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for Cortex Makalu ELP. Must follow AAPCS.
- */
-func cortex_makalu_elp_arm_errata_report
-	ret
-endfunc cortex_makalu_elp_arm_errata_report
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
 #endif
+	ret
+endfunc check_errata_cve_2022_23960
 
 func cortex_makalu_elp_arm_reset_func
 	/* Disable speculative loads */
 	msr	SSBS, xzr
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex Makalu ELP generic vectors are overridden to apply
+	 * errata mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_makalu_elp_arm
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
 	isb
 	ret
 endfunc cortex_makalu_elp_arm_reset_func
 
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex Makalu ELP. Must follow AAPCS.
+ */
+func cortex_makalu_elp_arm_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata WORKAROUND_CVE_2022_23960, cortex_makalu_elp_arm, cve_2022_23960
+
+	ldp	x8, x30, [sp], #16
+	ret
+endfunc cortex_makalu_elp_arm_errata_report
+#endif
+
 	/* ---------------------------------------------
 	 * This function provides Cortex Makalu ELP-
 	 * specific register information for crash
diff --git a/lib/cpus/aarch64/cortex_x1.S b/lib/cpus/aarch64/cortex_x1.S
new file mode 100644
index 0000000..9a7f666
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_x1.S
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2022, Google LLC. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <cortex_x1.h>
+#include <cpu_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex-X1 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex-X1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table CORTEX_X1_BHB_LOOP_COUNT, cortex_x1
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+/* --------------------------------------------------
+ * Errata Workaround for X1 Erratum 1821534.
+ * This applies to revisions r0p0 and r1p0 of X1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_x1_1821534_wa
+	/* Compare x0 against revision r1p0 */
+	mov	x17, x30
+	bl	check_errata_1821534
+	cbz	x0, 1f
+	mrs	x1, CORTEX_X1_ACTLR2_EL1
+	orr	x1, x1, BIT(2)
+	msr	CORTEX_X1_ACTLR2_EL1, x1
+	isb
+1:
+	ret	x17
+endfunc errata_x1_1821534_wa
+
+func check_errata_1821534
+	/* Applies to r0p0 and r1p0 */
+	mov	x1, #0x10
+	b	cpu_rev_var_ls
+endfunc check_errata_1821534
+
+/* --------------------------------------------------
+ * Errata Workaround for X1 Erratum 1688305.
+ * This applies to revisions r0p0 and r1p0 of X1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_x1_1688305_wa
+	/* Compare x0 against revision r1p0 */
+	mov	x17, x30
+	bl	check_errata_1688305
+	cbz	x0, 1f
+	mrs	x0, CORTEX_X1_ACTLR2_EL1
+	orr	x0, x0, BIT(1)
+	msr	CORTEX_X1_ACTLR2_EL1, x0
+	isb
+
+1:
+	ret	x17
+endfunc errata_x1_1688305_wa
+
+func check_errata_1688305
+	/* Applies to r0p0 and r1p0 */
+	mov	x1, #0x10
+	b	cpu_rev_var_ls
+endfunc check_errata_1688305
+
+/* --------------------------------------------------
+ * Errata Workaround for X1 Erratum 1827429.
+ * This applies to revisions r0p0 and r1p0 of X1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_x1_1827429_wa
+	/* Compare x0 against revision r1p0 */
+	mov	x17, x30
+	bl	check_errata_1827429
+	cbz	x0, 1f
+	mrs	x0, CORTEX_X1_CPUECTLR_EL1
+	orr	x0, x0, BIT(53)
+	msr	CORTEX_X1_CPUECTLR_EL1, x0
+	isb
+
+1:
+	ret	x17
+endfunc errata_x1_1827429_wa
+
+func check_errata_1827429
+	/* Applies to r0p0 and r1p0 */
+	mov	x1, #0x10
+	b	cpu_rev_var_ls
+endfunc check_errata_1827429
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
+	/* -------------------------------------------------
+	 * The CPU Ops reset function for Cortex-X1.
+	 * Shall clobber: x0-x19
+	 * -------------------------------------------------
+	 */
+func cortex_x1_reset_func
+	mov	x19, x30
+	bl	cpu_get_rev_var
+	mov	x18, x0
+
+#if ERRATA_X1_1821534
+	mov	x0, x18
+	bl	errata_x1_1821534_wa
+#endif
+
+#if ERRATA_X1_1688305
+	mov	x0, x18
+	bl	errata_x1_1688305_wa
+#endif
+
+#if ERRATA_X1_1827429
+	mov	x0, x18
+	bl	errata_x1_1827429_wa
+#endif
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex-X1 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_x1
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+	isb
+	ret	x19
+endfunc cortex_x1_reset_func
+
+	/* ---------------------------------------------
+	 * HW will do the cache maintenance while powering down
+	 * ---------------------------------------------
+	 */
+func cortex_x1_core_pwr_dwn
+	/* ---------------------------------------------
+	 * Enable CPU power down bit in power control register
+	 * ---------------------------------------------
+	 */
+	mrs	x0, CORTEX_X1_CPUPWRCTLR_EL1
+	orr	x0, x0, #CORTEX_X1_CORE_PWRDN_EN_MASK
+	msr	CORTEX_X1_CPUPWRCTLR_EL1, x0
+	isb
+	ret
+endfunc cortex_x1_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex X1. Must follow AAPCS.
+ */
+func cortex_x1_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata ERRATA_X1_1821534, cortex_x1, 1821534
+	report_errata ERRATA_X1_1688305, cortex_x1, 1688305
+	report_errata ERRATA_X1_1827429, cortex_x1, 1827429
+	report_errata WORKAROUND_CVE_2022_23960, cortex_x1, cve_2022_23960
+
+	ldp	x8, x30, [sp], #16
+	ret
+endfunc cortex_x1_errata_report
+#endif
+
+	/* ---------------------------------------------
+	 * This function provides Cortex X1 specific
+	 * register information for crash reporting.
+	 * It needs to return with x6 pointing to
+	 * a list of register names in ascii and
+	 * x8 - x15 having values of registers to be
+	 * reported.
+	 * ---------------------------------------------
+	 */
+.section .rodata.cortex_x1_regs, "aS"
+cortex_x1_regs:  /* The ascii list of register names to be reported */
+	.asciz	"cpuectlr_el1", ""
+
+func cortex_x1_cpu_reg_dump
+	adr	x6, cortex_x1_regs
+	mrs	x8, CORTEX_X1_CPUECTLR_EL1
+	ret
+endfunc cortex_x1_cpu_reg_dump
+
+declare_cpu_ops cortex_x1, CORTEX_X1_MIDR, \
+	cortex_x1_reset_func, \
+	cortex_x1_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_x2.S b/lib/cpus/aarch64/cortex_x2.S
index 90a906b..3e0810b 100644
--- a/lib/cpus/aarch64/cortex_x2.S
+++ b/lib/cpus/aarch64/cortex_x2.S
@@ -305,6 +305,7 @@
 	report_errata ERRATA_X2_2147715, cortex_x2, 2147715
 	report_errata ERRATA_X2_2216384, cortex_x2, 2216384
 	report_errata WORKAROUND_CVE_2022_23960, cortex_x2, cve_2022_23960
+	report_errata ERRATA_DSU_2313941, cortex_x2, dsu_2313941
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -316,12 +317,15 @@
 
 	/* Disable speculative loads */
 	msr	SSBS, xzr
-	isb
 
 	/* Get the CPU revision and stash it in x18. */
 	bl	cpu_get_rev_var
 	mov	x18, x0
 
+#if ERRATA_DSU_2313941
+	bl	errata_dsu_2313941_wa
+#endif
+
 #if ERRATA_X2_2002765
 	mov	x0, x18
 	bl	errata_cortex_x2_2002765_wa
@@ -367,7 +371,7 @@
 #endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
 
 	isb
-	ret x19
+	ret	x19
 endfunc cortex_x2_reset_func
 
 	/* ---------------------------------------------
diff --git a/lib/cpus/aarch64/dsu_helpers.S b/lib/cpus/aarch64/dsu_helpers.S
index da052d5..419b6ea 100644
--- a/lib/cpus/aarch64/dsu_helpers.S
+++ b/lib/cpus/aarch64/dsu_helpers.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -139,3 +139,57 @@
 1:
 	ret	x17
 endfunc errata_dsu_936184_wa
+
+	/* -----------------------------------------------------------------------
+	 * DSU erratum 2313941 check function
+	 * Checks the DSU variant, revision and configuration to determine if
+	 * the erratum applies. The erratum applies to all configurations of the
+	 * DSU with revision-variant r0p0, r1p0, r2p0, r2p1, r3p0 or r3p1.
+	 *
+	 * The erratum is still open.
+	 *
+	 * This function is called from both assembly and C environment. So it
+	 * follows AAPCS.
+	 *
+	 * Clobbers: x0-x3
+	 * -----------------------------------------------------------------------
+	 */
+	.globl	check_errata_dsu_2313941
+	.globl	errata_dsu_2313941_wa
+
+func check_errata_dsu_2313941
+	mov	x2, #ERRATA_APPLIES
+	mov	x3, #ERRATA_NOT_APPLIES
+
+	/* Check if DSU version is less than or equal to r3p1 */
+	mrs	x1, CLUSTERIDR_EL1
+
+	/* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
+	ubfx	x0, x1, #CLUSTERIDR_REV_SHIFT,\
+			#(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
+	mov	x1, #(0x31 << CLUSTERIDR_REV_SHIFT)
+	cmp	x0, x1
+	csel	x0, x2, x3, LS
+	ret
+endfunc check_errata_dsu_2313941
+
+	/* --------------------------------------------------
+	 * Errata Workaround for DSU erratum #2313941.
+	 *
+	 * Can clobber only: x0-x17
+	 * --------------------------------------------------
+	 */
+func errata_dsu_2313941_wa
+	mov	x17, x30
+	bl	check_errata_dsu_2313941
+	cbz	x0, 1f
+
+	/* If erratum applies, disable high-level clock gating */
+	mrs	x0, CLUSTERACTLR_EL1
+	orr	x0, x0, #CLUSTERACTLR_EL1_DISABLE_SCLK_GATING
+	msr	CLUSTERACTLR_EL1, x0
+	isb
+1:
+	ret	x17
+endfunc errata_dsu_2313941_wa
+
diff --git a/lib/cpus/aarch64/neoverse_demeter.S b/lib/cpus/aarch64/neoverse_demeter.S
index f43c18b..41cb4ee 100644
--- a/lib/cpus/aarch64/neoverse_demeter.S
+++ b/lib/cpus/aarch64/neoverse_demeter.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <neoverse_demeter.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
 #error "Neoverse Demeter supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table NEOVERSE_DEMETER_BHB_LOOP_COUNT, neoverse_demeter
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ----------------------------------------------------
@@ -37,22 +42,52 @@
 	ret
 endfunc neoverse_demeter_core_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for Neoverse Demeter. Must follow AAPCS.
- */
-func neoverse_demeter_errata_report
-	ret
-endfunc neoverse_demeter_errata_report
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
 #endif
+	ret
+endfunc check_errata_cve_2022_23960
 
 func neoverse_demeter_reset_func
 	/* Disable speculative loads */
 	msr	SSBS, xzr
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Neoverse Demeter vectors are overridden to apply
+	 * errata mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_neoverse_demeter
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
 	isb
 	ret
 endfunc neoverse_demeter_reset_func
 
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Neoverse Demeter. Must follow AAPCS.
+ */
+func neoverse_demeter_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata WORKAROUND_CVE_2022_23960, neoverse_demeter, cve_2022_23960
+
+	ldp	x8, x30, [sp], #16
+	ret
+endfunc neoverse_demeter_errata_report
+#endif
+
 	/* ---------------------------------------------
 	 * This function provides Neoverse Demeter-
 	 * specific register information for crash
diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S
index b93f2a6..5b796dc 100644
--- a/lib/cpus/aarch64/neoverse_n2.S
+++ b/lib/cpus/aarch64/neoverse_n2.S
@@ -367,6 +367,10 @@
 	orr	x0, x0, #NEOVERSE_N2_CPUACTLR2_EL1_BIT_2
 	msr	NEOVERSE_N2_CPUACTLR2_EL1, x0
 
+#if ERRATA_DSU_2313941
+	bl	errata_dsu_2313941_wa
+#endif
+
 #if ERRATA_N2_2067956
 	mov	x0, x18
 	bl	errata_n2_2067956_wa
@@ -493,6 +497,7 @@
 	report_errata ERRATA_N2_2242400, neoverse_n2, 2242400
 	report_errata ERRATA_N2_2280757, neoverse_n2, 2280757
 	report_errata WORKAROUND_CVE_2022_23960, neoverse_n2, cve_2022_23960
+	report_errata ERRATA_DSU_2313941, neoverse_n2, dsu_2313941
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/neoverse_poseidon.S b/lib/cpus/aarch64/neoverse_poseidon.S
index 43a93aa..030293d 100644
--- a/lib/cpus/aarch64/neoverse_poseidon.S
+++ b/lib/cpus/aarch64/neoverse_poseidon.S
@@ -10,6 +10,7 @@
 #include <neoverse_poseidon.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
 #error "Neoverse Poseidon supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table NEOVERSE_POSEIDON_BHB_LOOP_COUNT, neoverse_poseidon
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 	/* ---------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ---------------------------------------------
@@ -37,22 +42,53 @@
 	ret
 endfunc neoverse_poseidon_core_pwr_dwn
 
-#if REPORT_ERRATA
-	/*
-	 * Errata printing function for Neoverse Poseidon. Must follow AAPCS.
-	 */
-func neoverse_poseidon_errata_report
-	ret
-endfunc neoverse_poseidon_errata_report
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
 #endif
+	ret
+endfunc check_errata_cve_2022_23960
 
 func neoverse_poseidon_reset_func
 	/* Disable speculative loads */
 	msr	SSBS, xzr
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Neoverse Poseidon generic vectors are overridden to apply
+	 * errata mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_neoverse_poseidon
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
 	isb
 	ret
 endfunc neoverse_poseidon_reset_func
 
+#if REPORT_ERRATA
+	/*
+	 * Errata printing function for Neoverse Poseidon. Must follow AAPCS.
+	 */
+func neoverse_poseidon_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata WORKAROUND_CVE_2022_23960, neoverse_poseidon, cve_2022_23960
+
+	ldp	x8, x30, [sp], #16
+	ret
+endfunc neoverse_poseidon_errata_report
+#endif
+
 	/* ---------------------------------------------
 	 * This function provides Neoverse-Poseidon specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 462ca9d..e14bb24 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2014-2022, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2014-2022, Arm Limited and Contributors. All rights reserved.
 # Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
@@ -333,6 +333,14 @@
 # present in r0p0 as well but there is no workaround for that revision.
 ERRATA_A78_2242635	?=0
 
+# Flag to apply erratum 2376745 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r1p1, and r1p2 of the A78 cpu. It is still open.
+ERRATA_A78_2376745	?=0
+
+# Flag to apply erratum 2395406 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r1p1, and r1p2 of the A78 cpu. It is still open.
+ERRATA_A78_2395406	?=0
+
 # Flag to apply erratum 1941500 workaround during reset. This erratum applies
 # to revisions r0p0 and r0p1 of the A78 AE cpu. It is still open.
 ERRATA_A78_AE_1941500	?=0
@@ -349,6 +357,18 @@
 # to revisions r0p0 and r0p1 of the A78 AE cpu. It is still open.
 ERRATA_A78_AE_2395408	?=0
 
+# Flag to apply erratum 1821534 workaround during reset. This erratum applies
+# to revisions r0p0 - r1p0 of the X1 cpu and is fixed in r1p1.
+ERRATA_X1_1821534	?=0
+
+# Flag to apply erratum 1688305 workaround during reset. This erratum applies
+# to revisions r0p0 - r1p0 of the X1 cpu and is fixed in r1p1.
+ERRATA_X1_1688305	?=0
+
+# Flag to apply erratum 1827429 workaround during reset. This erratum applies
+# to revisions r0p0 - r1p0 of the X1 cpu and is fixed in r1p1.
+ERRATA_X1_1827429	?=0
+
 # Flag to apply T32 CLREX workaround during reset. This erratum applies
 # only to r0p0 and r1p0 of the Neoverse N1 cpu.
 ERRATA_N1_1043202	?=0
@@ -484,6 +504,10 @@
 # to revision r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is fixed in r2p1.
 ERRATA_A710_2282622	?=0
 
+# Flag to apply erratum 2008768 workaround during reset. This erratum applies
+# to revision r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is fixed in r2p1.
+ERRATA_A710_2008768	?=0
+
 # Flag to apply erratum 2067956 workaround during reset. This erratum applies
 # to revision r0p0 of the Neoverse N2 cpu and is still open.
 ERRATA_N2_2067956	?=0
@@ -590,6 +614,11 @@
 # higher DSU power consumption on idle.
 ERRATA_DSU_936184	?=0
 
+# Flag to apply DSU erratum 2313941. This erratum applies to DSU revisions
+# r0p0, r1p0, r2p0, r2p1, r3p0, r3p1 and is still open. Applying the workaround
+# results in higher DSU power consumption on idle.
+ERRATA_DSU_2313941	?=0
+
 # Process ERRATA_A9_794073 flag
 $(eval $(call assert_boolean,ERRATA_A9_794073))
 $(eval $(call add_define,ERRATA_A9_794073))
@@ -842,6 +871,14 @@
 $(eval $(call assert_boolean,ERRATA_A78_2242635))
 $(eval $(call add_define,ERRATA_A78_2242635))
 
+# Process ERRATA_A78_2376745 flag
+$(eval $(call assert_boolean,ERRATA_A78_2376745))
+$(eval $(call add_define,ERRATA_A78_2376745))
+
+# Process ERRATA_A78_2395406 flag
+$(eval $(call assert_boolean,ERRATA_A78_2395406))
+$(eval $(call add_define,ERRATA_A78_2395406))
+
 # Process ERRATA_A78_AE_1941500 flag
 $(eval $(call assert_boolean,ERRATA_A78_AE_1941500))
 $(eval $(call add_define,ERRATA_A78_AE_1941500))
@@ -858,6 +895,18 @@
 $(eval $(call assert_boolean,ERRATA_A78_AE_2395408))
 $(eval $(call add_define,ERRATA_A78_AE_2395408))
 
+# Process ERRATA_X1_1821534 flag
+$(eval $(call assert_boolean,ERRATA_X1_1821534))
+$(eval $(call add_define,ERRATA_X1_1821534))
+
+# Process ERRATA_X1_1688305 flag
+$(eval $(call assert_boolean,ERRATA_X1_1688305))
+$(eval $(call add_define,ERRATA_X1_1688305))
+
+# Process ERRATA_X1_1827429 flag
+$(eval $(call assert_boolean,ERRATA_X1_1827429))
+$(eval $(call add_define,ERRATA_X1_1827429))
+
 # Process ERRATA_N1_1043202 flag
 $(eval $(call assert_boolean,ERRATA_N1_1043202))
 $(eval $(call add_define,ERRATA_N1_1043202))
@@ -990,6 +1039,10 @@
 $(eval $(call assert_boolean,ERRATA_A710_2282622))
 $(eval $(call add_define,ERRATA_A710_2282622))
 
+# Process ERRATA_A710_2008768 flag
+$(eval $(call assert_boolean,ERRATA_A710_2008768))
+$(eval $(call add_define,ERRATA_A710_2008768))
+
 # Process ERRATA_N2_2067956 flag
 $(eval $(call assert_boolean,ERRATA_N2_2067956))
 $(eval $(call add_define,ERRATA_N2_2067956))
@@ -1090,6 +1143,10 @@
 $(eval $(call assert_boolean,ERRATA_DSU_936184))
 $(eval $(call add_define,ERRATA_DSU_936184))
 
+# Process ERRATA_DSU_2313941 flag
+$(eval $(call assert_boolean,ERRATA_DSU_2313941))
+$(eval $(call add_define,ERRATA_DSU_2313941))
+
 # Errata build flags
 ifneq (${ERRATA_A53_843419},0)
 TF_LDFLAGS_aarch64	+= --fix-cortex-a53-843419
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 449f120..0f09ebe 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -284,16 +284,6 @@
 	}
 
 	/*
-	 * FEAT_AMUv1p1 virtual offset registers are only accessible from EL3
-	 * and EL2, when clear, this bit traps accesses from EL2 so we set it
-	 * to 1 when EL2 is present.
-	 */
-	if (is_armv8_6_feat_amuv1p1_present() &&
-		(el_implemented(2) != EL_IMPL_NONE)) {
-		scr_el3 |= SCR_AMVOFFEN_BIT;
-	}
-
-	/*
 	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
 	 * execution state setting all fields rather than relying of the hw.
 	 * Some fields have architecturally UNKNOWN reset values and these are
@@ -843,6 +833,12 @@
 	 */
 	write_scr_el3(read_scr_el3() | SCR_NS_BIT);
 
+	/*
+	 * Ensure the NS bit change is committed before the EL2/EL1
+	 * state restoration.
+	 */
+	isb();
+
 	/* Restore EL2 and EL1 sysreg contexts */
 	cm_el2_sysregs_context_restore(NON_SECURE);
 	cm_el1_sysregs_context_restore(NON_SECURE);
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index d329c3d..72566fd 100644
--- a/lib/extensions/amu/aarch64/amu.c
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -75,7 +75,7 @@
 		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
 }
 
-static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
+static inline __unused void ctx_write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
 {
 	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);
 
@@ -85,6 +85,16 @@
 	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
 }
 
+static inline __unused void ctx_write_scr_el3_amvoffen(cpu_context_t *ctx, uint64_t amvoffen)
+{
+	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
+
+	value &= ~SCR_AMVOFFEN_BIT;
+	value |= (amvoffen << SCR_AMVOFFEN_SHIFT) & SCR_AMVOFFEN_BIT;
+
+	write_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3, value);
+}
+
 static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
 {
 	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
@@ -226,7 +236,7 @@
 	 * in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
 	 * the Activity Monitor registers do not trap to EL3.
 	 */
-	write_cptr_el3_tam(ctx, 0U);
+	ctx_write_cptr_el3_tam(ctx, 0U);
 
 	/*
 	 * Retrieve the number of architected counters. All of these counters
@@ -285,6 +295,13 @@
 			 * used.
 			 */
 			write_hcr_el2_amvoffen(0U);
+		} else {
+			/*
+			 * Virtual offset registers are only accessible from EL3
+			 * and EL2. When this bit is clear, accesses from EL2 are
+			 * trapped to EL3, so set it to 1 when EL2 is present.
+			 */
+			ctx_write_scr_el3_amvoffen(ctx, 1U);
 		}
 
 #if AMU_RESTRICT_COUNTERS
diff --git a/lib/libc/snprintf.c b/lib/libc/snprintf.c
index 675d243..12f51c0 100644
--- a/lib/libc/snprintf.c
+++ b/lib/libc/snprintf.c
@@ -11,6 +11,16 @@
 #include <common/debug.h>
 #include <plat/common/platform.h>
 
+#define get_num_va_args(_args, _lcount)				\
+	(((_lcount) > 1)  ? va_arg(_args, long long int) :	\
+	(((_lcount) == 1) ? va_arg(_args, long int) :		\
+			    va_arg(_args, int)))
+
+#define get_unum_va_args(_args, _lcount)				\
+	(((_lcount) > 1)  ? va_arg(_args, unsigned long long int) :	\
+	(((_lcount) == 1) ? va_arg(_args, unsigned long int) :		\
+			    va_arg(_args, unsigned int)))
+
 #define CHECK_AND_PUT_CHAR(buf, size, chars_printed, ch)	\
 	do {						\
 		if ((chars_printed) < (size)) {		\
@@ -80,6 +90,11 @@
  * %u - unsigned decimal format
  * %p - pointer format
  *
+ * The following length specifiers are supported by this print
+ * %l - long int
+ * %ll - long long int
+ * %z - size_t sized integer formats
+ *
  * The following padding specifiers are supported by this print
  * %0NN - Left-pad the number with 0s (NN is a decimal number)
  * %NN - Left-pad the number or string with spaces (NN is a decimal number)
@@ -101,6 +116,7 @@
 	bool left;
 	bool capitalise;
 	size_t chars_printed = 0U;
+	unsigned int l_count;
 
 	if (n == 0U) {
 		/* There isn't space for anything. */
@@ -118,6 +134,7 @@
 		padc ='\0';
 		padn = 0;
 		capitalise = false;
+		l_count = 0;
 
 		if (*fmt == '%') {
 			fmt++;
@@ -152,7 +169,7 @@
 
 			case 'i':
 			case 'd':
-				num = va_arg(args, int);
+				num = get_num_va_args(args, l_count);
 
 				if (num < 0) {
 					CHECK_AND_PUT_CHAR(s, n, chars_printed,
@@ -170,10 +187,18 @@
 				string_print(&s, n, &chars_printed, str);
 				break;
 			case 'u':
-				unum = va_arg(args, unsigned int);
+				unum = get_unum_va_args(args, l_count);
 				unsigned_num_print(&s, n, &chars_printed,
 						   unum, 10, padc, padn, false);
 				break;
+			case 'z':
+				l_count = 1;
+				fmt++;
+				goto loop;
+			case 'l':
+				l_count++;
+				fmt++;
+				goto loop;
 			case 'p':
 				unum = (uintptr_t)va_arg(args, void *);
 				if (unum > 0U) {
@@ -186,7 +211,7 @@
 			case 'X':
 				capitalise = true;
 			case 'x':
-				unum = va_arg(args, unsigned int);
+				unum = get_unum_va_args(args, l_count);
 				unsigned_num_print(&s, n, &chars_printed,
 						   unum, 16, padc, padn,
 						   capitalise);
diff --git a/plat/intel/soc/agilex/bl2_plat_setup.c b/plat/intel/soc/agilex/bl2_plat_setup.c
index 03adcf3..211a7b7 100644
--- a/plat/intel/soc/agilex/bl2_plat_setup.c
+++ b/plat/intel/soc/agilex/bl2_plat_setup.c
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2019-2021, ARM Limited and Contributors. All rights reserved.
- * Copyright (c) 2019-2021, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -23,6 +23,7 @@
 #include "ccu/ncore_ccu.h"
 #include "qspi/cadence_qspi.h"
 #include "socfpga_emac.h"
+#include "socfpga_f2sdram_manager.h"
 #include "socfpga_handoff.h"
 #include "socfpga_mailbox.h"
 #include "socfpga_private.h"
@@ -81,8 +82,10 @@
 	mailbox_init();
 	agx_mmc_init();
 
-	if (!intel_mailbox_is_fpga_not_ready())
-		socfpga_bridges_enable();
+	if (!intel_mailbox_is_fpga_not_ready()) {
+		socfpga_bridges_enable(SOC2FPGA_MASK | LWHPS2FPGA_MASK |
+					FPGA2SOC_MASK);
+	}
 }
 
 
diff --git a/plat/intel/soc/agilex/include/agilex_noc.h b/plat/intel/soc/agilex/include/agilex_noc.h
deleted file mode 100644
index 9aba3c3..0000000
--- a/plat/intel/soc/agilex/include/agilex_noc.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef AGX_NOC_H
-#define AGX_NOC_H
-
-
-#define AXI_AP					(1<<0)
-#define FPGA2SOC				(1<<16)
-#define MPU					(1<<24)
-#define AGX_NOC_PER_SCR_NAND			0xffd21000
-#define AGX_NOC_PER_SCR_NAND_DATA		0xffd21004
-#define AGX_NOC_PER_SCR_USB0			0xffd2100c
-#define AGX_NOC_PER_SCR_USB1			0xffd21010
-#define AGX_NOC_PER_SCR_SPI_M0			0xffd2101c
-#define AGX_NOC_PER_SCR_SPI_M1			0xffd21020
-#define AGX_NOC_PER_SCR_SPI_S0			0xffd21024
-#define AGX_NOC_PER_SCR_SPI_S1			0xffd21028
-#define AGX_NOC_PER_SCR_EMAC0			0xffd2102c
-#define AGX_NOC_PER_SCR_EMAC1			0xffd21030
-#define AGX_NOC_PER_SCR_EMAC2			0xffd21034
-#define AGX_NOC_PER_SCR_SDMMC			0xffd21040
-#define AGX_NOC_PER_SCR_GPIO0			0xffd21044
-#define AGX_NOC_PER_SCR_GPIO1			0xffd21048
-#define AGX_NOC_PER_SCR_I2C0			0xffd21050
-#define AGX_NOC_PER_SCR_I2C1			0xffd21058
-#define AGX_NOC_PER_SCR_I2C2			0xffd2105c
-#define AGX_NOC_PER_SCR_I2C3			0xffd21060
-#define AGX_NOC_PER_SCR_SP_TIMER0		0xffd21064
-#define AGX_NOC_PER_SCR_SP_TIMER1		0xffd21068
-#define AGX_NOC_PER_SCR_UART0			0xffd2106c
-#define AGX_NOC_PER_SCR_UART1			0xffd21070
-
-
-#define AGX_NOC_SYS_SCR_DMA_ECC			0xffd21108
-#define AGX_NOC_SYS_SCR_EMAC0RX_ECC		0xffd2110c
-#define AGX_NOC_SYS_SCR_EMAC0TX_ECC		0xffd21110
-#define AGX_NOC_SYS_SCR_EMAC1RX_ECC		0xffd21114
-#define AGX_NOC_SYS_SCR_EMAC1TX_ECC		0xffd21118
-#define AGX_NOC_SYS_SCR_EMAC2RX_ECC		0xffd2111c
-#define AGX_NOC_SYS_SCR_EMAC2TX_ECC		0xffd21120
-#define AGX_NOC_SYS_SCR_NAND_ECC		0xffd2112c
-#define AGX_NOC_SYS_SCR_NAND_READ_ECC		0xffd21130
-#define AGX_NOC_SYS_SCR_NAND_WRITE_ECC		0xffd21134
-#define AGX_NOC_SYS_SCR_OCRAM_ECC		0xffd21138
-#define AGX_NOC_SYS_SCR_SDMMC_ECC		0xffd21140
-#define AGX_NOC_SYS_SCR_USB0_ECC		0xffd21144
-#define AGX_NOC_SYS_SCR_USB1_ECC		0xffd21148
-#define AGX_NOC_SYS_SCR_CLK_MGR			0xffd2114c
-#define AGX_NOC_SYS_SCR_IO_MGR			0xffd21154
-#define AGX_NOC_SYS_SCR_RST_MGR			0xffd21158
-#define AGX_NOC_SYS_SCR_SYS_MGR			0xffd2115c
-#define AGX_NOC_SYS_SCR_OSC0_TIMER		0xffd21160
-#define AGX_NOC_SYS_SCR_OSC1_TIMER		0xffd21164
-#define AGX_NOC_SYS_SCR_WATCHDOG0		0xffd21168
-#define AGX_NOC_SYS_SCR_WATCHDOG1		0xffd2116c
-#define AGX_NOC_SYS_SCR_WATCHDOG2		0xffd21170
-#define AGX_NOC_SYS_SCR_WATCHDOG3		0xffd21174
-#define AGX_NOC_SYS_SCR_DAP			0xffd21178
-#define AGX_NOC_SYS_SCR_L4_NOC_PROBES		0xffd21190
-#define AGX_NOC_SYS_SCR_L4_NOC_QOS		0xffd21194
-
-#define AGX_CCU_NOC_BRIDGE_CPU0_RAM		0xf7004688
-#define AGX_CCU_NOC_BRIDGE_IOM_RAM		0xf7004688
-
-#endif
diff --git a/plat/intel/soc/agilex/include/socfpga_plat_def.h b/plat/intel/soc/agilex/include/socfpga_plat_def.h
index 499684d..b216ab1 100644
--- a/plat/intel/soc/agilex/include/socfpga_plat_def.h
+++ b/plat/intel/soc/agilex/include/socfpga_plat_def.h
@@ -20,6 +20,7 @@
 
 /* Register Mapping */
 #define SOCFPGA_CCU_NOC_REG_BASE		0xf7000000
+#define SOCFPGA_F2SDRAMMGR_REG_BASE		U(0xf8024000)
 
 #define SOCFPGA_MMC_REG_BASE			0xff808000
 
diff --git a/plat/intel/soc/agilex/platform.mk b/plat/intel/soc/agilex/platform.mk
index 0e5f911..6fe0be1 100644
--- a/plat/intel/soc/agilex/platform.mk
+++ b/plat/intel/soc/agilex/platform.mk
@@ -65,6 +65,7 @@
 		plat/intel/soc/agilex/soc/agilex_clock_manager.c	\
 		plat/intel/soc/common/socfpga_psci.c			\
 		plat/intel/soc/common/socfpga_sip_svc.c			\
+		plat/intel/soc/common/socfpga_sip_svc_v2.c		\
 		plat/intel/soc/common/socfpga_topology.c		\
 		plat/intel/soc/common/sip/socfpga_sip_ecc.c		\
 		plat/intel/soc/common/sip/socfpga_sip_fcs.c		\
diff --git a/plat/intel/soc/common/include/socfpga_f2sdram_manager.h b/plat/intel/soc/common/include/socfpga_f2sdram_manager.h
new file mode 100644
index 0000000..82bb6cb
--- /dev/null
+++ b/plat/intel/soc/common/include/socfpga_f2sdram_manager.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SOCFPGA_F2SDRAMMANAGER_H
+#define SOCFPGA_F2SDRAMMANAGER_H
+
+#include "socfpga_plat_def.h"
+
+/* FPGA2SDRAM Register Map */
+#define SOCFPGA_F2SDRAMMGR_SIDEBANDMGR_FLAGINSTATUS0	0x14
+#define SOCFPGA_F2SDRAMMGR_SIDEBANDMGR_FLAGOUTCLR0	0x54
+#define SOCFPGA_F2SDRAMMGR_SIDEBANDMGR_FLAGOUTSET0	0x50
+
+#define FLAGOUTSETCLR_F2SDRAM0_ENABLE		(BIT(1))
+#define FLAGOUTSETCLR_F2SDRAM1_ENABLE		(BIT(4))
+#define FLAGOUTSETCLR_F2SDRAM2_ENABLE		(BIT(7))
+
+#define FLAGOUTSETCLR_F2SDRAM0_IDLEREQ		(BIT(0))
+#define FLAGOUTSETCLR_F2SDRAM1_IDLEREQ		(BIT(3))
+#define FLAGOUTSETCLR_F2SDRAM2_IDLEREQ		(BIT(6))
+#define FLAGINTSTATUS_F2SDRAM0_IDLEACK		(BIT(1))
+#define FLAGINTSTATUS_F2SDRAM1_IDLEACK		(BIT(5))
+#define FLAGINTSTATUS_F2SDRAM2_IDLEACK		(BIT(9))
+#define FLAGOUTSETCLR_F2SDRAM0_FORCE_DRAIN	(BIT(2))
+#define FLAGOUTSETCLR_F2SDRAM1_FORCE_DRAIN	(BIT(5))
+#define FLAGOUTSETCLR_F2SDRAM2_FORCE_DRAIN	(BIT(8))
+
+#define FLAGINTSTATUS_F2SOC_RESPEMPTY		(BIT(3))
+#define FLAGINTSTATUS_F2SDRAM0_RESPEMPTY	(BIT(3))
+#define FLAGINTSTATUS_F2SDRAM1_RESPEMPTY	(BIT(7))
+#define FLAGINTSTATUS_F2SDRAM2_RESPEMPTY	(BIT(11))
+
+#define SOCFPGA_F2SDRAMMGR(_reg)	(SOCFPGA_F2SDRAMMGR_REG_BASE \
+						+ (SOCFPGA_F2SDRAMMGR_##_reg))
+
+#endif /* SOCFPGA_F2SDRAMMGR_H */
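
Note on the new header: the sideband flag definitions pair an output request bit (IDLEREQ/FORCE_DRAIN) with a status bit (IDLEACK/RESPEMPTY) per F2SDRAM bridge. A minimal illustrative sketch of one way these bits could be driven, assuming the standard TF-A mmio accessors; the handshake actually used by the platform lives in the reset manager sources and additionally handles timeouts:

#include <lib/mmio.h>

#include "socfpga_f2sdram_manager.h"

static void f2sdram0_request_idle(void)
{
	/* Raise the idle request flag for the F2SDRAM0 bridge */
	mmio_write_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
		      FLAGOUTSETCLR_F2SDRAM0_IDLEREQ);

	/* Spin until the idle acknowledge flag is observed (no timeout here) */
	while ((mmio_read_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGINSTATUS0)) &
		FLAGINTSTATUS_F2SDRAM0_IDLEACK) == 0U) {
	}
}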
diff --git a/plat/intel/soc/common/include/socfpga_fcs.h b/plat/intel/soc/common/include/socfpga_fcs.h
index d3b7141..893551d 100644
--- a/plat/intel/soc/common/include/socfpga_fcs.h
+++ b/plat/intel/soc/common/include/socfpga_fcs.h
@@ -9,38 +9,300 @@
 
 /* FCS Definitions */
 
-#define FCS_RANDOM_WORD_SIZE		8U
-#define FCS_PROV_DATA_WORD_SIZE		44U
-#define FCS_SHA384_WORD_SIZE		12U
+#define FCS_RANDOM_WORD_SIZE					8U
+#define FCS_PROV_DATA_WORD_SIZE					44U
+#define FCS_SHA384_WORD_SIZE					12U
 
-#define FCS_RANDOM_BYTE_SIZE		(FCS_RANDOM_WORD_SIZE * 4U)
-#define FCS_PROV_DATA_BYTE_SIZE		(FCS_PROV_DATA_WORD_SIZE * 4U)
-#define FCS_SHA384_BYTE_SIZE		(FCS_SHA384_WORD_SIZE * 4U)
+#define FCS_RANDOM_BYTE_SIZE					(FCS_RANDOM_WORD_SIZE * 4U)
+#define FCS_RANDOM_EXT_MAX_WORD_SIZE				1020U
+#define FCS_PROV_DATA_BYTE_SIZE					(FCS_PROV_DATA_WORD_SIZE * 4U)
+#define FCS_SHA384_BYTE_SIZE					(FCS_SHA384_WORD_SIZE * 4U)
 
-#define FCS_CRYPTION_DATA_0		0x10100
+#define FCS_RANDOM_EXT_OFFSET					3
 
+#define FCS_MODE_DECRYPT					0x0
+#define FCS_MODE_ENCRYPT					0x1
+#define FCS_ENCRYPTION_DATA_0					0x10100
+#define FCS_DECRYPTION_DATA_0					0x10102
+#define FCS_OWNER_ID_OFFSET					0xC
+#define FCS_CRYPTION_CRYPTO_HEADER				0x07000000
+#define FCS_CRYPTION_RESP_WORD_SIZE				4U
+#define FCS_CRYPTION_RESP_SIZE_OFFSET				3U
+
+#define PSGSIGMA_TEARDOWN_MAGIC					0xB852E2A4
+#define	PSGSIGMA_SESSION_ID_ONE					0x1
+#define PSGSIGMA_UNKNOWN_SESSION				0xFFFFFFFF
+
+#define	RESERVED_AS_ZERO					0x0
+/* FCS Single cert */
+
+#define FCS_BIG_CNTR_SEL					0x1
+
+#define FCS_SVN_CNTR_0_SEL					0x2
+#define FCS_SVN_CNTR_1_SEL					0x3
+#define FCS_SVN_CNTR_2_SEL					0x4
+#define FCS_SVN_CNTR_3_SEL					0x5
+
+#define FCS_BIG_CNTR_VAL_MAX					495U
+#define FCS_SVN_CNTR_VAL_MAX					64U
+
+/* FCS Attestation Cert Request Parameter */
+
+#define FCS_ATTEST_FIRMWARE_CERT				0x01
+#define FCS_ATTEST_DEV_ID_SELF_SIGN_CERT			0x02
+#define FCS_ATTEST_DEV_ID_ENROLL_CERT				0x04
+#define FCS_ATTEST_ENROLL_SELF_SIGN_CERT			0x08
+#define FCS_ATTEST_ALIAS_CERT					0x10
+#define FCS_ATTEST_CERT_MAX_REQ_PARAM				0xFF
+
+/* FCS Crypto Service */
+
+#define FCS_CS_KEY_OBJ_MAX_WORD_SIZE				88U
+#define FCS_CS_KEY_INFO_MAX_WORD_SIZE				36U
+#define FCS_CS_KEY_RESP_STATUS_MASK				0xFF
+#define FCS_CS_KEY_RESP_STATUS_OFFSET				16U
+
+#define FCS_CS_FIELD_SIZE_MASK					0xFFFF
+#define FCS_CS_FIELD_FLAG_OFFSET				24
+#define FCS_CS_FIELD_FLAG_INIT					BIT(0)
+#define FCS_CS_FIELD_FLAG_UPDATE				BIT(1)
+#define FCS_CS_FIELD_FLAG_FINALIZE				BIT(2)
+
+#define FCS_AES_MAX_DATA_SIZE					0x10000000	/* 256 MB */
+#define FCS_AES_MIN_DATA_SIZE					0x20		/* 32 Byte */
+#define FCS_AES_CMD_MAX_WORD_SIZE				15U
+
+#define FCS_GET_DIGEST_CMD_MAX_WORD_SIZE			7U
+#define FCS_GET_DIGEST_RESP_MAX_WORD_SIZE			19U
+#define FCS_MAC_VERIFY_CMD_MAX_WORD_SIZE			23U
+#define FCS_MAC_VERIFY_RESP_MAX_WORD_SIZE			4U
+#define FCS_SHA_HMAC_CRYPTO_PARAM_SIZE_OFFSET			8U
+
+#define FCS_ECDSA_GET_PUBKEY_MAX_WORD_SIZE			5U
+#define FCS_ECDSA_SHA2_DATA_SIGN_CMD_MAX_WORD_SIZE		7U
+#define FCS_ECDSA_SHA2_DATA_SIG_VERIFY_CMD_MAX_WORD_SIZE	43U
+#define FCS_ECDSA_HASH_SIGN_CMD_MAX_WORD_SIZE			17U
+#define FCS_ECDSA_HASH_SIG_VERIFY_CMD_MAX_WORD_SIZE		52U
+#define FCS_ECDH_REQUEST_CMD_MAX_WORD_SIZE			29U
 /* FCS Payload Structure */
+typedef struct fcs_rng_payload_t {
+	uint32_t session_id;
+	uint32_t context_id;
+	uint32_t crypto_header;
+	uint32_t size;
+} fcs_rng_payload;
+
+typedef struct fcs_encrypt_payload_t {
+	uint32_t first_word;
+	uint32_t src_addr;
+	uint32_t src_size;
+	uint32_t dst_addr;
+	uint32_t dst_size;
+} fcs_encrypt_payload;
 
-typedef struct fcs_crypt_payload_t {
+typedef struct fcs_decrypt_payload_t {
 	uint32_t first_word;
+	uint32_t owner_id[2];
+	uint32_t src_addr;
+	uint32_t src_size;
+	uint32_t dst_addr;
+	uint32_t dst_size;
+} fcs_decrypt_payload;
+
+typedef struct fcs_encrypt_ext_payload_t {
+	uint32_t session_id;
+	uint32_t context_id;
+	uint32_t crypto_header;
+	uint32_t src_addr;
+	uint32_t src_size;
+	uint32_t dst_addr;
+	uint32_t dst_size;
+} fcs_encrypt_ext_payload;
+
+typedef struct fcs_decrypt_ext_payload_t {
+	uint32_t session_id;
+	uint32_t context_id;
+	uint32_t crypto_header;
+	uint32_t owner_id[2];
 	uint32_t src_addr;
 	uint32_t src_size;
 	uint32_t dst_addr;
 	uint32_t dst_size;
-} fcs_crypt_payload;
+} fcs_decrypt_ext_payload;
+
+typedef struct psgsigma_teardown_msg_t {
+	uint32_t reserved_word;
+	uint32_t magic_word;
+	uint32_t session_id;
+} psgsigma_teardown_msg;
+
+typedef struct fcs_cntr_set_preauth_payload_t {
+	uint32_t first_word;
+	uint32_t counter_value;
+} fcs_cntr_set_preauth_payload;
+
+typedef struct fcs_cs_key_payload_t {
+	uint32_t session_id;
+	uint32_t reserved0;
+	uint32_t reserved1;
+	uint32_t key_id;
+} fcs_cs_key_payload;
+
+typedef struct fcs_crypto_service_data_t {
+	uint32_t session_id;
+	uint32_t context_id;
+	uint32_t key_id;
+	uint32_t crypto_param_size;
+	uint64_t crypto_param;
+	uint8_t is_updated;
+} fcs_crypto_service_data;
+
+typedef struct fcs_crypto_service_aes_data_t {
+	uint32_t session_id;
+	uint32_t context_id;
+	uint32_t param_size;
+	uint32_t key_id;
+	uint32_t crypto_param[7];
+	uint8_t is_updated;
+} fcs_crypto_service_aes_data;
 
 /* Functions Definitions */
 
 uint32_t intel_fcs_random_number_gen(uint64_t addr, uint64_t *ret_size,
 				uint32_t *mbox_error);
+int intel_fcs_random_number_gen_ext(uint32_t session_id, uint32_t context_id,
+				uint32_t size, uint32_t *send_id);
 uint32_t intel_fcs_send_cert(uint64_t addr, uint64_t size,
 				uint32_t *send_id);
 uint32_t intel_fcs_get_provision_data(uint32_t *send_id);
-uint32_t intel_fcs_cryption(uint32_t mode, uint32_t src_addr,
-			uint32_t src_size, uint32_t dst_addr,
-			uint32_t dst_size, uint32_t *send_id);
+uint32_t intel_fcs_cntr_set_preauth(uint8_t counter_type,
+				int32_t counter_value,
+				uint32_t test_bit,
+				uint32_t *mbox_error);
+uint32_t intel_fcs_encryption(uint32_t src_addr, uint32_t src_size,
+				uint32_t dst_addr, uint32_t dst_size,
+				uint32_t *send_id);
+
+uint32_t intel_fcs_decryption(uint32_t src_addr, uint32_t src_size,
+				uint32_t dst_addr, uint32_t dst_size,
+				uint32_t *send_id);
 
+int intel_fcs_encryption_ext(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint32_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
+int intel_fcs_decryption_ext(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint32_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
+
+int intel_fcs_sigma_teardown(uint32_t session_id, uint32_t *mbox_error);
+int intel_fcs_chip_id(uint32_t *id_low, uint32_t *id_high, uint32_t *mbox_error);
+int intel_fcs_attestation_subkey(uint64_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
+int intel_fcs_get_measurement(uint64_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
 uint32_t intel_fcs_get_rom_patch_sha384(uint64_t addr, uint64_t *ret_size,
 				uint32_t *mbox_error);
 
+int intel_fcs_create_cert_on_reload(uint32_t cert_request,
+				uint32_t *mbox_error);
+int intel_fcs_get_attestation_cert(uint32_t cert_request, uint64_t dst_addr,
+				uint32_t *dst_size, uint32_t *mbox_error);
+
+int intel_fcs_open_crypto_service_session(uint32_t *session_id,
+				uint32_t *mbox_error);
+int intel_fcs_close_crypto_service_session(uint32_t session_id,
+				uint32_t *mbox_error);
+
+int intel_fcs_import_crypto_service_key(uint64_t src_addr, uint32_t src_size,
+				uint32_t *mbox_error);
+int intel_fcs_export_crypto_service_key(uint32_t session_id, uint32_t key_id,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
+int intel_fcs_remove_crypto_service_key(uint32_t session_id, uint32_t key_id,
+				uint32_t *mbox_error);
+int intel_fcs_get_crypto_service_key_info(uint32_t session_id, uint32_t key_id,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
+
+int intel_fcs_get_digest_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error);
+int intel_fcs_get_digest_update_finalize(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint8_t is_finalised, uint32_t *mbox_error);
+
+int intel_fcs_mac_verify_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error);
+int intel_fcs_mac_verify_update_finalize(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t data_size, uint8_t is_finalised,
+				uint32_t *mbox_error);
+
+int intel_fcs_ecdsa_hash_sign_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error);
+int intel_fcs_ecdsa_hash_sign_finalize(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
+
+int intel_fcs_ecdsa_hash_sig_verify_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error);
+int intel_fcs_ecdsa_hash_sig_verify_finalize(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
+
+int intel_fcs_ecdsa_sha2_data_sign_init(uint32_t session_id,
+				uint32_t context_id, uint32_t key_id,
+				uint32_t param_size, uint64_t param_data,
+				uint32_t *mbox_error);
+int intel_fcs_ecdsa_sha2_data_sign_update_finalize(uint32_t session_id,
+				uint32_t context_id, uint32_t src_addr,
+				uint32_t src_size, uint64_t dst_addr,
+				uint32_t *dst_size, uint8_t is_finalised,
+				uint32_t *mbox_error);
+
+int intel_fcs_ecdsa_sha2_data_sig_verify_init(uint32_t session_id,
+				uint32_t context_id, uint32_t key_id,
+				uint32_t param_size, uint64_t param_data,
+				uint32_t *mbox_error);
+int intel_fcs_ecdsa_sha2_data_sig_verify_update_finalize(uint32_t session_id,
+				uint32_t context_id, uint32_t src_addr,
+				uint32_t src_size, uint64_t dst_addr,
+				uint32_t *dst_size, uint32_t data_size,
+				uint8_t is_finalised, uint32_t *mbox_error);
+
+int intel_fcs_ecdsa_get_pubkey_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error);
+int intel_fcs_ecdsa_get_pubkey_finalize(uint32_t session_id, uint32_t context_id,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
+
+int intel_fcs_ecdh_request_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error);
+int intel_fcs_ecdh_request_finalize(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
+
+int intel_fcs_aes_crypt_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint64_t param_addr,
+				uint32_t param_size, uint32_t *mbox_error);
+int intel_fcs_aes_crypt_update_finalize(uint32_t session_id,
+				uint32_t context_id, uint64_t src_addr,
+				uint32_t src_size, uint64_t dst_addr,
+				uint32_t dst_size, uint8_t is_finalised,
+				uint32_t *send_id);
+
 #endif /* SOCFPGA_FCS_H */
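
The new crypto-service entry points follow a session-oriented init/update/finalize pattern. A hypothetical caller-side sketch of the single-shot digest path, with placeholder context, key and parameter values; in the patch these functions are only reached through the SiP SMC dispatcher:

#include <stdint.h>

#include "socfpga_fcs.h"
#include "socfpga_sip_svc.h"

int fcs_digest_example(uint64_t src, uint32_t src_size,
		       uint64_t dst, uint32_t *dst_size)
{
	uint32_t session_id, mbox_err;
	int ret;

	ret = intel_fcs_open_crypto_service_session(&session_id, &mbox_err);
	if (ret != INTEL_SIP_SMC_STATUS_OK)
		return ret;

	/* context_id (0x1), key_id (0x1) and crypto param are placeholders */
	ret = intel_fcs_get_digest_init(session_id, 0x1U, 0x1U,
					4U, 0U, &mbox_err);
	if (ret == INTEL_SIP_SMC_STATUS_OK) {
		/* Single-shot: finalize in the same call (is_finalised = 1) */
		ret = intel_fcs_get_digest_update_finalize(session_id, 0x1U,
					(uint32_t)src, src_size, dst, dst_size,
					1U, &mbox_err);
	}

	(void)intel_fcs_close_crypto_service_session(session_id, &mbox_err);
	return ret;
}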
diff --git a/plat/intel/soc/common/include/socfpga_mailbox.h b/plat/intel/soc/common/include/socfpga_mailbox.h
index b260a62..1f4b2a4 100644
--- a/plat/intel/soc/common/include/socfpga_mailbox.h
+++ b/plat/intel/soc/common/include/socfpga_mailbox.h
@@ -10,95 +10,124 @@
 #include <lib/utils_def.h>
 
 
-#define MBOX_OFFSET			0xffa30000
+#define MBOX_OFFSET					0xffa30000
 
-#define MBOX_ATF_CLIENT_ID		0x1U
-#define MBOX_MAX_JOB_ID			0xFU
-#define MBOX_MAX_IND_JOB_ID		(MBOX_MAX_JOB_ID - 1U)
-#define MBOX_JOB_ID			MBOX_MAX_JOB_ID
-
+#define MBOX_ATF_CLIENT_ID				0x1U
+#define MBOX_MAX_JOB_ID					0xFU
+#define MBOX_MAX_IND_JOB_ID				(MBOX_MAX_JOB_ID - 1U)
+#define MBOX_JOB_ID					MBOX_MAX_JOB_ID
+#define MBOX_TEST_BIT					BIT(31)
 
 /* Mailbox Shared Memory Register Map */
-#define MBOX_CIN			0x00
-#define MBOX_ROUT			0x04
-#define MBOX_URG			0x08
-#define MBOX_INT			0x0C
-#define MBOX_COUT			0x20
-#define MBOX_RIN			0x24
-#define MBOX_STATUS			0x2C
-#define MBOX_CMD_BUFFER			0x40
-#define MBOX_RESP_BUFFER		0xC0
+#define MBOX_CIN					0x00
+#define MBOX_ROUT					0x04
+#define MBOX_URG					0x08
+#define MBOX_INT					0x0C
+#define MBOX_COUT					0x20
+#define MBOX_RIN					0x24
+#define MBOX_STATUS					0x2C
+#define MBOX_CMD_BUFFER					0x40
+#define MBOX_RESP_BUFFER				0xC0
 
 /* Mailbox SDM doorbell */
-#define MBOX_DOORBELL_TO_SDM		0x400
-#define MBOX_DOORBELL_FROM_SDM		0x480
+#define MBOX_DOORBELL_TO_SDM				0x400
+#define MBOX_DOORBELL_FROM_SDM				0x480
 
 
 /* Mailbox commands */
 
-#define MBOX_CMD_NOOP			0x00
-#define MBOX_CMD_SYNC			0x01
-#define MBOX_CMD_RESTART		0x02
-#define MBOX_CMD_CANCEL			0x03
-#define MBOX_CMD_VAB_SRC_CERT		0x0B
-#define MBOX_CMD_GET_IDCODE		0x10
-#define MBOX_CMD_GET_USERCODE		0x13
-#define MBOX_CMD_REBOOT_HPS		0x47
+#define MBOX_CMD_NOOP					0x00
+#define MBOX_CMD_SYNC					0x01
+#define MBOX_CMD_RESTART				0x02
+#define MBOX_CMD_CANCEL					0x03
+#define MBOX_CMD_VAB_SRC_CERT				0x0B
+#define MBOX_CMD_GET_IDCODE				0x10
+#define MBOX_CMD_GET_USERCODE				0x13
+#define MBOX_CMD_GET_CHIPID				0x12
+#define MBOX_CMD_REBOOT_HPS				0x47
 
 /* Reconfiguration Commands */
-#define MBOX_CONFIG_STATUS		0x04
-#define MBOX_RECONFIG			0x06
-#define MBOX_RECONFIG_DATA		0x08
-#define MBOX_RECONFIG_STATUS		0x09
+#define MBOX_CONFIG_STATUS				0x04
+#define MBOX_RECONFIG					0x06
+#define MBOX_RECONFIG_DATA				0x08
+#define MBOX_RECONFIG_STATUS				0x09
 
 /* HWMON Commands */
-#define MBOX_HWMON_READVOLT		0x18
-#define MBOX_HWMON_READTEMP		0x19
+#define MBOX_HWMON_READVOLT				0x18
+#define MBOX_HWMON_READTEMP				0x19
 
 
 /* QSPI Commands */
-#define MBOX_CMD_QSPI_OPEN		0x32
-#define MBOX_CMD_QSPI_CLOSE		0x33
-#define MBOX_CMD_QSPI_SET_CS		0x34
-#define MBOX_CMD_QSPI_DIRECT		0x3B
+#define MBOX_CMD_QSPI_OPEN				0x32
+#define MBOX_CMD_QSPI_CLOSE				0x33
+#define MBOX_CMD_QSPI_SET_CS				0x34
+#define MBOX_CMD_QSPI_DIRECT				0x3B
 
 /* RSU Commands */
-#define MBOX_GET_SUBPARTITION_TABLE	0x5A
-#define MBOX_RSU_STATUS			0x5B
-#define MBOX_RSU_UPDATE			0x5C
-#define MBOX_HPS_STAGE_NOTIFY		0x5D
+#define MBOX_GET_SUBPARTITION_TABLE			0x5A
+#define MBOX_RSU_STATUS					0x5B
+#define MBOX_RSU_UPDATE					0x5C
+#define MBOX_HPS_STAGE_NOTIFY				0x5D
 
 /* FCS Command */
-#define MBOX_FCS_GET_PROVISION			0x7B
-#define MBOX_FCS_ENCRYPT_REQ			0x7E
-#define MBOX_FCS_DECRYPT_REQ			0x7F
-#define MBOX_FCS_RANDOM_GEN			0x80
+#define MBOX_FCS_GET_PROVISION				0x7B
+#define MBOX_FCS_CNTR_SET_PREAUTH			0x7C
+#define MBOX_FCS_ENCRYPT_REQ				0x7E
+#define MBOX_FCS_DECRYPT_REQ				0x7F
+#define MBOX_FCS_RANDOM_GEN				0x80
+#define MBOX_FCS_AES_CRYPT_REQ				0x81
+#define MBOX_FCS_GET_DIGEST_REQ				0x82
+#define MBOX_FCS_MAC_VERIFY_REQ				0x83
+#define MBOX_FCS_ECDSA_HASH_SIGN_REQ			0x84
+#define MBOX_FCS_ECDSA_SHA2_DATA_SIGN_REQ		0x85
+#define MBOX_FCS_ECDSA_HASH_SIG_VERIFY			0x86
+#define MBOX_FCS_ECDSA_SHA2_DATA_SIGN_VERIFY		0x87
+#define MBOX_FCS_ECDSA_GET_PUBKEY			0x88
+#define MBOX_FCS_ECDH_REQUEST				0x89
+#define MBOX_FCS_OPEN_CS_SESSION			0xA0
+#define MBOX_FCS_CLOSE_CS_SESSION			0xA1
+#define MBOX_FCS_IMPORT_CS_KEY				0xA5
+#define MBOX_FCS_EXPORT_CS_KEY				0xA6
+#define MBOX_FCS_REMOVE_CS_KEY				0xA7
+#define MBOX_FCS_GET_CS_KEY_INFO			0xA8
+
+/* PSG SIGMA Commands */
+#define MBOX_PSG_SIGMA_TEARDOWN				0xD5
+
+/* Attestation Commands */
+#define MBOX_CREATE_CERT_ON_RELOAD			0x180
+#define MBOX_GET_ATTESTATION_CERT			0x181
+#define MBOX_ATTESTATION_SUBKEY				0x182
+#define MBOX_GET_MEASUREMENT				0x183
+
 /* Miscellaneous commands */
 #define MBOX_GET_ROM_PATCH_SHA384	0x1B0
 
 /* Mailbox Definitions */
 
-#define CMD_DIRECT			0
-#define CMD_INDIRECT			1
-#define CMD_CASUAL			0
-#define CMD_URGENT			1
+#define CMD_DIRECT					0
+#define CMD_INDIRECT					1
+#define CMD_CASUAL					0
+#define CMD_URGENT					1
 
-#define MBOX_WORD_BYTE			4U
-#define MBOX_RESP_BUFFER_SIZE		16
-#define MBOX_CMD_BUFFER_SIZE		32
+#define MBOX_WORD_BYTE					4U
+#define MBOX_RESP_BUFFER_SIZE				16
+#define MBOX_CMD_BUFFER_SIZE				32
+#define MBOX_INC_HEADER_MAX_WORD_SIZE			1024U
 
 /* Execution states for HPS_STAGE_NOTIFY */
-#define HPS_EXECUTION_STATE_FSBL	0
-#define HPS_EXECUTION_STATE_SSBL	1
-#define HPS_EXECUTION_STATE_OS		2
+#define HPS_EXECUTION_STATE_FSBL			0
+#define HPS_EXECUTION_STATE_SSBL			1
+#define HPS_EXECUTION_STATE_OS				2
 
 /* Status Response */
-#define MBOX_RET_OK			0
-#define MBOX_RET_ERROR			-1
-#define MBOX_NO_RESPONSE		-2
-#define MBOX_WRONG_ID			-3
-#define MBOX_BUFFER_FULL		-4
-#define MBOX_TIMEOUT			-2047
+#define MBOX_RET_OK					0
+#define MBOX_RET_ERROR					-1
+#define MBOX_NO_RESPONSE				-2
+#define MBOX_WRONG_ID					-3
+#define MBOX_BUFFER_FULL				-4
+#define MBOX_BUSY					-5
+#define MBOX_TIMEOUT					-2047
 
 /* Reconfig Status Response */
 #define RECONFIG_STATUS_STATE				0
@@ -123,39 +152,56 @@
 
 /* Mailbox Macros */
 
-#define MBOX_ENTRY_TO_ADDR(_buf, ptr)	(MBOX_OFFSET + (MBOX_##_buf##_BUFFER) \
-						+ MBOX_WORD_BYTE * (ptr))
+#define MBOX_ENTRY_TO_ADDR(_buf, ptr)			(MBOX_OFFSET + (MBOX_##_buf##_BUFFER) \
+								+ MBOX_WORD_BYTE * (ptr))
 
 /* Mailbox interrupt flags and masks */
-#define MBOX_INT_FLAG_COE		0x1
-#define MBOX_INT_FLAG_RIE		0x2
-#define MBOX_INT_FLAG_UAE		0x100
-#define MBOX_COE_BIT(INTERRUPT)		((INTERRUPT) & 0x3)
-#define MBOX_UAE_BIT(INTERRUPT)		(((INTERRUPT) & (1<<8)))
+#define MBOX_INT_FLAG_COE				0x1
+#define MBOX_INT_FLAG_RIE				0x2
+#define MBOX_INT_FLAG_UAE				0x100
+#define MBOX_COE_BIT(INTERRUPT)				((INTERRUPT) & 0x3)
+#define MBOX_UAE_BIT(INTERRUPT)				(((INTERRUPT) & (1<<8)))
 
 /* Mailbox response and status */
-#define MBOX_RESP_ERR(BUFFER)		((BUFFER) & 0x00000fff)
-#define MBOX_RESP_LEN(BUFFER)		(((BUFFER) & 0x007ff000) >> 12)
-#define MBOX_RESP_CLIENT_ID(BUFFER)	(((BUFFER) & 0xf0000000) >> 28)
-#define MBOX_RESP_JOB_ID(BUFFER)	(((BUFFER) & 0x0f000000) >> 24)
-#define MBOX_STATUS_UA_MASK		(1<<8)
+#define MBOX_RESP_ERR(BUFFER)				((BUFFER) & 0x000007ff)
+#define MBOX_RESP_LEN(BUFFER)				(((BUFFER) & 0x007ff000) >> 12)
+#define MBOX_RESP_CLIENT_ID(BUFFER)			(((BUFFER) & 0xf0000000) >> 28)
+#define MBOX_RESP_JOB_ID(BUFFER)			(((BUFFER) & 0x0f000000) >> 24)
+#define MBOX_STATUS_UA_MASK				(1<<8)
 
 /* Mailbox command and response */
-#define MBOX_CLIENT_ID_CMD(CLIENT_ID)	((CLIENT_ID) << 28)
-#define MBOX_JOB_ID_CMD(JOB_ID)		(JOB_ID<<24)
-#define MBOX_CMD_LEN_CMD(CMD_LEN)	((CMD_LEN) << 12)
-#define MBOX_INDIRECT(val)		((val) << 11)
-#define MBOX_CMD_MASK(header)		((header) & 0x7ff)
+#define MBOX_CLIENT_ID_CMD(CLIENT_ID)			((CLIENT_ID) << 28)
+#define MBOX_JOB_ID_CMD(JOB_ID)				(JOB_ID<<24)
+#define MBOX_CMD_LEN_CMD(CMD_LEN)			((CMD_LEN) << 12)
+#define MBOX_INDIRECT(val)				((val) << 11)
+#define MBOX_CMD_MASK(header)				((header) & 0x7ff)
+
+/* Mailbox payload */
+#define MBOX_DATA_MAX_LEN				0x3ff
+#define MBOX_PAYLOAD_FLAG_BUSY				BIT(0)
 
 /* RSU Macros */
-#define RSU_VERSION_ACMF		BIT(8)
-#define RSU_VERSION_ACMF_MASK		0xff00
+#define RSU_VERSION_ACMF				BIT(8)
+#define RSU_VERSION_ACMF_MASK				0xff00
 
 /* Config Status Macros */
 #define CONFIG_STATUS_WORD_SIZE		16U
 #define CONFIG_STATUS_FW_VER_OFFSET	1
 #define CONFIG_STATUS_FW_VER_MASK	0x00FFFFFF
 
+/* Data structure */
+
+typedef struct mailbox_payload {
+	uint32_t header;
+	uint32_t data[MBOX_DATA_MAX_LEN];
+} mailbox_payload_t;
+
+typedef struct mailbox_container {
+	uint32_t flag;
+	uint32_t index;
+	mailbox_payload_t *payload;
+} mailbox_container_t;
+
 /* Mailbox Function Definitions */
 
 void mailbox_set_int(uint32_t interrupt_input);
@@ -168,8 +214,13 @@
 			unsigned int *resp_len);
 int mailbox_send_cmd_async(uint32_t *job_id, uint32_t cmd, uint32_t *args,
 			unsigned int len, unsigned int indirect);
+int mailbox_send_cmd_async_ext(uint32_t header_cmd, uint32_t *args,
+			unsigned int len);
 int mailbox_read_response(uint32_t *job_id, uint32_t *response,
 			unsigned int *resp_len);
+int mailbox_read_response_async(uint32_t *job_id, uint32_t *header,
+			uint32_t *response, unsigned int *resp_len,
+			uint8_t ignore_client_id);
 int iterate_resp(uint32_t mbox_resp_len, uint32_t *resp_buf,
 			unsigned int *resp_len);
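
For reference, the widened command/response macros compose a single 32-bit command header word. A small illustrative helper, assuming the same field layout the driver uses internally when it builds command headers:

#include <stdint.h>

#include "socfpga_mailbox.h"

/* Build the first command word exactly as the field macros describe it */
static uint32_t make_mbox_header(uint32_t cmd, uint32_t len, uint32_t indirect)
{
	return MBOX_CLIENT_ID_CMD(MBOX_ATF_CLIENT_ID) |
	       MBOX_JOB_ID_CMD(MBOX_JOB_ID) |
	       MBOX_CMD_LEN_CMD(len) |
	       MBOX_INDIRECT(indirect) |
	       MBOX_CMD_MASK(cmd);
}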
 
diff --git a/plat/intel/soc/common/include/socfpga_reset_manager.h b/plat/intel/soc/common/include/socfpga_reset_manager.h
index a976df7..cce16ab 100644
--- a/plat/intel/soc/common/include/socfpga_reset_manager.h
+++ b/plat/intel/soc/common/include/socfpga_reset_manager.h
@@ -9,11 +9,22 @@
 
 #include "socfpga_plat_def.h"
 
+#define SOCFPGA_BRIDGE_ENABLE			BIT(0)
+#define SOCFPGA_BRIDGE_HAS_MASK			BIT(1)
+
+#define SOC2FPGA_MASK				(1<<0)
+#define LWHPS2FPGA_MASK				(1<<1)
+#define FPGA2SOC_MASK				(1<<2)
+#define F2SDRAM0_MASK				(1<<3)
+#define F2SDRAM1_MASK				(1<<4)
+#define F2SDRAM2_MASK				(1<<5)
 
 /* Register Mapping */
 
 #define SOCFPGA_RSTMGR_STAT			0x000
 #define SOCFPGA_RSTMGR_HDSKEN			0x010
+#define SOCFPGA_RSTMGR_HDSKREQ			0x014
+#define SOCFPGA_RSTMGR_HDSKACK			0x018
 #define SOCFPGA_RSTMGR_MPUMODRST		0x020
 #define SOCFPGA_RSTMGR_PER0MODRST		0x024
 #define SOCFPGA_RSTMGR_PER1MODRST		0x028
@@ -78,14 +89,20 @@
 #define RSTMGR_HDSKEN_DEBUG_L3NOC		0x00020000
 #define RSTMGR_HDSKEN_SDRSELFREFEN		0x00000001
 
+#define RSTMGR_HDSKEQ_FPGAHSREQ			0x4
+
 #define RSTMGR_BRGMODRST_SOC2FPGA		0x1
 #define RSTMGR_BRGMODRST_LWHPS2FPGA		0x2
 #define RSTMGR_BRGMODRST_FPGA2SOC		0x4
+#define RSTMGR_BRGMODRST_F2SSDRAM0		0x8
 #define RSTMGR_BRGMODRST_F2SSDRAM1		0x10
 #define RSTMGR_BRGMODRST_F2SSDRAM2		0x20
 #define RSTMGR_BRGMODRST_MPFE			0x40
 #define RSTMGR_BRGMODRST_DDRSCH			0x40
 
+#define RSTMGR_HDSKREQ_FPGAHSREQ		(BIT(2))
+#define RSTMGR_HDSKACK_FPGAHSACK_MASK		(BIT(2))
+
 /* Definitions */
 
 #define RSTMGR_L2_MODRST			0x0100
@@ -94,7 +111,7 @@
 /* Macros */
 
 #define SOCFPGA_RSTMGR(_reg)		(SOCFPGA_RSTMGR_REG_BASE \
-						+ (SOCFPGA_RSTMGR_##_reg))
+					+ (SOCFPGA_RSTMGR_##_reg))
 #define RSTMGR_FIELD(_reg, _field)	(RSTMGR_##_reg##MODRST_##_field)
 
 /* Function Declarations */
@@ -102,7 +119,7 @@
 void deassert_peripheral_reset(void);
 void config_hps_hs_before_warm_reset(void);
 
-int socfpga_bridges_enable(void);
-int socfpga_bridges_disable(void);
+int socfpga_bridges_enable(uint32_t mask);
+int socfpga_bridges_disable(uint32_t mask);
 
 #endif /* SOCFPGA_RESETMANAGER_H */
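
With the mask parameter added to socfpga_bridges_enable()/socfpga_bridges_disable(), callers can now act on individual bridges. A brief usage sketch built only from the masks defined above; the selections are examples, not taken from the patch:

#include "socfpga_reset_manager.h"

static int enable_soc2fpga_only(void)
{
	/* Enable only the SOC2FPGA bridge; leave the others untouched */
	return socfpga_bridges_enable(SOC2FPGA_MASK);
}

static int disable_f2sdram_bridges(void)
{
	/* Disable the three FPGA-to-SDRAM bridges in one call */
	return socfpga_bridges_disable(F2SDRAM0_MASK | F2SDRAM1_MASK |
				       F2SDRAM2_MASK);
}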
diff --git a/plat/intel/soc/common/include/socfpga_sip_svc.h b/plat/intel/soc/common/include/socfpga_sip_svc.h
index 43f3dc4..0803eb5 100644
--- a/plat/intel/soc/common/include/socfpga_sip_svc.h
+++ b/plat/intel/soc/common/include/socfpga_sip_svc.h
@@ -9,29 +9,43 @@
 
 
 /* SiP status response */
-#define INTEL_SIP_SMC_STATUS_OK				0
-#define INTEL_SIP_SMC_STATUS_BUSY			0x1
-#define INTEL_SIP_SMC_STATUS_REJECTED			0x2
-#define INTEL_SIP_SMC_STATUS_ERROR			0x4
-#define INTEL_SIP_SMC_RSU_ERROR				0x7
+#define INTEL_SIP_SMC_STATUS_OK					0
+#define INTEL_SIP_SMC_STATUS_BUSY				0x1
+#define INTEL_SIP_SMC_STATUS_REJECTED				0x2
+#define INTEL_SIP_SMC_STATUS_NO_RESPONSE			0x3
+#define INTEL_SIP_SMC_STATUS_ERROR				0x4
+#define INTEL_SIP_SMC_RSU_ERROR					0x7
 
 /* SiP mailbox error code */
-#define GENERIC_RESPONSE_ERROR				0x3FF
+#define GENERIC_RESPONSE_ERROR					0x3FF
 
-/* SMC SiP service function identifier */
+/* SiP V2 command code range */
+#define INTEL_SIP_SMC_CMD_MASK					0xFFFF
+#define INTEL_SIP_SMC_CMD_V2_RANGE_BEGIN			0x400
+#define INTEL_SIP_SMC_CMD_V2_RANGE_END				0x4FF
+
+/* SiP V2 protocol header */
+#define INTEL_SIP_SMC_HEADER_JOB_ID_MASK			0xF
+#define INTEL_SIP_SMC_HEADER_JOB_ID_OFFSET			0U
+#define INTEL_SIP_SMC_HEADER_CID_MASK				0xF
+#define INTEL_SIP_SMC_HEADER_CID_OFFSET				4U
+#define INTEL_SIP_SMC_HEADER_VERSION_MASK			0xF
+#define INTEL_SIP_SMC_HEADER_VERSION_OFFSET			60U
+
+/* SMC SiP service function identifier for version 1 */
 
 /* FPGA Reconfig */
-#define INTEL_SIP_SMC_FPGA_CONFIG_START			0xC2000001
-#define INTEL_SIP_SMC_FPGA_CONFIG_WRITE			0x42000002
-#define INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE	0xC2000003
-#define INTEL_SIP_SMC_FPGA_CONFIG_ISDONE		0xC2000004
-#define INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM		0xC2000005
+#define INTEL_SIP_SMC_FPGA_CONFIG_START				0xC2000001
+#define INTEL_SIP_SMC_FPGA_CONFIG_WRITE				0x42000002
+#define INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE		0xC2000003
+#define INTEL_SIP_SMC_FPGA_CONFIG_ISDONE			0xC2000004
+#define INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM			0xC2000005
 
 /* FPGA Bitstream Flag */
-#define FLAG_PARTIAL_CONFIG				BIT(0)
-#define FLAG_AUTHENTICATION				BIT(1)
-#define CONFIG_TEST_FLAG(_flag, _type)			(((flag) & FLAG_##_type) \
-							== FLAG_##_type)
+#define FLAG_PARTIAL_CONFIG					BIT(0)
+#define FLAG_AUTHENTICATION					BIT(1)
+#define CONFIG_TEST_FLAG(_flag, _type)				(((flag) & FLAG_##_type) \
+								== FLAG_##_type)
 
 /* Secure Register Access */
 #define INTEL_SIP_SMC_REG_READ				0xC2000007
@@ -39,56 +53,121 @@
 #define INTEL_SIP_SMC_REG_UPDATE			0xC2000009
 
 /* Remote System Update */
-#define INTEL_SIP_SMC_RSU_STATUS			0xC200000B
-#define INTEL_SIP_SMC_RSU_UPDATE			0xC200000C
-#define INTEL_SIP_SMC_RSU_NOTIFY			0xC200000E
-#define INTEL_SIP_SMC_RSU_RETRY_COUNTER			0xC200000F
-#define INTEL_SIP_SMC_RSU_DCMF_VERSION			0xC2000010
-#define INTEL_SIP_SMC_RSU_COPY_DCMF_VERSION		0xC2000011
-#define INTEL_SIP_SMC_RSU_MAX_RETRY			0xC2000012
-#define INTEL_SIP_SMC_RSU_COPY_MAX_RETRY		0xC2000013
-#define INTEL_SIP_SMC_RSU_DCMF_STATUS			0xC2000014
-#define INTEL_SIP_SMC_RSU_COPY_DCMF_STATUS		0xC2000015
+#define INTEL_SIP_SMC_RSU_STATUS				0xC200000B
+#define INTEL_SIP_SMC_RSU_UPDATE				0xC200000C
+#define INTEL_SIP_SMC_RSU_NOTIFY				0xC200000E
+#define INTEL_SIP_SMC_RSU_RETRY_COUNTER				0xC200000F
+#define INTEL_SIP_SMC_RSU_DCMF_VERSION				0xC2000010
+#define INTEL_SIP_SMC_RSU_COPY_DCMF_VERSION			0xC2000011
+#define INTEL_SIP_SMC_RSU_MAX_RETRY				0xC2000012
+#define INTEL_SIP_SMC_RSU_COPY_MAX_RETRY			0xC2000013
+#define INTEL_SIP_SMC_RSU_DCMF_STATUS				0xC2000014
+#define INTEL_SIP_SMC_RSU_COPY_DCMF_STATUS			0xC2000015
 
 /* Hardware monitor */
-#define INTEL_SIP_SMC_HWMON_READTEMP			0xC2000020
-#define INTEL_SIP_SMC_HWMON_READVOLT			0xC2000021
-#define TEMP_CHANNEL_MAX				(1 << 15)
-#define VOLT_CHANNEL_MAX				(1 << 15)
+#define INTEL_SIP_SMC_HWMON_READTEMP				0xC2000020
+#define INTEL_SIP_SMC_HWMON_READVOLT				0xC2000021
+#define TEMP_CHANNEL_MAX					(1 << 15)
+#define VOLT_CHANNEL_MAX					(1 << 15)
 
 /* ECC */
-#define INTEL_SIP_SMC_ECC_DBE				0xC200000D
+#define INTEL_SIP_SMC_ECC_DBE					0xC200000D
 
 /* Generic Command */
-#define INTEL_SIP_SMC_GET_ROM_PATCH_SHA384		0xC2000040
+#define INTEL_SIP_SMC_SERVICE_COMPLETED				0xC200001E
+#define INTEL_SIP_SMC_FIRMWARE_VERSION				0xC200001F
+#define INTEL_SIP_SMC_HPS_SET_BRIDGES				0xC2000032
+#define INTEL_SIP_SMC_GET_ROM_PATCH_SHA384			0xC2000040
 
-/* Send Mailbox Command */
-#define INTEL_SIP_SMC_MBOX_SEND_CMD			0xC200001E
-#define INTEL_SIP_SMC_FIRMWARE_VERSION			0xC200001F
-#define INTEL_SIP_SMC_HPS_SET_BRIDGES			0xC2000032
+#define SERVICE_COMPLETED_MODE_ASYNC				0x00004F4E
 
 /* Mailbox Command */
-#define INTEL_SIP_SMC_GET_USERCODE			0xC200003D
+#define INTEL_SIP_SMC_MBOX_SEND_CMD				0xC200003C
+#define INTEL_SIP_SMC_GET_USERCODE				0xC200003D
+
+/* FPGA Crypto Services */
+#define INTEL_SIP_SMC_FCS_RANDOM_NUMBER				0xC200005A
+#define INTEL_SIP_SMC_FCS_RANDOM_NUMBER_EXT			0x4200008F
+#define INTEL_SIP_SMC_FCS_CRYPTION				0x4200005B
+#define INTEL_SIP_SMC_FCS_CRYPTION_EXT				0xC2000090
+#define INTEL_SIP_SMC_FCS_SERVICE_REQUEST			0x4200005C
+#define INTEL_SIP_SMC_FCS_SEND_CERTIFICATE			0x4200005D
+#define INTEL_SIP_SMC_FCS_GET_PROVISION_DATA			0x4200005E
+#define INTEL_SIP_SMC_FCS_CNTR_SET_PREAUTH			0xC200005F
+#define INTEL_SIP_SMC_FCS_PSGSIGMA_TEARDOWN			0xC2000064
+#define INTEL_SIP_SMC_FCS_CHIP_ID				0xC2000065
+#define INTEL_SIP_SMC_FCS_ATTESTATION_SUBKEY			0xC2000066
+#define INTEL_SIP_SMC_FCS_ATTESTATION_MEASUREMENTS		0xC2000067
+#define INTEL_SIP_SMC_FCS_GET_ATTESTATION_CERT			0xC2000068
+#define INTEL_SIP_SMC_FCS_CREATE_CERT_ON_RELOAD			0xC2000069
+#define INTEL_SIP_SMC_FCS_OPEN_CS_SESSION			0xC200006E
+#define INTEL_SIP_SMC_FCS_CLOSE_CS_SESSION			0xC200006F
+#define INTEL_SIP_SMC_FCS_IMPORT_CS_KEY				0x42000070
+#define INTEL_SIP_SMC_FCS_EXPORT_CS_KEY				0xC2000071
+#define INTEL_SIP_SMC_FCS_REMOVE_CS_KEY				0xC2000072
+#define INTEL_SIP_SMC_FCS_GET_CS_KEY_INFO			0xC2000073
+#define INTEL_SIP_SMC_FCS_AES_CRYPT_INIT			0xC2000074
+#define INTEL_SIP_SMC_FCS_AES_CRYPT_UPDATE			0x42000075
+#define INTEL_SIP_SMC_FCS_AES_CRYPT_FINALIZE			0x42000076
+#define INTEL_SIP_SMC_FCS_GET_DIGEST_INIT			0xC2000077
+#define INTEL_SIP_SMC_FCS_GET_DIGEST_UPDATE			0xC2000078
+#define INTEL_SIP_SMC_FCS_GET_DIGEST_FINALIZE			0xC2000079
+#define INTEL_SIP_SMC_FCS_MAC_VERIFY_INIT			0xC200007A
+#define INTEL_SIP_SMC_FCS_MAC_VERIFY_UPDATE			0xC200007B
+#define INTEL_SIP_SMC_FCS_MAC_VERIFY_FINALIZE			0xC200007C
+#define INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGN_INIT			0xC200007D
+#define INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGN_FINALIZE		0xC200007F
+#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_INIT		0xC2000080
+#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_UPDATE		0xC2000081
+#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_FINALIZE		0xC2000082
+#define INTEL_SIP_SMC_FCS_ECDSA_HASH_SIG_VERIFY_INIT		0xC2000083
+#define INTEL_SIP_SMC_FCS_ECDSA_HASH_SIG_VERIFY_FINALIZE	0xC2000085
+#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_INIT	0xC2000086
+#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_UPDATE	0xC2000087
+#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_FINALIZE	0xC2000088
+#define INTEL_SIP_SMC_FCS_ECDSA_GET_PUBKEY_INIT			0xC2000089
+#define INTEL_SIP_SMC_FCS_ECDSA_GET_PUBKEY_FINALIZE		0xC200008B
+#define INTEL_SIP_SMC_FCS_ECDH_REQUEST_INIT			0xC200008C
+#define INTEL_SIP_SMC_FCS_ECDH_REQUEST_FINALIZE			0xC200008E
 
-/* SiP Definitions */
+#define INTEL_SIP_SMC_FCS_SHA_MODE_MASK				0xF
+#define INTEL_SIP_SMC_FCS_DIGEST_SIZE_MASK			0xF
+#define INTEL_SIP_SMC_FCS_DIGEST_SIZE_OFFSET			4U
+#define INTEL_SIP_SMC_FCS_ECC_ALGO_MASK				0xF
 
 /* ECC DBE */
-#define WARM_RESET_WFI_FLAG				BIT(31)
-#define SYSMGR_ECC_DBE_COLD_RST_MASK			(SYSMGR_ECC_OCRAM_MASK |\
-							SYSMGR_ECC_DDR0_MASK |\
-							SYSMGR_ECC_DDR1_MASK)
+#define WARM_RESET_WFI_FLAG					BIT(31)
+#define SYSMGR_ECC_DBE_COLD_RST_MASK				(SYSMGR_ECC_OCRAM_MASK |\
+								SYSMGR_ECC_DDR0_MASK |\
+								SYSMGR_ECC_DDR1_MASK)
 
 /* Non-mailbox SMC Call */
-#define INTEL_SIP_SMC_SVC_VERSION			0xC2000200
+#define INTEL_SIP_SMC_SVC_VERSION				0xC2000200
+
+/**
+ * SMC SiP service function identifier for version 2
+ * Command codes range from 0x400 to 0x4FF
+ */
+
+/* V2: Non-mailbox function identifier */
+#define INTEL_SIP_SMC_V2_GET_SVC_VERSION			0xC2000400
+#define INTEL_SIP_SMC_V2_REG_READ				0xC2000401
+#define INTEL_SIP_SMC_V2_REG_WRITE				0xC2000402
+#define INTEL_SIP_SMC_V2_REG_UPDATE				0xC2000403
+#define INTEL_SIP_SMC_V2_HPS_SET_BRIDGES			0xC2000404
+
+/* V2: Mailbox function identifier */
+#define INTEL_SIP_SMC_V2_MAILBOX_SEND_COMMAND			0xC2000420
+#define INTEL_SIP_SMC_V2_MAILBOX_POLL_RESPONSE			0xC2000421
 
 /* SMC function IDs for SiP Service queries */
-#define SIP_SVC_CALL_COUNT				0x8200ff00
-#define SIP_SVC_UID					0x8200ff01
-#define SIP_SVC_VERSION					0x8200ff03
+#define SIP_SVC_CALL_COUNT					0x8200ff00
+#define SIP_SVC_UID						0x8200ff01
+#define SIP_SVC_VERSION						0x8200ff03
 
 /* SiP Service Calls version numbers */
-#define SIP_SVC_VERSION_MAJOR				1
-#define SIP_SVC_VERSION_MINOR				0
+#define SIP_SVC_VERSION_MAJOR					1
+#define SIP_SVC_VERSION_MINOR					0
 
 
 /* Structure Definitions */
@@ -101,12 +180,38 @@
 	int block_number;
 };
 
-/* Function Definitions */
+typedef enum {
+	NO_REQUEST = 0,
+	RECONFIGURATION,
+	BITSTREAM_AUTH
+} config_type;
 
+/* Function Definitions */
+bool is_size_4_bytes_aligned(uint32_t size);
 bool is_address_in_ddr_range(uint64_t addr, uint64_t size);
 
 /* ECC DBE */
 bool cold_reset_for_ecc_dbe(void);
 uint32_t intel_ecc_dbe_notification(uint64_t dbe_value);
 
+/* Secure register access */
+uint32_t intel_secure_reg_read(uint64_t reg_addr, uint32_t *retval);
+uint32_t intel_secure_reg_write(uint64_t reg_addr, uint32_t val,
+				uint32_t *retval);
+uint32_t intel_secure_reg_update(uint64_t reg_addr, uint32_t mask,
+				 uint32_t val, uint32_t *retval);
+
+/* Miscellaneous HPS services */
+uint32_t intel_hps_set_bridges(uint64_t enable, uint64_t mask);
+
+/* SiP Service handler for version 2 */
+uintptr_t sip_smc_handler_v2(uint32_t smc_fid,
+			 u_register_t x1,
+			 u_register_t x2,
+			 u_register_t x3,
+			 u_register_t x4,
+			 void *cookie,
+			 void *handle,
+			 u_register_t flags);
+
 #endif /* SOCFPGA_SIP_SVC_H */
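
The V2 protocol header macros describe a packed 64-bit header carrying the job ID, client ID and version. An illustrative set of accessors derived from those masks and offsets; the dispatcher in socfpga_sip_svc_v2.c performs the equivalent decoding:

#include <stdint.h>

#include "socfpga_sip_svc.h"

static inline uint32_t smc_v2_job_id(uint64_t header)
{
	return (uint32_t)((header >> INTEL_SIP_SMC_HEADER_JOB_ID_OFFSET) &
			  INTEL_SIP_SMC_HEADER_JOB_ID_MASK);
}

static inline uint32_t smc_v2_client_id(uint64_t header)
{
	return (uint32_t)((header >> INTEL_SIP_SMC_HEADER_CID_OFFSET) &
			  INTEL_SIP_SMC_HEADER_CID_MASK);
}

static inline uint32_t smc_v2_version(uint64_t header)
{
	return (uint32_t)((header >> INTEL_SIP_SMC_HEADER_VERSION_OFFSET) &
			  INTEL_SIP_SMC_HEADER_VERSION_MASK);
}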
diff --git a/plat/intel/soc/common/include/socfpga_system_manager.h b/plat/intel/soc/common/include/socfpga_system_manager.h
index a77734d..7f67313 100644
--- a/plat/intel/soc/common/include/socfpga_system_manager.h
+++ b/plat/intel/soc/common/include/socfpga_system_manager.h
@@ -38,8 +38,8 @@
 #define SYSMGR_SDMMC_DRVSEL(x)			(((x) & 0x7) << 0)
 #define SYSMGR_SDMMC_SMPLSEL(x)			(((x) & 0x7) << 4)
 
-#define IDLE_DATA_LWSOC2FPGA				BIT(0)
-#define IDLE_DATA_SOC2FPGA				BIT(4)
+#define IDLE_DATA_LWSOC2FPGA				BIT(4)
+#define IDLE_DATA_SOC2FPGA				BIT(0)
 #define IDLE_DATA_MASK		(IDLE_DATA_LWSOC2FPGA | IDLE_DATA_SOC2FPGA)
 
 #define SYSMGR_ECC_OCRAM_MASK				BIT(1)
diff --git a/plat/intel/soc/common/sip/socfpga_sip_fcs.c b/plat/intel/soc/common/sip/socfpga_sip_fcs.c
index 85551a4..eacc4dd 100644
--- a/plat/intel/soc/common/sip/socfpga_sip_fcs.c
+++ b/plat/intel/soc/common/sip/socfpga_sip_fcs.c
@@ -11,13 +11,71 @@
 #include "socfpga_mailbox.h"
 #include "socfpga_sip_svc.h"
 
-static bool is_size_4_bytes_aligned(uint32_t size)
+/* FCS static variables */
+static fcs_crypto_service_aes_data fcs_aes_init_payload;
+static fcs_crypto_service_data fcs_sha_get_digest_param;
+static fcs_crypto_service_data fcs_sha_mac_verify_param;
+static fcs_crypto_service_data fcs_ecdsa_hash_sign_param;
+static fcs_crypto_service_data fcs_ecdsa_hash_sig_verify_param;
+static fcs_crypto_service_data fcs_sha2_data_sign_param;
+static fcs_crypto_service_data fcs_sha2_data_sig_verify_param;
+static fcs_crypto_service_data fcs_ecdsa_get_pubkey_param;
+static fcs_crypto_service_data fcs_ecdh_request_param;
+
+bool is_size_4_bytes_aligned(uint32_t size)
 {
 	if ((size % MBOX_WORD_BYTE) != 0U) {
 		return false;
 	} else {
 		return true;
 	}
+}
+
+static bool is_8_bytes_aligned(uint32_t data)
+{
+	if ((data % (MBOX_WORD_BYTE * 2U)) != 0U) {
+		return false;
+	} else {
+		return true;
+	}
+}
+
+static bool is_32_bytes_aligned(uint32_t data)
+{
+	if ((data % (8U * MBOX_WORD_BYTE)) != 0U) {
+		return false;
+	} else {
+		return true;
+	}
+}
+
+static int intel_fcs_crypto_service_init(uint32_t session_id,
+			uint32_t context_id, uint32_t key_id,
+			uint32_t param_size, uint64_t param_data,
+			fcs_crypto_service_data *data_addr,
+			uint32_t *mbox_error)
+{
+	if (mbox_error == NULL) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (param_size != 4) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	memset(data_addr, 0, sizeof(fcs_crypto_service_data));
+
+	data_addr->session_id = session_id;
+	data_addr->context_id = context_id;
+	data_addr->key_id = key_id;
+	data_addr->crypto_param_size = param_size;
+	data_addr->crypto_param = param_data;
+
+	data_addr->is_updated = 0;
+
+	*mbox_error = 0;
+
+	return INTEL_SIP_SMC_STATUS_OK;
 }
 
 uint32_t intel_fcs_random_number_gen(uint64_t addr, uint64_t *ret_size,
@@ -57,6 +115,45 @@
 	return INTEL_SIP_SMC_STATUS_OK;
 }
 
+int intel_fcs_random_number_gen_ext(uint32_t session_id, uint32_t context_id,
+				uint32_t size, uint32_t *send_id)
+{
+	int status;
+	uint32_t payload_size;
+	uint32_t crypto_header;
+
+	if (size > (FCS_RANDOM_EXT_MAX_WORD_SIZE *
+		MBOX_WORD_BYTE) || size == 0U) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_size_4_bytes_aligned(size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	crypto_header = (FCS_CS_FIELD_FLAG_INIT | FCS_CS_FIELD_FLAG_FINALIZE) <<
+			FCS_CS_FIELD_FLAG_OFFSET;
+
+	fcs_rng_payload payload = {
+		session_id,
+		context_id,
+		crypto_header,
+		size
+	};
+
+	payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+	status = mailbox_send_cmd_async(send_id, MBOX_FCS_RANDOM_GEN,
+					(uint32_t *) &payload, payload_size,
+					CMD_INDIRECT);
+
+	if (status < 0) {
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
 uint32_t intel_fcs_send_cert(uint64_t addr, uint64_t size,
 					uint32_t *send_id)
 {
@@ -74,6 +171,8 @@
 				(uint32_t *)addr, size / MBOX_WORD_BYTE,
 				CMD_DIRECT);
 
+	flush_dcache_range(addr, size);
+
 	if (status < 0) {
 		return INTEL_SIP_SMC_STATUS_ERROR;
 	}
@@ -89,50 +188,346 @@
 				NULL, 0U, CMD_DIRECT);
 
 	if (status < 0) {
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+uint32_t intel_fcs_cntr_set_preauth(uint8_t counter_type, int32_t counter_value,
+					uint32_t test_bit, uint32_t *mbox_error)
+{
+	int status;
+	uint32_t first_word;
+	uint32_t payload_size;
+
+	if ((test_bit != MBOX_TEST_BIT) &&
+		(test_bit != 0)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if ((counter_type < FCS_BIG_CNTR_SEL) ||
+		(counter_type > FCS_SVN_CNTR_3_SEL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if ((counter_type == FCS_BIG_CNTR_SEL) &&
+		(counter_value > FCS_BIG_CNTR_VAL_MAX)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if ((counter_type >= FCS_SVN_CNTR_0_SEL) &&
+		(counter_type <= FCS_SVN_CNTR_3_SEL) &&
+		(counter_value > FCS_SVN_CNTR_VAL_MAX)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	first_word = test_bit | counter_type;
+	fcs_cntr_set_preauth_payload payload = {
+		first_word,
+		counter_value
+	};
+
+	payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+	status =  mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_CNTR_SET_PREAUTH,
+				  (uint32_t *) &payload, payload_size,
+				  CMD_CASUAL, NULL, NULL);
+
+	if (status < 0) {
+		*mbox_error = -status;
 		return INTEL_SIP_SMC_STATUS_ERROR;
 	}
 
 	return INTEL_SIP_SMC_STATUS_OK;
 }
 
-uint32_t intel_fcs_cryption(uint32_t mode, uint32_t src_addr,
-		uint32_t src_size, uint32_t dst_addr,
-		uint32_t dst_size, uint32_t *send_id)
+uint32_t intel_fcs_encryption(uint32_t src_addr, uint32_t src_size,
+		uint32_t dst_addr, uint32_t dst_size, uint32_t *send_id)
 {
 	int status;
-	uint32_t cmd;
+	uint32_t load_size;
 
-	fcs_crypt_payload payload = {
-		FCS_CRYPTION_DATA_0,
+	fcs_encrypt_payload payload = {
+		FCS_ENCRYPTION_DATA_0,
 		src_addr,
 		src_size,
 		dst_addr,
 		dst_size };
+	load_size = sizeof(payload) / MBOX_WORD_BYTE;
 
 	if (!is_address_in_ddr_range(src_addr, src_size) ||
 		!is_address_in_ddr_range(dst_addr, dst_size)) {
 		return INTEL_SIP_SMC_STATUS_REJECTED;
 	}
 
-	if (!is_size_4_bytes_aligned(sizeof(fcs_crypt_payload))) {
+	if (!is_size_4_bytes_aligned(src_size)) {
 		return INTEL_SIP_SMC_STATUS_REJECTED;
 	}
 
-	if (mode != 0U) {
-		cmd = MBOX_FCS_ENCRYPT_REQ;
-	} else {
-		cmd = MBOX_FCS_DECRYPT_REQ;
+	status = mailbox_send_cmd_async(send_id, MBOX_FCS_ENCRYPT_REQ,
+				(uint32_t *) &payload, load_size,
+				CMD_INDIRECT);
+	inv_dcache_range(dst_addr, dst_size);
+
+	if (status < 0) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+uint32_t intel_fcs_decryption(uint32_t src_addr, uint32_t src_size,
+		uint32_t dst_addr, uint32_t dst_size, uint32_t *send_id)
+{
+	int status;
+	uint32_t load_size;
+	uintptr_t id_offset;
+
+	id_offset = src_addr + FCS_OWNER_ID_OFFSET;
+	fcs_decrypt_payload payload = {
+		FCS_DECRYPTION_DATA_0,
+		{mmio_read_32(id_offset),
+		mmio_read_32(id_offset + MBOX_WORD_BYTE)},
+		src_addr,
+		src_size,
+		dst_addr,
+		dst_size };
+	load_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_size_4_bytes_aligned(src_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
 	}
 
-	status = mailbox_send_cmd_async(send_id, cmd, (uint32_t *) &payload,
-				sizeof(fcs_crypt_payload) / MBOX_WORD_BYTE,
+	status = mailbox_send_cmd_async(send_id, MBOX_FCS_DECRYPT_REQ,
+				(uint32_t *) &payload, load_size,
 				CMD_INDIRECT);
 	inv_dcache_range(dst_addr, dst_size);
 
 	if (status < 0) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_encryption_ext(uint32_t session_id, uint32_t context_id,
+		uint32_t src_addr, uint32_t src_size,
+		uint32_t dst_addr, uint32_t *dst_size, uint32_t *mbox_error)
+{
+	int status;
+	uint32_t payload_size;
+	uint32_t resp_len = FCS_CRYPTION_RESP_WORD_SIZE;
+	uint32_t resp_data[FCS_CRYPTION_RESP_WORD_SIZE] = {0U};
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_size_4_bytes_aligned(src_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	fcs_encrypt_ext_payload payload = {
+		session_id,
+		context_id,
+		FCS_CRYPTION_CRYPTO_HEADER,
+		src_addr,
+		src_size,
+		dst_addr,
+		*dst_size
+	};
+
+	payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ENCRYPT_REQ,
+				(uint32_t *) &payload, payload_size,
+				CMD_CASUAL, resp_data, &resp_len);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	if (resp_len != FCS_CRYPTION_RESP_WORD_SIZE) {
+		*mbox_error = MBOX_RET_ERROR;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_data[FCS_CRYPTION_RESP_SIZE_OFFSET];
+	inv_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_decryption_ext(uint32_t session_id, uint32_t context_id,
+		uint32_t src_addr, uint32_t src_size,
+		uint32_t dst_addr, uint32_t *dst_size, uint32_t *mbox_error)
+{
+	int status;
+	uintptr_t id_offset;
+	uint32_t payload_size;
+	uint32_t resp_len = FCS_CRYPTION_RESP_WORD_SIZE;
+	uint32_t resp_data[FCS_CRYPTION_RESP_WORD_SIZE] = {0U};
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_size_4_bytes_aligned(src_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	id_offset = src_addr + FCS_OWNER_ID_OFFSET;
+	fcs_decrypt_ext_payload payload = {
+		session_id,
+		context_id,
+		FCS_CRYPTION_CRYPTO_HEADER,
+		{mmio_read_32(id_offset),
+		mmio_read_32(id_offset + MBOX_WORD_BYTE)},
+		src_addr,
+		src_size,
+		dst_addr,
+		*dst_size
+	};
+
+	payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_DECRYPT_REQ,
+				(uint32_t *) &payload, payload_size,
+				CMD_CASUAL, resp_data, &resp_len);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	if (resp_len != FCS_CRYPTION_RESP_WORD_SIZE) {
+		*mbox_error = MBOX_RET_ERROR;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_data[FCS_CRYPTION_RESP_SIZE_OFFSET];
+	inv_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_sigma_teardown(uint32_t session_id, uint32_t *mbox_error)
+{
+	int status;
+
+	if ((session_id != PSGSIGMA_SESSION_ID_ONE) &&
+		(session_id != PSGSIGMA_UNKNOWN_SESSION)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	psgsigma_teardown_msg message = {
+		RESERVED_AS_ZERO,
+		PSGSIGMA_TEARDOWN_MAGIC,
+		session_id
+	};
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_PSG_SIGMA_TEARDOWN,
+			(uint32_t *) &message, sizeof(message) / MBOX_WORD_BYTE,
+			CMD_CASUAL, NULL, NULL);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_chip_id(uint32_t *id_low, uint32_t *id_high, uint32_t *mbox_error)
+{
+	int status;
+	uint32_t load_size;
+	uint32_t chip_id[2];
+
+	load_size = sizeof(chip_id) / MBOX_WORD_BYTE;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_GET_CHIPID, NULL,
+			0U, CMD_CASUAL, (uint32_t *) chip_id, &load_size);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*id_low = chip_id[0];
+	*id_high = chip_id[1];
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_attestation_subkey(uint64_t src_addr, uint32_t src_size,
+		uint64_t dst_addr, uint32_t *dst_size, uint32_t *mbox_error)
+{
+	int status;
+	uint32_t send_size = src_size / MBOX_WORD_BYTE;
+	uint32_t ret_size = *dst_size / MBOX_WORD_BYTE;
+
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_ATTESTATION_SUBKEY,
+			(uint32_t *) src_addr, send_size, CMD_CASUAL,
+			(uint32_t *) dst_addr, &ret_size);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = ret_size * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_get_measurement(uint64_t src_addr, uint32_t src_size,
+		uint64_t dst_addr, uint32_t *dst_size, uint32_t *mbox_error)
+{
+	int status;
+	uint32_t send_size = src_size / MBOX_WORD_BYTE;
+	uint32_t ret_size = *dst_size / MBOX_WORD_BYTE;
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
 		return INTEL_SIP_SMC_STATUS_REJECTED;
 	}
 
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_GET_MEASUREMENT,
+			(uint32_t *) src_addr, send_size, CMD_CASUAL,
+			(uint32_t *) dst_addr, &ret_size);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = ret_size * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
 	return INTEL_SIP_SMC_STATUS_OK;
 }
 
@@ -165,3 +560,1180 @@
 
 	return INTEL_SIP_SMC_STATUS_OK;
 }
+
+int intel_fcs_get_attestation_cert(uint32_t cert_request, uint64_t dst_addr,
+			uint32_t *dst_size, uint32_t *mbox_error)
+{
+	int status;
+	uint32_t ret_size = *dst_size / MBOX_WORD_BYTE;
+
+	if (mbox_error == NULL) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (cert_request < FCS_ATTEST_FIRMWARE_CERT ||
+		cert_request > FCS_ATTEST_CERT_MAX_REQ_PARAM) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_GET_ATTESTATION_CERT,
+			(uint32_t *) &cert_request, 1U, CMD_CASUAL,
+			(uint32_t *) dst_addr, &ret_size);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = ret_size * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_create_cert_on_reload(uint32_t cert_request,
+			uint32_t *mbox_error)
+{
+	int status;
+
+	if (mbox_error == NULL) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (cert_request < FCS_ATTEST_FIRMWARE_CERT ||
+		cert_request > FCS_ATTEST_CERT_MAX_REQ_PARAM) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_CREATE_CERT_ON_RELOAD,
+			(uint32_t *) &cert_request, 1U, CMD_CASUAL,
+			NULL, NULL);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_open_crypto_service_session(uint32_t *session_id,
+			uint32_t *mbox_error)
+{
+	int status;
+	uint32_t resp_len = 1U;
+
+	if ((session_id == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_OPEN_CS_SESSION,
+			NULL, 0U, CMD_CASUAL, session_id, &resp_len);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_close_crypto_service_session(uint32_t session_id,
+			uint32_t *mbox_error)
+{
+	int status;
+
+	if (mbox_error == NULL) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_CLOSE_CS_SESSION,
+			&session_id, 1U, CMD_CASUAL, NULL, NULL);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_import_crypto_service_key(uint64_t src_addr, uint32_t src_size,
+		uint32_t *send_id)
+{
+	int status;
+
+	if (src_size > (FCS_CS_KEY_OBJ_MAX_WORD_SIZE *
+		MBOX_WORD_BYTE)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	status = mailbox_send_cmd_async(send_id, MBOX_FCS_IMPORT_CS_KEY,
+				(uint32_t *)src_addr, src_size / MBOX_WORD_BYTE,
+				CMD_INDIRECT);
+
+	if (status < 0) {
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_export_crypto_service_key(uint32_t session_id, uint32_t key_id,
+		uint64_t dst_addr, uint32_t *dst_size,
+		uint32_t *mbox_error)
+{
+	int status;
+	uint32_t i;
+	uint32_t payload_size;
+	uint32_t resp_len = FCS_CS_KEY_OBJ_MAX_WORD_SIZE;
+	uint32_t resp_data[FCS_CS_KEY_OBJ_MAX_WORD_SIZE] = {0U};
+	uint32_t op_status = 0U;
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	fcs_cs_key_payload payload = {
+		session_id,
+		RESERVED_AS_ZERO,
+		RESERVED_AS_ZERO,
+		key_id
+	};
+
+	payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_EXPORT_CS_KEY,
+			(uint32_t *) &payload, payload_size,
+			CMD_CASUAL, resp_data, &resp_len);
+
+	if (resp_len > 0) {
+		op_status = resp_data[0] & FCS_CS_KEY_RESP_STATUS_MASK;
+	}
+
+	if (status < 0) {
+		*mbox_error = (-status) | (op_status << FCS_CS_KEY_RESP_STATUS_OFFSET);
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	if (resp_len > 1) {
+
+		/* The exported key object starts at the second response word */
+		*dst_size = (resp_len - 1) * MBOX_WORD_BYTE;
+
+		for (i = 1U; i < resp_len; i++) {
+			mmio_write_32(dst_addr, resp_data[i]);
+			dst_addr += MBOX_WORD_BYTE;
+		}
+
+		flush_dcache_range(dst_addr - *dst_size, *dst_size);
+
+	} else {
+
+		/* Unexpected response, missing key object in response */
+		*mbox_error = MBOX_RET_ERROR;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_remove_crypto_service_key(uint32_t session_id, uint32_t key_id,
+		uint32_t *mbox_error)
+{
+	int status;
+	uint32_t payload_size;
+	uint32_t resp_len = 1U;
+	uint32_t resp_data = 0U;
+	uint32_t op_status = 0U;
+
+	if (mbox_error == NULL) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	fcs_cs_key_payload payload = {
+		session_id,
+		RESERVED_AS_ZERO,
+		RESERVED_AS_ZERO,
+		key_id
+	};
+
+	payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_REMOVE_CS_KEY,
+			(uint32_t *) &payload, payload_size,
+			CMD_CASUAL, &resp_data, &resp_len);
+
+	if (resp_len > 0) {
+		op_status = resp_data & FCS_CS_KEY_RESP_STATUS_MASK;
+	}
+
+	if (status < 0) {
+		*mbox_error = (-status) | (op_status << FCS_CS_KEY_RESP_STATUS_OFFSET);
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_get_crypto_service_key_info(uint32_t session_id, uint32_t key_id,
+		uint64_t dst_addr, uint32_t *dst_size,
+		uint32_t *mbox_error)
+{
+	int status;
+	uint32_t payload_size;
+	uint32_t resp_len = FCS_CS_KEY_INFO_MAX_WORD_SIZE;
+	uint32_t op_status = 0U;
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	fcs_cs_key_payload payload = {
+		session_id,
+		RESERVED_AS_ZERO,
+		RESERVED_AS_ZERO,
+		key_id
+	};
+
+	payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_GET_CS_KEY_INFO,
+				(uint32_t *) &payload, payload_size,
+				CMD_CASUAL, (uint32_t *) dst_addr, &resp_len);
+
+	if (resp_len > 0) {
+		op_status = mmio_read_32(dst_addr) &
+			FCS_CS_KEY_RESP_STATUS_MASK;
+	}
+
+	if (status < 0) {
+		*mbox_error = (-status) | (op_status << FCS_CS_KEY_RESP_STATUS_OFFSET);
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_len * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_get_digest_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error)
+{
+	return intel_fcs_crypto_service_init(session_id, context_id,
+				key_id, param_size, param_data,
+				(void *) &fcs_sha_get_digest_param,
+				mbox_error);
+}
+
+int intel_fcs_get_digest_update_finalize(uint32_t session_id,
+				uint32_t context_id, uint32_t src_addr,
+				uint32_t src_size, uint64_t dst_addr,
+				uint32_t *dst_size, uint8_t is_finalised,
+				uint32_t *mbox_error)
+{
+	int status;
+	uint32_t i;
+	uint32_t flag;
+	uint32_t crypto_header;
+	uint32_t resp_len;
+	uint32_t payload[FCS_GET_DIGEST_CMD_MAX_WORD_SIZE] = {0U};
+
+	if (dst_size == NULL || mbox_error == NULL) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (fcs_sha_get_digest_param.session_id != session_id ||
+	    fcs_sha_get_digest_param.context_id != context_id) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	/* Source data must be 8 bytes aligned */
+	if (!is_8_bytes_aligned(src_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		 !is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	resp_len = *dst_size / MBOX_WORD_BYTE;
+
+	/* Prepare crypto header */
+	flag = 0;
+
+	if (fcs_sha_get_digest_param.is_updated) {
+		fcs_sha_get_digest_param.crypto_param_size = 0;
+	} else {
+		flag |=  FCS_CS_FIELD_FLAG_INIT;
+	}
+
+	if (is_finalised != 0U) {
+		flag |=  FCS_CS_FIELD_FLAG_FINALIZE;
+	} else {
+		flag |=  FCS_CS_FIELD_FLAG_UPDATE;
+		fcs_sha_get_digest_param.is_updated = 1;
+	}
+
+	crypto_header = ((flag << FCS_CS_FIELD_FLAG_OFFSET) |
+			(fcs_sha_get_digest_param.crypto_param_size &
+			FCS_CS_FIELD_SIZE_MASK));
+
+	/* Prepare command payload */
+	i = 0;
+	payload[i] = fcs_sha_get_digest_param.session_id;
+	i++;
+	payload[i] = fcs_sha_get_digest_param.context_id;
+	i++;
+	payload[i] = crypto_header;
+	i++;
+
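+	/* The key id and crypto parameters are only sent on the INIT call */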
+	if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+		FCS_CS_FIELD_FLAG_INIT) {
+		payload[i] = fcs_sha_get_digest_param.key_id;
+		i++;
+		/* Crypto parameters */
+		payload[i] = fcs_sha_get_digest_param.crypto_param
+				& INTEL_SIP_SMC_FCS_SHA_MODE_MASK;
+		payload[i] |= ((fcs_sha_get_digest_param.crypto_param
+				>> INTEL_SIP_SMC_FCS_DIGEST_SIZE_OFFSET)
+				& INTEL_SIP_SMC_FCS_DIGEST_SIZE_MASK)
+				<< FCS_SHA_HMAC_CRYPTO_PARAM_SIZE_OFFSET;
+		i++;
+	}
+	/* Data source address and size */
+	payload[i] = src_addr;
+	i++;
+	payload[i] = src_size;
+	i++;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_GET_DIGEST_REQ,
+				payload, i, CMD_CASUAL,
+				(uint32_t *) dst_addr, &resp_len);
+
+	if (is_finalised != 0U) {
+		memset((void *)&fcs_sha_get_digest_param, 0,
+			sizeof(fcs_crypto_service_data));
+	}
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_len * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_mac_verify_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error)
+{
+	return intel_fcs_crypto_service_init(session_id, context_id,
+				key_id, param_size, param_data,
+				(void *) &fcs_sha_mac_verify_param,
+				mbox_error);
+}
+
+int intel_fcs_mac_verify_update_finalize(uint32_t session_id,
+				uint32_t context_id, uint32_t src_addr,
+				uint32_t src_size, uint64_t dst_addr,
+				uint32_t *dst_size, uint32_t data_size,
+				uint8_t is_finalised, uint32_t *mbox_error)
+{
+	int status;
+	uint32_t i;
+	uint32_t flag;
+	uint32_t crypto_header;
+	uint32_t resp_len;
+	uint32_t payload[FCS_MAC_VERIFY_CMD_MAX_WORD_SIZE] = {0U};
+	uintptr_t mac_offset;
+
+	if (dst_size == NULL || mbox_error == NULL) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (fcs_sha_mac_verify_param.session_id != session_id ||
+		fcs_sha_mac_verify_param.context_id != context_id) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (data_size >= src_size) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_size_4_bytes_aligned(src_size) ||
+		!is_8_bytes_aligned(data_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	resp_len = *dst_size / MBOX_WORD_BYTE;
+
+	/* Prepare crypto header */
+	flag = 0;
+
+	if (fcs_sha_mac_verify_param.is_updated) {
+		fcs_sha_mac_verify_param.crypto_param_size = 0;
+	} else {
+		flag |=  FCS_CS_FIELD_FLAG_INIT;
+	}
+
+	if (is_finalised) {
+		flag |=  FCS_CS_FIELD_FLAG_FINALIZE;
+	} else {
+		flag |=  FCS_CS_FIELD_FLAG_UPDATE;
+		fcs_sha_mac_verify_param.is_updated = 1;
+	}
+
+	crypto_header = ((flag << FCS_CS_FIELD_FLAG_OFFSET) |
+			(fcs_sha_mac_verify_param.crypto_param_size &
+			FCS_CS_FIELD_SIZE_MASK));
+
+	/* Prepare command payload */
+	i = 0;
+	payload[i] = fcs_sha_mac_verify_param.session_id;
+	i++;
+	payload[i] = fcs_sha_mac_verify_param.context_id;
+	i++;
+	payload[i] = crypto_header;
+	i++;
+
+	if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+		FCS_CS_FIELD_FLAG_INIT) {
+		payload[i] = fcs_sha_mac_verify_param.key_id;
+		i++;
+		/* Crypto parameters */
+		payload[i] = ((fcs_sha_mac_verify_param.crypto_param
+				>> INTEL_SIP_SMC_FCS_DIGEST_SIZE_OFFSET)
+				& INTEL_SIP_SMC_FCS_DIGEST_SIZE_MASK)
+				<< FCS_SHA_HMAC_CRYPTO_PARAM_SIZE_OFFSET;
+		i++;
+	}
+	/* Data source address and size */
+	payload[i] = src_addr;
+	i++;
+	payload[i] = data_size;
+	i++;
+
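+	/*
+	 * The source buffer holds data_size bytes of message followed by the
+	 * expected MAC; on the FINALIZE call the MAC is copied inline into
+	 * the mailbox payload.
+	 */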
+	if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+		FCS_CS_FIELD_FLAG_FINALIZE) {
+		/* Copy mac data to command */
+		mac_offset = src_addr + data_size;
+		memcpy((uint8_t *) &payload[i], (uint8_t *) mac_offset,
+			src_size - data_size);
+
+		i += (src_size - data_size) / MBOX_WORD_BYTE;
+	}
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_MAC_VERIFY_REQ,
+				payload, i, CMD_CASUAL,
+				(uint32_t *) dst_addr, &resp_len);
+
+	if (is_finalised) {
+		memset((void *)&fcs_sha_mac_verify_param, 0,
+			sizeof(fcs_crypto_service_data));
+	}
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_len * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_ecdsa_hash_sign_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error)
+{
+	return intel_fcs_crypto_service_init(session_id, context_id,
+				key_id, param_size, param_data,
+				(void *) &fcs_ecdsa_hash_sign_param,
+				mbox_error);
+}
+
+int intel_fcs_ecdsa_hash_sign_finalize(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error)
+{
+	int status;
+	uint32_t i;
+	uint32_t payload[FCS_ECDSA_HASH_SIGN_CMD_MAX_WORD_SIZE] = {0U};
+	uint32_t resp_len;
+	uintptr_t hash_data_addr;
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (fcs_ecdsa_hash_sign_param.session_id != session_id ||
+		fcs_ecdsa_hash_sign_param.context_id != context_id) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	resp_len = *dst_size / MBOX_WORD_BYTE;
+
+	/* Prepare command payload */
+	/* Crypto header */
+	i = 0;
+	payload[i] = fcs_ecdsa_hash_sign_param.session_id;
+	i++;
+	payload[i] = fcs_ecdsa_hash_sign_param.context_id;
+
+	i++;
+	payload[i] = fcs_ecdsa_hash_sign_param.crypto_param_size
+			& FCS_CS_FIELD_SIZE_MASK;
+	payload[i] |= (FCS_CS_FIELD_FLAG_INIT | FCS_CS_FIELD_FLAG_UPDATE
+			| FCS_CS_FIELD_FLAG_FINALIZE)
+			<< FCS_CS_FIELD_FLAG_OFFSET;
+	i++;
+	payload[i] = fcs_ecdsa_hash_sign_param.key_id;
+
+	/* Crypto parameters */
+	i++;
+	payload[i] = fcs_ecdsa_hash_sign_param.crypto_param
+			& INTEL_SIP_SMC_FCS_ECC_ALGO_MASK;
+
+	/* Hash Data */
+	i++;
+	hash_data_addr = src_addr;
+	memcpy((uint8_t *) &payload[i], (uint8_t *) hash_data_addr,
+			src_size);
+
+	i += src_size / MBOX_WORD_BYTE;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ECDSA_HASH_SIGN_REQ,
+			payload, i, CMD_CASUAL, (uint32_t *) dst_addr,
+			&resp_len);
+
+	memset((void *) &fcs_ecdsa_hash_sign_param,
+			0, sizeof(fcs_crypto_service_data));
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_len * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_ecdsa_hash_sig_verify_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error)
+{
+	return intel_fcs_crypto_service_init(session_id, context_id,
+				key_id, param_size, param_data,
+				(void *) &fcs_ecdsa_hash_sig_verify_param,
+				mbox_error);
+}
+
+int intel_fcs_ecdsa_hash_sig_verify_finalize(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error)
+{
+	int status;
+	uint32_t i = 0;
+	uint32_t payload[FCS_ECDSA_HASH_SIG_VERIFY_CMD_MAX_WORD_SIZE] = {0U};
+	uint32_t resp_len;
+	uintptr_t hash_sig_pubkey_addr;
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (fcs_ecdsa_hash_sig_verify_param.session_id != session_id ||
+	fcs_ecdsa_hash_sig_verify_param.context_id != context_id) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	resp_len = *dst_size / MBOX_WORD_BYTE;
+
+	/* Prepare command payload */
+	/* Crypto header */
+	i = 0;
+	payload[i] = fcs_ecdsa_hash_sig_verify_param.session_id;
+
+	i++;
+	payload[i] = fcs_ecdsa_hash_sig_verify_param.context_id;
+
+	i++;
+	payload[i] = fcs_ecdsa_hash_sig_verify_param.crypto_param_size
+			& FCS_CS_FIELD_SIZE_MASK;
+	payload[i] |= (FCS_CS_FIELD_FLAG_INIT | FCS_CS_FIELD_FLAG_UPDATE
+			| FCS_CS_FIELD_FLAG_FINALIZE)
+			<< FCS_CS_FIELD_FLAG_OFFSET;
+
+	i++;
+	payload[i] = fcs_ecdsa_hash_sig_verify_param.key_id;
+
+	/* Crypto parameters */
+	i++;
+	payload[i] = fcs_ecdsa_hash_sig_verify_param.crypto_param
+			& INTEL_SIP_SMC_FCS_ECC_ALGO_MASK;
+
+	/* Hash Data Word, Signature Data Word and Public Key Data word */
+	i++;
+	hash_sig_pubkey_addr = src_addr;
+	memcpy((uint8_t *) &payload[i],
+			(uint8_t *) hash_sig_pubkey_addr, src_size);
+
+	i += (src_size / MBOX_WORD_BYTE);
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ECDSA_HASH_SIG_VERIFY,
+			payload, i, CMD_CASUAL, (uint32_t *) dst_addr,
+			&resp_len);
+
+	memset((void *)&fcs_ecdsa_hash_sig_verify_param,
+			0, sizeof(fcs_crypto_service_data));
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_len * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_ecdsa_sha2_data_sign_init(uint32_t session_id,
+				uint32_t context_id, uint32_t key_id,
+				uint32_t param_size, uint64_t param_data,
+				uint32_t *mbox_error)
+{
+	return intel_fcs_crypto_service_init(session_id, context_id,
+				key_id, param_size, param_data,
+				(void *) &fcs_sha2_data_sign_param,
+				mbox_error);
+}
+
+int intel_fcs_ecdsa_sha2_data_sign_update_finalize(uint32_t session_id,
+				uint32_t context_id, uint32_t src_addr,
+				uint32_t src_size, uint64_t dst_addr,
+				uint32_t *dst_size, uint8_t is_finalised,
+				uint32_t *mbox_error)
+{
+	int status;
+	int i;
+	uint32_t flag;
+	uint32_t crypto_header;
+	uint32_t payload[FCS_ECDSA_SHA2_DATA_SIGN_CMD_MAX_WORD_SIZE] = {0U};
+	uint32_t resp_len;
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (fcs_sha2_data_sign_param.session_id != session_id ||
+		fcs_sha2_data_sign_param.context_id != context_id) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	/* Source data must be 8-byte aligned */
+	if (!is_8_bytes_aligned(src_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	resp_len = *dst_size / MBOX_WORD_BYTE;
+
+	/* Prepare crypto header */
+	flag = 0;
+	if (fcs_sha2_data_sign_param.is_updated) {
+		fcs_sha2_data_sign_param.crypto_param_size = 0;
+	} else {
+		flag |= FCS_CS_FIELD_FLAG_INIT;
+	}
+
+	if (is_finalised != 0U) {
+		flag |= FCS_CS_FIELD_FLAG_FINALIZE;
+	} else {
+		flag |= FCS_CS_FIELD_FLAG_UPDATE;
+		fcs_sha2_data_sign_param.is_updated = 1;
+	}
+	crypto_header = (flag << FCS_CS_FIELD_FLAG_OFFSET) |
+			fcs_sha2_data_sign_param.crypto_param_size;
+
+	/* Prepare command payload */
+	i = 0;
+	payload[i] = fcs_sha2_data_sign_param.session_id;
+	i++;
+	payload[i] = fcs_sha2_data_sign_param.context_id;
+	i++;
+	payload[i] = crypto_header;
+	i++;
+
+	if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+		FCS_CS_FIELD_FLAG_INIT) {
+		payload[i] = fcs_sha2_data_sign_param.key_id;
+		/* Crypto parameters */
+		i++;
+		payload[i] = fcs_sha2_data_sign_param.crypto_param
+				& INTEL_SIP_SMC_FCS_ECC_ALGO_MASK;
+		i++;
+	}
+
+	/* Data source address and size */
+	payload[i] = src_addr;
+	i++;
+	payload[i] = src_size;
+	i++;
+	status = mailbox_send_cmd(MBOX_JOB_ID,
+			MBOX_FCS_ECDSA_SHA2_DATA_SIGN_REQ, payload,
+			i, CMD_CASUAL, (uint32_t *) dst_addr,
+			&resp_len);
+
+	if (is_finalised != 0U) {
+		memset((void *)&fcs_sha2_data_sign_param, 0,
+			sizeof(fcs_crypto_service_data));
+	}
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_len * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_ecdsa_sha2_data_sig_verify_init(uint32_t session_id,
+				uint32_t context_id, uint32_t key_id,
+				uint32_t param_size, uint64_t param_data,
+				uint32_t *mbox_error)
+{
+	return intel_fcs_crypto_service_init(session_id, context_id,
+				key_id, param_size, param_data,
+				(void *) &fcs_sha2_data_sig_verify_param,
+				mbox_error);
+}
+
+int intel_fcs_ecdsa_sha2_data_sig_verify_update_finalize(uint32_t session_id,
+				uint32_t context_id, uint32_t src_addr,
+				uint32_t src_size, uint64_t dst_addr,
+				uint32_t *dst_size, uint32_t data_size,
+				uint8_t is_finalised, uint32_t *mbox_error)
+{
+	int status;
+	uint32_t i;
+	uint32_t flag;
+	uint32_t crypto_header;
+	uint32_t payload[FCS_ECDSA_SHA2_DATA_SIG_VERIFY_CMD_MAX_WORD_SIZE] = {0U};
+	uint32_t resp_len;
+	uintptr_t sig_pubkey_offset;
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (fcs_sha2_data_sig_verify_param.session_id != session_id ||
+		fcs_sha2_data_sig_verify_param.context_id != context_id) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_size_4_bytes_aligned(src_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_8_bytes_aligned(data_size) ||
+		!is_8_bytes_aligned(src_addr)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	resp_len = *dst_size / MBOX_WORD_BYTE;
+
+	/* Prepare crypto header */
+	flag = 0;
+	if (fcs_sha2_data_sig_verify_param.is_updated)
+		fcs_sha2_data_sig_verify_param.crypto_param_size = 0;
+	else
+		flag |= FCS_CS_FIELD_FLAG_INIT;
+
+	if (is_finalised != 0U)
+		flag |= FCS_CS_FIELD_FLAG_FINALIZE;
+	else {
+		flag |= FCS_CS_FIELD_FLAG_UPDATE;
+		fcs_sha2_data_sig_verify_param.is_updated = 1;
+	}
+	crypto_header = (flag << FCS_CS_FIELD_FLAG_OFFSET) |
+			fcs_sha2_data_sig_verify_param.crypto_param_size;
+
+	/* Prepare command payload */
+	i = 0;
+	payload[i] = fcs_sha2_data_sig_verify_param.session_id;
+	i++;
+	payload[i] = fcs_sha2_data_sig_verify_param.context_id;
+	i++;
+	payload[i] = crypto_header;
+	i++;
+
+	if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+		FCS_CS_FIELD_FLAG_INIT) {
+		payload[i] = fcs_sha2_data_sig_verify_param.key_id;
+		i++;
+		/* Crypto parameters */
+		payload[i] = fcs_sha2_data_sig_verify_param.crypto_param
+				& INTEL_SIP_SMC_FCS_ECC_ALGO_MASK;
+		i++;
+	}
+
+	/* Data source address and size */
+	payload[i] = src_addr;
+	i++;
+	payload[i] = data_size;
+	i++;
+
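+	/*
+	 * The source buffer holds data_size bytes of message followed by the
+	 * signature and public key, which are copied inline into the mailbox
+	 * payload on the FINALIZE call.
+	 */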
+	if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+		FCS_CS_FIELD_FLAG_FINALIZE) {
+		/* Signature + Public Key Data */
+		sig_pubkey_offset = src_addr + data_size;
+		memcpy((uint8_t *) &payload[i], (uint8_t *) sig_pubkey_offset,
+			src_size - data_size);
+
+		i += (src_size - data_size) / MBOX_WORD_BYTE;
+	}
+
+	status = mailbox_send_cmd(MBOX_JOB_ID,
+			MBOX_FCS_ECDSA_SHA2_DATA_SIGN_VERIFY, payload, i,
+			CMD_CASUAL, (uint32_t *) dst_addr, &resp_len);
+
+	if (is_finalised != 0U) {
+		memset((void *) &fcs_sha2_data_sig_verify_param, 0,
+			sizeof(fcs_crypto_service_data));
+	}
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_len * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_ecdsa_get_pubkey_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error)
+{
+	return intel_fcs_crypto_service_init(session_id, context_id,
+				key_id, param_size, param_data,
+				(void *) &fcs_ecdsa_get_pubkey_param,
+				mbox_error);
+}
+
+int intel_fcs_ecdsa_get_pubkey_finalize(uint32_t session_id, uint32_t context_id,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error)
+{
+	int status;
+	int i;
+	uint32_t crypto_header;
+	uint32_t ret_size;
+	uint32_t payload[FCS_ECDSA_GET_PUBKEY_MAX_WORD_SIZE] = {0U};
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (fcs_ecdsa_get_pubkey_param.session_id != session_id ||
+		fcs_ecdsa_get_pubkey_param.context_id != context_id) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	ret_size = *dst_size / MBOX_WORD_BYTE;
+
+	crypto_header = ((FCS_CS_FIELD_FLAG_INIT |
+			FCS_CS_FIELD_FLAG_UPDATE |
+			FCS_CS_FIELD_FLAG_FINALIZE) <<
+			FCS_CS_FIELD_FLAG_OFFSET) |
+			fcs_ecdsa_get_pubkey_param.crypto_param_size;
+	i = 0;
+	/* Prepare command payload */
+	payload[i] = session_id;
+	i++;
+	payload[i] = context_id;
+	i++;
+	payload[i] = crypto_header;
+	i++;
+	payload[i] = fcs_ecdsa_get_pubkey_param.key_id;
+	i++;
+	payload[i] = (uint32_t) fcs_ecdsa_get_pubkey_param.crypto_param &
+			INTEL_SIP_SMC_FCS_ECC_ALGO_MASK;
+	i++;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ECDSA_GET_PUBKEY,
+			payload, i, CMD_CASUAL,
+			(uint32_t *) dst_addr, &ret_size);
+
+	memset((void *) &fcs_ecdsa_get_pubkey_param, 0,
+		sizeof(fcs_crypto_service_data));
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = ret_size * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_ecdh_request_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error)
+{
+	return intel_fcs_crypto_service_init(session_id, context_id,
+				key_id, param_size, param_data,
+				(void *) &fcs_ecdh_request_param,
+				mbox_error);
+}
+
+int intel_fcs_ecdh_request_finalize(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error)
+{
+	int status;
+	uint32_t i;
+	uint32_t payload[FCS_ECDH_REQUEST_CMD_MAX_WORD_SIZE] = {0U};
+	uint32_t resp_len;
+	uintptr_t pubkey;
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (fcs_ecdh_request_param.session_id != session_id ||
+		fcs_ecdh_request_param.context_id != context_id) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	resp_len = *dst_size / MBOX_WORD_BYTE;
+
+	/* Prepare command payload */
+	i = 0;
+	/* Crypto header */
+	payload[i] = fcs_ecdh_request_param.session_id;
+	i++;
+	payload[i] = fcs_ecdh_request_param.context_id;
+	i++;
+	payload[i] = fcs_ecdh_request_param.crypto_param_size
+			& FCS_CS_FIELD_SIZE_MASK;
+	payload[i] |= (FCS_CS_FIELD_FLAG_INIT | FCS_CS_FIELD_FLAG_UPDATE
+			| FCS_CS_FIELD_FLAG_FINALIZE)
+			<< FCS_CS_FIELD_FLAG_OFFSET;
+	i++;
+	payload[i] = fcs_ecdh_request_param.key_id;
+	i++;
+	/* Crypto parameters */
+	payload[i] = fcs_ecdh_request_param.crypto_param
+			& INTEL_SIP_SMC_FCS_ECC_ALGO_MASK;
+	i++;
+	/* Public key data */
+	pubkey = src_addr;
+	memcpy((uint8_t *) &payload[i], (uint8_t *) pubkey, src_size);
+	i += src_size / MBOX_WORD_BYTE;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ECDH_REQUEST,
+			payload, i, CMD_CASUAL, (uint32_t *) dst_addr,
+			&resp_len);
+
+	memset((void *)&fcs_ecdh_request_param, 0,
+			sizeof(fcs_crypto_service_data));
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_len * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
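+/*
+ * AES crypt is a two-step service: the init call only caches the session,
+ * context, key id and crypto parameters in fcs_aes_init_payload; the actual
+ * mailbox command is issued asynchronously by the update/finalize call.
+ */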
+int intel_fcs_aes_crypt_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint64_t param_addr,
+				uint32_t param_size, uint32_t *mbox_error)
+{
+	if (mbox_error == NULL) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	memset((void *)&fcs_aes_init_payload, 0U, sizeof(fcs_aes_init_payload));
+
+	fcs_aes_init_payload.session_id = session_id;
+	fcs_aes_init_payload.context_id = context_id;
+	fcs_aes_init_payload.param_size = param_size;
+	fcs_aes_init_payload.key_id	= key_id;
+
+	memcpy((uint8_t *) fcs_aes_init_payload.crypto_param,
+		(uint8_t *) param_addr, param_size);
+
+	fcs_aes_init_payload.is_updated = 0;
+
+	*mbox_error = 0;
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_aes_crypt_update_finalize(uint32_t session_id,
+				uint32_t context_id, uint64_t src_addr,
+				uint32_t src_size, uint64_t dst_addr,
+				uint32_t dst_size, uint8_t is_finalised,
+				uint32_t *send_id)
+{
+	int status;
+	int i;
+	uint32_t flag;
+	uint32_t crypto_header;
+	uint32_t fcs_aes_crypt_payload[FCS_AES_CMD_MAX_WORD_SIZE];
+
+	if (fcs_aes_init_payload.session_id != session_id ||
+		fcs_aes_init_payload.context_id != context_id) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if ((!is_8_bytes_aligned(src_addr)) ||
+		(!is_32_bytes_aligned(src_size)) ||
+		(!is_address_in_ddr_range(src_addr, src_size))) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if ((!is_8_bytes_aligned(dst_addr)) ||
+		(!is_32_bytes_aligned(dst_size))) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if ((dst_size > FCS_AES_MAX_DATA_SIZE ||
+		dst_size < FCS_AES_MIN_DATA_SIZE) ||
+		(src_size > FCS_AES_MAX_DATA_SIZE ||
+		src_size < FCS_AES_MIN_DATA_SIZE)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	/* Prepare crypto header */
+	flag = 0;
+	if (fcs_aes_init_payload.is_updated) {
+		fcs_aes_init_payload.param_size = 0;
+	} else {
+		flag |= FCS_CS_FIELD_FLAG_INIT;
+	}
+
+	if (is_finalised != 0U) {
+		flag |= FCS_CS_FIELD_FLAG_FINALIZE;
+	} else {
+		flag |= FCS_CS_FIELD_FLAG_UPDATE;
+		fcs_aes_init_payload.is_updated = 1;
+	}
+	crypto_header = (flag << FCS_CS_FIELD_FLAG_OFFSET) |
+			fcs_aes_init_payload.param_size;
+
+	i = 0U;
+	fcs_aes_crypt_payload[i] = session_id;
+	i++;
+	fcs_aes_crypt_payload[i] = context_id;
+	i++;
+	fcs_aes_crypt_payload[i] = crypto_header;
+	i++;
+
+	if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+		FCS_CS_FIELD_FLAG_INIT) {
+		fcs_aes_crypt_payload[i] = fcs_aes_init_payload.key_id;
+		i++;
+
+		memcpy((uint8_t *) &fcs_aes_crypt_payload[i],
+			(uint8_t *) fcs_aes_init_payload.crypto_param,
+			fcs_aes_init_payload.param_size);
+
+		i += fcs_aes_init_payload.param_size / MBOX_WORD_BYTE;
+	}
+
+	fcs_aes_crypt_payload[i] = (uint32_t) src_addr;
+	i++;
+	fcs_aes_crypt_payload[i] = src_size;
+	i++;
+	fcs_aes_crypt_payload[i] = (uint32_t) dst_addr;
+	i++;
+	fcs_aes_crypt_payload[i] = dst_size;
+	i++;
+
+	status = mailbox_send_cmd_async(send_id, MBOX_FCS_AES_CRYPT_REQ,
+					fcs_aes_crypt_payload, i,
+					CMD_INDIRECT);
+
+	if (is_finalised != 0U) {
+		memset((void *)&fcs_aes_init_payload, 0,
+			sizeof(fcs_aes_init_payload));
+	}
+
+	if (status < 0) {
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
diff --git a/plat/intel/soc/common/soc/socfpga_mailbox.c b/plat/intel/soc/common/soc/socfpga_mailbox.c
index 8ecd6db..778d4af 100644
--- a/plat/intel/soc/common/soc/socfpga_mailbox.c
+++ b/plat/intel/soc/common/soc/socfpga_mailbox.c
@@ -11,6 +11,8 @@
 #include "socfpga_mailbox.h"
 #include "socfpga_sip_svc.h"
 
+static mailbox_payload_t mailbox_resp_payload;
+static mailbox_container_t mailbox_resp_ctr = {0, 0, &mailbox_resp_payload};
 
 static bool is_mailbox_cmdbuf_full(uint32_t cin)
 {
@@ -171,6 +173,95 @@
 	return MBOX_NO_RESPONSE;
 }
 
+int mailbox_read_response_async(unsigned int *job_id, uint32_t *header,
+				uint32_t *response, unsigned int *resp_len,
+				uint8_t ignore_client_id)
+{
+	uint32_t rin;
+	uint32_t rout;
+	uint32_t resp_data;
+	uint32_t ret_resp_len = 0;
+	uint8_t is_done = 0;
+
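+	/*
+	 * A response may arrive across several doorbell events. The static
+	 * mailbox_resp_ctr keeps the partially read payload between calls,
+	 * and MBOX_BUSY is returned until the full response length has been
+	 * drained from the response buffer.
+	 */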
+	if ((mailbox_resp_ctr.flag & MBOX_PAYLOAD_FLAG_BUSY) != 0) {
+		ret_resp_len = MBOX_RESP_LEN(
+				mailbox_resp_ctr.payload->header) -
+				mailbox_resp_ctr.index;
+	}
+
+	if (mmio_read_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM) == 1U) {
+		mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM, 0U);
+	}
+
+	rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN);
+	rout = mmio_read_32(MBOX_OFFSET + MBOX_ROUT);
+
+	while (rout != rin && !is_done) {
+
+		resp_data = mmio_read_32(MBOX_ENTRY_TO_ADDR(RESP, (rout)++));
+
+		rout %= MBOX_RESP_BUFFER_SIZE;
+		mmio_write_32(MBOX_OFFSET + MBOX_ROUT, rout);
+		rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN);
+
+		if ((mailbox_resp_ctr.flag & MBOX_PAYLOAD_FLAG_BUSY) != 0) {
+			mailbox_resp_ctr.payload->data[mailbox_resp_ctr.index] = resp_data;
+			mailbox_resp_ctr.index++;
+			ret_resp_len--;
+		} else {
+			if (!ignore_client_id) {
+				if (MBOX_RESP_CLIENT_ID(resp_data) != MBOX_ATF_CLIENT_ID) {
+					*resp_len = 0;
+					return MBOX_WRONG_ID;
+				}
+			}
+
+			*job_id = MBOX_RESP_JOB_ID(resp_data);
+			ret_resp_len = MBOX_RESP_LEN(resp_data);
+			mailbox_resp_ctr.payload->header = resp_data;
+			mailbox_resp_ctr.flag |= MBOX_PAYLOAD_FLAG_BUSY;
+		}
+
+		if (ret_resp_len == 0) {
+			is_done = 1;
+		}
+	}
+
+	if (is_done != 0) {
+
+		/* copy header data to input address if applicable */
+		if (header != NULL) {
+			*header = mailbox_resp_ctr.payload->header;
+		}
+
+		/* copy response data to input buffer if applicable */
+		ret_resp_len = MBOX_RESP_LEN(mailbox_resp_ctr.payload->header);
+		if ((ret_resp_len > 0) && (response != NULL) && (resp_len != NULL)) {
+			if (*resp_len > ret_resp_len) {
+				*resp_len = ret_resp_len;
+			}
+
+			memcpy((uint8_t *) response,
+				(uint8_t *) mailbox_resp_ctr.payload->data,
+				*resp_len * MBOX_WORD_BYTE);
+		}
+
+		/* reset async response param */
+		mailbox_resp_ctr.index = 0;
+		mailbox_resp_ctr.flag = 0;
+
+		if (MBOX_RESP_ERR(mailbox_resp_ctr.payload->header) > 0U) {
+			INFO("Error in async response: %x\n",
+				mailbox_resp_ctr.payload->header);
+			return -MBOX_RESP_ERR(mailbox_resp_ctr.payload->header);
+		}
+
+		return MBOX_RET_OK;
+	}
+
+	*resp_len = 0;
+	return (mailbox_resp_ctr.flag & MBOX_PAYLOAD_FLAG_BUSY) ? MBOX_BUSY : MBOX_NO_RESPONSE;
+}
 
 int mailbox_poll_response(uint32_t job_id, uint32_t urgent, uint32_t *response,
 				unsigned int *resp_len)
@@ -294,6 +385,12 @@
 	return MBOX_RET_OK;
 }
 
+int mailbox_send_cmd_async_ext(uint32_t header_cmd, uint32_t *args,
+			unsigned int len)
+{
+	return fill_mailbox_circular_buffer(header_cmd, args, len);
+}
+
 int mailbox_send_cmd_async(uint32_t *job_id, uint32_t cmd, uint32_t *args,
 			  unsigned int len, unsigned int indirect)
 {
diff --git a/plat/intel/soc/common/soc/socfpga_reset_manager.c b/plat/intel/soc/common/soc/socfpga_reset_manager.c
index b0de60e..bb4efab 100644
--- a/plat/intel/soc/common/soc/socfpga_reset_manager.c
+++ b/plat/intel/soc/common/soc/socfpga_reset_manager.c
@@ -4,10 +4,12 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#include <common/debug.h>
 #include <errno.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
 #include <lib/mmio.h>
 
+#include "socfpga_f2sdram_manager.h"
 #include "socfpga_mailbox.h"
 #include "socfpga_reset_manager.h"
 #include "socfpga_system_manager.h"
@@ -89,58 +91,241 @@
 
 static int poll_idle_status(uint32_t addr, uint32_t mask, uint32_t match)
 {
-	int time_out = 1000;
+	int time_out = 300;
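+	/* Each iteration delays 1 ms, giving a timeout of roughly 300 ms */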
 
 	while (time_out--) {
 		if ((mmio_read_32(addr) & mask) == match) {
 			return 0;
 		}
+		udelay(1000);
 	}
 	return -ETIMEDOUT;
 }
 
+static void socfpga_s2f_bridge_mask(uint32_t mask,
+				uint32_t *brg_mask,
+				uint32_t *noc_mask)
+{
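+	/*
+	 * Translate the SMC-level bridge selection mask into reset manager
+	 * BRGMODRST bits and NOC idle bits for the SOC2FPGA and LWHPS2FPGA
+	 * bridges.
+	 */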
+	*brg_mask = 0;
+	*noc_mask = 0;
+
+	if ((mask & SOC2FPGA_MASK) != 0U) {
+		*brg_mask |= RSTMGR_FIELD(BRG, SOC2FPGA);
+		*noc_mask |= IDLE_DATA_SOC2FPGA;
+	}
+
+	if ((mask & LWHPS2FPGA_MASK) != 0U) {
+		*brg_mask |= RSTMGR_FIELD(BRG, LWHPS2FPGA);
+		*noc_mask |= IDLE_DATA_LWSOC2FPGA;
+	}
+}
+
-int socfpga_bridges_enable(void)
+static void socfpga_f2s_bridge_mask(uint32_t mask,
+				uint32_t *brg_mask,
+				uint32_t *f2s_idlereq,
+				uint32_t *f2s_force_drain,
+				uint32_t *f2s_en,
+				uint32_t *f2s_idleack,
+				uint32_t *f2s_respempty)
 {
-	/* Clear idle request */
-	mmio_setbits_32(SOCFPGA_SYSMGR(NOC_IDLEREQ_CLR), ~0);
+	*brg_mask = 0;
+	*f2s_idlereq = 0;
+	*f2s_force_drain = 0;
+	*f2s_en = 0;
+	*f2s_idleack = 0;
+	*f2s_respempty = 0;
+
+#if PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10
+	if ((mask & FPGA2SOC_MASK) != 0U) {
+		*brg_mask |= RSTMGR_FIELD(BRG, FPGA2SOC);
+	}
+	if ((mask & F2SDRAM0_MASK) != 0U) {
+		*brg_mask |= RSTMGR_FIELD(BRG, F2SSDRAM0);
+		*f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM0_IDLEREQ;
+		*f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM0_FORCE_DRAIN;
+		*f2s_en |= FLAGOUTSETCLR_F2SDRAM0_ENABLE;
+		*f2s_idleack |= FLAGINTSTATUS_F2SDRAM0_IDLEACK;
+		*f2s_respempty |= FLAGINTSTATUS_F2SDRAM0_RESPEMPTY;
+	}
+	if ((mask & F2SDRAM1_MASK) != 0U) {
+		*brg_mask |= RSTMGR_FIELD(BRG, F2SSDRAM1);
+		*f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM1_IDLEREQ;
+		*f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM1_FORCE_DRAIN;
+		*f2s_en |= FLAGOUTSETCLR_F2SDRAM1_ENABLE;
+		*f2s_idleack |= FLAGINTSTATUS_F2SDRAM1_IDLEACK;
+		*f2s_respempty |= FLAGINTSTATUS_F2SDRAM1_RESPEMPTY;
+	}
+	if ((mask & F2SDRAM2_MASK) != 0U) {
+		*brg_mask |= RSTMGR_FIELD(BRG, F2SSDRAM2);
+		*f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM2_IDLEREQ;
+		*f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM2_FORCE_DRAIN;
+		*f2s_en |= FLAGOUTSETCLR_F2SDRAM2_ENABLE;
+		*f2s_idleack |= FLAGINTSTATUS_F2SDRAM2_IDLEACK;
+		*f2s_respempty |= FLAGINTSTATUS_F2SDRAM2_RESPEMPTY;
+	}
+#else
+	if ((mask & FPGA2SOC_MASK) != 0U) {
+		*brg_mask |= RSTMGR_FIELD(BRG, FPGA2SOC);
+		*f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM0_IDLEREQ;
+		*f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM0_FORCE_DRAIN;
+		*f2s_en |= FLAGOUTSETCLR_F2SDRAM0_ENABLE;
+		*f2s_idleack |= FLAGINTSTATUS_F2SDRAM0_IDLEACK;
+		*f2s_respempty |= FLAGINTSTATUS_F2SDRAM0_RESPEMPTY;
+	}
+#endif
+}
+
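+/*
+ * Enable the requested bridges: for s2f, clear the NOC idle request and
+ * de-assert the bridge resets; for f2s, de-assert the resets and run the
+ * idle-ack, force-drain and enable sequence.
+ */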
+int socfpga_bridges_enable(uint32_t mask)
+{
+	int ret = 0;
+	uint32_t brg_mask = 0;
+	uint32_t noc_mask = 0;
+	uint32_t f2s_idlereq = 0;
+	uint32_t f2s_force_drain = 0;
+	uint32_t f2s_en = 0;
+	uint32_t f2s_idleack = 0;
+	uint32_t f2s_respempty = 0;
+
+	/* Enable s2f bridge */
+	socfpga_s2f_bridge_mask(mask, &brg_mask, &noc_mask);
+	if (brg_mask != 0U) {
+		/* Clear idle request */
+		mmio_setbits_32(SOCFPGA_SYSMGR(NOC_IDLEREQ_CLR),
+				noc_mask);
+
+		/* De-assert all bridges */
+		mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), brg_mask);
 
-	/* De-assert all bridges */
-	mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), ~0);
+		/* Wait until idle ack becomes 0 */
+		ret = poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLEACK),
+						noc_mask, 0);
+		if (ret < 0) {
+			ERROR("S2F bridge enable: "
+					"Timeout waiting for idle ack\n");
+		}
+	}
+
+	/* Enable f2s bridge */
+	socfpga_f2s_bridge_mask(mask, &brg_mask, &f2s_idlereq,
+						&f2s_force_drain, &f2s_en,
+						&f2s_idleack, &f2s_respempty);
+	if (brg_mask != 0U) {
+		mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), brg_mask);
+
+		mmio_clrbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+			f2s_idlereq);
+
+		ret = poll_idle_status(SOCFPGA_F2SDRAMMGR(
+			SIDEBANDMGR_FLAGINSTATUS0), f2s_idleack, 0);
+		if (ret < 0) {
+			ERROR("F2S bridge enable: "
+					"Timeout waiting for idle ack");
+		}
 
-	/* Wait until idle ack becomes 0 */
-	return poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLEACK),
-				IDLE_DATA_MASK, 0);
+		mmio_clrbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+			f2s_force_drain);
+		udelay(5);
+
+		mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+			f2s_en);
+		udelay(5);
+	}
+
+	return ret;
 }
 
-int socfpga_bridges_disable(void)
+int socfpga_bridges_disable(uint32_t mask)
 {
-	/* Set idle request */
-	mmio_write_32(SOCFPGA_SYSMGR(NOC_IDLEREQ_SET), ~0);
+	int ret = 0;
+	int timeout = 300;
+	uint32_t brg_mask = 0;
+	uint32_t noc_mask = 0;
+	uint32_t f2s_idlereq = 0;
+	uint32_t f2s_force_drain = 0;
+	uint32_t f2s_en = 0;
+	uint32_t f2s_idleack = 0;
+	uint32_t f2s_respempty = 0;
 
-	/* Enable NOC timeout */
-	mmio_setbits_32(SOCFPGA_SYSMGR(NOC_TIMEOUT), 1);
+	/* Disable s2f bridge */
+	socfpga_s2f_bridge_mask(mask, &brg_mask, &noc_mask);
+	if (brg_mask != 0U) {
+		mmio_setbits_32(SOCFPGA_SYSMGR(NOC_IDLEREQ_SET),
+				noc_mask);
 
-	/* Wait until each idle ack bit toggle to 1 */
-	if (poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLEACK),
-				IDLE_DATA_MASK, IDLE_DATA_MASK))
-		return -ETIMEDOUT;
+		mmio_write_32(SOCFPGA_SYSMGR(NOC_TIMEOUT), 1);
 
-	/* Wait until each idle status bit toggle to 1 */
-	if (poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLESTATUS),
-				IDLE_DATA_MASK, IDLE_DATA_MASK))
-		return -ETIMEDOUT;
+		ret = poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLEACK),
+						noc_mask, noc_mask);
+		if (ret < 0) {
+			ERROR("S2F Bridge disable: "
+					"Timeout waiting for idle ack\n");
+		}
+
+		ret = poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLESTATUS),
+						noc_mask, noc_mask);
+		if (ret < 0) {
+			ERROR("S2F Bridge disable: "
+					"Timeout waiting for idle status\n");
+		}
 
-	/* Assert all bridges */
+		mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST), brg_mask);
+
+		mmio_write_32(SOCFPGA_SYSMGR(NOC_TIMEOUT), 0);
+	}
+
+	/* Disable f2s bridge */
+	socfpga_f2s_bridge_mask(mask, &brg_mask, &f2s_idlereq,
+						&f2s_force_drain, &f2s_en,
+						&f2s_idleack, &f2s_respempty);
+	if (brg_mask != 0U) {
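+		/*
+		 * F2S disable handshake: enable and request the FPGA
+		 * handshake, wait for the ack, disable traffic, force a
+		 * drain, then poll until the response queue is empty before
+		 * asserting the bridge resets.
+		 */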
+		mmio_setbits_32(SOCFPGA_RSTMGR(HDSKEN),
+				RSTMGR_HDSKEN_FPGAHSEN);
+
+		mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ),
+				RSTMGR_HDSKREQ_FPGAHSREQ);
+
+		poll_idle_status(SOCFPGA_RSTMGR(HDSKACK),
+				RSTMGR_HDSKACK_FPGAHSACK_MASK,
+				RSTMGR_HDSKACK_FPGAHSACK_MASK);
+
+		mmio_clrbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+				f2s_en);
+		udelay(5);
+
+		mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+				f2s_force_drain);
+		udelay(5);
+
+		do {
+			/* Read response queue status to ensure it is empty */
+			uint32_t idle_status;
+
+			idle_status = mmio_read_32(SOCFPGA_F2SDRAMMGR(
+				SIDEBANDMGR_FLAGINSTATUS0));
+			if ((idle_status & f2s_respempty) != 0U) {
+				idle_status = mmio_read_32(SOCFPGA_F2SDRAMMGR(
+					SIDEBANDMGR_FLAGINSTATUS0));
+				if ((idle_status & f2s_respempty) != 0U) {
+					break;
+				}
+			}
+			udelay(1000);
+		} while (timeout-- > 0);
+
 #if PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10
-	mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST),
-		~(RSTMGR_FIELD(BRG, DDRSCH) | RSTMGR_FIELD(BRG, FPGA2SOC)));
+		/* Software must never set the FPGA2SOC reset bit in BRGMODRST */
+		mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST),
+				brg_mask & ~RSTMGR_FIELD(BRG, FPGA2SOC));
 #else
-	mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST),
-		~(RSTMGR_FIELD(BRG, MPFE) | RSTMGR_FIELD(BRG, FPGA2SOC)));
+		mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST),
+				brg_mask);
 #endif
+		mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ),
+				RSTMGR_HDSKEQ_FPGAHSREQ);
 
-	/* Disable NOC timeout */
-	mmio_clrbits_32(SOCFPGA_SYSMGR(NOC_TIMEOUT), 1);
+		mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTCLR0),
+				f2s_idlereq);
+	}
 
-	return 0;
+	return ret;
 }
diff --git a/plat/intel/soc/common/socfpga_delay_timer.c b/plat/intel/soc/common/socfpga_delay_timer.c
index 957738c..dcd51e2 100644
--- a/plat/intel/soc/common/socfpga_delay_timer.c
+++ b/plat/intel/soc/common/socfpga_delay_timer.c
@@ -36,7 +36,6 @@
 
 	timer_init(&plat_timer_ops);
 
-	NOTICE("BL31: MPU clock frequency: %d MHz\n", plat_timer_ops.clk_div);
 }
 
 void socfpga_delay_timer_init(void)
diff --git a/plat/intel/soc/common/socfpga_sip_svc.c b/plat/intel/soc/common/socfpga_sip_svc.c
index f22c2ee..f079349 100644
--- a/plat/intel/soc/common/socfpga_sip_svc.c
+++ b/plat/intel/soc/common/socfpga_sip_svc.c
@@ -19,6 +19,7 @@
 /* Total buffer the driver can hold */
 #define FPGA_CONFIG_BUFFER_SIZE 4
 
+static config_type request_type = NO_REQUEST;
 static int current_block, current_buffer;
 static int read_block, max_blocks;
 static uint32_t send_id, rcv_id;
@@ -27,10 +28,8 @@
 
 /* RSU static variables */
 static uint32_t rsu_dcmf_ver[4] = {0};
-
-/* RSU Max Retry */
-static uint32_t rsu_max_retry;
 static uint16_t rsu_dcmf_stat[4] = {0};
+static uint32_t rsu_max_retry;
 
 /*  SiP Service UUID */
 DEFINE_SVC_UUID2(intl_svc_uid,
@@ -63,8 +62,9 @@
 			args[2] = buffer->size - buffer->size_written;
 			current_buffer++;
 			current_buffer %= FPGA_CONFIG_BUFFER_SIZE;
-		} else
+		} else {
 			args[2] = bytes_per_block;
+		}
 
 		buffer->size_written += args[2];
 		mailbox_send_cmd_async(&send_id, MBOX_RECONFIG_DATA, args,
@@ -79,35 +79,48 @@
 
 static int intel_fpga_sdm_write_all(void)
 {
-	for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++)
+	for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
 		if (intel_fpga_sdm_write_buffer(
-			&fpga_config_buffers[current_buffer]))
+			&fpga_config_buffers[current_buffer])) {
 			break;
+		}
+	}
 	return 0;
 }
 
-static uint32_t intel_mailbox_fpga_config_isdone(uint32_t query_type)
+static uint32_t intel_mailbox_fpga_config_isdone(void)
 {
 	uint32_t ret;
 
-	if (query_type == 1U) {
-		ret = intel_mailbox_get_config_status(MBOX_CONFIG_STATUS, false);
-	} else {
-		ret = intel_mailbox_get_config_status(MBOX_RECONFIG_STATUS, true);
+	switch (request_type) {
+	case RECONFIGURATION:
+		ret = intel_mailbox_get_config_status(MBOX_RECONFIG_STATUS,
+							true);
+		break;
+	case BITSTREAM_AUTH:
+		ret = intel_mailbox_get_config_status(MBOX_RECONFIG_STATUS,
+							false);
+		break;
+	default:
+		ret = intel_mailbox_get_config_status(MBOX_CONFIG_STATUS,
+							false);
+		break;
 	}
 
 	if (ret != 0U) {
 		if (ret == MBOX_CFGSTAT_STATE_CONFIG) {
 			return INTEL_SIP_SMC_STATUS_BUSY;
 		} else {
+			request_type = NO_REQUEST;
 			return INTEL_SIP_SMC_STATUS_ERROR;
 		}
 	}
 
-	if (bridge_disable) {
-		socfpga_bridges_enable();	/* Enable bridge */
+	if (bridge_disable != 0U) {
+		socfpga_bridges_enable(~0);	/* Enable bridge */
 		bridge_disable = false;
 	}
+	request_type = NO_REQUEST;
 
 	return INTEL_SIP_SMC_STATUS_OK;
 }
@@ -166,6 +179,7 @@
 		if (status != MBOX_NO_RESPONSE &&
 			status != MBOX_TIMEOUT && resp_len != 0) {
 			mailbox_clear_response();
+			request_type = NO_REQUEST;
 			return INTEL_SIP_SMC_STATUS_ERROR;
 		}
 
@@ -174,10 +188,11 @@
 
 	intel_fpga_sdm_write_all();
 
-	if (*count > 0)
+	if (*count > 0) {
 		status = INTEL_SIP_SMC_STATUS_OK;
-	else if (*count == 0)
+	} else if (*count == 0) {
 		status = INTEL_SIP_SMC_STATUS_BUSY;
+	}
 
 	for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
 		if (fpga_config_buffers[i].write_requested != 0) {
@@ -186,8 +201,9 @@
 		}
 	}
 
-	if (all_completed == 1)
+	if (all_completed == 1) {
 		return INTEL_SIP_SMC_STATUS_OK;
+	}
 
 	return status;
 }
@@ -200,6 +216,8 @@
 	unsigned int size = 0;
 	unsigned int resp_len = ARRAY_SIZE(response);
 
+	request_type = RECONFIGURATION;
+
 	if (!CONFIG_TEST_FLAG(flag, PARTIAL_CONFIG)) {
 		bridge_disable = true;
 	}
@@ -207,6 +225,7 @@
 	if (CONFIG_TEST_FLAG(flag, AUTHENTICATION)) {
 		size = 1;
 		bridge_disable = false;
+		request_type = BITSTREAM_AUTH;
 	}
 
 	mailbox_clear_response();
@@ -219,6 +238,7 @@
 
 	if (status < 0) {
 		bridge_disable = false;
+		request_type = NO_REQUEST;
 		return INTEL_SIP_SMC_STATUS_ERROR;
 	}
 
@@ -241,7 +261,7 @@
 
 	/* Disable bridge on full reconfiguration */
 	if (bridge_disable) {
-		socfpga_bridges_disable();
+		socfpga_bridges_disable(~0);
 	}
 
 	return INTEL_SIP_SMC_STATUS_OK;
@@ -249,9 +269,11 @@
 
 static bool is_fpga_config_buffer_full(void)
 {
-	for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++)
-		if (!fpga_config_buffers[i].write_requested)
+	for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
+		if (!fpga_config_buffers[i].write_requested) {
 			return false;
+		}
+	}
 	return true;
 }
 
@@ -260,12 +282,15 @@
 	if (!addr && !size) {
 		return true;
 	}
-	if (size > (UINT64_MAX - addr))
+	if (size > (UINT64_MAX - addr)) {
 		return false;
-	if (addr < BL31_LIMIT)
+	}
+	if (addr < BL31_LIMIT) {
 		return false;
-	if (addr + size > DRAM_BASE + DRAM_SIZE)
+	}
+	if (addr + size > DRAM_BASE + DRAM_SIZE) {
 		return false;
+	}
 
 	return true;
 }
@@ -349,8 +374,9 @@
 /* Secure register access */
 uint32_t intel_secure_reg_read(uint64_t reg_addr, uint32_t *retval)
 {
-	if (is_out_of_sec_range(reg_addr))
+	if (is_out_of_sec_range(reg_addr)) {
 		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
 
 	*retval = mmio_read_32(reg_addr);
 
@@ -360,8 +386,9 @@
 uint32_t intel_secure_reg_write(uint64_t reg_addr, uint32_t val,
 				uint32_t *retval)
 {
-	if (is_out_of_sec_range(reg_addr))
+	if (is_out_of_sec_range(reg_addr)) {
 		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
 
 	mmio_write_32(reg_addr, val);
 
@@ -385,8 +412,9 @@
 
 static uint32_t intel_rsu_status(uint64_t *respbuf, unsigned int respbuf_sz)
 {
-	if (mailbox_rsu_status((uint32_t *)respbuf, respbuf_sz) < 0)
+	if (mailbox_rsu_status((uint32_t *)respbuf, respbuf_sz) < 0) {
 		return INTEL_SIP_SMC_RSU_ERROR;
+	}
 
 	return INTEL_SIP_SMC_STATUS_OK;
 }
@@ -399,8 +427,9 @@
 
 static uint32_t intel_rsu_notify(uint32_t execution_stage)
 {
-	if (mailbox_hps_stage_notify(execution_stage) < 0)
+	if (mailbox_hps_stage_notify(execution_stage) < 0) {
 		return INTEL_SIP_SMC_RSU_ERROR;
+	}
 
 	return INTEL_SIP_SMC_STATUS_OK;
 }
@@ -408,8 +437,9 @@
 static uint32_t intel_rsu_retry_counter(uint32_t *respbuf, uint32_t respbuf_sz,
 					uint32_t *ret_stat)
 {
-	if (mailbox_rsu_status((uint32_t *)respbuf, respbuf_sz) < 0)
+	if (mailbox_rsu_status((uint32_t *)respbuf, respbuf_sz) < 0) {
 		return INTEL_SIP_SMC_RSU_ERROR;
+	}
 
 	*ret_stat = respbuf[8];
 	return INTEL_SIP_SMC_STATUS_OK;
@@ -487,19 +517,19 @@
 }
 
 static uint32_t intel_mbox_send_cmd(uint32_t cmd, uint32_t *args,
-				unsigned int len,
-				uint32_t urgent, uint32_t *response,
+				unsigned int len, uint32_t urgent, uint64_t response,
 				unsigned int resp_len, int *mbox_status,
 				unsigned int *len_in_resp)
 {
 	*len_in_resp = 0;
-	*mbox_status = 0;
+	*mbox_status = GENERIC_RESPONSE_ERROR;
 
-	if (!is_address_in_ddr_range((uint64_t)args, sizeof(uint32_t) * len))
+	if (!is_address_in_ddr_range((uint64_t)args, sizeof(uint32_t) * len)) {
 		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
 
 	int status = mailbox_send_cmd(MBOX_JOB_ID, cmd, args, len, urgent,
-				      response, &resp_len);
+					(uint32_t *) response, &resp_len);
 
 	if (status < 0) {
 		*mbox_status = -status;
@@ -508,6 +538,9 @@
 
 	*mbox_status = 0;
 	*len_in_resp = resp_len;
+
+	flush_dcache_range(response, resp_len * MBOX_WORD_BYTE);
+
 	return INTEL_SIP_SMC_STATUS_OK;
 }
 
@@ -526,13 +559,73 @@
 	return INTEL_SIP_SMC_STATUS_OK;
 }
 
+uint32_t intel_smc_service_completed(uint64_t addr, uint32_t size,
+				uint32_t mode, uint32_t *job_id,
+				uint32_t *ret_size, uint32_t *mbox_error)
+{
+	int status = 0;
+	uint32_t resp_len = size / MBOX_WORD_BYTE;
+
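+	/*
+	 * Poll for the response of a previously issued mailbox command and
+	 * copy it into the caller supplied buffer; mode selects between the
+	 * asynchronous and the normal read path.
+	 */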
+	if (resp_len > MBOX_DATA_MAX_LEN) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(addr, size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (mode == SERVICE_COMPLETED_MODE_ASYNC) {
+		status = mailbox_read_response_async(job_id,
+				NULL, (uint32_t *) addr, &resp_len, 0);
+	} else {
+		status = mailbox_read_response(job_id,
+				(uint32_t *) addr, &resp_len);
+
+		if (status == MBOX_NO_RESPONSE) {
+			status = MBOX_BUSY;
+		}
+	}
+
+	if (status == MBOX_NO_RESPONSE) {
+		return INTEL_SIP_SMC_STATUS_NO_RESPONSE;
+	}
+
+	if (status == MBOX_BUSY) {
+		return INTEL_SIP_SMC_STATUS_BUSY;
+	}
+
+	*ret_size = resp_len * MBOX_WORD_BYTE;
+	flush_dcache_range(addr, *ret_size);
+
+	if (status != MBOX_RET_OK) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
 /* Miscellaneous HPS services */
-static uint32_t intel_hps_set_bridges(uint64_t enable)
+uint32_t intel_hps_set_bridges(uint64_t enable, uint64_t mask)
 {
-	if (enable != 0U) {
-		socfpga_bridges_enable();
+	int status = 0;
+
+	if ((enable & SOCFPGA_BRIDGE_ENABLE) != 0U) {
+		if ((enable & SOCFPGA_BRIDGE_HAS_MASK) != 0U) {
+			status = socfpga_bridges_enable((uint32_t)mask);
+		} else {
+			status = socfpga_bridges_enable(~0);
+		}
 	} else {
-		socfpga_bridges_disable();
+		if ((enable & SOCFPGA_BRIDGE_HAS_MASK) != 0U) {
+			status = socfpga_bridges_disable((uint32_t)mask);
+		} else {
+			status = socfpga_bridges_disable(~0);
+		}
+	}
+
+	if (status < 0) {
+		return INTEL_SIP_SMC_STATUS_ERROR;
 	}
 
 	return INTEL_SIP_SMC_STATUS_OK;
@@ -542,7 +635,7 @@
  * This function is responsible for handling all SiP calls from the NS world
  */
 
-uintptr_t sip_smc_handler(uint32_t smc_fid,
+uintptr_t sip_smc_handler_v1(uint32_t smc_fid,
 			 u_register_t x1,
 			 u_register_t x2,
 			 u_register_t x3,
@@ -551,14 +644,14 @@
 			 void *handle,
 			 u_register_t flags)
 {
-	uint32_t retval = 0;
+	uint32_t retval = 0, completed_addr[3];
+	uint32_t retval2 = 0;
 	uint32_t mbox_error = 0;
-	uint32_t completed_addr[3];
 	uint64_t retval64, rsu_respbuf[9];
 	int status = INTEL_SIP_SMC_STATUS_OK;
 	int mbox_status;
 	unsigned int len_in_resp;
-	u_register_t x5, x6;
+	u_register_t x5, x6, x7;
 
 	switch (smc_fid) {
 	case SIP_SVC_UID:
@@ -566,7 +659,7 @@
 		SMC_UUID_RET(handle, intl_svc_uid);
 
 	case INTEL_SIP_SMC_FPGA_CONFIG_ISDONE:
-		status = intel_mailbox_fpga_config_isdone(x1);
+		status = intel_mailbox_fpga_config_isdone();
 		SMC_RET4(handle, status, 0, 0, 0);
 
 	case INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM:
@@ -681,6 +774,11 @@
 		status = intel_ecc_dbe_notification(x1);
 		SMC_RET1(handle, status);
 
+	case INTEL_SIP_SMC_SERVICE_COMPLETED:
+		status = intel_smc_service_completed(x1, x2, x3, &rcv_id,
+						&len_in_resp, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x1, len_in_resp);
+
 	case INTEL_SIP_SMC_FIRMWARE_VERSION:
 		status = intel_smc_fw_version(&retval);
 		SMC_RET2(handle, status, retval);
@@ -688,27 +786,69 @@
 	case INTEL_SIP_SMC_MBOX_SEND_CMD:
 		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
 		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
-		status = intel_mbox_send_cmd(x1, (uint32_t *)x2, x3, x4,
-					     (uint32_t *)x5, x6, &mbox_status,
-					     &len_in_resp);
+		status = intel_mbox_send_cmd(x1, (uint32_t *)x2, x3, x4, x5, x6,
+						&mbox_status, &len_in_resp);
 		SMC_RET3(handle, status, mbox_status, len_in_resp);
 
 	case INTEL_SIP_SMC_GET_USERCODE:
 		status = intel_smc_get_usercode(&retval);
 		SMC_RET2(handle, status, retval);
 
-	case INTEL_SIP_SMC_GET_ROM_PATCH_SHA384:
-		status = intel_fcs_get_rom_patch_sha384(x1, &retval64,
+	case INTEL_SIP_SMC_FCS_CRYPTION:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+
+		if (x1 == FCS_MODE_DECRYPT) {
+			status = intel_fcs_decryption(x2, x3, x4, x5, &send_id);
+		} else if (x1 == FCS_MODE_ENCRYPT) {
+			status = intel_fcs_encryption(x2, x3, x4, x5, &send_id);
+		} else {
+			status = INTEL_SIP_SMC_STATUS_REJECTED;
+		}
+
+		SMC_RET3(handle, status, x4, x5);
+
+	case INTEL_SIP_SMC_FCS_CRYPTION_EXT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+
+		if (x3 == FCS_MODE_DECRYPT) {
+			status = intel_fcs_decryption_ext(x1, x2, x4, x5, x6,
+					(uint32_t *) &x7, &mbox_error);
+		} else if (x3 == FCS_MODE_ENCRYPT) {
+			status = intel_fcs_encryption_ext(x1, x2, x4, x5, x6,
+					(uint32_t *) &x7, &mbox_error);
+		} else {
+			status = INTEL_SIP_SMC_STATUS_REJECTED;
+		}
+
+		SMC_RET4(handle, status, mbox_error, x6, x7);
+
+	case INTEL_SIP_SMC_FCS_RANDOM_NUMBER:
+		status = intel_fcs_random_number_gen(x1, &retval64,
 							&mbox_error);
 		SMC_RET4(handle, status, mbox_error, x1, retval64);
 
-	case INTEL_SIP_SMC_SVC_VERSION:
-		SMC_RET3(handle, INTEL_SIP_SMC_STATUS_OK,
-					SIP_SVC_VERSION_MAJOR,
-					SIP_SVC_VERSION_MINOR);
+	case INTEL_SIP_SMC_FCS_RANDOM_NUMBER_EXT:
+		status = intel_fcs_random_number_gen_ext(x1, x2, x3,
+							&send_id);
+		SMC_RET1(handle, status);
+
+	case INTEL_SIP_SMC_FCS_SEND_CERTIFICATE:
+		status = intel_fcs_send_cert(x1, x2, &send_id);
+		SMC_RET1(handle, status);
+
+	case INTEL_SIP_SMC_FCS_GET_PROVISION_DATA:
+		status = intel_fcs_get_provision_data(&send_id);
+		SMC_RET1(handle, status);
+
+	case INTEL_SIP_SMC_FCS_CNTR_SET_PREAUTH:
+		status = intel_fcs_cntr_set_preauth(x1, x2, x3,
+							&mbox_error);
+		SMC_RET2(handle, status, mbox_error);
 
 	case INTEL_SIP_SMC_HPS_SET_BRIDGES:
-		status = intel_hps_set_bridges(x1);
+		status = intel_hps_set_bridges(x1, x2);
 		SMC_RET1(handle, status);
 
 	case INTEL_SIP_SMC_HWMON_READTEMP:
@@ -719,12 +859,259 @@
 		status = intel_hwmon_readvolt(x1, &retval);
 		SMC_RET2(handle, status, retval);
 
+	case INTEL_SIP_SMC_FCS_PSGSIGMA_TEARDOWN:
+		status = intel_fcs_sigma_teardown(x1, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_CHIP_ID:
+		status = intel_fcs_chip_id(&retval, &retval2, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, retval, retval2);
+
+	case INTEL_SIP_SMC_FCS_ATTESTATION_SUBKEY:
+		status = intel_fcs_attestation_subkey(x1, x2, x3,
+					(uint32_t *) &x4, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x3, x4);
+
+	case INTEL_SIP_SMC_FCS_ATTESTATION_MEASUREMENTS:
+		status = intel_fcs_get_measurement(x1, x2, x3,
+					(uint32_t *) &x4, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x3, x4);
+
+	case INTEL_SIP_SMC_FCS_GET_ATTESTATION_CERT:
+		status = intel_fcs_get_attestation_cert(x1, x2,
+					(uint32_t *) &x3, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x2, x3);
+
+	case INTEL_SIP_SMC_FCS_CREATE_CERT_ON_RELOAD:
+		status = intel_fcs_create_cert_on_reload(x1, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_OPEN_CS_SESSION:
+		status = intel_fcs_open_crypto_service_session(&retval, &mbox_error);
+		SMC_RET3(handle, status, mbox_error, retval);
+
+	case INTEL_SIP_SMC_FCS_CLOSE_CS_SESSION:
+		status = intel_fcs_close_crypto_service_session(x1, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_IMPORT_CS_KEY:
+		status = intel_fcs_import_crypto_service_key(x1, x2, &send_id);
+		SMC_RET1(handle, status);
+
+	case INTEL_SIP_SMC_FCS_EXPORT_CS_KEY:
+		status = intel_fcs_export_crypto_service_key(x1, x2, x3,
+					(uint32_t *) &x4, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x3, x4);
+
+	case INTEL_SIP_SMC_FCS_REMOVE_CS_KEY:
+		status = intel_fcs_remove_crypto_service_key(x1, x2,
+					&mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_GET_CS_KEY_INFO:
+		status = intel_fcs_get_crypto_service_key_info(x1, x2, x3,
+					(uint32_t *) &x4, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x3, x4);
+
+	case INTEL_SIP_SMC_FCS_GET_DIGEST_INIT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		status = intel_fcs_get_digest_init(x1, x2, x3,
+					x4, x5, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_GET_DIGEST_UPDATE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		status = intel_fcs_get_digest_update_finalize(x1, x2, x3,
+					x4, x5, (uint32_t *) &x6, false,
+					&mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_GET_DIGEST_FINALIZE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		status = intel_fcs_get_digest_update_finalize(x1, x2, x3,
+					x4, x5, (uint32_t *) &x6, true,
+					&mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_MAC_VERIFY_INIT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		status = intel_fcs_mac_verify_init(x1, x2, x3,
+					x4, x5, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_MAC_VERIFY_UPDATE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+		status = intel_fcs_mac_verify_update_finalize(x1, x2, x3,
+					x4, x5, (uint32_t *) &x6, x7,
+					false, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_MAC_VERIFY_FINALIZE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+		status = intel_fcs_mac_verify_update_finalize(x1, x2, x3,
+					x4, x5, (uint32_t *) &x6, x7,
+					true, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_INIT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		status = intel_fcs_ecdsa_sha2_data_sign_init(x1, x2, x3,
+					x4, x5, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_UPDATE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		status = intel_fcs_ecdsa_sha2_data_sign_update_finalize(x1, x2,
+					x3, x4, x5, (uint32_t *) &x6, false,
+					&mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_FINALIZE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		status = intel_fcs_ecdsa_sha2_data_sign_update_finalize(x1, x2,
+					x3, x4, x5, (uint32_t *) &x6, true,
+					&mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGN_INIT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		status = intel_fcs_ecdsa_hash_sign_init(x1, x2, x3,
+					x4, x5, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGN_FINALIZE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		status = intel_fcs_ecdsa_hash_sign_finalize(x1, x2, x3,
+					 x4, x5, (uint32_t *) &x6, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_HASH_SIG_VERIFY_INIT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		status = intel_fcs_ecdsa_hash_sig_verify_init(x1, x2, x3,
+					x4, x5, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_HASH_SIG_VERIFY_FINALIZE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		status = intel_fcs_ecdsa_hash_sig_verify_finalize(x1, x2, x3,
+					 x4, x5, (uint32_t *) &x6, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_INIT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		status = intel_fcs_ecdsa_sha2_data_sig_verify_init(x1, x2, x3,
+					x4, x5, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_UPDATE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+		status = intel_fcs_ecdsa_sha2_data_sig_verify_update_finalize(
+					x1, x2, x3, x4, x5, (uint32_t *) &x6,
+					x7, false, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_FINALIZE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+		status = intel_fcs_ecdsa_sha2_data_sig_verify_update_finalize(
+					x1, x2, x3, x4, x5, (uint32_t *) &x6,
+					x7, true, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_GET_PUBKEY_INIT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		status = intel_fcs_ecdsa_get_pubkey_init(x1, x2, x3,
+					x4, x5, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_GET_PUBKEY_FINALIZE:
+		status = intel_fcs_ecdsa_get_pubkey_finalize(x1, x2, x3,
+					(uint32_t *) &x4, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x3, x4);
+
+	case INTEL_SIP_SMC_FCS_ECDH_REQUEST_INIT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		status = intel_fcs_ecdh_request_init(x1, x2, x3,
+					x4, x5, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_ECDH_REQUEST_FINALIZE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		status = intel_fcs_ecdh_request_finalize(x1, x2, x3,
+					 x4, x5, (uint32_t *) &x6, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_AES_CRYPT_INIT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		status = intel_fcs_aes_crypt_init(x1, x2, x3, x4, x5,
+					&mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_AES_CRYPT_UPDATE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		status = intel_fcs_aes_crypt_update_finalize(x1, x2, x3, x4,
+					x5, x6, false, &send_id);
+		SMC_RET1(handle, status);
+
+	case INTEL_SIP_SMC_FCS_AES_CRYPT_FINALIZE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		status = intel_fcs_aes_crypt_update_finalize(x1, x2, x3, x4,
+					x5, x6, true, &send_id);
+		SMC_RET1(handle, status);
+
+	case INTEL_SIP_SMC_GET_ROM_PATCH_SHA384:
+		status = intel_fcs_get_rom_patch_sha384(x1, &retval64,
+							&mbox_error);
+		SMC_RET4(handle, status, mbox_error, x1, retval64);
+
+	case INTEL_SIP_SMC_SVC_VERSION:
+		SMC_RET3(handle, INTEL_SIP_SMC_STATUS_OK,
+					SIP_SVC_VERSION_MAJOR,
+					SIP_SVC_VERSION_MINOR);
+
 	default:
 		return socfpga_sip_handler(smc_fid, x1, x2, x3, x4,
 			cookie, handle, flags);
 	}
 }
 
+uintptr_t sip_smc_handler(uint32_t smc_fid,
+			 u_register_t x1,
+			 u_register_t x2,
+			 u_register_t x3,
+			 u_register_t x4,
+			 void *cookie,
+			 void *handle,
+			 u_register_t flags)
+{
+	uint32_t cmd = smc_fid & INTEL_SIP_SMC_CMD_MASK;
+
+	if (cmd >= INTEL_SIP_SMC_CMD_V2_RANGE_BEGIN &&
+	    cmd <= INTEL_SIP_SMC_CMD_V2_RANGE_END) {
+		return sip_smc_handler_v2(smc_fid, x1, x2, x3, x4,
+			cookie, handle, flags);
+	} else {
+		return sip_smc_handler_v1(smc_fid, x1, x2, x3, x4,
+			cookie, handle, flags);
+	}
+}
+
 DECLARE_RT_SVC(
 	socfpga_sip_svc,
 	OEN_SIP_START,
diff --git a/plat/intel/soc/common/socfpga_sip_svc_v2.c b/plat/intel/soc/common/socfpga_sip_svc_v2.c
new file mode 100644
index 0000000..791c714
--- /dev/null
+++ b/plat/intel/soc/common/socfpga_sip_svc_v2.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/mmio.h>
+
+#include "socfpga_mailbox.h"
+#include "socfpga_sip_svc.h"
+
+static uint32_t intel_v2_mbox_send_cmd(uint32_t req_header,
+				uint32_t *data, uint32_t data_size)
+{
+	uint32_t value;
+	uint32_t len;
+
+	if ((data == NULL) || (data_size == 0)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (data_size > (MBOX_INC_HEADER_MAX_WORD_SIZE * MBOX_WORD_BYTE)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_size_4_bytes_aligned(data_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	/* Make sure the client ID matches in SMC SiP V2 and mailbox headers */
+	value = (req_header >> INTEL_SIP_SMC_HEADER_CID_OFFSET) &
+				INTEL_SIP_SMC_HEADER_CID_MASK;
+
+	if (value != MBOX_RESP_CLIENT_ID(data[0])) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	/* Make sure the job ID matches in SMC SiP V2 and mailbox headers */
+	value = (req_header >> INTEL_SIP_SMC_HEADER_JOB_ID_OFFSET) &
+				INTEL_SIP_SMC_HEADER_JOB_ID_MASK;
+
+	if (value != MBOX_RESP_JOB_ID(data[0])) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	/*
+	 * Make sure the data length matches in the SMC SiP V2 header
+	 * and the mailbox header
+	 */
+	len = (data_size / MBOX_WORD_BYTE) - 1;
+
+	if (len != MBOX_RESP_LEN(data[0])) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	return mailbox_send_cmd_async_ext(data[0], &data[1], len);
+}
+
+static uint32_t intel_v2_mbox_poll_resp(uint64_t req_header,
+				uint32_t *data, uint32_t *data_size,
+				uint64_t *resp_header)
+{
+	int status = 0;
+	uint32_t resp_len;
+	uint32_t job_id = 0;
+	uint32_t client_id = 0;
+	uint32_t version;
+
+	if ((data == NULL) || (data_size == NULL) || (resp_header == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_size_4_bytes_aligned(*data_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	resp_len = (*data_size / MBOX_WORD_BYTE) - 1;
+	status = mailbox_read_response_async(&job_id, &data[0], &data[1],
+				&resp_len, 1);
+
+	if (status == MBOX_BUSY) {
+		status = INTEL_SIP_SMC_STATUS_BUSY;
+	} else if (status == MBOX_NO_RESPONSE) {
+		status = INTEL_SIP_SMC_STATUS_NO_RESPONSE;
+	} else {
+		*data_size = 0;
+
+		if (resp_len > 0) {
+			/*
+			 * Fill in the final response length; the length
+			 * includes both the mailbox header and the payload.
+			 */
+			*data_size = (resp_len + 1) * MBOX_WORD_BYTE;
+
+			/* Extract the client id from mailbox header */
+			client_id = MBOX_RESP_CLIENT_ID(data[0]);
+		}
+
+		/*
+		 * Extract SMC SiP V2 protocol version from
+		 * SMC request header
+		 */
+		version = (req_header >> INTEL_SIP_SMC_HEADER_VERSION_OFFSET) &
+				INTEL_SIP_SMC_HEADER_VERSION_MASK;
+
+		/* Fill in SMC SiP V2 protocol response header */
+		*resp_header = 0;
+		*resp_header |= (((uint64_t)job_id) &
+				INTEL_SIP_SMC_HEADER_JOB_ID_MASK) <<
+				INTEL_SIP_SMC_HEADER_JOB_ID_OFFSET;
+		*resp_header |= (((uint64_t)client_id) &
+				INTEL_SIP_SMC_HEADER_CID_MASK) <<
+				INTEL_SIP_SMC_HEADER_CID_OFFSET;
+		*resp_header |= (((uint64_t)version) &
+				INTEL_SIP_SMC_HEADER_VERSION_MASK) <<
+				INTEL_SIP_SMC_HEADER_VERSION_OFFSET;
+	}
+
+	return status;
+}
+
+uintptr_t sip_smc_handler_v2(uint32_t smc_fid,
+				u_register_t x1,
+				u_register_t x2,
+				u_register_t x3,
+				u_register_t x4,
+				void *cookie,
+				void *handle,
+				u_register_t flags)
+{
+	uint32_t retval = 0;
+	uint64_t retval64 = 0;
+	int status = INTEL_SIP_SMC_STATUS_OK;
+
+	switch (smc_fid) {
+	case INTEL_SIP_SMC_V2_GET_SVC_VERSION:
+		SMC_RET4(handle, INTEL_SIP_SMC_STATUS_OK, x1,
+				SIP_SVC_VERSION_MAJOR,
+				SIP_SVC_VERSION_MINOR);
+
+	case INTEL_SIP_SMC_V2_REG_READ:
+		status = intel_secure_reg_read(x2, &retval);
+		SMC_RET4(handle, status, x1, retval, x2);
+
+	case INTEL_SIP_SMC_V2_REG_WRITE:
+		status = intel_secure_reg_write(x2, (uint32_t)x3, &retval);
+		SMC_RET4(handle, status, x1, retval, x2);
+
+	case INTEL_SIP_SMC_V2_REG_UPDATE:
+		status = intel_secure_reg_update(x2, (uint32_t)x3,
+				(uint32_t)x4, &retval);
+		SMC_RET4(handle, status, x1, retval, x2);
+
+	case INTEL_SIP_SMC_V2_HPS_SET_BRIDGES:
+		status = intel_hps_set_bridges(x2, x3);
+		SMC_RET2(handle, status, x1);
+
+	case INTEL_SIP_SMC_V2_MAILBOX_SEND_COMMAND:
+		status = intel_v2_mbox_send_cmd(x1, (uint32_t *)x2, x3);
+		SMC_RET2(handle, status, x1);
+
+	case INTEL_SIP_SMC_V2_MAILBOX_POLL_RESPONSE:
+		status = intel_v2_mbox_poll_resp(x1, (uint32_t *)x2,
+				(uint32_t *) &x3, &retval64);
+		SMC_RET4(handle, status, retval64, x2, x3);
+
+	default:
+		ERROR("%s: unhandled SMC V2 (0x%x)\n", __func__, smc_fid);
+		SMC_RET1(handle, SMC_UNK);
+	}
+}
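
Note on the V2 SMC header handled above: intel_v2_mbox_send_cmd() rejects a request unless the client ID, job ID and payload length encoded in the SMC header (x1) match the first word of the mailbox command, and intel_v2_mbox_poll_resp() packs the same job ID, client ID and protocol version back into the response header. A minimal sketch of that packing, assuming only the INTEL_SIP_SMC_HEADER_* masks and offsets from socfpga_sip_svc.h; the helper name is illustrative and not part of this patch:

#include <stdint.h>

#include "socfpga_sip_svc.h"

/* Pack job ID, client ID and protocol version into a SiP V2 header word. */
static uint64_t example_pack_sip_v2_header(uint32_t job_id, uint32_t client_id,
					   uint32_t version)
{
	uint64_t hdr = 0;

	hdr |= ((uint64_t)job_id & INTEL_SIP_SMC_HEADER_JOB_ID_MASK) <<
		INTEL_SIP_SMC_HEADER_JOB_ID_OFFSET;
	hdr |= ((uint64_t)client_id & INTEL_SIP_SMC_HEADER_CID_MASK) <<
		INTEL_SIP_SMC_HEADER_CID_OFFSET;
	hdr |= ((uint64_t)version & INTEL_SIP_SMC_HEADER_VERSION_MASK) <<
		INTEL_SIP_SMC_HEADER_VERSION_OFFSET;

	return hdr;
}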
diff --git a/plat/intel/soc/n5x/include/socfpga_plat_def.h b/plat/intel/soc/n5x/include/socfpga_plat_def.h
index 3ce03dc..4c36f91 100644
--- a/plat/intel/soc/n5x/include/socfpga_plat_def.h
+++ b/plat/intel/soc/n5x/include/socfpga_plat_def.h
@@ -19,6 +19,9 @@
 #define INTEL_SIP_SMC_FPGA_CONFIG_SIZE		0x2000000
 
 /* Register Mapping */
+#define SOCFPGA_CCU_NOC_REG_BASE		U(0xf7000000)
+#define SOCFPGA_F2SDRAMMGR_REG_BASE		U(0xf8024000)
+
 #define SOCFPGA_MMC_REG_BASE			U(0xff808000)
 
 #define SOCFPGA_RSTMGR_REG_BASE			U(0xffd11000)
diff --git a/plat/intel/soc/n5x/platform.mk b/plat/intel/soc/n5x/platform.mk
index b72bcc4..953bf0c 100644
--- a/plat/intel/soc/n5x/platform.mk
+++ b/plat/intel/soc/n5x/platform.mk
@@ -38,6 +38,7 @@
 		plat/intel/soc/n5x/bl31_plat_setup.c			\
 		plat/intel/soc/common/socfpga_psci.c			\
 		plat/intel/soc/common/socfpga_sip_svc.c			\
+		plat/intel/soc/common/socfpga_sip_svc_v2.c		\
 		plat/intel/soc/common/socfpga_topology.c		\
 		plat/intel/soc/common/sip/socfpga_sip_ecc.c             \
 		plat/intel/soc/common/sip/socfpga_sip_fcs.c		\
diff --git a/plat/intel/soc/stratix10/bl2_plat_setup.c b/plat/intel/soc/stratix10/bl2_plat_setup.c
index cca564a..73e3216 100644
--- a/plat/intel/soc/stratix10/bl2_plat_setup.c
+++ b/plat/intel/soc/stratix10/bl2_plat_setup.c
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2019-2021, ARM Limited and Contributors. All rights reserved.
- * Copyright (c) 2019-2021, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -18,6 +18,7 @@
 
 #include "qspi/cadence_qspi.h"
 #include "socfpga_emac.h"
+#include "socfpga_f2sdram_manager.h"
 #include "socfpga_handoff.h"
 #include "socfpga_mailbox.h"
 #include "socfpga_private.h"
@@ -79,8 +80,11 @@
 	mailbox_init();
 	s10_mmc_init();
 
-	if (!intel_mailbox_is_fpga_not_ready())
-		socfpga_bridges_enable();
+	if (!intel_mailbox_is_fpga_not_ready()) {
+		socfpga_bridges_enable(SOC2FPGA_MASK | LWHPS2FPGA_MASK |
+					FPGA2SOC_MASK | F2SDRAM0_MASK | F2SDRAM1_MASK |
+					F2SDRAM2_MASK);
+	}
 }
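
socfpga_bridges_enable() now takes an explicit bridge mask, so BL2 can bring up every bridge as above while other callers enable only a subset. A hedged usage example, reusing the mask names introduced by this patch:

	/*
	 * Example only: enable just the HPS-to-FPGA bridges and leave the
	 * FPGA-to-SDRAM ports untouched.
	 */
	socfpga_bridges_enable(SOC2FPGA_MASK | LWHPS2FPGA_MASK);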
 
 
diff --git a/plat/intel/soc/stratix10/include/s10_noc.h b/plat/intel/soc/stratix10/include/s10_noc.h
deleted file mode 100644
index 3e1e527..0000000
--- a/plat/intel/soc/stratix10/include/s10_noc.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2019, Intel Corporation. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#define AXI_AP				(1<<0)
-#define FPGA2SOC			(1<<16)
-#define MPU				(1<<24)
-#define S10_NOC_PER_SCR_NAND		0xffd21000
-#define S10_NOC_PER_SCR_NAND_DATA	0xffd21004
-#define S10_NOC_PER_SCR_USB0		0xffd2100c
-#define S10_NOC_PER_SCR_USB1		0xffd21010
-#define S10_NOC_PER_SCR_SPI_M0		0xffd2101c
-#define S10_NOC_PER_SCR_SPI_M1		0xffd21020
-#define S10_NOC_PER_SCR_SPI_S0		0xffd21024
-#define S10_NOC_PER_SCR_SPI_S1		0xffd21028
-#define S10_NOC_PER_SCR_EMAC0		0xffd2102c
-#define S10_NOC_PER_SCR_EMAC1		0xffd21030
-#define S10_NOC_PER_SCR_EMAC2		0xffd21034
-#define S10_NOC_PER_SCR_SDMMC		0xffd21040
-#define S10_NOC_PER_SCR_GPIO0		0xffd21044
-#define S10_NOC_PER_SCR_GPIO1		0xffd21048
-#define S10_NOC_PER_SCR_I2C0		0xffd21050
-#define S10_NOC_PER_SCR_I2C1		0xffd21058
-#define S10_NOC_PER_SCR_I2C2		0xffd2105c
-#define S10_NOC_PER_SCR_I2C3		0xffd21060
-#define S10_NOC_PER_SCR_SP_TIMER0	0xffd21064
-#define S10_NOC_PER_SCR_SP_TIMER1	0xffd21068
-#define S10_NOC_PER_SCR_UART0		0xffd2106c
-#define S10_NOC_PER_SCR_UART1		0xffd21070
-
-
-#define S10_NOC_SYS_SCR_DMA_ECC			0xffd21108
-#define S10_NOC_SYS_SCR_EMAC0RX_ECC		0xffd2110c
-#define S10_NOC_SYS_SCR_EMAC0TX_ECC		0xffd21110
-#define S10_NOC_SYS_SCR_EMAC1RX_ECC		0xffd21114
-#define S10_NOC_SYS_SCR_EMAC1TX_ECC		0xffd21118
-#define S10_NOC_SYS_SCR_EMAC2RX_ECC		0xffd2111c
-#define S10_NOC_SYS_SCR_EMAC2TX_ECC		0xffd21120
-#define S10_NOC_SYS_SCR_NAND_ECC		0xffd2112c
-#define S10_NOC_SYS_SCR_NAND_READ_ECC		0xffd21130
-#define S10_NOC_SYS_SCR_NAND_WRITE_ECC		0xffd21134
-#define S10_NOC_SYS_SCR_OCRAM_ECC		0xffd21138
-#define S10_NOC_SYS_SCR_SDMMC_ECC		0xffd21140
-#define S10_NOC_SYS_SCR_USB0_ECC		0xffd21144
-#define S10_NOC_SYS_SCR_USB1_ECC		0xffd21148
-#define S10_NOC_SYS_SCR_CLK_MGR			0xffd2114c
-#define S10_NOC_SYS_SCR_IO_MGR			0xffd21154
-#define S10_NOC_SYS_SCR_RST_MGR			0xffd21158
-#define S10_NOC_SYS_SCR_SYS_MGR			0xffd2115c
-#define S10_NOC_SYS_SCR_OSC0_TIMER		0xffd21160
-#define S10_NOC_SYS_SCR_OSC1_TIMER		0xffd21164
-#define S10_NOC_SYS_SCR_WATCHDOG0		0xffd21168
-#define S10_NOC_SYS_SCR_WATCHDOG1		0xffd2116c
-#define S10_NOC_SYS_SCR_WATCHDOG2		0xffd21170
-#define S10_NOC_SYS_SCR_WATCHDOG3		0xffd21174
-#define S10_NOC_SYS_SCR_DAP			0xffd21178
-#define S10_NOC_SYS_SCR_L4_NOC_PROBES		0xffd21190
-#define S10_NOC_SYS_SCR_L4_NOC_QOS		0xffd21194
-
-#define S10_CCU_NOC_BRIDGE_CPU0_RAM		0xf7004688
-#define S10_CCU_NOC_BRIDGE_IOM_RAM		0xf7004688
diff --git a/plat/intel/soc/stratix10/include/socfpga_plat_def.h b/plat/intel/soc/stratix10/include/socfpga_plat_def.h
index ae4b674..516cc75 100644
--- a/plat/intel/soc/stratix10/include/socfpga_plat_def.h
+++ b/plat/intel/soc/stratix10/include/socfpga_plat_def.h
@@ -19,6 +19,7 @@
 
 /* Register Mapping */
 #define SOCFPGA_CCU_NOC_REG_BASE		0xf7000000
+#define SOCFPGA_F2SDRAMMGR_REG_BASE		U(0xf8024000)
 
 #define SOCFPGA_MMC_REG_BASE                    0xff808000
 
diff --git a/plat/intel/soc/stratix10/platform.mk b/plat/intel/soc/stratix10/platform.mk
index 273b975..8b39b6f 100644
--- a/plat/intel/soc/stratix10/platform.mk
+++ b/plat/intel/soc/stratix10/platform.mk
@@ -64,6 +64,7 @@
 		plat/intel/soc/stratix10/bl31_plat_setup.c	 	\
 		plat/intel/soc/common/socfpga_psci.c			\
 		plat/intel/soc/common/socfpga_sip_svc.c			\
+		plat/intel/soc/common/socfpga_sip_svc_v2.c		\
 		plat/intel/soc/common/socfpga_topology.c		\
 		plat/intel/soc/common/sip/socfpga_sip_ecc.c		\
 		plat/intel/soc/common/sip/socfpga_sip_fcs.c		\
diff --git a/plat/st/common/bl2_io_storage.c b/plat/st/common/bl2_io_storage.c
index 7cd5eb5..b2038bc 100644
--- a/plat/st/common/bl2_io_storage.c
+++ b/plat/st/common/bl2_io_storage.c
@@ -38,6 +38,7 @@
 #include <platform_def.h>
 #include <stm32cubeprogrammer.h>
 #include <stm32mp_fconf_getter.h>
+#include <stm32mp_io_storage.h>
 #include <usb_dfu.h>
 
 /* IO devices */
diff --git a/plat/xilinx/zynqmp/platform.mk b/plat/xilinx/zynqmp/platform.mk
index 620bf6c..ea8a5d1 100644
--- a/plat/xilinx/zynqmp/platform.mk
+++ b/plat/xilinx/zynqmp/platform.mk
@@ -21,6 +21,10 @@
 
 WORKAROUND_CVE_2017_5715	:=	0
 
+ARM_XLAT_TABLES_LIB_V1         :=      1
+$(eval $(call assert_boolean,ARM_XLAT_TABLES_LIB_V1))
+$(eval $(call add_define,ARM_XLAT_TABLES_LIB_V1))
+
 ifdef ZYNQMP_ATF_MEM_BASE
     $(eval $(call add_define,ZYNQMP_ATF_MEM_BASE))
 
diff --git a/services/std_svc/spm/el3_spmc/spmc.h b/services/std_svc/spm/el3_spmc/spmc.h
index 0915d0b..faa604f 100644
--- a/services/std_svc/spm/el3_spmc/spmc.h
+++ b/services/std_svc/spm/el3_spmc/spmc.h
@@ -221,4 +221,10 @@
  */
 struct el3_lp_desc *get_el3_lp_array(void);
 
+/*
+ * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
+ * or OS kernel in the normal world or the last SP that was run.
+ */
+struct mailbox *spmc_get_mbox_desc(bool secure_origin);
+
 #endif /* SPMC_H */
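
A sketch of how an EL3 SPMC ABI handler might use this accessor to reach the caller's buffer pair; the tx_buffer field access is an assumption about struct mailbox and the helper is illustrative only:

#include <stdbool.h>

#include "spmc.h"

/* Return the TX buffer of whichever endpoint made the current call. */
static void *example_caller_tx_buffer(bool secure_origin)
{
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);

	/* Assumed field; NULL until the endpoint has mapped its RX/TX pair. */
	return mbox->tx_buffer;
}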
diff --git a/services/std_svc/spm/el3_spmc/spmc_main.c b/services/std_svc/spm/el3_spmc/spmc_main.c
index 35def25..33a25a2 100644
--- a/services/std_svc/spm/el3_spmc/spmc_main.c
+++ b/services/std_svc/spm/el3_spmc/spmc_main.c
@@ -72,7 +72,7 @@
 /* Helper function to get pointer to SP context from its ID. */
 struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
 {
-	/* Check for SWd Partitions. */
+	/* Check for Secure World Partitions. */
 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
 		if (sp_desc[i].sp_id == id) {
 			return &(sp_desc[i]);
@@ -81,6 +81,29 @@
 	return NULL;
 }
 
+/*
+ * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
+ * We assume that the first descriptor is reserved for this entity.
+ */
+struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
+{
+	return &(ns_ep_desc[0]);
+}
+
+/*
+ * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
+ * or OS kernel in the normal world or the last SP that was run.
+ */
+struct mailbox *spmc_get_mbox_desc(bool secure_origin)
+{
+	/* Obtain the RX/TX buffer pair descriptor. */
+	if (secure_origin) {
+		return &(spmc_get_current_sp_ctx()->mailbox);
+	} else {
+		return &(spmc_get_hyp_ctx()->mailbox);
+	}
+}
+
 /******************************************************************************
  * This function returns to the place where spmc_sp_synchronous_entry() was
  * called originally.
@@ -491,6 +514,59 @@
 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
 }
 
+static uint64_t ffa_version_handler(uint32_t smc_fid,
+				    bool secure_origin,
+				    uint64_t x1,
+				    uint64_t x2,
+				    uint64_t x3,
+				    uint64_t x4,
+				    void *cookie,
+				    void *handle,
+				    uint64_t flags)
+{
+	uint32_t requested_version = x1 & FFA_VERSION_MASK;
+
+	if (requested_version & FFA_VERSION_BIT31_MASK) {
+		/* Invalid encoding, return an error. */
+		SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
+		/* Execution stops here. */
+	}
+
+	/* Determine the caller to store the requested version. */
+	if (secure_origin) {
+		/*
+		 * Ensure that the SP is reporting the same version as
+		 * specified in its manifest. If these do not match, there is
+		 * something wrong with the SP.
+		 * TODO: Should we abort the SP? For now, assert that this is
+		 *       not the case.
+		 */
+		assert(requested_version ==
+		       spmc_get_current_sp_ctx()->ffa_version);
+	} else {
+		/*
+		 * If this is called by the normal world, record this
+		 * information in its descriptor.
+		 */
+		spmc_get_hyp_ctx()->ffa_version = requested_version;
+	}
+
+	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
+					  FFA_VERSION_MINOR));
+}
+
+/*******************************************************************************
+ * Helper function to obtain the FF-A version of the calling partition.
+ ******************************************************************************/
+uint32_t get_partition_ffa_version(bool secure_origin)
+{
+	if (secure_origin) {
+		return spmc_get_current_sp_ctx()->ffa_version;
+	} else {
+		return spmc_get_hyp_ctx()->ffa_version;
+	}
+}
+
 /*******************************************************************************
  * This function will parse the Secure Partition Manifest. From manifest, it
  * will fetch details for preparing Secure partition image context and secure
@@ -545,6 +621,23 @@
 	sp->execution_state = config_32;
 
 	ret = fdt_read_uint32(sp_manifest, node,
+			      "messaging-method", &config_32);
+	if (ret != 0) {
+		ERROR("Missing Secure Partition messaging method.\n");
+		return ret;
+	}
+
+	/* Validate this entry, we currently only support direct messaging. */
+	if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
+			  FFA_PARTITION_DIRECT_REQ_SEND)) != 0U) {
+		WARN("Invalid Secure Partition messaging method (0x%x)\n",
+		     config_32);
+		return -EINVAL;
+	}
+
+	sp->properties = config_32;
+
+	ret = fdt_read_uint32(sp_manifest, node,
 			      "execution-ctx-count", &config_32);
 
 	if (ret != 0) {
@@ -866,6 +959,10 @@
 {
 	switch (smc_fid) {
 
+	case FFA_VERSION:
+		return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
+					   x4, cookie, handle, flags);
+
 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
 		return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
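
For reference, the version word validated by ffa_version_handler() follows the FF-A encoding: bit 31 must be zero, bits [30:16] carry the major version and bits [15:0] the minor version. A standalone sketch of that packing, using local example constants rather than TF-A's own MAKE_FFA_VERSION macro:

#include <stdint.h>

#define EXAMPLE_FFA_VERSION_MAJOR_SHIFT	16U
#define EXAMPLE_FFA_VERSION_MINOR_MASK	0xffffU

/* Pack an FF-A version word; bit 31 stays clear as the spec requires. */
static uint32_t example_make_ffa_version(uint32_t major, uint32_t minor)
{
	return (major << EXAMPLE_FFA_VERSION_MAJOR_SHIFT) |
	       (minor & EXAMPLE_FFA_VERSION_MINOR_MASK);
}

/* example_make_ffa_version(1U, 1U) == 0x00010001, i.e. FF-A v1.1 */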
diff --git a/services/std_svc/spmd/spmd_main.c b/services/std_svc/spmd/spmd_main.c
index 5b131cd..777a962 100644
--- a/services/std_svc/spmd/spmd_main.c
+++ b/services/std_svc/spmd/spmd_main.c
@@ -626,7 +626,8 @@
 		 * If caller is secure and SPMC was initialized,
 		 * return FFA_VERSION of SPMD.
 		 * If caller is non secure and SPMC was initialized,
-		 * return SPMC's version.
+		 * forward to the EL3 SPMC if enabled, otherwise return
+		 * the SPMC version if implemented at a lower EL.
 		 * Sanity check to "input_version".
 		 * If the EL3 SPMC is enabled, ignore the SPMC state as
 		 * this is not used.
@@ -635,6 +636,17 @@
 		    (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
 			ret = FFA_ERROR_NOT_SUPPORTED;
 		} else if (!secure_origin) {
+			if (is_spmc_at_el3()) {
+				/*
+				 * Forward the call directly to the EL3 SPMC, if
+				 * enabled, as we don't need to wrap the call in
+				 * a direct request.
+				 */
+				return spmd_smc_forward(smc_fid, secure_origin,
+							x1, x2, x3, x4, cookie,
+							handle, flags);
+			}
+
 			gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
 			uint64_t rc;