Merge "chore(xilinx): reorder include files as per TF-A guidelines" into integration
diff --git a/Makefile b/Makefile
index 03f9320..8a0a2e0 100644
--- a/Makefile
+++ b/Makefile
@@ -217,18 +217,16 @@
 ################################################################################
 ifeq (${ARM_ARCH_MAJOR},7)
 	target32-directive	= 	-target arm-none-eabi
-# Will set march32-directive from platform configuration
+# Will set march-directive from platform configuration
 else
 	target32-directive	= 	-target armv8a-none-eabi
 
 # Set the compiler's target architecture profile based on
 # ARM_ARCH_MAJOR ARM_ARCH_MINOR options
 	ifeq (${ARM_ARCH_MINOR},0)
-		march32-directive	= 	-march=armv${ARM_ARCH_MAJOR}-a
-		march64-directive	= 	-march=armv${ARM_ARCH_MAJOR}-a
+		march-directive	= 	-march=armv${ARM_ARCH_MAJOR}-a
 	else
-		march32-directive	= 	-march=armv${ARM_ARCH_MAJOR}.${ARM_ARCH_MINOR}-a
-		march64-directive	= 	-march=armv${ARM_ARCH_MAJOR}.${ARM_ARCH_MINOR}-a
+		march-directive	= 	-march=armv${ARM_ARCH_MAJOR}.${ARM_ARCH_MINOR}-a
 	endif #(ARM_ARCH_MINOR)
 endif #(ARM_ARCH_MAJOR)
 
@@ -273,24 +271,20 @@
 # Set the compiler's architecture feature modifiers
 ifneq ($(arch-features), none)
 	# Strip "none+" from arch-features
-	arch-features		:=	$(subst none+,,$(arch-features))
-	ifeq ($(ARCH), aarch32)
-		march32-directive	:=	$(march32-directive)+$(arch-features)
-	else
-		march64-directive	:=	$(march64-directive)+$(arch-features)
-	endif
+	arch-features	:=	$(subst none+,,$(arch-features))
+	march-directive	:=	$(march-directive)+$(arch-features)
 # Print features
         $(info Arm Architecture Features specified: $(subst +, ,$(arch-features)))
 endif #(arch-features)
 
 ifneq ($(findstring clang,$(notdir $(CC))),)
 	ifneq ($(findstring armclang,$(notdir $(CC))),)
-		TF_CFLAGS_aarch32	:=	-target arm-arm-none-eabi $(march32-directive)
-		TF_CFLAGS_aarch64	:=	-target aarch64-arm-none-eabi $(march64-directive)
+		TF_CFLAGS_aarch32	:=	-target arm-arm-none-eabi $(march-directive)
+		TF_CFLAGS_aarch64	:=	-target aarch64-arm-none-eabi $(march-directive)
 		LD			:=	$(LINKER)
 	else
-		TF_CFLAGS_aarch32	=	$(target32-directive) $(march32-directive)
-		TF_CFLAGS_aarch64	:=	-target aarch64-elf $(march64-directive)
+		TF_CFLAGS_aarch32	=	$(target32-directive) $(march-directive)
+		TF_CFLAGS_aarch64	:=	-target aarch64-elf $(march-directive)
 		LD			:=	$(shell $(CC) --print-prog-name ld.lld)
 
 		AR			:=	$(shell $(CC) --print-prog-name llvm-ar)
@@ -302,8 +296,8 @@
 	PP		:=	$(CC) -E $(TF_CFLAGS_$(ARCH))
 	AS		:=	$(CC) -c -x assembler-with-cpp $(TF_CFLAGS_$(ARCH))
 else ifneq ($(findstring gcc,$(notdir $(CC))),)
-	TF_CFLAGS_aarch32	=	$(march32-directive)
-	TF_CFLAGS_aarch64	=	$(march64-directive)
+	TF_CFLAGS_aarch32	=	$(march-directive)
+	TF_CFLAGS_aarch64	=	$(march-directive)
 	ifeq ($(ENABLE_LTO),1)
 		# Enable LTO only for aarch64
 		ifeq (${ARCH},aarch64)
@@ -314,8 +308,8 @@
 	endif
 	LD			=	$(LINKER)
 else
-	TF_CFLAGS_aarch32	=	$(march32-directive)
-	TF_CFLAGS_aarch64	=	$(march64-directive)
+	TF_CFLAGS_aarch32	=	$(march-directive)
+	TF_CFLAGS_aarch64	=	$(march-directive)
 	LD			=	$(LINKER)
 endif #(clang)
 
@@ -355,8 +349,7 @@
 	TF_CFLAGS_aarch64	+=	-mbranch-protection=${BP_OPTION}
 endif #(BP_OPTION)
 
-ASFLAGS_aarch32		=	$(march32-directive)
-ASFLAGS_aarch64		=	$(march64-directive)
+ASFLAGS		+=	$(march-directive)
 
 ##############################################################################
 # WARNINGS Configuration
@@ -444,7 +437,7 @@
 ################################################################################
 CPPFLAGS		=	${DEFINES} ${INCLUDES} ${MBEDTLS_INC} -nostdinc	\
 				$(ERRORS) $(WARNINGS)
-ASFLAGS			+=	$(CPPFLAGS) $(ASFLAGS_$(ARCH))			\
+ASFLAGS			+=	$(CPPFLAGS)				\
 				-ffreestanding -Wa,--fatal-warnings
 TF_CFLAGS		+=	$(CPPFLAGS) $(TF_CFLAGS_$(ARCH))		\
 				-ffunction-sections -fdata-sections		\
@@ -530,6 +523,7 @@
 				drivers/console/multi_console.c		\
 				lib/${ARCH}/cache_helpers.S		\
 				lib/${ARCH}/misc_helpers.S		\
+				lib/extensions/pmuv3/${ARCH}/pmuv3.c	\
 				plat/common/plat_bl_common.c		\
 				plat/common/plat_log_common.c		\
 				plat/common/${ARCH}/plat_common.c	\
@@ -1154,7 +1148,6 @@
 	CTX_INCLUDE_FPREGS \
 	CTX_INCLUDE_EL2_REGS \
 	DEBUG \
-	DISABLE_MTPMU \
 	DYN_DISABLE_AUTH \
 	EL3_EXCEPTION_HANDLING \
 	ENABLE_AMU_AUXILIARY_COUNTERS \
@@ -1232,6 +1225,7 @@
 	CTX_INCLUDE_MTE_REGS \
 	CTX_INCLUDE_NEVE_REGS \
 	CRYPTO_SUPPORT \
+	DISABLE_MTPMU \
 	ENABLE_BRBE_FOR_NS \
 	ENABLE_TRBE_FOR_NS \
 	ENABLE_BTI \
diff --git a/bl1/bl1.mk b/bl1/bl1.mk
index b1791b1..95fe50e 100644
--- a/bl1/bl1.mk
+++ b/bl1/bl1.mk
@@ -16,10 +16,6 @@
 				plat/common/${ARCH}/platform_up_stack.S \
 				${MBEDTLS_SOURCES}
 
-ifeq (${DISABLE_MTPMU},1)
-BL1_SOURCES		+=	lib/extensions/mtpmu/${ARCH}/mtpmu.S
-endif
-
 ifeq (${ARCH},aarch64)
 BL1_SOURCES		+=	lib/cpus/aarch64/dsu_helpers.S		\
 				lib/el3_runtime/aarch64/context.S
diff --git a/bl2/bl2.mk b/bl2/bl2.mk
index 19b955f..1663c52 100644
--- a/bl2/bl2.mk
+++ b/bl2/bl2.mk
@@ -43,10 +43,6 @@
 				bl2/${ARCH}/bl2_run_next_image.S        \
 				lib/cpus/${ARCH}/cpu_helpers.S
 
-ifeq (${DISABLE_MTPMU},1)
-BL2_SOURCES		+=	lib/extensions/mtpmu/${ARCH}/mtpmu.S
-endif
-
 ifeq (${ARCH},aarch64)
 BL2_SOURCES		+=	lib/cpus/aarch64/dsu_helpers.S
 endif
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index d7c9a52..0c1d657 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -54,10 +54,6 @@
 				${SPMC_SOURCES}					\
 				${SPM_SOURCES}
 
-ifeq (${DISABLE_MTPMU},1)
-BL31_SOURCES		+=	lib/extensions/mtpmu/aarch64/mtpmu.S
-endif
-
 ifeq (${ENABLE_PMF}, 1)
 BL31_SOURCES		+=	lib/pmf/pmf_main.c
 endif
diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c
index e70eb55..8f1f043 100644
--- a/bl31/bl31_main.c
+++ b/bl31/bl31_main.c
@@ -112,6 +112,9 @@
  ******************************************************************************/
 void bl31_main(void)
 {
+	/* Init registers that never change for the lifetime of TF-A */
+	cm_manage_extensions_el3();
+
 	NOTICE("BL31: %s\n", version_string);
 	NOTICE("BL31: %s\n", build_message);
 
diff --git a/bl32/sp_min/sp_min.mk b/bl32/sp_min/sp_min.mk
index ec75d88..0b7bc57 100644
--- a/bl32/sp_min/sp_min.mk
+++ b/bl32/sp_min/sp_min.mk
@@ -20,10 +20,6 @@
 				services/std_svc/std_svc_setup.c	\
 				${PSCI_LIB_SOURCES}
 
-ifeq (${DISABLE_MTPMU},1)
-BL32_SOURCES		+=	lib/extensions/mtpmu/aarch32/mtpmu.S
-endif
-
 ifeq (${ENABLE_PMF}, 1)
 BL32_SOURCES		+=	lib/pmf/pmf_main.c
 endif
diff --git a/common/feat_detect.c b/common/feat_detect.c
index 50b74d0..d2e94e9 100644
--- a/common/feat_detect.c
+++ b/common/feat_detect.c
@@ -144,6 +144,14 @@
 	check_feature(ENABLE_FEAT_SB, read_feat_sb_id_field(), "SB", 1, 1);
 	check_feature(ENABLE_FEAT_CSV2_2, read_feat_csv2_id_field(),
 		      "CSV2_2", 2, 3);
+	/*
+	 * Even though PMUv3 is an OPTIONAL feature, Arm prescribes that it
+	 * is always implemented, so assume it is present and do away with a
+	 * build flag for it. This check covers the minor PMUv3p<x> revisions
+	 * so that we catch them as they come along.
+	 */
+	check_feature(FEAT_STATE_ALWAYS, read_feat_pmuv3_id_field(),
+		      "PMUv3", 1, ID_AA64DFR0_PMUVER_PMUV3P7);
 
 	/* v8.1 features */
 	check_feature(ENABLE_FEAT_PAN, read_feat_pan_id_field(), "PAN", 1, 3);
@@ -184,6 +192,13 @@
 	check_feature(ENABLE_FEAT_TWED, read_feat_twed_id_field(),
 		      "TWED", 1, 1);
 
+	/*
+	 * Even though this is a "DISABLE" flag, it confusingly performs
+	 * feature enablement duties like all the other flags here. Check it
+	 * against the HW feature when diverging from the default behaviour.
+	 */
+	check_feature(DISABLE_MTPMU, read_feat_mtpmu_id_field(), "MTPMU", 1, 1);
+
 	/* v8.7 features */
 	check_feature(ENABLE_FEAT_HCX, read_feat_hcx_id_field(), "HCX", 1, 1);
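
Both new checks lean on the tri-state flag convention used throughout
feat_detect.c. As a reduced sketch (the FEAT_STATE values and the
check_feature() body are assumed from the rest of the file, which this
hunk does not show):

    #include <common/debug.h>

    /* Assumed encoding of the tri-state feature build flags */
    #define FEAT_STATE_DISABLED  0  /* feature compiled out */
    #define FEAT_STATE_ALWAYS    1  /* feature assumed present */
    #define FEAT_STATE_CHECK     2  /* probe the ID registers at runtime */

    /*
     * Reduced check_feature(): complain when a flag promises more than the
     * hardware implements, or when an ID field exceeds the highest revision
     * this code knows about (the PMUv3 case above).
     */
    static void check_feature(int state, unsigned long field,
                              const char *name, unsigned int min,
                              unsigned int max)
    {
            if ((state == FEAT_STATE_ALWAYS) && (field < min)) {
                    ERROR("FEAT_%s not supported by the PE\n", name);
            }
            if ((state >= FEAT_STATE_ALWAYS) && (field > max)) {
                    ERROR("FEAT_%s is version %lu, only up to %u is handled\n",
                          name, field, max);
            }
    }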
 
diff --git a/docs/design/firmware-design.rst b/docs/design/firmware-design.rst
index 131cca1..3d648c4 100644
--- a/docs/design/firmware-design.rst
+++ b/docs/design/firmware-design.rst
@@ -2733,12 +2733,12 @@
 the toolchain  target architecture directive.
 
 A platform may choose to override the default toolchain target architecture
-directive by defining ``MARCH32_DIRECTIVE``.
+directive by defining ``MARCH_DIRECTIVE``.
 E.g.:
 
 .. code:: make
 
-   MARCH32_DIRECTIVE := -mach=armv7-a
+   MARCH_DIRECTIVE := -march=armv7-a
 
 Code Structure
 --------------
diff --git a/docs/getting_started/build-options.rst b/docs/getting_started/build-options.rst
index 7ca8aa9..a5633e9 100644
--- a/docs/getting_started/build-options.rst
+++ b/docs/getting_started/build-options.rst
@@ -207,10 +207,10 @@
    of the binary image. If set to 1, then only the ELF image is built.
    0 is the default.
 
--  ``DISABLE_MTPMU``: Boolean option to disable FEAT_MTPMU if implemented
-   (Armv8.6 onwards). Its default value is 0 to keep consistency with platforms
-   that do not implement FEAT_MTPMU. For more information on FEAT_MTPMU,
-   check the latest Arm ARM.
+-  ``DISABLE_MTPMU``: Numeric option to disable ``FEAT_MTPMU`` (Multi-Threaded
+   PMU). ``FEAT_MTPMU`` is an optional feature available from Armv8.6 onwards.
+   This flag can take the values 0 to 2, to align with the
+   ``FEATURE_DETECTION`` mechanism. The default is ``0``.
 
 -  ``DYN_DISABLE_AUTH``: Provides the capability to dynamically disable Trusted
    Board Boot authentication at runtime. This option is meant to be enabled only
diff --git a/docs/plat/arm/fvp/index.rst b/docs/plat/arm/fvp/index.rst
index 42c0eda..fcfa04a 100644
--- a/docs/plat/arm/fvp/index.rst
+++ b/docs/plat/arm/fvp/index.rst
@@ -51,7 +51,6 @@
 -  ``FVP_Morello``            (Version 0.11/33)
 -  ``FVP_RD_E1_edge``         (Version 11.17/29)
 -  ``FVP_RD_V1``              (Version 11.17/29)
--  ``FVP_TC0`` (Version 11.17/18)
 -  ``FVP_TC1`` (Version 11.17/33)
 -  ``FVP_TC2`` (Version 11.18/28)
 
@@ -631,7 +630,7 @@
 
 --------------
 
-*Copyright (c) 2019-2022, Arm Limited. All rights reserved.*
+*Copyright (c) 2019-2023, Arm Limited. All rights reserved.*
 
 .. _FW_CONFIG for FVP: https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/tree/plat/arm/board/fvp/fdts/fvp_fw_config.dts
 .. _Arm's website: `FVP models`_
diff --git a/docs/plat/arm/tc/index.rst b/docs/plat/arm/tc/index.rst
index df1847d..c5058f5 100644
--- a/docs/plat/arm/tc/index.rst
+++ b/docs/plat/arm/tc/index.rst
@@ -17,10 +17,9 @@
 (TARGET_PLATFORM=1), TC2 (TARGET_PLATFORM=2) platforms w.r.t. TF-A
 is the set of CPUs supported, as listed below:
 
--  TC0 has support for Cortex A510, Cortex A710 and Cortex X2.
--  TC1 has support for Cortex A510, Cortex Makalu and Cortex X3.
--  TC2 has support for Hayes and Hunter Arm CPUs.
-
+-  TC0 has support for Cortex A510, Cortex A710 and Cortex X2. (Note: TC0 is now deprecated.)
+-  TC1 has support for Cortex A510, Cortex A715 and Cortex X3.
+-  TC2 has support for Cortex A520, Cortex A720 and Cortex X4.
 
 Boot Sequence
 -------------
@@ -58,6 +57,6 @@
 
 --------------
 
-*Copyright (c) 2020-2022, Arm Limited. All rights reserved.*
+*Copyright (c) 2020-2023, Arm Limited. All rights reserved.*
 
 .. _Arm Toolchain: https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/downloads
diff --git a/docs/security_advisories/security-advisory-tfv-9.rst b/docs/security_advisories/security-advisory-tfv-9.rst
index d73e74b..762801d 100644
--- a/docs/security_advisories/security-advisory-tfv-9.rst
+++ b/docs/security_advisories/security-advisory-tfv-9.rst
@@ -77,7 +77,7 @@
 +----------------------+
 | Cortex-A715          |
 +----------------------+
-| Cortex-Hunter        |
+| Cortex-A720          |
 +----------------------+
 | Neoverse-N1          |
 +----------------------+
diff --git a/drivers/st/uart/aarch32/stm32_console.S b/drivers/st/uart/aarch32/stm32_console.S
index d64a6cd..e063941 100644
--- a/drivers/st/uart/aarch32/stm32_console.S
+++ b/drivers/st/uart/aarch32/stm32_console.S
@@ -236,7 +236,7 @@
 #endif /* ENABLE_ASSERTIONS */
 	/* Skip flush if UART is not enabled */
 	ldr	r1, [r0, #USART_CR1]
-	ands	r1, r1, #USART_CR1_UE
+	tst	r1, #USART_CR1_UE
 	beq	1f
 	/* Check Transmit Data Register Empty */
 txe_loop_3:
diff --git a/include/arch/aarch32/arch.h b/include/arch/aarch32/arch.h
index c8a6334..dd2c0a6 100644
--- a/include/arch/aarch32/arch.h
+++ b/include/arch/aarch32/arch.h
@@ -104,7 +104,11 @@
 /* CSSELR definitions */
 #define LEVEL_SHIFT		U(1)
 
-/* ID_DFR0_EL1 definitions */
+/* ID_DFR0 definitions */
+#define ID_DFR0_PERFMON_SHIFT		U(24)
+#define ID_DFR0_PERFMON_MASK		U(0xf)
+#define ID_DFR0_PERFMON_PMUV3		U(3)
+#define ID_DFR0_PERFMON_PMUV3P5		U(6)
 #define ID_DFR0_COPTRC_SHIFT		U(12)
 #define ID_DFR0_COPTRC_MASK		U(0xf)
 #define ID_DFR0_COPTRC_SUPPORTED	U(1)
@@ -118,6 +122,7 @@
 #define ID_DFR1_MTPMU_SHIFT	U(0)
 #define ID_DFR1_MTPMU_MASK	U(0xf)
 #define ID_DFR1_MTPMU_SUPPORTED	U(1)
+#define ID_DFR1_MTPMU_DISABLED	U(15)
 
 /* ID_MMFR3 definitions */
 #define ID_MMFR3_PAN_SHIFT	U(16)
@@ -464,6 +469,10 @@
 #define PMCR_LP_BIT		(U(1) << 7)
 #define PMCR_LC_BIT		(U(1) << 6)
 #define PMCR_DP_BIT		(U(1) << 5)
+#define PMCR_X_BIT		(U(1) << 4)
+#define PMCR_C_BIT		(U(1) << 2)
+#define PMCR_P_BIT		(U(1) << 1)
+#define PMCR_E_BIT		(U(1) << 0)
 #define	PMCR_RESET_VAL		U(0x0)
 
 /*******************************************************************************
diff --git a/include/arch/aarch32/arch_features.h b/include/arch/aarch32/arch_features.h
index 99e3fd0..f19c4c2 100644
--- a/include/arch/aarch32/arch_features.h
+++ b/include/arch/aarch32/arch_features.h
@@ -162,4 +162,29 @@
 static inline bool is_feat_s1pie_supported(void) { return false; }
 static inline bool is_feat_sxpie_supported(void) { return false; }
 
+static inline unsigned int read_feat_pmuv3_id_field(void)
+{
+	return ISOLATE_FIELD(read_id_dfr0(), ID_DFR0_PERFMON);
+}
+
+static inline unsigned int read_feat_mtpmu_id_field(void)
+{
+	return ISOLATE_FIELD(read_id_dfr1(), ID_DFR1_MTPMU);
+}
+
+static inline bool is_feat_mtpmu_supported(void)
+{
+	if (DISABLE_MTPMU == FEAT_STATE_DISABLED) {
+		return false;
+	}
+
+	if (DISABLE_MTPMU == FEAT_STATE_ALWAYS) {
+		return true;
+	}
+
+	unsigned int mtpmu = read_feat_mtpmu_id_field();
+
+	return mtpmu != 0U && mtpmu != ID_DFR1_MTPMU_DISABLED;
+}
+
 #endif /* ARCH_FEATURES_H */
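
With these helpers in place, the dedicated mtpmu_disable assembly (dropped
from the bl*.mk files above) reduces to a few lines of C in the EL3
runtime code. A sketch under the assumption that an SDCR_MTPME_BIT
definition exists, mirroring MDCR_EL3.MTPME on AArch64; the
read_sdcr()/write_sdcr() accessors are the ones visible in the
arch_helpers.h hunk below:

    /*
     * Sketch only: the real replacement lives in the EL3 context-management
     * code. SDCR_MTPME_BIT is an assumption here.
     */
    static void disable_mtpmu_if_needed(void)
    {
            if (is_feat_mtpmu_supported()) {
                    /* Prohibit multi-threaded PMU event counting */
                    write_sdcr(read_sdcr() & ~SDCR_MTPME_BIT);
            }
    }
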
diff --git a/include/arch/aarch32/arch_helpers.h b/include/arch/aarch32/arch_helpers.h
index ca5a44b..3a7c768 100644
--- a/include/arch/aarch32/arch_helpers.h
+++ b/include/arch/aarch32/arch_helpers.h
@@ -221,6 +221,7 @@
 DEFINE_COPROCR_READ_FUNC(id_mmfr3, ID_MMFR3)
 DEFINE_COPROCR_READ_FUNC(id_mmfr4, ID_MMFR4)
 DEFINE_COPROCR_READ_FUNC(id_dfr0, ID_DFR0)
+DEFINE_COPROCR_READ_FUNC(id_dfr1, ID_DFR1)
 DEFINE_COPROCR_READ_FUNC(id_pfr0, ID_PFR0)
 DEFINE_COPROCR_READ_FUNC(id_pfr1, ID_PFR1)
 DEFINE_COPROCR_READ_FUNC(isr, ISR)
@@ -290,7 +291,7 @@
 DEFINE_COPROCR_RW_FUNCS(sdcr, SDCR)
 DEFINE_COPROCR_RW_FUNCS(hdcr, HDCR)
 DEFINE_COPROCR_RW_FUNCS(cnthp_ctl, CNTHP_CTL)
-DEFINE_COPROCR_READ_FUNC(pmcr, PMCR)
+DEFINE_COPROCR_RW_FUNCS(pmcr, PMCR)
 
 /*
  * Address translation
diff --git a/include/arch/aarch32/el3_common_macros.S b/include/arch/aarch32/el3_common_macros.S
index 585a9ae..697eb82 100644
--- a/include/arch/aarch32/el3_common_macros.S
+++ b/include/arch/aarch32/el3_common_macros.S
@@ -277,10 +277,6 @@
 	cps	#MODE32_mon
 	isb
 
-#if DISABLE_MTPMU
-	bl	mtpmu_disable
-#endif
-
 	.if \_warm_boot_mailbox
 		/* -------------------------------------------------------------
 		 * This code will be executed for both warm and cold resets.
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index f3bccc4..5dbcd0a 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -221,6 +221,12 @@
 #define ID_AA64DFR0_TRACEFILT_MASK	U(0xf)
 #define ID_AA64DFR0_TRACEFILT_SUPPORTED	U(1)
 #define ID_AA64DFR0_TRACEFILT_LENGTH	U(4)
+#define ID_AA64DFR0_PMUVER_LENGTH	U(4)
+#define ID_AA64DFR0_PMUVER_SHIFT	U(8)
+#define ID_AA64DFR0_PMUVER_MASK		U(0xf)
+#define ID_AA64DFR0_PMUVER_PMUV3	U(1)
+#define ID_AA64DFR0_PMUVER_PMUV3P7	U(7)
+#define ID_AA64DFR0_PMUVER_IMP_DEF	U(0xf)
 
 /* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
 #define ID_AA64DFR0_PMS_SHIFT		U(32)
@@ -237,6 +243,7 @@
 #define ID_AA64DFR0_MTPMU_SHIFT		U(48)
 #define ID_AA64DFR0_MTPMU_MASK		ULL(0xf)
 #define ID_AA64DFR0_MTPMU_SUPPORTED	ULL(1)
+#define ID_AA64DFR0_MTPMU_DISABLED	ULL(15)
 
 /* ID_AA64DFR0_EL1.BRBE definitions */
 #define ID_AA64DFR0_BRBE_SHIFT		U(52)
@@ -595,16 +602,16 @@
 #define MDCR_TDOSA_BIT		(ULL(1) << 10)
 #define MDCR_TDA_BIT		(ULL(1) << 9)
 #define MDCR_TPM_BIT		(ULL(1) << 6)
-#define MDCR_EL3_RESET_VAL	ULL(0x0)
+#define MDCR_EL3_RESET_VAL	MDCR_MTPME_BIT
 
 /* MDCR_EL2 definitions */
 #define MDCR_EL2_MTPME		(U(1) << 28)
-#define MDCR_EL2_HLP		(U(1) << 26)
+#define MDCR_EL2_HLP_BIT	(U(1) << 26)
 #define MDCR_EL2_E2TB(x)	((x) << 24)
 #define MDCR_EL2_E2TB_EL1	U(0x3)
-#define MDCR_EL2_HCCD		(U(1) << 23)
+#define MDCR_EL2_HCCD_BIT	(U(1) << 23)
 #define MDCR_EL2_TTRF		(U(1) << 19)
-#define MDCR_EL2_HPMD		(U(1) << 17)
+#define MDCR_EL2_HPMD_BIT	(U(1) << 17)
 #define MDCR_EL2_TPMS		(U(1) << 14)
 #define MDCR_EL2_E2PB(x)	((x) << 12)
 #define MDCR_EL2_E2PB_EL1	U(0x3)
@@ -615,6 +622,7 @@
 #define MDCR_EL2_HPME_BIT	(U(1) << 7)
 #define MDCR_EL2_TPM_BIT	(U(1) << 6)
 #define MDCR_EL2_TPMCR_BIT	(U(1) << 5)
+#define MDCR_EL2_HPMN_MASK	U(0x1f)
 #define MDCR_EL2_RESET_VAL	U(0x0)
 
 /* HSTR_EL2 definitions */
diff --git a/include/arch/aarch64/arch_features.h b/include/arch/aarch64/arch_features.h
index 609a95b..9d71987 100644
--- a/include/arch/aarch64/arch_features.h
+++ b/include/arch/aarch64/arch_features.h
@@ -639,6 +639,7 @@
 	return read_feat_trbe_id_field() != 0U;
 
 }
+
 /*******************************************************************************
  * Function to identify the presence of FEAT_SMEx (Scalar Matrix Extension)
  ******************************************************************************/
@@ -699,4 +700,29 @@
 			     ID_AA64MMFR0_EL1_TGRAN64);
 }
 
+static inline unsigned int read_feat_pmuv3_id_field(void)
+{
+	return ISOLATE_FIELD(read_id_aa64dfr0_el1(), ID_AA64DFR0_PMUVER);
+}
+
+static inline unsigned int read_feat_mtpmu_id_field(void)
+{
+	return ISOLATE_FIELD(read_id_aa64dfr0_el1(), ID_AA64DFR0_MTPMU);
+}
+
+static inline bool is_feat_mtpmu_supported(void)
+{
+	if (DISABLE_MTPMU == FEAT_STATE_DISABLED) {
+		return false;
+	}
+
+	if (DISABLE_MTPMU == FEAT_STATE_ALWAYS) {
+		return true;
+	}
+
+	unsigned int mtpmu = read_feat_mtpmu_id_field();
+
+	return (mtpmu != 0U) && (mtpmu != ID_AA64DFR0_MTPMU_DISABLED);
+}
+
 #endif /* ARCH_FEATURES_H */
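
Both read_feat_*_id_field() helpers above rely on ISOLATE_FIELD(), which
pairs a field name with its _SHIFT/_MASK definitions from arch.h. The
macro itself is not part of this diff, so treat the following as an
assumed shape:

    /* Assumed shape: extract the named field from an ID register value */
    #define ISOLATE_FIELD(reg, feat)                                    \
            ((unsigned int)(((reg) >> (feat ## _SHIFT)) & (feat ## _MASK)))

    /*
     * So read_feat_pmuv3_id_field() would expand to roughly:
     *   (read_id_aa64dfr0_el1() >> 8) & 0xf   -- the PMUVer field,
     * given the ID_AA64DFR0_PMUVER_SHIFT/_MASK values added above.
     */
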
diff --git a/include/arch/aarch64/el2_common_macros.S b/include/arch/aarch64/el2_common_macros.S
index dcaea3d..9609c0d 100644
--- a/include/arch/aarch64/el2_common_macros.S
+++ b/include/arch/aarch64/el2_common_macros.S
@@ -103,7 +103,7 @@
 	 */
 	mov_imm	x0, ((MDCR_EL2_RESET_VAL | \
 		      MDCR_SPD32(MDCR_SPD32_DISABLE)) \
-		      & ~(MDCR_EL2_HPMD | MDCR_TDOSA_BIT | \
+		      & ~(MDCR_EL2_HPMD_BIT | MDCR_TDOSA_BIT | \
 		      MDCR_TDA_BIT | MDCR_TPM_BIT))
 
 	msr	mdcr_el2, x0
@@ -244,10 +244,6 @@
 		isb
 	.endif /* _init_sctlr */
 
-#if DISABLE_MTPMU
-		bl	mtpmu_disable
-#endif
-
 	.if \_warm_boot_mailbox
 		/* -------------------------------------------------------------
 		 * This code will be executed for both warm and cold resets.
diff --git a/include/arch/aarch64/el3_common_macros.S b/include/arch/aarch64/el3_common_macros.S
index 2dee07d..6360461 100644
--- a/include/arch/aarch64/el3_common_macros.S
+++ b/include/arch/aarch64/el3_common_macros.S
@@ -119,22 +119,6 @@
 	 * MDCR_EL3.TPM: Set to zero so that EL0, EL1, and EL2 System register
 	 *  accesses to all Performance Monitors registers do not trap to EL3.
 	 *
-	 * MDCR_EL3.SCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
-	 *  prohibited in Secure state. This bit is RES0 in versions of the
-	 *  architecture with FEAT_PMUv3p5 not implemented, setting it to 1
-	 *  doesn't have any effect on them.
-	 *
-	 * MDCR_EL3.MCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
-	 *  prohibited in EL3. This bit is RES0 in versions of the
-	 *  architecture with FEAT_PMUv3p7 not implemented, setting it to 1
-	 *  doesn't have any effect on them.
-	 *
-	 * MDCR_EL3.SPME: Set to zero so that event counting by the programmable
-	 *  counters PMEVCNTR<n>_EL0 is prohibited in Secure state. If ARMv8.2
-	 *  Debug is not implemented this bit does not have any effect on the
-	 *  counters unless there is support for the implementation defined
-	 *  authentication interface ExternalSecureNoninvasiveDebugEnabled().
-	 *
 	 * MDCR_EL3.NSTB, MDCR_EL3.NSTBE: Set to zero so that Trace Buffer
 	 *  owning security state is Secure state. If FEAT_TRBE is implemented,
 	 *  accesses to Trace Buffer control registers at EL2 and EL1 in any
@@ -149,10 +133,9 @@
 	 * ---------------------------------------------------------------------
 	 */
 	mov_imm	x0, ((MDCR_EL3_RESET_VAL | MDCR_SDD_BIT | \
-		      MDCR_SPD32(MDCR_SPD32_DISABLE) | MDCR_SCCD_BIT | \
-		      MDCR_MCCD_BIT) & ~(MDCR_SPME_BIT | MDCR_TDOSA_BIT | \
-		      MDCR_TDA_BIT | MDCR_TPM_BIT | MDCR_NSTB(MDCR_NSTB_EL1) | \
-		      MDCR_NSTBE | MDCR_TTRF_BIT))
+		      MDCR_SPD32(MDCR_SPD32_DISABLE)) & \
+		    ~(MDCR_TDOSA_BIT | MDCR_TDA_BIT | MDCR_TPM_BIT | \
+		      MDCR_NSTB(MDCR_NSTB_EL1) | MDCR_NSTBE | MDCR_TTRF_BIT))
 
 	mrs	x1, id_aa64dfr0_el1
 	ubfx	x1, x1, #ID_AA64DFR0_TRACEFILT_SHIFT, #ID_AA64DFR0_TRACEFILT_LENGTH
@@ -162,36 +145,6 @@
 	msr	mdcr_el3, x0
 
 	/* ---------------------------------------------------------------------
-	 * Initialise PMCR_EL0 setting all fields rather than relying
-	 * on hw. Some fields are architecturally UNKNOWN on reset.
-	 *
-	 * PMCR_EL0.LP: Set to one so that event counter overflow, that
-	 *  is recorded in PMOVSCLR_EL0[0-30], occurs on the increment
-	 *  that changes PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU
-	 *  is implemented. This bit is RES0 in versions of the architecture
-	 *  earlier than ARMv8.5, setting it to 1 doesn't have any effect
-	 *  on them.
-	 *
-	 * PMCR_EL0.LC: Set to one so that cycle counter overflow, that
-	 *  is recorded in PMOVSCLR_EL0[31], occurs on the increment
-	 *  that changes PMCCNTR_EL0[63] from 1 to 0.
-	 *
-	 * PMCR_EL0.DP: Set to one so that the cycle counter,
-	 *  PMCCNTR_EL0 does not count when event counting is prohibited.
-	 *
-	 * PMCR_EL0.X: Set to zero to disable export of events.
-	 *
-	 * PMCR_EL0.D: Set to zero so that, when enabled, PMCCNTR_EL0
-	 *  counts on every clock cycle.
-	 * ---------------------------------------------------------------------
-	 */
-	mov_imm	x0, ((PMCR_EL0_RESET_VAL | PMCR_EL0_LP_BIT | \
-		      PMCR_EL0_LC_BIT | PMCR_EL0_DP_BIT) & \
-		    ~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT))
-
-	msr	pmcr_el0, x0
-
-	/* ---------------------------------------------------------------------
 	 * Enable External Aborts and SError Interrupts now that the exception
 	 * vectors have been setup.
 	 * ---------------------------------------------------------------------
@@ -340,10 +293,6 @@
 		isb
 	.endif /* _init_sctlr */
 
-#if DISABLE_MTPMU
-		bl	mtpmu_disable
-#endif
-
 	.if \_warm_boot_mailbox
 		/* -------------------------------------------------------------
 		 * This code will be executed for both warm and cold resets.
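
The MDCR_EL3 and PMCR_EL0 programming deleted here is not dropped: it
moves into the new lib/extensions/pmuv3 library added to the common
sources in the Makefile hunk above. A C sketch of the equivalent
PMCR_EL0 value, reusing the semantics spelled out in the deleted comment
block; the function name and its placement are assumptions:

    /*
     * Sketch of the relocated PMCR_EL0 initialisation: LP, LC and DP set,
     * X and D clear, exactly as the removed comment block describes.
     * write_pmcr_el0() is assumed to be the usual sysreg accessor.
     */
    static void init_pmcr_el0(void)
    {
            uint64_t pmcr = (PMCR_EL0_RESET_VAL | PMCR_EL0_LP_BIT |
                             PMCR_EL0_LC_BIT | PMCR_EL0_DP_BIT) &
                            ~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT);

            write_pmcr_el0(pmcr);
    }
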
diff --git a/include/common/fdt_wrappers.h b/include/common/fdt_wrappers.h
index b16510f..abbf976 100644
--- a/include/common/fdt_wrappers.h
+++ b/include/common/fdt_wrappers.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -49,7 +49,7 @@
 
 static inline uint32_t fdt_blob_size(const void *dtb)
 {
-	const uint32_t *dtb_header = dtb;
+	const uint32_t *dtb_header = (const uint32_t *)dtb;
 
 	return fdt32_to_cpu(dtb_header[1]);
 }
@@ -60,7 +60,8 @@
 	const void *prop = fdt_getprop(fdt, node, "status", &len);
 
 	/* A non-existing status property means the device is enabled. */
-	return (prop == NULL) || (len == 5 && strcmp(prop, "okay") == 0);
+	return (prop == NULL) || (len == 5 && strcmp((const char *)prop,
+		"okay") == 0);
 }
 
 #define fdt_for_each_compatible_node(dtb, node, compatible_str)       \
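
The casts above keep these helpers MISRA-clean without changing
behaviour. For reference, the iteration macro and the status check
combine naturally; a small usage sketch with a made-up compatible
string, assuming the helper truncated above is fdt_node_is_enabled():

    /* Usage sketch: count enabled nodes matching a compatible string */
    static int count_enabled_nodes(const void *dtb)
    {
            int node, count = 0;

            fdt_for_each_compatible_node(dtb, node, "arm,example-device") {
                    if (fdt_node_is_enabled(dtb, node)) {
                            count++;
                    }
            }

            return count;
    }
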
diff --git a/include/lib/cpus/aarch64/cortex_a520.h b/include/lib/cpus/aarch64/cortex_a520.h
new file mode 100644
index 0000000..4176981
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a520.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A520_H
+#define CORTEX_A520_H
+
+#define CORTEX_A520_MIDR					U(0x410FD800)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A520_CPUECTLR_EL1				S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A520_CPUPWRCTLR_EL1				S3_0_C15_C2_7
+#define CORTEX_A520_CPUPWRCTLR_EL1_CORE_PWRDN_BIT		U(1)
+
+#endif /* CORTEX_A520_H */
diff --git a/include/lib/cpus/aarch64/cortex_a720.h b/include/lib/cpus/aarch64/cortex_a720.h
new file mode 100644
index 0000000..47bbbc0
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a720.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A720_H
+#define CORTEX_A720_H
+
+#define CORTEX_A720_MIDR					U(0x410FD810)
+
+/* Cortex A720 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A720_BHB_LOOP_COUNT				U(132)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A720_CPUECTLR_EL1				S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A720_CPUPWRCTLR_EL1				S3_0_C15_C2_7
+#define CORTEX_A720_CPUPWRCTLR_EL1_CORE_PWRDN_BIT		U(1)
+
+#endif /* CORTEX_A720_H */
diff --git a/include/lib/cpus/aarch64/cortex_hayes.h b/include/lib/cpus/aarch64/cortex_hayes.h
deleted file mode 100644
index 82022e9..0000000
--- a/include/lib/cpus/aarch64/cortex_hayes.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef CORTEX_HAYES_H
-#define CORTEX_HAYES_H
-
-#define CORTEX_HAYES_MIDR					U(0x410FD800)
-
-/*******************************************************************************
- * CPU Extended Control register specific definitions
- ******************************************************************************/
-#define CORTEX_HAYES_CPUECTLR_EL1				S3_0_C15_C1_4
-
-/*******************************************************************************
- * CPU Power Control register specific definitions
- ******************************************************************************/
-#define CORTEX_HAYES_CPUPWRCTLR_EL1				S3_0_C15_C2_7
-#define CORTEX_HAYES_CPUPWRCTLR_EL1_CORE_PWRDN_BIT		U(1)
-
-#endif /* CORTEX_HAYES_H */
diff --git a/include/lib/cpus/aarch64/cortex_hunter.h b/include/lib/cpus/aarch64/cortex_hunter.h
deleted file mode 100644
index 24bd217..0000000
--- a/include/lib/cpus/aarch64/cortex_hunter.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef CORTEX_HUNTER_H
-#define CORTEX_HUNTER_H
-
-#define CORTEX_HUNTER_MIDR					U(0x410FD810)
-
-/* Cortex Hunter loop count for CVE-2022-23960 mitigation */
-#define CORTEX_HUNTER_BHB_LOOP_COUNT				U(132)
-
-/*******************************************************************************
- * CPU Extended Control register specific definitions
- ******************************************************************************/
-#define CORTEX_HUNTER_CPUECTLR_EL1				S3_0_C15_C1_4
-
-/*******************************************************************************
- * CPU Power Control register specific definitions
- ******************************************************************************/
-#define CORTEX_HUNTER_CPUPWRCTLR_EL1				S3_0_C15_C2_7
-#define CORTEX_HUNTER_CPUPWRCTLR_EL1_CORE_PWRDN_BIT		U(1)
-
-#endif /* CORTEX_HUNTER_H */
diff --git a/include/lib/cpus/aarch64/cortex_hunter_elp_arm.h b/include/lib/cpus/aarch64/cortex_hunter_elp_arm.h
deleted file mode 100644
index f9bb0f3..0000000
--- a/include/lib/cpus/aarch64/cortex_hunter_elp_arm.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2022, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef CORTEX_HUNTER_ELP_ARM_H
-#define CORTEX_HUNTER_ELP_ARM_H
-
-#define CORTEX_HUNTER_ELP_ARM_MIDR					U(0x410FD821)
-
-/* Cortex Hunter ELP loop count for CVE-2022-23960 mitigation */
-#define CORTEX_HUNTER_ELP_ARM_BHB_LOOP_COUNT				U(132)
-
-/*******************************************************************************
- * CPU Extended Control register specific definitions
- ******************************************************************************/
-#define CORTEX_HUNTER_ELP_ARM_CPUECTLR_EL1				S3_0_C15_C1_4
-
-/*******************************************************************************
- * CPU Power Control register specific definitions
- ******************************************************************************/
-#define CORTEX_HUNTER_ELP_ARM_CPUPWRCTLR_EL1				S3_0_C15_C2_7
-#define CORTEX_HUNTER_ELP_ARM_CPUPWRCTLR_EL1_CORE_PWRDN_BIT		U(1)
-
-#endif /* CORTEX_HUNTER_ELP_ARM_H */
diff --git a/include/lib/cpus/aarch64/cortex_x4.h b/include/lib/cpus/aarch64/cortex_x4.h
new file mode 100644
index 0000000..17d07c8
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_x4.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_X4_H
+#define CORTEX_X4_H
+
+#define CORTEX_X4_MIDR					U(0x410FD821)
+
+/* Cortex X4 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_X4_BHB_LOOP_COUNT			U(132)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_X4_CPUECTLR_EL1				S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_X4_CPUPWRCTLR_EL1			S3_0_C15_C2_7
+#define CORTEX_X4_CPUPWRCTLR_EL1_CORE_PWRDN_BIT		U(1)
+
+#endif /* CORTEX_X4_H */
diff --git a/include/lib/cpus/aarch64/neoverse_hermes.h b/include/lib/cpus/aarch64/neoverse_hermes.h
new file mode 100644
index 0000000..22492c3
--- /dev/null
+++ b/include/lib/cpus/aarch64/neoverse_hermes.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef NEOVERSE_HERMES_H
+#define NEOVERSE_HERMES_H
+
+#define NEOVERSE_HERMES_MIDR				U(0x410FD8E0)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define NEOVERSE_HERMES_CPUECTLR_EL1			S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define NEOVERSE_HERMES_CPUPWRCTLR_EL1			S3_0_C15_C2_7
+#define NEOVERSE_HERMES_CPUPWRCTLR_EL1_CORE_PWRDN_BIT	U(1)
+
+#endif /* NEOVERSE_HERMES_H */
diff --git a/include/lib/el3_runtime/context_mgmt.h b/include/lib/el3_runtime/context_mgmt.h
index 1a76d8e..aa76f3b 100644
--- a/include/lib/el3_runtime/context_mgmt.h
+++ b/include/lib/el3_runtime/context_mgmt.h
@@ -37,6 +37,9 @@
 void cm_prepare_el3_exit_ns(void);
 
 #ifdef __aarch64__
+#if IMAGE_BL31
+void cm_manage_extensions_el3(void);
+#endif
 #if CTX_INCLUDE_EL2_REGS
 void cm_el2_sysregs_context_save(uint32_t security_state);
 void cm_el2_sysregs_context_restore(uint32_t security_state);
@@ -84,6 +87,7 @@
 #else
 void *cm_get_next_context(void);
 void cm_set_next_context(void *context);
+static inline void cm_manage_extensions_el3(void) {}
 #endif /* __aarch64__ */
 
 #endif /* CONTEXT_MGMT_H */
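
cm_manage_extensions_el3() is the new BL31-only hook called once from
bl31_main() (see the bl31_main.c hunk above) for EL3 registers that never
change at runtime; the AArch32 path gets an empty inline stub so callers
need no #ifdefs. A plausible sketch of the AArch64 body, with only the
PMUv3 duty implied by this patch:

    #include <lib/extensions/pmuv3.h>

    /*
     * Sketch: one-time EL3 register setup. Only the PMUv3 call is implied
     * by this patch; any other extension handling is elided.
     */
    void cm_manage_extensions_el3(void)
    {
            /* Registers written here never change for the lifetime of TF-A */
            pmuv3_disable_el3();
    }
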
diff --git a/include/lib/extensions/pmuv3.h b/include/lib/extensions/pmuv3.h
new file mode 100644
index 0000000..5d5d055
--- /dev/null
+++ b/include/lib/extensions/pmuv3.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PMUV3_H
+#define PMUV3_H
+
+#include <context.h>
+
+void pmuv3_disable_el3(void);
+
+#ifdef __aarch64__
+void pmuv3_enable(cpu_context_t *ctx);
+void pmuv3_init_el2_unused(void);
+#endif /* __aarch64__ */
+
+#endif /* PMUV3_H */
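
The library behind this header absorbs the MDCR_EL3 cycle-counting policy
removed from el3_common_macros.S. A hedged sketch of pmuv3_disable_el3()
built from those deleted semantics (SCCD and MCCD set, SPME clear); the
actual implementation in lib/extensions/pmuv3 may differ:

    #include <arch_helpers.h>
    #include <lib/extensions/pmuv3.h>

    void pmuv3_disable_el3(void)
    {
            uint64_t mdcr_el3 = read_mdcr_el3();

            /* Prohibit cycle counting in Secure state and at EL3 */
            mdcr_el3 |= MDCR_SCCD_BIT | MDCR_MCCD_BIT;
            /* Prohibit event counting in Secure state */
            mdcr_el3 &= ~MDCR_SPME_BIT;
            write_mdcr_el3(mdcr_el3);
    }
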
diff --git a/include/plat/arm/common/arm_def.h b/include/plat/arm/common/arm_def.h
index 6be6ffd..bf1f93a 100644
--- a/include/plat/arm/common/arm_def.h
+++ b/include/plat/arm/common/arm_def.h
@@ -539,7 +539,8 @@
  * Define limit of firmware configuration memory:
  * ARM_FW_CONFIG + ARM_BL2_MEM_DESC memory
  */
-#define ARM_FW_CONFIGS_LIMIT		(ARM_BL_RAM_BASE + (PAGE_SIZE * 2))
+#define ARM_FW_CONFIGS_SIZE		(PAGE_SIZE * 2)
+#define ARM_FW_CONFIGS_LIMIT		(ARM_BL_RAM_BASE + ARM_FW_CONFIGS_SIZE)
 
 #if ENABLE_RME
 /*
diff --git a/lib/cpus/aarch32/cortex_a72.S b/lib/cpus/aarch32/cortex_a72.S
index 03914b2..77cf84d 100644
--- a/lib/cpus/aarch32/cortex_a72.S
+++ b/lib/cpus/aarch32/cortex_a72.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -87,11 +87,15 @@
 	b		cpu_rev_var_ls
 endfunc check_errata_859971
 
+add_erratum_entry cortex_a72, ERRATUM(859971), ERRATA_A72_859971
+
 func check_errata_cve_2017_5715
 	mov	r0, #ERRATA_MISSING
 	bx	lr
 endfunc check_errata_cve_2017_5715
 
+add_erratum_entry cortex_a72, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
+
 func check_errata_cve_2018_3639
 #if WORKAROUND_CVE_2018_3639
 	mov	r0, #ERRATA_APPLIES
@@ -101,11 +105,15 @@
 	bx	lr
 endfunc check_errata_cve_2018_3639
 
+add_erratum_entry cortex_a72, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
+
 func check_errata_cve_2022_23960
 	mov	r0, #ERRATA_MISSING
 	bx	lr
 endfunc check_errata_cve_2022_23960
 
+add_erratum_entry cortex_a72, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A72.
 	 * -------------------------------------------------
@@ -248,29 +256,7 @@
 	b	cortex_a72_disable_ext_debug
 endfunc cortex_a72_cluster_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for Cortex A72. Must follow AAPCS.
- */
-func cortex_a72_errata_report
-	push	{r12, lr}
-
-	bl	cpu_get_rev_var
-	mov	r4, r0
-
-	/*
-	 * Report all errata. The revision-variant information is passed to
-	 * checking functions of each errata.
-	 */
-	report_errata ERRATA_A72_859971, cortex_a72, 859971
-	report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715
-	report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639
-	report_errata WORKAROUND_CVE_2022_23960, cortex_a72, cve_2022_23960
-
-	pop	{r12, lr}
-	bx	lr
-endfunc cortex_a72_errata_report
-#endif
+errata_report_shim cortex_a72
 
 declare_cpu_ops cortex_a72, CORTEX_A72_MIDR, \
 	cortex_a72_reset_func, \
diff --git a/lib/cpus/aarch64/cortex_a520.S b/lib/cpus/aarch64/cortex_a520.S
new file mode 100644
index 0000000..5bbe862
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a520.S
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a520.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex A520 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex A520 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+	/* ----------------------------------------------------
+	 * HW will do the cache maintenance while powering down
+	 * ----------------------------------------------------
+	 */
+func cortex_a520_core_pwr_dwn
+	/* ---------------------------------------------------
+	 * Enable CPU power down bit in power control register
+	 * ---------------------------------------------------
+	 */
+	mrs	x0, CORTEX_A520_CPUPWRCTLR_EL1
+	orr	x0, x0, #CORTEX_A520_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+	msr	CORTEX_A520_CPUPWRCTLR_EL1, x0
+	isb
+	ret
+endfunc cortex_a520_core_pwr_dwn
+
+	/*
+	 * Errata printing function for Cortex A520. Must follow AAPCS.
+	 */
+#if REPORT_ERRATA
+func cortex_a520_errata_report
+	ret
+endfunc cortex_a520_errata_report
+#endif
+
+func cortex_a520_reset_func
+	/* Disable speculative loads */
+	msr	SSBS, xzr
+	isb
+	ret
+endfunc cortex_a520_reset_func
+
+	/* ---------------------------------------------
+	 * This function provides Cortex A520 specific
+	 * register information for crash reporting.
+	 * It needs to return with x6 pointing to
+	 * a list of register names in ascii and
+	 * x8 - x15 having values of registers to be
+	 * reported.
+	 * ---------------------------------------------
+	 */
+.section .rodata.cortex_a520_regs, "aS"
+cortex_a520_regs:  /* The ascii list of register names to be reported */
+	.asciz	"cpuectlr_el1", ""
+
+func cortex_a520_cpu_reg_dump
+	adr	x6, cortex_a520_regs
+	mrs	x8, CORTEX_A520_CPUECTLR_EL1
+	ret
+endfunc cortex_a520_cpu_reg_dump
+
+declare_cpu_ops cortex_a520, CORTEX_A520_MIDR, \
+	cortex_a520_reset_func, \
+	cortex_a520_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a720.S b/lib/cpus/aarch64/cortex_a720.S
new file mode 100644
index 0000000..529ab50
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a720.S
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a720.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex A720 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex A720 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+        wa_cve_2022_23960_bhb_vector_table CORTEX_A720_BHB_LOOP_COUNT, cortex_a720
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
+func cortex_a720_reset_func
+	/* Disable speculative loads */
+	msr	SSBS, xzr
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex A720 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_a720
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+	isb
+	ret
+endfunc cortex_a720_reset_func
+
+	/* ----------------------------------------------------
+	 * HW will do the cache maintenance while powering down
+	 * ----------------------------------------------------
+	 */
+func cortex_a720_core_pwr_dwn
+	/* ---------------------------------------------------
+	 * Enable CPU power down bit in power control register
+	 * ---------------------------------------------------
+	 */
+	mrs	x0, CORTEX_A720_CPUPWRCTLR_EL1
+	orr	x0, x0, #CORTEX_A720_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+	msr	CORTEX_A720_CPUPWRCTLR_EL1, x0
+	isb
+	ret
+endfunc cortex_a720_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A720. Must follow AAPCS.
+ */
+func cortex_a720_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a720, cve_2022_23960
+
+	ldp	x8, x30, [sp], #16
+	ret
+endfunc cortex_a720_errata_report
+#endif
+
+	/* ---------------------------------------------
+	 * This function provides Cortex A720-specific
+	 * register information for crash reporting.
+	 * It needs to return with x6 pointing to
+	 * a list of register names in ascii and
+	 * x8 - x15 having values of registers to be
+	 * reported.
+	 * ---------------------------------------------
+	 */
+.section .rodata.cortex_a720_regs, "aS"
+cortex_a720_regs:  /* The ascii list of register names to be reported */
+	.asciz	"cpuectlr_el1", ""
+
+func cortex_a720_cpu_reg_dump
+	adr	x6, cortex_a720_regs
+	mrs	x8, CORTEX_A720_CPUECTLR_EL1
+	ret
+endfunc cortex_a720_cpu_reg_dump
+
+declare_cpu_ops cortex_a720, CORTEX_A720_MIDR, \
+	cortex_a720_reset_func, \
+	cortex_a720_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_hayes.S b/lib/cpus/aarch64/cortex_hayes.S
deleted file mode 100644
index 445a691..0000000
--- a/lib/cpus/aarch64/cortex_hayes.S
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-#include <common/bl_common.h>
-#include <cortex_hayes.h>
-#include <cpu_macros.S>
-#include <plat_macros.S>
-
-/* Hardware handled coherency */
-#if HW_ASSISTED_COHERENCY == 0
-#error "Cortex Hayes must be compiled with HW_ASSISTED_COHERENCY enabled"
-#endif
-
-/* 64-bit only core */
-#if CTX_INCLUDE_AARCH32_REGS == 1
-#error "Cortex Hayes supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
-#endif
-
-	/* ----------------------------------------------------
-	 * HW will do the cache maintenance while powering down
-	 * ----------------------------------------------------
-	 */
-func cortex_hayes_core_pwr_dwn
-	/* ---------------------------------------------------
-	 * Enable CPU power down bit in power control register
-	 * ---------------------------------------------------
-	 */
-	mrs	x0, CORTEX_HAYES_CPUPWRCTLR_EL1
-	orr	x0, x0, #CORTEX_HAYES_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
-	msr	CORTEX_HAYES_CPUPWRCTLR_EL1, x0
-	isb
-	ret
-endfunc cortex_hayes_core_pwr_dwn
-
-	/*
-	 * Errata printing function for Cortex Hayes. Must follow AAPCS.
-	 */
-#if REPORT_ERRATA
-func cortex_hayes_errata_report
-	ret
-endfunc cortex_hayes_errata_report
-#endif
-
-func cortex_hayes_reset_func
-	/* Disable speculative loads */
-	msr	SSBS, xzr
-	isb
-	ret
-endfunc cortex_hayes_reset_func
-
-	/* ---------------------------------------------
-	 * This function provides Cortex Hayes specific
-	 * register information for crash reporting.
-	 * It needs to return with x6 pointing to
-	 * a list of register names in ascii and
-	 * x8 - x15 having values of registers to be
-	 * reported.
-	 * ---------------------------------------------
-	 */
-.section .rodata.cortex_hayes_regs, "aS"
-cortex_hayes_regs:  /* The ascii list of register names to be reported */
-	.asciz	"cpuectlr_el1", ""
-
-func cortex_hayes_cpu_reg_dump
-	adr	x6, cortex_hayes_regs
-	mrs	x8, CORTEX_HAYES_CPUECTLR_EL1
-	ret
-endfunc cortex_hayes_cpu_reg_dump
-
-declare_cpu_ops cortex_hayes, CORTEX_HAYES_MIDR, \
-	cortex_hayes_reset_func, \
-	cortex_hayes_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_hunter.S b/lib/cpus/aarch64/cortex_hunter.S
deleted file mode 100644
index 973637e..0000000
--- a/lib/cpus/aarch64/cortex_hunter.S
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-#include <common/bl_common.h>
-#include <cortex_hunter.h>
-#include <cpu_macros.S>
-#include <plat_macros.S>
-#include "wa_cve_2022_23960_bhb_vector.S"
-
-/* Hardware handled coherency */
-#if HW_ASSISTED_COHERENCY == 0
-#error "Cortex Hunter must be compiled with HW_ASSISTED_COHERENCY enabled"
-#endif
-
-/* 64-bit only core */
-#if CTX_INCLUDE_AARCH32_REGS == 1
-#error "Cortex Hunter supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
-#endif
-
-#if WORKAROUND_CVE_2022_23960
-        wa_cve_2022_23960_bhb_vector_table CORTEX_HUNTER_BHB_LOOP_COUNT, cortex_hunter
-#endif /* WORKAROUND_CVE_2022_23960 */
-
-func check_errata_cve_2022_23960
-#if WORKAROUND_CVE_2022_23960
-	mov	x0, #ERRATA_APPLIES
-#else
-	mov	x0, #ERRATA_MISSING
-#endif
-	ret
-endfunc check_errata_cve_2022_23960
-
-func cortex_hunter_reset_func
-	/* Disable speculative loads */
-	msr	SSBS, xzr
-
-#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
-	/*
-	 * The Cortex Hunter generic vectors are overridden to apply errata
-	 * mitigation on exception entry from lower ELs.
-	 */
-	adr	x0, wa_cve_vbar_cortex_hunter
-	msr	vbar_el3, x0
-#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
-
-	isb
-	ret
-endfunc cortex_hunter_reset_func
-
-	/* ----------------------------------------------------
-	 * HW will do the cache maintenance while powering down
-	 * ----------------------------------------------------
-	 */
-func cortex_hunter_core_pwr_dwn
-	/* ---------------------------------------------------
-	 * Enable CPU power down bit in power control register
-	 * ---------------------------------------------------
-	 */
-	mrs	x0, CORTEX_HUNTER_CPUPWRCTLR_EL1
-	orr	x0, x0, #CORTEX_HUNTER_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
-	msr	CORTEX_HUNTER_CPUPWRCTLR_EL1, x0
-	isb
-	ret
-endfunc cortex_hunter_core_pwr_dwn
-
-#if REPORT_ERRATA
-/*
- * Errata printing function for Cortex Hunter. Must follow AAPCS.
- */
-func cortex_hunter_errata_report
-	stp	x8, x30, [sp, #-16]!
-
-	bl	cpu_get_rev_var
-	mov	x8, x0
-
-	/*
-	 * Report all errata. The revision-variant information is passed to
-	 * checking functions of each errata.
-	 */
-	report_errata WORKAROUND_CVE_2022_23960, cortex_hunter, cve_2022_23960
-
-	ldp	x8, x30, [sp], #16
-	ret
-endfunc cortex_hunter_errata_report
-#endif
-
-	/* ---------------------------------------------
-	 * This function provides Cortex Hunter-specific
-	 * register information for crash reporting.
-	 * It needs to return with x6 pointing to
-	 * a list of register names in ascii and
-	 * x8 - x15 having values of registers to be
-	 * reported.
-	 * ---------------------------------------------
-	 */
-.section .rodata.cortex_hunter_regs, "aS"
-cortex_hunter_regs:  /* The ascii list of register names to be reported */
-	.asciz	"cpuectlr_el1", ""
-
-func cortex_hunter_cpu_reg_dump
-	adr	x6, cortex_hunter_regs
-	mrs	x8, CORTEX_HUNTER_CPUECTLR_EL1
-	ret
-endfunc cortex_hunter_cpu_reg_dump
-
-declare_cpu_ops cortex_hunter, CORTEX_HUNTER_MIDR, \
-	cortex_hunter_reset_func, \
-	cortex_hunter_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_hunter_elp_arm.S b/lib/cpus/aarch64/cortex_hunter_elp_arm.S
deleted file mode 100644
index 5f86d4e..0000000
--- a/lib/cpus/aarch64/cortex_hunter_elp_arm.S
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (c) 2022, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-#include <common/bl_common.h>
-#include <cortex_hunter_elp_arm.h>
-#include <cpu_macros.S>
-#include <plat_macros.S>
-#include "wa_cve_2022_23960_bhb_vector.S"
-
-/* Hardware handled coherency */
-#if HW_ASSISTED_COHERENCY == 0
-#error "Cortex Hunter ELP must be compiled with HW_ASSISTED_COHERENCY enabled"
-#endif
-
-/* 64-bit only core */
-#if CTX_INCLUDE_AARCH32_REGS == 1
-#error "Cortex Hunter ELP supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
-#endif
-
-#if WORKAROUND_CVE_2022_23960
-        wa_cve_2022_23960_bhb_vector_table CORTEX_HUNTER_ELP_ARM_BHB_LOOP_COUNT, cortex_hunter_elp_arm
-#endif /* WORKAROUND_CVE_2022_23960 */
-
-func check_errata_cve_2022_23960
-#if WORKAROUND_CVE_2022_23960
-	mov	x0, #ERRATA_APPLIES
-#else
-	mov	x0, #ERRATA_MISSING
-#endif
-	ret
-endfunc check_errata_cve_2022_23960
-
-func cortex_hunter_elp_arm_reset_func
-	/* Disable speculative loads */
-	msr	SSBS, xzr
-
-#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
-	/*
-	 * The Cortex Hunter ELP generic vectors are overridden to apply errata
-	 * mitigation on exception entry from lower ELs.
-	 */
-	adr	x0, wa_cve_vbar_cortex_hunter_elp_arm
-	msr	vbar_el3, x0
-#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
-
-	isb
-	ret
-endfunc cortex_hunter_elp_arm_reset_func
-
-	/* ----------------------------------------------------
-	 * HW will do the cache maintenance while powering down
-	 * ----------------------------------------------------
-	 */
-func cortex_hunter_elp_arm_core_pwr_dwn
-	/* ---------------------------------------------------
-	 * Enable CPU power down bit in power control register
-	 * ---------------------------------------------------
-	 */
-	mrs	x0, CORTEX_HUNTER_ELP_ARM_CPUPWRCTLR_EL1
-	orr	x0, x0, #CORTEX_HUNTER_ELP_ARM_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
-	msr	CORTEX_HUNTER_ELP_ARM_CPUPWRCTLR_EL1, x0
-	isb
-	ret
-endfunc cortex_hunter_elp_arm_core_pwr_dwn
-
-#if REPORT_ERRATA
-/*
- * Errata printing function for Cortex Hunter ELP. Must follow AAPCS.
- */
-func cortex_hunter_elp_arm_errata_report
-	stp	x8, x30, [sp, #-16]!
-
-	bl	cpu_get_rev_var
-	mov	x8, x0
-
-	/*
-	 * Report all errata. The revision-variant information is passed to
-	 * checking functions of each errata.
-	 */
-	report_errata WORKAROUND_CVE_2022_23960, cortex_hunter_elp_arm, cve_2022_23960
-
-	ldp	x8, x30, [sp], #16
-	ret
-endfunc cortex_hunter_elp_arm_errata_report
-#endif
-
-	/* ---------------------------------------------
-	 * This function provides Cortex Hunter ELP-specific
-	 * register information for crash reporting.
-	 * It needs to return with x6 pointing to
-	 * a list of register names in ascii and
-	 * x8 - x15 having values of registers to be
-	 * reported.
-	 * ---------------------------------------------
-	 */
-.section .rodata.cortex_hunter_elp_arm_regs, "aS"
-cortex_hunter_elp_arm_regs:  /* The ascii list of register names to be reported */
-	.asciz	"cpuectlr_el1", ""
-
-func cortex_hunter_elp_arm_cpu_reg_dump
-	adr	x6, cortex_hunter_elp_arm_regs
-	mrs	x8, CORTEX_HUNTER_ELP_ARM_CPUECTLR_EL1
-	ret
-endfunc cortex_hunter_elp_arm_cpu_reg_dump
-
-declare_cpu_ops cortex_hunter_elp_arm, CORTEX_HUNTER_ELP_ARM_MIDR, \
-	cortex_hunter_elp_arm_reset_func, \
-	cortex_hunter_elp_arm_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_x4.S b/lib/cpus/aarch64/cortex_x4.S
new file mode 100644
index 0000000..db87008
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_x4.S
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_x4.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex X4 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex X4 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+        wa_cve_2022_23960_bhb_vector_table CORTEX_X4_BHB_LOOP_COUNT, cortex_x4
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
+func cortex_x4_reset_func
+	/* Disable speculative loads */
+	msr	SSBS, xzr
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex X4 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_x4
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+	isb
+	ret
+endfunc cortex_x4_reset_func
+
+	/* ----------------------------------------------------
+	 * HW will do the cache maintenance while powering down
+	 * ----------------------------------------------------
+	 */
+func cortex_x4_core_pwr_dwn
+	/* ---------------------------------------------------
+	 * Enable CPU power down bit in power control register
+	 * ---------------------------------------------------
+	 */
+	mrs	x0, CORTEX_X4_CPUPWRCTLR_EL1
+	orr	x0, x0, #CORTEX_X4_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+	msr	CORTEX_X4_CPUPWRCTLR_EL1, x0
+	isb
+	ret
+endfunc cortex_x4_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex X4. Must follow AAPCS.
+ */
+func cortex_x4_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata WORKAROUND_CVE_2022_23960, cortex_x4, cve_2022_23960
+
+	ldp	x8, x30, [sp], #16
+	ret
+endfunc cortex_x4_errata_report
+#endif
+
+	/* ---------------------------------------------
+	 * This function provides Cortex X4-specific
+	 * register information for crash reporting.
+	 * It needs to return with x6 pointing to
+	 * a list of register names in ascii and
+	 * x8 - x15 having values of registers to be
+	 * reported.
+	 * ---------------------------------------------
+	 */
+.section .rodata.cortex_x4_regs, "aS"
+cortex_x4_regs:  /* The ascii list of register names to be reported */
+	.asciz	"cpuectlr_el1", ""
+
+func cortex_x4_cpu_reg_dump
+	adr	x6, cortex_x4_regs
+	mrs	x8, CORTEX_X4_CPUECTLR_EL1
+	ret
+endfunc cortex_x4_cpu_reg_dump
+
+declare_cpu_ops cortex_x4, CORTEX_X4_MIDR, \
+	cortex_x4_reset_func, \
+	cortex_x4_core_pwr_dwn
diff --git a/lib/cpus/aarch64/neoverse_hermes.S b/lib/cpus/aarch64/neoverse_hermes.S
new file mode 100644
index 0000000..cb90b71
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_hermes.S
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <neoverse_hermes.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Neoverse Hermes must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Neoverse Hermes supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+cpu_reset_func_start neoverse_hermes
+	/* Disable speculative loads */
+	msr	SSBS, xzr
+cpu_reset_func_end neoverse_hermes
+
+	/* ----------------------------------------------------
+	 * HW will do the cache maintenance while powering down
+	 * ----------------------------------------------------
+	 */
+func neoverse_hermes_core_pwr_dwn
+	/* ---------------------------------------------------
+	 * Enable CPU power down bit in power control register
+	 * ---------------------------------------------------
+	 */
+	sysreg_bit_set NEOVERSE_HERMES_CPUPWRCTLR_EL1, NEOVERSE_HERMES_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+	isb
+	ret
+endfunc neoverse_hermes_core_pwr_dwn
+
+errata_report_shim neoverse_hermes
+
+	/* ---------------------------------------------
+	 * This function provides Neoverse Hermes specific
+	 * register information for crash reporting.
+	 * It needs to return with x6 pointing to
+	 * a list of register names in ascii and
+	 * x8 - x15 having values of registers to be
+	 * reported.
+	 * ---------------------------------------------
+	 */
+.section .rodata.neoverse_hermes_regs, "aS"
+neoverse_hermes_regs:  /* The ascii list of register names to be reported */
+	.asciz	"cpuectlr_el1", ""
+
+func neoverse_hermes_cpu_reg_dump
+	adr	x6, neoverse_hermes_regs
+	mrs	x8, NEOVERSE_HERMES_CPUECTLR_EL1
+	ret
+endfunc neoverse_hermes_cpu_reg_dump
+
+declare_cpu_ops neoverse_hermes, NEOVERSE_HERMES_MIDR, \
+	neoverse_hermes_reset_func, \
+	neoverse_hermes_core_pwr_dwn
diff --git a/lib/cpus/aarch64/qemu_max.S b/lib/cpus/aarch64/qemu_max.S
index 8948fda..00963bc 100644
--- a/lib/cpus/aarch64/qemu_max.S
+++ b/lib/cpus/aarch64/qemu_max.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -47,14 +47,7 @@
 	b	dcsw_op_all
 endfunc qemu_max_cluster_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for QEMU "max". Must follow AAPCS.
- */
-func qemu_max_errata_report
-	ret
-endfunc qemu_max_errata_report
-#endif
+errata_report_shim qemu_max
 
 	/* ---------------------------------------------
 	 * This function provides cpu specific
diff --git a/lib/cpus/aarch64/rainier.S b/lib/cpus/aarch64/rainier.S
index 3b7b8b2..c770f54 100644
--- a/lib/cpus/aarch64/rainier.S
+++ b/lib/cpus/aarch64/rainier.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -41,78 +41,30 @@
 	ret
 endfunc rainier_disable_speculative_loads
 
-	/* --------------------------------------------------
-	 * Errata Workaround for Neoverse N1 Errata #1868343.
-	 * This applies to revision <= r4p0 of Neoverse N1.
-	 * This workaround is the same as the workaround for
-	 * errata 1262606 and 1275112 but applies to a wider
-	 * revision range.
-	 * Rainier R0P0 is based on Neoverse N1 R4P0 so the
-	 * workaround checks for r0p0 version of Rainier CPU.
-	 * Inputs:
-	 * x0: variant[4:7] and revision[0:3] of current cpu.
-	 * Shall clobber: x0, x1 & x17
-	 * --------------------------------------------------
-	 */
-func errata_n1_1868343_wa
-	/*
-	 * Compare x0 against revision r4p0
-	 */
-	mov	x17, x30
-	bl	check_errata_1868343
-	cbz	x0, 1f
-	mrs	x1, RAINIER_CPUACTLR_EL1
-	orr	x1, x1, RAINIER_CPUACTLR_EL1_BIT_13
-	msr	RAINIER_CPUACTLR_EL1, x1
-	isb
-1:
-	ret	x17
-endfunc errata_n1_1868343_wa
-
-func check_errata_1868343
-	/* Applies to r0p0 of Rainier CPU */
-	mov	x1, #0x00
-	b	cpu_rev_var_ls
-endfunc check_errata_1868343
+	/* Rainier R0P0 is based on Neoverse N1 R4P0. */
+workaround_reset_start rainier, ERRATUM(1868343), ERRATA_N1_1868343
+	sysreg_bit_set RAINIER_CPUACTLR_EL1, RAINIER_CPUACTLR_EL1_BIT_13
+workaround_reset_end rainier, ERRATUM(1868343)
 
-func rainier_reset_func
-	mov	x19, x30
+check_erratum_ls rainier, ERRATUM(1868343), CPU_REV(0, 0)
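+/*
+ * check_erratum_ls flags the erratum as applying to revisions lower than or
+ * the same as the given CPU_REV, i.e. exactly r0p0 here, matching the
+ * open-coded cpu_rev_var_ls comparison this replaces.
+ */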
 
+cpu_reset_func_start rainier
 	bl	rainier_disable_speculative_loads
-
 	/* Forces all cacheable atomic instructions to be near */
-	mrs	x0, RAINIER_CPUACTLR2_EL1
-	orr	x0, x0, #RAINIER_CPUACTLR2_EL1_BIT_2
-	msr	RAINIER_CPUACTLR2_EL1, x0
-	isb
-
-	bl	cpu_get_rev_var
-	mov	x18, x0
-
-#if ERRATA_N1_1868343
-	mov	x0, x18
-	bl	errata_n1_1868343_wa
-#endif
+	sysreg_bit_set RAINIER_CPUACTLR2_EL1, RAINIER_CPUACTLR2_EL1_BIT_2
 
 #if ENABLE_FEAT_AMU
 	/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
-	mrs	x0, actlr_el3
-	orr	x0, x0, #RAINIER_ACTLR_AMEN_BIT
-	msr	actlr_el3, x0
+	sysreg_bit_set actlr_el3, RAINIER_ACTLR_AMEN_BIT
 
 	/* Make sure accesses from EL0/EL1 are not trapped to EL2 */
-	mrs	x0, actlr_el2
-	orr	x0, x0, #RAINIER_ACTLR_AMEN_BIT
-	msr	actlr_el2, x0
+	sysreg_bit_set actlr_el2, RAINIER_ACTLR_AMEN_BIT
 
 	/* Enable group0 counters */
 	mov	x0, #RAINIER_AMU_GROUP0_MASK
 	msr	CPUAMCNTENSET_EL0, x0
 #endif
-
-	isb
-	ret	x19
-endfunc rainier_reset_func
+cpu_reset_func_end rainier
 
 	/* ---------------------------------------------
 	 * HW will do the cache maintenance while powering down
@@ -123,33 +75,12 @@
 	 * Enable CPU power down bit in power control register
 	 * ---------------------------------------------
 	 */
-	mrs	x0, RAINIER_CPUPWRCTLR_EL1
-	orr	x0, x0, #RAINIER_CORE_PWRDN_EN_MASK
-	msr	RAINIER_CPUPWRCTLR_EL1, x0
+	sysreg_bit_set RAINIER_CPUPWRCTLR_EL1, RAINIER_CORE_PWRDN_EN_MASK
 	isb
 	ret
 endfunc rainier_core_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for Rainier. Must follow AAPCS.
- */
-func rainier_errata_report
-	stp	x8, x30, [sp, #-16]!
-
-	bl	cpu_get_rev_var
-	mov	x8, x0
-
-	/*
-	 * Report all errata. The revision-variant information is passed to
-	 * checking functions of each errata.
-	 */
-	report_errata ERRATA_N1_1868343, rainier, 1868343
-
-	ldp	x8, x30, [sp], #16
-	ret
-endfunc rainier_errata_report
-#endif
+errata_report_shim rainier
 
 	/* ---------------------------------------------
 	 * This function provides Rainier specific
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
index 62e30fc..6414aaa 100644
--- a/lib/el3_runtime/aarch32/context_mgmt.c
+++ b/lib/el3_runtime/aarch32/context_mgmt.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2023, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -17,6 +17,7 @@
 #include <context.h>
 #include <lib/el3_runtime/context_mgmt.h>
 #include <lib/extensions/amu.h>
+#include <lib/extensions/pmuv3.h>
 #include <lib/extensions/sys_reg_trace.h>
 #include <lib/extensions/trf.h>
 #include <lib/utils.h>
@@ -147,6 +148,12 @@
 	if (is_feat_trf_supported()) {
 		trf_enable();
 	}
+
+	/*
+	 * Also applies to PMU < v3. The PMU is only disabled for EL3 and Secure
+	 * state execution. This does not affect lower NS ELs.
+	 */
+	pmuv3_disable_el3();
 #endif
 }
 
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 9922fb1..771fcdc 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -568,6 +568,8 @@
 	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
 	mrs	x18, sp_el0
 	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
+
+	/* PMUv3 is presumed to be always present */
 	mrs	x9, pmcr_el0
 	str	x9, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
 	/* Disable cycle counter when event counting is prohibited */
@@ -651,6 +653,8 @@
 	msr	APGAKeyLo_EL1, x8
 	msr	APGAKeyHi_EL1, x9
 #endif /* CTX_INCLUDE_PAUTH_REGS */
+
+	/* PMUv3 is presumed to be always present */
 	ldr	x0, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
 	msr	pmcr_el0, x0
 	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 3760b8f..4a6598a 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -24,6 +24,7 @@
 #include <lib/extensions/amu.h>
 #include <lib/extensions/brbe.h>
 #include <lib/extensions/mpam.h>
+#include <lib/extensions/pmuv3.h>
 #include <lib/extensions/sme.h>
 #include <lib/extensions/spe.h>
 #include <lib/extensions/sve.h>
@@ -37,6 +38,7 @@
 CASSERT(((TWED_DELAY & ~SCR_TWEDEL_MASK) == 0U), assert_twed_delay_value_check);
 #endif /* ENABLE_FEAT_TWED */
 
+static void manage_extensions_nonsecure(cpu_context_t *ctx);
 static void manage_extensions_secure(cpu_context_t *ctx);
 
 static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info *ep)
@@ -265,16 +267,6 @@
 	write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_ICC_SRE_EL2,
 			icc_sre_el2);
 
-	/*
-	 * Initialize MDCR_EL2.HPMN to its hardware reset value so we don't
-	 * throw anyone off who expects this to be sensible.
-	 * TODO: A similar thing happens in cm_prepare_el3_exit. They should be
-	 * unified with the proper PMU implementation
-	 */
-	u_register_t mdcr_el2 = ((read_pmcr_el0() >> PMCR_EL0_N_SHIFT) &
-			PMCR_EL0_N_MASK);
-	write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_MDCR_EL2, mdcr_el2);
-
 	if (is_feat_hcx_supported()) {
 		/*
 		 * Initialize register HCRX_EL2 with its init value.
@@ -288,6 +280,8 @@
 			HCRX_EL2_INIT_VAL);
 	}
 #endif /* CTX_INCLUDE_EL2_REGS */
+
+	manage_extensions_nonsecure(ctx);
 }
 
 /*******************************************************************************
@@ -504,9 +498,11 @@
 /*******************************************************************************
  * Enable architecture extensions on first entry to Non-secure world.
  * When EL2 is implemented but unused `el2_unused` is non-zero, otherwise
- * it is zero.
+ * it is zero. This function updates some registers in-place and its contents
+ * are gradually being migrated to cm_manage_extensions_el3 and
+ * cm_manage_extensions_nonsecure.
  ******************************************************************************/
-static void manage_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
+static void manage_extensions_nonsecure_mixed(bool el2_unused, cpu_context_t *ctx)
 {
 #if IMAGE_BL31
 	if (is_feat_spe_supported()) {
@@ -549,6 +545,39 @@
 }
 
 /*******************************************************************************
+ * Enable architecture extensions for EL3 execution. This function only updates
+ * registers in-place which are expected to either never change or be
+ * overwritten by el3_exit.
+ ******************************************************************************/
+#if IMAGE_BL31
+void cm_manage_extensions_el3(void)
+{
+	pmuv3_disable_el3();
+}
+#endif /* IMAGE_BL31 */
+
+/*******************************************************************************
+ * Enable architecture extensions on first entry to Non-secure world.
+ ******************************************************************************/
+static void manage_extensions_nonsecure(cpu_context_t *ctx)
+{
+#if IMAGE_BL31
+	pmuv3_enable(ctx);
+#endif /* IMAGE_BL31 */
+}
+
+/*******************************************************************************
+ * Enable architecture extensions in-place at EL2 on first entry to Non-secure
+ * world when EL2 is empty and unused.
+ ******************************************************************************/
+static void manage_extensions_nonsecure_el2_unused(void)
+{
+#if IMAGE_BL31
+	pmuv3_init_el2_unused();
+#endif /* IMAGE_BL31 */
+}
+
+/*******************************************************************************
  * Enable architecture extensions on first entry to Secure world.
  ******************************************************************************/
 static void manage_extensions_secure(cpu_context_t *ctx)
@@ -758,24 +787,11 @@
 			 * relying on hw. Some fields are architecturally
 			 * UNKNOWN on reset.
 			 *
-			 * MDCR_EL2.HLP: Set to one so that event counter
-			 *  overflow, that is recorded in PMOVSCLR_EL0[0-30],
-			 *  occurs on the increment that changes
-			 *  PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU is
-			 *  implemented. This bit is RES0 in versions of the
-			 *  architecture earlier than ARMv8.5, setting it to 1
-			 *  doesn't have any effect on them.
-			 *
 			 * MDCR_EL2.TTRF: Set to zero so that access to Trace
 			 *  Filter Control register TRFCR_EL1 at EL1 is not
 			 *  trapped to EL2. This bit is RES0 in versions of
 			 *  the architecture earlier than ARMv8.4.
 			 *
-			 * MDCR_EL2.HPMD: Set to one so that event counting is
-			 *  prohibited at EL2. This bit is RES0 in versions of
-			 *  the architecture earlier than ARMv8.1, setting it
-			 *  to 1 doesn't have any effect on them.
-			 *
 			 * MDCR_EL2.TPMS: Set to zero so that accesses to
 			 *  Statistical Profiling control registers from EL1
 			 *  do not trap to EL2. This bit is RES0 when SPE is
@@ -795,35 +811,15 @@
 			 * MDCR_EL2.TDE: Set to zero so that debug exceptions
 			 *  are not routed to EL2.
 			 *
-			 * MDCR_EL2.HPME: Set to zero to disable EL2 Performance
-			 *  Monitors.
-			 *
-			 * MDCR_EL2.TPM: Set to zero so that Non-secure EL0 and
-			 *  EL1 accesses to all Performance Monitors registers
-			 *  are not trapped to EL2.
-			 *
-			 * MDCR_EL2.TPMCR: Set to zero so that Non-secure EL0
-			 *  and EL1 accesses to the PMCR_EL0 or PMCR are not
-			 *  trapped to EL2.
-			 *
-			 * MDCR_EL2.HPMN: Set to value of PMCR_EL0.N which is the
-			 *  architecturally-defined reset value.
-			 *
 			 * MDCR_EL2.E2TB: Set to zero so that the trace Buffer
 			 *  owning exception level is NS-EL1 and, tracing is
 			 *  prohibited at NS-EL2. These bits are RES0 when
 			 *  FEAT_TRBE is not implemented.
 			 */
-			mdcr_el2 = ((MDCR_EL2_RESET_VAL | MDCR_EL2_HLP |
-				     MDCR_EL2_HPMD) |
-				   ((read_pmcr_el0() & PMCR_EL0_N_BITS)
-				   >> PMCR_EL0_N_SHIFT)) &
-				   ~(MDCR_EL2_TTRF | MDCR_EL2_TPMS |
+			mdcr_el2 = ((MDCR_EL2_RESET_VAL) & ~(MDCR_EL2_TTRF |
 				     MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT |
 				     MDCR_EL2_TDA_BIT | MDCR_EL2_TDE_BIT |
-				     MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT |
-				     MDCR_EL2_TPMCR_BIT |
-				     MDCR_EL2_E2TB(MDCR_EL2_E2TB_EL1));
+				     MDCR_EL2_E2TB(MDCR_EL2_E2TB_EL1)));
 
 			write_mdcr_el2(mdcr_el2);
 
@@ -845,8 +841,10 @@
 			 */
 			write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL &
 						~(CNTHP_CTL_ENABLE_BIT));
+
+			manage_extensions_nonsecure_el2_unused();
 		}
-		manage_extensions_nonsecure(el2_unused, ctx);
+		manage_extensions_nonsecure_mixed(el2_unused, ctx);
 	}
 
 	cm_el1_sysregs_context_restore(security_state);
@@ -1167,7 +1165,7 @@
 	 * direct register updates. Therefore, do this here
 	 * instead of when setting up context.
 	 */
-	manage_extensions_nonsecure(0, ctx);
+	manage_extensions_nonsecure_mixed(0, ctx);
 
 	/*
 	 * Set the NS bit to be able to access the ICC_SRE_EL2
diff --git a/lib/extensions/mtpmu/aarch32/mtpmu.S b/lib/extensions/mtpmu/aarch32/mtpmu.S
deleted file mode 100644
index 834cee3..0000000
--- a/lib/extensions/mtpmu/aarch32/mtpmu.S
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-
-	.global	mtpmu_disable
-
-/* -------------------------------------------------------------
- * The functions in this file are called at entrypoint, before
- * the CPU has decided whether this is a cold or a warm boot.
- * Therefore there are no stack yet to rely on for a C function
- * call.
- * -------------------------------------------------------------
- */
-
-/*
- * bool mtpmu_supported(void)
- *
- * Return a boolean indicating whether FEAT_MTPMU is supported or not.
- *
- * Trash registers: r0.
- */
-func mtpmu_supported
-	ldcopr	r0, ID_DFR1
-	and	r0, r0, #(ID_DFR1_MTPMU_MASK >> ID_DFR1_MTPMU_SHIFT)
-	cmp	r0, #ID_DFR1_MTPMU_SUPPORTED
-	mov	r0, #0
-	addeq	r0, r0, #1
-	bx	lr
-endfunc mtpmu_supported
-
-/*
- * bool el_implemented(unsigned int el)
- *
- * Return a boolean indicating if the specified EL (2 or 3) is implemented.
- *
- * Trash registers: r0
- */
-func el_implemented
-	cmp	r0, #3
-	ldcopr	r0, ID_PFR1
-	lsreq	r0, r0, #ID_PFR1_SEC_SHIFT
-	lsrne	r0, r0, #ID_PFR1_VIRTEXT_SHIFT
-	/*
-	 * ID_PFR1_VIRTEXT_MASK is the same as ID_PFR1_SEC_MASK
-	 * so use any one of them
-	 */
-	and	r0, r0, #ID_PFR1_VIRTEXT_MASK
-	cmp	r0, #ID_PFR1_ELx_ENABLED
-	mov	r0, #0
-	addeq	r0, r0, #1
-	bx	lr
-endfunc el_implemented
-
-/*
- * void mtpmu_disable(void)
- *
- * Disable mtpmu feature if supported.
- *
- * Trash register: r0, r1, r2
- */
-func mtpmu_disable
-	mov	r2, lr
-	bl	mtpmu_supported
-	cmp	r0, #0
-	bxeq	r2	/* FEAT_MTPMU not supported */
-
-	/* FEAT_MTMPU Supported */
-	mov	r0, #3
-	bl	el_implemented
-	cmp	r0, #0
-	beq	1f
-
-	/* EL3 implemented */
-	ldcopr	r0, SDCR
-	ldr	r1, =SDCR_MTPME_BIT
-	bic	r0, r0, r1
-	stcopr	r0, SDCR
-
-	/*
-	 * If EL3 is implemented, HDCR.MTPME is implemented as Res0 and
-	 * FEAT_MTPMU is controlled only from EL3, so no need to perform
-	 * any operations for EL2.
-	 */
-	isb
-	bx	r2
-1:
-	/* EL3 not implemented */
-	mov	r0, #2
-	bl	el_implemented
-	cmp	r0, #0
-	bxeq	r2	/* No EL2 or EL3 implemented */
-
-	/* EL2 implemented */
-	ldcopr	r0, HDCR
-	ldr	r1, =HDCR_MTPME_BIT
-	orr	r0, r0, r1
-	stcopr	r0, HDCR
-	isb
-	bx	r2
-endfunc mtpmu_disable
diff --git a/lib/extensions/mtpmu/aarch64/mtpmu.S b/lib/extensions/mtpmu/aarch64/mtpmu.S
deleted file mode 100644
index 0a1d57b..0000000
--- a/lib/extensions/mtpmu/aarch64/mtpmu.S
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-
-	.global	mtpmu_disable
-
-/* -------------------------------------------------------------
- * The functions in this file are called at entrypoint, before
- * the CPU has decided whether this is a cold or a warm boot.
- * Therefore there are no stack yet to rely on for a C function
- * call.
- * -------------------------------------------------------------
- */
-
-/*
- * bool mtpmu_supported(void)
- *
- * Return a boolean indicating whether FEAT_MTPMU is supported or not.
- *
- * Trash registers: x0, x1
- */
-func mtpmu_supported
-	mrs	x0, id_aa64dfr0_el1
-	mov_imm	x1, ID_AA64DFR0_MTPMU_MASK
-	and	x0, x1, x0, LSR #ID_AA64DFR0_MTPMU_SHIFT
-	cmp	x0, ID_AA64DFR0_MTPMU_SUPPORTED
-	cset	x0, eq
-	ret
-endfunc mtpmu_supported
-
-/*
- * bool el_implemented(unsigned int el_shift)
- *
- * Return a boolean indicating if the specified EL is implemented.
- * The EL is represented as the bitmask shift on id_aa64pfr0_el1 register.
- *
- * Trash registers: x0, x1
- */
-func el_implemented
-	mrs	x1, id_aa64pfr0_el1
-	lsr	x1, x1, x0
-	cmp	x1, #ID_AA64PFR0_ELX_MASK
-	cset	x0, eq
-	ret
-endfunc el_implemented
-
-/*
- * void mtpmu_disable(void)
- *
- * Disable mtpmu feature if supported.
- *
- * Trash register: x0, x1, x30
- */
-func mtpmu_disable
-	mov	x10, x30
-	bl	mtpmu_supported
-	cbz	x0, exit_disable
-
-	/* FEAT_MTMPU Supported */
-	mov_imm	x0, ID_AA64PFR0_EL3_SHIFT
-	bl	el_implemented
-	cbz	x0, 1f
-
-	/* EL3 implemented */
-	mrs	x0, mdcr_el3
-	mov_imm x1, MDCR_MTPME_BIT
-	bic	x0, x0, x1
-	msr	mdcr_el3, x0
-
-	/*
-	 * If EL3 is implemented, MDCR_EL2.MTPME is implemented as Res0 and
-	 * FEAT_MTPMU is controlled only from EL3, so no need to perform
-	 * any operations for EL2.
-	 */
-	isb
-exit_disable:
-	ret	x10
-1:
-	/* EL3 not implemented */
-	mov_imm	x0, ID_AA64PFR0_EL2_SHIFT
-	bl	el_implemented
-	cbz	x0, exit_disable
-
-	/* EL2 implemented */
-	mrs	x0, mdcr_el2
-	mov_imm x1, MDCR_EL2_MTPME
-	bic	x0, x0, x1
-	msr	mdcr_el2, x0
-	isb
-	ret	x10
-endfunc mtpmu_disable
diff --git a/lib/extensions/pmuv3/aarch32/pmuv3.c b/lib/extensions/pmuv3/aarch32/pmuv3.c
new file mode 100644
index 0000000..fe4205e
--- /dev/null
+++ b/lib/extensions/pmuv3/aarch32/pmuv3.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <lib/extensions/pmuv3.h>
+
+static u_register_t mtpmu_disable_el3(u_register_t sdcr)
+{
+	if (!is_feat_mtpmu_supported()) {
+		return sdcr;
+	}
+
+	/*
+	 * SDCR.MTPME = 0
+	 * FEAT_MTPMU is disabled. The Effective value of PMEVTYPER<n>.MT is
+	 * zero.
+	 */
+	sdcr &= ~SDCR_MTPME_BIT;
+
+	return sdcr;
+}
+
+/*
+ * Applies to all PMU versions. The name is PMUv3 for compatibility with
+ * aarch64 and to avoid clashing with platforms which reuse the PMU name.
+ */
+void pmuv3_disable_el3(void)
+{
+	u_register_t sdcr = read_sdcr();
+
+	/* ---------------------------------------------------------------------
+	 * Initialise SDCR, setting all the fields rather than relying on hw.
+	 *
+	 * SDCR.SCCD: Set to one so that cycle counting by PMCCNTR is prohibited
+	 *  in Secure state. This bit is RES0 in versions of the architecture
+	 *  earlier than ARMv8.5.
+	 *
+	 * SDCR.SPME: Set to zero so that event counting is prohibited in Secure
+	 *  state (and explicitly EL3 with later revisions). If ARMv8.2 Debug is
+	 *  not implemented this bit does not have any effect on the counters
+	 *  unless there is support for the implementation defined
+	 *  authentication interface ExternalSecureNoninvasiveDebugEnabled().
+	 * ---------------------------------------------------------------------
+	 */
+	sdcr = (sdcr | SDCR_SCCD_BIT) & ~SDCR_SPME_BIT;
+	sdcr = mtpmu_disable_el3(sdcr);
+	write_sdcr(sdcr);
+
+	/* ---------------------------------------------------------------------
+	 * Initialise PMCR, setting all fields rather than relying
+	 * on hw. Some fields are architecturally UNKNOWN on reset.
+	 *
+	 * PMCR.DP: Set to one to prohibit cycle counting whilst in Secure mode.
+	 *
+	 * PMCR.X: Set to zero to disable export of events.
+	 *
+	 * PMCR.C: Set to one to reset PMCCNTR.
+	 *
+	 * PMCR.P: Set to one to reset each event counter PMEVCNTR<n> to zero.
+	 *
+	 * PMCR.E: Set to zero to disable cycle and event counters.
+	 * ---------------------------------------------------------------------
+	 */
+
+	write_pmcr((read_pmcr() | PMCR_DP_BIT | PMCR_C_BIT | PMCR_P_BIT) &
+		 ~(PMCR_X_BIT | PMCR_E_BIT));
+}
diff --git a/lib/extensions/pmuv3/aarch64/pmuv3.c b/lib/extensions/pmuv3/aarch64/pmuv3.c
new file mode 100644
index 0000000..f83a5ee
--- /dev/null
+++ b/lib/extensions/pmuv3/aarch64/pmuv3.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <lib/extensions/pmuv3.h>
+
+static u_register_t init_mdcr_el2_hpmn(u_register_t mdcr_el2)
+{
+	/*
+	 * Initialize MDCR_EL2.HPMN to its hardware reset value so we don't
+	 * throw anyone off who expects this to be sensible.
+	 */
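+	/*
+	 * Worked example (hypothetical value): if PMCR_EL0.N reads back as 6,
+	 * HPMN is programmed to 6, so lower ELs see all six event counters,
+	 * as they would after a hardware reset.
+	 */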
+	mdcr_el2 &= ~MDCR_EL2_HPMN_MASK;
+	mdcr_el2 |= ((read_pmcr_el0() >> PMCR_EL0_N_SHIFT) & PMCR_EL0_N_MASK);
+
+	return mdcr_el2;
+}
+
+void pmuv3_enable(cpu_context_t *ctx)
+{
+#if CTX_INCLUDE_EL2_REGS
+	u_register_t mdcr_el2;
+
+	mdcr_el2 = read_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_MDCR_EL2);
+	mdcr_el2 = init_mdcr_el2_hpmn(mdcr_el2);
+	write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_MDCR_EL2, mdcr_el2);
+#endif /* CTX_INCLUDE_EL2_REGS */
+}
+
+static u_register_t mtpmu_disable_el3(u_register_t mdcr_el3)
+{
+	if (!is_feat_mtpmu_supported()) {
+		return mdcr_el3;
+	}
+
+	/*
+	 * MDCR_EL3.MTPME = 0
+	 * FEAT_MTPMU is disabled. The Effective value of PMEVTYPER<n>_EL0.MT is
+	 * zero.
+	 */
+	mdcr_el3 &= ~MDCR_MTPME_BIT;
+
+	return mdcr_el3;
+}
+
+void pmuv3_disable_el3(void)
+{
+	u_register_t mdcr_el3 = read_mdcr_el3();
+
+	/* ---------------------------------------------------------------------
+	 * Initialise MDCR_EL3, setting all fields rather than relying on hw.
+	 * Some fields are architecturally UNKNOWN on reset.
+	 *
+	 * MDCR_EL3.MPMX: Set to zero so that event counters are not affected
+	 *  (when SPME = 0).
+	 *
+	 * MDCR_EL3.MCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
+	 *  prohibited in EL3. This bit is RES0 in versions of the
+	 *  architecture with FEAT_PMUv3p7 not implemented.
+	 *
+	 * MDCR_EL3.SCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
+	 *  prohibited in Secure state. This bit is RES0 in versions of the
+	 *  architecture with FEAT_PMUv3p5 not implemented.
+	 *
+	 * MDCR_EL3.SPME: Set to zero so that event counting is prohibited in
+	 *  Secure state (and explicitly EL3 with later revisions). If ARMv8.2
+	 *  Debug is not implemented this bit does not have any effect on the
+	 *  counters unless there is support for the implementation defined
+	 *  authentication interface ExternalSecureNoninvasiveDebugEnabled().
+	 *
+	 * The SPME/MPMX combination is a little tricky. Below is a small
+	 * summary if another combination is ever needed:
+	 * SPME | MPMX | secure world |   EL3
+	 * -------------------------------------
+	 *   0  |  0   |    disabled  | disabled
+	 *   1  |  0   |    enabled   | enabled
+	 *   0  |  1   |    enabled   | disabled
+	 *   1  |  1   |    enabled   | disabled only for counters 0 to
+	 *                              MDCR_EL2.HPMN - 1. Enabled for the rest
+	 */
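+	/*
+	 * The write below programs SPME = 0 and MPMX = 0, i.e. the first row
+	 * of the table: counting disabled in both Secure state and EL3.
+	 */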
+	mdcr_el3 = (mdcr_el3 | MDCR_SCCD_BIT | MDCR_MCCD_BIT) &
+		  ~(MDCR_MPMX_BIT | MDCR_SPME_BIT);
+	mdcr_el3 = mtpmu_disable_el3(mdcr_el3);
+	write_mdcr_el3(mdcr_el3);
+
+	/* ---------------------------------------------------------------------
+	 * Initialise PMCR_EL0 setting all fields rather than relying
+	 * on hw. Some fields are architecturally UNKNOWN on reset.
+	 *
+	 * PMCR_EL0.DP: Set to one so that the cycle counter,
+	 *  PMCCNTR_EL0 does not count when event counting is prohibited.
+	 *  Necessary on PMUv3 <= p7 where MDCR_EL3.{SCCD,MCCD} are not
+	 *  available
+	 *
+	 * PMCR_EL0.X: Set to zero to disable export of events.
+	 *
+	 * PMCR_EL0.C: Set to one to reset PMCCNTR_EL0 to zero.
+	 *
+	 * PMCR_EL0.P: Set to one to reset each event counter PMEVCNTR<n>_EL0 to
+	 *  zero.
+	 *
+	 * PMCR_EL0.E: Set to zero to disable cycle and event counters.
+	 * ---------------------------------------------------------------------
+	 */
+	write_pmcr_el0((read_pmcr_el0() | PMCR_EL0_DP_BIT | PMCR_EL0_C_BIT |
+			PMCR_EL0_P_BIT) & ~(PMCR_EL0_X_BIT | PMCR_EL0_E_BIT));
+}
+
+static u_register_t mtpmu_disable_el2(u_register_t mdcr_el2)
+{
+	if (!is_feat_mtpmu_supported()) {
+		return mdcr_el2;
+	}
+
+	/*
+	 * MDCR_EL2.MTPME = 0
+	 * FEAT_MTPMU is disabled. The Effective value of PMEVTYPER<n>_EL0.MT is
+	 * zero.
+	 */
+	mdcr_el2 &= ~MDCR_EL2_MTPME;
+
+	return mdcr_el2;
+}
+
+void pmuv3_init_el2_unused(void)
+{
+	u_register_t mdcr_el2 = read_mdcr_el2();
+
+	/*
+	 * Initialise MDCR_EL2, setting all fields rather than
+	 * relying on hw. Some fields are architecturally
+	 * UNKNOWN on reset.
+	 *
+	 * MDCR_EL2.HLP: Set to one so that event counter overflow, that is
+	 *  recorded in PMOVSCLR_EL0[0-30], occurs on the increment that changes
+	 *  PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU is implemented.
+	 *  This bit is RES0 in versions of the architecture earlier than
+	 *  ARMv8.5, setting it to 1 doesn't have any effect on them.
+	 *
+	 * MDCR_EL2.HCCD: Set to one to prohibit cycle counting at EL2. This bit
+	 *  is RES0 in versions of the architecture with FEAT_PMUv3p5 not
+	 *  implemented.
+	 *
+	 * MDCR_EL2.HPMD: Set to one so that event counting is
+	 *  prohibited at EL2 for counters n < MDCR_EL2.HPMN. This bit is RES0
+	 *  in versions of the architecture with FEAT_PMUv3p1 not implemented.
+	 *
+	 * MDCR_EL2.HPME: Set to zero to disable event counters for counters
+	 *  n >= MDCR_EL2.HPMN.
+	 *
+	 * MDCR_EL2.TPM: Set to zero so that Non-secure EL0 and
+	 *  EL1 accesses to all Performance Monitors registers
+	 *  are not trapped to EL2.
+	 *
+	 * MDCR_EL2.TPMCR: Set to zero so that Non-secure EL0
+	 *  and EL1 accesses to the PMCR_EL0 or PMCR are not
+	 *  trapped to EL2.
+	 */
+	mdcr_el2 = (mdcr_el2 | MDCR_EL2_HLP_BIT | MDCR_EL2_HPMD_BIT |
+		    MDCR_EL2_HCCD_BIT) &
+		  ~(MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT | MDCR_EL2_TPMCR_BIT);
+	mdcr_el2 = init_mdcr_el2_hpmn(mdcr_el2);
+	mdcr_el2 = mtpmu_disable_el2(mdcr_el2);
+	write_mdcr_el2(mdcr_el2);
+}
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
index bfc09cc..8aa0cce 100644
--- a/lib/psci/psci_common.c
+++ b/lib/psci/psci_common.c
@@ -985,6 +985,9 @@
 	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
 	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
 
+	/* Init registers that never change for the lifetime of TF-A */
+	cm_manage_extensions_el3();
+
 	/*
 	 * Verify that we have been explicitly turned ON or resumed from
 	 * suspend.
diff --git a/make_helpers/armv7-a-cpus.mk b/make_helpers/armv7-a-cpus.mk
index eec85cc..a8e9d50 100644
--- a/make_helpers/armv7-a-cpus.mk
+++ b/make_helpers/armv7-a-cpus.mk
@@ -15,9 +15,9 @@
 # armClang requires -march=armv7-a for all ARMv7 Cortex-A. To comply with
 # all, just drop -march and supply only -mcpu.
 
-# Platform can override march32-directive through MARCH32_DIRECTIVE
-ifdef MARCH32_DIRECTIVE
-march32-directive		:= $(MARCH32_DIRECTIVE)
+# Platform can override march-directive through MARCH_DIRECTIVE
+ifdef MARCH_DIRECTIVE
+march-directive		:= $(MARCH_DIRECTIVE)
 else
 march32-set-${ARM_CORTEX_A5}	:= -mcpu=cortex-a5
 march32-set-${ARM_CORTEX_A7}	:= -mcpu=cortex-a7
@@ -29,7 +29,7 @@
 
 # default to -march=armv7-a as target directive
 march32-set-yes			?= -march=armv7-a
-march32-directive		:= ${march32-set-yes} ${march32-neon-yes}
+march-directive		:= ${march32-set-yes} ${march32-neon-yes}
 endif
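+
+# Example (hypothetical platform makefile): a platform targeting one known
+# core can instead set, before including this file:
+#   MARCH_DIRECTIVE := -mcpu=cortex-a15
+# as the QEMU ARMv7 platform does.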
 
 # Platform may override these extension support directives:
diff --git a/plat/arm/board/arm_fpga/platform.mk b/plat/arm/board/arm_fpga/platform.mk
index f88eaa8..c9425c5 100644
--- a/plat/arm/board/arm_fpga/platform.mk
+++ b/plat/arm/board/arm_fpga/platform.mk
@@ -58,18 +58,18 @@
 				lib/cpus/aarch64/cortex_a73.S
 else
 # AArch64-only cores
-	FPGA_CPU_LIBS	+=lib/cpus/aarch64/cortex_a510.S		\
-				lib/cpus/aarch64/cortex_a710.S				\
-				lib/cpus/aarch64/cortex_a715.S				\
-				lib/cpus/aarch64/cortex_x3.S 				\
+	FPGA_CPU_LIBS	+=	lib/cpus/aarch64/cortex_a510.S			\
+				lib/cpus/aarch64/cortex_a520.S			\
+				lib/cpus/aarch64/cortex_a710.S			\
+				lib/cpus/aarch64/cortex_a715.S			\
+				lib/cpus/aarch64/cortex_a720.S			\
+				lib/cpus/aarch64/cortex_x3.S 			\
+				lib/cpus/aarch64/cortex_x4.S			\
 				lib/cpus/aarch64/neoverse_n_common.S		\
-				lib/cpus/aarch64/neoverse_n1.S				\
-				lib/cpus/aarch64/neoverse_n2.S				\
-				lib/cpus/aarch64/neoverse_v1.S				\
-				lib/cpus/aarch64/cortex_hayes.S				\
-				lib/cpus/aarch64/cortex_hunter.S			\
-				lib/cpus/aarch64/cortex_hunter_elp_arm.S	\
-				lib/cpus/aarch64/cortex_chaberton.S			\
+				lib/cpus/aarch64/neoverse_n1.S			\
+				lib/cpus/aarch64/neoverse_n2.S			\
+				lib/cpus/aarch64/neoverse_v1.S			\
+				lib/cpus/aarch64/cortex_chaberton.S		\
 				lib/cpus/aarch64/cortex_blackhawk.S
 
 # AArch64/AArch32 cores
diff --git a/plat/arm/board/fvp/include/platform_def.h b/plat/arm/board/fvp/include/platform_def.h
index cd468e1..99dd6c7 100644
--- a/plat/arm/board/fvp/include/platform_def.h
+++ b/plat/arm/board/fvp/include/platform_def.h
@@ -230,9 +230,12 @@
 /*
  * Since BL31 NOBITS overlays BL2 and BL1-RW, PLAT_ARM_MAX_BL31_SIZE is
  * calculated using the current BL31 PROGBITS debug size plus the sizes of
- * BL2 and BL1-RW
+ * BL2 and BL1-RW.
+ * Size of the BL31 PROGBITS increases as the SRAM size increases.
  */
-#define PLAT_ARM_MAX_BL31_SIZE		(UL(0x3D000) - ARM_L0_GPT_SIZE)
+#define PLAT_ARM_MAX_BL31_SIZE		(PLAT_ARM_TRUSTED_SRAM_SIZE - \
+					 ARM_SHARED_RAM_SIZE - \
+					 ARM_FW_CONFIGS_SIZE - ARM_L0_GPT_SIZE)
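+/*
+ * Illustrative arithmetic (hypothetical sizes): with 1 MiB of trusted SRAM,
+ * 4 KiB of shared RAM, 80 KiB of FW configs and a 16 KiB L0 GPT, BL31 may
+ * grow up to 1024 - 4 - 80 - 16 = 924 KiB.
+ */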
 #endif /* RESET_TO_BL31 */
 
 #ifndef __aarch64__
diff --git a/plat/arm/board/tc/platform.mk b/plat/arm/board/tc/platform.mk
index c29537c..5a1d83a 100644
--- a/plat/arm/board/tc/platform.mk
+++ b/plat/arm/board/tc/platform.mk
@@ -6,8 +6,7 @@
 include common/fdt_wrappers.mk
 
 ifeq ($(TARGET_PLATFORM), 0)
-$(warning Platform ${PLAT}$(TARGET_PLATFORM) is deprecated. \
-Some of the features might not work as expected)
+	$(error Platform ${PLAT}$(TARGET_PLATFORM) is deprecated.)
 endif
 
 ifeq ($(shell expr $(TARGET_PLATFORM) \<= 2), 0)
@@ -70,13 +69,6 @@
 
 PLAT_INCLUDES		+=	-I${TC_BASE}/include/
 
-# CPU libraries for TARGET_PLATFORM=0
-ifeq (${TARGET_PLATFORM}, 0)
-TC_CPU_SOURCES	+=	lib/cpus/aarch64/cortex_a510.S	\
-			lib/cpus/aarch64/cortex_a710.S	\
-			lib/cpus/aarch64/cortex_x2.S
-endif
-
 # CPU libraries for TARGET_PLATFORM=1
 ifeq (${TARGET_PLATFORM}, 1)
 TC_CPU_SOURCES	+=	lib/cpus/aarch64/cortex_a510.S \
@@ -86,9 +78,9 @@
 
 # CPU libraries for TARGET_PLATFORM=2
 ifeq (${TARGET_PLATFORM}, 2)
-TC_CPU_SOURCES	+=	lib/cpus/aarch64/cortex_hayes.S \
-			lib/cpus/aarch64/cortex_hunter.S \
-			lib/cpus/aarch64/cortex_hunter_elp_arm.S
+TC_CPU_SOURCES	+=	lib/cpus/aarch64/cortex_a520.S \
+			lib/cpus/aarch64/cortex_a720.S \
+			lib/cpus/aarch64/cortex_x4.S
 endif
 
 INTERCONNECT_SOURCES	:=	${TC_BASE}/tc_interconnect.c
diff --git a/plat/intel/soc/common/drivers/ccu/ncore_ccu.c b/plat/intel/soc/common/drivers/ccu/ncore_ccu.c
index 38f8b94..0148b75 100644
--- a/plat/intel/soc/common/drivers/ccu/ncore_ccu.c
+++ b/plat/intel/soc/common/drivers/ccu/ncore_ccu.c
@@ -72,12 +72,12 @@
 
 			/* Coh Agent Snoop Enable */
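+			/*
+			 * Use read-modify-write (setbits) rather than a plain
+			 * register write so that enable bits already set for
+			 * other caching agents are preserved.
+			 */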
 			if (CACHING_AGENT_BIT(ca_id))
-				mmio_write_32(ca_snoop_en, BIT(ca));
+				mmio_setbits_32(ca_snoop_en, BIT(ca));
 
 			/* Coh Agent Snoop DVM Enable */
 			ca_type = CACHING_AGENT_TYPE(ca_id);
 			if (ca_type == ACE_W_DVM || ca_type == ACE_L_W_DVM)
-				mmio_write_32(NCORE_CCU_CSR(NCORE_CSADSER0),
+				mmio_setbits_32(NCORE_CCU_CSR(NCORE_CSADSER0),
 				BIT(ca));
 		}
 	}
diff --git a/plat/mediatek/drivers/gic600/mt_gic_v3.c b/plat/mediatek/drivers/gic600/mt_gic_v3.c
index cca5d0a..85f9e37 100644
--- a/plat/mediatek/drivers/gic600/mt_gic_v3.c
+++ b/plat/mediatek/drivers/gic600/mt_gic_v3.c
@@ -42,13 +42,19 @@
 
 struct gic_chip_data {
 	/* All cores share the same configuration */
+	unsigned int saved_ctlr;
 	unsigned int saved_group;
 	unsigned int saved_enable;
 	unsigned int saved_conf0;
 	unsigned int saved_conf1;
 	unsigned int saved_grpmod;
+	unsigned int saved_ispendr;
+	unsigned int saved_isactiver;
+	unsigned int saved_nsacr;
 	/* Per-core sgi */
 	unsigned int saved_sgi[PLATFORM_CORE_COUNT];
+	/* Per-core priority */
+	unsigned int saved_prio[PLATFORM_CORE_COUNT][GICR_NUM_REGS(IPRIORITYR)];
 };
 
 static struct gic_chip_data gic_data;
@@ -94,53 +100,84 @@
 
 void mt_gic_rdistif_save(void)
 {
-	unsigned int proc_num;
+	unsigned int i, proc_num;
 	uintptr_t gicr_base;
 
 	proc_num = plat_my_core_pos();
 	gicr_base = gicv3_driver_data->rdistif_base_addrs[proc_num];
 
+	/*
+	 * Wait for any write to GICR_CTLR to complete before trying to save any
+	 * state.
+	 */
+	gicr_wait_for_pending_write(gicr_base);
+
+	gic_data.saved_ctlr = mmio_read_32(gicr_base + GICR_CTLR);
 	gic_data.saved_group = mmio_read_32(gicr_base + GICR_IGROUPR0);
 	gic_data.saved_enable = mmio_read_32(gicr_base + GICR_ISENABLER0);
 	gic_data.saved_conf0 = mmio_read_32(gicr_base + GICR_ICFGR0);
 	gic_data.saved_conf1 = mmio_read_32(gicr_base + GICR_ICFGR1);
 	gic_data.saved_grpmod = mmio_read_32(gicr_base + GICR_IGRPMODR0);
+	gic_data.saved_ispendr = mmio_read_32(gicr_base + GICR_ISPENDR0);
+	gic_data.saved_isactiver = mmio_read_32(gicr_base + GICR_ISACTIVER0);
+	gic_data.saved_nsacr = mmio_read_32(gicr_base + GICR_NSACR);
+
+	for (i = 0U; i < GICR_NUM_REGS(IPRIORITYR); ++i) {
+		gic_data.saved_prio[proc_num][i] = gicr_ipriorityr_read(gicr_base, i);
+	}
 
 	rdist_has_saved[proc_num] = 1;
 }
 
 void mt_gic_rdistif_restore(void)
 {
-	unsigned int proc_num;
+	unsigned int i, proc_num;
 	uintptr_t gicr_base;
 
 	proc_num = plat_my_core_pos();
 	if (rdist_has_saved[proc_num] == 1) {
 		gicr_base = gicv3_driver_data->rdistif_base_addrs[proc_num];
+
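+		/*
+		 * Restore configuration (grouping, security, priorities)
+		 * before pending/active state and the enables, writing
+		 * GICR_CTLR last and waiting for its write to complete.
+		 */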
 		mmio_write_32(gicr_base + GICR_IGROUPR0, gic_data.saved_group);
-		mmio_write_32(gicr_base + GICR_ISENABLER0,
-			gic_data.saved_enable);
+		mmio_write_32(gicr_base + GICR_IGRPMODR0, gic_data.saved_grpmod);
+		mmio_write_32(gicr_base + GICR_NSACR, gic_data.saved_nsacr);
 		mmio_write_32(gicr_base + GICR_ICFGR0, gic_data.saved_conf0);
 		mmio_write_32(gicr_base + GICR_ICFGR1, gic_data.saved_conf1);
-		mmio_write_32(gicr_base + GICR_IGRPMODR0,
-			gic_data.saved_grpmod);
+
+		for (i = 0U; i < GICR_NUM_REGS(IPRIORITYR); ++i) {
+			gicr_ipriorityr_write(gicr_base, i, gic_data.saved_prio[proc_num][i]);
+		}
+
+		mmio_write_32(gicr_base + GICR_ISPENDR0, gic_data.saved_ispendr);
+		mmio_write_32(gicr_base + GICR_ISACTIVER0, gic_data.saved_isactiver);
+		mmio_write_32(gicr_base + GICR_ISENABLER0, gic_data.saved_enable);
+		mmio_write_32(gicr_base + GICR_CTLR, gic_data.saved_ctlr);
+
+		gicr_wait_for_pending_write(gicr_base);
 	}
 }
 
 void mt_gic_rdistif_restore_all(void)
 {
-	unsigned int proc_num;
+	unsigned int i, proc_num;
 	uintptr_t gicr_base;
 
 	for (proc_num = 0; proc_num < PLATFORM_CORE_COUNT; proc_num++) {
 		gicr_base = gicv3_driver_data->rdistif_base_addrs[proc_num];
+
 		mmio_write_32(gicr_base + GICR_IGROUPR0, gic_data.saved_group);
-		mmio_write_32(gicr_base + GICR_ISENABLER0,
-			gic_data.saved_enable);
+		mmio_write_32(gicr_base + GICR_IGRPMODR0, gic_data.saved_grpmod);
+		mmio_write_32(gicr_base + GICR_NSACR, gic_data.saved_nsacr);
 		mmio_write_32(gicr_base + GICR_ICFGR0, gic_data.saved_conf0);
 		mmio_write_32(gicr_base + GICR_ICFGR1, gic_data.saved_conf1);
-		mmio_write_32(gicr_base + GICR_IGRPMODR0,
-			gic_data.saved_grpmod);
+
+		for (i = 0U; i < GICR_NUM_REGS(IPRIORITYR); ++i) {
+			gicr_ipriorityr_write(gicr_base, i, gic_data.saved_prio[proc_num][i]);
+		}
+
+		mmio_write_32(gicr_base + GICR_ISPENDR0, gic_data.saved_ispendr);
+		mmio_write_32(gicr_base + GICR_ISACTIVER0, gic_data.saved_isactiver);
+		mmio_write_32(gicr_base + GICR_ISENABLER0, gic_data.saved_enable);
+		mmio_write_32(gicr_base + GICR_CTLR, gic_data.saved_ctlr);
+
+		gicr_wait_for_pending_write(gicr_base);
 	}
 }
 
diff --git a/plat/qemu/qemu/platform.mk b/plat/qemu/qemu/platform.mk
index 7a1dccd..56c96a1 100644
--- a/plat/qemu/qemu/platform.mk
+++ b/plat/qemu/qemu/platform.mk
@@ -12,7 +12,7 @@
 # Qemu Cortex-A15 model does not implement the virtualization extension.
 # For this reason, we cannot set ARM_CORTEX_A15=yes and must define all
 # the ARMv7 build directives.
-MARCH32_DIRECTIVE 	:= 	-mcpu=cortex-a15
+MARCH_DIRECTIVE 	:= 	-mcpu=cortex-a15
 $(eval $(call add_define,ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING))
 $(eval $(call add_define,ARMV7_SUPPORTS_GENERIC_TIMER))
 $(eval $(call add_define,ARMV7_SUPPORTS_VFP))
diff --git a/plat/qemu/qemu_sbsa/sbsa_sip_svc.c b/plat/qemu/qemu_sbsa/sbsa_sip_svc.c
index 37460d7..05ebec4 100644
--- a/plat/qemu/qemu_sbsa/sbsa_sip_svc.c
+++ b/plat/qemu/qemu_sbsa/sbsa_sip_svc.c
@@ -26,8 +26,10 @@
  * need version of whole 'virtual hardware platform'.
  */
 #define SIP_SVC_VERSION  SIP_FUNCTION_ID(1)
-
 #define SIP_SVC_GET_GIC  SIP_FUNCTION_ID(100)
+#define SIP_SVC_GET_GIC_ITS SIP_FUNCTION_ID(101)
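+/*
+ * SIP_SVC_GET_GIC_ITS mirrors SIP_SVC_GET_GIC: the handler returns a NULL
+ * status in the first result register and the ITS base address discovered
+ * from the device tree in the second.
+ */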
+
+static uint64_t gic_its_addr;
 
 void sbsa_set_gic_bases(const uintptr_t gicd_base, const uintptr_t gicr_base);
 uintptr_t sbsa_get_gicd(void);
@@ -45,9 +47,12 @@
 	 * QEMU gives us this DeviceTree node:
 	 *
 	 * intc {
-		reg = < 0x00 0x40060000 0x00 0x10000
-			0x00 0x40080000 0x00 0x4000000>;
-	};
+	 *	 reg = < 0x00 0x40060000 0x00 0x10000
+	 *		 0x00 0x40080000 0x00 0x4000000>;
+	 *       its {
+	 *               reg = <0x00 0x44081000 0x00 0x20000>;
+	 *       };
+	 * };
 	 */
 	node = fdt_path_offset(dtb, "/intc");
 	if (node < 0) {
@@ -74,6 +79,18 @@
 	INFO("GICR base = 0x%lx\n", gicr_base);
 
 	sbsa_set_gic_bases(gicd_base, gicr_base);
+
+	node = fdt_path_offset(dtb, "/intc/its");
+	if (node < 0) {
+		return;
+	}
+
+	err = fdt_get_reg_props_by_index(dtb, node, 0, &gic_its_addr, NULL);
+	if (err < 0) {
+		ERROR("Failed to read GICI reg property of GIC node\n");
+		return;
+	}
+	INFO("GICI base = 0x%lx\n", gic_its_addr);
 }
 
 void read_platform_version(void *dtb)
@@ -143,6 +160,9 @@
 	case SIP_SVC_GET_GIC:
 		SMC_RET3(handle, NULL, sbsa_get_gicd(), sbsa_get_gicr());
 
+	case SIP_SVC_GET_GIC_ITS:
+		SMC_RET2(handle, NULL, gic_its_addr);
+
 	default:
 		ERROR("%s: unhandled SMC (0x%x) (function id: %d)\n", __func__, smc_fid,
 		      smc_fid - SIP_FUNCTION);
diff --git a/plat/xilinx/common/include/plat_startup.h b/plat/xilinx/common/include/plat_startup.h
index d1c5303..5270e13 100644
--- a/plat/xilinx/common/include/plat_startup.h
+++ b/plat/xilinx/common/include/plat_startup.h
@@ -10,34 +10,34 @@
 
 #include <common/bl_common.h>
 
-/* For FSBL handover */
-enum fsbl_handoff {
-	FSBL_HANDOFF_SUCCESS = 0,
-	FSBL_HANDOFF_NO_STRUCT,
-	FSBL_HANDOFF_INVAL_STRUCT,
-	FSBL_HANDOFF_TOO_MANY_PARTS
+/* For Xilinx bootloader XBL handover */
+enum xbl_handoff {
+	XBL_HANDOFF_SUCCESS = 0,
+	XBL_HANDOFF_NO_STRUCT,
+	XBL_HANDOFF_INVAL_STRUCT,
+	XBL_HANDOFF_TOO_MANY_PARTS
 };
 
-#define FSBL_MAX_PARTITIONS		8U
+#define XBL_MAX_PARTITIONS		8U
 
 /* Structure corresponding to each partition entry */
-struct xfsbl_partition {
+struct xbl_partition {
 	uint64_t entry_point;
 	uint64_t flags;
 };
 
 /* Structure for handoff parameters to TrustedFirmware-A (TF-A) */
-struct xfsbl_tfa_handoff_params {
+struct xbl_handoff_params {
 	uint8_t magic[4];
 	uint32_t num_entries;
-	struct xfsbl_partition partition[FSBL_MAX_PARTITIONS];
+	struct xbl_partition partition[XBL_MAX_PARTITIONS];
 };
 
-#define TFA_HANDOFF_PARAMS_MAX_SIZE	sizeof(struct xfsbl_tfa_handoff_params)
+#define HANDOFF_PARAMS_MAX_SIZE	sizeof(struct xbl_handoff_params)
 
-enum fsbl_handoff fsbl_tfa_handover(entry_point_info_t *bl32,
+enum xbl_handoff xbl_handover(entry_point_info_t *bl32,
 					entry_point_info_t *bl33,
-					uint64_t tfa_handoff_addr);
+					uint64_t handoff_addr);
 
 /* JEDEC Standard Manufacturer's Identification Code and Bank ID JEP106 */
 #define JEDEC_XILINX_MFID	U(0x49)
diff --git a/plat/xilinx/common/include/pm_node.h b/plat/xilinx/common/include/pm_node.h
index b6c2d81..46f6bcf 100644
--- a/plat/xilinx/common/include/pm_node.h
+++ b/plat/xilinx/common/include/pm_node.h
@@ -188,54 +188,54 @@
 	XPM_NODEIDX_DEV_GT_10 = 0x53,
 
 #if defined(PLAT_versal_net)
-	XPM_NODEIDX_DEV_ACPU_0_0 = 0x54,
-	XPM_NODEIDX_DEV_ACPU_0_1 = 0x55,
-	XPM_NODEIDX_DEV_ACPU_0_2 = 0x56,
-	XPM_NODEIDX_DEV_ACPU_0_3 = 0x57,
-	XPM_NODEIDX_DEV_ACPU_1_0 = 0x58,
-	XPM_NODEIDX_DEV_ACPU_1_1 = 0x59,
-	XPM_NODEIDX_DEV_ACPU_1_2 = 0x5A,
-	XPM_NODEIDX_DEV_ACPU_1_3 = 0x5B,
-	XPM_NODEIDX_DEV_ACPU_2_0 = 0x5C,
-	XPM_NODEIDX_DEV_ACPU_2_1 = 0x5D,
-	XPM_NODEIDX_DEV_ACPU_2_2 = 0x5E,
-	XPM_NODEIDX_DEV_ACPU_2_3 = 0x5F,
-	XPM_NODEIDX_DEV_ACPU_3_0 = 0x60,
-	XPM_NODEIDX_DEV_ACPU_3_1 = 0x61,
-	XPM_NODEIDX_DEV_ACPU_3_2 = 0x62,
-	XPM_NODEIDX_DEV_ACPU_3_3 = 0x63,
-	XPM_NODEIDX_DEV_RPU_A_0 = 0x64,
-	XPM_NODEIDX_DEV_RPU_A_1 = 0x65,
-	XPM_NODEIDX_DEV_RPU_B_0 = 0x66,
-	XPM_NODEIDX_DEV_RPU_B_1 = 0x67,
-	XPM_NODEIDX_DEV_OCM_0_0 = 0x68,
-	XPM_NODEIDX_DEV_OCM_0_1 = 0x69,
-	XPM_NODEIDX_DEV_OCM_0_2 = 0x6A,
-	XPM_NODEIDX_DEV_OCM_0_3 = 0x6B,
-	XPM_NODEIDX_DEV_OCM_1_0 = 0x6C,
-	XPM_NODEIDX_DEV_OCM_1_1 = 0x6D,
-	XPM_NODEIDX_DEV_OCM_1_2 = 0x6E,
-	XPM_NODEIDX_DEV_OCM_1_3 = 0x6F,
-	XPM_NODEIDX_DEV_TCM_A_0A = 0x70,
-	XPM_NODEIDX_DEV_TCM_A_0B = 0x71,
-	XPM_NODEIDX_DEV_TCM_A_0C = 0x72,
-	XPM_NODEIDX_DEV_TCM_A_1A = 0x73,
-	XPM_NODEIDX_DEV_TCM_A_1B = 0x74,
-	XPM_NODEIDX_DEV_TCM_A_1C = 0x75,
-	XPM_NODEIDX_DEV_TCM_B_0A = 0x76,
-	XPM_NODEIDX_DEV_TCM_B_0B = 0x77,
-	XPM_NODEIDX_DEV_TCM_B_0C = 0x78,
-	XPM_NODEIDX_DEV_TCM_B_1A = 0x79,
-	XPM_NODEIDX_DEV_TCM_B_1B = 0x7A,
-	XPM_NODEIDX_DEV_TCM_B_1C = 0x7B,
-	XPM_NODEIDX_DEV_USB_1 = 0x7C,
-	XPM_NODEIDX_DEV_PMC_WWDT = 0x7D,
-	XPM_NODEIDX_DEV_LPD_SWDT_0 = 0x7E,
-	XPM_NODEIDX_DEV_LPD_SWDT_1 = 0x7F,
-	XPM_NODEIDX_DEV_FPD_SWDT_0 = 0x80,
-	XPM_NODEIDX_DEV_FPD_SWDT_1 = 0x81,
-	XPM_NODEIDX_DEV_FPD_SWDT_2 = 0x82,
-	XPM_NODEIDX_DEV_FPD_SWDT_3 = 0x83,
+	XPM_NODEIDX_DEV_ACPU_0_0 = 0xAF,
+	XPM_NODEIDX_DEV_ACPU_0_1 = 0xB0,
+	XPM_NODEIDX_DEV_ACPU_0_2 = 0xB1,
+	XPM_NODEIDX_DEV_ACPU_0_3 = 0xB2,
+	XPM_NODEIDX_DEV_ACPU_1_0 = 0xB3,
+	XPM_NODEIDX_DEV_ACPU_1_1 = 0xB4,
+	XPM_NODEIDX_DEV_ACPU_1_2 = 0xB5,
+	XPM_NODEIDX_DEV_ACPU_1_3 = 0xB6,
+	XPM_NODEIDX_DEV_ACPU_2_0 = 0xB7,
+	XPM_NODEIDX_DEV_ACPU_2_1 = 0xB8,
+	XPM_NODEIDX_DEV_ACPU_2_2 = 0xB9,
+	XPM_NODEIDX_DEV_ACPU_2_3 = 0xBA,
+	XPM_NODEIDX_DEV_ACPU_3_0 = 0xBB,
+	XPM_NODEIDX_DEV_ACPU_3_1 = 0xBC,
+	XPM_NODEIDX_DEV_ACPU_3_2 = 0xBD,
+	XPM_NODEIDX_DEV_ACPU_3_3 = 0xBE,
+	XPM_NODEIDX_DEV_RPU_A_0 = 0xBF,
+	XPM_NODEIDX_DEV_RPU_A_1 = 0xC0,
+	XPM_NODEIDX_DEV_RPU_B_0 = 0xC1,
+	XPM_NODEIDX_DEV_RPU_B_1 = 0xC2,
+	XPM_NODEIDX_DEV_OCM_0_0 = 0xC3,
+	XPM_NODEIDX_DEV_OCM_0_1 = 0xC4,
+	XPM_NODEIDX_DEV_OCM_0_2 = 0xC5,
+	XPM_NODEIDX_DEV_OCM_0_3 = 0xC6,
+	XPM_NODEIDX_DEV_OCM_1_0 = 0xC7,
+	XPM_NODEIDX_DEV_OCM_1_1 = 0xC8,
+	XPM_NODEIDX_DEV_OCM_1_2 = 0xC9,
+	XPM_NODEIDX_DEV_OCM_1_3 = 0xCA,
+	XPM_NODEIDX_DEV_TCM_A_0A = 0xCB,
+	XPM_NODEIDX_DEV_TCM_A_0B = 0xCC,
+	XPM_NODEIDX_DEV_TCM_A_0C = 0xCD,
+	XPM_NODEIDX_DEV_TCM_A_1A = 0xCE,
+	XPM_NODEIDX_DEV_TCM_A_1B = 0xCF,
+	XPM_NODEIDX_DEV_TCM_A_1C = 0xD0,
+	XPM_NODEIDX_DEV_TCM_B_0A = 0xD1,
+	XPM_NODEIDX_DEV_TCM_B_0B = 0xD2,
+	XPM_NODEIDX_DEV_TCM_B_0C = 0xD3,
+	XPM_NODEIDX_DEV_TCM_B_1A = 0xD4,
+	XPM_NODEIDX_DEV_TCM_B_1B = 0xD5,
+	XPM_NODEIDX_DEV_TCM_B_1C = 0xD6,
+	XPM_NODEIDX_DEV_USB_1 = 0xD7,
+	XPM_NODEIDX_DEV_PMC_WWDT = 0xD8,
+	XPM_NODEIDX_DEV_LPD_SWDT_0 = 0xD9,
+	XPM_NODEIDX_DEV_LPD_SWDT_1 = 0xDA,
+	XPM_NODEIDX_DEV_FPD_SWDT_0 = 0xDB,
+	XPM_NODEIDX_DEV_FPD_SWDT_1 = 0xDC,
+	XPM_NODEIDX_DEV_FPD_SWDT_2 = 0xDD,
+	XPM_NODEIDX_DEV_FPD_SWDT_3 = 0xDE,
 #endif
 	XPM_NODEIDX_DEV_MAX,
 };
diff --git a/plat/xilinx/common/plat_startup.c b/plat/xilinx/common/plat_startup.c
index d034e00..f45c9f0 100644
--- a/plat/xilinx/common/plat_startup.c
+++ b/plat/xilinx/common/plat_startup.c
@@ -15,7 +15,7 @@
 
 
 /*
- * TFAHandoffParams
+ * HandoffParams
  * Parameter		bitfield	encoding
  * -----------------------------------------------------------------------------
  * Exec State		0		0 -> Aarch64, 1-> Aarch32
@@ -23,94 +23,104 @@
  * secure (TZ)		2		0 -> Non secure, 1 -> secure
  * EL			3:4		00 -> EL0, 01 -> EL1, 10 -> EL2, 11 -> EL3
  * CPU#			5:6		00 -> A53_0, 01 -> A53_1, 10 -> A53_2, 11 -> A53_3
+ * Reserved		7:10		Reserved
+ * Cluster#		11:12		00 -> Cluster 0, 01 -> Cluster 1, 10 -> Cluster 2,
+ *					11 -> Cluster 3 (applicable to Versal NET only).
+ * Reserved		13:16		Reserved
  */
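+
+/*
+ * Worked example (hypothetical flags value): flags = 0x0C decodes as
+ * exec state = AArch64, little endian, secure (TZ) = 1 and EL = 0b01 (EL1),
+ * i.e. the partition is handed off to Secure EL1.
+ */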
 
-#define FSBL_FLAGS_ESTATE_SHIFT		0U
-#define FSBL_FLAGS_ESTATE_MASK		(1U << FSBL_FLAGS_ESTATE_SHIFT)
-#define FSBL_FLAGS_ESTATE_A64		0U
-#define FSBL_FLAGS_ESTATE_A32		1U
+#define XBL_FLAGS_ESTATE_SHIFT		0U
+#define XBL_FLAGS_ESTATE_MASK		(1U << XBL_FLAGS_ESTATE_SHIFT)
+#define XBL_FLAGS_ESTATE_A64		0U
+#define XBL_FLAGS_ESTATE_A32		1U
 
-#define FSBL_FLAGS_ENDIAN_SHIFT		1U
-#define FSBL_FLAGS_ENDIAN_MASK		(1U << FSBL_FLAGS_ENDIAN_SHIFT)
-#define FSBL_FLAGS_ENDIAN_LE		0U
-#define FSBL_FLAGS_ENDIAN_BE		1U
+#define XBL_FLAGS_ENDIAN_SHIFT		1U
+#define XBL_FLAGS_ENDIAN_MASK		(1U << XBL_FLAGS_ENDIAN_SHIFT)
+#define XBL_FLAGS_ENDIAN_LE		0U
+#define XBL_FLAGS_ENDIAN_BE		1U
 
-#define FSBL_FLAGS_TZ_SHIFT		2U
-#define FSBL_FLAGS_TZ_MASK		(1U << FSBL_FLAGS_TZ_SHIFT)
-#define FSBL_FLAGS_NON_SECURE		0U
-#define FSBL_FLAGS_SECURE		1U
+#define XBL_FLAGS_TZ_SHIFT		2U
+#define XBL_FLAGS_TZ_MASK		(1U << XBL_FLAGS_TZ_SHIFT)
+#define XBL_FLAGS_NON_SECURE		0U
+#define XBL_FLAGS_SECURE		1U
 
-#define FSBL_FLAGS_EL_SHIFT		3U
-#define FSBL_FLAGS_EL_MASK		(3U << FSBL_FLAGS_EL_SHIFT)
-#define FSBL_FLAGS_EL0			0U
-#define FSBL_FLAGS_EL1			1U
-#define FSBL_FLAGS_EL2			2U
-#define FSBL_FLAGS_EL3			3U
+#define XBL_FLAGS_EL_SHIFT		3U
+#define XBL_FLAGS_EL_MASK		(3U << XBL_FLAGS_EL_SHIFT)
+#define XBL_FLAGS_EL0			0U
+#define XBL_FLAGS_EL1			1U
+#define XBL_FLAGS_EL2			2U
+#define XBL_FLAGS_EL3			3U
 
-#define FSBL_FLAGS_CPU_SHIFT		5U
-#define FSBL_FLAGS_CPU_MASK		(3U << FSBL_FLAGS_CPU_SHIFT)
-#define FSBL_FLAGS_A53_0		0U
-#define FSBL_FLAGS_A53_1		1U
-#define FSBL_FLAGS_A53_2		2U
-#define FSBL_FLAGS_A53_3		3U
+#define XBL_FLAGS_CPU_SHIFT		5U
+#define XBL_FLAGS_CPU_MASK		(3U << XBL_FLAGS_CPU_SHIFT)
+#define XBL_FLAGS_A53_0		0U
+#define XBL_FLAGS_A53_1		1U
+#define XBL_FLAGS_A53_2		2U
+#define XBL_FLAGS_A53_3		3U
+
+#if defined(PLAT_versal_net)
+#define XBL_FLAGS_CLUSTER_SHIFT		11U
+#define XBL_FLAGS_CLUSTER_MASK		GENMASK(12, 11)
+
+#define XBL_FLAGS_CLUSTER_0		0U
+#endif /* PLAT_versal_net */
 
 /**
- * get_fsbl_cpu() - Get the target CPU for partition.
+ * get_xbl_cpu() - Get the target CPU for partition.
  * @partition: Pointer to partition struct.
  *
- * Return: FSBL_FLAGS_A53_0, FSBL_FLAGS_A53_1, FSBL_FLAGS_A53_2 or
- *         FSBL_FLAGS_A53_3.
+ * Return: XBL_FLAGS_A53_0, XBL_FLAGS_A53_1, XBL_FLAGS_A53_2 or XBL_FLAGS_A53_3.
  *
  */
-static int32_t get_fsbl_cpu(const struct xfsbl_partition *partition)
+static int32_t get_xbl_cpu(const struct xbl_partition *partition)
 {
-	uint64_t flags = partition->flags & FSBL_FLAGS_CPU_MASK;
+	uint64_t flags = partition->flags & XBL_FLAGS_CPU_MASK;
 
-	return flags >> FSBL_FLAGS_CPU_SHIFT;
+	return flags >> XBL_FLAGS_CPU_SHIFT;
 }
 
 /**
- * get_fsbl_el() - Get the target exception level for partition.
+ * get_xbl_el() - Get the target exception level for partition.
  * @partition: Pointer to partition struct.
  *
- * Return: FSBL_FLAGS_EL0, FSBL_FLAGS_EL1, FSBL_FLAGS_EL2 or FSBL_FLAGS_EL3.
+ * Return: XBL_FLAGS_EL0, XBL_FLAGS_EL1, XBL_FLAGS_EL2 or XBL_FLAGS_EL3.
  *
  */
-static int32_t get_fsbl_el(const struct xfsbl_partition *partition)
+static int32_t get_xbl_el(const struct xbl_partition *partition)
 {
-	uint64_t flags = partition->flags & FSBL_FLAGS_EL_MASK;
+	uint64_t flags = partition->flags & XBL_FLAGS_EL_MASK;
 
-	return flags >> FSBL_FLAGS_EL_SHIFT;
+	return flags >> XBL_FLAGS_EL_SHIFT;
 }
 
 /**
- * get_fsbl_ss() - Get the target security state for partition.
+ * get_xbl_ss() - Get the target security state for partition.
  * @partition: Pointer to partition struct.
  *
- * Return: FSBL_FLAGS_NON_SECURE or FSBL_FLAGS_SECURE.
+ * Return: XBL_FLAGS_NON_SECURE or XBL_FLAGS_SECURE.
  *
  */
-static int32_t get_fsbl_ss(const struct xfsbl_partition *partition)
+static int32_t get_xbl_ss(const struct xbl_partition *partition)
 {
-	uint64_t flags = partition->flags & FSBL_FLAGS_TZ_MASK;
+	uint64_t flags = partition->flags & XBL_FLAGS_TZ_MASK;
 
-	return flags >> FSBL_FLAGS_TZ_SHIFT;
+	return flags >> XBL_FLAGS_TZ_SHIFT;
 }
 
 /**
- * get_fsbl_endian() - Get the target endianness for partition.
+ * get_xbl_endian() - Get the target endianness for partition.
  * @partition: Pointer to partition struct.
  *
  * Return: SPSR_E_LITTLE or SPSR_E_BIG.
  *
  */
-static int32_t get_fsbl_endian(const struct xfsbl_partition *partition)
+static int32_t get_xbl_endian(const struct xbl_partition *partition)
 {
-	uint64_t flags = partition->flags & FSBL_FLAGS_ENDIAN_MASK;
+	uint64_t flags = partition->flags & XBL_FLAGS_ENDIAN_MASK;
 
-	flags >>= FSBL_FLAGS_ENDIAN_SHIFT;
+	flags >>= XBL_FLAGS_ENDIAN_SHIFT;
 
-	if (flags == FSBL_FLAGS_ENDIAN_BE) {
+	if (flags == XBL_FLAGS_ENDIAN_BE) {
 		return SPSR_E_BIG;
 	} else {
 		return SPSR_E_LITTLE;
@@ -118,58 +128,73 @@
 }
 
 /**
- * get_fsbl_estate() - Get the target execution state for partition.
+ * get_xbl_estate() - Get the target execution state for partition.
  * @partition: Pointer to partition struct.
  *
- * Return: FSBL_FLAGS_ESTATE_A32 or FSBL_FLAGS_ESTATE_A64.
+ * Return: XBL_FLAGS_ESTATE_A32 or XBL_FLAGS_ESTATE_A64.
+ *
+ */
+static int32_t get_xbl_estate(const struct xbl_partition *partition)
+{
+	uint64_t flags = partition->flags & XBL_FLAGS_ESTATE_MASK;
+
+	return flags >> XBL_FLAGS_ESTATE_SHIFT;
+}
+
+#if defined(PLAT_versal_net)
+/**
+ * get_xbl_cluster() - Get the cluster number for partition.
+ * @partition: Pointer to partition struct.
  *
+ * Return: cluster number for the partition.
  */
-static int32_t get_fsbl_estate(const struct xfsbl_partition *partition)
+static int32_t get_xbl_cluster(const struct xbl_partition *partition)
 {
-	uint64_t flags = partition->flags & FSBL_FLAGS_ESTATE_MASK;
+	uint64_t flags = partition->flags & XBL_FLAGS_CLUSTER_MASK;
 
-	return flags >> FSBL_FLAGS_ESTATE_SHIFT;
+	return (int32_t)(flags >> XBL_FLAGS_CLUSTER_SHIFT);
 }
+#endif /* PLAT_versal_net */
 
 /**
- * fsbl_tfa_handover() - Populates the bl32 and bl33 image info structures.
+ * xbl_handover() - Populates the bl32 and bl33 image info structures.
  * @bl32: BL32 image info structure.
  * @bl33: BL33 image info structure.
- * @tfa_handoff_addr: TF-A handoff address.
+ * @handoff_addr: TF-A handoff address.
  *
- * Process the handoff parameters from the FSBL and populate the BL32 and BL33
+ * Process the handoff parameters from the XBL and populate the BL32 and BL33
  * image info structures accordingly.
  *
  * Return: Return the status of the handoff. The value will be from the
- *         fsbl_handoff enum.
+ *         xbl_handoff enum.
  *
  */
-enum fsbl_handoff fsbl_tfa_handover(entry_point_info_t *bl32,
+enum xbl_handoff xbl_handover(entry_point_info_t *bl32,
 					entry_point_info_t *bl33,
-					uint64_t tfa_handoff_addr)
+					uint64_t handoff_addr)
 {
-	const struct xfsbl_tfa_handoff_params *TFAHandoffParams;
-	if (!tfa_handoff_addr) {
-		WARN("BL31: No TFA handoff structure passed\n");
-		return FSBL_HANDOFF_NO_STRUCT;
+	const struct xbl_handoff_params *HandoffParams;
+
+	if (!handoff_addr) {
+		WARN("BL31: No handoff structure passed\n");
+		return XBL_HANDOFF_NO_STRUCT;
 	}
 
-	TFAHandoffParams = (struct xfsbl_tfa_handoff_params *)tfa_handoff_addr;
-	if ((TFAHandoffParams->magic[0] != 'X') ||
-	    (TFAHandoffParams->magic[1] != 'L') ||
-	    (TFAHandoffParams->magic[2] != 'N') ||
-	    (TFAHandoffParams->magic[3] != 'X')) {
-		ERROR("BL31: invalid TF-A handoff structure at %" PRIx64 "\n",
-		      tfa_handoff_addr);
-		return FSBL_HANDOFF_INVAL_STRUCT;
+	HandoffParams = (struct xbl_handoff_params *)handoff_addr;
+	if ((HandoffParams->magic[0] != 'X') ||
+	    (HandoffParams->magic[1] != 'L') ||
+	    (HandoffParams->magic[2] != 'N') ||
+	    (HandoffParams->magic[3] != 'X')) {
+		ERROR("BL31: invalid handoff structure at %" PRIx64 "\n", handoff_addr);
+		return XBL_HANDOFF_INVAL_STRUCT;
 	}
 
 	VERBOSE("BL31: TF-A handoff params at:0x%" PRIx64 ", entries:%u\n",
-		tfa_handoff_addr, TFAHandoffParams->num_entries);
-	if (TFAHandoffParams->num_entries > FSBL_MAX_PARTITIONS) {
+		handoff_addr, HandoffParams->num_entries);
+	if (HandoffParams->num_entries > XBL_MAX_PARTITIONS) {
 		ERROR("BL31: TF-A handoff params: too many partitions (%u/%u)\n",
-		      TFAHandoffParams->num_entries, FSBL_MAX_PARTITIONS);
-		return FSBL_HANDOFF_TOO_MANY_PARTS;
+		      HandoffParams->num_entries, XBL_MAX_PARTITIONS);
+		return XBL_HANDOFF_TOO_MANY_PARTS;
 	}
 
 	/*
@@ -177,43 +202,55 @@
 	 * (bl32, bl33). I.e. the last applicable images in the handoff
 	 * structure will be used for the hand off
 	 */
-	for (size_t i = 0; i < TFAHandoffParams->num_entries; i++) {
+	for (size_t i = 0; i < HandoffParams->num_entries; i++) {
 		entry_point_info_t *image;
 		int32_t target_estate, target_secure, target_cpu;
 		uint32_t target_endianness, target_el;
 
 		VERBOSE("BL31: %zd: entry:0x%" PRIx64 ", flags:0x%" PRIx64 "\n", i,
-			TFAHandoffParams->partition[i].entry_point,
-			TFAHandoffParams->partition[i].flags);
+			HandoffParams->partition[i].entry_point,
+			HandoffParams->partition[i].flags);
+
+#if defined(PLAT_versal_net)
+		uint32_t target_cluster;
 
-		target_cpu = get_fsbl_cpu(&TFAHandoffParams->partition[i]);
-		if (target_cpu != FSBL_FLAGS_A53_0) {
+		target_cluster = get_xbl_cluster(&HandoffParams->partition[i]);
+		if (target_cluster != XBL_FLAGS_CLUSTER_0) {
+			WARN("BL31: invalid target Cluster (%i)\n",
+			     target_cluster);
+			continue;
+		}
+#endif /* PLAT_versal_net */
+
+		target_cpu = get_xbl_cpu(&HandoffParams->partition[i]);
+		if (target_cpu != XBL_FLAGS_A53_0) {
 			WARN("BL31: invalid target CPU (%i)\n", target_cpu);
 			continue;
 		}
 
-		target_el = get_fsbl_el(&TFAHandoffParams->partition[i]);
-		if ((target_el == FSBL_FLAGS_EL3) ||
-		    (target_el == FSBL_FLAGS_EL0)) {
-			WARN("BL31: invalid exception level (%i)\n", target_el);
+		target_el = get_xbl_el(&HandoffParams->partition[i]);
+		if ((target_el == XBL_FLAGS_EL3) ||
+		    (target_el == XBL_FLAGS_EL0)) {
+			WARN("BL31: invalid target exception level(%i)\n",
+			     target_el);
 			continue;
 		}
 
-		target_secure = get_fsbl_ss(&TFAHandoffParams->partition[i]);
-		if (target_secure == FSBL_FLAGS_SECURE &&
-		    target_el == FSBL_FLAGS_EL2) {
+		target_secure = get_xbl_ss(&HandoffParams->partition[i]);
+		if (target_secure == XBL_FLAGS_SECURE &&
+		    target_el == XBL_FLAGS_EL2) {
 			WARN("BL31: invalid security state (%i) for exception level (%i)\n",
 			     target_secure, target_el);
 			continue;
 		}
 
-		target_estate = get_fsbl_estate(&TFAHandoffParams->partition[i]);
-		target_endianness = get_fsbl_endian(&TFAHandoffParams->partition[i]);
+		target_estate = get_xbl_estate(&HandoffParams->partition[i]);
+		target_endianness = get_xbl_endian(&HandoffParams->partition[i]);
 
-		if (target_secure == FSBL_FLAGS_SECURE) {
+		if (target_secure == XBL_FLAGS_SECURE) {
 			image = bl32;
 
-			if (target_estate == FSBL_FLAGS_ESTATE_A32) {
+			if (target_estate == XBL_FLAGS_ESTATE_A32) {
 				bl32->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
 							 target_endianness,
 							 DISABLE_ALL_EXCEPTIONS);
@@ -224,8 +261,8 @@
 		} else {
 			image = bl33;
 
-			if (target_estate == FSBL_FLAGS_ESTATE_A32) {
-				if (target_el == FSBL_FLAGS_EL2) {
+			if (target_estate == XBL_FLAGS_ESTATE_A32) {
+				if (target_el == XBL_FLAGS_EL2) {
 					target_el = MODE32_hyp;
 				} else {
 					target_el = MODE32_sys;
@@ -235,7 +272,7 @@
 							 target_endianness,
 							 DISABLE_ALL_EXCEPTIONS);
 			} else {
-				if (target_el == FSBL_FLAGS_EL2) {
+				if (target_el == XBL_FLAGS_EL2) {
 					target_el = MODE_EL2;
 				} else {
 					target_el = MODE_EL1;
@@ -247,10 +284,10 @@
 		}
 
 		VERBOSE("Setting up %s entry point to:%" PRIx64 ", el:%x\n",
-			target_secure == FSBL_FLAGS_SECURE ? "BL32" : "BL33",
-			TFAHandoffParams->partition[i].entry_point,
+			target_secure == XBL_FLAGS_SECURE ? "BL32" : "BL33",
+			HandoffParams->partition[i].entry_point,
 			target_el);
-		image->pc = TFAHandoffParams->partition[i].entry_point;
+		image->pc = HandoffParams->partition[i].entry_point;
 
 		if (target_endianness == SPSR_E_BIG) {
 			EP_SET_EE(image->h.attr, EP_EE_BIG);
@@ -259,5 +296,5 @@
 		}
 	}
 
-	return FSBL_HANDOFF_SUCCESS;
+	return XBL_HANDOFF_SUCCESS;
 }
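
Note: the validation loop above relies on a family of get_xbl_*() accessors
to decode each partition's flags word. A minimal sketch of that decoding
pattern is shown below, assuming a plain shift-and-mask layout; the bit
position, mask, and struct layout are hypothetical placeholders, not the
definitions from the platform headers.

/*
 * Illustrative sketch of a flag accessor in the style of get_xbl_el().
 * The XBL_FLAGS_EL_SHIFT/MASK values here are assumptions.
 */
#include <stdint.h>

#define XBL_FLAGS_EL_SHIFT	1U
#define XBL_FLAGS_EL_MASK	(0x3ULL << XBL_FLAGS_EL_SHIFT)

struct xbl_partition {
	uint64_t entry_point;	/* partition entry address */
	uint64_t flags;		/* packed target attributes */
};

static uint32_t get_xbl_el(const struct xbl_partition *partition)
{
	/* Extract the target exception level field from the flags word. */
	return (uint32_t)((partition->flags & XBL_FLAGS_EL_MASK) >>
			  XBL_FLAGS_EL_SHIFT);
}
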
diff --git a/plat/xilinx/versal/bl31_versal_setup.c b/plat/xilinx/versal/bl31_versal_setup.c
index f90cc78..0ef92a6 100644
--- a/plat/xilinx/versal/bl31_versal_setup.c
+++ b/plat/xilinx/versal/bl31_versal_setup.c
@@ -69,9 +69,9 @@
 				u_register_t arg2, u_register_t arg3)
 {
 	uint64_t tfa_handoff_addr;
-	uint32_t payload[PAYLOAD_ARG_CNT], max_size = TFA_HANDOFF_PARAMS_MAX_SIZE;
+	uint32_t payload[PAYLOAD_ARG_CNT], max_size = HANDOFF_PARAMS_MAX_SIZE;
 	enum pm_ret_status ret_status;
-	uint64_t addr[TFA_HANDOFF_PARAMS_MAX_SIZE];
+	uint64_t addr[HANDOFF_PARAMS_MAX_SIZE];
 
 	if (VERSAL_CONSOLE_IS(pl011) || (VERSAL_CONSOLE_IS(pl011_1))) {
 		static console_t versal_runtime_console;
@@ -126,14 +126,14 @@
 		tfa_handoff_addr = mmio_read_32(PMC_GLOBAL_GLOB_GEN_STORAGE4);
 	}
 
-	enum fsbl_handoff ret = fsbl_tfa_handover(&bl32_image_ep_info,
-						  &bl33_image_ep_info,
-						  tfa_handoff_addr);
+	enum xbl_handoff ret = xbl_handover(&bl32_image_ep_info,
+					    &bl33_image_ep_info,
+					    tfa_handoff_addr);
-	if (ret == FSBL_HANDOFF_NO_STRUCT || ret == FSBL_HANDOFF_INVAL_STRUCT) {
+	if (ret == XBL_HANDOFF_NO_STRUCT || ret == XBL_HANDOFF_INVAL_STRUCT) {
 		bl31_set_default_config();
-	} else if (ret == FSBL_HANDOFF_TOO_MANY_PARTS) {
+	} else if (ret == XBL_HANDOFF_TOO_MANY_PARTS) {
 		ERROR("BL31: Error too many partitions %u\n", ret);
-	} else if (ret != FSBL_HANDOFF_SUCCESS) {
+	} else if (ret != XBL_HANDOFF_SUCCESS) {
 		panic();
 	} else {
 		INFO("BL31: PLM to TF-A handover success %u\n", ret);
diff --git a/plat/xilinx/versal_net/bl31_versal_net_setup.c b/plat/xilinx/versal_net/bl31_versal_net_setup.c
index ae9dfe8..79205a3 100644
--- a/plat/xilinx/versal_net/bl31_versal_net_setup.c
+++ b/plat/xilinx/versal_net/bl31_versal_net_setup.c
@@ -25,6 +25,9 @@
 
 #include <plat_private.h>
 #include <plat_startup.h>
+#include <pm_api_sys.h>
+#include <pm_client.h>
+#include <pm_ipi.h>
 #include <versal_net_def.h>
 
 static entry_point_info_t bl32_image_ep_info;
@@ -70,6 +73,11 @@
 {
 	uint32_t uart_clock;
 	int32_t rc;
+#if !(TFA_NO_PM)
+	uint64_t tfa_handoff_addr, buff[HANDOFF_PARAMS_MAX_SIZE] = {0};
+	uint32_t payload[PAYLOAD_ARG_CNT], max_size = HANDOFF_PARAMS_MAX_SIZE;
+	enum pm_ret_status ret_status;
+#endif /* !(TFA_NO_PM) */
 
 	board_detection();
 
@@ -136,8 +144,32 @@
 	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
 	SET_PARAM_HEAD(&bl33_image_ep_info, PARAM_EP, VERSION_1, 0);
 	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+#if !(TFA_NO_PM)
+	PM_PACK_PAYLOAD4(payload, LOADER_MODULE_ID, 1, PM_LOAD_GET_HANDOFF_PARAMS,
+			 (uintptr_t)buff >> 32U, (uintptr_t)buff, max_size);
 
+	ret_status = pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+	if (ret_status == PM_RET_SUCCESS) {
+		enum xbl_handoff xbl_ret;
+
+		tfa_handoff_addr = (uintptr_t)&buff;
+
+		xbl_ret = xbl_handover(&bl32_image_ep_info, &bl33_image_ep_info,
+				       tfa_handoff_addr);
+		if (xbl_ret != XBL_HANDOFF_SUCCESS) {
+			ERROR("BL31: PLM to TF-A handover failed %u\n", xbl_ret);
+			panic();
+		}
+
+		INFO("BL31: PLM to TF-A handover success\n");
+	} else {
+		INFO("BL31: setting up default configs\n");
+
+		bl31_set_default_config();
+	}
+#else
 	bl31_set_default_config();
+#endif /* !(TFA_NO_PM) */
 
 	NOTICE("BL31: Secure code at 0x%lx\n", bl32_image_ep_info.pc);
 	NOTICE("BL31: Non secure code at 0x%lx\n", bl33_image_ep_info.pc);
diff --git a/plat/xilinx/zynqmp/bl31_zynqmp_setup.c b/plat/xilinx/zynqmp/bl31_zynqmp_setup.c
index 87f027a..56d402f 100644
--- a/plat/xilinx/zynqmp/bl31_zynqmp_setup.c
+++ b/plat/xilinx/zynqmp/bl31_zynqmp_setup.c
@@ -112,11 +112,11 @@
 	if (zynqmp_get_bootmode() == ZYNQMP_BOOTMODE_JTAG) {
 		bl31_set_default_config();
 	} else {
-		/* use parameters from FSBL */
-		enum fsbl_handoff ret = fsbl_tfa_handover(&bl32_image_ep_info,
-							  &bl33_image_ep_info,
-							  tfa_handoff_addr);
+		/* use parameters from XBL */
+		enum xbl_handoff ret = xbl_handover(&bl32_image_ep_info,
+						    &bl33_image_ep_info,
+						    tfa_handoff_addr);
-		if (ret != FSBL_HANDOFF_SUCCESS) {
+		if (ret != XBL_HANDOFF_SUCCESS) {
 			panic();
 		}
 	}
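
Note: unlike the Versal path, the ZynqMP path above panics on any
non-success return. A sketch of the fuller decision table follows, reusing
the return-code names visible in this patch; the mirrored enum (member
order and values) and the handler itself are illustrative assumptions:

#include <stdio.h>

/* Illustrative mirror of the xbl_handover() return codes; the real
 * definitions live in the platform headers. */
enum xbl_handoff {
	XBL_HANDOFF_SUCCESS,
	XBL_HANDOFF_NO_STRUCT,
	XBL_HANDOFF_INVAL_STRUCT,
	XBL_HANDOFF_TOO_MANY_PARTS,
};

/* Hypothetical caller-side handling, following the Versal logic above:
 * fall back to defaults for a missing/invalid struct, report partition
 * overflow, and treat anything else as fatal. */
static int handle_handoff(enum xbl_handoff ret)
{
	switch (ret) {
	case XBL_HANDOFF_SUCCESS:
		printf("handover success\n");
		return 0;
	case XBL_HANDOFF_NO_STRUCT:
	case XBL_HANDOFF_INVAL_STRUCT:
		printf("no/invalid handoff struct, using defaults\n");
		return 0;
	case XBL_HANDOFF_TOO_MANY_PARTS:
		printf("too many partitions\n");
		return -1;
	default:
		return -1;	/* unexpected code: fatal */
	}
}
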
diff --git a/services/std_svc/rmmd/rmmd_main.c b/services/std_svc/rmmd/rmmd_main.c
index 24f6c41..c80b524 100644
--- a/services/std_svc/rmmd/rmmd_main.c
+++ b/services/std_svc/rmmd/rmmd_main.c
@@ -18,6 +18,8 @@
 #include <context.h>
 #include <lib/el3_runtime/context_mgmt.h>
 #include <lib/el3_runtime/pubsub.h>
+#include <lib/extensions/pmuv3.h>
+#include <lib/extensions/sys_reg_trace.h>
 #include <lib/gpt_rme/gpt_rme.h>
 
 #include <lib/spinlock.h>
@@ -125,6 +127,8 @@
 	 */
 		sve_enable(ctx);
 	}
+
+	pmuv3_enable(ctx);
 }
 
 /*******************************************************************************