Merge "fix(fvp): adjust BL31 maximum size as per total SRAM size" into integration
diff --git a/Makefile b/Makefile
index 8d3ffe1..8a0a2e0 100644
--- a/Makefile
+++ b/Makefile
@@ -523,6 +523,7 @@
 				drivers/console/multi_console.c		\
 				lib/${ARCH}/cache_helpers.S		\
 				lib/${ARCH}/misc_helpers.S		\
+				lib/extensions/pmuv3/${ARCH}/pmuv3.c	\
 				plat/common/plat_bl_common.c		\
 				plat/common/plat_log_common.c		\
 				plat/common/${ARCH}/plat_common.c	\
@@ -1147,7 +1148,6 @@
 	CTX_INCLUDE_FPREGS \
 	CTX_INCLUDE_EL2_REGS \
 	DEBUG \
-	DISABLE_MTPMU \
 	DYN_DISABLE_AUTH \
 	EL3_EXCEPTION_HANDLING \
 	ENABLE_AMU_AUXILIARY_COUNTERS \
@@ -1225,6 +1225,7 @@
 	CTX_INCLUDE_MTE_REGS \
 	CTX_INCLUDE_NEVE_REGS \
 	CRYPTO_SUPPORT \
+	DISABLE_MTPMU \
 	ENABLE_BRBE_FOR_NS \
 	ENABLE_TRBE_FOR_NS \
 	ENABLE_BTI \
diff --git a/bl1/bl1.mk b/bl1/bl1.mk
index b1791b1..95fe50e 100644
--- a/bl1/bl1.mk
+++ b/bl1/bl1.mk
@@ -16,10 +16,6 @@
 				plat/common/${ARCH}/platform_up_stack.S \
 				${MBEDTLS_SOURCES}
 
-ifeq (${DISABLE_MTPMU},1)
-BL1_SOURCES		+=	lib/extensions/mtpmu/${ARCH}/mtpmu.S
-endif
-
 ifeq (${ARCH},aarch64)
 BL1_SOURCES		+=	lib/cpus/aarch64/dsu_helpers.S		\
 				lib/el3_runtime/aarch64/context.S
diff --git a/bl2/bl2.mk b/bl2/bl2.mk
index 19b955f..1663c52 100644
--- a/bl2/bl2.mk
+++ b/bl2/bl2.mk
@@ -43,10 +43,6 @@
 				bl2/${ARCH}/bl2_run_next_image.S        \
 				lib/cpus/${ARCH}/cpu_helpers.S
 
-ifeq (${DISABLE_MTPMU},1)
-BL2_SOURCES		+=	lib/extensions/mtpmu/${ARCH}/mtpmu.S
-endif
-
 ifeq (${ARCH},aarch64)
 BL2_SOURCES		+=	lib/cpus/aarch64/dsu_helpers.S
 endif
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index d7c9a52..0c1d657 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -54,10 +54,6 @@
 				${SPMC_SOURCES}					\
 				${SPM_SOURCES}
 
-ifeq (${DISABLE_MTPMU},1)
-BL31_SOURCES		+=	lib/extensions/mtpmu/aarch64/mtpmu.S
-endif
-
 ifeq (${ENABLE_PMF}, 1)
 BL31_SOURCES		+=	lib/pmf/pmf_main.c
 endif
diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c
index e70eb55..8f1f043 100644
--- a/bl31/bl31_main.c
+++ b/bl31/bl31_main.c
@@ -112,6 +112,9 @@
  ******************************************************************************/
 void bl31_main(void)
 {
+	/* Init registers that never change for the lifetime of TF-A */
+	cm_manage_extensions_el3();
+
 	NOTICE("BL31: %s\n", version_string);
 	NOTICE("BL31: %s\n", build_message);
 
diff --git a/bl32/sp_min/sp_min.mk b/bl32/sp_min/sp_min.mk
index ec75d88..0b7bc57 100644
--- a/bl32/sp_min/sp_min.mk
+++ b/bl32/sp_min/sp_min.mk
@@ -20,10 +20,6 @@
 				services/std_svc/std_svc_setup.c	\
 				${PSCI_LIB_SOURCES}
 
-ifeq (${DISABLE_MTPMU},1)
-BL32_SOURCES		+=	lib/extensions/mtpmu/aarch32/mtpmu.S
-endif
-
 ifeq (${ENABLE_PMF}, 1)
 BL32_SOURCES		+=	lib/pmf/pmf_main.c
 endif
diff --git a/common/feat_detect.c b/common/feat_detect.c
index 50b74d0..d2e94e9 100644
--- a/common/feat_detect.c
+++ b/common/feat_detect.c
@@ -144,6 +144,14 @@
 	check_feature(ENABLE_FEAT_SB, read_feat_sb_id_field(), "SB", 1, 1);
 	check_feature(ENABLE_FEAT_CSV2_2, read_feat_csv2_id_field(),
 		      "CSV2_2", 2, 3);
+	/*
+	 * Even though PMUv3 is an OPTIONAL feature, it is always implemented
+	 * in practice, as Arm prescribes. So assume it will be there and do
+	 * away with a build flag for it. This check covers minor PMUv3px
+	 * revisions so that we catch new ones as they come along.
+	 */
+	check_feature(FEAT_STATE_ALWAYS, read_feat_pmuv3_id_field(),
+		      "PMUv3", 1, ID_AA64DFR0_PMUVER_PMUV3P7);
 
 	/* v8.1 features */
 	check_feature(ENABLE_FEAT_PAN, read_feat_pan_id_field(), "PAN", 1, 3);
@@ -184,6 +192,13 @@
 	check_feature(ENABLE_FEAT_TWED, read_feat_twed_id_field(),
 		      "TWED", 1, 1);
 
+	/*
+	 * Even though this is a "DISABLE" flag, it confusingly performs
+	 * feature enablement duties like all the other flags here. Check it
+	 * against the HW feature whenever we intend to diverge from the
+	 * default behaviour.
+	 */
+	check_feature(DISABLE_MTPMU, read_feat_mtpmu_id_field(), "MTPMU", 1, 1);
+
 	/* v8.7 features */
 	check_feature(ENABLE_FEAT_HCX, read_feat_hcx_id_field(), "HCX", 1, 1);
 
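For context, check_feature() itself is defined earlier in common/feat_detect.c and is not part of this hunk. Roughly, it compares a flag's FEAT_STATE against the ID register field; a sketch reproduced from the upstream file follows (ERROR() and the tainted flag come from the surrounding code, and the exact wording of the messages is an assumption):

/* Sketch of check_feature(), per common/feat_detect.c upstream */
static void check_feature(int state, unsigned long field, const char *feat_name,
			  unsigned int min, unsigned int max)
{
	/* FEAT_STATE_ALWAYS (1): the build assumes the feature is present */
	if ((state == FEAT_STATE_ALWAYS) && (field < min)) {
		ERROR("FEAT_%s not supported by the PE\n", feat_name);
		tainted = true;
	}

	/* Catch ID field values newer than what this build knows about */
	if ((state >= FEAT_STATE_ALWAYS) && (field > max)) {
		ERROR("FEAT_%s is version %lu, but is only known up to version %u\n",
		      feat_name, field, max);
		tainted = true;
	}
}

Passing FEAT_STATE_ALWAYS with bounds 1 and ID_AA64DFR0_PMUVER_PMUV3P7 therefore flags both a PE with no PMUv3 at all and one reporting a minor revision newer than PMUv3p7.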
diff --git a/docs/getting_started/build-options.rst b/docs/getting_started/build-options.rst
index 7ca8aa9..a5633e9 100644
--- a/docs/getting_started/build-options.rst
+++ b/docs/getting_started/build-options.rst
@@ -207,10 +207,10 @@
    of the binary image. If set to 1, then only the ELF image is built.
    0 is the default.
 
--  ``DISABLE_MTPMU``: Boolean option to disable FEAT_MTPMU if implemented
-   (Armv8.6 onwards). Its default value is 0 to keep consistency with platforms
-   that do not implement FEAT_MTPMU. For more information on FEAT_MTPMU,
-   check the latest Arm ARM.
+-  ``DISABLE_MTPMU``: Numeric option to disable ``FEAT_MTPMU`` (Multi-threaded
+   PMU). ``FEAT_MTPMU`` is an optional feature available from Armv8.6 onwards.
+   This flag can take the values 0 to 2, to align with the
+   ``FEATURE_DETECTION`` mechanism (see the value sketch below). Default is
+   ``0``.
 
 -  ``DYN_DISABLE_AUTH``: Provides the capability to dynamically disable Trusted
    Board Boot authentication at runtime. This option is meant to be enabled only
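For reference, the three ``DISABLE_MTPMU`` values map onto the FEAT_STATE convention shared by the other feature flags (constants per include/common/feat_detect.h upstream):

/* FEAT_STATE values accepted by DISABLE_MTPMU and the other feature flags */
#define FEAT_STATE_DISABLED	0	/* leave FEAT_MTPMU untouched */
#define FEAT_STATE_ALWAYS	1	/* unconditionally disable it */
#define FEAT_STATE_CHECK	2	/* disable it only if the ID registers
					 * report it as present */

The is_feat_mtpmu_supported() helpers added below in arch_features.h implement exactly this mapping: 0 short-circuits to false, 1 to true, and 2 falls through to the ID register read.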
diff --git a/docs/plat/arm/fvp/index.rst b/docs/plat/arm/fvp/index.rst
index 42c0eda..fcfa04a 100644
--- a/docs/plat/arm/fvp/index.rst
+++ b/docs/plat/arm/fvp/index.rst
@@ -51,7 +51,6 @@
 -  ``FVP_Morello``            (Version 0.11/33)
 -  ``FVP_RD_E1_edge``         (Version 11.17/29)
 -  ``FVP_RD_V1``              (Version 11.17/29)
--  ``FVP_TC0`` (Version 11.17/18)
 -  ``FVP_TC1`` (Version 11.17/33)
 -  ``FVP_TC2`` (Version 11.18/28)
 
@@ -631,7 +630,7 @@
 
 --------------
 
-*Copyright (c) 2019-2022, Arm Limited. All rights reserved.*
+*Copyright (c) 2019-2023, Arm Limited. All rights reserved.*
 
 .. _FW_CONFIG for FVP: https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/tree/plat/arm/board/fvp/fdts/fvp_fw_config.dts
 .. _Arm's website: `FVP models`_
diff --git a/docs/plat/arm/tc/index.rst b/docs/plat/arm/tc/index.rst
index df1847d..925befc 100644
--- a/docs/plat/arm/tc/index.rst
+++ b/docs/plat/arm/tc/index.rst
@@ -17,7 +17,7 @@
 (TARGET_PLATFORM=1), TC2 (TARGET_PLATFORM=2) platforms w.r.t. TF-A
 is the set of CPUs supported, as listed below:
 
--  TC0 has support for Cortex A510, Cortex A710 and Cortex X2.
+-  TC0 has support for Cortex A510, Cortex A710 and Cortex X2. (Note: TC0 is now deprecated.)
 -  TC1 has support for Cortex A510, Cortex Makalu and Cortex X3.
 -  TC2 has support for Hayes and Hunter Arm CPUs.
 
diff --git a/include/arch/aarch32/arch.h b/include/arch/aarch32/arch.h
index c8a6334..dd2c0a6 100644
--- a/include/arch/aarch32/arch.h
+++ b/include/arch/aarch32/arch.h
@@ -104,7 +104,11 @@
 /* CSSELR definitions */
 #define LEVEL_SHIFT		U(1)
 
-/* ID_DFR0_EL1 definitions */
+/* ID_DFR0 definitions */
+#define ID_DFR0_PERFMON_SHIFT		U(24)
+#define ID_DFR0_PERFMON_MASK		U(0xf)
+#define ID_DFR0_PERFMON_PMUV3		U(3)
+#define ID_DFR0_PERFMON_PMUV3P5		U(6)
 #define ID_DFR0_COPTRC_SHIFT		U(12)
 #define ID_DFR0_COPTRC_MASK		U(0xf)
 #define ID_DFR0_COPTRC_SUPPORTED	U(1)
@@ -118,6 +122,7 @@
 #define ID_DFR1_MTPMU_SHIFT	U(0)
 #define ID_DFR1_MTPMU_MASK	U(0xf)
 #define ID_DFR1_MTPMU_SUPPORTED	U(1)
+#define ID_DFR1_MTPMU_DISABLED	U(15)
 
 /* ID_MMFR3 definitions */
 #define ID_MMFR3_PAN_SHIFT	U(16)
@@ -464,6 +469,10 @@
 #define PMCR_LP_BIT		(U(1) << 7)
 #define PMCR_LC_BIT		(U(1) << 6)
 #define PMCR_DP_BIT		(U(1) << 5)
+#define PMCR_X_BIT		(U(1) << 4)
+#define PMCR_C_BIT		(U(1) << 2)
+#define PMCR_P_BIT		(U(1) << 1)
+#define PMCR_E_BIT		(U(1) << 0)
 #define	PMCR_RESET_VAL		U(0x0)
 
 /*******************************************************************************
diff --git a/include/arch/aarch32/arch_features.h b/include/arch/aarch32/arch_features.h
index 99e3fd0..f19c4c2 100644
--- a/include/arch/aarch32/arch_features.h
+++ b/include/arch/aarch32/arch_features.h
@@ -162,4 +162,29 @@
 static inline bool is_feat_s1pie_supported(void) { return false; }
 static inline bool is_feat_sxpie_supported(void) { return false; }
 
+static inline unsigned int read_feat_pmuv3_id_field(void)
+{
+	return ISOLATE_FIELD(read_id_dfr0(), ID_DFR0_PERFMON);
+}
+
+static inline unsigned int read_feat_mtpmu_id_field(void)
+{
+	return ISOLATE_FIELD(read_id_dfr1(), ID_DFR1_MTPMU);
+}
+
+static inline bool is_feat_mtpmu_supported(void)
+{
+	if (DISABLE_MTPMU == FEAT_STATE_DISABLED) {
+		return false;
+	}
+
+	if (DISABLE_MTPMU == FEAT_STATE_ALWAYS) {
+		return true;
+	}
+
+	unsigned int mtpmu = read_feat_mtpmu_id_field();
+
+	return mtpmu != 0U && mtpmu != ID_DFR1_MTPMU_DISABLED;
+}
+
 #endif /* ARCH_FEATURES_H */
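Both new ID helpers lean on the ISOLATE_FIELD() macro already present in these headers; its upstream definition is roughly:

/* Extract a named field from an ID register value (per arch_features.h) */
#define ISOLATE_FIELD(reg, feat)					\
	((unsigned int)(((reg) >> (feat ## _SHIFT)) & (feat ## _MASK)))

So read_feat_mtpmu_id_field() expands to (read_id_dfr1() >> ID_DFR1_MTPMU_SHIFT) & ID_DFR1_MTPMU_MASK. The reserved value 0xf (ID_DFR1_MTPMU_DISABLED) indicates FEAT_MTPMU is not usable (disabled by an IMPLEMENTATION DEFINED control), hence the extra comparison in is_feat_mtpmu_supported().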
diff --git a/include/arch/aarch32/arch_helpers.h b/include/arch/aarch32/arch_helpers.h
index ca5a44b..3a7c768 100644
--- a/include/arch/aarch32/arch_helpers.h
+++ b/include/arch/aarch32/arch_helpers.h
@@ -221,6 +221,7 @@
 DEFINE_COPROCR_READ_FUNC(id_mmfr3, ID_MMFR3)
 DEFINE_COPROCR_READ_FUNC(id_mmfr4, ID_MMFR4)
 DEFINE_COPROCR_READ_FUNC(id_dfr0, ID_DFR0)
+DEFINE_COPROCR_READ_FUNC(id_dfr1, ID_DFR1)
 DEFINE_COPROCR_READ_FUNC(id_pfr0, ID_PFR0)
 DEFINE_COPROCR_READ_FUNC(id_pfr1, ID_PFR1)
 DEFINE_COPROCR_READ_FUNC(isr, ISR)
@@ -290,7 +291,7 @@
 DEFINE_COPROCR_RW_FUNCS(sdcr, SDCR)
 DEFINE_COPROCR_RW_FUNCS(hdcr, HDCR)
 DEFINE_COPROCR_RW_FUNCS(cnthp_ctl, CNTHP_CTL)
-DEFINE_COPROCR_READ_FUNC(pmcr, PMCR)
+DEFINE_COPROCR_RW_FUNCS(pmcr, PMCR)
 
 /*
  * Address translation
diff --git a/include/arch/aarch32/el3_common_macros.S b/include/arch/aarch32/el3_common_macros.S
index 585a9ae..697eb82 100644
--- a/include/arch/aarch32/el3_common_macros.S
+++ b/include/arch/aarch32/el3_common_macros.S
@@ -277,10 +277,6 @@
 	cps	#MODE32_mon
 	isb
 
-#if DISABLE_MTPMU
-	bl	mtpmu_disable
-#endif
-
 	.if \_warm_boot_mailbox
 		/* -------------------------------------------------------------
 		 * This code will be executed for both warm and cold resets.
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index f3bccc4..5dbcd0a 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -221,6 +221,12 @@
 #define ID_AA64DFR0_TRACEFILT_MASK	U(0xf)
 #define ID_AA64DFR0_TRACEFILT_SUPPORTED	U(1)
 #define ID_AA64DFR0_TRACEFILT_LENGTH	U(4)
+#define ID_AA64DFR0_PMUVER_LENGTH	U(4)
+#define ID_AA64DFR0_PMUVER_SHIFT	U(8)
+#define ID_AA64DFR0_PMUVER_MASK		U(0xf)
+#define ID_AA64DFR0_PMUVER_PMUV3	U(1)
+#define ID_AA64DFR0_PMUVER_PMUV3P7	U(7)
+#define ID_AA64DFR0_PMUVER_IMP_DEF	U(0xf)
 
 /* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
 #define ID_AA64DFR0_PMS_SHIFT		U(32)
@@ -237,6 +243,7 @@
 #define ID_AA64DFR0_MTPMU_SHIFT		U(48)
 #define ID_AA64DFR0_MTPMU_MASK		ULL(0xf)
 #define ID_AA64DFR0_MTPMU_SUPPORTED	ULL(1)
+#define ID_AA64DFR0_MTPMU_DISABLED	ULL(15)
 
 /* ID_AA64DFR0_EL1.BRBE definitions */
 #define ID_AA64DFR0_BRBE_SHIFT		U(52)
@@ -595,16 +602,16 @@
 #define MDCR_TDOSA_BIT		(ULL(1) << 10)
 #define MDCR_TDA_BIT		(ULL(1) << 9)
 #define MDCR_TPM_BIT		(ULL(1) << 6)
-#define MDCR_EL3_RESET_VAL	ULL(0x0)
+#define MDCR_EL3_RESET_VAL	MDCR_MTPME_BIT
 
 /* MDCR_EL2 definitions */
 #define MDCR_EL2_MTPME		(U(1) << 28)
-#define MDCR_EL2_HLP		(U(1) << 26)
+#define MDCR_EL2_HLP_BIT	(U(1) << 26)
 #define MDCR_EL2_E2TB(x)	((x) << 24)
 #define MDCR_EL2_E2TB_EL1	U(0x3)
-#define MDCR_EL2_HCCD		(U(1) << 23)
+#define MDCR_EL2_HCCD_BIT	(U(1) << 23)
 #define MDCR_EL2_TTRF		(U(1) << 19)
-#define MDCR_EL2_HPMD		(U(1) << 17)
+#define MDCR_EL2_HPMD_BIT	(U(1) << 17)
 #define MDCR_EL2_TPMS		(U(1) << 14)
 #define MDCR_EL2_E2PB(x)	((x) << 12)
 #define MDCR_EL2_E2PB_EL1	U(0x3)
@@ -615,6 +622,7 @@
 #define MDCR_EL2_HPME_BIT	(U(1) << 7)
 #define MDCR_EL2_TPM_BIT	(U(1) << 6)
 #define MDCR_EL2_TPMCR_BIT	(U(1) << 5)
+#define MDCR_EL2_HPMN_MASK	U(0x1f)
 #define MDCR_EL2_RESET_VAL	U(0x0)
 
 /* HSTR_EL2 definitions */
diff --git a/include/arch/aarch64/arch_features.h b/include/arch/aarch64/arch_features.h
index 609a95b..9d71987 100644
--- a/include/arch/aarch64/arch_features.h
+++ b/include/arch/aarch64/arch_features.h
@@ -639,6 +639,7 @@
 	return read_feat_trbe_id_field() != 0U;
 
 }
+
 /*******************************************************************************
  * Function to identify the presence of FEAT_SMEx (Scalar Matrix Extension)
  ******************************************************************************/
@@ -699,4 +700,29 @@
 			     ID_AA64MMFR0_EL1_TGRAN64);
 }
 
+static inline unsigned int read_feat_pmuv3_id_field(void)
+{
+	return ISOLATE_FIELD(read_id_aa64dfr0_el1(), ID_AA64DFR0_PMUVER);
+}
+
+static inline unsigned int read_feat_mtpmu_id_field(void)
+{
+	return ISOLATE_FIELD(read_id_aa64dfr0_el1(), ID_AA64DFR0_MTPMU);
+}
+
+static inline bool is_feat_mtpmu_supported(void)
+{
+	if (DISABLE_MTPMU == FEAT_STATE_DISABLED) {
+		return false;
+	}
+
+	if (DISABLE_MTPMU == FEAT_STATE_ALWAYS) {
+		return true;
+	}
+
+	unsigned int mtpmu = read_feat_mtpmu_id_field();
+
+	return (mtpmu != 0U) && (mtpmu != ID_AA64DFR0_MTPMU_DISABLED);
+}
+
 #endif /* ARCH_FEATURES_H */
diff --git a/include/arch/aarch64/el2_common_macros.S b/include/arch/aarch64/el2_common_macros.S
index dcaea3d..9609c0d 100644
--- a/include/arch/aarch64/el2_common_macros.S
+++ b/include/arch/aarch64/el2_common_macros.S
@@ -103,7 +103,7 @@
 	 */
 	mov_imm	x0, ((MDCR_EL2_RESET_VAL | \
 		      MDCR_SPD32(MDCR_SPD32_DISABLE)) \
-		      & ~(MDCR_EL2_HPMD | MDCR_TDOSA_BIT | \
+		      & ~(MDCR_EL2_HPMD_BIT | MDCR_TDOSA_BIT | \
 		      MDCR_TDA_BIT | MDCR_TPM_BIT))
 
 	msr	mdcr_el2, x0
@@ -244,10 +244,6 @@
 		isb
 	.endif /* _init_sctlr */
 
-#if DISABLE_MTPMU
-		bl	mtpmu_disable
-#endif
-
 	.if \_warm_boot_mailbox
 		/* -------------------------------------------------------------
 		 * This code will be executed for both warm and cold resets.
diff --git a/include/arch/aarch64/el3_common_macros.S b/include/arch/aarch64/el3_common_macros.S
index 2dee07d..6360461 100644
--- a/include/arch/aarch64/el3_common_macros.S
+++ b/include/arch/aarch64/el3_common_macros.S
@@ -119,22 +119,6 @@
 	 * MDCR_EL3.TPM: Set to zero so that EL0, EL1, and EL2 System register
 	 *  accesses to all Performance Monitors registers do not trap to EL3.
 	 *
-	 * MDCR_EL3.SCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
-	 *  prohibited in Secure state. This bit is RES0 in versions of the
-	 *  architecture with FEAT_PMUv3p5 not implemented, setting it to 1
-	 *  doesn't have any effect on them.
-	 *
-	 * MDCR_EL3.MCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
-	 *  prohibited in EL3. This bit is RES0 in versions of the
-	 *  architecture with FEAT_PMUv3p7 not implemented, setting it to 1
-	 *  doesn't have any effect on them.
-	 *
-	 * MDCR_EL3.SPME: Set to zero so that event counting by the programmable
-	 *  counters PMEVCNTR<n>_EL0 is prohibited in Secure state. If ARMv8.2
-	 *  Debug is not implemented this bit does not have any effect on the
-	 *  counters unless there is support for the implementation defined
-	 *  authentication interface ExternalSecureNoninvasiveDebugEnabled().
-	 *
 	 * MDCR_EL3.NSTB, MDCR_EL3.NSTBE: Set to zero so that Trace Buffer
 	 *  owning security state is Secure state. If FEAT_TRBE is implemented,
 	 *  accesses to Trace Buffer control registers at EL2 and EL1 in any
@@ -149,10 +133,9 @@
 	 * ---------------------------------------------------------------------
 	 */
 	mov_imm	x0, ((MDCR_EL3_RESET_VAL | MDCR_SDD_BIT | \
-		      MDCR_SPD32(MDCR_SPD32_DISABLE) | MDCR_SCCD_BIT | \
-		      MDCR_MCCD_BIT) & ~(MDCR_SPME_BIT | MDCR_TDOSA_BIT | \
-		      MDCR_TDA_BIT | MDCR_TPM_BIT | MDCR_NSTB(MDCR_NSTB_EL1) | \
-		      MDCR_NSTBE | MDCR_TTRF_BIT))
+		      MDCR_SPD32(MDCR_SPD32_DISABLE)) & \
+		    ~(MDCR_TDOSA_BIT | MDCR_TDA_BIT | MDCR_TPM_BIT | \
+		      MDCR_NSTB(MDCR_NSTB_EL1) | MDCR_NSTBE | MDCR_TTRF_BIT))
 
 	mrs	x1, id_aa64dfr0_el1
 	ubfx	x1, x1, #ID_AA64DFR0_TRACEFILT_SHIFT, #ID_AA64DFR0_TRACEFILT_LENGTH
@@ -162,36 +145,6 @@
 	msr	mdcr_el3, x0
 
 	/* ---------------------------------------------------------------------
-	 * Initialise PMCR_EL0 setting all fields rather than relying
-	 * on hw. Some fields are architecturally UNKNOWN on reset.
-	 *
-	 * PMCR_EL0.LP: Set to one so that event counter overflow, that
-	 *  is recorded in PMOVSCLR_EL0[0-30], occurs on the increment
-	 *  that changes PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU
-	 *  is implemented. This bit is RES0 in versions of the architecture
-	 *  earlier than ARMv8.5, setting it to 1 doesn't have any effect
-	 *  on them.
-	 *
-	 * PMCR_EL0.LC: Set to one so that cycle counter overflow, that
-	 *  is recorded in PMOVSCLR_EL0[31], occurs on the increment
-	 *  that changes PMCCNTR_EL0[63] from 1 to 0.
-	 *
-	 * PMCR_EL0.DP: Set to one so that the cycle counter,
-	 *  PMCCNTR_EL0 does not count when event counting is prohibited.
-	 *
-	 * PMCR_EL0.X: Set to zero to disable export of events.
-	 *
-	 * PMCR_EL0.D: Set to zero so that, when enabled, PMCCNTR_EL0
-	 *  counts on every clock cycle.
-	 * ---------------------------------------------------------------------
-	 */
-	mov_imm	x0, ((PMCR_EL0_RESET_VAL | PMCR_EL0_LP_BIT | \
-		      PMCR_EL0_LC_BIT | PMCR_EL0_DP_BIT) & \
-		    ~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT))
-
-	msr	pmcr_el0, x0
-
-	/* ---------------------------------------------------------------------
 	 * Enable External Aborts and SError Interrupts now that the exception
 	 * vectors have been setup.
 	 * ---------------------------------------------------------------------
@@ -340,10 +293,6 @@
 		isb
 	.endif /* _init_sctlr */
 
-#if DISABLE_MTPMU
-		bl	mtpmu_disable
-#endif
-
 	.if \_warm_boot_mailbox
 		/* -------------------------------------------------------------
 		 * This code will be executed for both warm and cold resets.
diff --git a/include/lib/el3_runtime/context_mgmt.h b/include/lib/el3_runtime/context_mgmt.h
index 1a76d8e..aa76f3b 100644
--- a/include/lib/el3_runtime/context_mgmt.h
+++ b/include/lib/el3_runtime/context_mgmt.h
@@ -37,6 +37,9 @@
 void cm_prepare_el3_exit_ns(void);
 
 #ifdef __aarch64__
+#if IMAGE_BL31
+void cm_manage_extensions_el3(void);
+#endif
 #if CTX_INCLUDE_EL2_REGS
 void cm_el2_sysregs_context_save(uint32_t security_state);
 void cm_el2_sysregs_context_restore(uint32_t security_state);
@@ -84,6 +87,7 @@
 #else
 void *cm_get_next_context(void);
 void cm_set_next_context(void *context);
+static inline void cm_manage_extensions_el3(void) {}
 #endif /* __aarch64__ */
 
 #endif /* CONTEXT_MGMT_H */
diff --git a/include/lib/extensions/pmuv3.h b/include/lib/extensions/pmuv3.h
new file mode 100644
index 0000000..5d5d055
--- /dev/null
+++ b/include/lib/extensions/pmuv3.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PMUV3_H
+#define PMUV3_H
+
+#include <context.h>
+
+void pmuv3_disable_el3(void);
+
+#ifdef __aarch64__
+void pmuv3_enable(cpu_context_t *ctx);
+void pmuv3_init_el2_unused(void);
+#endif /* __aarch64__ */
+
+#endif /* PMUV3_H */
diff --git a/lib/cpus/aarch32/cortex_a72.S b/lib/cpus/aarch32/cortex_a72.S
index 03914b2..77cf84d 100644
--- a/lib/cpus/aarch32/cortex_a72.S
+++ b/lib/cpus/aarch32/cortex_a72.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -87,11 +87,15 @@
 	b		cpu_rev_var_ls
 endfunc check_errata_859971
 
+add_erratum_entry cortex_a72, ERRATUM(859971), ERRATA_A72_859971
+
 func check_errata_cve_2017_5715
 	mov	r0, #ERRATA_MISSING
 	bx	lr
 endfunc check_errata_cve_2017_5715
 
+add_erratum_entry cortex_a72, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
+
 func check_errata_cve_2018_3639
 #if WORKAROUND_CVE_2018_3639
 	mov	r0, #ERRATA_APPLIES
@@ -101,11 +105,15 @@
 	bx	lr
 endfunc check_errata_cve_2018_3639
 
+add_erratum_entry cortex_a72, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
+
 func check_errata_cve_2022_23960
 	mov	r0, #ERRATA_MISSING
 	bx	lr
 endfunc check_errata_cve_2022_23960
 
+add_erratum_entry cortex_a72, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A72.
 	 * -------------------------------------------------
@@ -248,29 +256,7 @@
 	b	cortex_a72_disable_ext_debug
 endfunc cortex_a72_cluster_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for Cortex A72. Must follow AAPCS.
- */
-func cortex_a72_errata_report
-	push	{r12, lr}
-
-	bl	cpu_get_rev_var
-	mov	r4, r0
-
-	/*
-	 * Report all errata. The revision-variant information is passed to
-	 * checking functions of each errata.
-	 */
-	report_errata ERRATA_A72_859971, cortex_a72, 859971
-	report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715
-	report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639
-	report_errata WORKAROUND_CVE_2022_23960, cortex_a72, cve_2022_23960
-
-	pop	{r12, lr}
-	bx	lr
-endfunc cortex_a72_errata_report
-#endif
+errata_report_shim cortex_a72
 
 declare_cpu_ops cortex_a72, CORTEX_A72_MIDR, \
 	cortex_a72_reset_func, \
diff --git a/lib/cpus/aarch64/qemu_max.S b/lib/cpus/aarch64/qemu_max.S
index 8948fda..00963bc 100644
--- a/lib/cpus/aarch64/qemu_max.S
+++ b/lib/cpus/aarch64/qemu_max.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -47,14 +47,7 @@
 	b	dcsw_op_all
 endfunc qemu_max_cluster_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for QEMU "max". Must follow AAPCS.
- */
-func qemu_max_errata_report
-	ret
-endfunc qemu_max_errata_report
-#endif
+errata_report_shim qemu_max
 
 	/* ---------------------------------------------
 	 * This function provides cpu specific
diff --git a/lib/cpus/aarch64/rainier.S b/lib/cpus/aarch64/rainier.S
index 3b7b8b2..c770f54 100644
--- a/lib/cpus/aarch64/rainier.S
+++ b/lib/cpus/aarch64/rainier.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -41,78 +41,30 @@
 	ret
 endfunc rainier_disable_speculative_loads
 
-	/* --------------------------------------------------
-	 * Errata Workaround for Neoverse N1 Errata #1868343.
-	 * This applies to revision <= r4p0 of Neoverse N1.
-	 * This workaround is the same as the workaround for
-	 * errata 1262606 and 1275112 but applies to a wider
-	 * revision range.
-	 * Rainier R0P0 is based on Neoverse N1 R4P0 so the
-	 * workaround checks for r0p0 version of Rainier CPU.
-	 * Inputs:
-	 * x0: variant[4:7] and revision[0:3] of current cpu.
-	 * Shall clobber: x0, x1 & x17
-	 * --------------------------------------------------
-	 */
-func errata_n1_1868343_wa
-	/*
-	 * Compare x0 against revision r4p0
-	 */
-	mov	x17, x30
-	bl	check_errata_1868343
-	cbz	x0, 1f
-	mrs	x1, RAINIER_CPUACTLR_EL1
-	orr	x1, x1, RAINIER_CPUACTLR_EL1_BIT_13
-	msr	RAINIER_CPUACTLR_EL1, x1
-	isb
-1:
-	ret	x17
-endfunc errata_n1_1868343_wa
-
-func check_errata_1868343
-	/* Applies to r0p0 of Rainier CPU */
-	mov	x1, #0x00
-	b	cpu_rev_var_ls
-endfunc check_errata_1868343
+	/* Rainier R0P0 is based on Neoverse N1 R4P0. */
+workaround_reset_start rainier, ERRATUM(1868343), ERRATA_N1_1868343
+	sysreg_bit_set RAINIER_CPUACTLR_EL1, RAINIER_CPUACTLR_EL1_BIT_13
+workaround_reset_end rainier, ERRATUM(1868343)
 
-func rainier_reset_func
-	mov	x19, x30
+check_erratum_ls rainier, ERRATUM(1868343), CPU_REV(0, 0)
 
+cpu_reset_func_start rainier
 	bl	rainier_disable_speculative_loads
-
 	/* Forces all cacheable atomic instructions to be near */
-	mrs	x0, RAINIER_CPUACTLR2_EL1
-	orr	x0, x0, #RAINIER_CPUACTLR2_EL1_BIT_2
-	msr	RAINIER_CPUACTLR2_EL1, x0
-	isb
-
-	bl	cpu_get_rev_var
-	mov	x18, x0
-
-#if ERRATA_N1_1868343
-	mov	x0, x18
-	bl	errata_n1_1868343_wa
-#endif
+	sysreg_bit_set RAINIER_CPUACTLR2_EL1, RAINIER_CPUACTLR2_EL1_BIT_2
 
 #if ENABLE_FEAT_AMU
 	/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
-	mrs	x0, actlr_el3
-	orr	x0, x0, #RAINIER_ACTLR_AMEN_BIT
-	msr	actlr_el3, x0
+	sysreg_bit_set actlr_el3, RAINIER_ACTLR_AMEN_BIT
 
 	/* Make sure accesses from EL0/EL1 are not trapped to EL2 */
-	mrs	x0, actlr_el2
-	orr	x0, x0, #RAINIER_ACTLR_AMEN_BIT
-	msr	actlr_el2, x0
+	sysreg_bit_set actlr_el2, RAINIER_ACTLR_AMEN_BIT
 
 	/* Enable group0 counters */
 	mov	x0, #RAINIER_AMU_GROUP0_MASK
 	msr	CPUAMCNTENSET_EL0, x0
 #endif
-
-	isb
-	ret	x19
-endfunc rainier_reset_func
+cpu_reset_func_end rainier
 
 	/* ---------------------------------------------
 	 * HW will do the cache maintenance while powering down
@@ -123,33 +75,12 @@
 	 * Enable CPU power down bit in power control register
 	 * ---------------------------------------------
 	 */
-	mrs	x0, RAINIER_CPUPWRCTLR_EL1
-	orr	x0, x0, #RAINIER_CORE_PWRDN_EN_MASK
-	msr	RAINIER_CPUPWRCTLR_EL1, x0
+	sysreg_bit_set RAINIER_CPUPWRCTLR_EL1, RAINIER_CORE_PWRDN_EN_MASK
 	isb
 	ret
 endfunc rainier_core_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for Rainier. Must follow AAPCS.
- */
-func rainier_errata_report
-	stp	x8, x30, [sp, #-16]!
-
-	bl	cpu_get_rev_var
-	mov	x8, x0
-
-	/*
-	 * Report all errata. The revision-variant information is passed to
-	 * checking functions of each errata.
-	 */
-	report_errata ERRATA_N1_1868343, rainier, 1868343
-
-	ldp	x8, x30, [sp], #16
-	ret
-endfunc rainier_errata_report
-#endif
+errata_report_shim rainier
 
 	/* ---------------------------------------------
 	 * This function provides Rainier specific
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
index 62e30fc..6414aaa 100644
--- a/lib/el3_runtime/aarch32/context_mgmt.c
+++ b/lib/el3_runtime/aarch32/context_mgmt.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2023, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -17,6 +17,7 @@
 #include <context.h>
 #include <lib/el3_runtime/context_mgmt.h>
 #include <lib/extensions/amu.h>
+#include <lib/extensions/pmuv3.h>
 #include <lib/extensions/sys_reg_trace.h>
 #include <lib/extensions/trf.h>
 #include <lib/utils.h>
@@ -147,6 +148,12 @@
 	if (is_feat_trf_supported()) {
 		trf_enable();
 	}
+
+	/*
+	 * Also applies to PMU < v3. The PMU is only disabled for EL3 and Secure
+	 * state execution. This does not affect lower NS ELs.
+	 */
+	pmuv3_disable_el3();
 #endif
 }
 
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 9922fb1..771fcdc 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -568,6 +568,8 @@
 	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
 	mrs	x18, sp_el0
 	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
+
+	/* PMUv3 is presumed to be always present */
 	mrs	x9, pmcr_el0
 	str	x9, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
 	/* Disable cycle counter when event counting is prohibited */
@@ -651,6 +653,8 @@
 	msr	APGAKeyLo_EL1, x8
 	msr	APGAKeyHi_EL1, x9
 #endif /* CTX_INCLUDE_PAUTH_REGS */
+
+	/* PMUv3 is presumed to be always present */
 	ldr	x0, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
 	msr	pmcr_el0, x0
 	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 3760b8f..4a6598a 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -24,6 +24,7 @@
 #include <lib/extensions/amu.h>
 #include <lib/extensions/brbe.h>
 #include <lib/extensions/mpam.h>
+#include <lib/extensions/pmuv3.h>
 #include <lib/extensions/sme.h>
 #include <lib/extensions/spe.h>
 #include <lib/extensions/sve.h>
@@ -37,6 +38,7 @@
 CASSERT(((TWED_DELAY & ~SCR_TWEDEL_MASK) == 0U), assert_twed_delay_value_check);
 #endif /* ENABLE_FEAT_TWED */
 
+static void manage_extensions_nonsecure(cpu_context_t *ctx);
 static void manage_extensions_secure(cpu_context_t *ctx);
 
 static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info *ep)
@@ -265,16 +267,6 @@
 	write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_ICC_SRE_EL2,
 			icc_sre_el2);
 
-	/*
-	 * Initialize MDCR_EL2.HPMN to its hardware reset value so we don't
-	 * throw anyone off who expects this to be sensible.
-	 * TODO: A similar thing happens in cm_prepare_el3_exit. They should be
-	 * unified with the proper PMU implementation
-	 */
-	u_register_t mdcr_el2 = ((read_pmcr_el0() >> PMCR_EL0_N_SHIFT) &
-			PMCR_EL0_N_MASK);
-	write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_MDCR_EL2, mdcr_el2);
-
 	if (is_feat_hcx_supported()) {
 		/*
 		 * Initialize register HCRX_EL2 with its init value.
@@ -288,6 +280,8 @@
 			HCRX_EL2_INIT_VAL);
 	}
 #endif /* CTX_INCLUDE_EL2_REGS */
+
+	manage_extensions_nonsecure(ctx);
 }
 
 /*******************************************************************************
@@ -504,9 +498,11 @@
 /*******************************************************************************
  * Enable architecture extensions on first entry to Non-secure world.
  * When EL2 is implemented but unused `el2_unused` is non-zero, otherwise
- * it is zero.
+ * it is zero. This function updates some registers in-place; its contents are
+ * gradually being moved out to cm_manage_extensions_el3 and
+ * cm_manage_extensions_nonsecure.
  ******************************************************************************/
-static void manage_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
+static void manage_extensions_nonsecure_mixed(bool el2_unused, cpu_context_t *ctx)
 {
 #if IMAGE_BL31
 	if (is_feat_spe_supported()) {
@@ -549,6 +545,39 @@
 }
 
 /*******************************************************************************
+ * Enable architecture extensions for EL3 execution. This function only updates
+ * registers in-place which are expected to either never change or be
+ * overwritten by el3_exit.
+ ******************************************************************************/
+#if IMAGE_BL31
+void cm_manage_extensions_el3(void)
+{
+	pmuv3_disable_el3();
+}
+#endif /* IMAGE_BL31 */
+
+/*******************************************************************************
+ * Enable architecture extensions on first entry to Non-secure world.
+ ******************************************************************************/
+static void manage_extensions_nonsecure(cpu_context_t *ctx)
+{
+#if IMAGE_BL31
+	pmuv3_enable(ctx);
+#endif /* IMAGE_BL31 */
+}
+
+/*******************************************************************************
+ * Enable architecture extensions in-place at EL2 on first entry to Non-secure
+ * world when EL2 is empty and unused.
+ ******************************************************************************/
+static void manage_extensions_nonsecure_el2_unused(void)
+{
+#if IMAGE_BL31
+	pmuv3_init_el2_unused();
+#endif /* IMAGE_BL31 */
+}
+
+/*******************************************************************************
  * Enable architecture extensions on first entry to Secure world.
  ******************************************************************************/
 static void manage_extensions_secure(cpu_context_t *ctx)
@@ -758,24 +787,11 @@
 			 * relying on hw. Some fields are architecturally
 			 * UNKNOWN on reset.
 			 *
-			 * MDCR_EL2.HLP: Set to one so that event counter
-			 *  overflow, that is recorded in PMOVSCLR_EL0[0-30],
-			 *  occurs on the increment that changes
-			 *  PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU is
-			 *  implemented. This bit is RES0 in versions of the
-			 *  architecture earlier than ARMv8.5, setting it to 1
-			 *  doesn't have any effect on them.
-			 *
 			 * MDCR_EL2.TTRF: Set to zero so that access to Trace
 			 *  Filter Control register TRFCR_EL1 at EL1 is not
 			 *  trapped to EL2. This bit is RES0 in versions of
 			 *  the architecture earlier than ARMv8.4.
 			 *
-			 * MDCR_EL2.HPMD: Set to one so that event counting is
-			 *  prohibited at EL2. This bit is RES0 in versions of
-			 *  the architecture earlier than ARMv8.1, setting it
-			 *  to 1 doesn't have any effect on them.
-			 *
 			 * MDCR_EL2.TPMS: Set to zero so that accesses to
 			 *  Statistical Profiling control registers from EL1
 			 *  do not trap to EL2. This bit is RES0 when SPE is
@@ -795,35 +811,15 @@
 			 * MDCR_EL2.TDE: Set to zero so that debug exceptions
 			 *  are not routed to EL2.
 			 *
-			 * MDCR_EL2.HPME: Set to zero to disable EL2 Performance
-			 *  Monitors.
-			 *
-			 * MDCR_EL2.TPM: Set to zero so that Non-secure EL0 and
-			 *  EL1 accesses to all Performance Monitors registers
-			 *  are not trapped to EL2.
-			 *
-			 * MDCR_EL2.TPMCR: Set to zero so that Non-secure EL0
-			 *  and EL1 accesses to the PMCR_EL0 or PMCR are not
-			 *  trapped to EL2.
-			 *
-			 * MDCR_EL2.HPMN: Set to value of PMCR_EL0.N which is the
-			 *  architecturally-defined reset value.
-			 *
 			 * MDCR_EL2.E2TB: Set to zero so that the trace Buffer
 			 *  owning exception level is NS-EL1 and, tracing is
 			 *  prohibited at NS-EL2. These bits are RES0 when
 			 *  FEAT_TRBE is not implemented.
 			 */
-			mdcr_el2 = ((MDCR_EL2_RESET_VAL | MDCR_EL2_HLP |
-				     MDCR_EL2_HPMD) |
-				   ((read_pmcr_el0() & PMCR_EL0_N_BITS)
-				   >> PMCR_EL0_N_SHIFT)) &
-				   ~(MDCR_EL2_TTRF | MDCR_EL2_TPMS |
+			mdcr_el2 = ((MDCR_EL2_RESET_VAL) & ~(MDCR_EL2_TTRF |
 				     MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT |
 				     MDCR_EL2_TDA_BIT | MDCR_EL2_TDE_BIT |
-				     MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT |
-				     MDCR_EL2_TPMCR_BIT |
-				     MDCR_EL2_E2TB(MDCR_EL2_E2TB_EL1));
+				     MDCR_EL2_E2TB(MDCR_EL2_E2TB_EL1)));
 
 			write_mdcr_el2(mdcr_el2);
 
@@ -845,8 +841,10 @@
 			 */
 			write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL &
 						~(CNTHP_CTL_ENABLE_BIT));
+
+			manage_extensions_nonsecure_el2_unused();
 		}
-		manage_extensions_nonsecure(el2_unused, ctx);
+		manage_extensions_nonsecure_mixed(el2_unused, ctx);
 	}
 
 	cm_el1_sysregs_context_restore(security_state);
@@ -1167,7 +1165,7 @@
 	 * direct register updates. Therefore, do this here
 	 * instead of when setting up context.
 	 */
-	manage_extensions_nonsecure(0, ctx);
+	manage_extensions_nonsecure_mixed(0, ctx);
 
 	/*
 	 * Set the NS bit to be able to access the ICC_SRE_EL2
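To make the split in this refactor easier to follow, below is a compilable toy model of the three hook lifetimes introduced above. The caller names in the comments come from this patch (bl31_main(), psci_warmboot_entrypoint(), cm_prepare_el3_exit()); the bodies are placeholder prints, not real TF-A code:

#include <stdio.h>

typedef struct cpu_context { int dummy; } cpu_context_t;

/* Boot path, once per CPU: registers that live as long as TF-A itself */
static void cm_manage_extensions_el3(void)
{
	puts("pmuv3_disable_el3()");
}

/* Per-world context setup: values restored by el3_exit on every entry */
static void manage_extensions_nonsecure(cpu_context_t *ctx)
{
	(void)ctx;
	puts("pmuv3_enable(ctx)");
}

/* Direct EL2 register init when EL2 is implemented but left unused */
static void manage_extensions_nonsecure_el2_unused(void)
{
	puts("pmuv3_init_el2_unused()");
}

int main(void)
{
	cpu_context_t ns_ctx = { 0 };

	cm_manage_extensions_el3();		  /* bl31_main() / PSCI warm boot */
	manage_extensions_nonsecure(&ns_ctx);	  /* NS context setup */
	manage_extensions_nonsecure_el2_unused(); /* cm_prepare_el3_exit() */
	return 0;
}

The remaining manage_extensions_nonsecure_mixed() keeps the not-yet-migrated logic and, per the comment above, is expected to dissolve into the other hooks over time.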
diff --git a/lib/extensions/mtpmu/aarch32/mtpmu.S b/lib/extensions/mtpmu/aarch32/mtpmu.S
deleted file mode 100644
index 834cee3..0000000
--- a/lib/extensions/mtpmu/aarch32/mtpmu.S
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-
-	.global	mtpmu_disable
-
-/* -------------------------------------------------------------
- * The functions in this file are called at entrypoint, before
- * the CPU has decided whether this is a cold or a warm boot.
- * Therefore there are no stack yet to rely on for a C function
- * call.
- * -------------------------------------------------------------
- */
-
-/*
- * bool mtpmu_supported(void)
- *
- * Return a boolean indicating whether FEAT_MTPMU is supported or not.
- *
- * Trash registers: r0.
- */
-func mtpmu_supported
-	ldcopr	r0, ID_DFR1
-	and	r0, r0, #(ID_DFR1_MTPMU_MASK >> ID_DFR1_MTPMU_SHIFT)
-	cmp	r0, #ID_DFR1_MTPMU_SUPPORTED
-	mov	r0, #0
-	addeq	r0, r0, #1
-	bx	lr
-endfunc mtpmu_supported
-
-/*
- * bool el_implemented(unsigned int el)
- *
- * Return a boolean indicating if the specified EL (2 or 3) is implemented.
- *
- * Trash registers: r0
- */
-func el_implemented
-	cmp	r0, #3
-	ldcopr	r0, ID_PFR1
-	lsreq	r0, r0, #ID_PFR1_SEC_SHIFT
-	lsrne	r0, r0, #ID_PFR1_VIRTEXT_SHIFT
-	/*
-	 * ID_PFR1_VIRTEXT_MASK is the same as ID_PFR1_SEC_MASK
-	 * so use any one of them
-	 */
-	and	r0, r0, #ID_PFR1_VIRTEXT_MASK
-	cmp	r0, #ID_PFR1_ELx_ENABLED
-	mov	r0, #0
-	addeq	r0, r0, #1
-	bx	lr
-endfunc el_implemented
-
-/*
- * void mtpmu_disable(void)
- *
- * Disable mtpmu feature if supported.
- *
- * Trash register: r0, r1, r2
- */
-func mtpmu_disable
-	mov	r2, lr
-	bl	mtpmu_supported
-	cmp	r0, #0
-	bxeq	r2	/* FEAT_MTPMU not supported */
-
-	/* FEAT_MTMPU Supported */
-	mov	r0, #3
-	bl	el_implemented
-	cmp	r0, #0
-	beq	1f
-
-	/* EL3 implemented */
-	ldcopr	r0, SDCR
-	ldr	r1, =SDCR_MTPME_BIT
-	bic	r0, r0, r1
-	stcopr	r0, SDCR
-
-	/*
-	 * If EL3 is implemented, HDCR.MTPME is implemented as Res0 and
-	 * FEAT_MTPMU is controlled only from EL3, so no need to perform
-	 * any operations for EL2.
-	 */
-	isb
-	bx	r2
-1:
-	/* EL3 not implemented */
-	mov	r0, #2
-	bl	el_implemented
-	cmp	r0, #0
-	bxeq	r2	/* No EL2 or EL3 implemented */
-
-	/* EL2 implemented */
-	ldcopr	r0, HDCR
-	ldr	r1, =HDCR_MTPME_BIT
-	orr	r0, r0, r1
-	stcopr	r0, HDCR
-	isb
-	bx	r2
-endfunc mtpmu_disable
diff --git a/lib/extensions/mtpmu/aarch64/mtpmu.S b/lib/extensions/mtpmu/aarch64/mtpmu.S
deleted file mode 100644
index 0a1d57b..0000000
--- a/lib/extensions/mtpmu/aarch64/mtpmu.S
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-
-	.global	mtpmu_disable
-
-/* -------------------------------------------------------------
- * The functions in this file are called at entrypoint, before
- * the CPU has decided whether this is a cold or a warm boot.
- * Therefore there are no stack yet to rely on for a C function
- * call.
- * -------------------------------------------------------------
- */
-
-/*
- * bool mtpmu_supported(void)
- *
- * Return a boolean indicating whether FEAT_MTPMU is supported or not.
- *
- * Trash registers: x0, x1
- */
-func mtpmu_supported
-	mrs	x0, id_aa64dfr0_el1
-	mov_imm	x1, ID_AA64DFR0_MTPMU_MASK
-	and	x0, x1, x0, LSR #ID_AA64DFR0_MTPMU_SHIFT
-	cmp	x0, ID_AA64DFR0_MTPMU_SUPPORTED
-	cset	x0, eq
-	ret
-endfunc mtpmu_supported
-
-/*
- * bool el_implemented(unsigned int el_shift)
- *
- * Return a boolean indicating if the specified EL is implemented.
- * The EL is represented as the bitmask shift on id_aa64pfr0_el1 register.
- *
- * Trash registers: x0, x1
- */
-func el_implemented
-	mrs	x1, id_aa64pfr0_el1
-	lsr	x1, x1, x0
-	cmp	x1, #ID_AA64PFR0_ELX_MASK
-	cset	x0, eq
-	ret
-endfunc el_implemented
-
-/*
- * void mtpmu_disable(void)
- *
- * Disable mtpmu feature if supported.
- *
- * Trash register: x0, x1, x30
- */
-func mtpmu_disable
-	mov	x10, x30
-	bl	mtpmu_supported
-	cbz	x0, exit_disable
-
-	/* FEAT_MTMPU Supported */
-	mov_imm	x0, ID_AA64PFR0_EL3_SHIFT
-	bl	el_implemented
-	cbz	x0, 1f
-
-	/* EL3 implemented */
-	mrs	x0, mdcr_el3
-	mov_imm x1, MDCR_MTPME_BIT
-	bic	x0, x0, x1
-	msr	mdcr_el3, x0
-
-	/*
-	 * If EL3 is implemented, MDCR_EL2.MTPME is implemented as Res0 and
-	 * FEAT_MTPMU is controlled only from EL3, so no need to perform
-	 * any operations for EL2.
-	 */
-	isb
-exit_disable:
-	ret	x10
-1:
-	/* EL3 not implemented */
-	mov_imm	x0, ID_AA64PFR0_EL2_SHIFT
-	bl	el_implemented
-	cbz	x0, exit_disable
-
-	/* EL2 implemented */
-	mrs	x0, mdcr_el2
-	mov_imm x1, MDCR_EL2_MTPME
-	bic	x0, x0, x1
-	msr	mdcr_el2, x0
-	isb
-	ret	x10
-endfunc mtpmu_disable
diff --git a/lib/extensions/pmuv3/aarch32/pmuv3.c b/lib/extensions/pmuv3/aarch32/pmuv3.c
new file mode 100644
index 0000000..fe4205e
--- /dev/null
+++ b/lib/extensions/pmuv3/aarch32/pmuv3.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <lib/extensions/pmuv3.h>
+
+static u_register_t mtpmu_disable_el3(u_register_t sdcr)
+{
+	if (!is_feat_mtpmu_supported()) {
+		return sdcr;
+	}
+
+	/*
+	 * SDCR.MTPME = 0
+	 * FEAT_MTPMU is disabled. The Effective value of PMEVTYPER<n>.MT is
+	 * zero.
+	 */
+	sdcr &= ~SDCR_MTPME_BIT;
+
+	return sdcr;
+}
+
+/*
+ * Applies to all PMU versions. Name is PMUv3 for compatibility with aarch64 and
+ * to not clash with platforms which reuse the PMU name
+ */
+void pmuv3_disable_el3(void)
+{
+	u_register_t sdcr = read_sdcr();
+
+	/* ---------------------------------------------------------------------
+	 * Initialise SDCR, setting all the fields rather than relying on hw.
+	 *
+	 * SDCR.SCCD: Set to one so that cycle counting by PMCCNTR is prohibited
+	 *  in Secure state. This bit is RES0 in versions of the architecture
+	 *  earlier than ARMv8.5
+	 *
+	 * SDCR.SPME: Set to zero so that event counting is prohibited in Secure
+	 *  state (and explicitly EL3 with later revisions). If ARMv8.2 Debug is
+	 *  not implemented this bit does not have any effect on the counters
+	 *  unless there is support for the implementation defined
+	 *  authentication interface ExternalSecureNoninvasiveDebugEnabled().
+	 * ---------------------------------------------------------------------
+	 */
+	sdcr = (sdcr | SDCR_SCCD_BIT) & ~SDCR_SPME_BIT;
+	sdcr = mtpmu_disable_el3(sdcr);
+	write_sdcr(sdcr);
+
+	/* ---------------------------------------------------------------------
+	 * Initialise PMCR, setting all fields rather than relying
+	 * on hw. Some fields are architecturally UNKNOWN on reset.
+	 *
+	 * PMCR.DP: Set to one to prohibit cycle counting whilst in Secure mode.
+	 *
+	 * PMCR.X: Set to zero to disable export of events.
+	 *
+	 * PMCR.C: Set to one to reset PMCCNTR.
+	 *
+	 * PMCR.P: Set to one to reset each event counter PMEVCNTR<n> to zero.
+	 *
+	 * PMCR.E: Set to zero to disable cycle and event counters.
+	 * ---------------------------------------------------------------------
+	 */
+
+	write_pmcr((read_pmcr() | PMCR_DP_BIT | PMCR_C_BIT | PMCR_P_BIT) &
+		   ~(PMCR_X_BIT | PMCR_E_BIT));
+}
diff --git a/lib/extensions/pmuv3/aarch64/pmuv3.c b/lib/extensions/pmuv3/aarch64/pmuv3.c
new file mode 100644
index 0000000..f83a5ee
--- /dev/null
+++ b/lib/extensions/pmuv3/aarch64/pmuv3.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <lib/extensions/pmuv3.h>
+
+static u_register_t init_mdcr_el2_hpmn(u_register_t mdcr_el2)
+{
+	/*
+	 * Initialize MDCR_EL2.HPMN to its hardware reset value so we don't
+	 * throw anyone off who expects this to be sensible.
+	 */
+	mdcr_el2 &= ~MDCR_EL2_HPMN_MASK;
+	mdcr_el2 |= ((read_pmcr_el0() >> PMCR_EL0_N_SHIFT) & PMCR_EL0_N_MASK);
+
+	return mdcr_el2;
+}
+
+void pmuv3_enable(cpu_context_t *ctx)
+{
+#if CTX_INCLUDE_EL2_REGS
+	u_register_t mdcr_el2;
+
+	mdcr_el2 = read_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_MDCR_EL2);
+	mdcr_el2 = init_mdcr_el2_hpmn(mdcr_el2);
+	write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_MDCR_EL2, mdcr_el2);
+#endif /* CTX_INCLUDE_EL2_REGS */
+}
+
+static u_register_t mtpmu_disable_el3(u_register_t mdcr_el3)
+{
+	if (!is_feat_mtpmu_supported()) {
+		return mdcr_el3;
+	}
+
+	/*
+	 * MDCR_EL3.MTPME = 0
+	 * FEAT_MTPMU is disabled. The Effective value of PMEVTYPER<n>_EL0.MT is
+	 * zero.
+	 */
+	mdcr_el3 &= ~MDCR_MTPME_BIT;
+
+	return mdcr_el3;
+}
+
+void pmuv3_disable_el3(void)
+{
+	u_register_t mdcr_el3 = read_mdcr_el3();
+
+	/* ---------------------------------------------------------------------
+	 * Initialise MDCR_EL3, setting all fields rather than relying on hw.
+	 * Some fields are architecturally UNKNOWN on reset.
+	 *
+	 * MDCR_EL3.MPMX: Set to zero to not affect event counters (when
+	 * SPME = 0).
+	 *
+	 * MDCR_EL3.MCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
+	 *  prohibited in EL3. This bit is RES0 in versions of the
+	 *  architecture with FEAT_PMUv3p7 not implemented.
+	 *
+	 * MDCR_EL3.SCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
+	 *  prohibited in Secure state. This bit is RES0 in versions of the
+	 *  architecture with FEAT_PMUv3p5 not implemented.
+	 *
+	 * MDCR_EL3.SPME: Set to zero so that event counting is prohibited in
+	 *  Secure state (and explicitly EL3 with later revisions). If ARMv8.2
+	 *  Debug is not implemented this bit does not have any effect on the
+	 *  counters unless there is support for the implementation defined
+	 *  authentication interface ExternalSecureNoninvasiveDebugEnabled().
+	 *
+	 * The SPME/MPMX combination is a little tricky. Below is a small
+	 * summary if another combination is ever needed:
+	 * SPME | MPMX | secure world |   EL3
+	 * -------------------------------------
+	 *   0  |  0   |    disabled  | disabled
+	 *   1  |  0   |    enabled   | enabled
+	 *   0  |  1   |    enabled   | disabled
+	 *   1  |  1   |    enabled   | disabled only for counters 0 to
+	 *                              MDCR_EL2.HPMN - 1. Enabled for the rest
+	 */
+	mdcr_el3 = (mdcr_el3 | MDCR_SCCD_BIT | MDCR_MCCD_BIT) &
+		  ~(MDCR_MPMX_BIT | MDCR_SPME_BIT);
+	mdcr_el3 = mtpmu_disable_el3(mdcr_el3);
+	write_mdcr_el3(mdcr_el3);
+
+	/* ---------------------------------------------------------------------
+	 * Initialise PMCR_EL0 setting all fields rather than relying
+	 * on hw. Some fields are architecturally UNKNOWN on reset.
+	 *
+	 * PMCR_EL0.DP: Set to one so that the cycle counter,
+	 *  PMCCNTR_EL0 does not count when event counting is prohibited.
+	 *  Necessary on PMUv3 <= p7 where MDCR_EL3.{SCCD,MCCD} are not
+	 *  available
+	 *
+	 * PMCR_EL0.X: Set to zero to disable export of events.
+	 *
+	 * PMCR_EL0.C: Set to one to reset PMCCNTR_EL0 to zero.
+	 *
+	 * PMCR_EL0.P: Set to one to reset each event counter PMEVCNTR<n>_EL0 to
+	 *  zero.
+	 *
+	 * PMCR_EL0.E: Set to zero to disable cycle and event counters.
+	 * ---------------------------------------------------------------------
+	 */
+	write_pmcr_el0((read_pmcr_el0() | PMCR_EL0_DP_BIT | PMCR_EL0_C_BIT |
+			PMCR_EL0_P_BIT) & ~(PMCR_EL0_X_BIT | PMCR_EL0_E_BIT));
+}
+
+static u_register_t mtpmu_disable_el2(u_register_t mdcr_el2)
+{
+	if (!is_feat_mtpmu_supported()) {
+		return mdcr_el2;
+	}
+
+	/*
+	 * MDCR_EL2.MTPME = 0
+	 * FEAT_MTPMU is disabled. The Effective value of PMEVTYPER<n>_EL0.MT is
+	 * zero.
+	 */
+	mdcr_el2 &= ~MDCR_EL2_MTPME;
+
+	return mdcr_el2;
+}
+
+void pmuv3_init_el2_unused(void)
+{
+	u_register_t mdcr_el2 = read_mdcr_el2();
+
+	/*
+	 * Initialise MDCR_EL2, setting all fields rather than
+	 * relying on hw. Some fields are architecturally
+	 * UNKNOWN on reset.
+	 *
+	 * MDCR_EL2.HLP: Set to one so that event counter overflow, that is
+	 *  recorded in PMOVSCLR_EL0[0-30], occurs on the increment that changes
+	 *  PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU is implemented.
+	 *  This bit is RES0 in versions of the architecture earlier than
+	 *  ARMv8.5, setting it to 1 doesn't have any effect on them.
+	 *
+	 * MDCR_EL2.HCCD: Set to one to prohibit cycle counting at EL2. This bit
+	 *  is RES0 in versions of the architecture with FEAT_PMUv3p5 not
+	 *  implemented.
+	 *
+	 * MDCR_EL2.HPMD: Set to one so that event counting is
+	 *  prohibited at EL2 for counters n < MDCR_EL2.HPMN. This bit is RES0
+	 *  in versions of the architecture with FEAT_PMUv3p1 not implemented.
+	 *
+	 * MDCR_EL2.HPME: Set to zero to disable event counters for counters
+	 *  n >= MDCR_EL2.HPMN.
+	 *
+	 * MDCR_EL2.TPM: Set to zero so that Non-secure EL0 and
+	 *  EL1 accesses to all Performance Monitors registers
+	 *  are not trapped to EL2.
+	 *
+	 * MDCR_EL2.TPMCR: Set to zero so that Non-secure EL0
+	 *  and EL1 accesses to the PMCR_EL0 or PMCR are not
+	 *  trapped to EL2.
+	 */
+	mdcr_el2 = (mdcr_el2 | MDCR_EL2_HLP_BIT | MDCR_EL2_HPMD_BIT |
+		    MDCR_EL2_HCCD_BIT) &
+		  ~(MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT | MDCR_EL2_TPMCR_BIT);
+	mdcr_el2 = init_mdcr_el2_hpmn(mdcr_el2);
+	mdcr_el2 = mtpmu_disable_el2(mdcr_el2);
+	write_mdcr_el2(mdcr_el2);
+}
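As a worked example of what init_mdcr_el2_hpmn() computes: PMCR_EL0.N (bits [15:11]) reports how many event counters the PE implements, and seeding MDCR_EL2.HPMN with that value leaves all counters visible to the lower ELs. A standalone illustration with the field constants mirrored from arch.h (illustration only, not TF-A code):

#include <stdint.h>
#include <stdio.h>

#define PMCR_EL0_N_SHIFT	11	/* PMCR_EL0.N: bits [15:11] */
#define PMCR_EL0_N_MASK		0x1fULL
#define MDCR_EL2_HPMN_MASK	0x1fULL

/* Same bit manipulation as init_mdcr_el2_hpmn() above */
static uint64_t init_hpmn(uint64_t mdcr_el2, uint64_t pmcr_el0)
{
	mdcr_el2 &= ~MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= (pmcr_el0 >> PMCR_EL0_N_SHIFT) & PMCR_EL0_N_MASK;
	return mdcr_el2;
}

int main(void)
{
	uint64_t pmcr = 6ULL << PMCR_EL0_N_SHIFT; /* a PE with 6 counters */

	/* Prints MDCR_EL2 = 0x6: HPMN = N, so EL1/EL0 own all 6 counters */
	printf("MDCR_EL2 = 0x%llx\n",
	       (unsigned long long)init_hpmn(0, pmcr));
	return 0;
}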
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
index bfc09cc..8aa0cce 100644
--- a/lib/psci/psci_common.c
+++ b/lib/psci/psci_common.c
@@ -985,6 +985,9 @@
 	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
 	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
 
+	/* Init registers that never change for the lifetime of TF-A */
+	cm_manage_extensions_el3();
+
 	/*
 	 * Verify that we have been explicitly turned ON or resumed from
 	 * suspend.
diff --git a/plat/arm/board/tc/platform.mk b/plat/arm/board/tc/platform.mk
index c29537c..d383ead 100644
--- a/plat/arm/board/tc/platform.mk
+++ b/plat/arm/board/tc/platform.mk
@@ -6,8 +6,7 @@
 include common/fdt_wrappers.mk
 
 ifeq ($(TARGET_PLATFORM), 0)
-$(warning Platform ${PLAT}$(TARGET_PLATFORM) is deprecated. \
-Some of the features might not work as expected)
+	$(error Platform ${PLAT}$(TARGET_PLATFORM) is deprecated.)
 endif
 
 ifeq ($(shell expr $(TARGET_PLATFORM) \<= 2), 0)
@@ -70,13 +69,6 @@
 
 PLAT_INCLUDES		+=	-I${TC_BASE}/include/
 
-# CPU libraries for TARGET_PLATFORM=0
-ifeq (${TARGET_PLATFORM}, 0)
-TC_CPU_SOURCES	+=	lib/cpus/aarch64/cortex_a510.S	\
-			lib/cpus/aarch64/cortex_a710.S	\
-			lib/cpus/aarch64/cortex_x2.S
-endif
-
 # CPU libraries for TARGET_PLATFORM=1
 ifeq (${TARGET_PLATFORM}, 1)
 TC_CPU_SOURCES	+=	lib/cpus/aarch64/cortex_a510.S \
diff --git a/plat/qemu/qemu_sbsa/sbsa_sip_svc.c b/plat/qemu/qemu_sbsa/sbsa_sip_svc.c
index 37460d7..05ebec4 100644
--- a/plat/qemu/qemu_sbsa/sbsa_sip_svc.c
+++ b/plat/qemu/qemu_sbsa/sbsa_sip_svc.c
@@ -26,8 +26,10 @@
  * need version of whole 'virtual hardware platform'.
  */
 #define SIP_SVC_VERSION  SIP_FUNCTION_ID(1)
-
 #define SIP_SVC_GET_GIC  SIP_FUNCTION_ID(100)
+#define SIP_SVC_GET_GIC_ITS SIP_FUNCTION_ID(101)
+
+static uint64_t gic_its_addr;
 
 void sbsa_set_gic_bases(const uintptr_t gicd_base, const uintptr_t gicr_base);
 uintptr_t sbsa_get_gicd(void);
@@ -45,9 +47,12 @@
 	 * QEMU gives us this DeviceTree node:
 	 *
 	 * intc {
-		reg = < 0x00 0x40060000 0x00 0x10000
-			0x00 0x40080000 0x00 0x4000000>;
-	};
+	 *	 reg = < 0x00 0x40060000 0x00 0x10000
+	 *		 0x00 0x40080000 0x00 0x4000000>;
+	 *       its {
+	 *               reg = <0x00 0x44081000 0x00 0x20000>;
+	 *       };
+	 * };
 	 */
 	node = fdt_path_offset(dtb, "/intc");
 	if (node < 0) {
@@ -74,6 +79,18 @@
 	INFO("GICR base = 0x%lx\n", gicr_base);
 
 	sbsa_set_gic_bases(gicd_base, gicr_base);
+
+	node = fdt_path_offset(dtb, "/intc/its");
+	if (node < 0) {
+		return;
+	}
+
+	err = fdt_get_reg_props_by_index(dtb, node, 0, &gic_its_addr, NULL);
+	if (err < 0) {
+		ERROR("Failed to read GICI reg property of GIC node\n");
+		return;
+	}
+	INFO("GICI base = 0x%lx\n", gic_its_addr);
 }
 
 void read_platform_version(void *dtb)
@@ -143,6 +160,9 @@
 	case SIP_SVC_GET_GIC:
 		SMC_RET3(handle, NULL, sbsa_get_gicd(), sbsa_get_gicr());
 
+	case SIP_SVC_GET_GIC_ITS:
+		SMC_RET2(handle, NULL, gic_its_addr);
+
 	default:
 		ERROR("%s: unhandled SMC (0x%x) (function id: %d)\n", __func__, smc_fid,
 		      smc_fid - SIP_FUNCTION);
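For completeness, a normal-world caller would retrieve the ITS base with a SiP SMC. A minimal aarch64 sketch follows; the function-ID constant assumes the platform's SIP_FUNCTION_ID(n) encodes SMC64 SiP fast calls as 0xC2000000 + n (an assumption, since the macro is not shown in this hunk). Per the handler above, success returns 0 in x0 and the ITS base in x1:

#include <stdint.h>

/* Assumed encoding: SIP_FUNCTION_ID(101) == 0xC2000000 + 101 */
#define SIP_SVC_GET_GIC_ITS	0xC2000065U

static uint64_t sbsa_get_gic_its_base(void)
{
	register uint64_t x0 __asm__("x0") = SIP_SVC_GET_GIC_ITS;
	register uint64_t x1 __asm__("x1");

	/* Only x0-x3 are listed here; SMCCC allows x0-x17 to be clobbered */
	__asm__ volatile("smc #0"
			 : "+r"(x0), "=r"(x1)
			 :
			 : "x2", "x3", "memory");

	return (x0 == 0U) ? x1 : 0U;	/* x0 == 0 (NULL) means success */
}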
diff --git a/services/std_svc/rmmd/rmmd_main.c b/services/std_svc/rmmd/rmmd_main.c
index 24f6c41..c80b524 100644
--- a/services/std_svc/rmmd/rmmd_main.c
+++ b/services/std_svc/rmmd/rmmd_main.c
@@ -18,6 +18,8 @@
 #include <context.h>
 #include <lib/el3_runtime/context_mgmt.h>
 #include <lib/el3_runtime/pubsub.h>
+#include <lib/extensions/pmuv3.h>
+#include <lib/extensions/sys_reg_trace.h>
 #include <lib/gpt_rme/gpt_rme.h>
 
 #include <lib/spinlock.h>
@@ -125,6 +127,8 @@
 	 */
 		sve_enable(ctx);
 	}
+
+	pmuv3_enable(ctx);
 }
 
 /*******************************************************************************