Merge "fix(amu): limit virtual offset register access to NS world" into integration
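
SCR_EL3.AMVOFFEN was previously set during common context setup
whenever FEAT_AMUv1p1 and an EL2 were implemented, which untrapped the
AMU virtual offset registers for EL2 in every security state. The bit
is now programmed by the AMU driver into the per-world context instead,
so EL2 access to the virtual offsets is only enabled as part of the
Normal-world AMU configuration.

A minimal sketch of the resulting flow (identifiers are taken from the
diff below; the surrounding driver plumbing is assumed, not shown):

    /* Sketch only: untrap the virtual offsets for this context when
     * FEAT_AMUv1p1 is present and an EL2 exists to be trapped. */
    if (is_armv8_6_feat_amuv1p1_present() &&
        (el_implemented(2) != EL_IMPL_NONE)) {
            ctx_write_scr_el3_amvoffen(ctx, 1U);
    }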
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index bbbc77a..5866af8 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -488,7 +488,8 @@
 #define SCR_HXEn_BIT		(UL(1) << 38)
 #define SCR_ENTP2_SHIFT		U(41)
 #define SCR_ENTP2_BIT		(UL(1) << SCR_ENTP2_SHIFT)
-#define SCR_AMVOFFEN_BIT	(UL(1) << 35)
+#define SCR_AMVOFFEN_SHIFT	U(35)
+#define SCR_AMVOFFEN_BIT	(UL(1) << SCR_AMVOFFEN_SHIFT)
 #define SCR_TWEDEn_BIT		(UL(1) << 29)
 #define SCR_ECVEN_BIT		(UL(1) << 28)
 #define SCR_FGTEN_BIT		(UL(1) << 27)
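
The SHIFT/BIT pairing above mirrors the neighbouring SCR_ENTP2
definitions, letting field writers rebuild the bit from the shift. A
standalone sanity check of the pattern (a sketch; the UL()/U()
constructors are assumed to expand as in TF-A's utils_def.h):

    #include <assert.h>

    #define UL(x)	x##UL
    #define U(x)	x##U

    #define SCR_AMVOFFEN_SHIFT	U(35)
    #define SCR_AMVOFFEN_BIT	(UL(1) << SCR_AMVOFFEN_SHIFT)

    /* AMVOFFEN is SCR_EL3 bit 35, i.e. 1 << 35 == 0x8_0000_0000. */
    static_assert(SCR_AMVOFFEN_BIT == 0x0000000800000000UL,
                  "SCR_EL3.AMVOFFEN should be bit 35");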
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 6051039..0f09ebe 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -284,16 +284,6 @@
 	}
 
-	/*
-	 * FEAT_AMUv1p1 virtual offset registers are only accessible from EL3
-	 * and EL2, when clear, this bit traps accesses from EL2 so we set it
-	 * to 1 when EL2 is present.
-	 */
-	if (is_armv8_6_feat_amuv1p1_present() &&
-		(el_implemented(2) != EL_IMPL_NONE)) {
-		scr_el3 |= SCR_AMVOFFEN_BIT;
-	}
-
-	/*
 	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
 	 * execution state, setting all fields rather than relying on the hw.
 	 * Some fields have architecturally UNKNOWN reset values and these are
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index d329c3d..72566fd 100644
--- a/lib/extensions/amu/aarch64/amu.c
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -75,7 +75,7 @@
 		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
 }
 
-static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
+static inline __unused void ctx_write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
 {
 	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);
 
@@ -85,6 +85,16 @@
 	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
 }
 
+static inline __unused void ctx_write_scr_el3_amvoffen(cpu_context_t *ctx, uint64_t amvoffen)
+{
+	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
+
+	value &= ~SCR_AMVOFFEN_BIT;
+	value |= (amvoffen << SCR_AMVOFFEN_SHIFT) & SCR_AMVOFFEN_BIT;
+
+	write_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3, value);
+}
+
 static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
 {
 	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
@@ -226,7 +236,7 @@
 	 * in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
 	 * the Activity Monitor registers do not trap to EL3.
 	 */
-	write_cptr_el3_tam(ctx, 0U);
+	ctx_write_cptr_el3_tam(ctx, 0U);
 
 	/*
 	 * Retrieve the number of architected counters. All of these counters
@@ -285,6 +295,13 @@
 			 * used.
 			 */
 			write_hcr_el2_amvoffen(0U);
+		} else {
+			/*
+			 * Virtual offset registers are only accessible from EL3
+			 * and EL2. When SCR_EL3.AMVOFFEN is clear, EL2 accesses
+			 * trap to EL3, so set it to 1 when EL2 is present.
+			 */
+			ctx_write_scr_el3_amvoffen(ctx, 1U);
 		}
 
 #if AMU_RESTRICT_COUNTERS
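
The new ctx_write_scr_el3_amvoffen() helper follows the same
clear-then-masked-OR pattern as ctx_write_cptr_el3_tam(): the field is
cleared first, and the shifted value is masked against the field bit so
an out-of-range argument cannot disturb neighbouring SCR_EL3 fields. A
host-side illustration of the pattern (a sketch reusing the same bit
position; not TF-A code):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define AMVOFFEN_SHIFT	35
    #define AMVOFFEN_BIT	(UINT64_C(1) << AMVOFFEN_SHIFT)

    /* Clear the field, then OR in the new value masked to the field. */
    static uint64_t write_amvoffen(uint64_t scr, uint64_t amvoffen)
    {
    	scr &= ~AMVOFFEN_BIT;
    	scr |= (amvoffen << AMVOFFEN_SHIFT) & AMVOFFEN_BIT;
    	return scr;
    }

    int main(void)
    {
    	uint64_t scr = 0U;

    	scr = write_amvoffen(scr, 1U);
    	printf("0x%016" PRIx64 "\n", scr);	/* 0x0000000800000000 */

    	scr = write_amvoffen(scr, 0U);
    	printf("0x%016" PRIx64 "\n", scr);	/* 0x0000000000000000 */

    	return 0;
    }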