refactor(amu): detect architected counters at runtime

This change removes the `AMU_GROUP0_COUNTERS_MASK` and
`AMU_GROUP0_NR_COUNTERS` preprocessor definitions, instead retrieving
the number of group 0 counters dynamically through `AMCGCR_EL0.CG0NC`.
A new `AMU_GROUP0_MAX_COUNTERS` definition (the architectural maximum
of 16) bounds the group 0 context arrays.

Change-Id: I70e39c30fbd5df89b214276fac79cc8758a89f72
Signed-off-by: Chris Kay <chris.kay@arm.com>
diff --git a/include/arch/aarch32/arch.h b/include/arch/aarch32/arch.h
index 59680b7..a1bd942 100644
--- a/include/arch/aarch32/arch.h
+++ b/include/arch/aarch32/arch.h
@@ -755,6 +755,8 @@
 #define AMCFGR_N_MASK		U(0xff)
 
 /* AMCGCR definitions */
+#define AMCGCR_CG0NC_SHIFT	U(0)
+#define AMCGCR_CG0NC_MASK	U(0xff)
 #define AMCGCR_CG1NC_SHIFT	U(8)
 #define AMCGCR_CG1NC_MASK	U(0xff)
 
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index a72087e..8b362f1 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -1069,6 +1069,8 @@
 #define AMCFGR_EL0_N_MASK	U(0xff)
 
 /* AMCGCR_EL0 definitions */
+#define AMCGCR_EL0_CG0NC_SHIFT	U(0)
+#define AMCGCR_EL0_CG0NC_MASK	U(0xff)
 #define AMCGCR_EL0_CG1NC_SHIFT	U(8)
 #define AMCGCR_EL0_CG1NC_MASK	U(0xff)
 
diff --git a/include/lib/extensions/amu_private.h b/include/lib/extensions/amu_private.h
index db44e64..9b4c29c 100644
--- a/include/lib/extensions/amu_private.h
+++ b/include/lib/extensions/amu_private.h
@@ -15,9 +15,7 @@
 
 #include <platform_def.h>
 
-/* All group 0 counters */
-#define AMU_GROUP0_COUNTERS_MASK	U(0xf)
-#define AMU_GROUP0_NR_COUNTERS		U(4)
+#define AMU_GROUP0_MAX_COUNTERS		U(16)
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
 #define AMU_GROUP1_COUNTERS_MASK	U(0)
@@ -63,10 +61,10 @@
 #endif
 
 struct amu_ctx {
-	uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
+	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
 #if __aarch64__
 	/* Architected event counter 1 does not have an offset register. */
-	uint64_t group0_voffsets[AMU_GROUP0_NR_COUNTERS-1];
+	uint64_t group0_voffsets[AMU_GROUP0_MAX_COUNTERS-1];
 #endif
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
diff --git a/lib/extensions/amu/aarch32/amu.c b/lib/extensions/amu/aarch32/amu.c
index d192a89..4733086 100644
--- a/lib/extensions/amu/aarch32/amu.c
+++ b/lib/extensions/amu/aarch32/amu.c
@@ -43,6 +43,12 @@
 		AMCFGR_NCG_MASK;
 }
 
+static inline __unused uint32_t read_amcgcr_cg0nc(void)
+{
+	return (read_amcgcr() >> AMCGCR_CG0NC_SHIFT) &
+		AMCGCR_CG0NC_MASK;
+}
+
 static inline __unused uint32_t read_amcgcr_cg1nc(void)
 {
 	return (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
@@ -163,7 +169,7 @@
 	}
 
 	/* Enable group 0 counters */
-	write_amcntenset0_px(AMU_GROUP0_COUNTERS_MASK);
+	write_amcntenset0_px((UINT32_C(1) << read_amcgcr_cg0nc()) - 1U);
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
 	if (AMU_GROUP1_NR_COUNTERS > 0U) {
@@ -196,7 +202,7 @@
 static uint64_t amu_group0_cnt_read(unsigned int idx)
 {
 	assert(amu_supported());
-	assert(idx < AMU_GROUP0_NR_COUNTERS);
+	assert(idx < read_amcgcr_cg0nc());
 
 	return amu_group0_cnt_read_internal(idx);
 }
@@ -205,7 +211,7 @@
 static void amu_group0_cnt_write(unsigned  int idx, uint64_t val)
 {
 	assert(amu_supported());
-	assert(idx < AMU_GROUP0_NR_COUNTERS);
+	assert(idx < read_amcgcr_cg0nc());
 
 	amu_group0_cnt_write_internal(idx, val);
 	isb();
@@ -252,7 +258,8 @@
 #endif
 
 	/* Assert that group 0/1 counter configuration is what we expect */
-	assert(read_amcntenset0_px() == AMU_GROUP0_COUNTERS_MASK);
+	assert(read_amcntenset0_px() ==
+		((UINT32_C(1) << read_amcgcr_cg0nc()) - 1U));
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
 	if (AMU_GROUP1_NR_COUNTERS > 0U) {
@@ -263,7 +270,7 @@
 	 * Disable group 0/1 counters to avoid other observers like SCP sampling
 	 * counter values from the future via the memory mapped view.
 	 */
-	write_amcntenclr0_px(AMU_GROUP0_COUNTERS_MASK);
+	write_amcntenclr0_px((UINT32_C(1) << read_amcgcr_cg0nc()) - 1U);
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
 	if (AMU_GROUP1_NR_COUNTERS > 0U) {
@@ -274,7 +281,7 @@
 	isb();
 
 	/* Save all group 0 counters */
-	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
+	for (i = 0U; i < read_amcgcr_cg0nc(); i++) {
 		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
 	}
 
@@ -319,12 +326,12 @@
 #endif
 
 	/* Restore all group 0 counters */
-	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
+	for (i = 0U; i < read_amcgcr_cg0nc(); i++) {
 		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
 	}
 
 	/* Restore group 0 counter configuration */
-	write_amcntenset0_px(AMU_GROUP0_COUNTERS_MASK);
+	write_amcntenset0_px((UINT32_C(1) << read_amcgcr_cg0nc()) - 1U);
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
 	if (AMU_GROUP1_NR_COUNTERS > 0U) {
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index b2a90ee..129616e 100644
--- a/lib/extensions/amu/aarch64/amu.c
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -66,6 +66,12 @@
 		AMCFGR_EL0_NCG_MASK;
 }
 
+static inline uint64_t read_amcgcr_el0_cg0nc(void)
+{
+	return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) &
+		AMCGCR_EL0_CG0NC_MASK;
+}
+
 static inline __unused uint64_t read_amcg1idr_el0_voff(void)
 {
 	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
@@ -197,7 +203,7 @@
 	write_cptr_el3_tam(ctx, 0U);
 
 	/* Enable group 0 counters */
-	write_amcntenset0_el0_px(AMU_GROUP0_COUNTERS_MASK);
+	write_amcntenset0_el0_px((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U);
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
 	if (AMU_GROUP1_NR_COUNTERS > 0U) {
@@ -235,7 +241,7 @@
 static uint64_t amu_group0_cnt_read(unsigned int idx)
 {
 	assert(amu_supported());
-	assert(idx < AMU_GROUP0_NR_COUNTERS);
+	assert(idx < read_amcgcr_el0_cg0nc());
 
 	return amu_group0_cnt_read_internal(idx);
 }
@@ -244,7 +250,7 @@
 static void amu_group0_cnt_write(unsigned  int idx, uint64_t val)
 {
 	assert(amu_supported());
-	assert(idx < AMU_GROUP0_NR_COUNTERS);
+	assert(idx < read_amcgcr_el0_cg0nc());
 
 	amu_group0_cnt_write_internal(idx, val);
 	isb();
@@ -259,7 +265,7 @@
 static uint64_t amu_group0_voffset_read(unsigned int idx)
 {
 	assert(amu_v1p1_supported());
-	assert(idx < AMU_GROUP0_NR_COUNTERS);
+	assert(idx < read_amcgcr_el0_cg0nc());
 	assert(idx != 1U);
 
 	return amu_group0_voffset_read_internal(idx);
@@ -274,7 +280,7 @@
 static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
 {
 	assert(amu_v1p1_supported());
-	assert(idx < AMU_GROUP0_NR_COUNTERS);
+	assert(idx < read_amcgcr_el0_cg0nc());
 	assert(idx != 1U);
 
 	amu_group0_voffset_write_internal(idx, val);
@@ -353,7 +359,8 @@
 #endif
 
 	/* Assert that group 0/1 counter configuration is what we expect */
-	assert(read_amcntenset0_el0_px() == AMU_GROUP0_COUNTERS_MASK);
+	assert(read_amcntenset0_el0_px() ==
+		((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U));
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
 	if (AMU_GROUP1_NR_COUNTERS > 0U) {
@@ -365,7 +372,7 @@
 	 * Disable group 0/1 counters to avoid other observers like SCP sampling
 	 * counter values from the future via the memory mapped view.
 	 */
-	write_amcntenclr0_el0_px(AMU_GROUP0_COUNTERS_MASK);
+	write_amcntenclr0_el0_px((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U);
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
 	if (AMU_GROUP1_NR_COUNTERS > 0U) {
@@ -376,7 +383,7 @@
 	isb();
 
 	/* Save all group 0 counters */
-	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
+	for (i = 0U; i < read_amcgcr_el0_cg0nc(); i++) {
 		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
 	}
 
@@ -442,7 +449,7 @@
 #endif
 
 	/* Restore all group 0 counters */
-	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
+	for (i = 0U; i < read_amcgcr_el0_cg0nc(); i++) {
 		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
 	}
 
@@ -455,7 +462,7 @@
 	}
 
 	/* Restore group 0 counter configuration */
-	write_amcntenset0_el0_px(AMU_GROUP0_COUNTERS_MASK);
+	write_amcntenset0_el0_px((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U);
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
 	if (AMU_GROUP1_NR_COUNTERS > 0U) {