Merge changes from topic "ck/mpmm" into integration

* changes:
  docs(maintainers): add Chris Kay to AMU and MPMM
  feat(tc): enable MPMM
  feat(mpmm): add support for MPMM
  feat(amu): enable per-core AMU auxiliary counters
  docs(amu): add AMU documentation
  refactor(amu): refactor enablement and context switching
  refactor(amu): detect auxiliary counters at runtime
  refactor(amu): detect architected counters at runtime
  refactor(amu): conditionally compile auxiliary counter support
  refactor(amu): factor out register accesses
  refactor(amu)!: privatize unused AMU APIs
  refactor(amu)!: remove `PLAT_AMU_GROUP1_COUNTERS_MASK`
  build(amu): introduce `amu.mk`
  build(fconf)!: clean up source collection
  feat(fdt-wrappers): add CPU enumeration utility function
  build(fdt-wrappers): introduce FDT wrappers makefile
  build(bl2): deduplicate sources
  build(bl1): deduplicate sources
diff --git a/Makefile b/Makefile
index c4e2ee3..e5ab324 100644
--- a/Makefile
+++ b/Makefile
@@ -931,6 +931,8 @@
         DYN_DISABLE_AUTH \
         EL3_EXCEPTION_HANDLING \
         ENABLE_AMU \
+        ENABLE_AMU_AUXILIARY_COUNTERS \
+        ENABLE_AMU_FCONF \
         AMU_RESTRICT_COUNTERS \
         ENABLE_ASSERTIONS \
         ENABLE_MPAM_FOR_LOWER_ELS \
@@ -989,6 +991,8 @@
         ENABLE_SYS_REG_TRACE_FOR_NS \
         ENABLE_TRF_FOR_NS \
         ENABLE_FEAT_HCX \
+        ENABLE_MPMM \
+        ENABLE_MPMM_FCONF \
 )))
 
 $(eval $(call assert_numerics,\
@@ -1032,6 +1036,8 @@
         DECRYPTION_SUPPORT_${DECRYPTION_SUPPORT} \
         DISABLE_MTPMU \
         ENABLE_AMU \
+        ENABLE_AMU_AUXILIARY_COUNTERS \
+        ENABLE_AMU_FCONF \
         AMU_RESTRICT_COUNTERS \
         ENABLE_ASSERTIONS \
         ENABLE_BTI \
@@ -1095,6 +1101,8 @@
         ENABLE_SYS_REG_TRACE_FOR_NS \
         ENABLE_TRF_FOR_NS \
         ENABLE_FEAT_HCX \
+        ENABLE_MPMM \
+        ENABLE_MPMM_FCONF \
 )))
 
 ifeq (${SANITIZE_UB},trap)
@@ -1161,6 +1169,8 @@
 
 # Expand build macros for the different images
 ifeq (${NEED_BL1},yes)
+BL1_SOURCES := $(sort ${BL1_SOURCES})
+
 $(eval $(call MAKE_BL,bl1))
 endif
 
@@ -1169,6 +1179,8 @@
 FIP_BL2_ARGS := tb-fw
 endif
 
+BL2_SOURCES := $(sort ${BL2_SOURCES})
+
 $(if ${BL2}, $(eval $(call TOOL_ADD_IMG,bl2,--${FIP_BL2_ARGS})),\
 	$(eval $(call MAKE_BL,bl2,${FIP_BL2_ARGS})))
 endif
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 106d410..9baa0c2 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -22,6 +22,8 @@
   endif
 endif
 
+include lib/extensions/amu/amu.mk
+include lib/mpmm/mpmm.mk
 include lib/psci/psci_lib.mk
 
 BL31_SOURCES		+=	bl31/bl31_main.c				\
@@ -78,8 +80,11 @@
 endif
 
 ifeq (${ENABLE_AMU},1)
-BL31_SOURCES		+=	lib/extensions/amu/aarch64/amu.c		\
-				lib/extensions/amu/aarch64/amu_helpers.S
+BL31_SOURCES		+=	${AMU_SOURCES}
+endif
+
+ifeq (${ENABLE_MPMM},1)
+BL31_SOURCES		+=	${MPMM_SOURCES}
 endif
 
 ifeq (${ENABLE_SVE_FOR_NS},1)
diff --git a/bl32/sp_min/sp_min.mk b/bl32/sp_min/sp_min.mk
index 6339cf8..590b032 100644
--- a/bl32/sp_min/sp_min.mk
+++ b/bl32/sp_min/sp_min.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -8,6 +8,7 @@
 	$(error SP_MIN is only supported on AArch32 platforms)
 endif
 
+include lib/extensions/amu/amu.mk
 include lib/psci/psci_lib.mk
 
 INCLUDES		+=	-Iinclude/bl32/sp_min
@@ -27,9 +28,8 @@
 BL32_SOURCES		+=	lib/pmf/pmf_main.c
 endif
 
-ifeq (${ENABLE_AMU}, 1)
-BL32_SOURCES		+=	lib/extensions/amu/aarch32/amu.c\
-				lib/extensions/amu/aarch32/amu_helpers.S
+ifeq (${ENABLE_AMU},1)
+BL32_SOURCES		+=	${AMU_SOURCES}
 endif
 
 ifeq (${WORKAROUND_CVE_2017_5715},1)
diff --git a/common/fdt_wrappers.c b/common/fdt_wrappers.c
index dd7a0fa..64e01ea 100644
--- a/common/fdt_wrappers.c
+++ b/common/fdt_wrappers.c
@@ -572,3 +572,47 @@
 	/* Translate the local device address recursively */
 	return fdtw_translate_address(dtb, local_bus_node, global_address);
 }
+
+/*
+ * For every CPU node (`/cpus/cpu@n`) in an FDT, execute a callback passing a
+ * pointer to the FDT and the offset of the CPU node. If the return value of the
+ * callback is negative, it is treated as an error and the loop is aborted. In
+ * this situation, the value of the callback is returned from the function.
+ *
+ * Returns `0` on success, or a negative integer representing an error code.
+ */
+int fdtw_for_each_cpu(const void *dtb,
+		      int (*callback)(const void *dtb, int node, uintptr_t mpidr))
+{
+	int ret = 0;
+	int parent, node = 0;
+
+	parent = fdt_path_offset(dtb, "/cpus");
+	if (parent < 0) {
+		return parent;
+	}
+
+	fdt_for_each_subnode(node, dtb, parent) {
+		const char *name;
+		int len;
+
+		uintptr_t mpidr = 0U;
+
+		name = fdt_get_name(dtb, node, &len);
+		if (strncmp(name, "cpu@", 4) != 0) {
+			continue;
+		}
+
+		ret = fdt_get_reg_props_by_index(dtb, node, 0, &mpidr, NULL);
+		if (ret < 0) {
+			break;
+		}
+
+		ret = callback(dtb, node, mpidr);
+		if (ret < 0) {
+			break;
+		}
+	}
+
+	return ret;
+}
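
As an illustration of how a platform or BL image might consume this new helper,
a minimal sketch follows (not part of this patch; the callback and its logging
are hypothetical):

    #include <stdint.h>

    #include <common/debug.h>
    #include <common/fdt_wrappers.h>

    /*
     * Hypothetical callback: log the MPIDR of every CPU node found in the DTB.
     * Returning a negative value here would abort the iteration, and that value
     * would then be returned by fdtw_for_each_cpu().
     */
    static int log_cpu_node(const void *dtb, int node, uintptr_t mpidr)
    {
        INFO("CPU node at offset %d has MPIDR 0x%llx\n", node,
             (unsigned long long)mpidr);

        return 0;
    }

    /* Walk every `/cpus/cpu@n` node in `dtb`. */
    static int log_all_cpus(const void *dtb)
    {
        return fdtw_for_each_cpu(dtb, log_cpu_node);
    }
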
diff --git a/common/fdt_wrappers.mk b/common/fdt_wrappers.mk
new file mode 100644
index 0000000..62b8c6e
--- /dev/null
+++ b/common/fdt_wrappers.mk
@@ -0,0 +1,7 @@
+#
+# Copyright (c) 2021, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+FDT_WRAPPERS_SOURCES	:=	common/fdt_wrappers.c
diff --git a/docs/about/maintainers.rst b/docs/about/maintainers.rst
index 7db81fc..337dde6 100644
--- a/docs/about/maintainers.rst
+++ b/docs/about/maintainers.rst
@@ -217,6 +217,8 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 :|M|: Alexei Fedorov <Alexei.Fedorov@arm.com>
 :|G|: `AlexeiFedorov`_
+:|M|: Chris Kay <chris.kay@arm.com>
+:|G|: `CJKay`_
 :|F|: lib/extensions/amu/
 
 Memory Partitioning And Monitoring (MPAM) extensions
@@ -326,6 +328,13 @@
 :|F|: drivers/scmi-msg
 :|F|: include/drivers/scmi\*
 
+Maximum Power Mitigation Mechanism (MPMM)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+:|M|: Chris Kay <chris.kay@arm.com>
+:|G|: `CJKay`_
+:|F|: include/lib/mpmm/
+:|F|: lib/mpmm/
+
 Platform Ports
 ~~~~~~~~~~~~~~
 
diff --git a/docs/components/activity-monitors.rst b/docs/components/activity-monitors.rst
new file mode 100644
index 0000000..dd45c43
--- /dev/null
+++ b/docs/components/activity-monitors.rst
@@ -0,0 +1,34 @@
+Activity Monitors
+=================
+
+FEAT_AMUv1 of the Armv8-A architecture introduces the Activity Monitors
+extension. This extension describes the architecture for the Activity Monitor
+Unit (|AMU|), an optional non-invasive component for monitoring core events
+through a set of 64-bit counters.
+
+When the ``ENABLE_AMU=1`` build option is provided, Trusted Firmware-A sets up
+the |AMU| prior to its exit from EL3, and will save and restore architected
+|AMU| counters as necessary upon suspend and resume.
+
+.. _Activity Monitor Auxiliary Counters:
+
+Auxiliary counters
+------------------
+
+FEAT_AMUv1 describes a set of implementation-defined auxiliary counters (also
+known as group 1 counters). Support for these is controlled by the
+``ENABLE_AMU_AUXILIARY_COUNTERS`` build option.
+
+As a security precaution, Trusted Firmware-A does not enable these by default.
+Instead, platforms may configure their auxiliary counters through one of two
+possible mechanisms:
+
+- |FCONF|, controlled by the ``ENABLE_AMU_FCONF`` build option.
+- A platform implementation of the ``plat_amu_topology`` function (the default).
+
+See :ref:`Activity Monitor Unit (AMU) Bindings` for documentation on the |FCONF|
+device tree bindings.
+
+--------------
+
+*Copyright (c) 2021, Arm Limited. All rights reserved.*
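
For platforms that stick with the default (non-FCONF) mechanism, a minimal
sketch of the ``plat_amu_topology`` hook is shown below. It is illustrative
only: it assumes a platform that enables auxiliary counter 0 on every core, and
relies on the ``amu_topology``/``amu_core`` structures introduced by this patch
in ``include/lib/extensions/amu.h``.

    #include <lib/extensions/amu.h>
    #include <platform_def.h>

    /*
     * Example only: enable auxiliary (group 1) counter 0 on every core. A real
     * platform should derive these masks from its hardware configuration.
     */
    const struct amu_topology *plat_amu_topology(void)
    {
        static struct amu_topology topology;
        unsigned int i;

        for (i = 0U; i < PLATFORM_CORE_COUNT; i++) {
            topology.cores[i].enable = UINT16_C(1) << 0;
        }

        return &topology;
    }
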
diff --git a/docs/components/fconf/amu-bindings.rst b/docs/components/fconf/amu-bindings.rst
new file mode 100644
index 0000000..047f75e
--- /dev/null
+++ b/docs/components/fconf/amu-bindings.rst
@@ -0,0 +1,142 @@
+Activity Monitor Unit (AMU) Bindings
+====================================
+
+To support platform-defined Activity Monitor Unit (|AMU|) auxiliary counters
+through FCONF, the ``HW_CONFIG`` device tree accepts several |AMU|-specific
+nodes and properties.
+
+Bindings
+^^^^^^^^
+
+.. contents::
+    :local:
+
+``/cpus/cpu*`` node properties
+""""""""""""""""""""""""""""""
+
+The ``cpu`` node has been augmented to support a handle to an associated |AMU|
+view, which should describe the counters offered by the core.
+
++---------------+-------+---------------+-------------------------------------+
+| Property name | Usage | Value type    | Description                         |
++===============+=======+===============+=====================================+
+| ``amu``       | O     | ``<phandle>`` | If present, indicates that an |AMU| |
+|               |       |               | is available and its counters are   |
+|               |       |               | described by the node provided.     |
++---------------+-------+---------------+-------------------------------------+
+
+``/cpus/amus`` node properties
+""""""""""""""""""""""""""""""
+
+The ``amus`` node describes the |AMUs| implemented by the cores in the system.
+This node does not have any properties.
+
+``/cpus/amus/amu*`` node properties
+"""""""""""""""""""""""""""""""""""
+
+An ``amu`` node describes the layout and meaning of the auxiliary counter
+registers of one or more |AMUs|, and may be shared by multiple cores.
+
++--------------------+-------+------------+------------------------------------+
+| Property name      | Usage | Value type | Description                        |
++====================+=======+============+====================================+
+| ``#address-cells`` | R     | ``<u32>``  | Value shall be 1. Specifies that   |
+|                    |       |            | the ``reg`` property array of      |
+|                    |       |            | children of this node uses a       |
+|                    |       |            | single cell.                       |
++--------------------+-------+------------+------------------------------------+
+| ``#size-cells``    | R     | ``<u32>``  | Value shall be 0. Specifies that   |
+|                    |       |            | no size is required in the ``reg`` |
+|                    |       |            | property in children of this node. |
++--------------------+-------+------------+------------------------------------+
+
+``/cpus/amus/amu*/counter*`` node properties
+""""""""""""""""""""""""""""""""""""""""""""
+
+A ``counter`` node describes an auxiliary counter belonging to the parent |AMU|
+view.
+
++-------------------+-------+-------------+------------------------------------+
+| Property name     | Usage | Value type  | Description                        |
++===================+=======+=============+====================================+
+| ``reg``           | R     | array       | Represents the counter register    |
+|                   |       |             | index, and must be a single cell.  |
++-------------------+-------+-------------+------------------------------------+
+| ``enable-at-el3`` | O     | ``<empty>`` | The presence of this property      |
+|                   |       |             | indicates that this counter should |
+|                   |       |             | be enabled prior to EL3 exit.      |
++-------------------+-------+-------------+------------------------------------+
+
+Example
+^^^^^^^
+
+An example system offering four cores across two clusters, where the cores of
+each cluster share an |AMU| view, may use something like the following:
+
+.. code-block::
+
+    cpus {
+        #address-cells = <2>;
+        #size-cells = <0>;
+
+        amus {
+            amu0: amu-0 {
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                counterX: counter@0 {
+                    reg = <0>;
+
+                    enable-at-el3;
+                };
+
+                counterY: counter@1 {
+                    reg = <1>;
+
+                    enable-at-el3;
+                };
+            };
+
+            amu1: amu-1 {
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                counterZ: counter@0 {
+                    reg = <0>;
+
+                    enable-at-el3;
+                };
+            };
+        };
+
+        cpu0@00000 {
+            ...
+
+            amu = <&amu0>;
+        };
+
+        cpu1@00100 {
+            ...
+
+            amu = <&amu0>;
+        };
+
+        cpu2@10000 {
+            ...
+
+            amu = <&amu1>;
+        };
+
+        cpu3@10100 {
+            ...
+
+            amu = <&amu1>;
+        };
+    }
+
+In this situation, ``cpu0`` and ``cpu1`` (the two cores in the first cluster)
+share the view of their |AMU| defined by ``amu0``. Likewise, ``cpu2`` and
+``cpu3`` (the two cores in the second cluster) share the view of their |AMU|
+defined by ``amu1``. This will cause ``counterX`` and ``counterY`` to be enabled
+for both ``cpu0`` and ``cpu1``, and ``counterZ`` to be enabled for both ``cpu2``
+and ``cpu3``.
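
To make the binding concrete, here is a sketch of how firmware could walk it
with libfdt. It roughly mirrors what the FCONF populator introduced by this
topic is expected to do, but the helper below (its name, bounds checks and error
handling) is illustrative only and is not part of the patch:

    #include <stdint.h>

    #include <libfdt.h>

    /*
     * Illustrative helper: build an enable mask for the auxiliary counters that
     * a given CPU node requests via its `amu` phandle.
     */
    static int get_cpu_amu_enable_mask(const void *dtb, int cpu_node,
                                       uint16_t *mask)
    {
        const fdt32_t *prop;
        int amu_node, counter_node, len;

        *mask = 0U;

        prop = fdt_getprop(dtb, cpu_node, "amu", &len);
        if ((prop == NULL) || (len != (int)sizeof(fdt32_t))) {
            return 0; /* No AMU view for this core */
        }

        amu_node = fdt_node_offset_by_phandle(dtb, fdt32_to_cpu(*prop));
        if (amu_node < 0) {
            return amu_node;
        }

        fdt_for_each_subnode(counter_node, dtb, amu_node) {
            const fdt32_t *reg;
            uint32_t idx;

            /* Only counters flagged `enable-at-el3` are of interest */
            if (fdt_getprop(dtb, counter_node, "enable-at-el3", NULL) == NULL) {
                continue;
            }

            reg = fdt_getprop(dtb, counter_node, "reg", &len);
            if ((reg == NULL) || (len != (int)sizeof(fdt32_t))) {
                return -FDT_ERR_BADVALUE;
            }

            idx = fdt32_to_cpu(*reg);
            if (idx >= 16U) {
                return -FDT_ERR_BADVALUE;
            }

            *mask |= (uint16_t)(1U << idx);
        }

        return 0;
    }
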
diff --git a/docs/components/fconf/index.rst b/docs/components/fconf/index.rst
index 9020633..029f324 100644
--- a/docs/components/fconf/index.rst
+++ b/docs/components/fconf/index.rst
@@ -145,3 +145,5 @@
   :maxdepth: 1
 
   fconf_properties
+  amu-bindings
+  mpmm-bindings
diff --git a/docs/components/fconf/mpmm-bindings.rst b/docs/components/fconf/mpmm-bindings.rst
new file mode 100644
index 0000000..d3cc857
--- /dev/null
+++ b/docs/components/fconf/mpmm-bindings.rst
@@ -0,0 +1,48 @@
+Maximum Power Mitigation Mechanism (MPMM) Bindings
+==================================================
+
+|MPMM| support cannot be determined at runtime by the firmware. Instead, these
+DTB bindings allow the platform to communicate per-core support for |MPMM| via
+the ``HW_CONFIG`` device tree blob.
+
+Bindings
+^^^^^^^^
+
+.. contents::
+    :local:
+
+``/cpus/cpu*`` node properties
+""""""""""""""""""""""""""""""
+
+The ``cpu`` node has been augmented to allow the platform to indicate support
+for |MPMM| on a given core.
+
++-------------------+-------+-------------+------------------------------------+
+| Property name     | Usage | Value type  | Description                        |
++===================+=======+=============+====================================+
+| ``supports-mpmm`` | O     | ``<empty>`` | If present, indicates that |MPMM|  |
+|                   |       |             | is available on this core.         |
++-------------------+-------+-------------+------------------------------------+
+
+Example
+^^^^^^^
+
+An example system offering two cores, one with support for |MPMM| and one
+without, can be described as follows:
+
+.. code-block::
+
+    cpus {
+        #address-cells = <2>;
+        #size-cells = <0>;
+
+        cpu0@00000 {
+            ...
+
+            supports-mpmm;
+        };
+
+        cpu1@00100 {
+            ...
+        };
+    }
diff --git a/docs/components/index.rst b/docs/components/index.rst
index f349d8d..754526d 100644
--- a/docs/components/index.rst
+++ b/docs/components/index.rst
@@ -7,12 +7,14 @@
    :numbered:
 
    spd/index
+   activity-monitors
    arm-sip-service
    debugfs-design
    exception-handling
    fconf/index
    firmware-update
    measured_boot/index
+   mpmm
    platform-interrupt-controller-API
    ras
    romlib-design
diff --git a/docs/components/mpmm.rst b/docs/components/mpmm.rst
new file mode 100644
index 0000000..1b1c6d8
--- /dev/null
+++ b/docs/components/mpmm.rst
@@ -0,0 +1,30 @@
+Maximum Power Mitigation Mechanism (MPMM)
+=========================================
+
+|MPMM| is an optional microarchitectural power management mechanism supported by
+some Arm Armv9-A cores, beginning with the Cortex-X2, Cortex-A710 and
+Cortex-A510 cores. This mechanism detects and limits high-activity events to
+assist in |SoC| processor power domain dynamic power budgeting and limit the
+triggering of whole-rail (i.e. clock chopping) responses to overcurrent
+conditions.
+
+|MPMM| is enabled on a per-core basis by the EL3 runtime firmware. The presence
+of |MPMM| cannot be determined at runtime by the firmware, and therefore the
+platform must expose this information through one of two possible mechanisms:
+
+- |FCONF|, controlled by the ``ENABLE_MPMM_FCONF`` build option.
+- A platform implementation of the ``plat_mpmm_topology`` function (the
+  default).
+
+See :ref:`Maximum Power Mitigation Mechanism (MPMM) Bindings` for documentation
+on the |FCONF| device tree bindings.
+
+.. warning::
+
+    |MPMM| exposes gear metrics through the auxiliary |AMU| counters. An
+    external power controller can use these metrics to budget SoC power by
+    limiting the number of cores that can execute higher-activity workloads or
+    switching to a different DVFS operating point. When this is the case, the
+    |AMU| counters that make up the |MPMM| gears must be enabled by the EL3
+    runtime firmware - please see :ref:`Activity Monitor Auxiliary Counters` for
+    documentation on enabling auxiliary |AMU| counters.
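
For platforms using the default (non-FCONF) mechanism, the
``plat_mpmm_topology`` hook might be implemented along these lines. This is an
illustrative sketch only, assuming every core supports MPMM, and it uses the
``mpmm_topology`` structure added by this patch in ``include/lib/mpmm/mpmm.h``:

    #include <lib/mpmm/mpmm.h>
    #include <platform_def.h>

    /*
     * Example only: report MPMM support on every core. A real platform should
     * reflect its actual core configuration here.
     */
    const struct mpmm_topology *plat_mpmm_topology(void)
    {
        static struct mpmm_topology topology;
        unsigned int i;

        for (i = 0U; i < PLATFORM_CORE_COUNT; i++) {
            topology.cores[i].supported = true;
        }

        return &topology;
    }
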
diff --git a/docs/getting_started/build-options.rst b/docs/getting_started/build-options.rst
index 8b5e92e..04e3c0b 100644
--- a/docs/getting_started/build-options.rst
+++ b/docs/getting_started/build-options.rst
@@ -220,6 +220,14 @@
    v8.2 implementations also implement an AMU and this option can be used to
    enable this feature on those systems as well. Default is 0.
 
+-  ``ENABLE_AMU_AUXILIARY_COUNTERS``: Enables support for AMU auxiliary counters
+   (also known as group 1 counters). These are implementation-defined counters,
+   and as such require additional platform configuration. Default is 0.
+
+-  ``ENABLE_AMU_FCONF``: Enables configuration of the AMU through FCONF, which
+   allows platforms with auxiliary counters to describe them via the
+   ``HW_CONFIG`` device tree blob. Default is 0.
+
 -  ``ENABLE_ASSERTIONS``: This option controls whether or not calls to ``assert()``
    are compiled out. For debug builds, this option defaults to 1, and calls to
    ``assert()`` are left in place. For release builds, this option defaults to 0
@@ -257,6 +265,16 @@
    partitioning in EL3, however. Platform initialisation code should configure
    and use partitions in EL3 as required. This option defaults to ``0``.
 
+-  ``ENABLE_MPMM``: Boolean option to enable support for the Maximum Power
+   Mitigation Mechanism supported by certain Arm cores, which allows the SoC
+   firmware to detect and limit high-activity events to assist in SoC processor
+   power domain dynamic power budgeting and limit the triggering of whole-rail
+   (i.e. clock chopping) responses to overcurrent conditions. Defaults to ``0``.
+
+-  ``ENABLE_MPMM_FCONF``: Enables configuration of MPMM through FCONF, which
+   allows platforms with cores supporting MPMM to describe them via the
+   ``HW_CONFIG`` device tree blob. Default is 0.
+
 -  ``ENABLE_PIE``: Boolean option to enable Position Independent Executable(PIE)
    support within generic code in TF-A. This option is currently only supported
    in BL2_AT_EL3, BL31, and BL32 (TSP) for AARCH64 binaries, and in BL32
@@ -913,4 +931,3 @@
 
 .. _DEN0115: https://developer.arm.com/docs/den0115/latest
 .. _PSA FW update specification: https://developer.arm.com/documentation/den0118/a/
-
diff --git a/docs/getting_started/porting-guide.rst b/docs/getting_started/porting-guide.rst
index 57eba3c..92ff39f 100644
--- a/docs/getting_started/porting-guide.rst
+++ b/docs/getting_started/porting-guide.rst
@@ -562,15 +562,6 @@
    doesn't print anything to the console. If ``PLAT_LOG_LEVEL_ASSERT`` isn't
    defined, it defaults to ``LOG_LEVEL``.
 
-If the platform port uses the Activity Monitor Unit, the following constant
-may be defined:
-
--  **PLAT_AMU_GROUP1_COUNTERS_MASK**
-   This mask reflects the set of group counters that should be enabled.  The
-   maximum number of group 1 counters supported by AMUv1 is 16 so the mask
-   can be at most 0xffff. If the platform does not define this mask, no group 1
-   counters are enabled.
-
 File : plat_macros.S [mandatory]
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/docs/global_substitutions.txt b/docs/global_substitutions.txt
index 24ac830..af15146 100644
--- a/docs/global_substitutions.txt
+++ b/docs/global_substitutions.txt
@@ -1,5 +1,7 @@
 .. |AArch32| replace:: :term:`AArch32`
 .. |AArch64| replace:: :term:`AArch64`
+.. |AMU| replace:: :term:`AMU`
+.. |AMUs| replace:: :term:`AMUs <AMU>`
 .. |API| replace:: :term:`API`
 .. |BTI| replace:: :term:`BTI`
 .. |CoT| replace:: :term:`CoT`
@@ -23,6 +25,7 @@
 .. |Linaro| replace:: :term:`Linaro`
 .. |MMU| replace:: :term:`MMU`
 .. |MPAM| replace:: :term:`MPAM`
+.. |MPMM| replace:: :term:`MPMM`
 .. |MPIDR| replace:: :term:`MPIDR`
 .. |MTE| replace:: :term:`MTE`
 .. |OEN| replace:: :term:`OEN`
diff --git a/docs/glossary.rst b/docs/glossary.rst
index f4912f5..aeeb133 100644
--- a/docs/glossary.rst
+++ b/docs/glossary.rst
@@ -15,6 +15,10 @@
    AArch64
       64-bit execution state of the ARMv8 ISA
 
+   AMU
+      Activity Monitor Unit, a hardware monitoring unit introduced by FEAT_AMUv1
+      that exposes CPU core runtime metrics as a set of counter registers.
+
    API
       Application Programming Interface
 
@@ -88,6 +92,10 @@
    MPAM
       Memory Partitioning And Monitoring. An optional Armv8.4 extension.
 
+   MPMM
+      Maximum Power Mitigation Mechanism, an optional power management mechanism
+      supported by some Arm Armv9-A cores.
+
    MPIDR
       Multiprocessor Affinity Register
 
diff --git a/fdts/tc.dts b/fdts/tc.dts
index 31fcfe2..13c9e16 100644
--- a/fdts/tc.dts
+++ b/fdts/tc.dts
@@ -79,6 +79,31 @@
 			};
 		};
 
+		amus {
+			amu: amu-0 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				mpmm_gear0: counter@0 {
+					reg = <0>;
+
+					enable-at-el3;
+				};
+
+				mpmm_gear1: counter@1 {
+					reg = <1>;
+
+					enable-at-el3;
+				};
+
+				mpmm_gear2: counter@2 {
+					reg = <2>;
+
+					enable-at-el3;
+				};
+			};
+		};
+
 		CPU0:cpu@0 {
 			device_type = "cpu";
 			compatible = "arm,armv8";
@@ -87,6 +112,8 @@
 			clocks = <&scmi_dvfs 0>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
 			capacity-dmips-mhz = <406>;
+			amu = <&amu>;
+			supports-mpmm;
 		};
 
 		CPU1:cpu@100 {
@@ -97,6 +124,8 @@
 			clocks = <&scmi_dvfs 0>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
 			capacity-dmips-mhz = <406>;
+			amu = <&amu>;
+			supports-mpmm;
 		};
 
 		CPU2:cpu@200 {
@@ -107,6 +136,8 @@
 			clocks = <&scmi_dvfs 0>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
 			capacity-dmips-mhz = <406>;
+			amu = <&amu>;
+			supports-mpmm;
 		};
 
 		CPU3:cpu@300 {
@@ -117,6 +148,8 @@
 			clocks = <&scmi_dvfs 0>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
 			capacity-dmips-mhz = <406>;
+			amu = <&amu>;
+			supports-mpmm;
 		};
 
 		CPU4:cpu@400 {
@@ -127,6 +160,8 @@
 			clocks = <&scmi_dvfs 1>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
 			capacity-dmips-mhz = <912>;
+			amu = <&amu>;
+			supports-mpmm;
 		};
 
 		CPU5:cpu@500 {
@@ -137,6 +172,8 @@
 			clocks = <&scmi_dvfs 1>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
 			capacity-dmips-mhz = <912>;
+			amu = <&amu>;
+			supports-mpmm;
 		};
 
 		CPU6:cpu@600 {
@@ -147,6 +184,8 @@
 			clocks = <&scmi_dvfs 1>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
 			capacity-dmips-mhz = <912>;
+			amu = <&amu>;
+			supports-mpmm;
 		};
 
 		CPU7:cpu@700 {
@@ -157,6 +196,8 @@
 			clocks = <&scmi_dvfs 2>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
 			capacity-dmips-mhz = <1024>;
+			amu = <&amu>;
+			supports-mpmm;
 		};
 
 	};
diff --git a/include/arch/aarch32/arch.h b/include/arch/aarch32/arch.h
index 7221b62..a1bd942 100644
--- a/include/arch/aarch32/arch.h
+++ b/include/arch/aarch32/arch.h
@@ -253,7 +253,8 @@
 /* HCPTR definitions */
 #define HCPTR_RES1		((U(1) << 13) | (U(1) << 12) | U(0x3ff))
 #define TCPAC_BIT		(U(1) << 31)
-#define TAM_BIT			(U(1) << 30)
+#define TAM_SHIFT		U(30)
+#define TAM_BIT			(U(1) << TAM_SHIFT)
 #define TTA_BIT			(U(1) << 20)
 #define TCP11_BIT		(U(1) << 11)
 #define TCP10_BIT		(U(1) << 10)
@@ -727,8 +728,25 @@
 #define AMEVTYPER1E	p15, 0, c13, c15, 6
 #define AMEVTYPER1F	p15, 0, c13, c15, 7
 
+/* AMCNTENSET0 definitions */
+#define AMCNTENSET0_Pn_SHIFT	U(0)
+#define AMCNTENSET0_Pn_MASK	U(0xffff)
+
+/* AMCNTENSET1 definitions */
+#define AMCNTENSET1_Pn_SHIFT	U(0)
+#define AMCNTENSET1_Pn_MASK	U(0xffff)
+
+/* AMCNTENCLR0 definitions */
+#define AMCNTENCLR0_Pn_SHIFT	U(0)
+#define AMCNTENCLR0_Pn_MASK	U(0xffff)
+
+/* AMCNTENCLR1 definitions */
+#define AMCNTENCLR1_Pn_SHIFT	U(0)
+#define AMCNTENCLR1_Pn_MASK	U(0xffff)
+
 /* AMCR definitions */
-#define AMCR_CG1RZ_BIT		(ULL(1) << 17)
+#define AMCR_CG1RZ_SHIFT	U(17)
+#define AMCR_CG1RZ_BIT		(ULL(1) << AMCR_CG1RZ_SHIFT)
 
 /* AMCFGR definitions */
 #define AMCFGR_NCG_SHIFT	U(28)
@@ -737,6 +755,8 @@
 #define AMCFGR_N_MASK		U(0xff)
 
 /* AMCGCR definitions */
+#define AMCGCR_CG0NC_SHIFT	U(0)
+#define AMCGCR_CG0NC_MASK	U(0xff)
 #define AMCGCR_CG1NC_SHIFT	U(8)
 #define AMCGCR_CG1NC_MASK	U(0xff)
 
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index 74bc8cb..5408acf 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -532,7 +532,8 @@
 
 /* HCR definitions */
 #define HCR_RESET_VAL		ULL(0x0)
-#define HCR_AMVOFFEN_BIT	(ULL(1) << 51)
+#define HCR_AMVOFFEN_SHIFT	U(51)
+#define HCR_AMVOFFEN_BIT	(ULL(1) << HCR_AMVOFFEN_SHIFT)
 #define HCR_TEA_BIT		(ULL(1) << 47)
 #define HCR_API_BIT		(ULL(1) << 41)
 #define HCR_APK_BIT		(ULL(1) << 40)
@@ -570,7 +571,8 @@
 
 /* CPTR_EL3 definitions */
 #define TCPAC_BIT		(U(1) << 31)
-#define TAM_BIT			(U(1) << 30)
+#define TAM_SHIFT		U(30)
+#define TAM_BIT			(U(1) << TAM_SHIFT)
 #define TTA_BIT			(U(1) << 20)
 #define TFP_BIT			(U(1) << 10)
 #define CPTR_EZ_BIT		(U(1) << 8)
@@ -579,7 +581,8 @@
 /* CPTR_EL2 definitions */
 #define CPTR_EL2_RES1		((U(1) << 13) | (U(1) << 12) | (U(0x3ff)))
 #define CPTR_EL2_TCPAC_BIT	(U(1) << 31)
-#define CPTR_EL2_TAM_BIT	(U(1) << 30)
+#define CPTR_EL2_TAM_SHIFT	U(30)
+#define CPTR_EL2_TAM_BIT	(U(1) << CPTR_EL2_TAM_SHIFT)
 #define CPTR_EL2_TTA_BIT	(U(1) << 20)
 #define CPTR_EL2_TFP_BIT	(U(1) << 10)
 #define CPTR_EL2_TZ_BIT		(U(1) << 8)
@@ -1043,6 +1046,22 @@
 #define AMEVTYPER1E_EL0		S3_3_C13_C15_6
 #define AMEVTYPER1F_EL0		S3_3_C13_C15_7
 
+/* AMCNTENSET0_EL0 definitions */
+#define AMCNTENSET0_EL0_Pn_SHIFT	U(0)
+#define AMCNTENSET0_EL0_Pn_MASK		ULL(0xffff)
+
+/* AMCNTENSET1_EL0 definitions */
+#define AMCNTENSET1_EL0_Pn_SHIFT	U(0)
+#define AMCNTENSET1_EL0_Pn_MASK		ULL(0xffff)
+
+/* AMCNTENCLR0_EL0 definitions */
+#define AMCNTENCLR0_EL0_Pn_SHIFT	U(0)
+#define AMCNTENCLR0_EL0_Pn_MASK		ULL(0xffff)
+
+/* AMCNTENCLR1_EL0 definitions */
+#define AMCNTENCLR1_EL0_Pn_SHIFT	U(0)
+#define AMCNTENCLR1_EL0_Pn_MASK		ULL(0xffff)
+
 /* AMCFGR_EL0 definitions */
 #define AMCFGR_EL0_NCG_SHIFT	U(28)
 #define AMCFGR_EL0_NCG_MASK	U(0xf)
@@ -1050,6 +1069,8 @@
 #define AMCFGR_EL0_N_MASK	U(0xff)
 
 /* AMCGCR_EL0 definitions */
+#define AMCGCR_EL0_CG0NC_SHIFT	U(0)
+#define AMCGCR_EL0_CG0NC_MASK	U(0xff)
 #define AMCGCR_EL0_CG1NC_SHIFT	U(8)
 #define AMCGCR_EL0_CG1NC_MASK	U(0xff)
 
@@ -1074,7 +1095,8 @@
 #define AMCG1IDR_VOFF_SHIFT	U(16)
 
 /* New bit added to AMCR_EL0 */
-#define AMCR_CG1RZ_BIT		(ULL(0x1) << 17)
+#define AMCR_CG1RZ_SHIFT	U(17)
+#define AMCR_CG1RZ_BIT		(ULL(0x1) << AMCR_CG1RZ_SHIFT)
 
 /*
  * Definitions for virtual offset registers for architected activity monitor
@@ -1194,4 +1216,16 @@
 #define DSU_CLUSTER_PWR_ON	1
 #define DSU_CLUSTER_PWR_MASK	U(1)
 
+/*******************************************************************************
+ * Definitions for CPU Power/Performance Management registers
+ ******************************************************************************/
+
+#define CPUPPMCR_EL3			S3_6_C15_C2_0
+#define CPUPPMCR_EL3_MPMMPINCTL_SHIFT	UINT64_C(0)
+#define CPUPPMCR_EL3_MPMMPINCTL_MASK	UINT64_C(0x1)
+
+#define CPUMPMMCR_EL3			S3_6_C15_C2_1
+#define CPUMPMMCR_EL3_MPMM_EN_SHIFT	UINT64_C(0)
+#define CPUMPMMCR_EL3_MPMM_EN_MASK	UINT64_C(0x1)
+
 #endif /* ARCH_H */
diff --git a/include/arch/aarch64/arch_helpers.h b/include/arch/aarch64/arch_helpers.h
index cae05dc..37fa047 100644
--- a/include/arch/aarch64/arch_helpers.h
+++ b/include/arch/aarch64/arch_helpers.h
@@ -542,6 +542,10 @@
 /* DynamIQ Shared Unit power management */
 DEFINE_RENAME_SYSREG_RW_FUNCS(clusterpwrdn_el1, CLUSTERPWRDN_EL1)
 
+/* CPU Power/Performance Management registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(cpuppmcr_el3, CPUPPMCR_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(cpumpmmcr_el3, CPUMPMMCR_EL3)
+
 /* Armv9.2 RME Registers */
 DEFINE_RENAME_SYSREG_RW_FUNCS(gptbr_el3, GPTBR_EL3)
 DEFINE_RENAME_SYSREG_RW_FUNCS(gpccr_el3, GPCCR_EL3)
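
The two new accessors give EL3 read/write access to the implementation-defined
MPMM control registers defined in ``arch.h`` above. The ``lib/mpmm/mpmm.c``
implementation itself is not shown in this section, so the following is only a
sketch of how ``mpmm_enable`` might use them:

    #include <stdint.h>

    #include <arch_helpers.h>

    /* Illustrative only: set CPUMPMMCR_EL3.MPMM_EN for the current core. */
    static void mpmm_enable_current_core(void)
    {
        uint64_t value = read_cpumpmmcr_el3();

        /*
         * A real implementation would likely first check
         * CPUPPMCR_EL3.MPMMPINCTL to confirm that MPMM is controlled through
         * this register rather than by an external pin.
         */
        value &= ~(CPUMPMMCR_EL3_MPMM_EN_MASK << CPUMPMMCR_EL3_MPMM_EN_SHIFT);
        value |= (UINT64_C(1) & CPUMPMMCR_EL3_MPMM_EN_MASK) <<
                 CPUMPMMCR_EL3_MPMM_EN_SHIFT;

        write_cpumpmmcr_el3(value);
    }
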
diff --git a/include/common/fdt_wrappers.h b/include/common/fdt_wrappers.h
index 98e7a3e..9c7180c 100644
--- a/include/common/fdt_wrappers.h
+++ b/include/common/fdt_wrappers.h
@@ -41,6 +41,9 @@
 uint64_t fdtw_translate_address(const void *dtb, int bus_node,
 				uint64_t base_address);
 
+int fdtw_for_each_cpu(const void *fdt,
+		      int (*callback)(const void *dtb, int node, uintptr_t mpidr));
+
 static inline uint32_t fdt_blob_size(const void *dtb)
 {
 	const uint32_t *dtb_header = dtb;
diff --git a/include/lib/extensions/amu.h b/include/lib/extensions/amu.h
index 3a254c9..6452f7e 100644
--- a/include/lib/extensions/amu.h
+++ b/include/lib/extensions/amu.h
@@ -10,105 +10,38 @@
 #include <stdbool.h>
 #include <stdint.h>
 
-#include <lib/cassert.h>
-#include <lib/utils_def.h>
-
 #include <context.h>
-#include <platform_def.h>
-
-/* All group 0 counters */
-#define AMU_GROUP0_COUNTERS_MASK	U(0xf)
-#define AMU_GROUP0_NR_COUNTERS		U(4)
-
-#ifdef PLAT_AMU_GROUP1_COUNTERS_MASK
-#define AMU_GROUP1_COUNTERS_MASK	PLAT_AMU_GROUP1_COUNTERS_MASK
-#else
-#define AMU_GROUP1_COUNTERS_MASK	U(0)
-#endif
-
-/* Calculate number of group 1 counters */
-#if (AMU_GROUP1_COUNTERS_MASK	& (1 << 15))
-#define	AMU_GROUP1_NR_COUNTERS		16U
-#elif (AMU_GROUP1_COUNTERS_MASK	& (1 << 14))
-#define	AMU_GROUP1_NR_COUNTERS		15U
-#elif (AMU_GROUP1_COUNTERS_MASK	& (1 << 13))
-#define	AMU_GROUP1_NR_COUNTERS		14U
-#elif (AMU_GROUP1_COUNTERS_MASK	& (1 << 12))
-#define	AMU_GROUP1_NR_COUNTERS		13U
-#elif (AMU_GROUP1_COUNTERS_MASK	& (1 << 11))
-#define	AMU_GROUP1_NR_COUNTERS		12U
-#elif (AMU_GROUP1_COUNTERS_MASK	& (1 << 10))
-#define	AMU_GROUP1_NR_COUNTERS		11U
-#elif (AMU_GROUP1_COUNTERS_MASK	& (1 << 9))
-#define	AMU_GROUP1_NR_COUNTERS		10U
-#elif (AMU_GROUP1_COUNTERS_MASK	& (1 << 8))
-#define	AMU_GROUP1_NR_COUNTERS		9U
-#elif (AMU_GROUP1_COUNTERS_MASK	& (1 << 7))
-#define	AMU_GROUP1_NR_COUNTERS		8U
-#elif (AMU_GROUP1_COUNTERS_MASK	& (1 << 6))
-#define	AMU_GROUP1_NR_COUNTERS		7U
-#elif (AMU_GROUP1_COUNTERS_MASK	& (1 << 5))
-#define	AMU_GROUP1_NR_COUNTERS		6U
-#elif (AMU_GROUP1_COUNTERS_MASK	& (1 << 4))
-#define	AMU_GROUP1_NR_COUNTERS		5U
-#elif (AMU_GROUP1_COUNTERS_MASK	& (1 << 3))
-#define	AMU_GROUP1_NR_COUNTERS		4U
-#elif (AMU_GROUP1_COUNTERS_MASK	& (1 << 2))
-#define	AMU_GROUP1_NR_COUNTERS		3U
-#elif (AMU_GROUP1_COUNTERS_MASK	& (1 << 1))
-#define	AMU_GROUP1_NR_COUNTERS		2U
-#elif (AMU_GROUP1_COUNTERS_MASK	& (1 << 0))
-#define	AMU_GROUP1_NR_COUNTERS		1U
-#else
-#define	AMU_GROUP1_NR_COUNTERS		0U
-#endif
 
-CASSERT(AMU_GROUP1_COUNTERS_MASK <= 0xffff, invalid_amu_group1_counters_mask);
-
-struct amu_ctx {
-	uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
-#if __aarch64__
-	/* Architected event counter 1 does not have an offset register. */
-	uint64_t group0_voffsets[AMU_GROUP0_NR_COUNTERS-1];
-#endif
-
-#if AMU_GROUP1_NR_COUNTERS
-	uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
-#if __aarch64__
-	uint64_t group1_voffsets[AMU_GROUP1_NR_COUNTERS];
-#endif
-#endif
-};
+#include <platform_def.h>
 
-unsigned int amu_get_version(void);
 #if __aarch64__
 void amu_enable(bool el2_unused, cpu_context_t *ctx);
 #else
 void amu_enable(bool el2_unused);
 #endif
 
-/* Group 0 configuration helpers */
-uint64_t amu_group0_cnt_read(unsigned int idx);
-void amu_group0_cnt_write(unsigned int idx, uint64_t val);
-
-#if __aarch64__
-uint64_t amu_group0_voffset_read(unsigned int idx);
-void amu_group0_voffset_write(unsigned int idx, uint64_t val);
-#endif
-
-#if AMU_GROUP1_NR_COUNTERS
-bool amu_group1_supported(void);
-
-/* Group 1 configuration helpers */
-uint64_t amu_group1_cnt_read(unsigned int idx);
-void amu_group1_cnt_write(unsigned int idx, uint64_t val);
-void amu_group1_set_evtype(unsigned int idx, unsigned int val);
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+/*
+ * AMU data for a single core.
+ */
+struct amu_core {
+	uint16_t enable; /* Mask of auxiliary counters to enable */
+};
 
-#if __aarch64__
-uint64_t amu_group1_voffset_read(unsigned int idx);
-void amu_group1_voffset_write(unsigned int idx, uint64_t val);
-#endif
+/*
+ * Topological platform data specific to the AMU.
+ */
+struct amu_topology {
+	struct amu_core cores[PLATFORM_CORE_COUNT]; /* Per-core data */
+};
 
-#endif
+#if !ENABLE_AMU_FCONF
+/*
+ * Retrieve the platform's AMU topology. A `NULL` return value is treated as a
+ * non-fatal error, in which case no auxiliary counters will be enabled.
+ */
+const struct amu_topology *plat_amu_topology(void);
+#endif /* ENABLE_AMU_FCONF */
+#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */
 
 #endif /* AMU_H */
diff --git a/include/lib/fconf/fconf_amu_getter.h b/include/lib/fconf/fconf_amu_getter.h
new file mode 100644
index 0000000..2faee73
--- /dev/null
+++ b/include/lib/fconf/fconf_amu_getter.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FCONF_AMU_GETTER_H
+#define FCONF_AMU_GETTER_H
+
+#include <lib/extensions/amu.h>
+
+#define amu__config_getter(id)	fconf_amu_config.id
+
+struct fconf_amu_config {
+	const struct amu_topology *topology;
+};
+
+extern struct fconf_amu_config fconf_amu_config;
+
+#endif /* FCONF_AMU_GETTER_H */
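
This getter plugs into the generic ``FCONF_GET_PROPERTY`` mechanism. Assuming
the usual macro expansion in ``lib/fconf/fconf.h``, retrieving the topology
populated from ``HW_CONFIG`` reduces to a struct field access, for example:

    #include <lib/fconf/fconf.h>
    #include <lib/fconf/fconf_amu_getter.h>

    /*
     * Illustrative only: FCONF_GET_PROPERTY(amu, config, topology) is expected
     * to expand to amu__config_getter(topology), i.e. fconf_amu_config.topology.
     */
    static const struct amu_topology *get_fconf_amu_topology(void)
    {
        return FCONF_GET_PROPERTY(amu, config, topology);
    }
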
diff --git a/include/lib/fconf/fconf_mpmm_getter.h b/include/lib/fconf/fconf_mpmm_getter.h
new file mode 100644
index 0000000..50d991a
--- /dev/null
+++ b/include/lib/fconf/fconf_mpmm_getter.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FCONF_MPMM_GETTER_H
+#define FCONF_MPMM_GETTER_H
+
+#include <lib/mpmm/mpmm.h>
+
+#define mpmm__config_getter(id)	fconf_mpmm_config.id
+
+struct fconf_mpmm_config {
+	const struct mpmm_topology *topology;
+};
+
+extern struct fconf_mpmm_config fconf_mpmm_config;
+
+#endif /* FCONF_MPMM_GETTER_H */
diff --git a/include/lib/mpmm/mpmm.h b/include/lib/mpmm/mpmm.h
new file mode 100644
index 0000000..955c530
--- /dev/null
+++ b/include/lib/mpmm/mpmm.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MPMM_H
+#define MPMM_H
+
+#include <stdbool.h>
+
+#include <platform_def.h>
+
+/*
+ * Enable the Maximum Power Mitigation Mechanism.
+ *
+ * This function will enable MPMM for the current core. The AMU counters
+ * representing the MPMM gears must have been configured and enabled prior to
+ * calling this function.
+ */
+void mpmm_enable(void);
+
+/*
+ * MPMM core data.
+ *
+ * This structure represents per-core data retrieved from the hardware
+ * configuration device tree.
+ */
+struct mpmm_core {
+	/*
+	 * Whether MPMM is supported.
+	 *
+	 * Cores with support for MPMM offer one or more auxiliary AMU counters
+	 * representing MPMM gears.
+	 */
+	bool supported;
+};
+
+/*
+ * MPMM topology.
+ *
+ * This topology structure describes the system-wide representation of the
+ * information retrieved from the hardware configuration device tree.
+ */
+struct mpmm_topology {
+	struct mpmm_core cores[PLATFORM_CORE_COUNT]; /* Per-core data */
+};
+
+#if !ENABLE_MPMM_FCONF
+/*
+ * Retrieve the platform's MPMM topology. A `NULL` return value is treated as a
+ * non-fatal error, in which case MPMM will not be enabled for any core.
+ */
+const struct mpmm_topology *plat_mpmm_topology(void);
+#endif /* ENABLE_MPMM_FCONF */
+
+#endif /* MPMM_H */
diff --git a/lib/extensions/amu/aarch32/amu.c b/lib/extensions/amu/aarch32/amu.c
index ed56ddd..57b1158 100644
--- a/lib/extensions/amu/aarch32/amu.c
+++ b/lib/extensions/amu/aarch32/amu.c
@@ -5,95 +5,224 @@
  */
 
 #include <assert.h>
+#include <cdefs.h>
 #include <stdbool.h>
 
+#include "../amu_private.h"
 #include <arch.h>
 #include <arch_helpers.h>
-
+#include <common/debug.h>
 #include <lib/el3_runtime/pubsub_events.h>
 #include <lib/extensions/amu.h>
-#include <lib/extensions/amu_private.h>
 
 #include <plat/common/platform.h>
 
-static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
+struct amu_ctx {
+	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
+#endif
 
-/*
- * Get AMU version value from pfr0.
- * Return values
- *   ID_PFR0_AMU_V1: FEAT_AMUv1 supported (introduced in ARM v8.4)
- *   ID_PFR0_AMU_V1P1: FEAT_AMUv1p1 supported (introduced in ARM v8.6)
- *   ID_PFR0_AMU_NOT_SUPPORTED: not supported
- */
-unsigned int amu_get_version(void)
+	uint16_t group0_enable;
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	uint16_t group1_enable;
+#endif
+};
+
+static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];
+
+CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS,
+	amu_ctx_group0_enable_cannot_represent_all_group0_counters);
+
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
+	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
+#endif
+
+static inline __unused uint32_t read_id_pfr0_amu(void)
 {
-	return (unsigned int)(read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
+	return (read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
 		ID_PFR0_AMU_MASK;
 }
 
+static inline __unused void write_hcptr_tam(uint32_t value)
+{
+	write_hcptr((read_hcptr() & ~TAM_BIT) |
+		((value << TAM_SHIFT) & TAM_BIT));
+}
+
+static inline __unused void write_amcr_cg1rz(uint32_t value)
+{
+	write_amcr((read_amcr() & ~AMCR_CG1RZ_BIT) |
+		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
+}
+
+static inline __unused uint32_t read_amcfgr_ncg(void)
+{
+	return (read_amcfgr() >> AMCFGR_NCG_SHIFT) &
+		AMCFGR_NCG_MASK;
+}
+
+static inline __unused uint32_t read_amcgcr_cg0nc(void)
+{
+	return (read_amcgcr() >> AMCGCR_CG0NC_SHIFT) &
+		AMCGCR_CG0NC_MASK;
+}
+
+static inline __unused uint32_t read_amcgcr_cg1nc(void)
+{
+	return (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
+		AMCGCR_CG1NC_MASK;
+}
+
+static inline __unused uint32_t read_amcntenset0_px(void)
+{
+	return (read_amcntenset0() >> AMCNTENSET0_Pn_SHIFT) &
+		AMCNTENSET0_Pn_MASK;
+}
+
+static inline __unused uint32_t read_amcntenset1_px(void)
+{
+	return (read_amcntenset1() >> AMCNTENSET1_Pn_SHIFT) &
+		AMCNTENSET1_Pn_MASK;
+}
+
+static inline __unused void write_amcntenset0_px(uint32_t px)
+{
+	uint32_t value = read_amcntenset0();
+
+	value &= ~AMCNTENSET0_Pn_MASK;
+	value |= (px << AMCNTENSET0_Pn_SHIFT) &
+		AMCNTENSET0_Pn_MASK;
+
+	write_amcntenset0(value);
+}
+
+static inline __unused void write_amcntenset1_px(uint32_t px)
+{
+	uint32_t value = read_amcntenset1();
+
+	value &= ~AMCNTENSET1_Pn_MASK;
+	value |= (px << AMCNTENSET1_Pn_SHIFT) &
+		AMCNTENSET1_Pn_MASK;
+
+	write_amcntenset1(value);
+}
+
+static inline __unused void write_amcntenclr0_px(uint32_t px)
+{
+	uint32_t value = read_amcntenclr0();
+
+	value &= ~AMCNTENCLR0_Pn_MASK;
+	value |= (px << AMCNTENCLR0_Pn_SHIFT) & AMCNTENCLR0_Pn_MASK;
+
+	write_amcntenclr0(value);
+}
+
+static inline __unused void write_amcntenclr1_px(uint32_t px)
+{
+	uint32_t value = read_amcntenclr1();
+
+	value &= ~AMCNTENCLR1_Pn_MASK;
+	value |= (px << AMCNTENCLR1_Pn_SHIFT) & AMCNTENCLR1_Pn_MASK;
+
+	write_amcntenclr1(value);
+}
+
-#if AMU_GROUP1_NR_COUNTERS
-/* Check if group 1 counters is implemented */
-bool amu_group1_supported(void)
+static __unused bool amu_supported(void)
 {
-	uint32_t features = read_amcfgr() >> AMCFGR_NCG_SHIFT;
+	return read_id_pfr0_amu() >= ID_PFR0_AMU_V1;
+}
 
-	return (features & AMCFGR_NCG_MASK) == 1U;
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+static __unused bool amu_group1_supported(void)
+{
+	return read_amcfgr_ncg() > 0U;
 }
 #endif
 
 /*
- * Enable counters. This function is meant to be invoked
- * by the context management library before exiting from EL3.
+ * Enable counters. This function is meant to be invoked by the context
+ * management library before exiting from EL3.
  */
 void amu_enable(bool el2_unused)
 {
-	if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) {
-		return;
-	}
+	uint32_t id_pfr0_amu;		/* AMU version */
 
-#if AMU_GROUP1_NR_COUNTERS
-	/* Check and set presence of group 1 counters */
-	if (!amu_group1_supported()) {
-		ERROR("AMU Counter Group 1 is not implemented\n");
-		panic();
-	}
+	uint32_t amcfgr_ncg;		/* Number of counter groups */
+	uint32_t amcgcr_cg0nc;		/* Number of group 0 counters */
 
-	/* Check number of group 1 counters */
-	uint32_t cnt_num = (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
-				AMCGCR_CG1NC_MASK;
-	VERBOSE("%s%u. %s%u\n",
-		"Number of AMU Group 1 Counters ", cnt_num,
-		"Requested number ", AMU_GROUP1_NR_COUNTERS);
+	uint32_t amcntenset0_px = 0x0;	/* Group 0 enable mask */
+	uint32_t amcntenset1_px = 0x0;	/* Group 1 enable mask */
 
-	if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
-		ERROR("%s%u is less than %s%u\n",
-		"Number of AMU Group 1 Counters ", cnt_num,
-		"Requested number ", AMU_GROUP1_NR_COUNTERS);
-		panic();
+	id_pfr0_amu = read_id_pfr0_amu();
+	if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
+		/*
+		 * If the AMU is unsupported, nothing needs to be done.
+		 */
+
+		return;
 	}
-#endif
 
 	if (el2_unused) {
-		uint64_t v;
 		/*
-		 * Non-secure access from EL0 or EL1 to the Activity Monitor
-		 * registers do not trap to EL2.
+		 * HCPTR.TAM: Set to zero so any accesses to the Activity
+		 * Monitor registers do not trap to EL2.
 		 */
-		v = read_hcptr();
-		v &= ~TAM_BIT;
-		write_hcptr(v);
+		write_hcptr_tam(0U);
 	}
 
+	/*
+	 * Retrieve the number of architected counters. All of these counters
+	 * are enabled by default.
+	 */
+
-	/* Enable group 0 counters */
-	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
+	amcgcr_cg0nc = read_amcgcr_cg0nc();
+	amcntenset0_px = (UINT32_C(1) << (amcgcr_cg0nc)) - 1U;
 
-#if AMU_GROUP1_NR_COUNTERS
-	/* Enable group 1 counters */
-	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
+	assert(amcgcr_cg0nc <= AMU_AMCGCR_CG0NC_MAX);
+
+	/*
+	 * The platform may opt to enable specific auxiliary counters. This can
+	 * be done via the common FCONF getter, or via the platform-implemented
+	 * function.
+	 */
+
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	const struct amu_topology *topology;
+
+#if ENABLE_AMU_FCONF
+	topology = FCONF_GET_PROPERTY(amu, config, topology);
+#else
+	topology = plat_amu_topology();
+#endif /* ENABLE_AMU_FCONF */
+
+	if (topology != NULL) {
+		unsigned int core_pos = plat_my_core_pos();
+
+		amcntenset1_px = topology->cores[core_pos].enable;
+	} else {
+		ERROR("AMU: failed to generate AMU topology\n");
+	}
+#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */
+
+	/*
+	 * Enable the requested counters.
+	 */
+
+	write_amcntenset0_px(amcntenset0_px);
+
+	amcfgr_ncg = read_amcfgr_ncg();
+	if (amcfgr_ncg > 0U) {
+		write_amcntenset1_px(amcntenset1_px);
+
+#if !ENABLE_AMU_AUXILIARY_COUNTERS
+		VERBOSE("AMU: auxiliary counters detected but support is disabled\n");
 #endif
+	}
 
 	/* Initialize FEAT_AMUv1p1 features if present. */
-	if (amu_get_version() < ID_PFR0_AMU_V1P1) {
+	if (id_pfr0_amu < ID_PFR0_AMU_V1P1) {
 		return;
 	}
 
@@ -106,154 +235,183 @@
 	 * mapped view are unaffected.
 	 */
 	VERBOSE("AMU group 1 counter access restricted.\n");
-	write_amcr(read_amcr() | AMCR_CG1RZ_BIT);
+	write_amcr_cg1rz(1U);
 #else
-	write_amcr(read_amcr() & ~AMCR_CG1RZ_BIT);
+	write_amcr_cg1rz(0U);
 #endif
 }
 
 /* Read the group 0 counter identified by the given `idx`. */
-uint64_t amu_group0_cnt_read(unsigned int idx)
+static uint64_t amu_group0_cnt_read(unsigned int idx)
 {
-	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
-	assert(idx < AMU_GROUP0_NR_COUNTERS);
+	assert(amu_supported());
+	assert(idx < read_amcgcr_cg0nc());
 
 	return amu_group0_cnt_read_internal(idx);
 }
 
 /* Write the group 0 counter identified by the given `idx` with `val` */
-void amu_group0_cnt_write(unsigned  int idx, uint64_t val)
+static void amu_group0_cnt_write(unsigned  int idx, uint64_t val)
 {
-	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
-	assert(idx < AMU_GROUP0_NR_COUNTERS);
+	assert(amu_supported());
+	assert(idx < read_amcgcr_cg0nc());
 
 	amu_group0_cnt_write_internal(idx, val);
 	isb();
 }
 
-#if AMU_GROUP1_NR_COUNTERS
+#if ENABLE_AMU_AUXILIARY_COUNTERS
 /* Read the group 1 counter identified by the given `idx` */
-uint64_t amu_group1_cnt_read(unsigned  int idx)
+static uint64_t amu_group1_cnt_read(unsigned  int idx)
 {
-	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
+	assert(amu_supported());
 	assert(amu_group1_supported());
-	assert(idx < AMU_GROUP1_NR_COUNTERS);
+	assert(idx < read_amcgcr_cg1nc());
 
 	return amu_group1_cnt_read_internal(idx);
 }
 
 /* Write the group 1 counter identified by the given `idx` with `val` */
-void amu_group1_cnt_write(unsigned  int idx, uint64_t val)
+static void amu_group1_cnt_write(unsigned  int idx, uint64_t val)
 {
-	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
+	assert(amu_supported());
 	assert(amu_group1_supported());
-	assert(idx < AMU_GROUP1_NR_COUNTERS);
+	assert(idx < read_amcgcr_cg1nc());
 
 	amu_group1_cnt_write_internal(idx, val);
 	isb();
 }
+#endif
 
-/*
- * Program the event type register for the given `idx` with
- * the event number `val`
- */
-void amu_group1_set_evtype(unsigned int idx, unsigned int val)
+static void *amu_context_save(const void *arg)
 {
-	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
-	assert(amu_group1_supported());
-	assert(idx < AMU_GROUP1_NR_COUNTERS);
+	uint32_t i;
 
-	amu_group1_set_evtype_internal(idx, val);
-	isb();
-}
-#endif	/* AMU_GROUP1_NR_COUNTERS */
+	unsigned int core_pos;
+	struct amu_ctx *ctx;
 
-static void *amu_context_save(const void *arg)
-{
-	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
-	unsigned int i;
+	uint32_t id_pfr0_amu;	/* AMU version */
+	uint32_t amcgcr_cg0nc;	/* Number of group 0 counters */
 
-	if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) {
-		return (void *)-1;
-	}
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	uint32_t amcfgr_ncg;	/* Number of counter groups */
+	uint32_t amcgcr_cg1nc;	/* Number of group 1 counters */
+#endif
 
-#if AMU_GROUP1_NR_COUNTERS
-	if (!amu_group1_supported()) {
-		return (void *)-1;
+	id_pfr0_amu = read_id_pfr0_amu();
+	if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
+		return (void *)0;
 	}
-#endif
-	/* Assert that group 0/1 counter configuration is what we expect */
-	assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);
 
-#if AMU_GROUP1_NR_COUNTERS
-	assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
+	core_pos = plat_my_core_pos();
+	ctx = &amu_ctxs_[core_pos];
+
+	amcgcr_cg0nc = read_amcgcr_cg0nc();
+
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	amcfgr_ncg = read_amcfgr_ncg();
+	amcgcr_cg1nc = (amcfgr_ncg > 0U) ? read_amcgcr_cg1nc() : 0U;
 #endif
+
 	/*
-	 * Disable group 0/1 counters to avoid other observers like SCP sampling
-	 * counter values from the future via the memory mapped view.
+	 * Disable all AMU counters.
 	 */
-	write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);
 
-#if AMU_GROUP1_NR_COUNTERS
-	write_amcntenclr1(AMU_GROUP1_COUNTERS_MASK);
+	ctx->group0_enable = read_amcntenset0_px();
+	write_amcntenclr0_px(ctx->group0_enable);
+
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	if (amcfgr_ncg > 0U) {
+		ctx->group1_enable = read_amcntenset1_px();
+		write_amcntenclr1_px(ctx->group1_enable);
+	}
 #endif
-	isb();
+
+	/*
+	 * Save the counters to the local context.
+	 */
+
+	isb(); /* Ensure counters have been stopped */
 
-	/* Save all group 0 counters */
-	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
+	for (i = 0U; i < amcgcr_cg0nc; i++) {
 		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
 	}
 
-#if AMU_GROUP1_NR_COUNTERS
-	/* Save group 1 counters */
-	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
-		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
-			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
-		}
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	for (i = 0U; i < amcgcr_cg1nc; i++) {
+		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
 	}
 #endif
+
 	return (void *)0;
 }
 
 static void *amu_context_restore(const void *arg)
 {
-	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
-	unsigned int i;
+	uint32_t i;
 
-	if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) {
-		return (void *)-1;
-	}
+	unsigned int core_pos;
+	struct amu_ctx *ctx;
 
-#if AMU_GROUP1_NR_COUNTERS
-	if (!amu_group1_supported()) {
-		return (void *)-1;
-	}
+	uint32_t id_pfr0_amu;	/* AMU version */
+
+	uint32_t amcfgr_ncg;	/* Number of counter groups */
+	uint32_t amcgcr_cg0nc;	/* Number of group 0 counters */
+
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	uint32_t amcgcr_cg1nc;	/* Number of group 1 counters */
 #endif
-	/* Counters were disabled in `amu_context_save()` */
-	assert(read_amcntenset0_el0() == 0U);
+
+	id_pfr0_amu = read_id_pfr0_amu();
+	if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
+		return (void *)0;
+	}
+
+	core_pos = plat_my_core_pos();
+	ctx = &amu_ctxs_[core_pos];
 
-#if AMU_GROUP1_NR_COUNTERS
-	assert(read_amcntenset1_el0() == 0U);
+	amcfgr_ncg = read_amcfgr_ncg();
+	amcgcr_cg0nc = read_amcgcr_cg0nc();
+
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	amcgcr_cg1nc = (amcfgr_ncg > 0U) ? read_amcgcr_cg1nc() : 0U;
 #endif
 
-	/* Restore all group 0 counters */
-	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
-		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
+	/*
+	 * Sanity check that all counters were disabled when the context was
+	 * previously saved.
+	 */
+
+	assert(read_amcntenset0_px() == 0U);
+
+	if (amcfgr_ncg > 0U) {
+		assert(read_amcntenset1_px() == 0U);
 	}
 
-	/* Restore group 0 counter configuration */
-	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
+	/*
+	 * Restore the counter values from the local context.
+	 */
 
-#if AMU_GROUP1_NR_COUNTERS
-	/* Restore group 1 counters */
-	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
-		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
-			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
-		}
+	for (i = 0U; i < amcgcr_cg0nc; i++) {
+		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
 	}
 
-	/* Restore group 1 counter configuration */
-	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	for (i = 0U; i < amcgcr_cg1nc; i++) {
+		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
+	}
+#endif
+
+	/*
+	 * Re-enable counters that were disabled during context save.
+	 */
+
+	write_amcntenset0_px(ctx->group0_enable);
+
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	if (amcfgr_ncg > 0U) {
+		write_amcntenset1_px(ctx->group1_enable);
+	}
 #endif
 
 	return (void *)0;
diff --git a/lib/extensions/amu/aarch32/amu_helpers.S b/lib/extensions/amu/aarch32/amu_helpers.S
index d387341..8ac7678 100644
--- a/lib/extensions/amu/aarch32/amu_helpers.S
+++ b/lib/extensions/amu/aarch32/amu_helpers.S
@@ -84,6 +84,7 @@
 	bx		lr
 endfunc amu_group0_cnt_write_internal
 
+#if ENABLE_AMU_AUXILIARY_COUNTERS
 /*
  * uint64_t amu_group1_cnt_read_internal(int idx);
  *
@@ -267,3 +268,4 @@
 	stcopr	r1, AMEVTYPER1F /* index 15 */
 	bx	lr
 endfunc amu_group1_set_evtype_internal
+#endif
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index 295c0d5..35efd21 100644
--- a/lib/extensions/amu/aarch64/amu.c
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -5,86 +5,218 @@
  */
 
 #include <assert.h>
+#include <cdefs.h>
 #include <stdbool.h>
 
+#include "../amu_private.h"
 #include <arch.h>
 #include <arch_features.h>
 #include <arch_helpers.h>
-
+#include <common/debug.h>
 #include <lib/el3_runtime/pubsub_events.h>
 #include <lib/extensions/amu.h>
-#include <lib/extensions/amu_private.h>
 
 #include <plat/common/platform.h>
 
-static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
+#if ENABLE_AMU_FCONF
+#	include <lib/fconf/fconf.h>
+#	include <lib/fconf/fconf_amu_getter.h>
+#endif
 
-/*
- * Get AMU version value from aa64pfr0.
- * Return values
- *   ID_AA64PFR0_AMU_V1: FEAT_AMUv1 supported (introduced in ARM v8.4)
- *   ID_AA64PFR0_AMU_V1P1: FEAT_AMUv1p1 supported (introduced in ARM v8.6)
- *   ID_AA64PFR0_AMU_NOT_SUPPORTED: not supported
- */
-unsigned int amu_get_version(void)
+#if ENABLE_MPMM
+#	include <lib/mpmm/mpmm.h>
+#endif
+
+struct amu_ctx {
+	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
+#endif
+
+	/* Architected event counter 1 does not have an offset register */
+	uint64_t group0_voffsets[AMU_GROUP0_MAX_COUNTERS - 1U];
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	uint64_t group1_voffsets[AMU_GROUP1_MAX_COUNTERS];
+#endif
+
+	uint16_t group0_enable;
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	uint16_t group1_enable;
+#endif
+};
+
+static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];
+
+CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS,
+	amu_ctx_group0_enable_cannot_represent_all_group0_counters);
+
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
+	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
+#endif
+
+static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void)
 {
-	return (unsigned int)(read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
+	return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
 		ID_AA64PFR0_AMU_MASK;
 }
 
+static inline __unused uint64_t read_hcr_el2_amvoffen(void)
+{
+	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
+		HCR_AMVOFFEN_SHIFT;
+}
+
-#if AMU_GROUP1_NR_COUNTERS
-/* Check if group 1 counters is implemented */
-bool amu_group1_supported(void)
+static inline __unused void write_cptr_el2_tam(uint64_t value)
 {
-	uint64_t features = read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT;
+	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
+		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
+}
+
+static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
+{
+	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);
 
-	return (features & AMCFGR_EL0_NCG_MASK) == 1U;
+	value &= ~TAM_BIT;
+	value |= (tam << TAM_SHIFT) & TAM_BIT;
+
+	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
+}
+
+static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
+{
+	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
+		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
+}
+
+static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
+{
+	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
+		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
+}
+
+static inline __unused uint64_t read_amcfgr_el0_ncg(void)
+{
+	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
+		AMCFGR_EL0_NCG_MASK;
+}
+
+static inline __unused uint64_t read_amcgcr_el0_cg0nc(void)
+{
+	return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) &
+		AMCGCR_EL0_CG0NC_MASK;
+}
+
+static inline __unused uint64_t read_amcg1idr_el0_voff(void)
+{
+	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
+		AMCG1IDR_VOFF_MASK;
+}
+
+static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
+{
+	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
+		AMCGCR_EL0_CG1NC_MASK;
+}
+
+static inline __unused uint64_t read_amcntenset0_el0_px(void)
+{
+	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
+		AMCNTENSET0_EL0_Pn_MASK;
+}
+
+static inline __unused uint64_t read_amcntenset1_el0_px(void)
+{
+	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
+		AMCNTENSET1_EL0_Pn_MASK;
+}
+
+static inline __unused void write_amcntenset0_el0_px(uint64_t px)
+{
+	uint64_t value = read_amcntenset0_el0();
+
+	value &= ~AMCNTENSET0_EL0_Pn_MASK;
+	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;
+
+	write_amcntenset0_el0(value);
+}
+
+static inline __unused void write_amcntenset1_el0_px(uint64_t px)
+{
+	uint64_t value = read_amcntenset1_el0();
+
+	value &= ~AMCNTENSET1_EL0_Pn_MASK;
+	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;
+
+	write_amcntenset1_el0(value);
+}
+
+static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
+{
+	uint64_t value = read_amcntenclr0_el0();
+
+	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
+	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;
+
+	write_amcntenclr0_el0(value);
+}
+
+static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
+{
+	uint64_t value = read_amcntenclr1_el0();
+
+	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
+	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;
+
+	write_amcntenclr1_el0(value);
+}
+
+static __unused bool amu_supported(void)
+{
+	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1;
+}
+
+static __unused bool amu_v1p1_supported(void)
+{
+	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1;
+}
+
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+static __unused bool amu_group1_supported(void)
+{
+	return read_amcfgr_el0_ncg() > 0U;
 }
 #endif
 
 /*
- * Enable counters. This function is meant to be invoked
- * by the context management library before exiting from EL3.
+ * Enable counters. This function is meant to be invoked by the context
+ * management library before exiting from EL3.
  */
 void amu_enable(bool el2_unused, cpu_context_t *ctx)
 {
-	uint64_t v;
-	unsigned int amu_version = amu_get_version();
+	uint64_t id_aa64pfr0_el1_amu;		/* AMU version */
 
-	if (amu_version == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
-		return;
-	}
+	uint64_t amcfgr_el0_ncg;		/* Number of counter groups */
+	uint64_t amcgcr_el0_cg0nc;		/* Number of group 0 counters */
 
-#if AMU_GROUP1_NR_COUNTERS
-	/* Check and set presence of group 1 counters */
-	if (!amu_group1_supported()) {
-		ERROR("AMU Counter Group 1 is not implemented\n");
-		panic();
-	}
+	uint64_t amcntenset0_el0_px = 0x0;	/* Group 0 enable mask */
+	uint64_t amcntenset1_el0_px = 0x0;	/* Group 1 enable mask */
 
-	/* Check number of group 1 counters */
-	uint64_t cnt_num = (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
-				AMCGCR_EL0_CG1NC_MASK;
-	VERBOSE("%s%llu. %s%u\n",
-		"Number of AMU Group 1 Counters ", cnt_num,
-		"Requested number ", AMU_GROUP1_NR_COUNTERS);
+	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
+	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
+		/*
+		 * If the AMU is unsupported, nothing needs to be done.
+		 */
 
-	if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
-		ERROR("%s%llu is less than %s%u\n",
-		"Number of AMU Group 1 Counters ", cnt_num,
-		"Requested number ", AMU_GROUP1_NR_COUNTERS);
-		panic();
+		return;
 	}
-#endif
 
 	if (el2_unused) {
 		/*
-		 * CPTR_EL2.TAM: Set to zero so any accesses to
-		 * the Activity Monitor registers do not trap to EL2.
+		 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity
+		 * Monitor registers do not trap to EL2.
 		 */
-		v = read_cptr_el2();
-		v &= ~CPTR_EL2_TAM_BIT;
-		write_cptr_el2(v);
+		write_cptr_el2_tam(0U);
 	}
 
 	/*
@@ -92,72 +224,141 @@
 	 * in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
 	 * the Activity Monitor registers do not trap to EL3.
 	 */
-	v = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);
-	v &= ~TAM_BIT;
-	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, v);
+	write_cptr_el3_tam(ctx, 0U);
 
-	/* Enable group 0 counters */
-	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+	/*
+	 * Retrieve the number of architected counters. All of these counters
+	 * are enabled by default.
+	 */
 
-#if AMU_GROUP1_NR_COUNTERS
-	/* Enable group 1 counters */
-	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
-#endif
+	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
+	amcntenset0_el0_px = (UINT64_C(1) << (amcgcr_el0_cg0nc)) - 1U;
 
-	/* Initialize FEAT_AMUv1p1 features if present. */
-	if (amu_version < ID_AA64PFR0_AMU_V1P1) {
-		return;
-	}
+	assert(amcgcr_el0_cg0nc <= AMU_AMCGCR_CG0NC_MAX);
 
-	if (el2_unused) {
-		/* Make sure virtual offsets are disabled if EL2 not used. */
-		write_hcr_el2(read_hcr_el2() & ~HCR_AMVOFFEN_BIT);
+	/*
+	 * The platform may opt to enable specific auxiliary counters. This can
+	 * be done via the common FCONF getter, or via the platform-implemented
+	 * function.
+	 */
+
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	const struct amu_topology *topology;
+
+#if ENABLE_AMU_FCONF
+	topology = FCONF_GET_PROPERTY(amu, config, topology);
+#else
+	topology = plat_amu_topology();
+#endif /* ENABLE_AMU_FCONF */
+
+	if (topology != NULL) {
+		unsigned int core_pos = plat_my_core_pos();
+
+		amcntenset1_el0_px = topology->cores[core_pos].enable;
+	} else {
+		ERROR("AMU: failed to generate AMU topology\n");
 	}
+#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */
 
-#if AMU_RESTRICT_COUNTERS
 	/*
-	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
-	 * counters at all but the highest implemented EL.  This is controlled
-	 * with the AMU_RESTRICT_COUNTERS compile time flag, when set, system
-	 * register reads at lower ELs return zero.  Reads from the memory
-	 * mapped view are unaffected.
+	 * Enable the requested counters.
 	 */
-	VERBOSE("AMU group 1 counter access restricted.\n");
-	write_amcr_el0(read_amcr_el0() | AMCR_CG1RZ_BIT);
+
+	write_amcntenset0_el0_px(amcntenset0_el0_px);
+
+	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
+	if (amcfgr_el0_ncg > 0U) {
+		write_amcntenset1_el0_px(amcntenset1_el0_px);
+
+#if !ENABLE_AMU_AUXILIARY_COUNTERS
+		VERBOSE("AMU: auxiliary counters detected but support is disabled\n");
+#endif
+	}
+
+	/* Initialize FEAT_AMUv1p1 features if present. */
+	if (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) {
+		if (el2_unused) {
+			/*
+			 * Make sure virtual offsets are disabled if EL2 not
+			 * used.
+			 */
+			write_hcr_el2_amvoffen(0U);
+		}
+
+#if AMU_RESTRICT_COUNTERS
+		/*
+		 * FEAT_AMUv1p1 adds a register field to restrict access to
+		 * group 1 counters at all but the highest implemented EL. This
+		 * is controlled with the `AMU_RESTRICT_COUNTERS` compile time
+		 * flag, when set, system register reads at lower ELs return
+		 * zero. Reads from the memory mapped view are unaffected.
+		 */
+		VERBOSE("AMU group 1 counter access restricted.\n");
+		write_amcr_el0_cg1rz(1U);
 #else
-	write_amcr_el0(read_amcr_el0() & ~AMCR_CG1RZ_BIT);
+		write_amcr_el0_cg1rz(0U);
+#endif
+	}
+
+#if ENABLE_MPMM
+	mpmm_enable();
 #endif
 }
 
 /* Read the group 0 counter identified by the given `idx`. */
-uint64_t amu_group0_cnt_read(unsigned int idx)
+static uint64_t amu_group0_cnt_read(unsigned int idx)
 {
-	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
-	assert(idx < AMU_GROUP0_NR_COUNTERS);
+	assert(amu_supported());
+	assert(idx < read_amcgcr_el0_cg0nc());
 
 	return amu_group0_cnt_read_internal(idx);
 }
 
 /* Write the group 0 counter identified by the given `idx` with `val` */
-void amu_group0_cnt_write(unsigned  int idx, uint64_t val)
+static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
 {
-	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
-	assert(idx < AMU_GROUP0_NR_COUNTERS);
+	assert(amu_supported());
+	assert(idx < read_amcgcr_el0_cg0nc());
 
 	amu_group0_cnt_write_internal(idx, val);
 	isb();
 }
 
 /*
+ * Unlike with auxiliary counters, we cannot detect at runtime whether an
+ * architected counter supports a virtual offset. These are instead fixed
+ * according to FEAT_AMUv1p1, but this switch will need to be updated if later
+ * revisions of FEAT_AMU add additional architected counters.
+ */
+static bool amu_group0_voffset_supported(uint64_t idx)
+{
+	switch (idx) {
+	case 0U:
+	case 2U:
+	case 3U:
+		return true;
+
+	case 1U:
+		return false;
+
+	default:
+		ERROR("AMU: can't set up virtual offset for unknown "
+		      "architected counter %llu!\n", idx);
+
+		panic();
+	}
+}
+
+/*
  * Read the group 0 offset register for a given index. Index must be 0, 2,
  * or 3, the register for 1 does not exist.
  *
  * Using this function requires FEAT_AMUv1p1 support.
  */
-uint64_t amu_group0_voffset_read(unsigned int idx)
+static uint64_t amu_group0_voffset_read(unsigned int idx)
 {
-	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
-	assert(idx < AMU_GROUP0_NR_COUNTERS);
+	assert(amu_v1p1_supported());
+	assert(idx < read_amcgcr_el0_cg0nc());
 	assert(idx != 1U);
 
 	return amu_group0_voffset_read_internal(idx);
@@ -169,33 +370,33 @@
  *
  * Using this function requires FEAT_AMUv1p1 support.
  */
-void amu_group0_voffset_write(unsigned int idx, uint64_t val)
+static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
 {
-	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
-	assert(idx < AMU_GROUP0_NR_COUNTERS);
+	assert(amu_v1p1_supported());
+	assert(idx < read_amcgcr_el0_cg0nc());
 	assert(idx != 1U);
 
 	amu_group0_voffset_write_internal(idx, val);
 	isb();
 }
 
-#if AMU_GROUP1_NR_COUNTERS
+#if ENABLE_AMU_AUXILIARY_COUNTERS
 /* Read the group 1 counter identified by the given `idx` */
-uint64_t amu_group1_cnt_read(unsigned int idx)
+static uint64_t amu_group1_cnt_read(unsigned int idx)
 {
-	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
+	assert(amu_supported());
 	assert(amu_group1_supported());
-	assert(idx < AMU_GROUP1_NR_COUNTERS);
+	assert(idx < read_amcgcr_el0_cg1nc());
 
 	return amu_group1_cnt_read_internal(idx);
 }
 
 /* Write the group 1 counter identified by the given `idx` with `val` */
-void amu_group1_cnt_write(unsigned int idx, uint64_t val)
+static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
 {
-	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
+	assert(amu_supported());
 	assert(amu_group1_supported());
-	assert(idx < AMU_GROUP1_NR_COUNTERS);
+	assert(idx < read_amcgcr_el0_cg1nc());
 
 	amu_group1_cnt_write_internal(idx, val);
 	isb();
@@ -206,13 +407,12 @@
  *
  * Using this function requires FEAT_AMUv1p1 support.
  */
-uint64_t amu_group1_voffset_read(unsigned int idx)
+static uint64_t amu_group1_voffset_read(unsigned int idx)
 {
-	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
+	assert(amu_v1p1_supported());
 	assert(amu_group1_supported());
-	assert(idx < AMU_GROUP1_NR_COUNTERS);
-	assert(((read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
-		(1ULL << idx)) != 0ULL);
+	assert(idx < read_amcgcr_el0_cg1nc());
+	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);
 
 	return amu_group1_voffset_read_internal(idx);
 }
@@ -222,167 +422,211 @@
  *
  * Using this function requires FEAT_AMUv1p1 support.
  */
-void amu_group1_voffset_write(unsigned int idx, uint64_t val)
+static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
 {
-	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
+	assert(amu_v1p1_supported());
 	assert(amu_group1_supported());
-	assert(idx < AMU_GROUP1_NR_COUNTERS);
-	assert(((read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
-		(1ULL << idx)) != 0ULL);
+	assert(idx < read_amcgcr_el0_cg1nc());
+	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);
 
 	amu_group1_voffset_write_internal(idx, val);
 	isb();
 }
+#endif
 
-/*
- * Program the event type register for the given `idx` with
- * the event number `val`
- */
-void amu_group1_set_evtype(unsigned int idx, unsigned int val)
+static void *amu_context_save(const void *arg)
 {
-	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
-	assert(amu_group1_supported());
-	assert(idx < AMU_GROUP1_NR_COUNTERS);
+	uint64_t i, j;
 
-	amu_group1_set_evtype_internal(idx, val);
-	isb();
-}
-#endif	/* AMU_GROUP1_NR_COUNTERS */
+	unsigned int core_pos;
+	struct amu_ctx *ctx;
 
-static void *amu_context_save(const void *arg)
-{
-	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
-	unsigned int i;
+	uint64_t id_aa64pfr0_el1_amu;	/* AMU version */
+	uint64_t hcr_el2_amvoffen;	/* AMU virtual offsets enabled */
+	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */
 
-	if (amu_get_version() == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
-		return (void *)-1;
-	}
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
+	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
+	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
+#endif
 
-#if AMU_GROUP1_NR_COUNTERS
-	if (!amu_group1_supported()) {
-		return (void *)-1;
+	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
+	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
+		return (void *)0;
 	}
-#endif
-	/* Assert that group 0/1 counter configuration is what we expect */
-	assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);
 
-#if AMU_GROUP1_NR_COUNTERS
-	assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
+	core_pos = plat_my_core_pos();
+	ctx = &amu_ctxs_[core_pos];
+
+	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
+	hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ?
+		read_hcr_el2_amvoffen() : 0U;
+
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
+	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
+	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
 #endif
+
 	/*
-	 * Disable group 0/1 counters to avoid other observers like SCP sampling
-	 * counter values from the future via the memory mapped view.
+	 * Disable all AMU counters.
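+	 * This prevents other observers, such as the SCP reading through the
+	 * memory-mapped view, from sampling counter values from the future
+	 * while the context is being saved.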
 	 */
-	write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK);
 
-#if AMU_GROUP1_NR_COUNTERS
-	write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK);
+	ctx->group0_enable = read_amcntenset0_el0_px();
+	write_amcntenclr0_el0_px(ctx->group0_enable);
+
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	if (amcfgr_el0_ncg > 0U) {
+		ctx->group1_enable = read_amcntenset1_el0_px();
+		write_amcntenclr1_el0_px(ctx->group1_enable);
+	}
 #endif
-	isb();
 
-	/* Save all group 0 counters */
-	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
+	/*
+	 * Save the counters to the local context.
+	 */
+
+	isb(); /* Ensure counters have been stopped */
+
+	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
 		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
 	}
 
-	/* Save group 0 virtual offsets if supported and enabled. */
-	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
-			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
-		/* Not using a loop because count is fixed and index 1 DNE. */
-		ctx->group0_voffsets[0U] = amu_group0_voffset_read(0U);
-		ctx->group0_voffsets[1U] = amu_group0_voffset_read(2U);
-		ctx->group0_voffsets[2U] = amu_group0_voffset_read(3U);
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
+		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
 	}
+#endif
 
-#if AMU_GROUP1_NR_COUNTERS
-	/* Save group 1 counters */
-	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
-		if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
-			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
-		}
-	}
+	/*
+	 * Save virtual offsets for counters that offer them.
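+	 * Offsets are stored contiguously, so `j` tracks the next free slot in
+	 * the context while `i` walks the counter indices.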
+	 */
 
-	/* Save group 1 virtual offsets if supported and enabled. */
-	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
-			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
-		u_register_t amcg1idr = read_amcg1idr_el0() >>
-			AMCG1IDR_VOFF_SHIFT;
-		amcg1idr = amcg1idr & AMU_GROUP1_COUNTERS_MASK;
+	if (hcr_el2_amvoffen != 0U) {
+		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
+			if (!amu_group0_voffset_supported(i)) {
+				continue; /* No virtual offset */
+			}
+
+			ctx->group0_voffsets[j++] = amu_group0_voffset_read(i);
+		}
 
-		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
-			if (((amcg1idr >> i) & 1ULL) != 0ULL) {
-				ctx->group1_voffsets[i] =
-					amu_group1_voffset_read(i);
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
+			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
+				continue; /* No virtual offset */
 			}
+
+			ctx->group1_voffsets[j++] = amu_group1_voffset_read(i);
 		}
-	}
 #endif
+	}
+
 	return (void *)0;
 }
 
 static void *amu_context_restore(const void *arg)
 {
-	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
-	unsigned int i;
+	uint64_t i, j;
 
-	if (amu_get_version() == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
-		return (void *)-1;
-	}
+	unsigned int core_pos;
+	struct amu_ctx *ctx;
 
-#if AMU_GROUP1_NR_COUNTERS
-	if (!amu_group1_supported()) {
-		return (void *)-1;
-	}
+	uint64_t id_aa64pfr0_el1_amu;	/* AMU version */
+
+	uint64_t hcr_el2_amvoffen;	/* AMU virtual offsets enabled */
+
+	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
+	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */
+
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
+	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
 #endif
-	/* Counters were disabled in `amu_context_save()` */
-	assert(read_amcntenset0_el0() == 0U);
 
-#if AMU_GROUP1_NR_COUNTERS
-	assert(read_amcntenset1_el0() == 0U);
+	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
+	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
+		return (void *)0;
+	}
+
+	core_pos = plat_my_core_pos();
+	ctx = &amu_ctxs_[core_pos];
+
+	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
+	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
+
+	hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ?
+		read_hcr_el2_amvoffen() : 0U;
+
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
+	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
 #endif
 
+	/*
+	 * Sanity check that all counters were disabled when the context was
+	 * previously saved.
+	 */
+
+	assert(read_amcntenset0_el0_px() == 0U);
+
+	if (amcfgr_el0_ncg > 0U) {
+		assert(read_amcntenset1_el0_px() == 0U);
+	}
+
+	/*
+	 * Restore the counter values from the local context.
+	 */
+
-	/* Restore all group 0 counters */
-	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
+	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
 		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
 	}
 
-	/* Restore group 0 virtual offsets if supported and enabled. */
-	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
-			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
-		/* Not using a loop because count is fixed and index 1 DNE. */
-		amu_group0_voffset_write(0U, ctx->group0_voffsets[0U]);
-		amu_group0_voffset_write(2U, ctx->group0_voffsets[1U]);
-		amu_group0_voffset_write(3U, ctx->group0_voffsets[2U]);
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
+		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
 	}
+#endif
 
-	/* Restore group 0 counter configuration */
-	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+	/*
+	 * Restore virtual offsets for counters that offer them.
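+	 * The offsets were packed contiguously during the save, so `j` again
+	 * walks the stored slots while `i` walks the counter indices.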
+	 */
 
-#if AMU_GROUP1_NR_COUNTERS
-	/* Restore group 1 counters */
-	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
-		if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
-			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
-		}
-	}
+	if (hcr_el2_amvoffen != 0U) {
+		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
+			if (!amu_group0_voffset_supported(i)) {
+				continue; /* No virtual offset */
+			}
 
-	/* Restore group 1 virtual offsets if supported and enabled. */
-	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
-			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
-		u_register_t amcg1idr = read_amcg1idr_el0() >>
-			AMCG1IDR_VOFF_SHIFT;
-		amcg1idr = amcg1idr & AMU_GROUP1_COUNTERS_MASK;
+			amu_group0_voffset_write(i, ctx->group0_voffsets[j++]);
+		}
 
-		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
-			if (((amcg1idr >> i) & 1ULL) != 0ULL) {
-				amu_group1_voffset_write(i,
-					ctx->group1_voffsets[i]);
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
+			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
+				continue; /* No virtual offset */
 			}
+
+			amu_group1_voffset_write(i, ctx->group1_voffsets[j++]);
 		}
+#endif
+	}
+
+	/*
+	 * Re-enable counters that were disabled during context save.
+	 */
+
+	write_amcntenset0_el0_px(ctx->group0_enable);
+
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+	if (amcfgr_el0_ncg > 0U) {
+		write_amcntenset1_el0_px(ctx->group1_enable);
 	}
+#endif
 
-	/* Restore group 1 counter configuration */
-	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
+#if ENABLE_MPMM
+	mpmm_enable();
 #endif
 
 	return (void *)0;
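As the comment in amu_enable() notes, the auxiliary counter topology comes from the platform hook plat_amu_topology() when ENABLE_AMU_FCONF is disabled. A minimal sketch of such a hook might look like the following, assuming a platform that simply wants auxiliary counter 0 enabled on every core; the mask value and the GNU range-designator initializer are illustrative rather than part of this patch:

    #include <lib/extensions/amu.h>
    #include <platform_def.h>

    /* Illustrative only: enable auxiliary (group 1) counter 0 on each core. */
    const struct amu_topology *plat_amu_topology(void)
    {
    	static const struct amu_topology topology = {
    		.cores = {
    			[0 ... PLATFORM_CORE_COUNT - 1] = { .enable = 1U << 0 },
    		},
    	};

    	return &topology;
    }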
diff --git a/lib/extensions/amu/aarch64/amu_helpers.S b/lib/extensions/amu/aarch64/amu_helpers.S
index 9989abd..0f6d799 100644
--- a/lib/extensions/amu/aarch64/amu_helpers.S
+++ b/lib/extensions/amu/aarch64/amu_helpers.S
@@ -83,6 +83,7 @@
 	write	AMEVCNTR03_EL0		/* index 3 */
 endfunc amu_group0_cnt_write_internal
 
+#if ENABLE_AMU_AUXILIARY_COUNTERS
 /*
  * uint64_t amu_group1_cnt_read_internal(int idx);
  *
@@ -217,6 +218,7 @@
 	write	AMEVTYPER1E_EL0		/* index 14 */
 	write	AMEVTYPER1F_EL0		/* index 15 */
 endfunc amu_group1_set_evtype_internal
+#endif
 
 /*
  * Accessor functions for virtual offset registers added with FEAT_AMUv1p1
@@ -297,6 +299,7 @@
 	write	AMEVCNTVOFF03_EL2	/* index 3 */
 endfunc amu_group0_voffset_write_internal
 
+#if ENABLE_AMU_AUXILIARY_COUNTERS
 /*
  * uint64_t amu_group1_voffset_read_internal(int idx);
  *
@@ -383,3 +386,4 @@
 	write	AMEVCNTVOFF1E_EL2	/* index 14 */
 	write	AMEVCNTVOFF1F_EL2	/* index 15 */
 endfunc amu_group1_voffset_write_internal
+#endif
diff --git a/lib/extensions/amu/amu.mk b/lib/extensions/amu/amu.mk
new file mode 100644
index 0000000..0d203cb
--- /dev/null
+++ b/lib/extensions/amu/amu.mk
@@ -0,0 +1,24 @@
+#
+# Copyright (c) 2021, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+include lib/fconf/fconf.mk
+
+AMU_SOURCES	:=	lib/extensions/amu/${ARCH}/amu.c \
+			lib/extensions/amu/${ARCH}/amu_helpers.S
+
+ifneq (${ENABLE_AMU_AUXILIARY_COUNTERS},0)
+        ifeq (${ENABLE_AMU},0)
+                $(error AMU auxiliary counter support (`ENABLE_AMU_AUXILIARY_COUNTERS`) requires AMU support (`ENABLE_AMU`))
+        endif
+endif
+
+ifneq (${ENABLE_AMU_FCONF},0)
+        ifeq (${ENABLE_AMU_AUXILIARY_COUNTERS},0)
+                $(error AMU FCONF support (`ENABLE_AMU_FCONF`) is not necessary when auxiliary counter support (`ENABLE_AMU_AUXILIARY_COUNTERS`) is disabled)
+        endif
+
+        AMU_SOURCES	+=	${FCONF_AMU_SOURCES}
+endif
diff --git a/lib/extensions/amu/amu_fconf.c b/lib/extensions/amu/amu_fconf.c
new file mode 100644
index 0000000..c7fb803
--- /dev/null
+++ b/lib/extensions/amu/amu_fconf.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "amu_private.h"
+#include <common/debug.h>
+#include <common/fdt_wrappers.h>
+#include <lib/extensions/amu.h>
+#include <lib/fconf/fconf.h>
+#include <libfdt.h>
+
+#include <plat/common/platform.h>
+
+static bool amu_topology_populated_; /* Whether the topology is valid */
+static struct amu_fconf_topology amu_topology_; /* Populated topology cache */
+
+const struct amu_fconf_topology *amu_topology(void)
+{
+	if (!amu_topology_populated_) {
+		return NULL;
+	}
+
+	return &amu_topology_;
+}
+
+/*
+ * Populate the core-specific AMU structure with information retrieved from a
+ * device tree.
+ *
+ * Returns `0` on success, or a negative integer representing an error code.
+ */
+static int amu_fconf_populate_cpu_amu(const void *fdt, int parent,
+				      struct amu_fconf_core *amu)
+{
+	int ret = 0;
+	int node = 0;
+
+	fdt_for_each_subnode(node, fdt, parent) {
+		const char *name;
+		const char *value;
+		int len;
+
+		uintptr_t idx = 0U;
+
+		name = fdt_get_name(fdt, node, &len);
+		if (strncmp(name, "counter@", 8) != 0) {
+			continue;
+		}
+
+		ret = fdt_get_reg_props_by_index(fdt, node, 0, &idx, NULL);
+		if (ret < 0) {
+			break;
+		}
+
+		value = fdt_getprop(fdt, node, "enable-at-el3", &len);
+		if ((value == NULL) && (len != -FDT_ERR_NOTFOUND)) {
+			break;
+		}
+
+		if (len != -FDT_ERR_NOTFOUND) {
+			amu->enable |= (1 << idx);
+		}
+	}
+
+	if ((node < 0) && (node != -FDT_ERR_NOTFOUND)) {
+		return node;
+	}
+
+	return ret;
+}
+
+/*
+ * Within a `cpu` node, attempt to dereference the `amu` property, and populate
+ * the AMU information for the core.
+ *
+ * Returns `0` on success, or a negative integer representing an error code.
+ */
+static int amu_fconf_populate_cpu(const void *fdt, int node, uintptr_t mpidr)
+{
+	int ret;
+	int idx;
+
+	uint32_t amu_phandle;
+	struct amu_fconf_core *amu;
+
+	ret = fdt_read_uint32(fdt, node, "amu", &amu_phandle);
+	if (ret < 0) {
+		if (ret == -FDT_ERR_NOTFOUND) {
+			ret = 0;
+		}
+
+		return ret;
+	}
+
+	node = fdt_node_offset_by_phandle(fdt, amu_phandle);
+	if (node < 0) {
+		return node;
+	}
+
+	idx = plat_core_pos_by_mpidr(mpidr);
+	if (idx < 0) {
+		return -FDT_ERR_BADVALUE;
+	}
+
+	amu = &amu_topology_.cores[idx];
+
+	return amu_fconf_populate_cpu_amu(fdt, node, amu);
+}
+
+/*
+ * For every CPU node (`/cpus/cpu@n`) in an FDT, executes a callback, passing
+ * it a pointer to the FDT, the offset of the CPU node and the MPIDR read from
+ * its `reg` property. If the return value of the callback is negative, it is
+ * treated as an error and the loop is aborted. In this situation, the value of
+ * the callback is returned from the function.
+ *
+ * Returns `0` on success, or a negative integer representing an error code.
+ */
+static int amu_fconf_foreach_cpu(const void *fdt,
+				 int (*callback)(const void *, int, uintptr_t))
+{
+	int ret = 0;
+	int parent, node = 0;
+
+	parent = fdt_path_offset(fdt, "/cpus");
+	if (parent < 0) {
+		if (parent == -FDT_ERR_NOTFOUND) {
+			parent = 0;
+		}
+
+		return parent;
+	}
+
+	fdt_for_each_subnode(node, fdt, parent) {
+		const char *name;
+		int len;
+
+		uintptr_t mpidr = 0U;
+
+		name = fdt_get_name(fdt, node, &len);
+		if (strncmp(name, "cpu@", 4) != 0) {
+			continue;
+		}
+
+		ret = fdt_get_reg_props_by_index(fdt, node, 0, &mpidr, NULL);
+		if (ret < 0) {
+			break;
+		}
+
+		ret = callback(fdt, node, mpidr);
+		if (ret < 0) {
+			break;
+		}
+	}
+
+	if ((node < 0) && (node != -FDT_ERR_NOTFOUND)) {
+		return node;
+	}
+
+	return ret;
+}
+
+/*
+ * Populates the global `amu_topology` structure based on what's described by
+ * the hardware configuration device tree blob.
+ *
+ * The device tree is expected to provide an `amu` property for each `cpu` node,
+ * like so:
+ *
+ *     cpu@0 {
+ *         amu = <&cpu0_amu>;
+ *     };
+ *
+ *     amus {
+ *         cpu0_amu: amu-0 {
+ *             counters {
+ *                 #address-cells = <2>;
+ *                 #size-cells = <0>;
+ *
+ *                 counter@x,y {
+ *                     reg = <x y>; // Group x, counter y
+ *                 };
+ *             };
+ *         };
+ *     };
+ */
+static int amu_fconf_populate(uintptr_t config)
+{
+	int ret = amu_fconf_foreach_cpu(
+		(const void *)config, amu_fconf_populate_cpu);
+	if (ret < 0) {
+		ERROR("AMU-FCONF: Failed to configure AMU: %d\n", ret);
+	} else {
+		amu_topology_populated_ = true;
+	}
+
+	return ret;
+}
+
+FCONF_REGISTER_POPULATOR(HW_CONFIG, amu, amu_fconf_populate);
diff --git a/include/lib/extensions/amu_private.h b/lib/extensions/amu/amu_private.h
similarity index 78%
rename from include/lib/extensions/amu_private.h
rename to lib/extensions/amu/amu_private.h
index 3b4b47c..eb7ff0e 100644
--- a/include/lib/extensions/amu_private.h
+++ b/lib/extensions/amu/amu_private.h
@@ -9,6 +9,17 @@
 
 #include <stdint.h>
 
+#include <lib/cassert.h>
+#include <lib/extensions/amu.h>
+#include <lib/utils_def.h>
+
+#include <platform_def.h>
+
+#define AMU_GROUP0_MAX_COUNTERS		U(16)
+#define AMU_GROUP1_MAX_COUNTERS		U(16)
+
+#define AMU_AMCGCR_CG0NC_MAX		U(16)
+
 uint64_t amu_group0_cnt_read_internal(unsigned int idx);
 void amu_group0_cnt_write_internal(unsigned int idx, uint64_t val);
 
diff --git a/lib/fconf/fconf.mk b/lib/fconf/fconf.mk
index b01dc6f..fb88910 100644
--- a/lib/fconf/fconf.mk
+++ b/lib/fconf/fconf.mk
@@ -1,12 +1,19 @@
 #
-# Copyright (c) 2019-2020, ARM Limited. All rights reserved.
+# Copyright (c) 2019-2021, ARM Limited. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
-# Add Firmware Configuration files
+include common/fdt_wrappers.mk
+
 FCONF_SOURCES		:=	lib/fconf/fconf.c
+FCONF_SOURCES		+=	${FDT_WRAPPERS_SOURCES}
+
 FCONF_DYN_SOURCES	:=	lib/fconf/fconf_dyn_cfg_getter.c
+FCONF_DYN_SOURCES	+=	${FDT_WRAPPERS_SOURCES}
+
+FCONF_AMU_SOURCES	:=	lib/fconf/fconf_amu_getter.c
+FCONF_AMU_SOURCES	+=	${FDT_WRAPPERS_SOURCES}
 
-BL1_SOURCES		+=	${FCONF_SOURCES} ${FCONF_DYN_SOURCES}
-BL2_SOURCES		+=	${FCONF_SOURCES} ${FCONF_DYN_SOURCES}
+FCONF_MPMM_SOURCES	:=	lib/fconf/fconf_mpmm_getter.c
+FCONF_MPMM_SOURCES	+=	${FDT_WRAPPERS_SOURCES}
diff --git a/lib/fconf/fconf_amu_getter.c b/lib/fconf/fconf_amu_getter.c
new file mode 100644
index 0000000..eff309c
--- /dev/null
+++ b/lib/fconf/fconf_amu_getter.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <common/debug.h>
+#include <common/fdt_wrappers.h>
+#include <lib/fconf/fconf.h>
+#include <lib/fconf/fconf_amu_getter.h>
+#include <libfdt.h>
+
+#include <plat/common/platform.h>
+
+struct fconf_amu_config fconf_amu_config;
+static struct amu_topology fconf_amu_topology_;
+
+/*
+ * Populate the core-specific AMU structure with information retrieved from a
+ * device tree.
+ *
+ * Returns `0` on success, or a negative integer representing an error code.
+ */
+static int fconf_populate_amu_cpu_amu(const void *fdt, int parent,
+				      struct amu_core *amu)
+{
+	int ret = 0;
+	int node = 0;
+
+	fdt_for_each_subnode(node, fdt, parent) {
+		const char *name;
+		const char *value;
+		int len;
+
+		uintptr_t idx = 0U;
+
+		name = fdt_get_name(fdt, node, &len);
+		if (strncmp(name, "counter@", 8) != 0) {
+			continue;
+		}
+
+		ret = fdt_get_reg_props_by_index(fdt, node, 0, &idx, NULL);
+		if (ret < 0) {
+			break;
+		}
+
+		value = fdt_getprop(fdt, node, "enable-at-el3", &len);
+		if ((value == NULL) && (len != -FDT_ERR_NOTFOUND)) {
+			break;
+		}
+
+		if (len != -FDT_ERR_NOTFOUND) {
+			amu->enable |= (1 << idx);
+		}
+	}
+
+	if ((node < 0) && (node != -FDT_ERR_NOTFOUND)) {
+		return node;
+	}
+
+	return ret;
+}
+
+/*
+ * Within a `cpu` node, attempt to dereference the `amu` property, and populate
+ * the AMU information for the core.
+ *
+ * Returns `0` on success, or a negative integer representing an error code.
+ */
+static int fconf_populate_amu_cpu(const void *fdt, int node, uintptr_t mpidr)
+{
+	int ret;
+	int idx;
+
+	uint32_t amu_phandle;
+	struct amu_core *amu;
+
+	ret = fdt_read_uint32(fdt, node, "amu", &amu_phandle);
+	if (ret < 0) {
+		if (ret == -FDT_ERR_NOTFOUND) {
+			ret = 0;
+		}
+
+		return ret;
+	}
+
+	node = fdt_node_offset_by_phandle(fdt, amu_phandle);
+	if (node < 0) {
+		return node;
+	}
+
+	idx = plat_core_pos_by_mpidr(mpidr);
+	if (idx < 0) {
+		return -FDT_ERR_BADVALUE;
+	}
+
+	amu = &fconf_amu_topology_.cores[idx];
+
+	return fconf_populate_amu_cpu_amu(fdt, node, amu);
+}
+
+/*
+ * Populates the global `amu_topology` structure based on what's described by
+ * the hardware configuration device tree blob.
+ *
+ * The device tree is expected to provide an `amu` property for each `cpu` node,
+ * like so:
+ *
+ *     cpu@0 {
+ *         amu = <&cpu0_amu>;
+ *     };
+ *
+ *     amus {
+ *         cpu0_amu: amu-0 {
+ *             counters {
+ *                 #address-cells = <2>;
+ *                 #size-cells = <0>;
+ *
+ *                 counter@x,y {
+ *                     reg = <x y>; // Group x, counter y
+ *                 };
+ *             };
+ *         };
+ *     };
+ */
+static int fconf_populate_amu(uintptr_t config)
+{
+	int ret = fdtw_for_each_cpu(
+		(const void *)config, fconf_populate_amu_cpu);
+	if (ret == 0) {
+		fconf_amu_config.topology = &fconf_amu_topology_;
+	} else {
+		ERROR("FCONF: failed to parse AMU information: %d\n", ret);
+	}
+
+	return ret;
+}
+
+FCONF_REGISTER_POPULATOR(HW_CONFIG, amu, fconf_populate_amu);
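The FCONF_GET_PROPERTY(amu, config, topology) call in amu_enable() resolves through a getter macro provided by fconf_amu_getter.h, which is not shown in this patch. Based on the usual FCONF getter convention, that header is expected to look roughly like the sketch below; treat it as an assumption rather than the file's actual contents:

    #ifndef FCONF_AMU_GETTER_H
    #define FCONF_AMU_GETTER_H

    #include <lib/extensions/amu.h>

    #define amu__config_getter(id)	fconf_amu_config.id

    struct fconf_amu_config {
    	const struct amu_topology *topology;
    };

    extern struct fconf_amu_config fconf_amu_config;

    #endif /* FCONF_AMU_GETTER_H */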
diff --git a/lib/fconf/fconf_mpmm_getter.c b/lib/fconf/fconf_mpmm_getter.c
new file mode 100644
index 0000000..02a566d
--- /dev/null
+++ b/lib/fconf/fconf_mpmm_getter.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <common/debug.h>
+#include <common/fdt_wrappers.h>
+#include <lib/fconf/fconf.h>
+#include <lib/fconf/fconf_mpmm_getter.h>
+#include <libfdt.h>
+
+#include <plat/common/platform.h>
+
+struct fconf_mpmm_config fconf_mpmm_config;
+static struct mpmm_topology fconf_mpmm_topology;
+
+/*
+ * Within a `cpu` node, determine support for MPMM via the `supports-mpmm`
+ * property.
+ *
+ * Returns `0` on success, or a negative integer representing an error code.
+ */
+static int fconf_populate_mpmm_cpu(const void *fdt, int off, uintptr_t mpidr)
+{
+	int ret, len;
+
+	int core_pos;
+	struct mpmm_core *core;
+
+	core_pos = plat_core_pos_by_mpidr(mpidr);
+	if (core_pos < 0) {
+		return -FDT_ERR_BADVALUE;
+	}
+
+	core = &fconf_mpmm_topology.cores[core_pos];
+
+	fdt_getprop(fdt, off, "supports-mpmm", &len);
+	if (len >= 0) {
+		core->supported = true;
+		ret = 0;
+	} else {
+		core->supported = false;
+		ret = len;
+	}
+
+	return ret;
+}
+
+/*
+ * Populates the global `fconf_mpmm_config` structure based on what's described
+ * by the hardware configuration device tree blob.
+ *
+ * The device tree is expected to provide a `supports-mpmm` property for each
+ * `cpu` node, like so:
+ *
+ *     cpu@0 {
+ *       supports-mpmm;
+ *     };
+ *
+ * This property indicates whether the core implements MPMM, as we cannot detect
+ * support for it dynamically.
+ */
+static int fconf_populate_mpmm(uintptr_t config)
+{
+	int ret = fdtw_for_each_cpu(
+		(const void *)config, fconf_populate_mpmm_cpu);
+	if (ret == 0) {
+		fconf_mpmm_config.topology = &fconf_mpmm_topology;
+	} else {
+		ERROR("FCONF: failed to configure MPMM: %d\n", ret);
+	}
+
+	return ret;
+}
+
+FCONF_REGISTER_POPULATOR(HW_CONFIG, mpmm, fconf_populate_mpmm);
diff --git a/lib/mpmm/mpmm.c b/lib/mpmm/mpmm.c
new file mode 100644
index 0000000..a66f2aa
--- /dev/null
+++ b/lib/mpmm/mpmm.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdbool.h>
+
+#include <common/debug.h>
+#include <lib/mpmm/mpmm.h>
+
+#include <plat/common/platform.h>
+
+#if ENABLE_MPMM_FCONF
+#	include <lib/fconf/fconf.h>
+#	include <lib/fconf/fconf_mpmm_getter.h>
+#endif
+
+static uint64_t read_cpuppmcr_el3_mpmmpinctl(void)
+{
+	return (read_cpuppmcr_el3() >> CPUPPMCR_EL3_MPMMPINCTL_SHIFT) &
+		CPUPPMCR_EL3_MPMMPINCTL_MASK;
+}
+
+static void write_cpumpmmcr_el3_mpmm_en(uint64_t mpmm_en)
+{
+	uint64_t value = read_cpumpmmcr_el3();
+
+	value &= ~(CPUMPMMCR_EL3_MPMM_EN_MASK << CPUMPMMCR_EL3_MPMM_EN_SHIFT);
+	value |= (mpmm_en & CPUMPMMCR_EL3_MPMM_EN_MASK) <<
+		CPUMPMMCR_EL3_MPMM_EN_SHIFT;
+
+	write_cpumpmmcr_el3(value);
+}
+
+static bool mpmm_supported(void)
+{
+	bool supported = false;
+	const struct mpmm_topology *topology;
+
+#if ENABLE_MPMM_FCONF
+	topology = FCONF_GET_PROPERTY(mpmm, config, topology);
+#else
+	topology = plat_mpmm_topology();
+#endif /* ENABLE_MPMM_FCONF */
+
+	/*
+	 * For the current core, first check whether the platform configuration
+	 * claims support for MPMM, then make sure that MPMM is controllable
+	 * through the system registers.
+	 */
+
+	if (topology != NULL) {
+		unsigned int core_pos = plat_my_core_pos();
+
+		supported = topology->cores[core_pos].supported &&
+			(read_cpuppmcr_el3_mpmmpinctl() == 0U);
+	} else {
+		ERROR("MPMM: failed to generate MPMM topology\n");
+	}
+
+	return supported;
+}
+
+void mpmm_enable(void)
+{
+	bool supported = mpmm_supported();
+
+	if (supported) {
+		write_cpumpmmcr_el3_mpmm_en(1U);
+	}
+}
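When ENABLE_MPMM_FCONF is disabled, mpmm_supported() falls back to the platform hook plat_mpmm_topology(). A minimal sketch of such a hook might look like the following, assuming a platform on which every core implements MPMM; whether that is true is a platform property that cannot be detected dynamically, so the blanket value here is purely illustrative:

    #include <stdbool.h>

    #include <lib/mpmm/mpmm.h>
    #include <platform_def.h>

    /* Illustrative only: claim MPMM support on every core. */
    const struct mpmm_topology *plat_mpmm_topology(void)
    {
    	static const struct mpmm_topology topology = {
    		.cores = {
    			[0 ... PLATFORM_CORE_COUNT - 1] = { .supported = true },
    		},
    	};

    	return &topology;
    }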
diff --git a/lib/mpmm/mpmm.mk b/lib/mpmm/mpmm.mk
new file mode 100644
index 0000000..826f925
--- /dev/null
+++ b/lib/mpmm/mpmm.mk
@@ -0,0 +1,29 @@
+#
+# Copyright (c) 2021, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+include lib/extensions/amu/amu.mk
+include lib/fconf/fconf.mk
+
+ifneq (${ENABLE_MPMM},0)
+        ifneq ($(ARCH),aarch64)
+                $(error MPMM support (`ENABLE_MPMM`) can only be enabled in AArch64 images (`ARCH`))
+        endif
+
+        ifeq (${ENABLE_AMU_AUXILIARY_COUNTERS},0) # For MPMM gear AMU counters
+                $(error MPMM support (`ENABLE_MPMM`) requires auxiliary AMU counter support (`ENABLE_AMU_AUXILIARY_COUNTERS`))
+        endif
+endif
+
+MPMM_SOURCES	:=	lib/mpmm/mpmm.c
+MPMM_SOURCES	+=	${AMU_SOURCES}
+
+ifneq (${ENABLE_MPMM_FCONF},0)
+        ifeq (${ENABLE_MPMM},0)
+                $(error MPMM FCONF support (`ENABLE_MPMM_FCONF`) requires MPMM support (`ENABLE_MPMM`))
+        endif
+
+        MPMM_SOURCES	+= ${FCONF_MPMM_SOURCES}
+endif
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index 819c536..45f5fa8 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -96,6 +96,12 @@
 # Build option to enable MPAM for lower ELs
 ENABLE_MPAM_FOR_LOWER_ELS	:= 0
 
+# Enable the Maximum Power Mitigation Mechanism on supporting cores.
+ENABLE_MPMM			:= 0
+
+# Enable MPMM configuration via FCONF.
+ENABLE_MPMM_FCONF		:= 0
+
 # Flag to Enable Position Independent support (PIE)
 ENABLE_PIE			:= 0
 
@@ -306,6 +312,8 @@
 CTX_INCLUDE_MTE_REGS		:= 0
 
 ENABLE_AMU			:= 0
+ENABLE_AMU_AUXILIARY_COUNTERS	:= 0
+ENABLE_AMU_FCONF		:= 0
 AMU_RESTRICT_COUNTERS		:= 0
 
 # By default, enable Scalable Vector Extension if implemented only for Non-secure
diff --git a/plat/arm/board/a5ds/platform.mk b/plat/arm/board/a5ds/platform.mk
index 8b0dc5c..4f87306 100644
--- a/plat/arm/board/a5ds/platform.mk
+++ b/plat/arm/board/a5ds/platform.mk
@@ -1,18 +1,23 @@
 #
-# Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+# Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
 # Firmware Configuration Framework sources
+include common/fdt_wrappers.mk
 include lib/fconf/fconf.mk
 
+BL1_SOURCES		+=	${FCONF_SOURCES} ${FCONF_DYN_SOURCES}
+BL2_SOURCES		+=	${FCONF_SOURCES} ${FCONF_DYN_SOURCES}
+
 # Add `libfdt` and Arm common helpers required for Dynamic Config
 include lib/libfdt/libfdt.mk
 
 DYN_CFG_SOURCES		+=	plat/arm/common/arm_dyn_cfg.c		\
-				plat/arm/common/arm_dyn_cfg_helpers.c	\
-				common/fdt_wrappers.c
+				plat/arm/common/arm_dyn_cfg_helpers.c
+
+DYN_CFG_SOURCES		+=	${FDT_WRAPPERS_SOURCES}
 
 # Include GICv2 driver files
 include drivers/arm/gic/v2/gicv2.mk
diff --git a/plat/arm/board/arm_fpga/platform.mk b/plat/arm/board/arm_fpga/platform.mk
index 901fabf..084532c 100644
--- a/plat/arm/board/arm_fpga/platform.mk
+++ b/plat/arm/board/arm_fpga/platform.mk
@@ -4,6 +4,7 @@
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
+include common/fdt_wrappers.mk
 include lib/libfdt/libfdt.mk
 
 RESET_TO_BL31 := 1
@@ -104,8 +105,7 @@
 
 PLAT_BL_COMMON_SOURCES	:=	plat/arm/board/arm_fpga/${ARCH}/fpga_helpers.S
 
-BL31_SOURCES		+=	common/fdt_wrappers.c				\
-				common/fdt_fixup.c				\
+BL31_SOURCES		+=	common/fdt_fixup.c				\
 				drivers/delay_timer/delay_timer.c		\
 				drivers/delay_timer/generic_delay_timer.c	\
 				drivers/arm/pl011/${ARCH}/pl011_console.S	\
@@ -117,6 +117,8 @@
 				${FPGA_CPU_LIBS}				\
 				${FPGA_GIC_SOURCES}
 
+BL31_SOURCES		+=	${FDT_WRAPPERS_SOURCES}
+
 $(eval $(call MAKE_S,$(BUILD_PLAT),plat/arm/board/arm_fpga/rom_trampoline.S,bl31))
 $(eval $(call MAKE_S,$(BUILD_PLAT),plat/arm/board/arm_fpga/kernel_trampoline.S,bl31))
 $(eval $(call MAKE_LD,$(BUILD_PLAT)/build_axf.ld,plat/arm/board/arm_fpga/build_axf.ld.S,bl31))
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index fd27acb..0d2c319 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -4,6 +4,8 @@
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
+include common/fdt_wrappers.mk
+
 # Use the GICv3 driver on the FVP by default
 FVP_USE_GIC_DRIVER	:= FVP_GICV3
 
@@ -228,11 +230,12 @@
 # Support for fconf in BL31
 # Added separately from the above list for better readability
 ifeq ($(filter 1,${BL2_AT_EL3} ${RESET_TO_BL31}),)
-BL31_SOURCES		+=	common/fdt_wrappers.c				\
-				lib/fconf/fconf.c				\
+BL31_SOURCES		+=	lib/fconf/fconf.c				\
 				lib/fconf/fconf_dyn_cfg_getter.c		\
 				plat/arm/board/fvp/fconf/fconf_hw_config_getter.c
 
+BL31_SOURCES		+=	${FDT_WRAPPERS_SOURCES}
+
 ifeq (${SEC_INT_DESC_IN_FCONF},1)
 BL31_SOURCES		+=	plat/arm/common/fconf/fconf_sec_intr_config.c
 endif
diff --git a/plat/arm/board/fvp/sp_min/sp_min-fvp.mk b/plat/arm/board/fvp/sp_min/sp_min-fvp.mk
index 64cb7ad..0d8cca5 100644
--- a/plat/arm/board/fvp/sp_min/sp_min-fvp.mk
+++ b/plat/arm/board/fvp/sp_min/sp_min-fvp.mk
@@ -1,9 +1,11 @@
 #
-# Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
+include common/fdt_wrappers.mk
+
 # SP_MIN source files specific to FVP platform
 BL32_SOURCES		+=	drivers/arm/fvp/fvp_pwrc.c			\
 				drivers/cfi/v2m/v2m_flash.c			\
@@ -22,10 +24,11 @@
 # Support for fconf in SP_MIN(BL32)
 # Added separately from the above list for better readability
 ifeq ($(filter 1,${BL2_AT_EL3} ${RESET_TO_SP_MIN}),)
-BL32_SOURCES		+=	common/fdt_wrappers.c				\
-				lib/fconf/fconf.c				\
+BL32_SOURCES		+=	lib/fconf/fconf.c				\
 				plat/arm/board/fvp/fconf/fconf_hw_config_getter.c
 
+BL32_SOURCES		+=	${FDT_WRAPPERS_SOURCES}
+
 ifeq (${SEC_INT_DESC_IN_FCONF},1)
 BL32_SOURCES		+=	plat/arm/common/fconf/fconf_sec_intr_config.c
 endif
diff --git a/plat/arm/board/fvp_ve/platform.mk b/plat/arm/board/fvp_ve/platform.mk
index ac45d57..f7eace8 100644
--- a/plat/arm/board/fvp_ve/platform.mk
+++ b/plat/arm/board/fvp_ve/platform.mk
@@ -1,9 +1,11 @@
 #
-# Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+# Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
+include common/fdt_wrappers.mk
+
 ifdef ARM_CORTEX_A5
 # Use the SP804 timer instead of the generic one
 USE_SP804_TIMER	:= 1
@@ -125,10 +127,13 @@
 # Firmware Configuration Framework sources
 include lib/fconf/fconf.mk
 
+BL1_SOURCES		+=	${FCONF_SOURCES} ${FCONF_DYN_SOURCES}
+BL2_SOURCES		+=	${FCONF_SOURCES} ${FCONF_DYN_SOURCES}
+
 # Add `libfdt` and Arm common helpers required for Dynamic Config
 include lib/libfdt/libfdt.mk
 
 DYN_CFG_SOURCES		+=	plat/arm/common/arm_dyn_cfg.c		\
-				plat/arm/common/arm_dyn_cfg_helpers.c	\
-				common/fdt_wrappers.c
+				plat/arm/common/arm_dyn_cfg_helpers.c
 
+DYN_CFG_SOURCES		+=	${FDT_WRAPPERS_SOURCES}
diff --git a/plat/arm/board/juno/platform.mk b/plat/arm/board/juno/platform.mk
index 92fbf35..2c84eb3 100644
--- a/plat/arm/board/juno/platform.mk
+++ b/plat/arm/board/juno/platform.mk
@@ -4,6 +4,8 @@
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
+include common/fdt_wrappers.mk
+
 # Include GICv2 driver files
 include drivers/arm/gic/v2/gicv2.mk
 
@@ -83,7 +85,6 @@
 				lib/cpus/aarch64/cortex_a57.S		\
 				lib/cpus/aarch64/cortex_a72.S		\
 				lib/utils/mem_region.c			\
-				common/fdt_wrappers.c			\
 				lib/fconf/fconf.c			\
 				lib/fconf/fconf_dyn_cfg_getter.c	\
 				plat/arm/board/juno/juno_bl31_setup.c	\
@@ -94,6 +95,8 @@
 				${JUNO_INTERCONNECT_SOURCES}		\
 				${JUNO_SECURITY_SOURCES}
 
+BL31_SOURCES		+=	${FDT_WRAPPERS_SOURCES}
+
 ifeq (${CSS_USE_SCMI_SDS_DRIVER},1)
 BL1_SOURCES		+=	drivers/arm/css/sds/sds.c
 endif
diff --git a/plat/arm/board/tc/platform.mk b/plat/arm/board/tc/platform.mk
index 7ebf639..8765fa2 100644
--- a/plat/arm/board/tc/platform.mk
+++ b/plat/arm/board/tc/platform.mk
@@ -3,6 +3,8 @@
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
+include common/fdt_wrappers.mk
+
 ifeq ($(filter ${TARGET_PLATFORM}, 0 1),)
         $(error TARGET_PLATFORM must be 0 or 1)
 endif
@@ -91,13 +93,14 @@
 				${ENT_GIC_SOURCES}			\
 				${TC_BASE}/tc_bl31_setup.c	\
 				${TC_BASE}/tc_topology.c	\
-				common/fdt_wrappers.c			\
 				lib/fconf/fconf.c			\
 				lib/fconf/fconf_dyn_cfg_getter.c	\
 				drivers/cfi/v2m/v2m_flash.c		\
 				lib/utils/mem_region.c			\
 				plat/arm/common/arm_nor_psci_mem_protect.c
 
+BL31_SOURCES		+=	${FDT_WRAPPERS_SOURCES}
+
 # Add the FDT_SOURCES and options for Dynamic Config
 FDT_SOURCES		+=	${TC_BASE}/fdts/${PLAT}_fw_config.dts	\
 				${TC_BASE}/fdts/${PLAT}_tb_fw_config.dts
@@ -137,6 +140,11 @@
 override ENABLE_SPE_FOR_LOWER_ELS	:= 0
 
 override ENABLE_AMU := 1
+override ENABLE_AMU_AUXILIARY_COUNTERS := 1
+override ENABLE_AMU_FCONF := 1
+
+override ENABLE_MPMM := 1
+override ENABLE_MPMM_FCONF := 1
 
 include plat/arm/common/arm_common.mk
 include plat/arm/css/common/css_common.mk
diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk
index a20e258..78efb0f 100644
--- a/plat/arm/common/arm_common.mk
+++ b/plat/arm/common/arm_common.mk
@@ -4,6 +4,8 @@
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
+include common/fdt_wrappers.mk
+
 ifeq (${ARCH}, aarch64)
   # On ARM standard platforms, the TSP can execute from Trusted SRAM, Trusted
   # DRAM (if available) or the TZC secured area of DRAM.
@@ -256,14 +258,18 @@
 # Firmware Configuration Framework sources
 include lib/fconf/fconf.mk
 
+BL1_SOURCES		+=	${FCONF_SOURCES} ${FCONF_DYN_SOURCES}
+BL2_SOURCES		+=	${FCONF_SOURCES} ${FCONF_DYN_SOURCES}
+
 # Add `libfdt` and Arm common helpers required for Dynamic Config
 include lib/libfdt/libfdt.mk
 
 DYN_CFG_SOURCES		+=	plat/arm/common/arm_dyn_cfg.c		\
 				plat/arm/common/arm_dyn_cfg_helpers.c	\
-				common/fdt_wrappers.c			\
 				common/uuid.c
 
+DYN_CFG_SOURCES		+=	${FDT_WRAPPERS_SOURCES}
+
 BL1_SOURCES		+=	${DYN_CFG_SOURCES}
 BL2_SOURCES		+=	${DYN_CFG_SOURCES}
 
@@ -343,10 +349,10 @@
 
 ifeq (${SPD},spmd)
 BL31_SOURCES		+=	plat/common/plat_spmd_manifest.c	\
-				common/fdt_wrappers.c			\
 				common/uuid.c				\
 				${LIBFDT_SRCS}
 
+BL31_SOURCES		+=	${FDT_WRAPPERS_SOURCES}
 endif
 
 ifneq (${TRUSTED_BOARD_BOOT},0)
diff --git a/plat/nvidia/tegra/soc/t194/platform_t194.mk b/plat/nvidia/tegra/soc/t194/platform_t194.mk
index 339375f..7583833 100644
--- a/plat/nvidia/tegra/soc/t194/platform_t194.mk
+++ b/plat/nvidia/tegra/soc/t194/platform_t194.mk
@@ -1,9 +1,11 @@
 #
-# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
+include common/fdt_wrappers.mk
+
 # platform configs
 ENABLE_CONSOLE_SPE			:= 1
 $(eval $(call add_define,ENABLE_CONSOLE_SPE))
@@ -74,10 +76,10 @@
 
 # SPM dispatcher
 ifeq (${SPD},spmd)
-# include device tree helper library
 include lib/libfdt/libfdt.mk
 # sources to support spmd
 BL31_SOURCES		+=	plat/common/plat_spmd_manifest.c	\
-				common/fdt_wrappers.c			\
 				${LIBFDT_SRCS}
+
+BL31_SOURCES		+=	${FDT_WRAPPERS_SOURCES}
 endif
diff --git a/plat/qemu/qemu_sbsa/platform.mk b/plat/qemu/qemu_sbsa/platform.mk
index 9fb30ad..5a6b1e1 100644
--- a/plat/qemu/qemu_sbsa/platform.mk
+++ b/plat/qemu/qemu_sbsa/platform.mk
@@ -1,9 +1,11 @@
 #
-# Copyright (c) 2019-2020, Linaro Limited and Contributors. All rights reserved.
+# Copyright (c) 2019-2021, Linaro Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
+include common/fdt_wrappers.mk
+
 CRASH_REPORTING	:=	1
 
 include lib/libfdt/libfdt.mk
@@ -86,8 +88,10 @@
 				${PLAT_QEMU_COMMON_PATH}/aarch64/plat_helpers.S	\
 				${PLAT_QEMU_COMMON_PATH}/qemu_bl31_setup.c	\
 				common/fdt_fixup.c				\
-				common/fdt_wrappers.c				\
 				${QEMU_GIC_SOURCES}
+
+BL31_SOURCES		+=	${FDT_WRAPPERS_SOURCES}
+
 ifeq (${SPM_MM},1)
 	BL31_SOURCES		+=	${PLAT_QEMU_COMMON_PATH}/qemu_spm.c
 endif
diff --git a/plat/st/stm32mp1/platform.mk b/plat/st/stm32mp1/platform.mk
index 28463f1..14f90d4 100644
--- a/plat/st/stm32mp1/platform.mk
+++ b/plat/st/stm32mp1/platform.mk
@@ -154,13 +154,15 @@
 PLAT_INCLUDES		:=	-Iplat/st/common/include/
 PLAT_INCLUDES		+=	-Iplat/st/stm32mp1/include/
 
+include common/fdt_wrappers.mk
 include lib/libfdt/libfdt.mk
 
-PLAT_BL_COMMON_SOURCES	:=	common/fdt_wrappers.c					\
-				common/uuid.c						\
+PLAT_BL_COMMON_SOURCES	:=	common/uuid.c						\
 				plat/st/common/stm32mp_common.c				\
 				plat/st/stm32mp1/stm32mp1_private.c
 
+PLAT_BL_COMMON_SOURCES	+=	${FDT_WRAPPERS_SOURCES}
+
 PLAT_BL_COMMON_SOURCES	+=	drivers/st/uart/aarch32/stm32_console.S
 
 ifneq (${ENABLE_STACK_PROTECTOR},0)