Merge changes I9405f7f6,Id53ea099 into integration

* changes:
  fix(plat/mediatek/mt8183): fix out-of-bound access
  feat(plat/mediatek/common): enable software reset for CIRQ
diff --git a/Makefile b/Makefile
index 9d1e945..ec6f885 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -129,6 +129,23 @@
         $(error Unknown BRANCH_PROTECTION value ${BRANCH_PROTECTION})
 endif
 
+# FEAT_RME
+ifeq (${ENABLE_RME},1)
+# RME doesn't support PIE
+ifneq (${ENABLE_PIE},0)
+        $(error ENABLE_RME does not support PIE)
+endif
+# RME requires AARCH64
+ifneq (${ARCH},aarch64)
+        $(error ENABLE_RME requires AArch64)
+endif
+# RME requires EL2 context to be saved for now.
+CTX_INCLUDE_EL2_REGS := 1
+CTX_INCLUDE_AARCH32_REGS := 0
+ARM_ARCH_MAJOR := 8
+ARM_ARCH_MINOR := 6
+endif
+
 # USE_SPINLOCK_CAS requires AArch64 build
 ifeq (${USE_SPINLOCK_CAS},1)
 ifneq (${ARCH},aarch64)
@@ -529,6 +546,10 @@
         ifneq ($(ARM_BL2_SP_LIST_DTS),)
             DTC_CPPFLAGS += -DARM_BL2_SP_LIST_DTS=$(ARM_BL2_SP_LIST_DTS)
         endif
+
+        ifneq ($(SP_LAYOUT_FILE),)
+            BL2_ENABLE_SP_LOAD := 1
+        endif
     else
         # All other SPDs in spd directory
         SPD_DIR := spd
@@ -555,6 +576,18 @@
 endif
 
 ################################################################################
+# Include rmmd Makefile if RME is enabled
+################################################################################
+
+ifneq (${ENABLE_RME},0)
+ifneq (${ARCH},aarch64)
+	$(error ENABLE_RME requires AArch64)
+endif
+include services/std_svc/rmmd/rmmd.mk
+$(warning "RME is an experimental feature")
+endif
+
+################################################################################
 # Include the platform specific Makefile after the SPD Makefile (the platform
 # makefile may use all previous definitions in this file)
 ################################################################################
@@ -772,10 +805,16 @@
 # Process platform overrideable behaviour
 ################################################################################
 
+ifdef BL1_SOURCES
+NEED_BL1 := yes
+endif
+
-# Using BL2 implies that a BL33 image also needs to be supplied for the FIP and
-# Certificate generation tools. This flag can be overridden by the platform.
 ifdef BL2_SOURCES
-        ifdef EL3_PAYLOAD_BASE
+	NEED_BL2 := yes
+
+	# Using BL2 implies that a BL33 image also needs to be supplied for the FIP and
+	# Certificate generation tools. This flag can be overridden by the platform.
+	ifdef EL3_PAYLOAD_BASE
                 # If booting an EL3 payload there is no need for a BL33 image
                 # in the FIP file.
                 NEED_BL33		:=	no
@@ -790,6 +829,10 @@
         endif
 endif
 
+ifdef BL2U_SOURCES
+NEED_BL2U := yes
+endif
+
 # If SCP_BL2 is given, we always want FIP to include it.
 ifdef SCP_BL2
         NEED_SCP_BL2		:=	yes
@@ -827,6 +870,10 @@
 FIP_ARGS += --align ${FIP_ALIGN}
 endif
 
+ifdef FDT_SOURCES
+NEED_FDT := yes
+endif
+
 ################################################################################
 # Include libraries' Makefile that are used in all BL
 ################################################################################
@@ -870,31 +917,23 @@
 ################################################################################
 # Include BL specific makefiles
 ################################################################################
-ifdef BL1_SOURCES
-NEED_BL1 := yes
+
+ifeq (${NEED_BL1},yes)
 include bl1/bl1.mk
 endif
 
-ifdef BL2_SOURCES
-NEED_BL2 := yes
+ifeq (${NEED_BL2},yes)
 include bl2/bl2.mk
 endif
 
-ifdef BL2U_SOURCES
-NEED_BL2U := yes
+ifeq (${NEED_BL2U},yes)
 include bl2u/bl2u.mk
 endif
 
 ifeq (${NEED_BL31},yes)
-ifdef BL31_SOURCES
 include bl31/bl31.mk
 endif
-endif
 
-ifdef FDT_SOURCES
-NEED_FDT := yes
-endif
-
 ################################################################################
 # Build options checks
 ################################################################################
@@ -902,6 +941,7 @@
 $(eval $(call assert_booleans,\
     $(sort \
         ALLOW_RO_XLAT_TABLES \
+        BL2_ENABLE_SP_LOAD \
         COLD_BOOT_SINGLE_CPU \
         CREATE_KEYS \
         CTX_INCLUDE_AARCH32_REGS \
@@ -921,6 +961,7 @@
         ENABLE_PIE \
         ENABLE_PMF \
         ENABLE_PSCI_STAT \
+        ENABLE_RME \
         ENABLE_RUNTIME_INSTRUMENTATION \
         ENABLE_SPE_FOR_LOWER_ELS \
         ENABLE_SVE_FOR_NS \
@@ -971,6 +1012,7 @@
         ENABLE_TRBE_FOR_NS \
         ENABLE_SYS_REG_TRACE_FOR_NS \
         ENABLE_TRF_FOR_NS \
+        ENABLE_FEAT_HCX \
 )))
 
 $(eval $(call assert_numerics,\
@@ -1002,6 +1044,7 @@
         ALLOW_RO_XLAT_TABLES \
         ARM_ARCH_MAJOR \
         ARM_ARCH_MINOR \
+        BL2_ENABLE_SP_LOAD \
         COLD_BOOT_SINGLE_CPU \
         CTX_INCLUDE_AARCH32_REGS \
         CTX_INCLUDE_FPREGS \
@@ -1021,6 +1064,7 @@
         ENABLE_PIE \
         ENABLE_PMF \
         ENABLE_PSCI_STAT \
+        ENABLE_RME \
         ENABLE_RUNTIME_INSTRUMENTATION \
         ENABLE_SPE_FOR_LOWER_ELS \
         ENABLE_SVE_FOR_NS \
@@ -1074,6 +1118,7 @@
         ENABLE_TRBE_FOR_NS \
         ENABLE_SYS_REG_TRACE_FOR_NS \
         ENABLE_TRF_FOR_NS \
+        ENABLE_FEAT_HCX \
 )))
 
 ifeq (${SANITIZE_UB},trap)
@@ -1103,9 +1148,6 @@
 # Generate and include sp_gen.mk if SPD is spmd and SP_LAYOUT_FILE is defined
 ifeq (${SPD},spmd)
 ifdef SP_LAYOUT_FILE
-        ifeq (${SPMD_SPM_AT_SEL2},0)
-            $(error "SPMD with SPM at S-EL1 does not require SP_LAYOUT_FILE")
-        endif
         -include $(BUILD_PLAT)/sp_gen.mk
         FIP_DEPS += sp
         CRT_DEPS += sp
@@ -1143,7 +1185,7 @@
 
 # Expand build macros for the different images
 ifeq (${NEED_BL1},yes)
-$(eval $(call MAKE_BL,1))
+$(eval $(call MAKE_BL,bl1))
 endif
 
 ifeq (${NEED_BL2},yes)
@@ -1152,7 +1194,7 @@
 endif
 
 $(if ${BL2}, $(eval $(call TOOL_ADD_IMG,bl2,--${FIP_BL2_ARGS})),\
-	$(eval $(call MAKE_BL,2,${FIP_BL2_ARGS})))
+	$(eval $(call MAKE_BL,bl2,${FIP_BL2_ARGS})))
 endif
 
 ifeq (${NEED_SCP_BL2},yes)
@@ -1165,10 +1207,10 @@
 BL31_SOURCES := $(sort ${BL31_SOURCES})
 ifneq (${DECRYPTION_SUPPORT},none)
 $(if ${BL31}, $(eval $(call TOOL_ADD_IMG,bl31,--soc-fw,,$(ENCRYPT_BL31))),\
-	$(eval $(call MAKE_BL,31,soc-fw,,$(ENCRYPT_BL31))))
+	$(eval $(call MAKE_BL,bl31,soc-fw,,$(ENCRYPT_BL31))))
 else
 $(if ${BL31}, $(eval $(call TOOL_ADD_IMG,bl31,--soc-fw)),\
-	$(eval $(call MAKE_BL,31,soc-fw)))
+	$(eval $(call MAKE_BL,bl31,soc-fw)))
 endif
 endif
 
@@ -1181,14 +1223,25 @@
 BUILD_BL32 := $(if $(BL32),,$(if $(BL32_SOURCES),1))
 
 ifneq (${DECRYPTION_SUPPORT},none)
-$(if ${BUILD_BL32}, $(eval $(call MAKE_BL,32,tos-fw,,$(ENCRYPT_BL32))),\
+$(if ${BUILD_BL32}, $(eval $(call MAKE_BL,bl32,tos-fw,,$(ENCRYPT_BL32))),\
 	$(eval $(call TOOL_ADD_IMG,bl32,--tos-fw,,$(ENCRYPT_BL32))))
 else
-$(if ${BUILD_BL32}, $(eval $(call MAKE_BL,32,tos-fw)),\
+$(if ${BUILD_BL32}, $(eval $(call MAKE_BL,bl32,tos-fw)),\
 	$(eval $(call TOOL_ADD_IMG,bl32,--tos-fw)))
 endif
 endif
 
+# If RMM image is needed but RMM is not defined, Test Realm Payload (TRP)
+# needs to be built from RMM_SOURCES.
+ifeq (${NEED_RMM},yes)
+# Sort RMM source files to remove duplicates
+RMM_SOURCES := $(sort ${RMM_SOURCES})
+BUILD_RMM := $(if $(RMM),,$(if $(RMM_SOURCES),1))
+
+$(if ${BUILD_RMM}, $(eval $(call MAKE_BL,rmm,rmm-fw)),\
+         $(eval $(call TOOL_ADD_IMG,rmm,--rmm-fw)))
+endif
+
 # Add the BL33 image if required by the platform
 ifeq (${NEED_BL33},yes)
 $(eval $(call TOOL_ADD_IMG,bl33,--nt-fw))
@@ -1196,7 +1249,7 @@
 
 ifeq (${NEED_BL2U},yes)
 $(if ${BL2U}, $(eval $(call TOOL_ADD_IMG,bl2u,--ap-fwu-cfg,FWU_)),\
-	$(eval $(call MAKE_BL,2u,ap-fwu-cfg,FWU_)))
+	$(eval $(call MAKE_BL,bl2u,ap-fwu-cfg,FWU_)))
 endif
 
 # Expand build macros for the different images
diff --git a/bl1/aarch64/bl1_context_mgmt.c b/bl1/aarch64/bl1_context_mgmt.c
index 2a8d58e..b9a7e5b 100644
--- a/bl1/aarch64/bl1_context_mgmt.c
+++ b/bl1/aarch64/bl1_context_mgmt.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -16,6 +16,7 @@
 
 /* Following contains the cpu context pointers. */
 static void *bl1_cpu_context_ptr[2];
+entry_point_info_t *bl2_ep_info;
 
 
 void *cm_get_context(uint32_t security_state)
@@ -30,6 +31,40 @@
 	bl1_cpu_context_ptr[security_state] = context;
 }
 
+#if ENABLE_RME
+/*******************************************************************************
+ * This function prepares the entry point information to run BL2 in Root world,
+ * i.e. EL3, for the case when FEAT_RME is enabled.
+ ******************************************************************************/
+void bl1_prepare_next_image(unsigned int image_id)
+{
+	image_desc_t *bl2_desc;
+
+	assert(image_id == BL2_IMAGE_ID);
+
+	/* Get the image descriptor. */
+	bl2_desc = bl1_plat_get_image_desc(BL2_IMAGE_ID);
+	assert(bl2_desc != NULL);
+
+	/* Get the entry point info. */
+	bl2_ep_info = &bl2_desc->ep_info;
+
+	bl2_ep_info->spsr = (uint32_t)SPSR_64(MODE_EL3, MODE_SP_ELX,
+						DISABLE_ALL_EXCEPTIONS);
+
+	/*
+	 * Flush the cache since bl2_ep_info is accessed after the MMU is
+	 * disabled, before jumping to BL2.
+	 */
+	flush_dcache_range((uintptr_t)bl2_ep_info, sizeof(entry_point_info_t));
+
+	/* Indicate that image is in execution state. */
+	bl2_desc->state = IMAGE_STATE_EXECUTED;
+
+	/* Print debug info and flush the console before running BL2. */
+	print_entry_point_info(bl2_ep_info);
+}
+#else
 /*******************************************************************************
  * This function prepares the context for Secure/Normal world images.
  * Normal world images are transitioned to EL2(if supported) else EL1.
@@ -93,3 +128,4 @@
 
 	print_entry_point_info(next_bl_ep);
 }
+#endif /* ENABLE_RME */
diff --git a/bl1/aarch64/bl1_entrypoint.S b/bl1/aarch64/bl1_entrypoint.S
index 00f2718..f61c060 100644
--- a/bl1/aarch64/bl1_entrypoint.S
+++ b/bl1/aarch64/bl1_entrypoint.S
@@ -1,13 +1,15 @@
 /*
- * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
+#include <common/bl_common.h>
 #include <el3_common_macros.S>
 
 	.globl	bl1_entrypoint
+	.globl	bl1_run_bl2_in_root
 
 
 	/* -----------------------------------------------------
@@ -66,5 +68,41 @@
 	 * Do the transition to next boot image.
 	 * --------------------------------------------------
 	 */
+#if ENABLE_RME
+	b	bl1_run_bl2_in_root
+#else
 	b	el3_exit
+#endif
 endfunc bl1_entrypoint
+
+	/* -----------------------------------------------------
+	 * void bl1_run_bl2_in_root();
+	 * This function runs BL2 in root/EL3 when RME is enabled.
+	 * -----------------------------------------------------
+	 */
+
+func bl1_run_bl2_in_root
+	/* read bl2_ep_info */
+	adrp	x20, bl2_ep_info
+	add	x20, x20, :lo12:bl2_ep_info
+	ldr	x20, [x20]
+
+	/* ---------------------------------------------
+	 * MMU needs to be disabled because BL2 executes
+	 * in EL3. It will initialize the address space
+	 * according to its own requirements.
+	 * ---------------------------------------------
+	 */
+	bl	disable_mmu_icache_el3
+	tlbi	alle3
+
+	ldp	x0, x1, [x20, #ENTRY_POINT_INFO_PC_OFFSET]
+	msr	elr_el3, x0
+	msr	spsr_el3, x1
+
+	ldp	x6, x7, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x30)]
+	ldp	x4, x5, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x20)]
+	ldp	x2, x3, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x10)]
+	ldp	x0, x1, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x0)]
+	exception_return
+endfunc bl1_run_bl2_in_root
diff --git a/bl1/bl1.mk b/bl1/bl1.mk
index d11b4ab..9f63fd5 100644
--- a/bl1/bl1.mk
+++ b/bl1/bl1.mk
@@ -1,14 +1,14 @@
 #
-# Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
-BL1_SOURCES		+=	bl1/bl1_main.c				\
-				bl1/${ARCH}/bl1_arch_setup.c		\
+BL1_SOURCES		+=	bl1/${ARCH}/bl1_arch_setup.c		\
 				bl1/${ARCH}/bl1_context_mgmt.c		\
 				bl1/${ARCH}/bl1_entrypoint.S		\
 				bl1/${ARCH}/bl1_exceptions.S		\
+				bl1/bl1_main.c				\
 				lib/cpus/${ARCH}/cpu_helpers.S		\
 				lib/cpus/errata_report.c		\
 				lib/el3_runtime/${ARCH}/context_mgmt.c	\
diff --git a/bl1/bl1_private.h b/bl1/bl1_private.h
index 2cfeeea..e119ba7 100644
--- a/bl1/bl1_private.h
+++ b/bl1/bl1_private.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -11,6 +11,8 @@
 
 #include <common/bl_common.h>
 
+extern entry_point_info_t *bl2_ep_info;
+
 /******************************************
  * Function prototypes
  *****************************************/
@@ -18,6 +20,7 @@
 void bl1_arch_next_el_setup(void);
 
 void bl1_prepare_next_image(unsigned int image_id);
+void bl1_run_bl2_in_root(void);
 
 u_register_t bl1_fwu_smc_handler(unsigned int smc_fid,
 		u_register_t x1,
diff --git a/bl2/aarch32/bl2_el3_entrypoint.S b/bl2/aarch32/bl2_el3_entrypoint.S
index 7e85551..40154aa 100644
--- a/bl2/aarch32/bl2_el3_entrypoint.S
+++ b/bl2/aarch32/bl2_el3_entrypoint.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,7 +10,6 @@
 #include <el3_common_macros.S>
 
 	.globl	bl2_entrypoint
-	.globl	bl2_run_next_image
 
 
 func bl2_entrypoint
@@ -56,37 +55,3 @@
 	no_ret	plat_panic_handler
 
 endfunc bl2_entrypoint
-
-func bl2_run_next_image
-	mov	r8,r0
-
-	/*
-	 * MMU needs to be disabled because both BL2 and BL32 execute
-	 * in PL1, and therefore share the same address space.
-	 * BL32 will initialize the address space according to its
-	 * own requirement.
-	 */
-	bl	disable_mmu_icache_secure
-	stcopr	r0, TLBIALL
-	dsb	sy
-	isb
-	mov	r0, r8
-	bl	bl2_el3_plat_prepare_exit
-
-	/*
-	 * Extract PC and SPSR based on struct `entry_point_info_t`
-	 * and load it in LR and SPSR registers respectively.
-	 */
-	ldr	lr, [r8, #ENTRY_POINT_INFO_PC_OFFSET]
-	ldr	r1, [r8, #(ENTRY_POINT_INFO_PC_OFFSET + 4)]
-	msr	spsr_xc, r1
-
-	/* Some BL32 stages expect lr_svc to provide the BL33 entry address */
-	cps	#MODE32_svc
-	ldr	lr, [r8, #ENTRY_POINT_INFO_LR_SVC_OFFSET]
-	cps	#MODE32_mon
-
-	add	r8, r8, #ENTRY_POINT_INFO_ARGS_OFFSET
-	ldm	r8, {r0, r1, r2, r3}
-	exception_return
-endfunc bl2_run_next_image
diff --git a/bl2/aarch32/bl2_run_next_image.S b/bl2/aarch32/bl2_run_next_image.S
new file mode 100644
index 0000000..0b3554e
--- /dev/null
+++ b/bl2/aarch32/bl2_run_next_image.S
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+
+	.globl	bl2_run_next_image
+
+
+func bl2_run_next_image
+	mov	r8,r0
+
+	/*
+	 * MMU needs to be disabled because both BL2 and BL32 execute
+	 * in PL1, and therefore share the same address space.
+	 * BL32 will initialize the address space according to its
+	 * own requirement.
+	 */
+	bl	disable_mmu_icache_secure
+	stcopr	r0, TLBIALL
+	dsb	sy
+	isb
+	mov	r0, r8
+	bl	bl2_el3_plat_prepare_exit
+
+	/*
+	 * Extract PC and SPSR based on struct `entry_point_info_t`
+	 * and load it in LR and SPSR registers respectively.
+	 */
+	ldr	lr, [r8, #ENTRY_POINT_INFO_PC_OFFSET]
+	ldr	r1, [r8, #(ENTRY_POINT_INFO_PC_OFFSET + 4)]
+	msr	spsr_xc, r1
+
+	/* Some BL32 stages expect lr_svc to provide the BL33 entry address */
+	cps	#MODE32_svc
+	ldr	lr, [r8, #ENTRY_POINT_INFO_LR_SVC_OFFSET]
+	cps	#MODE32_mon
+
+	add	r8, r8, #ENTRY_POINT_INFO_ARGS_OFFSET
+	ldm	r8, {r0, r1, r2, r3}
+	exception_return
+endfunc bl2_run_next_image
diff --git a/bl2/aarch64/bl2_el3_entrypoint.S b/bl2/aarch64/bl2_el3_entrypoint.S
index 4eab39c..45bac7d 100644
--- a/bl2/aarch64/bl2_el3_entrypoint.S
+++ b/bl2/aarch64/bl2_el3_entrypoint.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -12,8 +12,6 @@
 #include <el3_common_macros.S>
 
 	.globl	bl2_entrypoint
-	.globl	bl2_el3_run_image
-	.globl	bl2_run_next_image
 
 #if BL2_IN_XIP_MEM
 #define FIXUP_SIZE	0
@@ -72,36 +70,3 @@
 	 */
 	no_ret	plat_panic_handler
 endfunc bl2_entrypoint
-
-func bl2_run_next_image
-	mov	x20,x0
-	/* ---------------------------------------------
-	 * MMU needs to be disabled because both BL2 and BL31 execute
-	 * in EL3, and therefore share the same address space.
-	 * BL31 will initialize the address space according to its
-	 * own requirement.
-	 * ---------------------------------------------
-	 */
-	bl	disable_mmu_icache_el3
-	tlbi	alle3
-	bl	bl2_el3_plat_prepare_exit
-
-#if ENABLE_PAUTH
-	/* ---------------------------------------------
-	 * Disable pointer authentication before jumping
-	 * to next boot image.
-	 * ---------------------------------------------
-	 */
-	bl	pauth_disable_el3
-#endif /* ENABLE_PAUTH */
-
-	ldp	x0, x1, [x20, #ENTRY_POINT_INFO_PC_OFFSET]
-	msr	elr_el3, x0
-	msr	spsr_el3, x1
-
-	ldp	x6, x7, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x30)]
-	ldp	x4, x5, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x20)]
-	ldp	x2, x3, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x10)]
-	ldp	x0, x1, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x0)]
-	exception_return
-endfunc bl2_run_next_image
diff --git a/bl2/aarch64/bl2_rme_entrypoint.S b/bl2/aarch64/bl2_rme_entrypoint.S
new file mode 100644
index 0000000..076e326
--- /dev/null
+++ b/bl2/aarch64/bl2_rme_entrypoint.S
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <el3_common_macros.S>
+
+	.globl	bl2_entrypoint
+
+
+func bl2_entrypoint
+	/* Save arguments x0-x3 from previous Boot loader */
+	mov	x20, x0
+	mov	x21, x1
+	mov	x22, x2
+	mov	x23, x3
+
+	el3_entrypoint_common                                   \
+		_init_sctlr=0                                   \
+		_warm_boot_mailbox=0                            \
+		_secondary_cold_boot=0                          \
+		_init_memory=0                                  \
+		_init_c_runtime=1                               \
+		_exception_vectors=bl2_el3_exceptions           \
+		_pie_fixup_size=0
+
+	/* ---------------------------------------------
+	 * Restore parameters of boot rom
+	 * ---------------------------------------------
+	 */
+	mov	x0, x20
+	mov	x1, x21
+	mov	x2, x22
+	mov	x3, x23
+
+	/* ---------------------------------------------
+	 * Perform BL2 setup
+	 * ---------------------------------------------
+	 */
+	bl	bl2_setup
+
+#if ENABLE_PAUTH
+	/* ---------------------------------------------
+	 * Program APIAKey_EL1 and enable pointer authentication.
+	 * ---------------------------------------------
+	 */
+	bl	pauth_init_enable_el3
+#endif /* ENABLE_PAUTH */
+
+	/* ---------------------------------------------
+	 * Jump to main function.
+	 * ---------------------------------------------
+	 */
+	bl	bl2_main
+
+	/* ---------------------------------------------
+	 * Should never reach this point.
+	 * ---------------------------------------------
+	 */
+	no_ret	plat_panic_handler
+endfunc bl2_entrypoint
diff --git a/bl2/aarch64/bl2_run_next_image.S b/bl2/aarch64/bl2_run_next_image.S
new file mode 100644
index 0000000..f0a8be8
--- /dev/null
+++ b/bl2/aarch64/bl2_run_next_image.S
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+
+	.globl	bl2_run_next_image
+
+
+func bl2_run_next_image
+	mov	x20,x0
+	/* ---------------------------------------------
+	 * MMU needs to be disabled because both BL2 and BL31 execute
+	 * in EL3, and therefore share the same address space.
+	 * BL31 will initialize the address space according to its
+	 * own requirement.
+	 * ---------------------------------------------
+	 */
+	bl	disable_mmu_icache_el3
+	tlbi	alle3
+	bl	bl2_el3_plat_prepare_exit
+
+#if ENABLE_PAUTH
+	/* ---------------------------------------------
+	 * Disable pointer authentication before jumping
+	 * to next boot image.
+	 * ---------------------------------------------
+	 */
+	bl	pauth_disable_el3
+#endif /* ENABLE_PAUTH */
+
+	ldp	x0, x1, [x20, #ENTRY_POINT_INFO_PC_OFFSET]
+	msr	elr_el3, x0
+	msr	spsr_el3, x1
+
+	ldp	x6, x7, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x30)]
+	ldp	x4, x5, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x20)]
+	ldp	x2, x3, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x10)]
+	ldp	x0, x1, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x0)]
+	exception_return
+endfunc bl2_run_next_image
diff --git a/bl2/bl2.ld.S b/bl2/bl2.ld.S
index 37849c3..d332ec0 100644
--- a/bl2/bl2.ld.S
+++ b/bl2/bl2.ld.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -25,7 +25,11 @@
 #if SEPARATE_CODE_AND_RODATA
     .text . : {
         __TEXT_START__ = .;
+#if ENABLE_RME
+        *bl2_rme_entrypoint.o(.text*)
+#else /* ENABLE_RME */
         *bl2_entrypoint.o(.text*)
+#endif /* ENABLE_RME */
         *(SORT_BY_ALIGNMENT(.text*))
         *(.vectors)
         . = ALIGN(PAGE_SIZE);
diff --git a/bl2/bl2.mk b/bl2/bl2.mk
index 735e7e0..7a973e5 100644
--- a/bl2/bl2.mk
+++ b/bl2/bl2.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -15,13 +15,26 @@
 BL2_SOURCES		+=	common/aarch64/early_exceptions.S
 endif
 
-ifeq (${BL2_AT_EL3},0)
+ifeq (${ENABLE_RME},1)
+# Using RME, run BL2 at EL3
+include lib/gpt_rme/gpt_rme.mk
+
+BL2_SOURCES		+=      bl2/${ARCH}/bl2_rme_entrypoint.S	\
+				bl2/${ARCH}/bl2_el3_exceptions.S	\
+				bl2/${ARCH}/bl2_run_next_image.S	\
+				${GPT_LIB_SRCS}
+BL2_LINKERFILE		:=	bl2/bl2.ld.S
+
+else ifeq (${BL2_AT_EL3},0)
+# Normal operation, no RME, no BL2 at EL3
 BL2_SOURCES		+=	bl2/${ARCH}/bl2_entrypoint.S
 BL2_LINKERFILE		:=	bl2/bl2.ld.S
 
 else
+# BL2 at EL3, no RME
 BL2_SOURCES		+=	bl2/${ARCH}/bl2_el3_entrypoint.S	\
 				bl2/${ARCH}/bl2_el3_exceptions.S	\
+				bl2/${ARCH}/bl2_run_next_image.S        \
 				lib/cpus/${ARCH}/cpu_helpers.S		\
 				lib/cpus/errata_report.c
 
diff --git a/bl2/bl2_main.c b/bl2/bl2_main.c
index d2de135..197c057 100644
--- a/bl2/bl2_main.c
+++ b/bl2/bl2_main.c
@@ -29,18 +29,18 @@
 #define NEXT_IMAGE	"BL32"
 #endif
 
-#if !BL2_AT_EL3
+#if BL2_AT_EL3
 /*******************************************************************************
- * Setup function for BL2.
+ * Setup function for BL2 when BL2_AT_EL3=1
  ******************************************************************************/
-void bl2_setup(u_register_t arg0, u_register_t arg1, u_register_t arg2,
-	       u_register_t arg3)
+void bl2_el3_setup(u_register_t arg0, u_register_t arg1, u_register_t arg2,
+		   u_register_t arg3)
 {
 	/* Perform early platform-specific setup */
-	bl2_early_platform_setup2(arg0, arg1, arg2, arg3);
+	bl2_el3_early_platform_setup(arg0, arg1, arg2, arg3);
 
 	/* Perform late platform-specific setup */
-	bl2_plat_arch_setup();
+	bl2_el3_plat_arch_setup();
 
 #if CTX_INCLUDE_PAUTH_REGS
 	/*
@@ -50,19 +50,18 @@
 	assert(is_armv8_3_pauth_present());
 #endif /* CTX_INCLUDE_PAUTH_REGS */
 }
-
-#else /* if BL2_AT_EL3 */
+#else /* BL2_AT_EL3 */
 /*******************************************************************************
- * Setup function for BL2 when BL2_AT_EL3=1.
+ * Setup function for BL2 when BL2_AT_EL3=0
  ******************************************************************************/
-void bl2_el3_setup(u_register_t arg0, u_register_t arg1, u_register_t arg2,
-		   u_register_t arg3)
+void bl2_setup(u_register_t arg0, u_register_t arg1, u_register_t arg2,
+	       u_register_t arg3)
 {
 	/* Perform early platform-specific setup */
-	bl2_el3_early_platform_setup(arg0, arg1, arg2, arg3);
+	bl2_early_platform_setup2(arg0, arg1, arg2, arg3);
 
 	/* Perform late platform-specific setup */
-	bl2_el3_plat_arch_setup();
+	bl2_plat_arch_setup();
 
 #if CTX_INCLUDE_PAUTH_REGS
 	/*
@@ -115,7 +114,7 @@
 	measured_boot_finish();
 #endif /* MEASURED_BOOT */
 
-#if !BL2_AT_EL3
+#if !BL2_AT_EL3 && !ENABLE_RME
 #ifndef __aarch64__
 	/*
 	 * For AArch32 state BL1 and BL2 share the MMU setup.
@@ -140,7 +139,7 @@
 	 * be passed to next BL image as an argument.
 	 */
 	smc(BL1_SMC_RUN_IMAGE, (unsigned long)next_bl_ep_info, 0, 0, 0, 0, 0, 0);
-#else /* if BL2_AT_EL3 */
+#else /* if BL2_AT_EL3 || ENABLE_RME */
 	NOTICE("BL2: Booting " NEXT_IMAGE "\n");
 	print_entry_point_info(next_bl_ep_info);
 	console_flush();
@@ -153,5 +152,5 @@
 #endif /* ENABLE_PAUTH */
 
 	bl2_run_next_image(next_bl_ep_info);
-#endif /* BL2_AT_EL3 */
+#endif /* BL2_AT_EL3 && ENABLE_RME */
 }
diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
index 2d672dd..ed05864 100644
--- a/bl31/aarch64/bl31_entrypoint.S
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -195,6 +195,19 @@
 #endif
 	bl	bl31_plat_enable_mmu
 
+#if ENABLE_RME
+	/*
+	 * At warm boot GPT data structures have already been initialized in RAM
+	 * but the sysregs for this CPU need to be initialized. Note that the GPT
+	 * accesses are controlled by attributes in GPCCR and do not depend on the
+	 * SCR_EL3.C bit.
+	 */
+	bl	gpt_enable
+	cbz	x0, 1f
+	no_ret plat_panic_handler
+1:
+#endif
+
 #if ENABLE_PAUTH
 	/* --------------------------------------------------------------------
 	 * Program APIAKey_EL1 and enable pointer authentication
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index 51eb2bd..0d0a12d 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -500,6 +500,21 @@
 	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
 	str	x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
 
+	/* Clear flag register */
+	mov	x7, xzr
+
+#if ENABLE_RME
+	/* Copy SCR_EL3.NSE bit to the flag to indicate caller's security */
+	ubfx	x7, x18, #SCR_NSE_SHIFT, #1
+
+	/*
+	 * Shift copied SCR_EL3.NSE bit by 5 to create space for
+	 * SCR_EL3.NS bit. Bit 5 of the flag corresponds to
+	 * the SCR_EL3.NSE bit.
+	 */
+	lsl	x7, x7, #5
+#endif /* ENABLE_RME */
+
 	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
 	bfi	x7, x18, #0, #1
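For reference, the layout of the SMC flags word assembled above is: bit 0 carries SCR_EL3.NS and, when RME is enabled, bit 5 carries SCR_EL3.NSE. A dispatcher could decode the caller's world from it roughly as sketched below; this is an illustrative sketch only, not the TF-A API, and the helper name and constants are made up.

.. code:: c

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Hypothetical decoder for the SMC flags word built in runtime_exceptions.S:
     * bit 0 = SCR_EL3.NS and, in RME builds, bit 5 = SCR_EL3.NSE.
     */
    #define FLAGS_NS_BIT    (1ULL << 0)
    #define FLAGS_NSE_BIT   (1ULL << 5)

    enum caller_world { CALLER_SECURE, CALLER_NON_SECURE, CALLER_REALM };

    static enum caller_world get_caller_world(uint64_t flags)
    {
        bool ns = (flags & FLAGS_NS_BIT) != 0ULL;
        bool nse = (flags & FLAGS_NSE_BIT) != 0ULL;

        if (nse && ns) {
            return CALLER_REALM;        /* SCR_EL3.{NSE,NS} = {1,1} */
        } else if (ns) {
            return CALLER_NON_SECURE;   /* {0,1} */
        } else {
            return CALLER_SECURE;       /* {0,0}; {1,0} is reserved */
        }
    }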
 
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 7819141..106d410 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -111,6 +111,13 @@
 BL31_SOURCES		+=	services/std_svc/pci_svc.c
 endif
 
+ifeq (${ENABLE_RME},1)
+include lib/gpt_rme/gpt_rme.mk
+
+BL31_SOURCES		+=	${GPT_LIB_SRCS}					\
+				${RMMD_SOURCES}
+endif
+
 BL31_LINKERFILE		:=	bl31/bl31.ld.S
 
 # Flag used to indicate if Crash reporting via console should be included
diff --git a/bl31/bl31_context_mgmt.c b/bl31/bl31_context_mgmt.c
index 9175ee3..34f69ad 100644
--- a/bl31/bl31_context_mgmt.c
+++ b/bl31/bl31_context_mgmt.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -19,9 +19,9 @@
  ******************************************************************************/
 void *cm_get_context(uint32_t security_state)
 {
-	assert(security_state <= NON_SECURE);
+	assert(sec_state_is_valid(security_state));
 
-	return get_cpu_data(cpu_context[security_state]);
+	return get_cpu_data(cpu_context[get_cpu_context_index(security_state)]);
 }
 
 /*******************************************************************************
@@ -30,9 +30,10 @@
  ******************************************************************************/
 void cm_set_context(void *context, uint32_t security_state)
 {
-	assert(security_state <= NON_SECURE);
+	assert(sec_state_is_valid(security_state));
 
-	set_cpu_data(cpu_context[security_state], context);
+	set_cpu_data(cpu_context[get_cpu_context_index(security_state)],
+			context);
 }
 
 /*******************************************************************************
@@ -46,7 +47,8 @@
 {
 	assert(sec_state_is_valid(security_state));
 
-	return get_cpu_data_by_index(cpu_idx, cpu_context[security_state]);
+	return get_cpu_data_by_index(cpu_idx,
+			cpu_context[get_cpu_context_index(security_state)]);
 }
 
 /*******************************************************************************
@@ -58,5 +60,7 @@
 {
 	assert(sec_state_is_valid(security_state));
 
-	set_cpu_data_by_index(cpu_idx, cpu_context[security_state], context);
+	set_cpu_data_by_index(cpu_idx,
+			cpu_context[get_cpu_context_index(security_state)],
+			context);
 }
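With RME the per-CPU context array gains a third (Realm) entry, so the security state can no longer be used as the array index directly; ``get_cpu_context_index()`` performs that mapping. A rough sketch of the idea follows; the constants and function are illustrative only, and the real helper lives in the TF-A context-management headers.

.. code:: c

    #include <stdint.h>

    /*
     * Illustrative only: maps a security state onto an index into the per-CPU
     * cpu_context array. The real get_cpu_context_index() may differ in detail.
     */
    #define EXAMPLE_CTX_SECURE      0U
    #define EXAMPLE_CTX_NON_SECURE  1U
    #define EXAMPLE_CTX_REALM       2U   /* only present when ENABLE_RME=1 */

    static unsigned int example_cpu_context_index(uint32_t security_state)
    {
        if (security_state == 0U) {          /* SECURE */
            return EXAMPLE_CTX_SECURE;
        } else if (security_state == 1U) {   /* NON_SECURE */
            return EXAMPLE_CTX_NON_SECURE;
        } else {                             /* REALM, RME builds only */
            return EXAMPLE_CTX_REALM;
        }
    }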
diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c
index 44bf32c..9ac10e2 100644
--- a/bl31/bl31_main.c
+++ b/bl31/bl31_main.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -35,6 +35,13 @@
  ******************************************************************************/
 static int32_t (*bl32_init)(void);
 
+/*****************************************************************************
+ * Function used to initialise RMM if RME is enabled
+ *****************************************************************************/
+#if ENABLE_RME
+static int32_t (*rmm_init)(void);
+#endif
+
 /*******************************************************************************
  * Variable to indicate whether next image to execute after BL31 is BL33
  * (non-secure & default) or BL32 (secure).
@@ -85,6 +92,15 @@
 	/* Perform late platform-specific setup */
 	bl31_plat_arch_setup();
 
+#if ENABLE_FEAT_HCX
+	/*
+	 * Assert that FEAT_HCX is supported on this system; without this check,
+	 * an exception would occur during context save/restore if the feature is
+	 * enabled but not supported.
+	 */
+	assert(is_feat_hcx_present());
+#endif /* ENABLE_FEAT_HCX */
+
 #if CTX_INCLUDE_PAUTH_REGS
 	/*
 	 * Assert that the ARMv8.3-PAuth registers are present or an access
@@ -130,12 +146,15 @@
 
 	/*
 	 * All the cold boot actions on the primary cpu are done. We now need to
-	 * decide which is the next image (BL32 or BL33) and how to execute it.
+	 * decide which is the next image and how to execute it.
 	 * If the SPD runtime service is present, it would want to pass control
 	 * to BL32 first in S-EL1. In that case, SPD would have registered a
 	 * function to initialize bl32 where it takes responsibility of entering
-	 * S-EL1 and returning control back to bl31_main. Once this is done we
-	 * can prepare entry into BL33 as normal.
+	 * S-EL1 and returning control back to bl31_main. Similarly, if RME is
+	 * enabled and a function is registered to initialize RMM, control is
+	 * transferred to RMM in R-EL2. After RMM initialization, control is
+	 * returned back to bl31_main. Once this is done we can prepare entry
+	 * returned to bl31_main. Once this is done, we can prepare entry
 	 */
 
 	/*
@@ -146,9 +165,27 @@
 
 		int32_t rc = (*bl32_init)();
 
-		if (rc == 0)
+		if (rc == 0) {
 			WARN("BL31: BL32 initialization failed\n");
+		}
 	}
+
+	/*
+	 * If RME is enabled and init hook is registered, initialize RMM
+	 * in R-EL2.
+	 */
+#if ENABLE_RME
+	if (rmm_init != NULL) {
+		INFO("BL31: Initializing RMM\n");
+
+		int32_t rc = (*rmm_init)();
+
+		if (rc == 0) {
+			WARN("BL31: RMM initialization failed\n");
+		}
+	}
+#endif
+
 	/*
 	 * We are ready to enter the next EL. Prepare entry into the image
 	 * corresponding to the desired security state after the next ERET.
@@ -227,3 +264,14 @@
 {
 	bl32_init = func;
 }
+
+#if ENABLE_RME
+/*******************************************************************************
+ * This function initializes the pointer to the RMM init function. This is
+ * expected to be called by the RMMD after it finishes all its initialization.
+ ******************************************************************************/
+void bl31_register_rmm_init(int32_t (*func)(void))
+{
+	rmm_init = func;
+}
+#endif
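This mirrors ``bl31_register_bl32_init()``: the RMM dispatcher is expected to call the new hook during its own setup so that ``bl31_main()`` can hand control to the RMM once. The sketch below shows the intended usage pattern; the ``example_*`` names are hypothetical and not the actual RMMD code.

.. code:: c

    #include <stdint.h>

    void bl31_register_rmm_init(int32_t (*func)(void));   /* declared by BL31 */

    /* Hypothetical init hook: enter the RMM at R-EL2 once and report success. */
    static int32_t example_rmm_init(void)
    {
        /* ... ERET into the RMM, wait for its boot to complete ... */
        return 1;   /* non-zero: success (bl31_main() warns when 0 is returned) */
    }

    static int32_t example_rmmd_setup(void)
    {
        bl31_register_rmm_init(&example_rmm_init);
        return 0;
    }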
diff --git a/docs/about/maintainers.rst b/docs/about/maintainers.rst
index 1bd4666..7db81fc 100644
--- a/docs/about/maintainers.rst
+++ b/docs/about/maintainers.rst
@@ -109,6 +109,16 @@
 :|G|: `john-powell-arm`_
 :|F|: bl31/ehf.c
 
+Realm Management Extension (RME)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+:|M|: Bipin Ravi <bipin.ravi@arm.com>
+:|G|: `bipinravi-arm`_
+:|M|: Mark Dykes <mark.dykes@arm.com>
+:|G|: `mardyk01`_
+:|M|: John Powell <John.Powell@arm.com>
+:|G|: `john-powell-arm`_
+:|M|: Zelalem Aweke <Zelalem.Aweke@arm.com>
+:|G|: `zelalem-aweke`_
 
 Drivers, Libraries and Framework Code
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/docs/about/release-information.rst b/docs/about/release-information.rst
index 3e8dd91..b65571d 100644
--- a/docs/about/release-information.rst
+++ b/docs/about/release-information.rst
@@ -46,7 +46,7 @@
 +-----------------+---------------------------+------------------------------+
 | v2.5            | 3rd week of May '21       | 5th week of Apr '21          |
 +-----------------+---------------------------+------------------------------+
-| v2.6            | 4th week of Oct '21       | 1st week of Oct '21          |
+| v2.6            | 4th week of Nov '21       | 2nd week of Nov '21          |
 +-----------------+---------------------------+------------------------------+
 
 Removal of Deprecated Interfaces
diff --git a/docs/components/index.rst b/docs/components/index.rst
index 2409f96..f349d8d 100644
--- a/docs/components/index.rst
+++ b/docs/components/index.rst
@@ -22,3 +22,4 @@
    ffa-manifest-binding
    xlat-tables-lib-v2-design
    cot-binding
+   realm-management-extension
diff --git a/docs/components/realm-management-extension.rst b/docs/components/realm-management-extension.rst
new file mode 100644
index 0000000..5c580f3
--- /dev/null
+++ b/docs/components/realm-management-extension.rst
@@ -0,0 +1,194 @@
+
+Realm Management Extension (RME)
+====================================
+
+FEAT_RME (or RME for short) is an Armv9-A extension and is one component of the
+`Arm Confidential Compute Architecture (Arm CCA)`_. TF-A supports RME starting
+from version 2.6. This document provides instructions on how to build and run
+TF-A with RME.
+
+Building and running TF-A with RME
+------------------------------------
+
+This section describes how you can build and run TF-A with RME enabled.
+We assume you have all the :ref:`Prerequisites` to build TF-A.
+
+To enable RME, you need to set the ENABLE_RME build flag when building
+TF-A. Currently, this feature is only supported for the FVP platform.
+
+The following instructions show you how to build and run TF-A with RME
+for two scenarios: TF-A with TF-A Tests, and four-world execution with
+Hafnium and TF-A Tests. The instructions assume you have already obtained
+TF-A. You can use the following command to clone TF-A.
+
+.. code:: shell
+
+ git clone https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git
+
+To run the tests, you need an FVP model. You can download a model that supports
+RME from the `Arm Architecture Models website`_. Please select the
+*Base RevC AEM FVP* model. After extracting the downloaded file, you should be able to
+find the *FVP_Base_RevC-2xAEMvA* binary. The instructions below have been tested
+with model version 11.15 revision 18.
+
+.. note::
+
+ ENABLE_RME build option is currently experimental.
+
+Building TF-A with TF-A Tests
+********************************************
+Use the following instructions to build TF-A with `TF-A Tests`_ as the
+non-secure payload (BL33).
+
+**1. Obtain and build TF-A Tests**
+
+.. code:: shell
+
+ git clone https://git.trustedfirmware.org/TF-A/tf-a-tests.git
+ cd tf-a-tests
+ make CROSS_COMPILE=aarch64-none-elf- PLAT=fvp DEBUG=1
+
+This produces a TF-A Tests binary (*tftf.bin*) in the *build/fvp/debug* directory.
+
+**2. Build TF-A**
+
+.. code:: shell
+
+ cd trusted-firmware-a
+ make CROSS_COMPILE=aarch64-none-elf- \
+ PLAT=fvp \
+ ENABLE_RME=1 \
+ FVP_HW_CONFIG_DTS=fdts/fvp-base-gicv3-psci-1t.dts \
+ DEBUG=1 \
+ BL33=<path/to/tftf.bin> \
+ all fip
+
+This produces *bl1.bin* and *fip.bin* binaries in the *build/fvp/debug* directory.
+The above command also builds a Test Realm Payload (TRP), which is a small test
+payload that implements Realm Management Monitor (RMM) functionalities and runs
+in the realm world (R-EL2). The TRP binary is packaged in *fip.bin*.
+
+Four-world execution with Hafnium and TF-A Tests
+****************************************************
+Four-world execution involves software components at each security state: root,
+secure, realm and non-secure. This section describes how to build TF-A
+with four-world support. We use TF-A as the root firmware, `Hafnium`_ as the
+secure component, TRP as the realm-world firmware and TF-A Tests as the
+non-secure payload.
+
+Before building TF-A, you first need to build the other software components.
+You can find instructions on how to get and build TF-A Tests above.
+
+**1. Obtain and build Hafnium**
+
+.. code:: shell
+
+ git clone --recurse-submodules https://git.trustedfirmware.org/hafnium/hafnium.git
+ cd hafnium
+ make PROJECT=reference
+
+The Hafnium binary should be located at
+*out/reference/secure_aem_v8a_fvp_clang/hafnium.bin*.
+
+**2. Build TF-A**
+
+Build TF-A with RME as well as SPM enabled.
+
+.. code:: shell
+
+ make CROSS_COMPILE=aarch64-none-elf- \
+ PLAT=fvp \
+ ENABLE_RME=1 \
+ FVP_HW_CONFIG_DTS=fdts/fvp-base-gicv3-psci-1t.dts \
+ SPD=spmd \
+ SPMD_SPM_AT_SEL2=1 \
+ BRANCH_PROTECTION=1 \
+ CTX_INCLUDE_PAUTH_REGS=1 \
+ DEBUG=1 \
+ SP_LAYOUT_FILE=<path/to/tf-a-tests>/build/fvp/debug/sp_layout.json \
+ BL32=<path/to/hafnium.bin> \
+ BL33=<path/to/tftf.bin> \
+ all fip
+
+Running the tests
+*********************
+Use the following command to run the tests on FVP. TF-A Tests should boot
+and run the default tests including RME tests.
+
+.. code:: shell
+
+ FVP_Base_RevC-2xAEMvA \
+ -C bp.flashloader0.fname=<path/to/fip.bin> \
+ -C bp.secureflashloader.fname=<path/to/bl1.bin> \
+ -C bp.refcounter.non_arch_start_at_default=1 \
+ -C bp.refcounter.use_real_time=0 \
+ -C bp.ve_sysregs.exit_on_shutdown=1 \
+ -C cache_state_modelled=1 \
+ -C cluster0.NUM_CORES=4 \
+ -C cluster0.PA_SIZE=48 \
+ -C cluster0.ecv_support_level=2 \
+ -C cluster0.gicv3.cpuintf-mmap-access-level=2 \
+ -C cluster0.gicv3.without-DS-support=1 \
+ -C cluster0.gicv4.mask-virtual-interrupt=1 \
+ -C cluster0.has_arm_v8-6=1 \
+ -C cluster0.has_branch_target_exception=1 \
+ -C cluster0.has_rme=1 \
+ -C cluster0.has_rndr=1 \
+ -C cluster0.has_amu=1 \
+ -C cluster0.has_v8_7_pmu_extension=2 \
+ -C cluster0.max_32bit_el=-1 \
+ -C cluster0.restriction_on_speculative_execution=2 \
+ -C cluster0.restriction_on_speculative_execution_aarch32=2 \
+ -C cluster1.NUM_CORES=4 \
+ -C cluster1.PA_SIZE=48 \
+ -C cluster1.ecv_support_level=2 \
+ -C cluster1.gicv3.cpuintf-mmap-access-level=2 \
+ -C cluster1.gicv3.without-DS-support=1 \
+ -C cluster1.gicv4.mask-virtual-interrupt=1 \
+ -C cluster1.has_arm_v8-6=1 \
+ -C cluster1.has_branch_target_exception=1 \
+ -C cluster1.has_rme=1 \
+ -C cluster1.has_rndr=1 \
+ -C cluster1.has_amu=1 \
+ -C cluster1.has_v8_7_pmu_extension=2 \
+ -C cluster1.max_32bit_el=-1 \
+ -C cluster1.restriction_on_speculative_execution=2 \
+ -C cluster1.restriction_on_speculative_execution_aarch32=2 \
+ -C pci.pci_smmuv3.mmu.SMMU_AIDR=2 \
+ -C pci.pci_smmuv3.mmu.SMMU_IDR0=0x0046123B \
+ -C pci.pci_smmuv3.mmu.SMMU_IDR1=0x00600002 \
+ -C pci.pci_smmuv3.mmu.SMMU_IDR3=0x1714 \
+ -C pci.pci_smmuv3.mmu.SMMU_IDR5=0xFFFF0475 \
+ -C pci.pci_smmuv3.mmu.SMMU_S_IDR1=0xA0000002 \
+ -C pci.pci_smmuv3.mmu.SMMU_S_IDR2=0 \
+ -C pci.pci_smmuv3.mmu.SMMU_S_IDR3=0 \
+ -C bp.pl011_uart0.out_file=uart0.log \
+ -C bp.pl011_uart1.out_file=uart1.log \
+ -C bp.pl011_uart2.out_file=uart2.log \
+ -C pctl.startup=0.0.0.0 \
+ -Q 1000 \
+ "$@"
+
+The bottom of the output from *uart0* should look something like the following.
+
+.. code-block:: shell
+
+ ...
+
+ > Test suite 'FF-A Interrupt'
+                                                                Passed
+ > Test suite 'SMMUv3 tests'
+                                                                Passed
+ > Test suite 'PMU Leakage'
+                                                                Passed
+ > Test suite 'DebugFS'
+                                                                Passed
+ > Test suite 'Realm payload tests'
+                                                                Passed
+ ...
+
+
+.. _Arm Confidential Compute Architecture (Arm CCA): https://www.arm.com/why-arm/architecture/security-features/arm-confidential-compute-architecture
+.. _Arm Architecture Models website: https://developer.arm.com/tools-and-software/simulation-models/fixed-virtual-platforms/arm-ecosystem-models
+.. _TF-A Tests: https://trustedfirmware-a-tests.readthedocs.io/en/latest
+.. _Hafnium: https://www.trustedfirmware.org/projects/hafnium
diff --git a/docs/components/xlat-tables-lib-v2-design.rst b/docs/components/xlat-tables-lib-v2-design.rst
index af5151f..cac32f5 100644
--- a/docs/components/xlat-tables-lib-v2-design.rst
+++ b/docs/components/xlat-tables-lib-v2-design.rst
@@ -10,7 +10,7 @@
 More specifically, some use cases that this library aims to support are:
 
 #. Statically allocate translation tables and populate them (at run-time) based
-   on a description of the memory layout. The memory layout is typically
+   upon a description of the memory layout. The memory layout is typically
    provided by the platform port as a list of memory regions;
 
 #. Support for generating translation tables pertaining to a different
@@ -26,22 +26,28 @@
 #. Support for changing memory attributes of memory regions at run-time.
 
 
-About version 1 and version 2
------------------------------
+About version 1, version 2 and MPU libraries
+--------------------------------------------
 
 This document focuses on version 2 of the library, whose sources are available
 in the ``lib/xlat_tables_v2`` directory. Version 1 of the library can still be
 found in ``lib/xlat_tables`` directory but it is less flexible and doesn't
-support dynamic mapping. Although potential bug fixes will be applied to both
-versions, future features enhancements will focus on version 2 and might not be
-back-ported to version 1. Therefore, it is recommended to use version 2,
-especially for new platform ports.
+support dynamic mapping. ``lib/xlat_mpu``, which configures Arm's MPU
+equivalently, is also addressed here. The ``lib/xlat_mpu`` is experimental,
+meaning that its API may change. It currently strives for consistency and
+code-reuse with xlat_tables_v2.  Future versions may be more MPU-specific (e.g.,
+removing all mentions of virtual addresses). Although potential bug fixes will
+be applied to all versions of the xlat_* libs, future feature enhancements will
+focus on version 2 and might not be back-ported to version 1 and MPU versions.
+Therefore, it is recommended to use version 2, especially for new platform
+ports (unless the platform uses an MPU).
 
-However, please note that version 2 is still in active development and is not
-considered stable yet. Hence, compatibility breaks might be introduced.
+However, please note that version 2 and the MPU version are still in active
+development and are not considered stable yet. Hence, compatibility breaks might
+be introduced.
 
 From this point onwards, this document will implicitly refer to version 2 of the
-library.
+library, unless stated otherwise.
 
 
 Design concepts and interfaces
@@ -102,6 +108,16 @@
 library will choose the mapping granularity for this region as it sees fit (more
 details can be found in `The memory mapping algorithm`_ section below).
 
+The MPU library also uses ``struct mmap_region`` to specify translations, but
+the MPU's translations are limited to specification of valid addresses and
+access permissions.  If the requested virtual and physical addresses mismatch
+the system will panic. Being register-based for deterministic memory-reference
+timing, the MPU hardware does not involve memory-resident translation tables.
+
+Currently, the MPU library is also limited to MPU translation at EL2 with no
+MMU translation at other ELs.  These limitations, however, are expected to be
+overcome in future library versions.
+
 Translation Context
 ~~~~~~~~~~~~~~~~~~~
 
@@ -215,7 +231,8 @@
 The ``MAP_REGION()`` and ``MAP_REGION_FLAT()`` macros do not allow specifying a
 mapping granularity, which leaves the library implementation free to choose
 it. However, in cases where a specific granularity is required, the
-``MAP_REGION2()`` macro might be used instead.
+``MAP_REGION2()`` macro might be used instead. For the MPU library, using only
+``MAP_REGION_FLAT()`` to define regions is strongly recommended.
 
 As explained earlier in this document, when the dynamic mapping feature is
 disabled, there is no notion of dynamic regions. Conceptually, there are only
@@ -374,6 +391,9 @@
 refer to the comments in the source code of the core module for more details
 about the sorting algorithm in use.
 
+This mapping algorithm does not apply to the MPU library, since the MPU hardware
+directly maps regions by "base" and "limit" (bottom and top) addresses.
+
 TLB maintenance operations
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -390,6 +410,11 @@
 is deferred to the ``enable_mmu*()`` family of functions, just before the MMU is
 turned on.
 
+To avoid confusion, the MPU library's functions for enabling and disabling
+memory management use ``mpu`` in their names in place of ``mmu``. For example,
+the ``enable_mmu_el2()`` call becomes ``enable_mpu_el2()``.
+
 TLB invalidation is not required when adding dynamic regions either. Dynamic
 regions are not allowed to overlap existing memory region. Therefore, if the
 dynamic mapping request is deemed legitimate, it automatically concerns memory
@@ -412,6 +437,6 @@
 
 --------------
 
-*Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved.*
+*Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.*
 
 .. |Alignment Example| image:: ../resources/diagrams/xlat_align.png
diff --git a/docs/design/cpu-specific-build-macros.rst b/docs/design/cpu-specific-build-macros.rst
index bde6d97..eebeaa2 100644
--- a/docs/design/cpu-specific-build-macros.rst
+++ b/docs/design/cpu-specific-build-macros.rst
@@ -284,6 +284,10 @@
 -  ``ERRATA_A78_1952683``: This applies errata 1952683 workaround to Cortex-A78
    CPU. This needs to be enabled for revision r0p0, it is fixed in r1p0.
 
+-  ``ERRATA_A78_2132060``: This applies errata 2132060 workaround to Cortex-A78
+   CPU. This needs to be enabled for revisions r0p0, r1p0, r1p1, and r1p2. It
+   is still open.
+
 For Cortex-A78 AE, the following errata build flags are defined :
 
 - ``ERRATA_A78_AE_1941500`` : This applies errata 1941500 workaround to Cortex-A78
@@ -371,6 +375,10 @@
    CPU. This needs to be enabled for revisions r0p0, r1p0, and r1p1 of the
    CPU.  It is still open.
 
+-  ``ERRATA_V1_2108267``: This applies errata 2108267 workaround to Neoverse-V1
+   CPU. This needs to be enabled for revisions r0p0, r1p0, and r1p1 of the CPU.
+   It is still open.
+
 For Cortex-A710, the following errata build flags are defined :
 
 -  ``ERRATA_A710_1987031``: This applies errata 1987031 workaround to
@@ -389,6 +397,14 @@
    Cortex-A710 CPU. This needs to be enabled for revisions r0p0, r1p0 and r2p0
    of the CPU and is still open.
 
+-  ``ERRATA_A710_2083908``: This applies errata 2083908 workaround to
+   Cortex-A710 CPU. This needs to be enabled for revision r2p0 of the CPU and
+   is still open.
+
+-  ``ERRATA_A710_2058056``: This applies errata 2058056 workaround to
+   Cortex-A710 CPU. This needs to be enabled for revisions r0p0, r1p0 and r2p0
+   of the CPU and is still open.
+
 For Neoverse N2, the following errata build flags are defined :
 
 -  ``ERRATA_N2_2067956``: This applies errata 2067956 workaround to Neoverse-N2
@@ -403,6 +419,9 @@
 -  ``ERRATA_N2_2138956``: This applies errata 2138956 workaround to Neoverse-N2
    CPU. This needs to be enabled for revision r0p0 of the CPU and is still open.
 
+-  ``ERRATA_N2_2138953``: This applies errata 2138953 workaround to Neoverse-N2
+   CPU. This needs to be enabled for revision r0p0 of the CPU and is still open.
+
 DSU Errata Workarounds
 ----------------------
 
diff --git a/docs/getting_started/build-options.rst b/docs/getting_started/build-options.rst
index 115b2b2..1259881 100644
--- a/docs/getting_started/build-options.rst
+++ b/docs/getting_started/build-options.rst
@@ -55,6 +55,9 @@
 -  ``BL2_AT_EL3``: This is an optional build option that enables the use of
    BL2 at EL3 execution level.
 
+-  ``BL2_ENABLE_SP_LOAD``: Boolean option to enable loading SP packages from the
+   FIP. Automatically enabled if ``SP_LAYOUT_FILE`` is provided.
+
 -  ``BL2_IN_XIP_MEM``: In some use-cases BL2 will be stored in eXecute In Place
    (XIP) memory, like BL1. In these use-cases, it is necessary to initialize
    the RW sections in RAM, while leaving the RO sections in place. This option
@@ -235,6 +238,10 @@
    builds, but this behaviour can be overridden in each platform's Makefile or
    in the build command line.
 
+-  ``ENABLE_FEAT_HCX``: This option sets the bit SCR_EL3.HXEn in EL3 to allow
+   access to HCRX_EL2 (extended hypervisor control register) from EL2 as well as
+   adding HCRX_EL2 to the EL2 context save/restore operations.
+
 -  ``ENABLE_LTO``: Boolean option to enable Link Time Optimization (LTO)
    support in GCC for TF-A. This option is currently only supported for
    AArch64. Default is 0.
@@ -264,6 +271,10 @@
    be enabled. If ``ENABLE_PMF`` is set, the residency statistics are tracked in
    software.
 
+-  ``ENABLE_RME``: Boolean option to enable support for the ARMv9 Realm
+   Management Extension. Default value is 0. This is currently an experimental
+   feature.
+
 -  ``ENABLE_RUNTIME_INSTRUMENTATION``: Boolean option to enable runtime
    instrumentation which injects timestamp collection points into TF-A to
    allow runtime performance to be measured. Currently, only PSCI is
diff --git a/docs/plat/arm/fvp_r/index.rst b/docs/plat/arm/fvp_r/index.rst
new file mode 100644
index 0000000..8af16ba
--- /dev/null
+++ b/docs/plat/arm/fvp_r/index.rst
@@ -0,0 +1,46 @@
+ARM V8-R64 Fixed Virtual Platform (FVP)
+=======================================
+
+Some of the features of the Armv8-R AArch64 FVP platform referenced in the
+R-class Trusted Boot support include:
+
+- Secure World Support Only
+- EL2 as Maximum EL support (No EL3)
+- MPU Support only at EL2
+- MPU or MMU Support at EL0/EL1
+- AArch64 Support Only
+- Trusted Board Boot
+
+Further information on v8-R64 FVP is available at `info <https://developer.arm.com/documentation/ddi0600/latest/>`_
+
+Boot Sequence
+-------------
+
+BL1 -> BL33
+
+Execution begins in BL1, which loads the BL33 image, a boot-wrapped image
+(bootloader + Operating System), from the FIP to DRAM.
+
+Build Procedure
+~~~~~~~~~~~~~~~
+
+-  Obtain arm `toolchain <https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-a/downloads>`_.
+   Set the CROSS_COMPILE environment variable to point to the toolchain folder.
+
+-  Build TF-A:
+
+   .. code:: shell
+
+      make PLAT=fvp_r BL33=<path_to_os.bin> all fip
+
+   Enable TBBR by adding the following options to the make command:
+
+   .. code:: shell
+
+      MBEDTLS_DIR=<path_to_mbedtls_directory>  \
+      TRUSTED_BOARD_BOOT=1 \
+      GENERATE_COT=1 \
+      ARM_ROTPK_LOCATION=devel_rsa  \
+      ROT_KEY=plat/arm/board/common/rotpk/arm_rotprivk_rsa.pem
+
+*Copyright (c) 2021, Arm Limited. All rights reserved.*
diff --git a/docs/plat/arm/index.rst b/docs/plat/arm/index.rst
index c834f6a..f262dc0 100644
--- a/docs/plat/arm/index.rst
+++ b/docs/plat/arm/index.rst
@@ -7,6 +7,7 @@
 
    juno/index
    fvp/index
+   fvp_r/index
    fvp-ve/index
    tc/index
    arm_fpga/index
@@ -20,4 +21,4 @@
 
 --------------
 
-*Copyright (c) 2021, Arm Limited. All rights reserved.*
+*Copyright (c) 2019-2021, Arm Limited. All rights reserved.*
diff --git a/drivers/arm/ethosn/ethosn_smc.c b/drivers/arm/ethosn/ethosn_smc.c
index 299d07c..60364cd 100644
--- a/drivers/arm/ethosn/ethosn_smc.c
+++ b/drivers/arm/ethosn/ethosn_smc.c
@@ -14,11 +14,10 @@
 #include <lib/mmio.h>
 #include <plat/arm/common/fconf_ethosn_getter.h>
 
-/* Arm Ethos-N NPU (NPU) status */
-#define ETHOSN_STATUS \
-	FCONF_GET_PROPERTY(hw_config, ethosn_config, status)
-
-/* Number of NPU cores available */
+/*
+ * Number of Arm Ethos-N NPU (NPU) cores available for a
+ * particular parent device
+ */
 #define ETHOSN_NUM_CORES \
 	FCONF_GET_PROPERTY(hw_config, ethosn_config, num_cores)
 
@@ -51,6 +50,17 @@
 #define SEC_SYSCTRL0_SOFT_RESET		U(3U << 29)
 #define SEC_SYSCTRL0_HARD_RESET		U(1U << 31)
 
+static bool ethosn_is_core_addr_valid(uintptr_t core_addr)
+{
+	for (uint32_t core_idx = 0U; core_idx < ETHOSN_NUM_CORES; core_idx++) {
+		if (ETHOSN_CORE_ADDR(core_idx) == core_addr) {
+			return true;
+		}
+	}
+
+	return false;
+}
+
 static void ethosn_delegate_to_ns(uintptr_t core_addr)
 {
 	mmio_setbits_32(ETHOSN_CORE_SEC_REG(core_addr, SEC_SECCTLR_REG),
@@ -66,9 +76,9 @@
 			SEC_DEL_ADDR_EXT_VAL);
 }
 
-static int ethosn_is_sec(void)
+static int ethosn_is_sec(uintptr_t core_addr)
 {
-	if ((mmio_read_32(ETHOSN_CORE_SEC_REG(ETHOSN_CORE_ADDR(0), SEC_DEL_REG))
+	if ((mmio_read_32(ETHOSN_CORE_SEC_REG(core_addr, SEC_DEL_REG))
 		& SEC_DEL_EXCC_MASK) != 0U) {
 		return 0;
 	}
@@ -101,7 +111,7 @@
 }
 
 uintptr_t ethosn_smc_handler(uint32_t smc_fid,
-			     u_register_t core_idx,
+			     u_register_t core_addr,
 			     u_register_t x2,
 			     u_register_t x3,
 			     u_register_t x4,
@@ -109,8 +119,8 @@
 			     void *handle,
 			     u_register_t flags)
 {
-	uintptr_t core_addr;
 	int hard_reset = 0;
+	const uint32_t fid = smc_fid & FUNCID_NUM_MASK;
 
 	/* Only SiP fast calls are expected */
 	if ((GET_SMC_TYPE(smc_fid) != SMC_TYPE_FAST) ||
@@ -120,7 +130,7 @@
 
 	/* Truncate parameters to 32-bits for SMC32 */
 	if (GET_SMC_CC(smc_fid) == SMC_32) {
-		core_idx &= 0xFFFFFFFF;
+		core_addr &= 0xFFFFFFFF;
 		x2 &= 0xFFFFFFFF;
 		x3 &= 0xFFFFFFFF;
 		x4 &= 0xFFFFFFFF;
@@ -130,33 +140,29 @@
 		SMC_RET1(handle, SMC_UNK);
 	}
 
-	if (ETHOSN_STATUS == ETHOSN_STATUS_DISABLED) {
-		WARN("ETHOSN: Arm Ethos-N NPU not available\n");
-		SMC_RET1(handle, ETHOSN_NOT_SUPPORTED);
-	}
-
-	switch (smc_fid & FUNCID_NUM_MASK) {
+	/* Commands that do not require a valid core address */
+	switch (fid) {
 	case ETHOSN_FNUM_VERSION:
 		SMC_RET2(handle, ETHOSN_VERSION_MAJOR, ETHOSN_VERSION_MINOR);
+	}
+
+	if (!ethosn_is_core_addr_valid(core_addr)) {
+		WARN("ETHOSN: Unknown core address given to SMC call.\n");
+		SMC_RET1(handle, ETHOSN_UNKNOWN_CORE_ADDRESS);
+	}
+
+	/* Commands that require a valid addr */
+	switch (fid) {
 	case ETHOSN_FNUM_IS_SEC:
-		SMC_RET1(handle, ethosn_is_sec());
+		SMC_RET1(handle, ethosn_is_sec(core_addr));
 	case ETHOSN_FNUM_HARD_RESET:
 		hard_reset = 1;
 		/* Fallthrough */
 	case ETHOSN_FNUM_SOFT_RESET:
-		if (core_idx >= ETHOSN_NUM_CORES) {
-			WARN("ETHOSN: core index out of range\n");
-			SMC_RET1(handle, ETHOSN_CORE_IDX_OUT_OF_RANGE);
-		}
-
-		core_addr = ETHOSN_CORE_ADDR(core_idx);
-
 		if (!ethosn_reset(core_addr, hard_reset)) {
 			SMC_RET1(handle, ETHOSN_FAILURE);
 		}
-
 		ethosn_delegate_to_ns(core_addr);
-
 		SMC_RET1(handle, ETHOSN_SUCCESS);
 	default:
 		SMC_RET1(handle, SMC_UNK);
diff --git a/drivers/auth/tbbr/tbbr_cot_bl1_r64.c b/drivers/auth/tbbr/tbbr_cot_bl1_r64.c
new file mode 100644
index 0000000..e8e017c
--- /dev/null
+++ b/drivers/auth/tbbr/tbbr_cot_bl1_r64.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+
+#include <drivers/auth/auth_mod.h>
+#include <drivers/auth/mbedtls/mbedtls_config.h>
+#include <drivers/auth/tbbr_cot_common.h>
+
+#if USE_TBBR_DEFS
+#include <tools_share/tbbr_oid.h>
+#else
+#include <platform_oid.h>
+#endif
+#include <platform_def.h>
+
+
+static unsigned char trusted_world_pk_buf[PK_DER_LEN];
+static unsigned char non_trusted_world_pk_buf[PK_DER_LEN];
+static unsigned char content_pk_buf[PK_DER_LEN];
+static unsigned char nt_fw_config_hash_buf[HASH_DER_LEN];
+
+static auth_param_type_desc_t non_trusted_nv_ctr = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_NV_CTR, NON_TRUSTED_FW_NVCOUNTER_OID);
+static auth_param_type_desc_t trusted_world_pk = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_PUB_KEY, TRUSTED_WORLD_PK_OID);
+static auth_param_type_desc_t non_trusted_world_pk = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_PUB_KEY, NON_TRUSTED_WORLD_PK_OID);
+static auth_param_type_desc_t nt_fw_content_pk = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_PUB_KEY, NON_TRUSTED_FW_CONTENT_CERT_PK_OID);
+static auth_param_type_desc_t nt_world_bl_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, NON_TRUSTED_WORLD_BOOTLOADER_HASH_OID);
+static auth_param_type_desc_t nt_fw_config_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, NON_TRUSTED_FW_CONFIG_HASH_OID);
+/*
+ * Trusted key certificate
+ */
+static const auth_img_desc_t trusted_key_cert = {
+	.img_id = TRUSTED_KEY_CERT_ID,
+	.img_type = IMG_CERT,
+	.parent = NULL,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &subject_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &raw_data
+			}
+		},
+		[1] = {
+			.type = AUTH_METHOD_NV_CTR,
+			.param.nv_ctr = {
+				.cert_nv_ctr = &trusted_nv_ctr,
+				.plat_nv_ctr = &trusted_nv_ctr
+			}
+		}
+	},
+	.authenticated_data = (const auth_param_desc_t[COT_MAX_VERIFIED_PARAMS]) {
+		[0] = {
+			.type_desc = &trusted_world_pk,
+			.data = {
+				.ptr = (void *)trusted_world_pk_buf,
+				.len = (unsigned int)PK_DER_LEN
+			}
+		},
+		[1] = {
+			.type_desc = &non_trusted_world_pk,
+			.data = {
+				.ptr = (void *)non_trusted_world_pk_buf,
+				.len = (unsigned int)PK_DER_LEN
+			}
+		}
+	}
+};
+/*
+ * Non-Trusted Firmware
+ */
+static const auth_img_desc_t non_trusted_fw_key_cert = {
+	.img_id = NON_TRUSTED_FW_KEY_CERT_ID,
+	.img_type = IMG_CERT,
+	.parent = &trusted_key_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &non_trusted_world_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &raw_data
+			}
+		},
+		[1] = {
+			.type = AUTH_METHOD_NV_CTR,
+			.param.nv_ctr = {
+				.cert_nv_ctr = &non_trusted_nv_ctr,
+				.plat_nv_ctr = &non_trusted_nv_ctr
+			}
+		}
+	},
+	.authenticated_data = (const auth_param_desc_t[COT_MAX_VERIFIED_PARAMS]) {
+		[0] = {
+			.type_desc = &nt_fw_content_pk,
+			.data = {
+				.ptr = (void *)content_pk_buf,
+				.len = (unsigned int)PK_DER_LEN
+			}
+		}
+	}
+};
+static const auth_img_desc_t non_trusted_fw_content_cert = {
+	.img_id = NON_TRUSTED_FW_CONTENT_CERT_ID,
+	.img_type = IMG_CERT,
+	.parent = &non_trusted_fw_key_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_SIG,
+			.param.sig = {
+				.pk = &nt_fw_content_pk,
+				.sig = &sig,
+				.alg = &sig_alg,
+				.data = &raw_data
+			}
+		},
+		[1] = {
+			.type = AUTH_METHOD_NV_CTR,
+			.param.nv_ctr = {
+				.cert_nv_ctr = &non_trusted_nv_ctr,
+				.plat_nv_ctr = &non_trusted_nv_ctr
+			}
+		}
+	},
+	.authenticated_data = (const auth_param_desc_t[COT_MAX_VERIFIED_PARAMS]) {
+		[0] = {
+			.type_desc = &nt_world_bl_hash,
+			.data = {
+				.ptr = (void *)nt_world_bl_hash_buf,
+				.len = (unsigned int)HASH_DER_LEN
+			}
+		},
+		[1] = {
+			.type_desc = &nt_fw_config_hash,
+			.data = {
+				.ptr = (void *)nt_fw_config_hash_buf,
+				.len = (unsigned int)HASH_DER_LEN
+			}
+		}
+	}
+};
+static const auth_img_desc_t bl33_image = {
+	.img_id = BL33_IMAGE_ID,
+	.img_type = IMG_RAW,
+	.parent = &non_trusted_fw_content_cert,
+	.img_auth_methods = (const auth_method_desc_t[AUTH_METHOD_NUM]) {
+		[0] = {
+			.type = AUTH_METHOD_HASH,
+			.param.hash = {
+				.data = &raw_data,
+				.hash = &nt_world_bl_hash
+			}
+		}
+	}
+};
+
+static const auth_img_desc_t * const cot_desc[] = {
+	[TRUSTED_KEY_CERT_ID]			=	&trusted_key_cert,
+	[NON_TRUSTED_FW_KEY_CERT_ID]		=	&non_trusted_fw_key_cert,
+	[NON_TRUSTED_FW_CONTENT_CERT_ID]	=	&non_trusted_fw_content_cert,
+	[BL33_IMAGE_ID]				=	&bl33_image,
+};
+
+/* Register the CoT in the authentication module */
+REGISTER_COT(cot_desc);
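
The descriptors above chain through their .parent pointers, so verification proceeds root-first: trusted_key_cert, then non_trusted_fw_key_cert, then non_trusted_fw_content_cert, and finally the BL33 hash check. A stand-alone sketch of walking such a chain, using a simplified descriptor type rather than the real auth_img_desc_t (the names mirror the descriptors above; the struct layout is invented for illustration):

#include <stdio.h>

/* Simplified stand-in for auth_img_desc_t: only a name and a parent link. */
struct img_desc {
	const char *name;
	const struct img_desc *parent;
};

static const struct img_desc trusted_key_cert = { "trusted_key_cert", NULL };
static const struct img_desc nt_fw_key_cert = {
	"non_trusted_fw_key_cert", &trusted_key_cert };
static const struct img_desc nt_fw_content_cert = {
	"non_trusted_fw_content_cert", &nt_fw_key_cert };
static const struct img_desc bl33_image = { "bl33_image", &nt_fw_content_cert };

int main(void)
{
	/* Print the chain from the image up to the root certificate. */
	for (const struct img_desc *d = &bl33_image; d != NULL; d = d->parent) {
		printf("%s%s", d->name, (d->parent != NULL) ? " -> " : "\n");
	}
	return 0;
}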
diff --git a/drivers/marvell/comphy/phy-comphy-3700.c b/drivers/marvell/comphy/phy-comphy-3700.c
index 027d07d..0ad14a8 100644
--- a/drivers/marvell/comphy/phy-comphy-3700.c
+++ b/drivers/marvell/comphy/phy-comphy-3700.c
@@ -118,7 +118,7 @@
 };
 
 /* PHY selector configures with corresponding modes */
-static void mvebu_a3700_comphy_set_phy_selector(uint8_t comphy_index,
+static int mvebu_a3700_comphy_set_phy_selector(uint8_t comphy_index,
 						uint32_t comphy_mode)
 {
 	uint32_t reg;
@@ -168,9 +168,10 @@
 	}
 
 	mmio_write_32(MVEBU_COMPHY_REG_BASE + COMPHY_SELECTOR_PHY_REG, reg);
-	return;
+	return 0;
 error:
 	ERROR("COMPHY[%d] mode[%d] is invalid\n", comphy_index, mode);
+	return -EINVAL;
 }
 
 /*
@@ -214,7 +215,7 @@
 
 /* It is only used for SATA and USB3 on comphy lane2. */
 static void comphy_set_indirect(uintptr_t addr, uint32_t offset, uint16_t data,
-				uint16_t mask, int mode)
+				uint16_t mask, bool is_sata)
 {
 	/*
 	 * When Lane 2 PHY is for USB3, access the PHY registers
@@ -224,27 +225,38 @@
 	 * within the SATA Host Controller registers, Lane 2 base register
 	 * offset is 0x200
 	 */
-	if (mode == COMPHY_UNUSED)
-		return;
-
-	if (mode == COMPHY_SATA_MODE)
+	if (is_sata) {
 		mmio_write_32(addr + COMPHY_LANE2_INDIR_ADDR_OFFSET, offset);
-	else
+	} else {
 		mmio_write_32(addr + COMPHY_LANE2_INDIR_ADDR_OFFSET,
 			      offset + USB3PHY_LANE2_REG_BASE_OFFSET);
+	}
 
 	reg_set(addr + COMPHY_LANE2_INDIR_DATA_OFFSET, data, mask);
 }
 
+/* It is only used for SATA on comphy lane2. */
+static void comphy_sata_set_indirect(uintptr_t addr, uint32_t reg_offset,
+				     uint16_t data, uint16_t mask)
+{
+	comphy_set_indirect(addr, reg_offset, data, mask, true);
+}
+
+/* It is only used for USB3 indirect access on comphy lane2. */
+static void comphy_usb3_set_indirect(uintptr_t addr, uint32_t reg_offset,
+				     uint16_t data, uint16_t mask)
+{
+	comphy_set_indirect(addr, reg_offset, data, mask, false);
+}
+
-/* It is only used USB3 direct access not on comphy lane2. */
+/* It is only used for USB3 direct access not on comphy lane2. */
 static void comphy_usb3_set_direct(uintptr_t addr, uint32_t reg_offset,
-				   uint16_t data, uint16_t mask, int mode)
+				   uint16_t data, uint16_t mask)
 {
 	reg_set16((reg_offset * PHY_SHFT(USB3) + addr), data, mask);
 }
 
-static void comphy_sgmii_phy_init(uint32_t comphy_index, uint32_t mode,
-				  uintptr_t sd_ip_addr)
+static void comphy_sgmii_phy_init(uintptr_t sd_ip_addr, bool is_1gbps)
 {
 	const int fix_arr_sz = ARRAY_SIZE(sgmii_phy_init_fix);
 	int addr, fix_idx;
@@ -259,8 +271,7 @@
 		 * comparison to 3.125 Gbps values. These register values are
 		 * stored in "sgmii_phy_init_fix" array.
 		 */
-		if ((mode != COMPHY_SGMII_MODE) &&
-		    (sgmii_phy_init_fix[fix_idx].addr == addr)) {
+		if (!is_1gbps && sgmii_phy_init_fix[fix_idx].addr == addr) {
 			/* Use new value */
 			val = sgmii_phy_init_fix[fix_idx].value;
 			if (fix_idx < fix_arr_sz)
@@ -276,21 +287,22 @@
 static int mvebu_a3700_comphy_sata_power_on(uint8_t comphy_index,
 					    uint32_t comphy_mode)
 {
-	int ret = 0;
+	int ret;
 	uint32_t offset, data = 0, ref_clk;
 	uintptr_t comphy_indir_regs = COMPHY_INDIRECT_REG;
-	int mode = COMPHY_GET_MODE(comphy_mode);
 	int invert = COMPHY_GET_POLARITY_INVERT(comphy_mode);
 
 	debug_enter();
 
 	/* Configure phy selector for SATA */
-	mvebu_a3700_comphy_set_phy_selector(comphy_index, comphy_mode);
+	ret = mvebu_a3700_comphy_set_phy_selector(comphy_index, comphy_mode);
+	if (ret) {
+		return ret;
+	}
 
 	/* Clear phy isolation mode to make it work in normal mode */
 	offset =  COMPHY_ISOLATION_CTRL_REG + SATAPHY_LANE2_REG_BASE_OFFSET;
-	comphy_set_indirect(comphy_indir_regs, offset, 0, PHY_ISOLATE_MODE,
-			    mode);
+	comphy_sata_set_indirect(comphy_indir_regs, offset, 0, PHY_ISOLATE_MODE);
 
 	/* 0. Check the Polarity invert bits */
 	if (invert & COMPHY_POLARITY_TXD_INVERT)
@@ -299,13 +311,13 @@
 		data |= RXD_INVERT_BIT;
 
 	offset = COMPHY_SYNC_PATTERN_REG + SATAPHY_LANE2_REG_BASE_OFFSET;
-	comphy_set_indirect(comphy_indir_regs, offset, data, TXD_INVERT_BIT |
-			    RXD_INVERT_BIT, mode);
+	comphy_sata_set_indirect(comphy_indir_regs, offset, data, TXD_INVERT_BIT |
+				 RXD_INVERT_BIT);
 
 	/* 1. Select 40-bit data width width */
 	offset = COMPHY_LOOPBACK_REG0 + SATAPHY_LANE2_REG_BASE_OFFSET;
-	comphy_set_indirect(comphy_indir_regs, offset, DATA_WIDTH_40BIT,
-			    SEL_DATA_WIDTH_MASK, mode);
+	comphy_sata_set_indirect(comphy_indir_regs, offset, DATA_WIDTH_40BIT,
+				 SEL_DATA_WIDTH_MASK);
 
 	/* 2. Select reference clock(25M) and PHY mode (SATA) */
 	offset = COMPHY_POWER_PLL_CTRL + SATAPHY_LANE2_REG_BASE_OFFSET;
@@ -314,17 +326,17 @@
 	else
 		ref_clk = REF_CLOCK_SPEED_25M;
 
-	comphy_set_indirect(comphy_indir_regs, offset, ref_clk | PHY_MODE_SATA,
-			    REF_FREF_SEL_MASK | PHY_MODE_MASK, mode);
+	comphy_sata_set_indirect(comphy_indir_regs, offset, ref_clk | PHY_MODE_SATA,
+				 REF_FREF_SEL_MASK | PHY_MODE_MASK);
 
 	/* 3. Use maximum PLL rate (no power save) */
 	offset = COMPHY_KVCO_CAL_CTRL + SATAPHY_LANE2_REG_BASE_OFFSET;
-	comphy_set_indirect(comphy_indir_regs, offset, USE_MAX_PLL_RATE_BIT,
-			    USE_MAX_PLL_RATE_BIT, mode);
+	comphy_sata_set_indirect(comphy_indir_regs, offset, USE_MAX_PLL_RATE_BIT,
+				 USE_MAX_PLL_RATE_BIT);
 
 	/* 4. Reset reserved bit */
-	comphy_set_indirect(comphy_indir_regs, COMPHY_RESERVED_REG, 0,
-			    PHYCTRL_FRM_PIN_BIT, mode);
+	comphy_sata_set_indirect(comphy_indir_regs, COMPHY_RESERVED_REG, 0,
+				 PHYCTRL_FRM_PIN_BIT);
 
 	/* 5. Set vendor-specific configuration (It is done in sata driver) */
 	/* XXX: in U-Boot below sequence was executed in this place, in Linux
@@ -346,17 +358,21 @@
 				   COMPHY_LANE2_INDIR_DATA_OFFSET,
 				   PLL_READY_TX_BIT, PLL_READY_TX_BIT,
 				   COMPHY_PLL_TIMEOUT, REG_32BIT);
+	if (ret) {
+		return -ETIMEDOUT;
+	}
 
 	debug_exit();
 
-	return ret;
+	return 0;
 }
 
 static int mvebu_a3700_comphy_sgmii_power_on(uint8_t comphy_index,
 					     uint32_t comphy_mode)
 {
-	int ret = 0;
-	uint32_t mask, data, offset;
+	int ret;
+	uint32_t mask, data;
+	uintptr_t offset;
 	uintptr_t sd_ip_addr;
 	int mode = COMPHY_GET_MODE(comphy_mode);
 	int invert = COMPHY_GET_POLARITY_INVERT(comphy_mode);
@@ -364,7 +380,10 @@
 	debug_enter();
 
 	/* Set selector */
-	mvebu_a3700_comphy_set_phy_selector(comphy_index, comphy_mode);
+	ret = mvebu_a3700_comphy_set_phy_selector(comphy_index, comphy_mode);
+	if (ret) {
+		return ret;
+	}
 
 	/* Serdes IP Base address
 	 * COMPHY Lane0 -- USB3/GBE1
@@ -481,7 +500,7 @@
 	debug("Running C-DPI phy init %s mode\n",
 	      mode == COMPHY_2500BASEX_MODE ? "2G5" : "1G");
 	if (get_ref_clk() == 40)
-		comphy_sgmii_phy_init(comphy_index, mode, sd_ip_addr);
+		comphy_sgmii_phy_init(sd_ip_addr, mode != COMPHY_2500BASEX_MODE);
 
 	/*
 	 * 14. [Simulation Only] should not be used for real chip.
@@ -525,8 +544,10 @@
 				   PHY_PLL_READY_TX_BIT | PHY_PLL_READY_RX_BIT,
 				   PHY_PLL_READY_TX_BIT | PHY_PLL_READY_RX_BIT,
 				   COMPHY_PLL_TIMEOUT, REG_32BIT);
-	if (ret)
+	if (ret) {
 		ERROR("Failed to lock PLL for SGMII PHY %d\n", comphy_index);
+		return -ETIMEDOUT;
+	}
 
 	/*
 	 * 19. Set COMPHY input port PIN_TX_IDLE=0
@@ -549,26 +570,29 @@
 				   PHY_PLL_READY_TX_BIT | PHY_PLL_READY_RX_BIT,
 				   PHY_PLL_READY_TX_BIT | PHY_PLL_READY_RX_BIT,
 				   COMPHY_PLL_TIMEOUT, REG_32BIT);
-	if (ret)
+	if (ret) {
 		ERROR("Failed to lock PLL for SGMII PHY %d\n", comphy_index);
-
+		return -ETIMEDOUT;
+	}
 
 	ret = polling_with_timeout(MVEBU_COMPHY_REG_BASE +
 				   COMPHY_PHY_STATUS_OFFSET(comphy_index),
 				   PHY_RX_INIT_DONE_BIT, PHY_RX_INIT_DONE_BIT,
 				   COMPHY_PLL_TIMEOUT, REG_32BIT);
-	if (ret)
+	if (ret) {
 		ERROR("Failed to init RX of SGMII PHY %d\n", comphy_index);
+		return -ETIMEDOUT;
+	}
 
 	debug_exit();
 
-	return ret;
+	return 0;
 }
 
 static int mvebu_a3700_comphy_sgmii_power_off(uint8_t comphy_index)
 {
-	int ret = 0;
-	uint32_t mask, data, offset;
+	uintptr_t offset;
+	uint32_t mask, data;
 
 	debug_enter();
 
@@ -579,28 +603,31 @@
 
 	debug_exit();
 
-	return ret;
+	return 0;
 }
 
 static int mvebu_a3700_comphy_usb3_power_on(uint8_t comphy_index,
 					    uint32_t comphy_mode)
 {
-	int ret = 0;
+	int ret;
 	uintptr_t reg_base = 0;
-	uint32_t mask, data, addr, cfg, ref_clk;
+	uintptr_t addr;
+	uint32_t mask, data, cfg, ref_clk;
 	void (*usb3_reg_set)(uintptr_t addr, uint32_t reg_offset, uint16_t data,
-			     uint16_t mask, int mode);
-	int mode = COMPHY_GET_MODE(comphy_mode);
+			     uint16_t mask);
 	int invert = COMPHY_GET_POLARITY_INVERT(comphy_mode);
 
 	debug_enter();
 
 	/* Set phy seclector */
-	mvebu_a3700_comphy_set_phy_selector(comphy_index, comphy_mode);
+	ret = mvebu_a3700_comphy_set_phy_selector(comphy_index, comphy_mode);
+	if (ret) {
+		return ret;
+	}
 
 	/* Set usb3 reg access func, Lane2 is indirect access */
 	if (comphy_index == COMPHY_LANE2) {
-		usb3_reg_set = &comphy_set_indirect;
+		usb3_reg_set = &comphy_usb3_set_indirect;
 		reg_base = COMPHY_INDIRECT_REG;
 	} else {
 		/* Get the direct access register resource and map */
@@ -619,7 +646,7 @@
 	mask = PRD_TXDEEMPH0_MASK | PRD_TXMARGIN_MASK | PRD_TXSWING_MASK |
 		CFG_TX_ALIGN_POS_MASK;
 	usb3_reg_set(reg_base, COMPHY_REG_LANE_CFG0_ADDR, PRD_TXDEEMPH0_MASK,
-		     mask, mode);
+		     mask);
 
 	/*
 	 * 2. Set BIT0: enable transmitter in high impedance mode
@@ -631,20 +658,20 @@
 	mask = PRD_TXDEEMPH1_MASK | TX_DET_RX_MODE | GEN2_TX_DATA_DLY_MASK |
 		TX_ELEC_IDLE_MODE_EN;
 	data = TX_DET_RX_MODE | GEN2_TX_DATA_DLY_DEFT | TX_ELEC_IDLE_MODE_EN;
-	usb3_reg_set(reg_base, COMPHY_REG_LANE_CFG1_ADDR, data, mask, mode);
+	usb3_reg_set(reg_base, COMPHY_REG_LANE_CFG1_ADDR, data, mask);
 
 	/*
 	 * 3. Set Spread Spectrum Clock Enabled
 	 */
 	usb3_reg_set(reg_base, COMPHY_REG_LANE_CFG4_ADDR,
-		     SPREAD_SPECTRUM_CLK_EN, SPREAD_SPECTRUM_CLK_EN, mode);
+		     SPREAD_SPECTRUM_CLK_EN, SPREAD_SPECTRUM_CLK_EN);
 
 	/*
 	 * 4. Set Override Margining Controls From the MAC:
 	 *    Use margining signals from lane configuration
 	 */
 	usb3_reg_set(reg_base, COMPHY_REG_TEST_MODE_CTRL_ADDR,
-		     MODE_MARGIN_OVERRIDE, REG_16_BIT_MASK, mode);
+		     MODE_MARGIN_OVERRIDE, REG_16_BIT_MASK);
 
 	/*
 	 * 5. Set Lane-to-Lane Bundle Clock Sampling Period = per PCLK cycles
@@ -652,13 +679,13 @@
 	 */
 	usb3_reg_set(reg_base, COMPHY_REG_GLOB_CLK_SRC_LO_ADDR, 0x0,
 		     (MODE_CLK_SRC | BUNDLE_PERIOD_SEL | BUNDLE_PERIOD_SCALE |
-		      BUNDLE_SAMPLE_CTRL | PLL_READY_DLY), mode);
+		      BUNDLE_SAMPLE_CTRL | PLL_READY_DLY));
 
 	/*
 	 * 6. Set G2 Spread Spectrum Clock Amplitude at 4K
 	 */
 	usb3_reg_set(reg_base, COMPHY_REG_GEN2_SET_2,
-		     G2_TX_SSC_AMP_VALUE_20, G2_TX_SSC_AMP_MASK, mode);
+		     G2_TX_SSC_AMP_VALUE_20, G2_TX_SSC_AMP_MASK);
 
 	/*
 	 * 7. Unset G3 Spread Spectrum Clock Amplitude
@@ -667,7 +694,7 @@
 	mask = G3_TX_SSC_AMP_MASK | G3_VREG_RXTX_MAS_ISET_MASK |
 		RSVD_PH03FH_6_0_MASK;
 	usb3_reg_set(reg_base, COMPHY_REG_GEN2_SET_3,
-		     G3_VREG_RXTX_MAS_ISET_60U, mask, mode);
+		     G3_VREG_RXTX_MAS_ISET_60U, mask);
 
 	/*
 	 * 8. Check crystal jumper setting and program the Power and PLL Control
@@ -688,39 +715,37 @@
 		REF_FREF_SEL_MASK;
 	data = PU_IVREF_BIT | PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT |
 		PU_TX_INTP_BIT | PU_DFE_BIT | PHY_MODE_USB3 | ref_clk;
-	usb3_reg_set(reg_base, COMPHY_POWER_PLL_CTRL, data,  mask, mode);
+	usb3_reg_set(reg_base, COMPHY_POWER_PLL_CTRL, data,  mask);
 
 	mask = CFG_PM_OSCCLK_WAIT_MASK | CFG_PM_RXDEN_WAIT_MASK |
 		CFG_PM_RXDLOZ_WAIT_MASK;
 	data = CFG_PM_RXDEN_WAIT_1_UNIT  | cfg;
-	usb3_reg_set(reg_base, COMPHY_REG_PWR_MGM_TIM1_ADDR, data, mask, mode);
+	usb3_reg_set(reg_base, COMPHY_REG_PWR_MGM_TIM1_ADDR, data, mask);
 
 	/*
 	 * 9. Enable idle sync
 	 */
 	data = UNIT_CTRL_DEFAULT_VALUE | IDLE_SYNC_EN;
-	usb3_reg_set(reg_base, COMPHY_REG_UNIT_CTRL_ADDR, data, REG_16_BIT_MASK,
-		     mode);
+	usb3_reg_set(reg_base, COMPHY_REG_UNIT_CTRL_ADDR, data, REG_16_BIT_MASK);
 
 	/*
 	 * 10. Enable the output of 500M clock
 	 */
 	data = MISC_REG0_DEFAULT_VALUE | CLK500M_EN;
-	usb3_reg_set(reg_base, COMPHY_MISC_REG0_ADDR, data, REG_16_BIT_MASK,
-		     mode);
+	usb3_reg_set(reg_base, COMPHY_MISC_REG0_ADDR, data, REG_16_BIT_MASK);
 
 	/*
 	 * 11. Set 20-bit data width
 	 */
 	usb3_reg_set(reg_base, COMPHY_LOOPBACK_REG0, DATA_WIDTH_20BIT,
-		     REG_16_BIT_MASK, mode);
+		     REG_16_BIT_MASK);
 
 	/*
 	 * 12. Override Speed_PLL value and use MAC PLL
 	 */
 	usb3_reg_set(reg_base, COMPHY_KVCO_CAL_CTRL,
 		     (SPEED_PLL_VALUE_16 | USE_MAX_PLL_RATE_BIT),
-		     REG_16_BIT_MASK, mode);
+		     REG_16_BIT_MASK);
 
 	/*
 	 * 13. Check the Polarity invert bit
@@ -733,27 +758,26 @@
 		data |= RXD_INVERT_BIT;
 	}
 	mask = TXD_INVERT_BIT | RXD_INVERT_BIT;
-	usb3_reg_set(reg_base, COMPHY_SYNC_PATTERN_REG, data, mask, mode);
+	usb3_reg_set(reg_base, COMPHY_SYNC_PATTERN_REG, data, mask);
 
 	/*
 	 * 14. Set max speed generation to USB3.0 5Gbps
 	 */
 	usb3_reg_set(reg_base, COMPHY_SYNC_MASK_GEN_REG, PHY_GEN_USB3_5G,
-		     PHY_GEN_MAX_MASK, mode);
+		     PHY_GEN_MAX_MASK);
 
 	/*
 	 * 15. Set capacitor value for FFE gain peaking to 0xF
 	 */
 	usb3_reg_set(reg_base, COMPHY_REG_GEN3_SETTINGS_3,
-		     COMPHY_GEN_FFE_CAP_SEL_VALUE, COMPHY_GEN_FFE_CAP_SEL_MASK,
-		     mode);
+		     COMPHY_GEN_FFE_CAP_SEL_VALUE, COMPHY_GEN_FFE_CAP_SEL_MASK);
 
 	/*
 	 * 16. Release SW reset
 	 */
 	data = MODE_CORE_CLK_FREQ_SEL | MODE_PIPE_WIDTH_32 | MODE_REFDIV_BY_4;
 	usb3_reg_set(reg_base, COMPHY_REG_GLOB_PHY_CTRL0_ADDR, data,
-		     REG_16_BIT_MASK, mode);
+		     REG_16_BIT_MASK);
 
 	/* Wait for > 55 us to allow PCLK be enabled */
 	udelay(PLL_SET_DELAY_US);
@@ -771,12 +795,14 @@
 					   TXDCLK_PCLK_EN, TXDCLK_PCLK_EN,
 					   COMPHY_PLL_TIMEOUT, REG_16BIT);
 	}
-	if (ret)
+	if (ret) {
 		ERROR("Failed to lock USB3 PLL\n");
+		return -ETIMEDOUT;
+	}
 
 	debug_exit();
 
-	return ret;
+	return 0;
 }
 
 static int mvebu_a3700_comphy_pcie_power_on(uint8_t comphy_index,
@@ -862,12 +888,14 @@
 	ret = polling_with_timeout(LANE_STATUS1_ADDR(PCIE) + COMPHY_SD_ADDR,
 				   TXDCLK_PCLK_EN, TXDCLK_PCLK_EN,
 				   COMPHY_PLL_TIMEOUT, REG_16BIT);
-	if (ret)
+	if (ret) {
 		ERROR("Failed to lock PCIE PLL\n");
+		return -ETIMEDOUT;
+	}
 
 	debug_exit();
 
-	return ret;
+	return 0;
 }
 
 int mvebu_3700_comphy_power_on(uint8_t comphy_index, uint32_t comphy_mode)
@@ -919,23 +947,22 @@
 	return 0;
 }
 
-static int mvebu_a3700_comphy_sata_power_off(uint32_t comphy_mode)
+static int mvebu_a3700_comphy_sata_power_off(void)
 {
 	uintptr_t comphy_indir_regs = COMPHY_INDIRECT_REG;
-	int mode = COMPHY_GET_MODE(comphy_mode);
 	uint32_t offset;
 
 	debug_enter();
 
 	/* Set phy isolation mode */
 	offset = COMPHY_ISOLATION_CTRL_REG + SATAPHY_LANE2_REG_BASE_OFFSET;
-	comphy_set_indirect(comphy_indir_regs, offset, PHY_ISOLATE_MODE,
-			    PHY_ISOLATE_MODE, mode);
+	comphy_sata_set_indirect(comphy_indir_regs, offset, PHY_ISOLATE_MODE,
+				 PHY_ISOLATE_MODE);
 
 	/* Power off PLL, Tx, Rx */
 	offset = COMPHY_POWER_PLL_CTRL + SATAPHY_LANE2_REG_BASE_OFFSET;
-	comphy_set_indirect(comphy_indir_regs, offset, 0,
-			    PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT, mode);
+	comphy_sata_set_indirect(comphy_indir_regs, offset, 0,
+				 PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT);
 
 	debug_exit();
 
@@ -968,7 +995,7 @@
 		err = mvebu_a3700_comphy_usb3_power_off();
 		break;
 	case (COMPHY_SATA_MODE):
-		err = mvebu_a3700_comphy_sata_power_off(comphy_mode);
+		err = mvebu_a3700_comphy_sata_power_off();
 		break;
 
 	default:
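
With the mode argument gone, the USB3 path picks one of two accessors with an identical four-argument signature (indirect for lane 2, direct otherwise), and every subsequent register write uses the same call shape. A host-side sketch of that select-the-accessor-once pattern, with stand-in writers and a hypothetical lane number:

#include <stdint.h>
#include <stdio.h>

#define LANE2	2U

/* Stand-in writers sharing the common post-change signature. */
static void reg_set_indirect(uintptr_t base, uint32_t off, uint16_t data, uint16_t mask)
{
	printf("indirect: base=%#lx off=%#x data=%#x mask=%#x\n",
	       (unsigned long)base, off, data, mask);
}

static void reg_set_direct(uintptr_t base, uint32_t off, uint16_t data, uint16_t mask)
{
	printf("direct:   base=%#lx off=%#x data=%#x mask=%#x\n",
	       (unsigned long)base, off, data, mask);
}

int main(void)
{
	void (*reg_set)(uintptr_t, uint32_t, uint16_t, uint16_t);
	unsigned int lane = LANE2;	/* hypothetical lane under configuration */

	/* Pick the accessor once; all later writes share one call shape. */
	reg_set = (lane == LANE2) ? reg_set_indirect : reg_set_direct;
	reg_set(0x1000, 0x20, 0x0001, 0x000f);
	return 0;
}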
diff --git a/drivers/marvell/comphy/phy-comphy-cp110.c b/drivers/marvell/comphy/phy-comphy-cp110.c
index d10425b..e7cde75 100644
--- a/drivers/marvell/comphy/phy-comphy-cp110.c
+++ b/drivers/marvell/comphy/phy-comphy-cp110.c
@@ -1730,11 +1730,13 @@
 				HPIPE_LANE_STATUS1_REG;
 			data = HPIPE_LANE_STATUS1_PCLK_EN_MASK;
 			mask = data;
-			ret = polling_with_timeout(addr, data, mask,
+			data = polling_with_timeout(addr, data, mask,
 						   PLL_LOCK_TIMEOUT,
 						   REG_32BIT);
-			if (ret)
+			if (data) {
 				ERROR("Failed to lock PCIE PLL\n");
+				ret = -ETIMEDOUT;
+			}
 		}
 	}
 
diff --git a/drivers/mtd/nand/spi_nand.c b/drivers/mtd/nand/spi_nand.c
index d01a119..abb524d 100644
--- a/drivers/mtd/nand/spi_nand.c
+++ b/drivers/mtd/nand/spi_nand.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019,  STMicroelectronics - All Rights Reserved
+ * Copyright (c) 2019-2021,  STMicroelectronics - All Rights Reserved
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -286,6 +286,10 @@
 		return -EINVAL;
 	}
 
+	assert((spinand_dev.nand_dev->page_size != 0U) &&
+	       (spinand_dev.nand_dev->block_size != 0U) &&
+	       (spinand_dev.nand_dev->size != 0U));
+
 	ret = spi_nand_reset();
 	if (ret != 0) {
 		return ret;
@@ -301,12 +305,12 @@
 		return ret;
 	}
 
-	ret = spi_nand_quad_enable(id[0]);
+	ret = spi_nand_quad_enable(id[1]);
 	if (ret != 0) {
 		return ret;
 	}
 
-	VERBOSE("SPI_NAND Detected ID 0x%x 0x%x\n", id[0], id[1]);
+	VERBOSE("SPI_NAND Detected ID 0x%x\n", id[1]);
 
 	VERBOSE("Page size %i, Block size %i, size %lli\n",
 		spinand_dev.nand_dev->page_size,
diff --git a/drivers/renesas/common/console/rcar_console.S b/drivers/renesas/common/console/rcar_console.S
index 29baa67..b683d7b 100644
--- a/drivers/renesas/common/console/rcar_console.S
+++ b/drivers/renesas/common/console/rcar_console.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019, Renesas Electronics Corporation. All rights reserved.
+ * Copyright (c) 2018-2021, Renesas Electronics Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -63,7 +63,7 @@
 	 * ---------------------------------------------
 	 */
 func console_rcar_init
-	mov	w0, #0
+	mov	w0, #1
 	ret
 endfunc console_rcar_init
 
diff --git a/drivers/renesas/common/ddr/ddr_a/ddr_init_d3.c b/drivers/renesas/common/ddr/ddr_a/ddr_init_d3.c
index a49510e..f0113f1 100644
--- a/drivers/renesas/common/ddr/ddr_a/ddr_init_d3.c
+++ b/drivers/renesas/common/ddr/ddr_a/ddr_init_d3.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2019, Renesas Electronics Corporation.
+ * Copyright (c) 2015-2021, Renesas Electronics Corporation.
  * All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
@@ -11,7 +11,11 @@
 #include "rcar_def.h"
 #include "../ddr_regs.h"
 
-#define RCAR_DDR_VERSION	"rev.0.01"
+#define RCAR_DDR_VERSION	"rev.0.02"
+
+/* Average periodic refresh interval [ns]. Supported values: 3900, 7800 */
+#define REFRESH_RATE  3900
+
 
 #if RCAR_LSI != RCAR_D3
 #error "Don't have DDR initialize routine."
@@ -44,7 +48,7 @@
 	mmio_write_32(DBSC_DBTR16, 0x09210507);
 	mmio_write_32(DBSC_DBTR17, 0x040E0000);
 	mmio_write_32(DBSC_DBTR18, 0x00000200);
-	mmio_write_32(DBSC_DBTR19, 0x012B004B);
+	mmio_write_32(DBSC_DBTR19, 0x0129004B);
 	mmio_write_32(DBSC_DBTR20, 0x020000FB);
 	mmio_write_32(DBSC_DBTR21, 0x00040004);
 	mmio_write_32(DBSC_DBBL, 0x00000000);
@@ -54,8 +58,8 @@
 	mmio_write_32(DBSC_DBDFICNT_0, 0x00000010);
 	mmio_write_32(DBSC_DBBCAMDIS, 0x00000001);
 	mmio_write_32(DBSC_DBSCHRW1, 0x00000046);
-	mmio_write_32(DBSC_SCFCTST0, 0x0D020D04);
-	mmio_write_32(DBSC_SCFCTST1, 0x0306040C);
+	mmio_write_32(DBSC_SCFCTST0, 0x0C050B03);
+	mmio_write_32(DBSC_SCFCTST1, 0x0305030C);
 
 	mmio_write_32(DBSC_DBPDLK_0, 0x0000A55A);
 	mmio_write_32(DBSC_DBCMD, 0x01000001);
@@ -101,7 +105,9 @@
 		;
 
 	mmio_write_32(DBSC_DBPDRGA_0, 0x00000004);
-	mmio_write_32(DBSC_DBPDRGD_0, 0x0A206F89);
+	mmio_write_32(DBSC_DBPDRGD_0,
+		(uint32_t) (REFRESH_RATE * 928 / 125) - 400
+			+ 0x0A300000);
 	mmio_write_32(DBSC_DBPDRGA_0, 0x00000022);
 	mmio_write_32(DBSC_DBPDRGD_0, 0x1000040B);
 	mmio_write_32(DBSC_DBPDRGA_0, 0x00000023);
@@ -117,7 +123,11 @@
 	mmio_write_32(DBSC_DBPDRGA_0, 0x00000028);
 	mmio_write_32(DBSC_DBPDRGD_0, 0x00000046);
 	mmio_write_32(DBSC_DBPDRGA_0, 0x00000029);
-	mmio_write_32(DBSC_DBPDRGD_0, 0x000000A0);
+	if (REFRESH_RATE > 3900) {
+		mmio_write_32(DBSC_DBPDRGD_0, 0x00000020);
+	} else {
+		mmio_write_32(DBSC_DBPDRGD_0, 0x000000A0);
+	}
 	mmio_write_32(DBSC_DBPDRGA_0, 0x0000002C);
 	mmio_write_32(DBSC_DBPDRGD_0, 0x81003047);
 	mmio_write_32(DBSC_DBPDRGA_0, 0x00000020);
@@ -225,8 +235,10 @@
 
 	mmio_write_32(DBSC_DBPDRGA_0, 0x000000AF);
 	r2 = mmio_read_32(DBSC_DBPDRGD_0);
+	mmio_write_32(DBSC_DBPDRGA_0, 0x000000AF);
 	mmio_write_32(DBSC_DBPDRGD_0, ((r2 + 0x1) & 0xFF) | (r2 & 0xFFFFFF00));
 	mmio_write_32(DBSC_DBPDRGA_0, 0x000000CF);
+	mmio_write_32(DBSC_DBPDRGA_0, 0x000000CF);
 	r2 = mmio_read_32(DBSC_DBPDRGD_0);
 	mmio_write_32(DBSC_DBPDRGD_0, ((r2 + 0x1) & 0xFF) | (r2 & 0xFFFFFF00));
 
@@ -296,8 +308,10 @@
 	mmio_write_32(DBSC_DBPDRGD_0, 0x0024643E);
 
 	mmio_write_32(DBSC_DBBUS0CNF1, 0x00000010);
-	mmio_write_32(DBSC_DBCALCNF, 0x0100401B);
-	mmio_write_32(DBSC_DBRFCNF1, 0x00080E23);
+	mmio_write_32(DBSC_DBCALCNF,
+		(uint32_t) (64000000 / REFRESH_RATE) + 0x01000000);
+	mmio_write_32(DBSC_DBRFCNF1,
+		(uint32_t) (REFRESH_RATE * 116 / 125) + 0x00080000);
 	mmio_write_32(DBSC_DBRFCNF2, 0x00010000);
 	mmio_write_32(DBSC_DBDFICUPDCNF, 0x40100001);
 	mmio_write_32(DBSC_DBRFEN, 0x00000001);
@@ -346,6 +360,19 @@
 {
 	uint32_t i, r2, r3, r5, r6, r7, r12;
 
+	mmio_write_32(CPG_CPGWPR, 0x5A5AFFFF);
+	mmio_write_32(CPG_CPGWPCR, 0xA5A50000);
+
+	mmio_write_32(CPG_SRCR4, 0x20000000);
+
+	mmio_write_32(0xE61500DC, 0xe2200000);
+	while (!(mmio_read_32(CPG_PLLECR) & BIT(11)))
+		;
+
+	mmio_write_32(CPG_SRSTCLR4, 0x20000000);
+
+	mmio_write_32(CPG_CPGWPCR, 0xA5A50001);
+
 	mmio_write_32(DBSC_DBSYSCNT0, 0x00001234);
 	mmio_write_32(DBSC_DBKIND, 0x00000007);
 	mmio_write_32(DBSC_DBMEMCONF_0_0, 0x0f030a01);
@@ -363,14 +390,14 @@
 	mmio_write_32(DBSC_DBTR10, 0x0000000C);
 	mmio_write_32(DBSC_DBTR11, 0x0000000A);
 	mmio_write_32(DBSC_DBTR12, 0x00120012);
-	mmio_write_32(DBSC_DBTR13, 0x000000D0);
+	mmio_write_32(DBSC_DBTR13, 0x000000CE);
 	mmio_write_32(DBSC_DBTR14, 0x00140005);
 	mmio_write_32(DBSC_DBTR15, 0x00050004);
 	mmio_write_32(DBSC_DBTR16, 0x071F0305);
 	mmio_write_32(DBSC_DBTR17, 0x040C0000);
 	mmio_write_32(DBSC_DBTR18, 0x00000200);
 	mmio_write_32(DBSC_DBTR19, 0x01000040);
-	mmio_write_32(DBSC_DBTR20, 0x020000D8);
+	mmio_write_32(DBSC_DBTR20, 0x020000D6);
 	mmio_write_32(DBSC_DBTR21, 0x00040004);
 	mmio_write_32(DBSC_DBBL, 0x00000000);
 	mmio_write_32(DBSC_DBODT0, 0x00000001);
@@ -379,8 +406,8 @@
 	mmio_write_32(DBSC_DBDFICNT_0, 0x00000010);
 	mmio_write_32(DBSC_DBBCAMDIS, 0x00000001);
 	mmio_write_32(DBSC_DBSCHRW1, 0x00000046);
-	mmio_write_32(DBSC_SCFCTST0, 0x0D020C04);
-	mmio_write_32(DBSC_SCFCTST1, 0x0305040C);
+	mmio_write_32(DBSC_SCFCTST0, 0x0D050B03);
+	mmio_write_32(DBSC_SCFCTST1, 0x0306030C);
 
 	mmio_write_32(DBSC_DBPDLK_0, 0x0000A55A);
 	mmio_write_32(DBSC_DBCMD, 0x01000001);
@@ -426,13 +453,14 @@
 		;
 
 	mmio_write_32(DBSC_DBPDRGA_0, 0x00000004);
-	mmio_write_32(DBSC_DBPDRGD_0, 0x08C05FF0);
+	mmio_write_32(DBSC_DBPDRGD_0,
+		(uint32_t) (REFRESH_RATE * 792 / 125) - 400 + 0x08B00000);
 	mmio_write_32(DBSC_DBPDRGA_0, 0x00000022);
 	mmio_write_32(DBSC_DBPDRGD_0, 0x1000040B);
 	mmio_write_32(DBSC_DBPDRGA_0, 0x00000023);
 	mmio_write_32(DBSC_DBPDRGD_0, 0x2D9C0B66);
 	mmio_write_32(DBSC_DBPDRGA_0, 0x00000024);
-	mmio_write_32(DBSC_DBPDRGD_0, 0x2A88C400);
+	mmio_write_32(DBSC_DBPDRGD_0, 0x2A88B400);
 	mmio_write_32(DBSC_DBPDRGA_0, 0x00000025);
 	mmio_write_32(DBSC_DBPDRGD_0, 0x30005200);
 	mmio_write_32(DBSC_DBPDRGA_0, 0x00000026);
@@ -442,7 +470,11 @@
 	mmio_write_32(DBSC_DBPDRGA_0, 0x00000028);
 	mmio_write_32(DBSC_DBPDRGD_0, 0x00000046);
 	mmio_write_32(DBSC_DBPDRGA_0, 0x00000029);
-	mmio_write_32(DBSC_DBPDRGD_0, 0x00000098);
+	if (REFRESH_RATE > 3900) {
+		mmio_write_32(DBSC_DBPDRGD_0, 0x00000018);
+	} else {
+		mmio_write_32(DBSC_DBPDRGD_0, 0x00000098);
+	}
 	mmio_write_32(DBSC_DBPDRGA_0, 0x0000002C);
 	mmio_write_32(DBSC_DBPDRGD_0, 0x81003047);
 	mmio_write_32(DBSC_DBPDRGA_0, 0x00000020);
@@ -549,9 +581,11 @@
 
 	mmio_write_32(DBSC_DBPDRGA_0, 0x000000AF);
 	r2 = mmio_read_32(DBSC_DBPDRGD_0);
+	mmio_write_32(DBSC_DBPDRGA_0, 0x000000AF);
 	mmio_write_32(DBSC_DBPDRGD_0, ((r2 + 0x1) & 0xFF) | (r2 & 0xFFFFFF00));
 	mmio_write_32(DBSC_DBPDRGA_0, 0x000000CF);
 	r2 = mmio_read_32(DBSC_DBPDRGD_0);
+	mmio_write_32(DBSC_DBPDRGA_0, 0x000000CF);
 	mmio_write_32(DBSC_DBPDRGD_0, ((r2 + 0x1) & 0xFF) | (r2 & 0xFFFFFF00));
 
 	mmio_write_32(DBSC_DBPDRGA_0, 0x000000A0);
@@ -620,8 +654,10 @@
 	mmio_write_32(DBSC_DBPDRGD_0, 0x0024643E);
 
 	mmio_write_32(DBSC_DBBUS0CNF1, 0x00000010);
-	mmio_write_32(DBSC_DBCALCNF, 0x0100401B);
-	mmio_write_32(DBSC_DBRFCNF1, 0x00080C30);
+	mmio_write_32(DBSC_DBCALCNF,
+		(uint32_t) (64000000 / REFRESH_RATE) + 0x01000000);
+	mmio_write_32(DBSC_DBRFCNF1,
+		(uint32_t) (REFRESH_RATE * 99 / 125) + 0x00080000);
 	mmio_write_32(DBSC_DBRFCNF2, 0x00010000);
 	mmio_write_32(DBSC_DBDFICUPDCNF, 0x40100001);
 	mmio_write_32(DBSC_DBRFEN, 0x00000001);
@@ -693,7 +729,7 @@
 		ddr_mbps = 1600;
 	}
 
-	NOTICE("BL2: DDR%d\n", ddr_mbps);
+	NOTICE("BL2: DDR%d(%s)\n", ddr_mbps, RCAR_DDR_VERSION);
 
 	return 0;
 }
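
The rev.0.02 initialisation derives DBSC_DBPDRGD_0, DBSC_DBCALCNF and DBSC_DBRFCNF1 from REFRESH_RATE rather than hard-coding them. As a quick check of the integer arithmetic, with REFRESH_RATE = 3900 the 116/125 expression gives 3900 * 116 / 125 = 3619 = 0xE23, so DBSC_DBRFCNF1 becomes 0x00080E23, matching the constant it replaces. A tiny host-side sketch of the same computations:

#include <stdint.h>
#include <stdio.h>

#define REFRESH_RATE	3900	/* average periodic refresh interval [ns] */

int main(void)
{
	uint32_t dbrfcnf1 = (uint32_t)(REFRESH_RATE * 116 / 125) + 0x00080000U;
	uint32_t dbcalcnf = (uint32_t)(64000000 / REFRESH_RATE) + 0x01000000U;
	uint32_t dbpdrgd  = (uint32_t)(REFRESH_RATE * 928 / 125) - 400U + 0x0A300000U;

	/* With 3900 ns: 0x00080e23, 0x0100401a, 0x0a306f89 */
	printf("DBRFCNF1=%#010x DBCALCNF=%#010x DBPDRGD=%#010x\n",
	       dbrfcnf1, dbcalcnf, dbpdrgd);
	return 0;
}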
diff --git a/drivers/renesas/common/io/io_rcar.c b/drivers/renesas/common/io/io_rcar.c
index c3e8319..17d7aaa 100644
--- a/drivers/renesas/common/io/io_rcar.c
+++ b/drivers/renesas/common/io/io_rcar.c
@@ -151,6 +151,9 @@
 	return -EINVAL;
 }
 
+#define MFISBTSTSR			(0xE6260604U)
+#define MFISBTSTSR_BOOT_PARTITION	(0x00000010U)
+
 static int32_t file_to_offset(const int32_t name, uintptr_t *offset,
 			      uint32_t *cert, uint32_t *no_load,
 			      uintptr_t *partition)
@@ -169,6 +172,9 @@
 		}
 
 		*offset = rcar_image_header[addr];
+
+		if (mmio_read_32(MFISBTSTSR) & MFISBTSTSR_BOOT_PARTITION)
+			*offset += 0x800000;
 		*cert = RCAR_CERT_SIZE;
 		*cert *= RCAR_ATTR_GET_CERTOFF(name_offset[i].attr);
 		*cert += RCAR_SDRAM_certESS;
diff --git a/drivers/renesas/common/pwrc/pwrc.c b/drivers/renesas/common/pwrc/pwrc.c
index 3f60fe6..4ebf049 100644
--- a/drivers/renesas/common/pwrc/pwrc.c
+++ b/drivers/renesas/common/pwrc/pwrc.c
@@ -44,6 +44,7 @@
 #define CPU_PWR_OFF				(0x00000003U)
 #define RCAR_PSTR_MASK				(0x00000003U)
 #define ST_ALL_STANDBY				(0x00003333U)
+#define SYSCEXTMASK_EXTMSK0			(0x00000001U)
 /* Suspend to ram	*/
 #define DBSC4_REG_BASE				(0xE6790000U)
 #define DBSC4_REG_DBSYSCNT0			(DBSC4_REG_BASE + 0x0100U)
@@ -191,6 +192,8 @@
 {
 	uintptr_t reg_pwrsr, reg_cpumcr, reg_pwron, reg_pwrer;
 	uint32_t c, sysc_reg_bit;
+	uint32_t lsi_product;
+	uint32_t lsi_cut;
 
 	c = rcar_pwrc_get_mpidr_cluster(mpidr);
 	reg_cpumcr = IS_CA57(c) ? RCAR_CA57CPUCMCR : RCAR_CA53CPUCMCR;
@@ -205,6 +208,17 @@
 	if (mmio_read_32(reg_cpumcr) != 0)
 		mmio_write_32(reg_cpumcr, 0);
 
+	lsi_product = mmio_read_32((uintptr_t)RCAR_PRR);
+	lsi_cut = lsi_product & PRR_CUT_MASK;
+	lsi_product &= PRR_PRODUCT_MASK;
+
+	if ((lsi_product == PRR_PRODUCT_M3 && lsi_cut >= PRR_PRODUCT_30) ||
+	    lsi_product == PRR_PRODUCT_H3 ||
+	    lsi_product == PRR_PRODUCT_M3N ||
+	    lsi_product == PRR_PRODUCT_E3) {
+		mmio_setbits_32(RCAR_SYSCEXTMASK, SYSCEXTMASK_EXTMSK0);
+	}
+
 	mmio_setbits_32(RCAR_SYSCIER, sysc_reg_bit);
 	mmio_setbits_32(RCAR_SYSCIMR, sysc_reg_bit);
 
@@ -216,7 +230,15 @@
 
 	while ((mmio_read_32(RCAR_SYSCISR) & sysc_reg_bit) == 0)
 		;
-	mmio_write_32(RCAR_SYSCISR, sysc_reg_bit);
+	mmio_write_32(RCAR_SYSCISCR, sysc_reg_bit);
+
+	if ((lsi_product == PRR_PRODUCT_M3 && lsi_cut >= PRR_PRODUCT_30) ||
+	    lsi_product == PRR_PRODUCT_H3 ||
+	    lsi_product == PRR_PRODUCT_M3N ||
+	    lsi_product == PRR_PRODUCT_E3) {
+		mmio_clrbits_32(RCAR_SYSCEXTMASK, SYSCEXTMASK_EXTMSK0);
+	}
+
 	while ((mmio_read_32(reg_pwrsr) & STATUS_PWRUP) == 0)
 		;
 }
diff --git a/drivers/renesas/common/scif/scif.S b/drivers/renesas/common/scif/scif.S
index beb8dd8..72b5b4b 100644
--- a/drivers/renesas/common/scif/scif.S
+++ b/drivers/renesas/common/scif/scif.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2020, Renesas Electronics Corporation. All rights reserved.
+ * Copyright (c) 2015-2021, Renesas Electronics Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -79,7 +79,7 @@
 					 SCSMR_STOP_1 +		\
 					 SCSMR_CKS_DIV1)
 #define SCBRR_115200BPS		(17)
-#define SCBRR_115200BPSON	(16)
+#define SCBRR_115200BPS_D3_SSCG	(16)
 #define SCBRR_115200BPS_E3_SSCG	(15)
 #define SCBRR_230400BPS		(8)
 
@@ -216,26 +216,38 @@
 	and	w1, w1, #PRR_PRODUCT_MASK
 	mov	w2, #PRR_PRODUCT_D3
 	cmp	w1, w2
-	beq	4f
+	beq	5f
 	and	w1, w1, #PRR_PRODUCT_MASK
 	mov	w2, #PRR_PRODUCT_E3
 	cmp	w1, w2
-	bne	5f
+	bne	4f
 
+	/* Check whether SSCG(MD12) is on (E3) */
 	ldr	x1, =RST_MODEMR
 	ldr	w1, [x1]
 	and	w1, w1, #MODEMR_MD12
 	mov	w2, #MODEMR_MD12
 	cmp	w1, w2
-	bne	5f
+	bne	4f
 
+	/* When SSCG(MD12) on (E3) */
 	mov	w1, #SCBRR_115200BPS_E3_SSCG
 	b	2f
 5:
-	mov	w1, #SCBRR_115200BPS
+	/* In case of D3 */
+	ldr	x1, =RST_MODEMR
+	ldr	w1, [x1]
+	and	w1, w1, #MODEMR_MD12
+	mov	w2, #MODEMR_MD12
+	cmp	w1, w2
+	bne	4f
+
+	/* When SSCG(MD12) on (D3) */
+	mov	w1, #SCBRR_115200BPS_D3_SSCG
 	b	2f
 4:
-	mov	w1, #SCBRR_115200BPSON
+	/* In case of H3/M3/M3N or when SSCG(MD12) is off in E3/D3 */
+	mov	w1, #SCBRR_115200BPS
 	b	2f
 3:
 	mov	w1, #SCBRR_230400BPS
diff --git a/drivers/renesas/common/watchdog/swdt.c b/drivers/renesas/common/watchdog/swdt.c
index 1a351ca..29ef6f4 100644
--- a/drivers/renesas/common/watchdog/swdt.c
+++ b/drivers/renesas/common/watchdog/swdt.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2020, Renesas Electronics Corporation. All rights reserved.
+ * Copyright (c) 2015-2021, Renesas Electronics Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -78,7 +78,7 @@
 void rcar_swdt_init(void)
 {
 	uint32_t rmsk, sr;
-#if (RCAR_LSI != RCAR_E3) && (RCAR_LSI != RZ_G2E)
+#if (RCAR_LSI != RCAR_E3) && (RCAR_LSI != RCAR_D3) && (RCAR_LSI != RZ_G2E)
 	uint32_t reg, val, product_cut, chk_data;
 
 	reg = mmio_read_32(RCAR_PRR);
@@ -96,6 +96,8 @@
 
 #if (RCAR_LSI == RCAR_E3) || (RCAR_LSI == RZ_G2E)
 	mmio_write_32(SWDT_WTCNT, WTCNT_UPPER_BYTE | WTCNT_COUNT_7p81k);
+#elif (RCAR_LSI == RCAR_D3)
+	mmio_write_32(SWDT_WTCNT, WTCNT_UPPER_BYTE | WTCNT_COUNT_8p13k);
 #else
 	val = WTCNT_UPPER_BYTE;
 
diff --git a/drivers/st/clk/stm32mp1_clk.c b/drivers/st/clk/stm32mp1_clk.c
index 6ada96a..80b6408 100644
--- a/drivers/st/clk/stm32mp1_clk.c
+++ b/drivers/st/clk/stm32mp1_clk.c
@@ -56,6 +56,7 @@
 	_HSI_KER = NB_OSC,
 	_HSE_KER,
 	_HSE_KER_DIV2,
+	_HSE_RTC,
 	_CSI_KER,
 	_PLL1_P,
 	_PLL1_Q,
@@ -107,7 +108,7 @@
 	_USBPHY_SEL,
 	_USBO_SEL,
 	_MPU_SEL,
-	_PER_SEL,
+	_CKPER_SEL,
 	_RTC_SEL,
 	_PARENT_SEL_NB,
 	_UNKNOWN_SEL = 0xff,
@@ -125,6 +126,7 @@
 	[_HSI_KER] = CK_HSI,
 	[_HSE_KER] = CK_HSE,
 	[_HSE_KER_DIV2] = CK_HSE_DIV2,
+	[_HSE_RTC] = _UNKNOWN_ID,
 	[_CSI_KER] = CK_CSI,
 	[_PLL1_P] = PLL1_P,
 	[_PLL1_Q] = PLL1_Q,
@@ -490,7 +492,7 @@
 };
 
 static const uint8_t rtc_parents[] = {
-	_UNKNOWN_ID, _LSE, _LSI, _HSE
+	_UNKNOWN_ID, _LSE, _LSI, _HSE_RTC
 };
 
 static const struct stm32mp1_clk_sel stm32mp1_clk_sel[_PARENT_SEL_NB] = {
@@ -502,7 +504,7 @@
 	_CLK_PARENT_SEL(UART1, RCC_UART1CKSELR, usart1_parents),
 	_CLK_PARENT_SEL(RNG1, RCC_RNG1CKSELR, rng1_parents),
 	_CLK_PARENT_SEL(MPU, RCC_MPCKSELR, mpu_parents),
-	_CLK_PARENT_SEL(PER, RCC_CPERCKSELR, per_parents),
+	_CLK_PARENT_SEL(CKPER, RCC_CPERCKSELR, per_parents),
 	_CLK_PARENT_SEL(RTC, RCC_BDCR, rtc_parents),
 	_CLK_PARENT_SEL(UART6, RCC_UART6CKSELR, uart6_parents),
 	_CLK_PARENT_SEL(UART24, RCC_UART24CKSELR, uart234578_parents),
@@ -587,6 +589,7 @@
 	[_HSI_KER] = "HSI_KER",
 	[_HSE_KER] = "HSE_KER",
 	[_HSE_KER_DIV2] = "HSE_KER_DIV2",
+	[_HSE_RTC] = "HSE_RTC",
 	[_CSI_KER] = "CSI_KER",
 	[_PLL1_P] = "PLL1_P",
 	[_PLL1_Q] = "PLL1_Q",
@@ -969,6 +972,10 @@
 	case _HSE_KER_DIV2:
 		clock = stm32mp1_clk_get_fixed(_HSE) >> 1;
 		break;
+	case _HSE_RTC:
+		clock = stm32mp1_clk_get_fixed(_HSE);
+		clock /= (mmio_read_32(rcc_base + RCC_RTCDIVR) & RCC_DIVR_DIV_MASK) + 1U;
+		break;
 	case _LSI:
 		clock = stm32mp1_clk_get_fixed(_LSI);
 		break;
@@ -1652,7 +1659,7 @@
 	    (clksrc != (uint32_t)CLK_RTC_DISABLED)) {
 		mmio_clrsetbits_32(address,
 				   RCC_BDCR_RTCSRC_MASK,
-				   clksrc << RCC_BDCR_RTCSRC_SHIFT);
+				   (clksrc & RCC_SELR_SRC_MASK) << RCC_BDCR_RTCSRC_SHIFT);
 
 		mmio_setbits_32(address, RCC_BDCR_RTCCKEN);
 	}
@@ -2152,6 +2159,7 @@
 	case _HSE:
 	case _HSE_KER:
 	case _HSE_KER_DIV2:
+	case _HSE_RTC:
 	case _LSE:
 		break;
 
@@ -2210,6 +2218,7 @@
 		DDRC2, DDRC2LP,
 		DDRCAPB, DDRPHYCAPB, DDRPHYCAPBLP,
 		DDRPHYC, DDRPHYCLP,
+		RTCAPB,
 		TZC1, TZC2,
 		TZPC,
 		STGEN_K,
@@ -2218,10 +2227,6 @@
 	for (idx = 0U; idx < ARRAY_SIZE(secure_enable); idx++) {
 		stm32mp_clk_enable(secure_enable[idx]);
 	}
-
-	if (!stm32mp_is_single_core()) {
-		stm32mp1_clk_enable_secure(RTCAPB);
-	}
 }
 
 int stm32mp1_clk_probe(void)
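
The new _HSE_RTC gate models the RTC input as HSE divided by (RTCDIV + 1), with RTCDIV read from RCC_RTCDIVR. A small stand-alone sketch of that computation, using a hypothetical 24 MHz HSE and a sample divider value in place of the register read (the mask width here is also an assumption):

#include <stdint.h>
#include <stdio.h>

#define RCC_DIVR_DIV_MASK	0x3fU	/* assumed width of the divider field */

static unsigned long hse_rtc_hz(unsigned long hse_hz, uint32_t rtcdivr)
{
	/* RTC kernel clock = HSE / (RTCDIV + 1), as in the _HSE_RTC case. */
	return hse_hz / ((rtcdivr & RCC_DIVR_DIV_MASK) + 1U);
}

int main(void)
{
	/* Hypothetical board: 24 MHz HSE, divider field programmed to 23. */
	printf("HSE_RTC = %lu Hz\n", hse_rtc_hz(24000000UL, 23U));	/* 1000000 */
	return 0;
}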
diff --git a/drivers/st/clk/stm32mp_clkfunc.c b/drivers/st/clk/stm32mp_clkfunc.c
index 8333f6d..2101171 100644
--- a/drivers/st/clk/stm32mp_clkfunc.c
+++ b/drivers/st/clk/stm32mp_clkfunc.c
@@ -161,7 +161,7 @@
  * @param fdt: Device tree reference
  * @return: Node offset or a negative value on error
  */
-int fdt_get_rcc_node(void *fdt)
+static int fdt_get_rcc_node(void *fdt)
 {
 	return fdt_node_offset_by_compatible(fdt, -1, DT_RCC_CLK_COMPAT);
 }
diff --git a/drivers/st/uart/aarch32/stm32_console.S b/drivers/st/uart/aarch32/stm32_console.S
index 686b18b..2b8879a 100644
--- a/drivers/st/uart/aarch32/stm32_console.S
+++ b/drivers/st/uart/aarch32/stm32_console.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -45,7 +45,12 @@
 	/* Check the input base address */
 	cmp	r0, #0
 	beq	core_init_fail
-#if defined(IMAGE_BL2)
+#if !defined(IMAGE_BL2)
+	/* Skip UART initialization if it is already enabled */
+	ldr	r3, [r0, #USART_CR1]
+	ands	r3, r3, #USART_CR1_UE
+	bne	1f
+#endif /* !IMAGE_BL2 */
 	/* Check baud rate and uart clock for sanity */
 	cmp	r1, #0
 	beq	core_init_fail
@@ -78,7 +83,7 @@
 	ldr	r3, [r0, #USART_ISR]
 	tst	r3, #USART_ISR_TEACK
 	beq	teack_loop
-#endif /* IMAGE_BL2 */
+1:
 	mov	r0, #1
 	bx	lr
 core_init_fail:
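
For images other than BL2, the console init now returns early when USART_CR1.UE is already set, so a UART configured by an earlier boot stage is left untouched. A host-side sketch of that guard in C, with a stand-in CR1 value (the UE bit position is assumed for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define USART_CR1_UE	(1U << 0)	/* assumed bit position, illustration only */

static bool uart_init(uint32_t *cr1, uint32_t new_cfg)
{
	if ((*cr1 & USART_CR1_UE) != 0U) {
		return true;	/* already enabled by an earlier stage: leave as-is */
	}
	*cr1 = new_cfg | USART_CR1_UE;	/* otherwise program and enable */
	return true;
}

int main(void)
{
	uint32_t cr1 = USART_CR1_UE;	/* pretend BL2 already set it up */
	uart_init(&cr1, 0x12345678U);
	printf("CR1 = %#x\n", cr1);	/* unchanged: 0x1 */
	return 0;
}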
diff --git a/fdts/fvp-base-gicv3-psci-common.dtsi b/fdts/fvp-base-gicv3-psci-common.dtsi
index b6753de..3cb613f 100644
--- a/fdts/fvp-base-gicv3-psci-common.dtsi
+++ b/fdts/fvp-base-gicv3-psci-common.dtsi
@@ -24,7 +24,11 @@
 	#address-cells = <2>;
 	#size-cells = <2>;
 
-	chosen { };
+#if (ENABLE_RME == 1)
+	chosen { bootargs = "mem=1G console=ttyAMA0 earlycon=pl011,0x1c090000 root=/dev/vda ip=on";};
+#else
+	chosen {};
+#endif
 
 	aliases {
 		serial0 = &v2m_serial0;
@@ -135,8 +139,13 @@
 
 	memory@80000000 {
 		device_type = "memory";
+#if (ENABLE_RME == 1)
+		reg = <0x00000000 0x80000000 0 0x7C000000>,
+		      <0x00000008 0x80000000 0 0x80000000>;
+#else
 		reg = <0x00000000 0x80000000 0 0x7F000000>,
 		      <0x00000008 0x80000000 0 0x80000000>;
+#endif
 	};
 
 	gic: interrupt-controller@2f000000 {
diff --git a/fdts/juno-ethosn.dtsi b/fdts/juno-ethosn.dtsi
index 87ab378..e2f3355 100644
--- a/fdts/juno-ethosn.dtsi
+++ b/fdts/juno-ethosn.dtsi
@@ -4,19 +4,21 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
+/*
+ * For examples of multi-core and multi-device NPU, refer to the examples given in the
+ * Arm Ethos-N NPU driver stack.
+ * https://github.com/ARM-software/ethos-n-driver-stack
+ */
+
 / {
 	#address-cells = <2>;
 	#size-cells = <2>;
 
-	ethosn: ethosn@6f300000 {
+	ethosn0: ethosn@6f300000 {
 		compatible = "ethosn";
 		reg = <0 0x6f300000 0 0x00100000>;
 		status = "okay";
 
-		/*
-		 * Single-core NPU. For multi-core NPU, additional core nodes
-		 * and reg values must be added.
-		 */
 		core0 {
 			compatible = "ethosn-core";
 			status = "okay";
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index 9ea1114..74bc8cb 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -182,6 +182,11 @@
 #define ID_AA64PFR0_CSV2_SHIFT	U(56)
 #define ID_AA64PFR0_CSV2_MASK	ULL(0xf)
 #define ID_AA64PFR0_CSV2_LENGTH	U(4)
+#define ID_AA64PFR0_FEAT_RME_SHIFT		U(52)
+#define ID_AA64PFR0_FEAT_RME_MASK		ULL(0xf)
+#define ID_AA64PFR0_FEAT_RME_LENGTH		U(4)
+#define ID_AA64PFR0_FEAT_RME_NOT_SUPPORTED	U(0)
+#define ID_AA64PFR0_FEAT_RME_V1			U(1)
 
 /* Exception level handling */
 #define EL_IMPL_NONE		ULL(0)
@@ -281,6 +286,11 @@
 #define ID_AA64MMFR1_EL1_VHE_SHIFT		U(8)
 #define ID_AA64MMFR1_EL1_VHE_MASK		ULL(0xf)
 
+#define ID_AA64MMFR1_EL1_HCX_SHIFT              U(40)
+#define ID_AA64MMFR1_EL1_HCX_MASK               ULL(0xf)
+#define ID_AA64MMFR1_EL1_HCX_SUPPORTED          ULL(0x1)
+#define ID_AA64MMFR1_EL1_HCX_NOT_SUPPORTED      ULL(0x0)
+
 /* ID_AA64MMFR2_EL1 definitions */
 #define ID_AA64MMFR2_EL1		S3_0_C0_C7_2
 
@@ -427,13 +437,18 @@
 
 /* SCR definitions */
 #define SCR_RES1_BITS		((U(1) << 4) | (U(1) << 5))
+#define SCR_NSE_SHIFT		U(62)
+#define SCR_NSE_BIT		(ULL(1) << SCR_NSE_SHIFT)
+#define SCR_GPF_BIT		(UL(1) << 48)
 #define SCR_TWEDEL_SHIFT	U(30)
 #define SCR_TWEDEL_MASK		ULL(0xf)
+#define SCR_HXEn_BIT            (UL(1) << 38)
 #define SCR_AMVOFFEN_BIT	(UL(1) << 35)
 #define SCR_TWEDEn_BIT		(UL(1) << 29)
 #define SCR_ECVEN_BIT		(UL(1) << 28)
 #define SCR_FGTEN_BIT		(UL(1) << 27)
 #define SCR_ATA_BIT		(UL(1) << 26)
+#define SCR_EnSCXT_BIT		(UL(1) << 25)
 #define SCR_FIEN_BIT		(UL(1) << 21)
 #define SCR_EEL2_BIT		(UL(1) << 18)
 #define SCR_API_BIT		(UL(1) << 17)
@@ -516,13 +531,18 @@
 #define VTTBR_BADDR_SHIFT	U(0)
 
 /* HCR definitions */
+#define HCR_RESET_VAL		ULL(0x0)
 #define HCR_AMVOFFEN_BIT	(ULL(1) << 51)
+#define HCR_TEA_BIT		(ULL(1) << 47)
 #define HCR_API_BIT		(ULL(1) << 41)
 #define HCR_APK_BIT		(ULL(1) << 40)
 #define HCR_E2H_BIT		(ULL(1) << 34)
+#define HCR_HCD_BIT		(ULL(1) << 29)
 #define HCR_TGE_BIT		(ULL(1) << 27)
 #define HCR_RW_SHIFT		U(31)
 #define HCR_RW_BIT		(ULL(1) << HCR_RW_SHIFT)
+#define HCR_TWE_BIT		(ULL(1) << 14)
+#define HCR_TWI_BIT		(ULL(1) << 13)
 #define HCR_AMO_BIT		(ULL(1) << 5)
 #define HCR_IMO_BIT		(ULL(1) << 4)
 #define HCR_FMO_BIT		(ULL(1) << 3)
@@ -565,6 +585,10 @@
 #define CPTR_EL2_TZ_BIT		(U(1) << 8)
 #define CPTR_EL2_RESET_VAL	CPTR_EL2_RES1
 
+/* VTCR_EL2 definitions */
+#define VTCR_RESET_VAL         U(0x0)
+#define VTCR_EL2_MSA           (U(1) << 31)
+
 /* CPSR/SPSR definitions */
 #define DAIF_FIQ_BIT		(U(1) << 0)
 #define DAIF_IRQ_BIT		(U(1) << 1)
@@ -590,6 +614,7 @@
 #define SPSR_M_MASK		U(0x1)
 #define SPSR_M_AARCH64		U(0x0)
 #define SPSR_M_AARCH32		U(0x1)
+#define SPSR_M_EL2H		U(0x9)
 
 #define SPSR_EL_SHIFT		U(2)
 #define SPSR_EL_WIDTH		U(2)
@@ -1082,6 +1107,12 @@
 #define AMEVCNTVOFF1F_EL2	S3_4_C13_C11_7
 
 /*******************************************************************************
+ * Realm management extension register definitions
+ ******************************************************************************/
+#define GPCCR_EL3			S3_6_C2_C1_6
+#define GPTBR_EL3			S3_6_C2_C1_4
+
+/*******************************************************************************
  * RAS system registers
  ******************************************************************************/
 #define DISR_EL1		S3_0_C12_C1_1
@@ -1144,6 +1175,16 @@
 #define GCR_EL1			S3_0_C1_C0_6
 
 /*******************************************************************************
+ * FEAT_HCX - Extended Hypervisor Configuration Register
+ ******************************************************************************/
+#define HCRX_EL2                S3_4_C1_C2_2
+#define HCRX_EL2_FGTnXS_BIT     (UL(1) << 4)
+#define HCRX_EL2_FnXS_BIT       (UL(1) << 3)
+#define HCRX_EL2_EnASR_BIT      (UL(1) << 2)
+#define HCRX_EL2_EnALS_BIT      (UL(1) << 1)
+#define HCRX_EL2_EnAS0_BIT      (UL(1) << 0)
+
+/*******************************************************************************
  * Definitions for DynamicIQ Shared Unit registers
  ******************************************************************************/
 #define CLUSTERPWRDN_EL1	S3_0_c15_c3_6
diff --git a/include/arch/aarch64/arch_features.h b/include/arch/aarch64/arch_features.h
index dc0b7f3..46cd1c9 100644
--- a/include/arch/aarch64/arch_features.h
+++ b/include/arch/aarch64/arch_features.h
@@ -117,4 +117,21 @@
 		ID_AA64PFR1_MPAM_FRAC_SHIFT) & ID_AA64PFR1_MPAM_FRAC_MASK));
 }
 
+static inline bool is_feat_hcx_present(void)
+{
+	return (((read_id_aa64mmfr1_el1() >> ID_AA64MMFR1_EL1_HCX_SHIFT) &
+		ID_AA64MMFR1_EL1_HCX_MASK) == ID_AA64MMFR1_EL1_HCX_SUPPORTED);
+}
+
+static inline unsigned int get_armv9_2_feat_rme_support(void)
+{
+	/*
+	 * Return the RME version, or zero if RME is not supported. The return
+	 * value can be used directly as the RME version number or compared
+	 * against zero to detect RME presence.
+	 */
+	return (unsigned int)(read_id_aa64pfr0_el1() >>
+		ID_AA64PFR0_FEAT_RME_SHIFT) & ID_AA64PFR0_FEAT_RME_MASK;
+}
+
 #endif /* ARCH_FEATURES_H */
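
Both probes reduce to a shift-and-mask read of an ID register field. A host-runnable sketch of the FEAT_RME field extraction performed by get_armv9_2_feat_rme_support(), using a made-up ID_AA64PFR0_EL1 value in place of the system register read:

#include <stdint.h>
#include <stdio.h>

#define ID_AA64PFR0_FEAT_RME_SHIFT	52
#define ID_AA64PFR0_FEAT_RME_MASK	0xfULL

int main(void)
{
	/* Hypothetical ID_AA64PFR0_EL1 value with RME version 1 in bits [55:52]. */
	uint64_t id_aa64pfr0 = 1ULL << ID_AA64PFR0_FEAT_RME_SHIFT;
	unsigned int rme = (unsigned int)((id_aa64pfr0 >> ID_AA64PFR0_FEAT_RME_SHIFT) &
					  ID_AA64PFR0_FEAT_RME_MASK);

	printf("FEAT_RME version: %u (%s)\n", rme, (rme != 0U) ? "present" : "absent");
	return 0;
}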
diff --git a/include/arch/aarch64/arch_helpers.h b/include/arch/aarch64/arch_helpers.h
index a41b325..cae05dc 100644
--- a/include/arch/aarch64/arch_helpers.h
+++ b/include/arch/aarch64/arch_helpers.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -233,8 +233,10 @@
 
 void disable_mmu_el1(void);
 void disable_mmu_el3(void);
+void disable_mpu_el2(void);
 void disable_mmu_icache_el1(void);
 void disable_mmu_icache_el3(void);
+void disable_mpu_icache_el2(void);
 
 /*******************************************************************************
  * Misc. accessor prototypes
@@ -440,6 +442,8 @@
 DEFINE_SYSREG_READ_FUNC(cntpct_el0)
 DEFINE_SYSREG_RW_FUNCS(cnthctl_el2)
 
+DEFINE_SYSREG_RW_FUNCS(vtcr_el2)
+
 #define get_cntp_ctl_enable(x)  (((x) >> CNTP_CTL_ENABLE_SHIFT) & \
 					CNTP_CTL_ENABLE_MASK)
 #define get_cntp_ctl_imask(x)   (((x) >> CNTP_CTL_IMASK_SHIFT) & \
@@ -532,9 +536,16 @@
 DEFINE_SYSREG_READ_FUNC(rndr)
 DEFINE_SYSREG_READ_FUNC(rndrrs)
 
+/* FEAT_HCX Register */
+DEFINE_RENAME_SYSREG_RW_FUNCS(hcrx_el2, HCRX_EL2)
+
 /* DynamIQ Shared Unit power management */
 DEFINE_RENAME_SYSREG_RW_FUNCS(clusterpwrdn_el1, CLUSTERPWRDN_EL1)
 
+/* Armv9.2 RME Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(gptbr_el3, GPTBR_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(gpccr_el3, GPCCR_EL3)
+
 #define IS_IN_EL(x) \
 	(GET_EL(read_CurrentEl()) == MODE_EL##x)
 
@@ -578,7 +589,28 @@
 	}
 }
 
+/*
+ * TLBIPAALLOS instruction
+ * (TLB Invalidate GPT Information by PA,
+ * All Entries, Outer Shareable)
+ */
+static inline void tlbipaallos(void)
+{
+	__asm__("SYS #6,c8,c1,#4");
+}
+
+/*
+ * Invalidate cached copies of GPT entries
+ * from TLBs by physical address
+ *
+ * @pa: the starting address for the range
+ *      of invalidation
+ * @size: size of the range of invalidation
+ */
+void gpt_tlbi_by_pa(uint64_t pa, size_t size);
+
+
-/* Previously defined accesor functions with incomplete register names  */
+/* Previously defined accessor functions with incomplete register names  */
 
 #define read_current_el()	read_CurrentEl()
 
diff --git a/include/arch/aarch64/el2_common_macros.S b/include/arch/aarch64/el2_common_macros.S
new file mode 100644
index 0000000..c57a1ec
--- /dev/null
+++ b/include/arch/aarch64/el2_common_macros.S
@@ -0,0 +1,448 @@
+/*
+ * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef EL2_COMMON_MACROS_S
+#define EL2_COMMON_MACROS_S
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+
+#include <platform_def.h>
+
+	/*
+	 * Helper macro to initialise system registers at EL2.
+	 */
+	.macro el2_arch_init_common
+
+	/* ---------------------------------------------------------------------
+	 * SCTLR_EL2 has already been initialised - read current value before
+	 * modifying.
+	 *
+	 * SCTLR_EL2.I: Enable the instruction cache.
+	 *
+	 * SCTLR_EL2.SA: Enable Stack Alignment check. A SP alignment fault
+	 *  exception is generated if a load or store instruction executed at
+	 *  EL2 uses the SP as the base address and the SP is not aligned to a
+	 *  16-byte boundary.
+	 *
+	 * SCTLR_EL2.A: Enable Alignment fault checking. All instructions that
+	 *  load or store one or more registers have an alignment check that the
+	 *  address being accessed is aligned to the size of the data element(s)
+	 *  being accessed.
+	 * ---------------------------------------------------------------------
+	 */
+	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+	mrs	x0, sctlr_el2
+	orr	x0, x0, x1
+	msr	sctlr_el2, x0
+	isb
+
+	/* ---------------------------------------------------------------------
+	 * Initialise HCR_EL2, setting all fields rather than relying on HW.
+	 * All fields are architecturally UNKNOWN on reset. The following fields
+	 * do not change during the TF lifetime. The remaining fields are set to
+	 * zero here but are updated ahead of transitioning to a lower EL in the
+	 * function cm_init_context_common().
+	 *
+	 * HCR_EL2.TWE: Set to zero so that execution of WFE instructions at
+	 *  EL2, EL1 and EL0 are not trapped to EL2.
+	 *
+	 * HCR_EL2.TWI: Set to zero so that execution of WFI instructions at
+	 *  EL2, EL1 and EL0 are not trapped to EL2.
+	 *
+	 * HCR_EL2.HCD: Set to zero to enable HVC calls at EL1 and above,
+	 *  from both Security states and both Execution states.
+	 *
+	 * HCR_EL2.TEA: Set to one to route External Aborts and SError
+	 * Interrupts to EL2 when executing at any EL.
+	 *
+	 * HCR_EL2.{API,APK}: For Armv8.3 pointer authentication feature,
+	 * disable traps to EL2 when accessing key registers or using
+	 * pointer authentication instructions from lower ELs.
+	 * ---------------------------------------------------------------------
+	 */
+	mov_imm	x0, ((HCR_RESET_VAL | HCR_TEA_BIT) \
+			& ~(HCR_TWE_BIT | HCR_TWI_BIT | HCR_HCD_BIT))
+#if CTX_INCLUDE_PAUTH_REGS
+	/*
+	 * If the pointer authentication registers are saved during world
+	 * switches, enable pointer authentication everywhere, as it is safe to
+	 * do so.
+	 */
+	orr	x0, x0, #(HCR_API_BIT | HCR_APK_BIT)
+#endif  /* CTX_INCLUDE_PAUTH_REGS */
+	msr	hcr_el2, x0
+
+	/* ---------------------------------------------------------------------
+	 * Initialise MDCR_EL2, setting all fields rather than relying on
+	 * hw. Some fields are architecturally UNKNOWN on reset.
+	 *
+	 * MDCR_EL2.SDD: Set to one to disable AArch64 Secure self-hosted
+	 *  debug. Debug exceptions, other than Breakpoint Instruction
+	 *  exceptions, are disabled from all ELs in Secure state.
+	 *
+	 * MDCR_EL2.TDOSA: Set to zero so that EL1 and EL0 System register
+	 *  accesses to the powerdown debug registers do not trap to EL2.
+	 *
+	 * MDCR_EL2.TDA: Set to zero to allow EL0, EL1 and EL2 access to the
+	 *  debug registers, other than those registers that are controlled by
+	 *  MDCR_EL2.TDOSA.
+	 *
+	 * MDCR_EL2.TPM: Set to zero so that EL0, EL1, and EL2 System
+	 *  register accesses to all Performance Monitors registers do not trap
+	 *  to EL2.
+	 *
+	 * MDCR_EL2.SCCD: Set to one so that cycle counting by PMCCNTR_EL0
+	 *  is prohibited in Secure state. This bit is RES0 in versions of the
+	 *  architecture with FEAT_PMUv3p5 not implemented, setting it to 1
+	 *  doesn't have any effect on them.
+	 *
+	 * MDCR_EL2.MCCD: Set to one so that cycle counting by PMCCNTR_EL0
+	 *  is prohibited in EL2. This bit is RES0 in versions of the
+	 *  architecture with FEAT_PMUv3p7 not implemented, setting it to 1
+	 *  doesn't have any effect on them.
+	 *
+	 * MDCR_EL2.SPME: Set to zero so that event counting by the program-
+	 *  mable counters PMEVCNTR<n>_EL0 is prohibited in Secure state. If
+	 *  ARMv8.2 Debug is not implemented this bit does not have any effect
+	 *  on the counters unless there is support for the implementation
+	 *  defined authentication interface
+	 *  ExternalSecureNoninvasiveDebugEnabled().
+	 * ---------------------------------------------------------------------
+	 */
+	mov_imm	x0, ((MDCR_EL2_RESET_VAL | MDCR_SDD_BIT | \
+		      MDCR_SPD32(MDCR_SPD32_DISABLE) | MDCR_SCCD_BIT | \
+		      MDCR_MCCD_BIT) & ~(MDCR_SPME_BIT | MDCR_TDOSA_BIT | \
+		      MDCR_TDA_BIT | MDCR_TPM_BIT))
+
+	msr	mdcr_el2, x0
+
+	/* ---------------------------------------------------------------------
+	 * Initialise PMCR_EL0 setting all fields rather than relying
+	 * on hw. Some fields are architecturally UNKNOWN on reset.
+	 *
+	 * PMCR_EL0.LP: Set to one so that event counter overflow, that
+	 *  is recorded in PMOVSCLR_EL0[0-30], occurs on the increment
+	 *  that changes PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU
+	 *  is implemented. This bit is RES0 in versions of the architecture
+	 *  earlier than ARMv8.5, setting it to 1 doesn't have any effect
+	 *  on them.
+	 *
+	 * PMCR_EL0.LC: Set to one so that cycle counter overflow, that
+	 *  is recorded in PMOVSCLR_EL0[31], occurs on the increment
+	 *  that changes PMCCNTR_EL0[63] from 1 to 0.
+	 *
+	 * PMCR_EL0.DP: Set to one so that the cycle counter,
+	 *  PMCCNTR_EL0 does not count when event counting is prohibited.
+	 *
+	 * PMCR_EL0.X: Set to zero to disable export of events.
+	 *
+	 * PMCR_EL0.D: Set to zero so that, when enabled, PMCCNTR_EL0
+	 *  counts on every clock cycle.
+	 * ---------------------------------------------------------------------
+	 */
+	mov_imm	x0, ((PMCR_EL0_RESET_VAL | PMCR_EL0_LP_BIT | \
+		      PMCR_EL0_LC_BIT | PMCR_EL0_DP_BIT) & \
+		    ~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT))
+
+	msr	pmcr_el0, x0
+
+	/* ---------------------------------------------------------------------
+	 * Enable External Aborts and SError Interrupts now that the exception
+	 * vectors have been setup.
+	 * ---------------------------------------------------------------------
+	 */
+	msr	daifclr, #DAIF_ABT_BIT
+
+	/* ---------------------------------------------------------------------
+	 * Initialise CPTR_EL2, setting all fields rather than relying on hw.
+	 * All fields are architecturally UNKNOWN on reset.
+	 *
+	 * CPTR_EL2.TCPAC: Set to zero so that any accesses to CPACR_EL1 do
+	 * not trap to EL2.
+	 *
+	 * CPTR_EL2.TTA: Set to zero so that System register accesses to the
+	 *  trace registers do not trap to EL2.
+	 *
+	 * CPTR_EL2.TFP: Set to zero so that accesses to the V- or Z- registers
+	 *  by Advanced SIMD, floating-point or SVE instructions (if implemented)
+	 *  do not trap to EL2.
+	 */
+
+	mov_imm x0, (CPTR_EL2_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT))
+	msr	cptr_el2, x0
+
+	/*
+	 * If Data Independent Timing (DIT) functionality is implemented,
+	 * always enable DIT in EL2
+	 */
+	mrs	x0, id_aa64pfr0_el1
+	ubfx	x0, x0, #ID_AA64PFR0_DIT_SHIFT, #ID_AA64PFR0_DIT_LENGTH
+	cmp	x0, #ID_AA64PFR0_DIT_SUPPORTED
+	bne	1f
+	mov	x0, #DIT_BIT
+	msr	DIT, x0
+1:
+	.endm
+
+/* -----------------------------------------------------------------------------
+ * This is the super set of actions that need to be performed during a cold boot
+ * or a warm boot in EL2. This code is shared by BL1 and BL31.
+ *
+ * This macro will always perform reset handling, architectural initialisations
+ * and stack setup. The rest of the actions are optional because they might not
+ * be needed, depending on the context in which this macro is called. This is
+ * why this macro is parameterised; each parameter allows enabling or disabling
+ * some actions.
+ *
+ *  _init_sctlr:
+ *	Whether the macro needs to initialise SCTLR_EL2, including configuring
+ *      the endianness of data accesses.
+ *
+ *  _warm_boot_mailbox:
+ *	Whether the macro needs to detect the type of boot (cold/warm). The
+ *	detection is based on the platform entrypoint address : if it is zero
+ *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
+ *	this macro jumps on the platform entrypoint address.
+ *
+ *  _secondary_cold_boot:
+ *	Whether the macro needs to identify the CPU that is calling it: primary
+ *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
+ *	the platform initialisations, while the secondaries will be put in a
+ *	platform-specific state in the meantime.
+ *
+ *	If the caller knows this macro will only be called by the primary CPU
+ *	then this parameter can be defined to 0 to skip this step.
+ *
+ * _init_memory:
+ *	Whether the macro needs to initialise the memory.
+ *
+ * _init_c_runtime:
+ *	Whether the macro needs to initialise the C runtime environment.
+ *
+ * _exception_vectors:
+ *	Address of the exception vectors to program in the VBAR_EL2 register.
+ *
+ * _pie_fixup_size:
+ *	Size of memory region to fixup Global Descriptor Table (GDT).
+ *
+ *	A non-zero value is expected when firmware needs GDT to be fixed-up.
+ *
+ * -----------------------------------------------------------------------------
+ */
+	.macro el2_entrypoint_common					\
+		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
+		_init_memory, _init_c_runtime, _exception_vectors,	\
+		_pie_fixup_size
+
+	.if \_init_sctlr
+		/* -------------------------------------------------------------
+		 * This is the initialisation of SCTLR_EL2 and so must ensure
+		 * that all fields are explicitly set rather than relying on hw.
+		 * Some fields reset to an IMPLEMENTATION DEFINED value and
+		 * others are architecturally UNKNOWN on reset.
+		 *
+		 * SCTLR.EE: Set the CPU endianness before doing anything that
+		 *  might involve memory reads or writes. Set to zero to select
+		 *  Little Endian.
+		 *
+		 * SCTLR_EL2.WXN: For the EL2 translation regime, this field can
+		 *  force all memory regions that are writeable to be treated as
+		 *  XN (Execute-never). Set to zero so that this control has no
+		 *  effect on memory access permissions.
+		 *
+		 * SCTLR_EL2.SA: Set to zero to disable Stack Alignment check.
+		 *
+		 * SCTLR_EL2.A: Set to zero to disable Alignment fault checking.
+		 *
+		 * SCTLR.DSSBS: Set to zero to disable speculation store bypass
+		 *  safe behaviour upon exception entry to EL2.
+		 * -------------------------------------------------------------
+		 */
+		mov_imm	x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
+				| SCTLR_SA_BIT | SCTLR_A_BIT | SCTLR_DSSBS_BIT))
+		msr	sctlr_el2, x0
+		isb
+	.endif /* _init_sctlr */
+
+#if DISABLE_MTPMU
+		bl	mtpmu_disable
+#endif
+
+	.if \_warm_boot_mailbox
+		/* -------------------------------------------------------------
+		 * This code will be executed for both warm and cold resets.
+		 * Now is the time to distinguish between the two.
+		 * Query the platform entrypoint address and if it is not zero
+		 * then it means it is a warm boot so jump to this address.
+		 * -------------------------------------------------------------
+		 */
+		bl	plat_get_my_entrypoint
+		cbz	x0, do_cold_boot
+		br	x0
+
+	do_cold_boot:
+	.endif /* _warm_boot_mailbox */
+
+	.if \_pie_fixup_size
+#if ENABLE_PIE
+		/*
+		 * ------------------------------------------------------------
+		 * If PIE is enabled, fix up the Global Descriptor Table only
+		 * once, during the primary core's cold boot path.
+		 *
+		 * The compile-time base address required for the fixup is
+		 * calculated using the "pie_fixup" label present within the
+		 * first page.
+		 * ------------------------------------------------------------
+		 */
+	pie_fixup:
+		ldr	x0, =pie_fixup
+		and	x0, x0, #~(PAGE_SIZE_MASK)
+		mov_imm	x1, \_pie_fixup_size
+		add	x1, x1, x0
+		bl	fixup_gdt_reloc
+#endif /* ENABLE_PIE */
+	.endif /* _pie_fixup_size */
+
+	/* ---------------------------------------------------------------------
+	 * Set the exception vectors.
+	 * ---------------------------------------------------------------------
+	 */
+	adr	x0, \_exception_vectors
+	msr	vbar_el2, x0
+	isb
+
+	/* ---------------------------------------------------------------------
+	 * It is a cold boot.
+	 * Perform any processor specific actions upon reset e.g. cache, TLB
+	 * invalidations etc.
+	 * ---------------------------------------------------------------------
+	 */
+	bl	reset_handler
+
+	el2_arch_init_common
+
+	.if \_secondary_cold_boot
+		/* -------------------------------------------------------------
+		 * Check if this is a primary or secondary CPU cold boot.
+		 * The primary CPU will set up the platform while the
+		 * secondaries are placed in a platform-specific state until the
+		 * primary CPU performs the necessary actions to bring them out
+		 * of that state and allows entry into the OS.
+		 * -------------------------------------------------------------
+		 */
+		bl	plat_is_my_cpu_primary
+		cbnz	w0, do_primary_cold_boot
+
+		/* This is a cold boot on a secondary CPU */
+		bl	plat_secondary_cold_boot_setup
+		/* plat_secondary_cold_boot_setup() is not supposed to return */
+		bl	el2_panic
+	do_primary_cold_boot:
+	.endif /* _secondary_cold_boot */
+
+	/* ---------------------------------------------------------------------
+	 * Initialize memory now. Secondary CPU initialization won't get to this
+	 * point.
+	 * ---------------------------------------------------------------------
+	 */
+
+	.if \_init_memory
+		bl	platform_mem_init
+	.endif /* _init_memory */
+
+	/* ---------------------------------------------------------------------
+	 * Init C runtime environment:
+	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
+	 *       - the .bss section;
+	 *       - the coherent memory section (if any).
+	 *   - Relocate the data section from ROM to RAM, if required.
+	 * ---------------------------------------------------------------------
+	 */
+	.if \_init_c_runtime
+		adrp	x0, __BSS_START__
+		add	x0, x0, :lo12:__BSS_START__
+
+		adrp	x1, __BSS_END__
+		add	x1, x1, :lo12:__BSS_END__
+		sub	x1, x1, x0
+		bl	zeromem
+
+#if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_IN_XIP_MEM)
+		adrp	x0, __DATA_RAM_START__
+		add	x0, x0, :lo12:__DATA_RAM_START__
+		adrp	x1, __DATA_ROM_START__
+		add	x1, x1, :lo12:__DATA_ROM_START__
+		adrp	x2, __DATA_RAM_END__
+		add	x2, x2, :lo12:__DATA_RAM_END__
+		sub	x2, x2, x0
+		bl	memcpy16
+#endif
+	.endif /* _init_c_runtime */
+
+	/* ---------------------------------------------------------------------
+	 * Use SP_EL0 for the C runtime stack.
+	 * ---------------------------------------------------------------------
+	 */
+	msr	spsel, #0
+
+	/* ---------------------------------------------------------------------
+	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
+	 * the MMU is enabled. There is no risk of reading stale stack memory
+	 * after enabling the MMU as only the primary CPU is running at the
+	 * moment.
+	 * ---------------------------------------------------------------------
+	 */
+	bl	plat_set_my_stack
+
+#if STACK_PROTECTOR_ENABLED
+	.if \_init_c_runtime
+	bl	update_stack_protector_canary
+	.endif /* _init_c_runtime */
+#endif
+	.endm
+
+	.macro	apply_at_speculative_wa
+#if ERRATA_SPECULATIVE_AT
+	/*
+	 * Explicitly save x30 so as to free up a register and to enable
+	 * branching. Also save x29, which will be used in the called
+	 * function.
+	 */
+	stp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+	bl	save_and_update_ptw_el1_sys_regs
+	ldp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+#endif
+	.endm
+
+	.macro	restore_ptw_el1_sys_regs
+#if ERRATA_SPECULATIVE_AT
+	/* -----------------------------------------------------------
+	 * In case of ERRATA_SPECULATIVE_AT, the order below must be
+	 * followed to ensure that the page table walk is not enabled
+	 * until all EL1 system registers have been restored. TCR_EL1
+	 * must be updated last, since it restores the previous stage 1
+	 * page table walk setting (the TCR_EL1.EPDx bits). The ISBs
+	 * ensure that the CPU performs the steps below in order.
+	 *
+	 * 1. Ensure all other system registers are written before
+	 *    updating SCTLR_EL1 using ISB.
+	 * 2. Restore SCTLR_EL1 register.
+	 * 3. Ensure SCTLR_EL1 written successfully using ISB.
+	 * 4. Restore TCR_EL1 register.
+	 * -----------------------------------------------------------
+	 */
+	isb
+	ldp	x28, x29, [sp, #CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1]
+	msr	sctlr_el1, x28
+	isb
+	msr	tcr_el1, x29
+#endif
+	.endm
+
+#endif /* EL2_COMMON_MACROS_S */
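For reference, the sketch below shows how an EL2 image's entrypoint might
invoke el2_entrypoint_common as documented above. It is purely illustrative:
the symbol names (el2_image_entrypoint, el2_runtime_exceptions,
el2_image_main) and the parameter values are assumptions for the example,
not part of this patch.

	.globl	el2_image_entrypoint
el2_image_entrypoint:
	/*
	 * Delegate boot-type detection, architectural initialisation,
	 * stack setup and C runtime initialisation to the common macro.
	 * Parameter values are illustrative only.
	 */
	el2_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=1				\
		_secondary_cold_boot=1				\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=el2_runtime_exceptions	\
		_pie_fixup_size=0

	/* Hand over to the image's C entry point (hypothetical symbol). */
	bl	el2_image_main
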
diff --git a/include/arch/aarch64/el3_common_macros.S b/include/arch/aarch64/el3_common_macros.S
index d496584..7d6a963 100644
--- a/include/arch/aarch64/el3_common_macros.S
+++ b/include/arch/aarch64/el3_common_macros.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -88,6 +88,13 @@
 	 */
 	orr	x0, x0, #(SCR_API_BIT | SCR_APK_BIT)
 #endif
+#if ENABLE_RME
+	/*
+	 * TODO: Setting the EEL2 bit to allow EL3 access to secure-only registers
+	 * in context management. This will need to be refactored.
+	 */
+	orr	x0, x0, #SCR_EEL2_BIT
+#endif
 	msr	scr_el3, x0
 
 	/* ---------------------------------------------------------------------
@@ -365,6 +372,7 @@
 	msr	vbar_el3, x0
 	isb
 
+#if !(defined(IMAGE_BL2) && ENABLE_RME)
 	/* ---------------------------------------------------------------------
 	 * It is a cold boot.
 	 * Perform any processor specific actions upon reset e.g. cache, TLB
@@ -372,6 +380,7 @@
 	 * ---------------------------------------------------------------------
 	 */
 	bl	reset_handler
+#endif
 
 	el3_arch_init_common
 
@@ -414,7 +423,8 @@
 	 * ---------------------------------------------------------------------
 	 */
 	.if \_init_c_runtime
-#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_INV_DCACHE)
+#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && \
+	((BL2_AT_EL3 && BL2_INV_DCACHE) || ENABLE_RME))
 		/* -------------------------------------------------------------
 		 * Invalidate the RW memory used by the BL31 image. This
 		 * includes the data and NOBITS sections. This is done to
diff --git a/include/bl31/bl31.h b/include/bl31/bl31.h
index 3deb0a5..1d58ef9 100644
--- a/include/bl31/bl31.h
+++ b/include/bl31/bl31.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -19,6 +19,7 @@
 uint32_t bl31_get_next_image_type(void);
 void bl31_prepare_next_image_entry(void);
 void bl31_register_bl32_init(int32_t (*func)(void));
+void bl31_register_rmm_init(int32_t (*func)(void));
 void bl31_warm_entrypoint(void);
 void bl31_main(void);
 void bl31_lib_init(void);
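The new bl31_register_rmm_init() hook mirrors the existing
bl31_register_bl32_init() mechanism. A minimal sketch of how a dispatcher's
setup code might use it follows; rmm_init() and rmmd_setup_example() are
hypothetical names used only for illustration.

#include <stdint.h>

#include <bl31/bl31.h>

/* Hypothetical RMM initialisation callback, invoked later by BL31. */
static int32_t rmm_init(void)
{
	/*
	 * Prepare the RMM execution context here. A non-zero return is
	 * assumed to indicate success, matching the bl32_init convention.
	 */
	return 1;
}

/* Hypothetical dispatcher setup routine. */
static int rmmd_setup_example(void)
{
	bl31_register_rmm_init(&rmm_init);
	return 0;
}
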
diff --git a/include/common/bl_common.h b/include/common/bl_common.h
index e33840c..8cb4990 100644
--- a/include/common/bl_common.h
+++ b/include/common/bl_common.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -126,6 +126,8 @@
 IMPORT_SYM(uintptr_t, __BL31_END__,		BL31_END);
 #elif defined(IMAGE_BL32)
 IMPORT_SYM(uintptr_t, __BL32_END__,		BL32_END);
+#elif defined(IMAGE_RMM)
+IMPORT_SYM(uintptr_t, __RMM_END__,		RMM_END);
 #endif /* IMAGE_BLX */
 
 /* The following symbols are only exported from the BL2 at EL3 linker script. */
diff --git a/include/common/ep_info.h b/include/common/ep_info.h
index 4bfa1fa..771572c 100644
--- a/include/common/ep_info.h
+++ b/include/common/ep_info.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -18,14 +18,21 @@
 
 #define SECURE		EP_SECURE
 #define NON_SECURE	EP_NON_SECURE
+#define REALM		EP_REALM
+#if ENABLE_RME
+#define sec_state_is_valid(s)	(((s) == SECURE) ||	\
+				((s) == NON_SECURE) ||	\
+				((s) == REALM))
+#else
 #define sec_state_is_valid(s) (((s) == SECURE) || ((s) == NON_SECURE))
+#endif
 
 #define PARAM_EP_SECURITY_MASK	EP_SECURITY_MASK
 
 #define NON_EXECUTABLE	EP_NON_EXECUTABLE
 #define EXECUTABLE	EP_EXECUTABLE
 
-/* Secure or Non-secure image */
+/* Get/set security state of an image */
 #define GET_SECURITY_STATE(x) ((x) & EP_SECURITY_MASK)
 #define SET_SECURITY_STATE(x, security) \
 			((x) = ((x) & ~EP_SECURITY_MASK) | (security))
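With REALM accepted as a third security state when ENABLE_RME=1, an
entry-point descriptor can be tagged and validated as in the sketch below;
the helper name is hypothetical and only illustrates the macros.

#include <assert.h>

#include <common/ep_info.h>

#if ENABLE_RME
/* Hypothetical helper: mark an entry point as belonging to the Realm world. */
static void set_realm_security_state(entry_point_info_t *ep)
{
	SET_SECURITY_STATE(ep->h.attr, REALM);

	/* REALM only passes this check when ENABLE_RME is set. */
	assert(sec_state_is_valid(GET_SECURITY_STATE(ep->h.attr)));
}
#endif /* ENABLE_RME */
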
diff --git a/include/common/fdt_wrappers.h b/include/common/fdt_wrappers.h
index e8b3933..98e7a3e 100644
--- a/include/common/fdt_wrappers.h
+++ b/include/common/fdt_wrappers.h
@@ -48,4 +48,9 @@
 	return fdt32_to_cpu(dtb_header[1]);
 }
 
+#define fdt_for_each_compatible_node(dtb, node, compatible_str)       \
+for (node = fdt_node_offset_by_compatible(dtb, -1, compatible_str);   \
+     node >= 0;                                                       \
+     node = fdt_node_offset_by_compatible(dtb, node, compatible_str))
+
 #endif /* FDT_WRAPPERS_H */
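The new fdt_for_each_compatible_node() helper wraps repeated calls to
fdt_node_offset_by_compatible(). A small usage sketch follows; the function
name is hypothetical and the compatible string is caller-supplied.

#include <libfdt.h>

#include <common/fdt_wrappers.h>

/* Hypothetical helper: count the DT nodes matching a compatible string. */
static int count_compatible_nodes(const void *dtb, const char *compat)
{
	int node;
	int count = 0;

	/* Visits every node whose "compatible" property contains compat. */
	fdt_for_each_compatible_node(dtb, node, compat) {
		count++;
	}

	return count;
}
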
diff --git a/include/drivers/arm/ethosn.h b/include/drivers/arm/ethosn.h
index 6de2abb..9310733 100644
--- a/include/drivers/arm/ethosn.h
+++ b/include/drivers/arm/ethosn.h
@@ -38,8 +38,8 @@
 #define is_ethosn_fid(_fid) (((_fid) & ETHOSN_FID_MASK) == ETHOSN_FID_VALUE)
 
 /* Service version  */
-#define ETHOSN_VERSION_MAJOR U(0)
-#define ETHOSN_VERSION_MINOR U(1)
+#define ETHOSN_VERSION_MAJOR U(1)
+#define ETHOSN_VERSION_MINOR U(0)
 
 /* Return codes for function calls */
 #define ETHOSN_SUCCESS			 0
@@ -47,10 +47,10 @@
 /* -2 Reserved for NOT_REQUIRED */
 /* -3 Reserved for INVALID_PARAMETER */
 #define ETHOSN_FAILURE			-4
-#define ETHOSN_CORE_IDX_OUT_OF_RANGE	-5
+#define ETHOSN_UNKNOWN_CORE_ADDRESS	-5
 
 uintptr_t ethosn_smc_handler(uint32_t smc_fid,
-			     u_register_t core_idx,
+			     u_register_t core_addr,
 			     u_register_t x2,
 			     u_register_t x3,
 			     u_register_t x4,
diff --git a/include/drivers/measured_boot/measured_boot.h b/include/drivers/measured_boot/measured_boot.h
index f8769ab..05be4a9 100644
--- a/include/drivers/measured_boot/measured_boot.h
+++ b/include/drivers/measured_boot/measured_boot.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -11,9 +11,6 @@
 
 #include <drivers/measured_boot/event_log.h>
 
-/* Platform specific table of image IDs, names and PCRs */
-extern const image_data_t images_data[];
-
 /* Functions' declarations */
 void measured_boot_init(void);
 void measured_boot_finish(void);
diff --git a/include/drivers/nxp/flexspi/flash_info.h b/include/drivers/nxp/flexspi/flash_info.h
index 94c9f02..d0ffc86 100644
--- a/include/drivers/nxp/flexspi/flash_info.h
+++ b/include/drivers/nxp/flexspi/flash_info.h
@@ -12,6 +12,7 @@
 
 #define SZ_16M_BYTES			0x1000000U
 
+/* Start of "if defined(CONFIG_MT25QU512A)" */
 #if defined(CONFIG_MT25QU512A)
 #define F_SECTOR_64K			0x10000U
 #define F_PAGE_256			0x100U
@@ -22,6 +23,9 @@
 #define F_SECTOR_ERASE_SZ		F_SECTOR_4K
 #endif
 
+/* End of "if defined(CONFIG_MT25QU512A)" */
+
+/* Start of "if defined(CONFIG_MX25U25645G)" */
 #elif defined(CONFIG_MX25U25645G)
 #define F_SECTOR_64K			0x10000U
 #define F_PAGE_256			0x100U
@@ -32,6 +36,9 @@
 #define F_SECTOR_ERASE_SZ		F_SECTOR_4K
 #endif
 
+/* End of "if defined(CONFIG_MX25U25645G)" */
+
+/* Start of "if defined(CONFIG_MX25U51245G)" */
 #elif defined(CONFIG_MX25U51245G)
 #define F_SECTOR_64K			0x10000U
 #define F_PAGE_256			0x100U
@@ -42,6 +49,9 @@
 #define F_SECTOR_ERASE_SZ		F_SECTOR_4K
 #endif
 
+/* End of "if defined(CONFIG_MX25U51245G)" */
+
+/* Start of "if defined(CONFIG_MT35XU512A)" */
 #elif defined(CONFIG_MT35XU512A)
 #define F_SECTOR_128K			0x20000U
 #define F_SECTOR_32K			0x8000U
@@ -52,7 +62,18 @@
 #ifdef CONFIG_FSPI_4K_ERASE
 #define F_SECTOR_ERASE_SZ		F_SECTOR_4K
 #endif
+/* If warm boot is enabled for the platform, this is the
+ * number of Arm NOP instructions used to mark the
+ * completion of a write operation to the flash; the count
+ * varies from one flash device to another.
+ */
+#ifdef NXP_WARM_BOOT
+#define FLASH_WR_COMP_WAIT_BY_NOP_COUNT	0x20000
+#endif
 
+/* End of "if defined(CONFIG_MT35XU512A)" */
+
+/* Start of #elif defined(CONFIG_MT35XU02G) */
 #elif defined(CONFIG_MT35XU02G)
 #define F_SECTOR_128K			0x20000U
 #define F_PAGE_256			0x100U
@@ -63,9 +84,6 @@
 #define F_SECTOR_ERASE_SZ		F_SECTOR_4K
 #endif
 
-#ifdef NXP_WARM_BOOT
-#define FLASH_WR_COMP_WAIT_BY_NOP_COUNT	0x20000
-#endif
+#endif /* End of #elif defined(CONFIG_MT35XU02G) */
 
-#endif
 #endif /* FLASH_INFO_H */
diff --git a/include/drivers/st/stm32mp1_rcc.h b/include/drivers/st/stm32mp1_rcc.h
index 2ffc3b2..14f93fd 100644
--- a/include/drivers/st/stm32mp1_rcc.h
+++ b/include/drivers/st/stm32mp1_rcc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2019, STMicroelectronics - All Rights Reserved
+ * Copyright (c) 2015-2021, STMicroelectronics - All Rights Reserved
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,508 +9,1158 @@
 
 #include <lib/utils_def.h>
 
-#define RCC_TZCR			U(0x00)
-#define RCC_OCENSETR			U(0x0C)
-#define RCC_OCENCLRR			U(0x10)
-#define RCC_HSICFGR			U(0x18)
-#define RCC_CSICFGR			U(0x1C)
-#define RCC_MPCKSELR			U(0x20)
-#define RCC_ASSCKSELR			U(0x24)
-#define RCC_RCK12SELR			U(0x28)
-#define RCC_MPCKDIVR			U(0x2C)
-#define RCC_AXIDIVR			U(0x30)
-#define RCC_APB4DIVR			U(0x3C)
-#define RCC_APB5DIVR			U(0x40)
-#define RCC_RTCDIVR			U(0x44)
-#define RCC_MSSCKSELR			U(0x48)
-#define RCC_PLL1CR			U(0x80)
-#define RCC_PLL1CFGR1			U(0x84)
-#define RCC_PLL1CFGR2			U(0x88)
-#define RCC_PLL1FRACR			U(0x8C)
-#define RCC_PLL1CSGR			U(0x90)
-#define RCC_PLL2CR			U(0x94)
-#define RCC_PLL2CFGR1			U(0x98)
-#define RCC_PLL2CFGR2			U(0x9C)
-#define RCC_PLL2FRACR			U(0xA0)
-#define RCC_PLL2CSGR			U(0xA4)
-#define RCC_I2C46CKSELR			U(0xC0)
-#define RCC_SPI6CKSELR			U(0xC4)
-#define RCC_UART1CKSELR			U(0xC8)
-#define RCC_RNG1CKSELR			U(0xCC)
-#define RCC_CPERCKSELR			U(0xD0)
-#define RCC_STGENCKSELR			U(0xD4)
-#define RCC_DDRITFCR			U(0xD8)
-#define RCC_MP_BOOTCR			U(0x100)
-#define RCC_MP_SREQSETR			U(0x104)
-#define RCC_MP_SREQCLRR			U(0x108)
-#define RCC_MP_GCR			U(0x10C)
-#define RCC_MP_APRSTCR			U(0x110)
-#define RCC_MP_APRSTSR			U(0x114)
-#define RCC_BDCR			U(0x140)
-#define RCC_RDLSICR			U(0x144)
-#define RCC_APB4RSTSETR			U(0x180)
-#define RCC_APB4RSTCLRR			U(0x184)
-#define RCC_APB5RSTSETR			U(0x188)
-#define RCC_APB5RSTCLRR			U(0x18C)
-#define RCC_AHB5RSTSETR			U(0x190)
-#define RCC_AHB5RSTCLRR			U(0x194)
-#define RCC_AHB6RSTSETR			U(0x198)
-#define RCC_AHB6RSTCLRR			U(0x19C)
-#define RCC_TZAHB6RSTSETR		U(0x1A0)
-#define RCC_TZAHB6RSTCLRR		U(0x1A4)
-#define RCC_MP_APB4ENSETR		U(0x200)
-#define RCC_MP_APB4ENCLRR		U(0x204)
-#define RCC_MP_APB5ENSETR		U(0x208)
-#define RCC_MP_APB5ENCLRR		U(0x20C)
-#define RCC_MP_AHB5ENSETR		U(0x210)
-#define RCC_MP_AHB5ENCLRR		U(0x214)
-#define RCC_MP_AHB6ENSETR		U(0x218)
-#define RCC_MP_AHB6ENCLRR		U(0x21C)
-#define RCC_MP_TZAHB6ENSETR		U(0x220)
-#define RCC_MP_TZAHB6ENCLRR		U(0x224)
-#define RCC_MC_APB4ENSETR		U(0x280)
-#define RCC_MC_APB4ENCLRR		U(0x284)
-#define RCC_MC_APB5ENSETR		U(0x288)
-#define RCC_MC_APB5ENCLRR		U(0x28C)
-#define RCC_MC_AHB5ENSETR		U(0x290)
-#define RCC_MC_AHB5ENCLRR		U(0x294)
-#define RCC_MC_AHB6ENSETR		U(0x298)
-#define RCC_MC_AHB6ENCLRR		U(0x29C)
-#define RCC_MP_APB4LPENSETR		U(0x300)
-#define RCC_MP_APB4LPENCLRR		U(0x304)
-#define RCC_MP_APB5LPENSETR		U(0x308)
-#define RCC_MP_APB5LPENCLRR		U(0x30C)
-#define RCC_MP_AHB5LPENSETR		U(0x310)
-#define RCC_MP_AHB5LPENCLRR		U(0x314)
-#define RCC_MP_AHB6LPENSETR		U(0x318)
-#define RCC_MP_AHB6LPENCLRR		U(0x31C)
-#define RCC_MP_TZAHB6LPENSETR		U(0x320)
-#define RCC_MP_TZAHB6LPENCLRR		U(0x324)
-#define RCC_MC_APB4LPENSETR		U(0x380)
-#define RCC_MC_APB4LPENCLRR		U(0x384)
-#define RCC_MC_APB5LPENSETR		U(0x388)
-#define RCC_MC_APB5LPENCLRR		U(0x38C)
-#define RCC_MC_AHB5LPENSETR		U(0x390)
-#define RCC_MC_AHB5LPENCLRR		U(0x394)
-#define RCC_MC_AHB6LPENSETR		U(0x398)
-#define RCC_MC_AHB6LPENCLRR		U(0x39C)
-#define RCC_BR_RSTSCLRR			U(0x400)
-#define RCC_MP_GRSTCSETR		U(0x404)
-#define RCC_MP_RSTSCLRR			U(0x408)
-#define RCC_MP_IWDGFZSETR		U(0x40C)
-#define RCC_MP_IWDGFZCLRR		U(0x410)
-#define RCC_MP_CIER			U(0x414)
-#define RCC_MP_CIFR			U(0x418)
-#define RCC_PWRLPDLYCR			U(0x41C)
-#define RCC_MP_RSTSSETR			U(0x420)
-#define RCC_MCO1CFGR			U(0x800)
-#define RCC_MCO2CFGR			U(0x804)
-#define RCC_OCRDYR			U(0x808)
-#define RCC_DBGCFGR			U(0x80C)
-#define RCC_RCK3SELR			U(0x820)
-#define RCC_RCK4SELR			U(0x824)
-#define RCC_TIMG1PRER			U(0x828)
-#define RCC_TIMG2PRER			U(0x82C)
-#define RCC_MCUDIVR			U(0x830)
-#define RCC_APB1DIVR			U(0x834)
-#define RCC_APB2DIVR			U(0x838)
-#define RCC_APB3DIVR			U(0x83C)
-#define RCC_PLL3CR			U(0x880)
-#define RCC_PLL3CFGR1			U(0x884)
-#define RCC_PLL3CFGR2			U(0x888)
-#define RCC_PLL3FRACR			U(0x88C)
-#define RCC_PLL3CSGR			U(0x890)
-#define RCC_PLL4CR			U(0x894)
-#define RCC_PLL4CFGR1			U(0x898)
-#define RCC_PLL4CFGR2			U(0x89C)
-#define RCC_PLL4FRACR			U(0x8A0)
-#define RCC_PLL4CSGR			U(0x8A4)
-#define RCC_I2C12CKSELR			U(0x8C0)
-#define RCC_I2C35CKSELR			U(0x8C4)
-#define RCC_SAI1CKSELR			U(0x8C8)
-#define RCC_SAI2CKSELR			U(0x8CC)
-#define RCC_SAI3CKSELR			U(0x8D0)
-#define RCC_SAI4CKSELR			U(0x8D4)
-#define RCC_SPI2S1CKSELR		U(0x8D8)
-#define RCC_SPI2S23CKSELR		U(0x8DC)
-#define RCC_SPI45CKSELR			U(0x8E0)
-#define RCC_UART6CKSELR			U(0x8E4)
-#define RCC_UART24CKSELR		U(0x8E8)
-#define RCC_UART35CKSELR		U(0x8EC)
-#define RCC_UART78CKSELR		U(0x8F0)
-#define RCC_SDMMC12CKSELR		U(0x8F4)
-#define RCC_SDMMC3CKSELR		U(0x8F8)
-#define RCC_ETHCKSELR			U(0x8FC)
-#define RCC_QSPICKSELR			U(0x900)
-#define RCC_FMCCKSELR			U(0x904)
-#define RCC_FDCANCKSELR			U(0x90C)
-#define RCC_SPDIFCKSELR			U(0x914)
-#define RCC_CECCKSELR			U(0x918)
-#define RCC_USBCKSELR			U(0x91C)
-#define RCC_RNG2CKSELR			U(0x920)
-#define RCC_DSICKSELR			U(0x924)
-#define RCC_ADCCKSELR			U(0x928)
-#define RCC_LPTIM45CKSELR		U(0x92C)
-#define RCC_LPTIM23CKSELR		U(0x930)
-#define RCC_LPTIM1CKSELR		U(0x934)
-#define RCC_APB1RSTSETR			U(0x980)
-#define RCC_APB1RSTCLRR			U(0x984)
-#define RCC_APB2RSTSETR			U(0x988)
-#define RCC_APB2RSTCLRR			U(0x98C)
-#define RCC_APB3RSTSETR			U(0x990)
-#define RCC_APB3RSTCLRR			U(0x994)
-#define RCC_AHB2RSTSETR			U(0x998)
-#define RCC_AHB2RSTCLRR			U(0x99C)
-#define RCC_AHB3RSTSETR			U(0x9A0)
-#define RCC_AHB3RSTCLRR			U(0x9A4)
-#define RCC_AHB4RSTSETR			U(0x9A8)
-#define RCC_AHB4RSTCLRR			U(0x9AC)
-#define RCC_MP_APB1ENSETR		U(0xA00)
-#define RCC_MP_APB1ENCLRR		U(0xA04)
-#define RCC_MP_APB2ENSETR		U(0xA08)
-#define RCC_MP_APB2ENCLRR		U(0xA0C)
-#define RCC_MP_APB3ENSETR		U(0xA10)
-#define RCC_MP_APB3ENCLRR		U(0xA14)
-#define RCC_MP_AHB2ENSETR		U(0xA18)
-#define RCC_MP_AHB2ENCLRR		U(0xA1C)
-#define RCC_MP_AHB3ENSETR		U(0xA20)
-#define RCC_MP_AHB3ENCLRR		U(0xA24)
-#define RCC_MP_AHB4ENSETR		U(0xA28)
-#define RCC_MP_AHB4ENCLRR		U(0xA2C)
-#define RCC_MP_MLAHBENSETR		U(0xA38)
-#define RCC_MP_MLAHBENCLRR		U(0xA3C)
-#define RCC_MC_APB1ENSETR		U(0xA80)
-#define RCC_MC_APB1ENCLRR		U(0xA84)
-#define RCC_MC_APB2ENSETR		U(0xA88)
-#define RCC_MC_APB2ENCLRR		U(0xA8C)
-#define RCC_MC_APB3ENSETR		U(0xA90)
-#define RCC_MC_APB3ENCLRR		U(0xA94)
-#define RCC_MC_AHB2ENSETR		U(0xA98)
-#define RCC_MC_AHB2ENCLRR		U(0xA9C)
-#define RCC_MC_AHB3ENSETR		U(0xAA0)
-#define RCC_MC_AHB3ENCLRR		U(0xAA4)
-#define RCC_MC_AHB4ENSETR		U(0xAA8)
-#define RCC_MC_AHB4ENCLRR		U(0xAAC)
-#define RCC_MC_AXIMENSETR		U(0xAB0)
-#define RCC_MC_AXIMENCLRR		U(0xAB4)
-#define RCC_MC_MLAHBENSETR		U(0xAB8)
-#define RCC_MC_MLAHBENCLRR		U(0xABC)
-#define RCC_MP_APB1LPENSETR		U(0xB00)
-#define RCC_MP_APB1LPENCLRR		U(0xB04)
-#define RCC_MP_APB2LPENSETR		U(0xB08)
-#define RCC_MP_APB2LPENCLRR		U(0xB0C)
-#define RCC_MP_APB3LPENSETR		U(0xB10)
-#define RCC_MP_APB3LPENCLRR		U(0xB14)
-#define RCC_MP_AHB2LPENSETR		U(0xB18)
-#define RCC_MP_AHB2LPENCLRR		U(0xB1C)
-#define RCC_MP_AHB3LPENSETR		U(0xB20)
-#define RCC_MP_AHB3LPENCLRR		U(0xB24)
-#define RCC_MP_AHB4LPENSETR		U(0xB28)
-#define RCC_MP_AHB4LPENCLRR		U(0xB2C)
-#define RCC_MP_AXIMLPENSETR		U(0xB30)
-#define RCC_MP_AXIMLPENCLRR		U(0xB34)
-#define RCC_MP_MLAHBLPENSETR		U(0xB38)
-#define RCC_MP_MLAHBLPENCLRR		U(0xB3C)
-#define RCC_MC_APB1LPENSETR		U(0xB80)
-#define RCC_MC_APB1LPENCLRR		U(0xB84)
-#define RCC_MC_APB2LPENSETR		U(0xB88)
-#define RCC_MC_APB2LPENCLRR		U(0xB8C)
-#define RCC_MC_APB3LPENSETR		U(0xB90)
-#define RCC_MC_APB3LPENCLRR		U(0xB94)
-#define RCC_MC_AHB2LPENSETR		U(0xB98)
-#define RCC_MC_AHB2LPENCLRR		U(0xB9C)
-#define RCC_MC_AHB3LPENSETR		U(0xBA0)
-#define RCC_MC_AHB3LPENCLRR		U(0xBA4)
-#define RCC_MC_AHB4LPENSETR		U(0xBA8)
-#define RCC_MC_AHB4LPENCLRR		U(0xBAC)
-#define RCC_MC_AXIMLPENSETR		U(0xBB0)
-#define RCC_MC_AXIMLPENCLRR		U(0xBB4)
-#define RCC_MC_MLAHBLPENSETR		U(0xBB8)
-#define RCC_MC_MLAHBLPENCLRR		U(0xBBC)
-#define RCC_MC_RSTSCLRR			U(0xC00)
-#define RCC_MC_CIER			U(0xC14)
-#define RCC_MC_CIFR			U(0xC18)
-#define RCC_VERR			U(0xFF4)
-#define RCC_IDR				U(0xFF8)
-#define RCC_SIDR			U(0xFFC)
+#define RCC_TZCR				U(0x00)
+#define RCC_OCENSETR				U(0x0C)
+#define RCC_OCENCLRR				U(0x10)
+#define RCC_HSICFGR				U(0x18)
+#define RCC_CSICFGR				U(0x1C)
+#define RCC_MPCKSELR				U(0x20)
+#define RCC_ASSCKSELR				U(0x24)
+#define RCC_RCK12SELR				U(0x28)
+#define RCC_MPCKDIVR				U(0x2C)
+#define RCC_AXIDIVR				U(0x30)
+#define RCC_APB4DIVR				U(0x3C)
+#define RCC_APB5DIVR				U(0x40)
+#define RCC_RTCDIVR				U(0x44)
+#define RCC_MSSCKSELR				U(0x48)
+#define RCC_PLL1CR				U(0x80)
+#define RCC_PLL1CFGR1				U(0x84)
+#define RCC_PLL1CFGR2				U(0x88)
+#define RCC_PLL1FRACR				U(0x8C)
+#define RCC_PLL1CSGR				U(0x90)
+#define RCC_PLL2CR				U(0x94)
+#define RCC_PLL2CFGR1				U(0x98)
+#define RCC_PLL2CFGR2				U(0x9C)
+#define RCC_PLL2FRACR				U(0xA0)
+#define RCC_PLL2CSGR				U(0xA4)
+#define RCC_I2C46CKSELR				U(0xC0)
+#define RCC_SPI6CKSELR				U(0xC4)
+#define RCC_UART1CKSELR				U(0xC8)
+#define RCC_RNG1CKSELR				U(0xCC)
+#define RCC_CPERCKSELR				U(0xD0)
+#define RCC_STGENCKSELR				U(0xD4)
+#define RCC_DDRITFCR				U(0xD8)
+#define RCC_MP_BOOTCR				U(0x100)
+#define RCC_MP_SREQSETR				U(0x104)
+#define RCC_MP_SREQCLRR				U(0x108)
+#define RCC_MP_GCR				U(0x10C)
+#define RCC_MP_APRSTCR				U(0x110)
+#define RCC_MP_APRSTSR				U(0x114)
+#define RCC_BDCR				U(0x140)
+#define RCC_RDLSICR				U(0x144)
+#define RCC_APB4RSTSETR				U(0x180)
+#define RCC_APB4RSTCLRR				U(0x184)
+#define RCC_APB5RSTSETR				U(0x188)
+#define RCC_APB5RSTCLRR				U(0x18C)
+#define RCC_AHB5RSTSETR				U(0x190)
+#define RCC_AHB5RSTCLRR				U(0x194)
+#define RCC_AHB6RSTSETR				U(0x198)
+#define RCC_AHB6RSTCLRR				U(0x19C)
+#define RCC_TZAHB6RSTSETR			U(0x1A0)
+#define RCC_TZAHB6RSTCLRR			U(0x1A4)
+#define RCC_MP_APB4ENSETR			U(0x200)
+#define RCC_MP_APB4ENCLRR			U(0x204)
+#define RCC_MP_APB5ENSETR			U(0x208)
+#define RCC_MP_APB5ENCLRR			U(0x20C)
+#define RCC_MP_AHB5ENSETR			U(0x210)
+#define RCC_MP_AHB5ENCLRR			U(0x214)
+#define RCC_MP_AHB6ENSETR			U(0x218)
+#define RCC_MP_AHB6ENCLRR			U(0x21C)
+#define RCC_MP_TZAHB6ENSETR			U(0x220)
+#define RCC_MP_TZAHB6ENCLRR			U(0x224)
+#define RCC_MC_APB4ENSETR			U(0x280)
+#define RCC_MC_APB4ENCLRR			U(0x284)
+#define RCC_MC_APB5ENSETR			U(0x288)
+#define RCC_MC_APB5ENCLRR			U(0x28C)
+#define RCC_MC_AHB5ENSETR			U(0x290)
+#define RCC_MC_AHB5ENCLRR			U(0x294)
+#define RCC_MC_AHB6ENSETR			U(0x298)
+#define RCC_MC_AHB6ENCLRR			U(0x29C)
+#define RCC_MP_APB4LPENSETR			U(0x300)
+#define RCC_MP_APB4LPENCLRR			U(0x304)
+#define RCC_MP_APB5LPENSETR			U(0x308)
+#define RCC_MP_APB5LPENCLRR			U(0x30C)
+#define RCC_MP_AHB5LPENSETR			U(0x310)
+#define RCC_MP_AHB5LPENCLRR			U(0x314)
+#define RCC_MP_AHB6LPENSETR			U(0x318)
+#define RCC_MP_AHB6LPENCLRR			U(0x31C)
+#define RCC_MP_TZAHB6LPENSETR			U(0x320)
+#define RCC_MP_TZAHB6LPENCLRR			U(0x324)
+#define RCC_MC_APB4LPENSETR			U(0x380)
+#define RCC_MC_APB4LPENCLRR			U(0x384)
+#define RCC_MC_APB5LPENSETR			U(0x388)
+#define RCC_MC_APB5LPENCLRR			U(0x38C)
+#define RCC_MC_AHB5LPENSETR			U(0x390)
+#define RCC_MC_AHB5LPENCLRR			U(0x394)
+#define RCC_MC_AHB6LPENSETR			U(0x398)
+#define RCC_MC_AHB6LPENCLRR			U(0x39C)
+#define RCC_BR_RSTSCLRR				U(0x400)
+#define RCC_MP_GRSTCSETR			U(0x404)
+#define RCC_MP_RSTSCLRR				U(0x408)
+#define RCC_MP_IWDGFZSETR			U(0x40C)
+#define RCC_MP_IWDGFZCLRR			U(0x410)
+#define RCC_MP_CIER				U(0x414)
+#define RCC_MP_CIFR				U(0x418)
+#define RCC_PWRLPDLYCR				U(0x41C)
+#define RCC_MP_RSTSSETR				U(0x420)
+#define RCC_MCO1CFGR				U(0x800)
+#define RCC_MCO2CFGR				U(0x804)
+#define RCC_OCRDYR				U(0x808)
+#define RCC_DBGCFGR				U(0x80C)
+#define RCC_RCK3SELR				U(0x820)
+#define RCC_RCK4SELR				U(0x824)
+#define RCC_TIMG1PRER				U(0x828)
+#define RCC_TIMG2PRER				U(0x82C)
+#define RCC_MCUDIVR				U(0x830)
+#define RCC_APB1DIVR				U(0x834)
+#define RCC_APB2DIVR				U(0x838)
+#define RCC_APB3DIVR				U(0x83C)
+#define RCC_PLL3CR				U(0x880)
+#define RCC_PLL3CFGR1				U(0x884)
+#define RCC_PLL3CFGR2				U(0x888)
+#define RCC_PLL3FRACR				U(0x88C)
+#define RCC_PLL3CSGR				U(0x890)
+#define RCC_PLL4CR				U(0x894)
+#define RCC_PLL4CFGR1				U(0x898)
+#define RCC_PLL4CFGR2				U(0x89C)
+#define RCC_PLL4FRACR				U(0x8A0)
+#define RCC_PLL4CSGR				U(0x8A4)
+#define RCC_I2C12CKSELR				U(0x8C0)
+#define RCC_I2C35CKSELR				U(0x8C4)
+#define RCC_SAI1CKSELR				U(0x8C8)
+#define RCC_SAI2CKSELR				U(0x8CC)
+#define RCC_SAI3CKSELR				U(0x8D0)
+#define RCC_SAI4CKSELR				U(0x8D4)
+#define RCC_SPI2S1CKSELR			U(0x8D8)
+#define RCC_SPI2S23CKSELR			U(0x8DC)
+#define RCC_SPI45CKSELR				U(0x8E0)
+#define RCC_UART6CKSELR				U(0x8E4)
+#define RCC_UART24CKSELR			U(0x8E8)
+#define RCC_UART35CKSELR			U(0x8EC)
+#define RCC_UART78CKSELR			U(0x8F0)
+#define RCC_SDMMC12CKSELR			U(0x8F4)
+#define RCC_SDMMC3CKSELR			U(0x8F8)
+#define RCC_ETHCKSELR				U(0x8FC)
+#define RCC_QSPICKSELR				U(0x900)
+#define RCC_FMCCKSELR				U(0x904)
+#define RCC_FDCANCKSELR				U(0x90C)
+#define RCC_SPDIFCKSELR				U(0x914)
+#define RCC_CECCKSELR				U(0x918)
+#define RCC_USBCKSELR				U(0x91C)
+#define RCC_RNG2CKSELR				U(0x920)
+#define RCC_DSICKSELR				U(0x924)
+#define RCC_ADCCKSELR				U(0x928)
+#define RCC_LPTIM45CKSELR			U(0x92C)
+#define RCC_LPTIM23CKSELR			U(0x930)
+#define RCC_LPTIM1CKSELR			U(0x934)
+#define RCC_APB1RSTSETR				U(0x980)
+#define RCC_APB1RSTCLRR				U(0x984)
+#define RCC_APB2RSTSETR				U(0x988)
+#define RCC_APB2RSTCLRR				U(0x98C)
+#define RCC_APB3RSTSETR				U(0x990)
+#define RCC_APB3RSTCLRR				U(0x994)
+#define RCC_AHB2RSTSETR				U(0x998)
+#define RCC_AHB2RSTCLRR				U(0x99C)
+#define RCC_AHB3RSTSETR				U(0x9A0)
+#define RCC_AHB3RSTCLRR				U(0x9A4)
+#define RCC_AHB4RSTSETR				U(0x9A8)
+#define RCC_AHB4RSTCLRR				U(0x9AC)
+#define RCC_MP_APB1ENSETR			U(0xA00)
+#define RCC_MP_APB1ENCLRR			U(0xA04)
+#define RCC_MP_APB2ENSETR			U(0xA08)
+#define RCC_MP_APB2ENCLRR			U(0xA0C)
+#define RCC_MP_APB3ENSETR			U(0xA10)
+#define RCC_MP_APB3ENCLRR			U(0xA14)
+#define RCC_MP_AHB2ENSETR			U(0xA18)
+#define RCC_MP_AHB2ENCLRR			U(0xA1C)
+#define RCC_MP_AHB3ENSETR			U(0xA20)
+#define RCC_MP_AHB3ENCLRR			U(0xA24)
+#define RCC_MP_AHB4ENSETR			U(0xA28)
+#define RCC_MP_AHB4ENCLRR			U(0xA2C)
+#define RCC_MP_MLAHBENSETR			U(0xA38)
+#define RCC_MP_MLAHBENCLRR			U(0xA3C)
+#define RCC_MC_APB1ENSETR			U(0xA80)
+#define RCC_MC_APB1ENCLRR			U(0xA84)
+#define RCC_MC_APB2ENSETR			U(0xA88)
+#define RCC_MC_APB2ENCLRR			U(0xA8C)
+#define RCC_MC_APB3ENSETR			U(0xA90)
+#define RCC_MC_APB3ENCLRR			U(0xA94)
+#define RCC_MC_AHB2ENSETR			U(0xA98)
+#define RCC_MC_AHB2ENCLRR			U(0xA9C)
+#define RCC_MC_AHB3ENSETR			U(0xAA0)
+#define RCC_MC_AHB3ENCLRR			U(0xAA4)
+#define RCC_MC_AHB4ENSETR			U(0xAA8)
+#define RCC_MC_AHB4ENCLRR			U(0xAAC)
+#define RCC_MC_AXIMENSETR			U(0xAB0)
+#define RCC_MC_AXIMENCLRR			U(0xAB4)
+#define RCC_MC_MLAHBENSETR			U(0xAB8)
+#define RCC_MC_MLAHBENCLRR			U(0xABC)
+#define RCC_MP_APB1LPENSETR			U(0xB00)
+#define RCC_MP_APB1LPENCLRR			U(0xB04)
+#define RCC_MP_APB2LPENSETR			U(0xB08)
+#define RCC_MP_APB2LPENCLRR			U(0xB0C)
+#define RCC_MP_APB3LPENSETR			U(0xB10)
+#define RCC_MP_APB3LPENCLRR			U(0xB14)
+#define RCC_MP_AHB2LPENSETR			U(0xB18)
+#define RCC_MP_AHB2LPENCLRR			U(0xB1C)
+#define RCC_MP_AHB3LPENSETR			U(0xB20)
+#define RCC_MP_AHB3LPENCLRR			U(0xB24)
+#define RCC_MP_AHB4LPENSETR			U(0xB28)
+#define RCC_MP_AHB4LPENCLRR			U(0xB2C)
+#define RCC_MP_AXIMLPENSETR			U(0xB30)
+#define RCC_MP_AXIMLPENCLRR			U(0xB34)
+#define RCC_MP_MLAHBLPENSETR			U(0xB38)
+#define RCC_MP_MLAHBLPENCLRR			U(0xB3C)
+#define RCC_MC_APB1LPENSETR			U(0xB80)
+#define RCC_MC_APB1LPENCLRR			U(0xB84)
+#define RCC_MC_APB2LPENSETR			U(0xB88)
+#define RCC_MC_APB2LPENCLRR			U(0xB8C)
+#define RCC_MC_APB3LPENSETR			U(0xB90)
+#define RCC_MC_APB3LPENCLRR			U(0xB94)
+#define RCC_MC_AHB2LPENSETR			U(0xB98)
+#define RCC_MC_AHB2LPENCLRR			U(0xB9C)
+#define RCC_MC_AHB3LPENSETR			U(0xBA0)
+#define RCC_MC_AHB3LPENCLRR			U(0xBA4)
+#define RCC_MC_AHB4LPENSETR			U(0xBA8)
+#define RCC_MC_AHB4LPENCLRR			U(0xBAC)
+#define RCC_MC_AXIMLPENSETR			U(0xBB0)
+#define RCC_MC_AXIMLPENCLRR			U(0xBB4)
+#define RCC_MC_MLAHBLPENSETR			U(0xBB8)
+#define RCC_MC_MLAHBLPENCLRR			U(0xBBC)
+#define RCC_MC_RSTSCLRR				U(0xC00)
+#define RCC_MC_CIER				U(0xC14)
+#define RCC_MC_CIFR				U(0xC18)
+#define RCC_VERR				U(0xFF4)
+#define RCC_IDR					U(0xFF8)
+#define RCC_SIDR				U(0xFFC)
 
-#define RCC_OFFSET_MASK			GENMASK(11, 0)
+/* RCC_TZCR register fields */
+#define RCC_TZCR_TZEN				BIT(0)
+#define RCC_TZCR_MCKPROT			BIT(1)
 
-/* Values for RCC_TZCR register */
-#define RCC_TZCR_TZEN			BIT(0)
-#define RCC_TZCR_MCKPROT		BIT(1)
+/* RCC_OCENSETR register fields */
+#define RCC_OCENSETR_HSION			BIT(0)
+#define RCC_OCENSETR_HSIKERON			BIT(1)
+#define RCC_OCENSETR_CSION			BIT(4)
+#define RCC_OCENSETR_CSIKERON			BIT(5)
+#define RCC_OCENSETR_DIGBYP			BIT(7)
+#define RCC_OCENSETR_HSEON			BIT(8)
+#define RCC_OCENSETR_HSEKERON			BIT(9)
+#define RCC_OCENSETR_HSEBYP			BIT(10)
+#define RCC_OCENSETR_HSECSSON			BIT(11)
 
-/* Used for most of RCC_<x>SELR registers */
-#define RCC_SELR_SRC_MASK		GENMASK(2, 0)
-#define RCC_SELR_REFCLK_SRC_MASK	GENMASK(1, 0)
-#define RCC_SELR_SRCRDY			BIT(31)
+/* RCC_OCENCLRR register fields */
+#define RCC_OCENCLRR_HSION			BIT(0)
+#define RCC_OCENCLRR_HSIKERON			BIT(1)
+#define RCC_OCENCLRR_CSION			BIT(4)
+#define RCC_OCENCLRR_CSIKERON			BIT(5)
+#define RCC_OCENCLRR_DIGBYP			BIT(7)
+#define RCC_OCENCLRR_HSEON			BIT(8)
+#define RCC_OCENCLRR_HSEKERON			BIT(9)
+#define RCC_OCENCLRR_HSEBYP			BIT(10)
 
-/* Values of RCC_MPCKSELR register */
-#define RCC_MPCKSELR_HSI		0x00000000
-#define RCC_MPCKSELR_HSE		0x00000001
-#define RCC_MPCKSELR_PLL		0x00000002
-#define RCC_MPCKSELR_PLL_MPUDIV		0x00000003
-#define RCC_MPCKSELR_MPUSRC_MASK	GENMASK(1, 0)
-#define RCC_MPCKSELR_MPUSRC_SHIFT	0
+/* RCC_HSICFGR register fields */
+#define RCC_HSICFGR_HSIDIV_MASK			GENMASK(1, 0)
+#define RCC_HSICFGR_HSIDIV_SHIFT		0
+#define RCC_HSICFGR_HSITRIM_MASK		GENMASK(14, 8)
+#define RCC_HSICFGR_HSITRIM_SHIFT		8
+#define RCC_HSICFGR_HSICAL_MASK			GENMASK(24, 16)
+#define RCC_HSICFGR_HSICAL_SHIFT		16
+#define RCC_HSICFGR_HSICAL_TEMP_MASK		GENMASK(27, 25)
 
-/* Values of RCC_ASSCKSELR register */
-#define RCC_ASSCKSELR_HSI		0x00000000
-#define RCC_ASSCKSELR_HSE		0x00000001
-#define RCC_ASSCKSELR_PLL		0x00000002
+/* RCC_CSICFGR register fields */
+#define RCC_CSICFGR_CSITRIM_MASK		GENMASK(12, 8)
+#define RCC_CSICFGR_CSITRIM_SHIFT		8
+#define RCC_CSICFGR_CSICAL_MASK			GENMASK(23, 16)
+#define RCC_CSICFGR_CSICAL_SHIFT		16
 
-/* Values of RCC_MSSCKSELR register */
-#define RCC_MSSCKSELR_HSI		0x00000000
-#define RCC_MSSCKSELR_HSE		0x00000001
-#define RCC_MSSCKSELR_CSI		0x00000002
-#define RCC_MSSCKSELR_PLL		0x00000003
+/* RCC_MPCKSELR register fields */
+#define RCC_MPCKSELR_HSI			0x00000000
+#define RCC_MPCKSELR_HSE			0x00000001
+#define RCC_MPCKSELR_PLL			0x00000002
+#define RCC_MPCKSELR_PLL_MPUDIV			0x00000003
+#define RCC_MPCKSELR_MPUSRC_MASK		GENMASK(1, 0)
+#define RCC_MPCKSELR_MPUSRC_SHIFT		0
+#define RCC_MPCKSELR_MPUSRCRDY			BIT(31)
 
-/* Values of RCC_CPERCKSELR register */
-#define RCC_CPERCKSELR_HSI		0x00000000
-#define RCC_CPERCKSELR_CSI		0x00000001
-#define RCC_CPERCKSELR_HSE		0x00000002
-#define RCC_CPERCKSELR_PERSRC_MASK	GENMASK(1, 0)
-#define RCC_CPERCKSELR_PERSRC_SHIFT	0
+/* RCC_ASSCKSELR register fields */
+#define RCC_ASSCKSELR_HSI			0x00000000
+#define RCC_ASSCKSELR_HSE			0x00000001
+#define RCC_ASSCKSELR_PLL			0x00000002
+#define RCC_ASSCKSELR_AXISSRC_MASK		GENMASK(2, 0)
+#define RCC_ASSCKSELR_AXISSRC_SHIFT		0
+#define RCC_ASSCKSELR_AXISSRCRDY		BIT(31)
 
-/* Used for most of DIVR register: max div for RTC */
-#define RCC_DIVR_DIV_MASK		GENMASK(5, 0)
-#define RCC_DIVR_DIVRDY			BIT(31)
+/* RCC_RCK12SELR register fields */
+#define RCC_RCK12SELR_PLL12SRC_MASK		GENMASK(1, 0)
+#define RCC_RCK12SELR_PLL12SRC_SHIFT		0
+#define RCC_RCK12SELR_PLL12SRCRDY		BIT(31)
 
-/* Masks for specific DIVR registers */
-#define RCC_APBXDIV_MASK		GENMASK(2, 0)
-#define RCC_MPUDIV_MASK			GENMASK(2, 0)
-#define RCC_AXIDIV_MASK			GENMASK(2, 0)
-#define RCC_MCUDIV_MASK			GENMASK(3, 0)
+/* RCC_MPCKDIVR register fields */
+#define RCC_MPCKDIVR_MPUDIV_MASK		GENMASK(2, 0)
+#define RCC_MPCKDIVR_MPUDIV_SHIFT		0
+#define RCC_MPCKDIVR_MPUDIVRDY			BIT(31)
 
-/* Used for TIMER Prescaler */
-#define RCC_TIMGXPRER_TIMGXPRE		BIT(0)
+/* RCC_AXIDIVR register fields */
+#define RCC_AXIDIVR_AXIDIV_MASK			GENMASK(2, 0)
+#define RCC_AXIDIVR_AXIDIV_SHIFT		0
+#define RCC_AXIDIVR_AXIDIVRDY			BIT(31)
 
-/* Offset between RCC_MP_xxxENSETR and RCC_MP_xxxENCLRR registers */
-#define RCC_MP_ENCLRR_OFFSET		U(4)
+/* RCC_APB4DIVR register fields */
+#define RCC_APB4DIVR_APB4DIV_MASK		GENMASK(2, 0)
+#define RCC_APB4DIVR_APB4DIV_SHIFT		0
+#define RCC_APB4DIVR_APB4DIVRDY			BIT(31)
 
-/* Offset between RCC_xxxRSTSETR and RCC_xxxRSTCLRR registers */
-#define RCC_RSTCLRR_OFFSET		U(4)
+/* RCC_APB5DIVR register fields */
+#define RCC_APB5DIVR_APB5DIV_MASK		GENMASK(2, 0)
+#define RCC_APB5DIVR_APB5DIV_SHIFT		0
+#define RCC_APB5DIVR_APB5DIVRDY			BIT(31)
 
-/* Fields of RCC_BDCR register */
-#define RCC_BDCR_LSEON			BIT(0)
-#define RCC_BDCR_LSEBYP			BIT(1)
-#define RCC_BDCR_LSERDY			BIT(2)
-#define RCC_BDCR_DIGBYP			BIT(3)
-#define RCC_BDCR_LSEDRV_MASK		GENMASK(5, 4)
-#define RCC_BDCR_LSEDRV_SHIFT		4
-#define RCC_BDCR_LSECSSON		BIT(8)
-#define RCC_BDCR_RTCCKEN		BIT(20)
-#define RCC_BDCR_RTCSRC_MASK		GENMASK(17, 16)
-#define RCC_BDCR_RTCSRC_SHIFT		16
-#define RCC_BDCR_VSWRST			BIT(31)
+/* RCC_RTCDIVR register fields */
+#define RCC_RTCDIVR_RTCDIV_MASK			GENMASK(5, 0)
+#define RCC_RTCDIVR_RTCDIV_SHIFT		0
 
-/* Fields of RCC_RDLSICR register */
-#define RCC_RDLSICR_LSION		BIT(0)
-#define RCC_RDLSICR_LSIRDY		BIT(1)
+/* RCC_MSSCKSELR register fields */
+#define RCC_MSSCKSELR_HSI			0x00000000
+#define RCC_MSSCKSELR_HSE			0x00000001
+#define RCC_MSSCKSELR_CSI			0x00000002
+#define RCC_MSSCKSELR_PLL			0x00000003
+#define RCC_MSSCKSELR_MCUSSRC_MASK		GENMASK(1, 0)
+#define RCC_MSSCKSELR_MCUSSRC_SHIFT		0
+#define RCC_MSSCKSELR_MCUSSRCRDY		BIT(31)
 
-/* Used for all RCC_PLL<n>CR registers */
-#define RCC_PLLNCR_PLLON		BIT(0)
-#define RCC_PLLNCR_PLLRDY		BIT(1)
-#define RCC_PLLNCR_SSCG_CTRL		BIT(2)
-#define RCC_PLLNCR_DIVPEN		BIT(4)
-#define RCC_PLLNCR_DIVQEN		BIT(5)
-#define RCC_PLLNCR_DIVREN		BIT(6)
-#define RCC_PLLNCR_DIVEN_SHIFT		4
+/* RCC_PLL1CR register fields */
+#define RCC_PLL1CR_PLLON			BIT(0)
+#define RCC_PLL1CR_PLL1RDY			BIT(1)
+#define RCC_PLL1CR_SSCG_CTRL			BIT(2)
+#define RCC_PLL1CR_DIVPEN			BIT(4)
+#define RCC_PLL1CR_DIVQEN			BIT(5)
+#define RCC_PLL1CR_DIVREN			BIT(6)
 
-/* Used for all RCC_PLL<n>CFGR1 registers */
-#define RCC_PLLNCFGR1_DIVM_SHIFT	16
-#define RCC_PLLNCFGR1_DIVM_MASK		GENMASK(21, 16)
-#define RCC_PLLNCFGR1_DIVN_SHIFT	0
-#define RCC_PLLNCFGR1_DIVN_MASK		GENMASK(8, 0)
-/* Only for PLL3 and PLL4 */
-#define RCC_PLLNCFGR1_IFRGE_SHIFT	24
-#define RCC_PLLNCFGR1_IFRGE_MASK	GENMASK(25, 24)
+/* RCC_PLL1CFGR1 register fields */
+#define RCC_PLL1CFGR1_DIVN_MASK			GENMASK(8, 0)
+#define RCC_PLL1CFGR1_DIVN_SHIFT		0
+#define RCC_PLL1CFGR1_DIVM1_MASK		GENMASK(21, 16)
+#define RCC_PLL1CFGR1_DIVM1_SHIFT		16
 
-/* Used for all RCC_PLL<n>CFGR2 registers */
-#define RCC_PLLNCFGR2_DIVX_MASK		GENMASK(6, 0)
-#define RCC_PLLNCFGR2_DIVP_SHIFT	0
-#define RCC_PLLNCFGR2_DIVP_MASK		GENMASK(6, 0)
-#define RCC_PLLNCFGR2_DIVQ_SHIFT	8
-#define RCC_PLLNCFGR2_DIVQ_MASK		GENMASK(14, 8)
-#define RCC_PLLNCFGR2_DIVR_SHIFT	16
-#define RCC_PLLNCFGR2_DIVR_MASK		GENMASK(22, 16)
+/* RCC_PLL1CFGR2 register fields */
+#define RCC_PLL1CFGR2_DIVP_MASK			GENMASK(6, 0)
+#define RCC_PLL1CFGR2_DIVP_SHIFT		0
+#define RCC_PLL1CFGR2_DIVQ_MASK			GENMASK(14, 8)
+#define RCC_PLL1CFGR2_DIVQ_SHIFT		8
+#define RCC_PLL1CFGR2_DIVR_MASK			GENMASK(22, 16)
+#define RCC_PLL1CFGR2_DIVR_SHIFT		16
 
-/* Used for all RCC_PLL<n>FRACR registers */
-#define RCC_PLLNFRACR_FRACV_SHIFT	3
-#define RCC_PLLNFRACR_FRACV_MASK	GENMASK(15, 3)
-#define RCC_PLLNFRACR_FRACLE		BIT(16)
+/* RCC_PLL1FRACR register fields */
+#define RCC_PLL1FRACR_FRACV_MASK		GENMASK(15, 3)
+#define RCC_PLL1FRACR_FRACV_SHIFT		3
+#define RCC_PLL1FRACR_FRACLE			BIT(16)
 
-/* Used for all RCC_PLL<n>CSGR registers */
-#define RCC_PLLNCSGR_INC_STEP_SHIFT	16
-#define RCC_PLLNCSGR_INC_STEP_MASK	GENMASK(30, 16)
-#define RCC_PLLNCSGR_MOD_PER_SHIFT	0
-#define RCC_PLLNCSGR_MOD_PER_MASK	GENMASK(12, 0)
-#define RCC_PLLNCSGR_SSCG_MODE_SHIFT	15
-#define RCC_PLLNCSGR_SSCG_MODE_MASK	BIT(15)
+/* RCC_PLL1CSGR register fields */
+#define RCC_PLL1CSGR_MOD_PER_MASK		GENMASK(12, 0)
+#define RCC_PLL1CSGR_MOD_PER_SHIFT		0
+#define RCC_PLL1CSGR_TPDFN_DIS			BIT(13)
+#define RCC_PLL1CSGR_RPDFN_DIS			BIT(14)
+#define RCC_PLL1CSGR_SSCG_MODE			BIT(15)
+#define RCC_PLL1CSGR_INC_STEP_MASK		GENMASK(30, 16)
+#define RCC_PLL1CSGR_INC_STEP_SHIFT		16
 
-/* Used for RCC_OCENSETR and RCC_OCENCLRR registers */
-#define RCC_OCENR_HSION			BIT(0)
-#define RCC_OCENR_HSIKERON		BIT(1)
-#define RCC_OCENR_CSION			BIT(4)
-#define RCC_OCENR_CSIKERON		BIT(5)
-#define RCC_OCENR_DIGBYP		BIT(7)
-#define RCC_OCENR_HSEON			BIT(8)
-#define RCC_OCENR_HSEKERON		BIT(9)
-#define RCC_OCENR_HSEBYP		BIT(10)
-#define RCC_OCENR_HSECSSON		BIT(11)
+/* RCC_PLL2CR register fields */
+#define RCC_PLL2CR_PLLON			BIT(0)
+#define RCC_PLL2CR_PLL2RDY			BIT(1)
+#define RCC_PLL2CR_SSCG_CTRL			BIT(2)
+#define RCC_PLL2CR_DIVPEN			BIT(4)
+#define RCC_PLL2CR_DIVQEN			BIT(5)
+#define RCC_PLL2CR_DIVREN			BIT(6)
 
-/* Fields of RCC_OCRDYR register */
-#define RCC_OCRDYR_HSIRDY		BIT(0)
-#define RCC_OCRDYR_HSIDIVRDY		BIT(2)
-#define RCC_OCRDYR_CSIRDY		BIT(4)
-#define RCC_OCRDYR_HSERDY		BIT(8)
+/* RCC_PLL2CFGR1 register fields */
+#define RCC_PLL2CFGR1_DIVN_MASK			GENMASK(8, 0)
+#define RCC_PLL2CFGR1_DIVN_SHIFT		0
+#define RCC_PLL2CFGR1_DIVM2_MASK		GENMASK(21, 16)
+#define RCC_PLL2CFGR1_DIVM2_SHIFT		16
 
-/* Fields of RCC_DDRITFCR register */
-#define RCC_DDRITFCR_DDRC1EN		BIT(0)
-#define RCC_DDRITFCR_DDRC1LPEN		BIT(1)
-#define RCC_DDRITFCR_DDRC2EN		BIT(2)
-#define RCC_DDRITFCR_DDRC2LPEN		BIT(3)
-#define RCC_DDRITFCR_DDRPHYCEN		BIT(4)
-#define RCC_DDRITFCR_DDRPHYCLPEN	BIT(5)
-#define RCC_DDRITFCR_DDRCAPBEN		BIT(6)
-#define RCC_DDRITFCR_DDRCAPBLPEN	BIT(7)
-#define RCC_DDRITFCR_AXIDCGEN		BIT(8)
-#define RCC_DDRITFCR_DDRPHYCAPBEN	BIT(9)
-#define RCC_DDRITFCR_DDRPHYCAPBLPEN	BIT(10)
-#define RCC_DDRITFCR_DDRCAPBRST		BIT(14)
-#define RCC_DDRITFCR_DDRCAXIRST		BIT(15)
-#define RCC_DDRITFCR_DDRCORERST		BIT(16)
-#define RCC_DDRITFCR_DPHYAPBRST		BIT(17)
-#define RCC_DDRITFCR_DPHYRST		BIT(18)
-#define RCC_DDRITFCR_DPHYCTLRST		BIT(19)
-#define RCC_DDRITFCR_DDRCKMOD_MASK	GENMASK(22, 20)
-#define RCC_DDRITFCR_DDRCKMOD_SHIFT	20
-#define RCC_DDRITFCR_DDRCKMOD_SSR	0
-#define RCC_DDRITFCR_DDRCKMOD_ASR1	BIT(20)
-#define RCC_DDRITFCR_DDRCKMOD_HSR1	BIT(21)
-#define RCC_DDRITFCR_GSKPCTRL		BIT(24)
+/* RCC_PLL2CFGR2 register fields */
+#define RCC_PLL2CFGR2_DIVP_MASK			GENMASK(6, 0)
+#define RCC_PLL2CFGR2_DIVP_SHIFT		0
+#define RCC_PLL2CFGR2_DIVQ_MASK			GENMASK(14, 8)
+#define RCC_PLL2CFGR2_DIVQ_SHIFT		8
+#define RCC_PLL2CFGR2_DIVR_MASK			GENMASK(22, 16)
+#define RCC_PLL2CFGR2_DIVR_SHIFT		16
 
-/* Fields of RCC_HSICFGR register */
-#define RCC_HSICFGR_HSIDIV_MASK		GENMASK(1, 0)
-#define RCC_HSICFGR_HSITRIM_SHIFT	8
-#define RCC_HSICFGR_HSITRIM_MASK	GENMASK(14, 8)
-#define RCC_HSICFGR_HSICAL_SHIFT	16
-#define RCC_HSICFGR_HSICAL_MASK		GENMASK(27, 16)
+/* RCC_PLL2FRACR register fields */
+#define RCC_PLL2FRACR_FRACV_MASK		GENMASK(15, 3)
+#define RCC_PLL2FRACR_FRACV_SHIFT		3
+#define RCC_PLL2FRACR_FRACLE			BIT(16)
 
-/* Fields of RCC_CSICFGR register */
-#define RCC_CSICFGR_CSITRIM_SHIFT	8
-#define RCC_CSICFGR_CSITRIM_MASK	GENMASK(12, 8)
-#define RCC_CSICFGR_CSICAL_SHIFT	16
-#define RCC_CSICFGR_CSICAL_MASK		GENMASK(23, 16)
+/* RCC_PLL2CSGR register fields */
+#define RCC_PLL2CSGR_MOD_PER_MASK		GENMASK(12, 0)
+#define RCC_PLL2CSGR_MOD_PER_SHIFT		0
+#define RCC_PLL2CSGR_TPDFN_DIS			BIT(13)
+#define RCC_PLL2CSGR_RPDFN_DIS			BIT(14)
+#define RCC_PLL2CSGR_SSCG_MODE			BIT(15)
+#define RCC_PLL2CSGR_INC_STEP_MASK		GENMASK(30, 16)
+#define RCC_PLL2CSGR_INC_STEP_SHIFT		16
 
-/* Used for RCC_MCO related operations */
-#define RCC_MCOCFG_MCOON		BIT(12)
-#define RCC_MCOCFG_MCODIV_MASK		GENMASK(7, 4)
-#define RCC_MCOCFG_MCODIV_SHIFT		4
-#define RCC_MCOCFG_MCOSRC_MASK		GENMASK(2, 0)
+/* RCC_I2C46CKSELR register fields */
+#define RCC_I2C46CKSELR_I2C46SRC_MASK		GENMASK(2, 0)
+#define RCC_I2C46CKSELR_I2C46SRC_SHIFT		0
 
-/* Fields of RCC_DBGCFGR register */
-#define RCC_DBGCFGR_DBGCKEN		BIT(8)
+/* RCC_SPI6CKSELR register fields */
+#define RCC_SPI6CKSELR_SPI6SRC_MASK		GENMASK(2, 0)
+#define RCC_SPI6CKSELR_SPI6SRC_SHIFT		0
 
-/* RCC register fields for reset reasons */
-#define RCC_MP_RSTSCLRR_PORRSTF		BIT(0)
-#define RCC_MP_RSTSCLRR_BORRSTF		BIT(1)
-#define RCC_MP_RSTSCLRR_PADRSTF		BIT(2)
-#define RCC_MP_RSTSCLRR_HCSSRSTF	BIT(3)
-#define RCC_MP_RSTSCLRR_VCORERSTF	BIT(4)
-#define RCC_MP_RSTSCLRR_MPSYSRSTF	BIT(6)
-#define RCC_MP_RSTSCLRR_MCSYSRSTF	BIT(7)
-#define RCC_MP_RSTSCLRR_IWDG1RSTF	BIT(8)
-#define RCC_MP_RSTSCLRR_IWDG2RSTF	BIT(9)
-#define RCC_MP_RSTSCLRR_STDBYRSTF	BIT(11)
-#define RCC_MP_RSTSCLRR_CSTDBYRSTF	BIT(12)
-#define RCC_MP_RSTSCLRR_MPUP0RSTF	BIT(13)
-#define RCC_MP_RSTSCLRR_MPUP1RSTF	BIT(14)
+/* RCC_UART1CKSELR register fields */
+#define RCC_UART1CKSELR_UART1SRC_MASK		GENMASK(2, 0)
+#define RCC_UART1CKSELR_UART1SRC_SHIFT		0
 
-/* Global Reset Register */
-#define RCC_MP_GRSTCSETR_MPSYSRST	BIT(0)
-#define RCC_MP_GRSTCSETR_MCURST		BIT(1)
-#define RCC_MP_GRSTCSETR_MPUP0RST	BIT(4)
-#define RCC_MP_GRSTCSETR_MPUP1RST	BIT(5)
+/* RCC_RNG1CKSELR register fields */
+#define RCC_RNG1CKSELR_RNG1SRC_MASK		GENMASK(1, 0)
+#define RCC_RNG1CKSELR_RNG1SRC_SHIFT		0
 
-/* Clock Source Interrupt Flag Register */
-#define RCC_MP_CIFR_MASK		U(0x110F1F)
-#define RCC_MP_CIFR_LSIRDYF		BIT(0)
-#define RCC_MP_CIFR_LSERDYF		BIT(1)
-#define RCC_MP_CIFR_HSIRDYF		BIT(2)
-#define RCC_MP_CIFR_HSERDYF		BIT(3)
-#define RCC_MP_CIFR_CSIRDYF		BIT(4)
-#define RCC_MP_CIFR_PLL1DYF		BIT(8)
-#define RCC_MP_CIFR_PLL2DYF		BIT(9)
-#define RCC_MP_CIFR_PLL3DYF		BIT(10)
-#define RCC_MP_CIFR_PLL4DYF		BIT(11)
-#define RCC_MP_CIFR_WKUPF		BIT(20)
+/* RCC_CPERCKSELR register fields */
+#define RCC_CPERCKSELR_HSI			0x00000000
+#define RCC_CPERCKSELR_CSI			0x00000001
+#define RCC_CPERCKSELR_HSE			0x00000002
+#define RCC_CPERCKSELR_CKPERSRC_MASK		GENMASK(1, 0)
+#define RCC_CPERCKSELR_CKPERSRC_SHIFT		0
 
-/* Stop Request Set Register */
-#define RCC_MP_SREQSETR_STPREQ_P0	BIT(0)
-#define RCC_MP_SREQSETR_STPREQ_P1	BIT(1)
+/* RCC_STGENCKSELR register fields */
+#define RCC_STGENCKSELR_STGENSRC_MASK		GENMASK(1, 0)
+#define RCC_STGENCKSELR_STGENSRC_SHIFT		0
 
-/* Stop Request Clear Register */
-#define RCC_MP_SREQCLRR_STPREQ_P0	BIT(0)
-#define RCC_MP_SREQCLRR_STPREQ_P1	BIT(1)
+/* RCC_DDRITFCR register fields */
+#define RCC_DDRITFCR_DDRC1EN			BIT(0)
+#define RCC_DDRITFCR_DDRC1LPEN			BIT(1)
+#define RCC_DDRITFCR_DDRC2EN			BIT(2)
+#define RCC_DDRITFCR_DDRC2LPEN			BIT(3)
+#define RCC_DDRITFCR_DDRPHYCEN			BIT(4)
+#define RCC_DDRITFCR_DDRPHYCLPEN		BIT(5)
+#define RCC_DDRITFCR_DDRCAPBEN			BIT(6)
+#define RCC_DDRITFCR_DDRCAPBLPEN		BIT(7)
+#define RCC_DDRITFCR_AXIDCGEN			BIT(8)
+#define RCC_DDRITFCR_DDRPHYCAPBEN		BIT(9)
+#define RCC_DDRITFCR_DDRPHYCAPBLPEN		BIT(10)
+#define RCC_DDRITFCR_KERDCG_DLY_MASK		GENMASK(13, 11)
+#define RCC_DDRITFCR_KERDCG_DLY_SHIFT		11
+#define RCC_DDRITFCR_DDRCAPBRST			BIT(14)
+#define RCC_DDRITFCR_DDRCAXIRST			BIT(15)
+#define RCC_DDRITFCR_DDRCORERST			BIT(16)
+#define RCC_DDRITFCR_DPHYAPBRST			BIT(17)
+#define RCC_DDRITFCR_DPHYRST			BIT(18)
+#define RCC_DDRITFCR_DPHYCTLRST			BIT(19)
+#define RCC_DDRITFCR_DDRCKMOD_MASK		GENMASK(22, 20)
+#define RCC_DDRITFCR_DDRCKMOD_SHIFT		20
+#define RCC_DDRITFCR_DDRCKMOD_SSR		0
+#define RCC_DDRITFCR_DDRCKMOD_ASR1		BIT(20)
+#define RCC_DDRITFCR_DDRCKMOD_HSR1		BIT(21)
+#define RCC_DDRITFCR_GSKPMOD			BIT(23)
+#define RCC_DDRITFCR_GSKPCTRL			BIT(24)
+#define RCC_DDRITFCR_DFILP_WIDTH_MASK		GENMASK(27, 25)
+#define RCC_DDRITFCR_DFILP_WIDTH_SHIFT		25
+#define RCC_DDRITFCR_GSKP_DUR_MASK		GENMASK(31, 28)
+#define RCC_DDRITFCR_GSKP_DUR_SHIFT		28
 
-/* Values of RCC_UART24CKSELR register */
-#define RCC_UART24CKSELR_HSI		0x00000002
+/* RCC_MP_BOOTCR register fields */
+#define RCC_MP_BOOTCR_MCU_BEN			BIT(0)
+#define RCC_MP_BOOTCR_MPU_BEN			BIT(1)
 
-/* Values of RCC_MP_APB1ENSETR register */
-#define RCC_MP_APB1ENSETR_UART4EN	BIT(16)
+/* RCC_MP_SREQSETR register fields */
+#define RCC_MP_SREQSETR_STPREQ_P0		BIT(0)
+#define RCC_MP_SREQSETR_STPREQ_P1		BIT(1)
 
-/* Values of RCC_MP_APB5ENSETR register */
-#define RCC_MP_APB5ENSETR_SPI6EN	BIT(0)
-#define RCC_MP_APB5ENSETR_I2C4EN	BIT(2)
-#define RCC_MP_APB5ENSETR_I2C6EN	BIT(3)
-#define RCC_MP_APB5ENSETR_USART1EN	BIT(4)
-#define RCC_MP_APB5ENSETR_RTCAPBEN	BIT(8)
-#define RCC_MP_APB5ENSETR_IWDG1APBEN	BIT(15)
+/* RCC_MP_SREQCLRR register fields */
+#define RCC_MP_SREQCLRR_STPREQ_P0		BIT(0)
+#define RCC_MP_SREQCLRR_STPREQ_P1		BIT(1)
 
-/* Values of RCC_MP_AHB4ENSETR register */
-#define RCC_MP_AHB4ENSETR_GPIOGEN	BIT(6)
-#define RCC_MP_AHB4ENSETR_GPIOHEN	BIT(7)
+/* RCC_MP_GCR register fields */
+#define RCC_MP_GCR_BOOT_MCU			BIT(0)
 
-/* Values of RCC_MP_AHB5ENSETR register */
-#define RCC_MP_AHB5ENSETR_GPIOZEN	BIT(0)
-#define RCC_MP_AHB5ENSETR_CRYP1EN	BIT(4)
-#define RCC_MP_AHB5ENSETR_HASH1EN	BIT(5)
-#define RCC_MP_AHB5ENSETR_RNG1EN	BIT(6)
+/* RCC_MP_APRSTCR register fields */
+#define RCC_MP_APRSTCR_RDCTLEN			BIT(0)
+#define RCC_MP_APRSTCR_RSTTO_MASK		GENMASK(14, 8)
+#define RCC_MP_APRSTCR_RSTTO_SHIFT		8
 
-/* Values of RCC_MP_IWDGFZSETR register */
-#define RCC_MP_IWDGFZSETR_IWDG1		BIT(0)
-#define RCC_MP_IWDGFZSETR_IWDG2		BIT(1)
+/* RCC_MP_APRSTSR register fields */
+#define RCC_MP_APRSTSR_RSTTOV_MASK		GENMASK(14, 8)
+#define RCC_MP_APRSTSR_RSTTOV_SHIFT		8
 
-/* Values of RCC_PWRLPDLYCR register */
-#define RCC_PWRLPDLYCR_PWRLP_DLY_MASK	GENMASK(21, 0)
+/* RCC_BDCR register fields */
+#define RCC_BDCR_LSEON				BIT(0)
+#define RCC_BDCR_LSEBYP				BIT(1)
+#define RCC_BDCR_LSERDY				BIT(2)
+#define RCC_BDCR_DIGBYP				BIT(3)
+#define RCC_BDCR_LSEDRV_MASK			GENMASK(5, 4)
+#define RCC_BDCR_LSEDRV_SHIFT			4
+#define RCC_BDCR_LSECSSON			BIT(8)
+#define RCC_BDCR_LSECSSD			BIT(9)
+#define RCC_BDCR_RTCSRC_MASK			GENMASK(17, 16)
+#define RCC_BDCR_RTCSRC_SHIFT			16
+#define RCC_BDCR_RTCCKEN			BIT(20)
+#define RCC_BDCR_VSWRST				BIT(31)
 
-/* RCC_ASSCKSELR register fields */
-#define RCC_ASSCKSELR_AXISSRC_MASK		GENMASK(2, 0)
-#define RCC_ASSCKSELR_AXISSRC_SHIFT		0
+/* RCC_RDLSICR register fields */
+#define RCC_RDLSICR_LSION			BIT(0)
+#define RCC_RDLSICR_LSIRDY			BIT(1)
+#define RCC_RDLSICR_MRD_MASK			GENMASK(20, 16)
+#define RCC_RDLSICR_MRD_SHIFT			16
+#define RCC_RDLSICR_EADLY_MASK			GENMASK(26, 24)
+#define RCC_RDLSICR_EADLY_SHIFT			24
+#define RCC_RDLSICR_SPARE_MASK			GENMASK(31, 27)
+#define RCC_RDLSICR_SPARE_SHIFT			27
 
-/* RCC_MSSCKSELR register fields */
-#define RCC_MSSCKSELR_MCUSSRC_MASK		GENMASK(1, 0)
-#define RCC_MSSCKSELR_MCUSSRC_SHIFT		0
+/* RCC_APB4RSTSETR register fields */
+#define RCC_APB4RSTSETR_LTDCRST			BIT(0)
+#define RCC_APB4RSTSETR_DSIRST			BIT(4)
+#define RCC_APB4RSTSETR_DDRPERFMRST		BIT(8)
+#define RCC_APB4RSTSETR_USBPHYRST		BIT(16)
 
-/* RCC_I2C46CKSELR register fields */
-#define RCC_I2C46CKSELR_I2C46SRC_MASK		GENMASK(2, 0)
-#define RCC_I2C46CKSELR_I2C46SRC_SHIFT		0
+/* RCC_APB4RSTCLRR register fields */
+#define RCC_APB4RSTCLRR_LTDCRST			BIT(0)
+#define RCC_APB4RSTCLRR_DSIRST			BIT(4)
+#define RCC_APB4RSTCLRR_DDRPERFMRST		BIT(8)
+#define RCC_APB4RSTCLRR_USBPHYRST		BIT(16)
 
-/* RCC_SPI6CKSELR register fields */
-#define RCC_SPI6CKSELR_SPI6SRC_MASK		GENMASK(2, 0)
-#define RCC_SPI6CKSELR_SPI6SRC_SHIFT		0
+/* RCC_APB5RSTSETR register fields */
+#define RCC_APB5RSTSETR_SPI6RST			BIT(0)
+#define RCC_APB5RSTSETR_I2C4RST			BIT(2)
+#define RCC_APB5RSTSETR_I2C6RST			BIT(3)
+#define RCC_APB5RSTSETR_USART1RST		BIT(4)
+#define RCC_APB5RSTSETR_STGENRST		BIT(20)
 
-/* RCC_UART1CKSELR register fields */
-#define RCC_UART1CKSELR_UART1SRC_MASK		GENMASK(2, 0)
-#define RCC_UART1CKSELR_UART1SRC_SHIFT		0
+/* RCC_APB5RSTCLRR register fields */
+#define RCC_APB5RSTCLRR_SPI6RST			BIT(0)
+#define RCC_APB5RSTCLRR_I2C4RST			BIT(2)
+#define RCC_APB5RSTCLRR_I2C6RST			BIT(3)
+#define RCC_APB5RSTCLRR_USART1RST		BIT(4)
+#define RCC_APB5RSTCLRR_STGENRST		BIT(20)
 
-/* RCC_RNG1CKSELR register fields */
-#define RCC_RNG1CKSELR_RNG1SRC_MASK		GENMASK(1, 0)
-#define RCC_RNG1CKSELR_RNG1SRC_SHIFT		0
+/* RCC_AHB5RSTSETR register fields */
+#define RCC_AHB5RSTSETR_GPIOZRST		BIT(0)
+#define RCC_AHB5RSTSETR_CRYP1RST		BIT(4)
+#define RCC_AHB5RSTSETR_HASH1RST		BIT(5)
+#define RCC_AHB5RSTSETR_RNG1RST			BIT(6)
+#define RCC_AHB5RSTSETR_AXIMCRST		BIT(16)
 
-/* RCC_STGENCKSELR register fields */
-#define RCC_STGENCKSELR_STGENSRC_MASK		GENMASK(1, 0)
-#define RCC_STGENCKSELR_STGENSRC_SHIFT		0
+/* RCC_AHB5RSTCLRR register fields */
+#define RCC_AHB5RSTCLRR_GPIOZRST		BIT(0)
+#define RCC_AHB5RSTCLRR_CRYP1RST		BIT(4)
+#define RCC_AHB5RSTCLRR_HASH1RST		BIT(5)
+#define RCC_AHB5RSTCLRR_RNG1RST			BIT(6)
+#define RCC_AHB5RSTCLRR_AXIMCRST		BIT(16)
+
+/* RCC_AHB6RSTSETR register fields */
+#define RCC_AHB6RSTSETR_GPURST			BIT(5)
+#define RCC_AHB6RSTSETR_ETHMACRST		BIT(10)
+#define RCC_AHB6RSTSETR_FMCRST			BIT(12)
+#define RCC_AHB6RSTSETR_QSPIRST			BIT(14)
+#define RCC_AHB6RSTSETR_SDMMC1RST		BIT(16)
+#define RCC_AHB6RSTSETR_SDMMC2RST		BIT(17)
+#define RCC_AHB6RSTSETR_CRC1RST			BIT(20)
+#define RCC_AHB6RSTSETR_USBHRST			BIT(24)
+
+/* RCC_AHB6RSTCLRR register fields */
+#define RCC_AHB6RSTCLRR_ETHMACRST		BIT(10)
+#define RCC_AHB6RSTCLRR_FMCRST			BIT(12)
+#define RCC_AHB6RSTCLRR_QSPIRST			BIT(14)
+#define RCC_AHB6RSTCLRR_SDMMC1RST		BIT(16)
+#define RCC_AHB6RSTCLRR_SDMMC2RST		BIT(17)
+#define RCC_AHB6RSTCLRR_CRC1RST			BIT(20)
+#define RCC_AHB6RSTCLRR_USBHRST			BIT(24)
+
+/* RCC_TZAHB6RSTSETR register fields */
+#define RCC_TZAHB6RSTSETR_MDMARST		BIT(0)
+
+/* RCC_TZAHB6RSTCLRR register fields */
+#define RCC_TZAHB6RSTCLRR_MDMARST		BIT(0)
+
+/* RCC_MP_APB4ENSETR register fields */
+#define RCC_MP_APB4ENSETR_LTDCEN		BIT(0)
+#define RCC_MP_APB4ENSETR_DSIEN			BIT(4)
+#define RCC_MP_APB4ENSETR_DDRPERFMEN		BIT(8)
+#define RCC_MP_APB4ENSETR_IWDG2APBEN		BIT(15)
+#define RCC_MP_APB4ENSETR_USBPHYEN		BIT(16)
+#define RCC_MP_APB4ENSETR_STGENROEN		BIT(20)
+
+/* RCC_MP_APB4ENCLRR register fields */
+#define RCC_MP_APB4ENCLRR_LTDCEN		BIT(0)
+#define RCC_MP_APB4ENCLRR_DSIEN			BIT(4)
+#define RCC_MP_APB4ENCLRR_DDRPERFMEN		BIT(8)
+#define RCC_MP_APB4ENCLRR_IWDG2APBEN		BIT(15)
+#define RCC_MP_APB4ENCLRR_USBPHYEN		BIT(16)
+#define RCC_MP_APB4ENCLRR_STGENROEN		BIT(20)
+
+/* RCC_MP_APB5ENSETR register fields */
+#define RCC_MP_APB5ENSETR_SPI6EN		BIT(0)
+#define RCC_MP_APB5ENSETR_I2C4EN		BIT(2)
+#define RCC_MP_APB5ENSETR_I2C6EN		BIT(3)
+#define RCC_MP_APB5ENSETR_USART1EN		BIT(4)
+#define RCC_MP_APB5ENSETR_RTCAPBEN		BIT(8)
+#define RCC_MP_APB5ENSETR_TZC1EN		BIT(11)
+#define RCC_MP_APB5ENSETR_TZC2EN		BIT(12)
+#define RCC_MP_APB5ENSETR_TZPCEN		BIT(13)
+#define RCC_MP_APB5ENSETR_IWDG1APBEN		BIT(15)
+#define RCC_MP_APB5ENSETR_BSECEN		BIT(16)
+#define RCC_MP_APB5ENSETR_STGENEN		BIT(20)
+
+/* RCC_MP_APB5ENCLRR register fields */
+#define RCC_MP_APB5ENCLRR_SPI6EN		BIT(0)
+#define RCC_MP_APB5ENCLRR_I2C4EN		BIT(2)
+#define RCC_MP_APB5ENCLRR_I2C6EN		BIT(3)
+#define RCC_MP_APB5ENCLRR_USART1EN		BIT(4)
+#define RCC_MP_APB5ENCLRR_RTCAPBEN		BIT(8)
+#define RCC_MP_APB5ENCLRR_TZC1EN		BIT(11)
+#define RCC_MP_APB5ENCLRR_TZC2EN		BIT(12)
+#define RCC_MP_APB5ENCLRR_TZPCEN		BIT(13)
+#define RCC_MP_APB5ENCLRR_IWDG1APBEN		BIT(15)
+#define RCC_MP_APB5ENCLRR_BSECEN		BIT(16)
+#define RCC_MP_APB5ENCLRR_STGENEN		BIT(20)
+
+/* RCC_MP_AHB5ENSETR register fields */
+#define RCC_MP_AHB5ENSETR_GPIOZEN		BIT(0)
+#define RCC_MP_AHB5ENSETR_CRYP1EN		BIT(4)
+#define RCC_MP_AHB5ENSETR_HASH1EN		BIT(5)
+#define RCC_MP_AHB5ENSETR_RNG1EN		BIT(6)
+#define RCC_MP_AHB5ENSETR_BKPSRAMEN		BIT(8)
+#define RCC_MP_AHB5ENSETR_AXIMCEN		BIT(16)
+
+/* RCC_MP_AHB5ENCLRR register fields */
+#define RCC_MP_AHB5ENCLRR_GPIOZEN		BIT(0)
+#define RCC_MP_AHB5ENCLRR_CRYP1EN		BIT(4)
+#define RCC_MP_AHB5ENCLRR_HASH1EN		BIT(5)
+#define RCC_MP_AHB5ENCLRR_RNG1EN		BIT(6)
+#define RCC_MP_AHB5ENCLRR_BKPSRAMEN		BIT(8)
+#define RCC_MP_AHB5ENCLRR_AXIMCEN		BIT(16)
+
+/* RCC_MP_AHB6ENSETR register fields */
+#define RCC_MP_AHB6ENSETR_MDMAEN		BIT(0)
+#define RCC_MP_AHB6ENSETR_GPUEN			BIT(5)
+#define RCC_MP_AHB6ENSETR_ETHCKEN		BIT(7)
+#define RCC_MP_AHB6ENSETR_ETHTXEN		BIT(8)
+#define RCC_MP_AHB6ENSETR_ETHRXEN		BIT(9)
+#define RCC_MP_AHB6ENSETR_ETHMACEN		BIT(10)
+#define RCC_MP_AHB6ENSETR_FMCEN			BIT(12)
+#define RCC_MP_AHB6ENSETR_QSPIEN		BIT(14)
+#define RCC_MP_AHB6ENSETR_SDMMC1EN		BIT(16)
+#define RCC_MP_AHB6ENSETR_SDMMC2EN		BIT(17)
+#define RCC_MP_AHB6ENSETR_CRC1EN		BIT(20)
+#define RCC_MP_AHB6ENSETR_USBHEN		BIT(24)
+
+/* RCC_MP_AHB6ENCLRR register fields */
+#define RCC_MP_AHB6ENCLRR_MDMAEN		BIT(0)
+#define RCC_MP_AHB6ENCLRR_GPUEN			BIT(5)
+#define RCC_MP_AHB6ENCLRR_ETHCKEN		BIT(7)
+#define RCC_MP_AHB6ENCLRR_ETHTXEN		BIT(8)
+#define RCC_MP_AHB6ENCLRR_ETHRXEN		BIT(9)
+#define RCC_MP_AHB6ENCLRR_ETHMACEN		BIT(10)
+#define RCC_MP_AHB6ENCLRR_FMCEN			BIT(12)
+#define RCC_MP_AHB6ENCLRR_QSPIEN		BIT(14)
+#define RCC_MP_AHB6ENCLRR_SDMMC1EN		BIT(16)
+#define RCC_MP_AHB6ENCLRR_SDMMC2EN		BIT(17)
+#define RCC_MP_AHB6ENCLRR_CRC1EN		BIT(20)
+#define RCC_MP_AHB6ENCLRR_USBHEN		BIT(24)
+
+/* RCC_MP_TZAHB6ENSETR register fields */
+#define RCC_MP_TZAHB6ENSETR_MDMAEN		BIT(0)
+
+/* RCC_MP_TZAHB6ENCLRR register fields */
+#define RCC_MP_TZAHB6ENCLRR_MDMAEN		BIT(0)
+
+/* RCC_MC_APB4ENSETR register fields */
+#define RCC_MC_APB4ENSETR_LTDCEN		BIT(0)
+#define RCC_MC_APB4ENSETR_DSIEN			BIT(4)
+#define RCC_MC_APB4ENSETR_DDRPERFMEN		BIT(8)
+#define RCC_MC_APB4ENSETR_USBPHYEN		BIT(16)
+#define RCC_MC_APB4ENSETR_STGENROEN		BIT(20)
+
+/* RCC_MC_APB4ENCLRR register fields */
+#define RCC_MC_APB4ENCLRR_LTDCEN		BIT(0)
+#define RCC_MC_APB4ENCLRR_DSIEN			BIT(4)
+#define RCC_MC_APB4ENCLRR_DDRPERFMEN		BIT(8)
+#define RCC_MC_APB4ENCLRR_USBPHYEN		BIT(16)
+#define RCC_MC_APB4ENCLRR_STGENROEN		BIT(20)
+
+/* RCC_MC_APB5ENSETR register fields */
+#define RCC_MC_APB5ENSETR_SPI6EN		BIT(0)
+#define RCC_MC_APB5ENSETR_I2C4EN		BIT(2)
+#define RCC_MC_APB5ENSETR_I2C6EN		BIT(3)
+#define RCC_MC_APB5ENSETR_USART1EN		BIT(4)
+#define RCC_MC_APB5ENSETR_RTCAPBEN		BIT(8)
+#define RCC_MC_APB5ENSETR_TZC1EN		BIT(11)
+#define RCC_MC_APB5ENSETR_TZC2EN		BIT(12)
+#define RCC_MC_APB5ENSETR_TZPCEN		BIT(13)
+#define RCC_MC_APB5ENSETR_BSECEN		BIT(16)
+#define RCC_MC_APB5ENSETR_STGENEN		BIT(20)
+
+/* RCC_MC_APB5ENCLRR register fields */
+#define RCC_MC_APB5ENCLRR_SPI6EN		BIT(0)
+#define RCC_MC_APB5ENCLRR_I2C4EN		BIT(2)
+#define RCC_MC_APB5ENCLRR_I2C6EN		BIT(3)
+#define RCC_MC_APB5ENCLRR_USART1EN		BIT(4)
+#define RCC_MC_APB5ENCLRR_RTCAPBEN		BIT(8)
+#define RCC_MC_APB5ENCLRR_TZC1EN		BIT(11)
+#define RCC_MC_APB5ENCLRR_TZC2EN		BIT(12)
+#define RCC_MC_APB5ENCLRR_TZPCEN		BIT(13)
+#define RCC_MC_APB5ENCLRR_BSECEN		BIT(16)
+#define RCC_MC_APB5ENCLRR_STGENEN		BIT(20)
+
+/* RCC_MC_AHB5ENSETR register fields */
+#define RCC_MC_AHB5ENSETR_GPIOZEN		BIT(0)
+#define RCC_MC_AHB5ENSETR_CRYP1EN		BIT(4)
+#define RCC_MC_AHB5ENSETR_HASH1EN		BIT(5)
+#define RCC_MC_AHB5ENSETR_RNG1EN		BIT(6)
+#define RCC_MC_AHB5ENSETR_BKPSRAMEN		BIT(8)
+
+/* RCC_MC_AHB5ENCLRR register fields */
+#define RCC_MC_AHB5ENCLRR_GPIOZEN		BIT(0)
+#define RCC_MC_AHB5ENCLRR_CRYP1EN		BIT(4)
+#define RCC_MC_AHB5ENCLRR_HASH1EN		BIT(5)
+#define RCC_MC_AHB5ENCLRR_RNG1EN		BIT(6)
+#define RCC_MC_AHB5ENCLRR_BKPSRAMEN		BIT(8)
+
+/* RCC_MC_AHB6ENSETR register fields */
+#define RCC_MC_AHB6ENSETR_MDMAEN		BIT(0)
+#define RCC_MC_AHB6ENSETR_GPUEN			BIT(5)
+#define RCC_MC_AHB6ENSETR_ETHCKEN		BIT(7)
+#define RCC_MC_AHB6ENSETR_ETHTXEN		BIT(8)
+#define RCC_MC_AHB6ENSETR_ETHRXEN		BIT(9)
+#define RCC_MC_AHB6ENSETR_ETHMACEN		BIT(10)
+#define RCC_MC_AHB6ENSETR_FMCEN			BIT(12)
+#define RCC_MC_AHB6ENSETR_QSPIEN		BIT(14)
+#define RCC_MC_AHB6ENSETR_SDMMC1EN		BIT(16)
+#define RCC_MC_AHB6ENSETR_SDMMC2EN		BIT(17)
+#define RCC_MC_AHB6ENSETR_CRC1EN		BIT(20)
+#define RCC_MC_AHB6ENSETR_USBHEN		BIT(24)
+
+/* RCC_MC_AHB6ENCLRR register fields */
+#define RCC_MC_AHB6ENCLRR_MDMAEN		BIT(0)
+#define RCC_MC_AHB6ENCLRR_GPUEN			BIT(5)
+#define RCC_MC_AHB6ENCLRR_ETHCKEN		BIT(7)
+#define RCC_MC_AHB6ENCLRR_ETHTXEN		BIT(8)
+#define RCC_MC_AHB6ENCLRR_ETHRXEN		BIT(9)
+#define RCC_MC_AHB6ENCLRR_ETHMACEN		BIT(10)
+#define RCC_MC_AHB6ENCLRR_FMCEN			BIT(12)
+#define RCC_MC_AHB6ENCLRR_QSPIEN		BIT(14)
+#define RCC_MC_AHB6ENCLRR_SDMMC1EN		BIT(16)
+#define RCC_MC_AHB6ENCLRR_SDMMC2EN		BIT(17)
+#define RCC_MC_AHB6ENCLRR_CRC1EN		BIT(20)
+#define RCC_MC_AHB6ENCLRR_USBHEN		BIT(24)
+
+/* RCC_MP_APB4LPENSETR register fields */
+#define RCC_MP_APB4LPENSETR_LTDCLPEN		BIT(0)
+#define RCC_MP_APB4LPENSETR_DSILPEN		BIT(4)
+#define RCC_MP_APB4LPENSETR_DDRPERFMLPEN	BIT(8)
+#define RCC_MP_APB4LPENSETR_IWDG2APBLPEN	BIT(15)
+#define RCC_MP_APB4LPENSETR_USBPHYLPEN		BIT(16)
+#define RCC_MP_APB4LPENSETR_STGENROLPEN		BIT(20)
+#define RCC_MP_APB4LPENSETR_STGENROSTPEN	BIT(21)
+
+/* RCC_MP_APB4LPENCLRR register fields */
+#define RCC_MP_APB4LPENCLRR_LTDCLPEN		BIT(0)
+#define RCC_MP_APB4LPENCLRR_DSILPEN		BIT(4)
+#define RCC_MP_APB4LPENCLRR_DDRPERFMLPEN	BIT(8)
+#define RCC_MP_APB4LPENCLRR_IWDG2APBLPEN	BIT(15)
+#define RCC_MP_APB4LPENCLRR_USBPHYLPEN		BIT(16)
+#define RCC_MP_APB4LPENCLRR_STGENROLPEN		BIT(20)
+#define RCC_MP_APB4LPENCLRR_STGENROSTPEN	BIT(21)
+
+/* RCC_MP_APB5LPENSETR register fields */
+#define RCC_MP_APB5LPENSETR_SPI6LPEN		BIT(0)
+#define RCC_MP_APB5LPENSETR_I2C4LPEN		BIT(2)
+#define RCC_MP_APB5LPENSETR_I2C6LPEN		BIT(3)
+#define RCC_MP_APB5LPENSETR_USART1LPEN		BIT(4)
+#define RCC_MP_APB5LPENSETR_RTCAPBLPEN		BIT(8)
+#define RCC_MP_APB5LPENSETR_TZC1LPEN		BIT(11)
+#define RCC_MP_APB5LPENSETR_TZC2LPEN		BIT(12)
+#define RCC_MP_APB5LPENSETR_TZPCLPEN		BIT(13)
+#define RCC_MP_APB5LPENSETR_IWDG1APBLPEN	BIT(15)
+#define RCC_MP_APB5LPENSETR_BSECLPEN		BIT(16)
+#define RCC_MP_APB5LPENSETR_STGENLPEN		BIT(20)
+#define RCC_MP_APB5LPENSETR_STGENSTPEN		BIT(21)
+
+/* RCC_MP_APB5LPENCLRR register fields */
+#define RCC_MP_APB5LPENCLRR_SPI6LPEN		BIT(0)
+#define RCC_MP_APB5LPENCLRR_I2C4LPEN		BIT(2)
+#define RCC_MP_APB5LPENCLRR_I2C6LPEN		BIT(3)
+#define RCC_MP_APB5LPENCLRR_USART1LPEN		BIT(4)
+#define RCC_MP_APB5LPENCLRR_RTCAPBLPEN		BIT(8)
+#define RCC_MP_APB5LPENCLRR_TZC1LPEN		BIT(11)
+#define RCC_MP_APB5LPENCLRR_TZC2LPEN		BIT(12)
+#define RCC_MP_APB5LPENCLRR_TZPCLPEN		BIT(13)
+#define RCC_MP_APB5LPENCLRR_IWDG1APBLPEN	BIT(15)
+#define RCC_MP_APB5LPENCLRR_BSECLPEN		BIT(16)
+#define RCC_MP_APB5LPENCLRR_STGENLPEN		BIT(20)
+#define RCC_MP_APB5LPENCLRR_STGENSTPEN		BIT(21)
+
+/* RCC_MP_AHB5LPENSETR register fields */
+#define RCC_MP_AHB5LPENSETR_GPIOZLPEN		BIT(0)
+#define RCC_MP_AHB5LPENSETR_CRYP1LPEN		BIT(4)
+#define RCC_MP_AHB5LPENSETR_HASH1LPEN		BIT(5)
+#define RCC_MP_AHB5LPENSETR_RNG1LPEN		BIT(6)
+#define RCC_MP_AHB5LPENSETR_BKPSRAMLPEN		BIT(8)
+
+/* RCC_MP_AHB5LPENCLRR register fields */
+#define RCC_MP_AHB5LPENCLRR_GPIOZLPEN		BIT(0)
+#define RCC_MP_AHB5LPENCLRR_CRYP1LPEN		BIT(4)
+#define RCC_MP_AHB5LPENCLRR_HASH1LPEN		BIT(5)
+#define RCC_MP_AHB5LPENCLRR_RNG1LPEN		BIT(6)
+#define RCC_MP_AHB5LPENCLRR_BKPSRAMLPEN		BIT(8)
+
+/* RCC_MP_AHB6LPENSETR register fields */
+#define RCC_MP_AHB6LPENSETR_MDMALPEN		BIT(0)
+#define RCC_MP_AHB6LPENSETR_GPULPEN		BIT(5)
+#define RCC_MP_AHB6LPENSETR_ETHCKLPEN		BIT(7)
+#define RCC_MP_AHB6LPENSETR_ETHTXLPEN		BIT(8)
+#define RCC_MP_AHB6LPENSETR_ETHRXLPEN		BIT(9)
+#define RCC_MP_AHB6LPENSETR_ETHMACLPEN		BIT(10)
+#define RCC_MP_AHB6LPENSETR_ETHSTPEN		BIT(11)
+#define RCC_MP_AHB6LPENSETR_FMCLPEN		BIT(12)
+#define RCC_MP_AHB6LPENSETR_QSPILPEN		BIT(14)
+#define RCC_MP_AHB6LPENSETR_SDMMC1LPEN		BIT(16)
+#define RCC_MP_AHB6LPENSETR_SDMMC2LPEN		BIT(17)
+#define RCC_MP_AHB6LPENSETR_CRC1LPEN		BIT(20)
+#define RCC_MP_AHB6LPENSETR_USBHLPEN		BIT(24)
+
+/* RCC_MP_AHB6LPENCLRR register fields */
+#define RCC_MP_AHB6LPENCLRR_MDMALPEN		BIT(0)
+#define RCC_MP_AHB6LPENCLRR_GPULPEN		BIT(5)
+#define RCC_MP_AHB6LPENCLRR_ETHCKLPEN		BIT(7)
+#define RCC_MP_AHB6LPENCLRR_ETHTXLPEN		BIT(8)
+#define RCC_MP_AHB6LPENCLRR_ETHRXLPEN		BIT(9)
+#define RCC_MP_AHB6LPENCLRR_ETHMACLPEN		BIT(10)
+#define RCC_MP_AHB6LPENCLRR_ETHSTPEN		BIT(11)
+#define RCC_MP_AHB6LPENCLRR_FMCLPEN		BIT(12)
+#define RCC_MP_AHB6LPENCLRR_QSPILPEN		BIT(14)
+#define RCC_MP_AHB6LPENCLRR_SDMMC1LPEN		BIT(16)
+#define RCC_MP_AHB6LPENCLRR_SDMMC2LPEN		BIT(17)
+#define RCC_MP_AHB6LPENCLRR_CRC1LPEN		BIT(20)
+#define RCC_MP_AHB6LPENCLRR_USBHLPEN		BIT(24)
+
+/* RCC_MP_TZAHB6LPENSETR register fields */
+#define RCC_MP_TZAHB6LPENSETR_MDMALPEN		BIT(0)
+
+/* RCC_MP_TZAHB6LPENCLRR register fields */
+#define RCC_MP_TZAHB6LPENCLRR_MDMALPEN		BIT(0)
+
+/* RCC_MC_APB4LPENSETR register fields */
+#define RCC_MC_APB4LPENSETR_LTDCLPEN		BIT(0)
+#define RCC_MC_APB4LPENSETR_DSILPEN		BIT(4)
+#define RCC_MC_APB4LPENSETR_DDRPERFMLPEN	BIT(8)
+#define RCC_MC_APB4LPENSETR_USBPHYLPEN		BIT(16)
+#define RCC_MC_APB4LPENSETR_STGENROLPEN		BIT(20)
+#define RCC_MC_APB4LPENSETR_STGENROSTPEN	BIT(21)
+
+/* RCC_MC_APB4LPENCLRR register fields */
+#define RCC_MC_APB4LPENCLRR_LTDCLPEN		BIT(0)
+#define RCC_MC_APB4LPENCLRR_DSILPEN		BIT(4)
+#define RCC_MC_APB4LPENCLRR_DDRPERFMLPEN	BIT(8)
+#define RCC_MC_APB4LPENCLRR_USBPHYLPEN		BIT(16)
+#define RCC_MC_APB4LPENCLRR_STGENROLPEN		BIT(20)
+#define RCC_MC_APB4LPENCLRR_STGENROSTPEN	BIT(21)
+
+/* RCC_MC_APB5LPENSETR register fields */
+#define RCC_MC_APB5LPENSETR_SPI6LPEN		BIT(0)
+#define RCC_MC_APB5LPENSETR_I2C4LPEN		BIT(2)
+#define RCC_MC_APB5LPENSETR_I2C6LPEN		BIT(3)
+#define RCC_MC_APB5LPENSETR_USART1LPEN		BIT(4)
+#define RCC_MC_APB5LPENSETR_RTCAPBLPEN		BIT(8)
+#define RCC_MC_APB5LPENSETR_TZC1LPEN		BIT(11)
+#define RCC_MC_APB5LPENSETR_TZC2LPEN		BIT(12)
+#define RCC_MC_APB5LPENSETR_TZPCLPEN		BIT(13)
+#define RCC_MC_APB5LPENSETR_BSECLPEN		BIT(16)
+#define RCC_MC_APB5LPENSETR_STGENLPEN		BIT(20)
+#define RCC_MC_APB5LPENSETR_STGENSTPEN		BIT(21)
+
+/* RCC_MC_APB5LPENCLRR register fields */
+#define RCC_MC_APB5LPENCLRR_SPI6LPEN		BIT(0)
+#define RCC_MC_APB5LPENCLRR_I2C4LPEN		BIT(2)
+#define RCC_MC_APB5LPENCLRR_I2C6LPEN		BIT(3)
+#define RCC_MC_APB5LPENCLRR_USART1LPEN		BIT(4)
+#define RCC_MC_APB5LPENCLRR_RTCAPBLPEN		BIT(8)
+#define RCC_MC_APB5LPENCLRR_TZC1LPEN		BIT(11)
+#define RCC_MC_APB5LPENCLRR_TZC2LPEN		BIT(12)
+#define RCC_MC_APB5LPENCLRR_TZPCLPEN		BIT(13)
+#define RCC_MC_APB5LPENCLRR_BSECLPEN		BIT(16)
+#define RCC_MC_APB5LPENCLRR_STGENLPEN		BIT(20)
+#define RCC_MC_APB5LPENCLRR_STGENSTPEN		BIT(21)
+
+/* RCC_MC_AHB5LPENSETR register fields */
+#define RCC_MC_AHB5LPENSETR_GPIOZLPEN		BIT(0)
+#define RCC_MC_AHB5LPENSETR_CRYP1LPEN		BIT(4)
+#define RCC_MC_AHB5LPENSETR_HASH1LPEN		BIT(5)
+#define RCC_MC_AHB5LPENSETR_RNG1LPEN		BIT(6)
+#define RCC_MC_AHB5LPENSETR_BKPSRAMLPEN		BIT(8)
+
+/* RCC_MC_AHB5LPENCLRR register fields */
+#define RCC_MC_AHB5LPENCLRR_GPIOZLPEN		BIT(0)
+#define RCC_MC_AHB5LPENCLRR_CRYP1LPEN		BIT(4)
+#define RCC_MC_AHB5LPENCLRR_HASH1LPEN		BIT(5)
+#define RCC_MC_AHB5LPENCLRR_RNG1LPEN		BIT(6)
+#define RCC_MC_AHB5LPENCLRR_BKPSRAMLPEN		BIT(8)
+
+/* RCC_MC_AHB6LPENSETR register fields */
+#define RCC_MC_AHB6LPENSETR_MDMALPEN		BIT(0)
+#define RCC_MC_AHB6LPENSETR_GPULPEN		BIT(5)
+#define RCC_MC_AHB6LPENSETR_ETHCKLPEN		BIT(7)
+#define RCC_MC_AHB6LPENSETR_ETHTXLPEN		BIT(8)
+#define RCC_MC_AHB6LPENSETR_ETHRXLPEN		BIT(9)
+#define RCC_MC_AHB6LPENSETR_ETHMACLPEN		BIT(10)
+#define RCC_MC_AHB6LPENSETR_ETHSTPEN		BIT(11)
+#define RCC_MC_AHB6LPENSETR_FMCLPEN		BIT(12)
+#define RCC_MC_AHB6LPENSETR_QSPILPEN		BIT(14)
+#define RCC_MC_AHB6LPENSETR_SDMMC1LPEN		BIT(16)
+#define RCC_MC_AHB6LPENSETR_SDMMC2LPEN		BIT(17)
+#define RCC_MC_AHB6LPENSETR_CRC1LPEN		BIT(20)
+#define RCC_MC_AHB6LPENSETR_USBHLPEN		BIT(24)
+
+/* RCC_MC_AHB6LPENCLRR register fields */
+#define RCC_MC_AHB6LPENCLRR_MDMALPEN		BIT(0)
+#define RCC_MC_AHB6LPENCLRR_GPULPEN		BIT(5)
+#define RCC_MC_AHB6LPENCLRR_ETHCKLPEN		BIT(7)
+#define RCC_MC_AHB6LPENCLRR_ETHTXLPEN		BIT(8)
+#define RCC_MC_AHB6LPENCLRR_ETHRXLPEN		BIT(9)
+#define RCC_MC_AHB6LPENCLRR_ETHMACLPEN		BIT(10)
+#define RCC_MC_AHB6LPENCLRR_ETHSTPEN		BIT(11)
+#define RCC_MC_AHB6LPENCLRR_FMCLPEN		BIT(12)
+#define RCC_MC_AHB6LPENCLRR_QSPILPEN		BIT(14)
+#define RCC_MC_AHB6LPENCLRR_SDMMC1LPEN		BIT(16)
+#define RCC_MC_AHB6LPENCLRR_SDMMC2LPEN		BIT(17)
+#define RCC_MC_AHB6LPENCLRR_CRC1LPEN		BIT(20)
+#define RCC_MC_AHB6LPENCLRR_USBHLPEN		BIT(24)
+
+/* RCC_BR_RSTSCLRR register fields */
+#define RCC_BR_RSTSCLRR_PORRSTF			BIT(0)
+#define RCC_BR_RSTSCLRR_BORRSTF			BIT(1)
+#define RCC_BR_RSTSCLRR_PADRSTF			BIT(2)
+#define RCC_BR_RSTSCLRR_HCSSRSTF		BIT(3)
+#define RCC_BR_RSTSCLRR_VCORERSTF		BIT(4)
+#define RCC_BR_RSTSCLRR_MPSYSRSTF		BIT(6)
+#define RCC_BR_RSTSCLRR_MCSYSRSTF		BIT(7)
+#define RCC_BR_RSTSCLRR_IWDG1RSTF		BIT(8)
+#define RCC_BR_RSTSCLRR_IWDG2RSTF		BIT(9)
+#define RCC_BR_RSTSCLRR_MPUP0RSTF		BIT(13)
+#define RCC_BR_RSTSCLRR_MPUP1RSTF		BIT(14)
+
+/* RCC_MP_GRSTCSETR register fields */
+#define RCC_MP_GRSTCSETR_MPSYSRST		BIT(0)
+#define RCC_MP_GRSTCSETR_MCURST			BIT(1)
+#define RCC_MP_GRSTCSETR_MPUP0RST		BIT(4)
+#define RCC_MP_GRSTCSETR_MPUP1RST		BIT(5)
+
+/* RCC_MP_RSTSCLRR register fields */
+#define RCC_MP_RSTSCLRR_PORRSTF			BIT(0)
+#define RCC_MP_RSTSCLRR_BORRSTF			BIT(1)
+#define RCC_MP_RSTSCLRR_PADRSTF			BIT(2)
+#define RCC_MP_RSTSCLRR_HCSSRSTF		BIT(3)
+#define RCC_MP_RSTSCLRR_VCORERSTF		BIT(4)
+#define RCC_MP_RSTSCLRR_MPSYSRSTF		BIT(6)
+#define RCC_MP_RSTSCLRR_MCSYSRSTF		BIT(7)
+#define RCC_MP_RSTSCLRR_IWDG1RSTF		BIT(8)
+#define RCC_MP_RSTSCLRR_IWDG2RSTF		BIT(9)
+#define RCC_MP_RSTSCLRR_STDBYRSTF		BIT(11)
+#define RCC_MP_RSTSCLRR_CSTDBYRSTF		BIT(12)
+#define RCC_MP_RSTSCLRR_MPUP0RSTF		BIT(13)
+#define RCC_MP_RSTSCLRR_MPUP1RSTF		BIT(14)
+#define RCC_MP_RSTSCLRR_SPARE			BIT(15)
+
+/* RCC_MP_IWDGFZSETR register fields */
+#define RCC_MP_IWDGFZSETR_FZ_IWDG1		BIT(0)
+#define RCC_MP_IWDGFZSETR_FZ_IWDG2		BIT(1)
+
+/* RCC_MP_IWDGFZCLRR register fields */
+#define RCC_MP_IWDGFZCLRR_FZ_IWDG1		BIT(0)
+#define RCC_MP_IWDGFZCLRR_FZ_IWDG2		BIT(1)
+
+/* RCC_MP_CIER register fields */
+#define RCC_MP_CIER_LSIRDYIE			BIT(0)
+#define RCC_MP_CIER_LSERDYIE			BIT(1)
+#define RCC_MP_CIER_HSIRDYIE			BIT(2)
+#define RCC_MP_CIER_HSERDYIE			BIT(3)
+#define RCC_MP_CIER_CSIRDYIE			BIT(4)
+#define RCC_MP_CIER_PLL1DYIE			BIT(8)
+#define RCC_MP_CIER_PLL2DYIE			BIT(9)
+#define RCC_MP_CIER_PLL3DYIE			BIT(10)
+#define RCC_MP_CIER_PLL4DYIE			BIT(11)
+#define RCC_MP_CIER_LSECSSIE			BIT(16)
+#define RCC_MP_CIER_WKUPIE			BIT(20)
+
+/* RCC_MP_CIFR register fields */
+#define RCC_MP_CIFR_MASK			U(0x110F1F)
+#define RCC_MP_CIFR_LSIRDYF			BIT(0)
+#define RCC_MP_CIFR_LSERDYF			BIT(1)
+#define RCC_MP_CIFR_HSIRDYF			BIT(2)
+#define RCC_MP_CIFR_HSERDYF			BIT(3)
+#define RCC_MP_CIFR_CSIRDYF			BIT(4)
+#define RCC_MP_CIFR_PLL1DYF			BIT(8)
+#define RCC_MP_CIFR_PLL2DYF			BIT(9)
+#define RCC_MP_CIFR_PLL3DYF			BIT(10)
+#define RCC_MP_CIFR_PLL4DYF			BIT(11)
+#define RCC_MP_CIFR_LSECSSF			BIT(16)
+#define RCC_MP_CIFR_WKUPF			BIT(20)
+
+/* RCC_PWRLPDLYCR register fields */
+#define RCC_PWRLPDLYCR_PWRLP_DLY_MASK		GENMASK(21, 0)
+#define RCC_PWRLPDLYCR_PWRLP_DLY_SHIFT		0
+#define RCC_PWRLPDLYCR_MCTMPSKP			BIT(24)
+
+/* RCC_MP_RSTSSETR register fields */
+#define RCC_MP_RSTSSETR_PORRSTF			BIT(0)
+#define RCC_MP_RSTSSETR_BORRSTF			BIT(1)
+#define RCC_MP_RSTSSETR_PADRSTF			BIT(2)
+#define RCC_MP_RSTSSETR_HCSSRSTF		BIT(3)
+#define RCC_MP_RSTSSETR_VCORERSTF		BIT(4)
+#define RCC_MP_RSTSSETR_MPSYSRSTF		BIT(6)
+#define RCC_MP_RSTSSETR_MCSYSRSTF		BIT(7)
+#define RCC_MP_RSTSSETR_IWDG1RSTF		BIT(8)
+#define RCC_MP_RSTSSETR_IWDG2RSTF		BIT(9)
+#define RCC_MP_RSTSSETR_STDBYRSTF		BIT(11)
+#define RCC_MP_RSTSSETR_CSTDBYRSTF		BIT(12)
+#define RCC_MP_RSTSSETR_MPUP0RSTF		BIT(13)
+#define RCC_MP_RSTSSETR_MPUP1RSTF		BIT(14)
+#define RCC_MP_RSTSSETR_SPARE			BIT(15)
+
+/* RCC_MCO1CFGR register fields */
+#define RCC_MCO1CFGR_MCO1SEL_MASK		GENMASK(2, 0)
+#define RCC_MCO1CFGR_MCO1SEL_SHIFT		0
+#define RCC_MCO1CFGR_MCO1DIV_MASK		GENMASK(7, 4)
+#define RCC_MCO1CFGR_MCO1DIV_SHIFT		4
+#define RCC_MCO1CFGR_MCO1ON			BIT(12)
+
+/* RCC_MCO2CFGR register fields */
+#define RCC_MCO2CFGR_MCO2SEL_MASK		GENMASK(2, 0)
+#define RCC_MCO2CFGR_MCO2SEL_SHIFT		0
+#define RCC_MCO2CFGR_MCO2DIV_MASK		GENMASK(7, 4)
+#define RCC_MCO2CFGR_MCO2DIV_SHIFT		4
+#define RCC_MCO2CFGR_MCO2ON			BIT(12)
+
+/* RCC_OCRDYR register fields */
+#define RCC_OCRDYR_HSIRDY			BIT(0)
+#define RCC_OCRDYR_HSIDIVRDY			BIT(2)
+#define RCC_OCRDYR_CSIRDY			BIT(4)
+#define RCC_OCRDYR_HSERDY			BIT(8)
+#define RCC_OCRDYR_MPUCKRDY			BIT(23)
+#define RCC_OCRDYR_AXICKRDY			BIT(24)
+#define RCC_OCRDYR_CKREST			BIT(25)
+
+/* RCC_DBGCFGR register fields */
+#define RCC_DBGCFGR_TRACEDIV_MASK		GENMASK(2, 0)
+#define RCC_DBGCFGR_TRACEDIV_SHIFT		0
+#define RCC_DBGCFGR_DBGCKEN			BIT(8)
+#define RCC_DBGCFGR_TRACECKEN			BIT(9)
+#define RCC_DBGCFGR_DBGRST			BIT(12)
+
+/* RCC_RCK3SELR register fields */
+#define RCC_RCK3SELR_PLL3SRC_MASK		GENMASK(1, 0)
+#define RCC_RCK3SELR_PLL3SRC_SHIFT		0
+#define RCC_RCK3SELR_PLL3SRCRDY			BIT(31)
+
+/* RCC_RCK4SELR register fields */
+#define RCC_RCK4SELR_PLL4SRC_MASK		GENMASK(1, 0)
+#define RCC_RCK4SELR_PLL4SRC_SHIFT		0
+#define RCC_RCK4SELR_PLL4SRCRDY			BIT(31)
+
+/* RCC_TIMG1PRER register fields */
+#define RCC_TIMG1PRER_TIMG1PRE			BIT(0)
+#define RCC_TIMG1PRER_TIMG1PRERDY		BIT(31)
+
+/* RCC_TIMG2PRER register fields */
+#define RCC_TIMG2PRER_TIMG2PRE			BIT(0)
+#define RCC_TIMG2PRER_TIMG2PRERDY		BIT(31)
+
+/* RCC_MCUDIVR register fields */
+#define RCC_MCUDIVR_MCUDIV_MASK			GENMASK(3, 0)
+#define RCC_MCUDIVR_MCUDIV_SHIFT		0
+#define RCC_MCUDIVR_MCUDIVRDY			BIT(31)
+
+/* RCC_APB1DIVR register fields */
+#define RCC_APB1DIVR_APB1DIV_MASK		GENMASK(2, 0)
+#define RCC_APB1DIVR_APB1DIV_SHIFT		0
+#define RCC_APB1DIVR_APB1DIVRDY			BIT(31)
+
+/* RCC_APB2DIVR register fields */
+#define RCC_APB2DIVR_APB2DIV_MASK		GENMASK(2, 0)
+#define RCC_APB2DIVR_APB2DIV_SHIFT		0
+#define RCC_APB2DIVR_APB2DIVRDY			BIT(31)
+
+/* RCC_APB3DIVR register fields */
+#define RCC_APB3DIVR_APB3DIV_MASK		GENMASK(2, 0)
+#define RCC_APB3DIVR_APB3DIV_SHIFT		0
+#define RCC_APB3DIVR_APB3DIVRDY			BIT(31)
+
+/* RCC_PLL3CR register fields */
+#define RCC_PLL3CR_PLLON			BIT(0)
+#define RCC_PLL3CR_PLL3RDY			BIT(1)
+#define RCC_PLL3CR_SSCG_CTRL			BIT(2)
+#define RCC_PLL3CR_DIVPEN			BIT(4)
+#define RCC_PLL3CR_DIVQEN			BIT(5)
+#define RCC_PLL3CR_DIVREN			BIT(6)
+
+/* RCC_PLL3CFGR1 register fields */
+#define RCC_PLL3CFGR1_DIVN_MASK			GENMASK(8, 0)
+#define RCC_PLL3CFGR1_DIVN_SHIFT		0
+#define RCC_PLL3CFGR1_DIVM3_MASK		GENMASK(21, 16)
+#define RCC_PLL3CFGR1_DIVM3_SHIFT		16
+#define RCC_PLL3CFGR1_IFRGE_MASK		GENMASK(25, 24)
+#define RCC_PLL3CFGR1_IFRGE_SHIFT		24
+
+/* RCC_PLL3CFGR2 register fields */
+#define RCC_PLL3CFGR2_DIVP_MASK			GENMASK(6, 0)
+#define RCC_PLL3CFGR2_DIVP_SHIFT		0
+#define RCC_PLL3CFGR2_DIVQ_MASK			GENMASK(14, 8)
+#define RCC_PLL3CFGR2_DIVQ_SHIFT		8
+#define RCC_PLL3CFGR2_DIVR_MASK			GENMASK(22, 16)
+#define RCC_PLL3CFGR2_DIVR_SHIFT		16
+
+/* RCC_PLL3FRACR register fields */
+#define RCC_PLL3FRACR_FRACV_MASK		GENMASK(15, 3)
+#define RCC_PLL3FRACR_FRACV_SHIFT		3
+#define RCC_PLL3FRACR_FRACLE			BIT(16)
+
+/* RCC_PLL3CSGR register fields */
+#define RCC_PLL3CSGR_MOD_PER_MASK		GENMASK(12, 0)
+#define RCC_PLL3CSGR_MOD_PER_SHIFT		0
+#define RCC_PLL3CSGR_TPDFN_DIS			BIT(13)
+#define RCC_PLL3CSGR_RPDFN_DIS			BIT(14)
+#define RCC_PLL3CSGR_SSCG_MODE			BIT(15)
+#define RCC_PLL3CSGR_INC_STEP_MASK		GENMASK(30, 16)
+#define RCC_PLL3CSGR_INC_STEP_SHIFT		16
+
+/* RCC_PLL4CR register fields */
+#define RCC_PLL4CR_PLLON			BIT(0)
+#define RCC_PLL4CR_PLL4RDY			BIT(1)
+#define RCC_PLL4CR_SSCG_CTRL			BIT(2)
+#define RCC_PLL4CR_DIVPEN			BIT(4)
+#define RCC_PLL4CR_DIVQEN			BIT(5)
+#define RCC_PLL4CR_DIVREN			BIT(6)
+
+/* RCC_PLL4CFGR1 register fields */
+#define RCC_PLL4CFGR1_DIVN_MASK			GENMASK(8, 0)
+#define RCC_PLL4CFGR1_DIVN_SHIFT		0
+#define RCC_PLL4CFGR1_DIVM4_MASK		GENMASK(21, 16)
+#define RCC_PLL4CFGR1_DIVM4_SHIFT		16
+#define RCC_PLL4CFGR1_IFRGE_MASK		GENMASK(25, 24)
+#define RCC_PLL4CFGR1_IFRGE_SHIFT		24
+
+/* RCC_PLL4CFGR2 register fields */
+#define RCC_PLL4CFGR2_DIVP_MASK			GENMASK(6, 0)
+#define RCC_PLL4CFGR2_DIVP_SHIFT		0
+#define RCC_PLL4CFGR2_DIVQ_MASK			GENMASK(14, 8)
+#define RCC_PLL4CFGR2_DIVQ_SHIFT		8
+#define RCC_PLL4CFGR2_DIVR_MASK			GENMASK(22, 16)
+#define RCC_PLL4CFGR2_DIVR_SHIFT		16
+
+/* RCC_PLL4FRACR register fields */
+#define RCC_PLL4FRACR_FRACV_MASK		GENMASK(15, 3)
+#define RCC_PLL4FRACR_FRACV_SHIFT		3
+#define RCC_PLL4FRACR_FRACLE			BIT(16)
+
+/* RCC_PLL4CSGR register fields */
+#define RCC_PLL4CSGR_MOD_PER_MASK		GENMASK(12, 0)
+#define RCC_PLL4CSGR_MOD_PER_SHIFT		0
+#define RCC_PLL4CSGR_TPDFN_DIS			BIT(13)
+#define RCC_PLL4CSGR_RPDFN_DIS			BIT(14)
+#define RCC_PLL4CSGR_SSCG_MODE			BIT(15)
+#define RCC_PLL4CSGR_INC_STEP_MASK		GENMASK(30, 16)
+#define RCC_PLL4CSGR_INC_STEP_SHIFT		16
 
 /* RCC_I2C12CKSELR register fields */
 #define RCC_I2C12CKSELR_I2C12SRC_MASK		GENMASK(2, 0)
@@ -520,11 +1170,40 @@
 #define RCC_I2C35CKSELR_I2C35SRC_MASK		GENMASK(2, 0)
 #define RCC_I2C35CKSELR_I2C35SRC_SHIFT		0
 
+/* RCC_SAI1CKSELR register fields */
+#define RCC_SAI1CKSELR_SAI1SRC_MASK		GENMASK(2, 0)
+#define RCC_SAI1CKSELR_SAI1SRC_SHIFT		0
+
+/* RCC_SAI2CKSELR register fields */
+#define RCC_SAI2CKSELR_SAI2SRC_MASK		GENMASK(2, 0)
+#define RCC_SAI2CKSELR_SAI2SRC_SHIFT		0
+
+/* RCC_SAI3CKSELR register fields */
+#define RCC_SAI3CKSELR_SAI3SRC_MASK		GENMASK(2, 0)
+#define RCC_SAI3CKSELR_SAI3SRC_SHIFT		0
+
+/* RCC_SAI4CKSELR register fields */
+#define RCC_SAI4CKSELR_SAI4SRC_MASK		GENMASK(2, 0)
+#define RCC_SAI4CKSELR_SAI4SRC_SHIFT		0
+
+/* RCC_SPI2S1CKSELR register fields */
+#define RCC_SPI2S1CKSELR_SPI1SRC_MASK		GENMASK(2, 0)
+#define RCC_SPI2S1CKSELR_SPI1SRC_SHIFT		0
+
+/* RCC_SPI2S23CKSELR register fields */
+#define RCC_SPI2S23CKSELR_SPI23SRC_MASK		GENMASK(2, 0)
+#define RCC_SPI2S23CKSELR_SPI23SRC_SHIFT	0
+
+/* RCC_SPI45CKSELR register fields */
+#define RCC_SPI45CKSELR_SPI45SRC_MASK		GENMASK(2, 0)
+#define RCC_SPI45CKSELR_SPI45SRC_SHIFT		0
+
 /* RCC_UART6CKSELR register fields */
 #define RCC_UART6CKSELR_UART6SRC_MASK		GENMASK(2, 0)
 #define RCC_UART6CKSELR_UART6SRC_SHIFT		0
 
 /* RCC_UART24CKSELR register fields */
+#define RCC_UART24CKSELR_HSI			0x00000002
 #define RCC_UART24CKSELR_UART24SRC_MASK		GENMASK(2, 0)
 #define RCC_UART24CKSELR_UART24SRC_SHIFT	0
 
@@ -547,6 +1226,8 @@
 /* RCC_ETHCKSELR register fields */
 #define RCC_ETHCKSELR_ETHSRC_MASK		GENMASK(1, 0)
 #define RCC_ETHCKSELR_ETHSRC_SHIFT		0
+#define RCC_ETHCKSELR_ETHPTPDIV_MASK		GENMASK(7, 4)
+#define RCC_ETHCKSELR_ETHPTPDIV_SHIFT		4
 
 /* RCC_QSPICKSELR register fields */
 #define RCC_QSPICKSELR_QSPISRC_MASK		GENMASK(1, 0)
@@ -556,10 +1237,1092 @@
 #define RCC_FMCCKSELR_FMCSRC_MASK		GENMASK(1, 0)
 #define RCC_FMCCKSELR_FMCSRC_SHIFT		0
 
+/* RCC_FDCANCKSELR register fields */
+#define RCC_FDCANCKSELR_FDCANSRC_MASK		GENMASK(1, 0)
+#define RCC_FDCANCKSELR_FDCANSRC_SHIFT		0
+
+/* RCC_SPDIFCKSELR register fields */
+#define RCC_SPDIFCKSELR_SPDIFSRC_MASK		GENMASK(1, 0)
+#define RCC_SPDIFCKSELR_SPDIFSRC_SHIFT		0
+
+/* RCC_CECCKSELR register fields */
+#define RCC_CECCKSELR_CECSRC_MASK		GENMASK(1, 0)
+#define RCC_CECCKSELR_CECSRC_SHIFT		0
+
 /* RCC_USBCKSELR register fields */
 #define RCC_USBCKSELR_USBPHYSRC_MASK		GENMASK(1, 0)
 #define RCC_USBCKSELR_USBPHYSRC_SHIFT		0
+#define RCC_USBCKSELR_USBOSRC			BIT(4)
 #define RCC_USBCKSELR_USBOSRC_MASK		BIT(4)
 #define RCC_USBCKSELR_USBOSRC_SHIFT		4
 
+/* RCC_RNG2CKSELR register fields */
+#define RCC_RNG2CKSELR_RNG2SRC_MASK		GENMASK(1, 0)
+#define RCC_RNG2CKSELR_RNG2SRC_SHIFT		0
+
+/* RCC_DSICKSELR register fields */
+#define RCC_DSICKSELR_DSISRC			BIT(0)
+
+/* RCC_ADCCKSELR register fields */
+#define RCC_ADCCKSELR_ADCSRC_MASK		GENMASK(1, 0)
+#define RCC_ADCCKSELR_ADCSRC_SHIFT		0
+
+/* RCC_LPTIM45CKSELR register fields */
+#define RCC_LPTIM45CKSELR_LPTIM45SRC_MASK	GENMASK(2, 0)
+#define RCC_LPTIM45CKSELR_LPTIM45SRC_SHIFT	0
+
+/* RCC_LPTIM23CKSELR register fields */
+#define RCC_LPTIM23CKSELR_LPTIM23SRC_MASK	GENMASK(2, 0)
+#define RCC_LPTIM23CKSELR_LPTIM23SRC_SHIFT	0
+
+/* RCC_LPTIM1CKSELR register fields */
+#define RCC_LPTIM1CKSELR_LPTIM1SRC_MASK		GENMASK(2, 0)
+#define RCC_LPTIM1CKSELR_LPTIM1SRC_SHIFT	0
+
+/* RCC_APB1RSTSETR register fields */
+#define RCC_APB1RSTSETR_TIM2RST			BIT(0)
+#define RCC_APB1RSTSETR_TIM3RST			BIT(1)
+#define RCC_APB1RSTSETR_TIM4RST			BIT(2)
+#define RCC_APB1RSTSETR_TIM5RST			BIT(3)
+#define RCC_APB1RSTSETR_TIM6RST			BIT(4)
+#define RCC_APB1RSTSETR_TIM7RST			BIT(5)
+#define RCC_APB1RSTSETR_TIM12RST		BIT(6)
+#define RCC_APB1RSTSETR_TIM13RST		BIT(7)
+#define RCC_APB1RSTSETR_TIM14RST		BIT(8)
+#define RCC_APB1RSTSETR_LPTIM1RST		BIT(9)
+#define RCC_APB1RSTSETR_SPI2RST			BIT(11)
+#define RCC_APB1RSTSETR_SPI3RST			BIT(12)
+#define RCC_APB1RSTSETR_USART2RST		BIT(14)
+#define RCC_APB1RSTSETR_USART3RST		BIT(15)
+#define RCC_APB1RSTSETR_UART4RST		BIT(16)
+#define RCC_APB1RSTSETR_UART5RST		BIT(17)
+#define RCC_APB1RSTSETR_UART7RST		BIT(18)
+#define RCC_APB1RSTSETR_UART8RST		BIT(19)
+#define RCC_APB1RSTSETR_I2C1RST			BIT(21)
+#define RCC_APB1RSTSETR_I2C2RST			BIT(22)
+#define RCC_APB1RSTSETR_I2C3RST			BIT(23)
+#define RCC_APB1RSTSETR_I2C5RST			BIT(24)
+#define RCC_APB1RSTSETR_SPDIFRST		BIT(26)
+#define RCC_APB1RSTSETR_CECRST			BIT(27)
+#define RCC_APB1RSTSETR_DAC12RST		BIT(29)
+#define RCC_APB1RSTSETR_MDIOSRST		BIT(31)
+
+/* RCC_APB1RSTCLRR register fields */
+#define RCC_APB1RSTCLRR_TIM2RST			BIT(0)
+#define RCC_APB1RSTCLRR_TIM3RST			BIT(1)
+#define RCC_APB1RSTCLRR_TIM4RST			BIT(2)
+#define RCC_APB1RSTCLRR_TIM5RST			BIT(3)
+#define RCC_APB1RSTCLRR_TIM6RST			BIT(4)
+#define RCC_APB1RSTCLRR_TIM7RST			BIT(5)
+#define RCC_APB1RSTCLRR_TIM12RST		BIT(6)
+#define RCC_APB1RSTCLRR_TIM13RST		BIT(7)
+#define RCC_APB1RSTCLRR_TIM14RST		BIT(8)
+#define RCC_APB1RSTCLRR_LPTIM1RST		BIT(9)
+#define RCC_APB1RSTCLRR_SPI2RST			BIT(11)
+#define RCC_APB1RSTCLRR_SPI3RST			BIT(12)
+#define RCC_APB1RSTCLRR_USART2RST		BIT(14)
+#define RCC_APB1RSTCLRR_USART3RST		BIT(15)
+#define RCC_APB1RSTCLRR_UART4RST		BIT(16)
+#define RCC_APB1RSTCLRR_UART5RST		BIT(17)
+#define RCC_APB1RSTCLRR_UART7RST		BIT(18)
+#define RCC_APB1RSTCLRR_UART8RST		BIT(19)
+#define RCC_APB1RSTCLRR_I2C1RST			BIT(21)
+#define RCC_APB1RSTCLRR_I2C2RST			BIT(22)
+#define RCC_APB1RSTCLRR_I2C3RST			BIT(23)
+#define RCC_APB1RSTCLRR_I2C5RST			BIT(24)
+#define RCC_APB1RSTCLRR_SPDIFRST		BIT(26)
+#define RCC_APB1RSTCLRR_CECRST			BIT(27)
+#define RCC_APB1RSTCLRR_DAC12RST		BIT(29)
+#define RCC_APB1RSTCLRR_MDIOSRST		BIT(31)
+
+/* RCC_APB2RSTSETR register fields */
+#define RCC_APB2RSTSETR_TIM1RST			BIT(0)
+#define RCC_APB2RSTSETR_TIM8RST			BIT(1)
+#define RCC_APB2RSTSETR_TIM15RST		BIT(2)
+#define RCC_APB2RSTSETR_TIM16RST		BIT(3)
+#define RCC_APB2RSTSETR_TIM17RST		BIT(4)
+#define RCC_APB2RSTSETR_SPI1RST			BIT(8)
+#define RCC_APB2RSTSETR_SPI4RST			BIT(9)
+#define RCC_APB2RSTSETR_SPI5RST			BIT(10)
+#define RCC_APB2RSTSETR_USART6RST		BIT(13)
+#define RCC_APB2RSTSETR_SAI1RST			BIT(16)
+#define RCC_APB2RSTSETR_SAI2RST			BIT(17)
+#define RCC_APB2RSTSETR_SAI3RST			BIT(18)
+#define RCC_APB2RSTSETR_DFSDMRST		BIT(20)
+#define RCC_APB2RSTSETR_FDCANRST		BIT(24)
+
+/* RCC_APB2RSTCLRR register fields */
+#define RCC_APB2RSTCLRR_TIM1RST			BIT(0)
+#define RCC_APB2RSTCLRR_TIM8RST			BIT(1)
+#define RCC_APB2RSTCLRR_TIM15RST		BIT(2)
+#define RCC_APB2RSTCLRR_TIM16RST		BIT(3)
+#define RCC_APB2RSTCLRR_TIM17RST		BIT(4)
+#define RCC_APB2RSTCLRR_SPI1RST			BIT(8)
+#define RCC_APB2RSTCLRR_SPI4RST			BIT(9)
+#define RCC_APB2RSTCLRR_SPI5RST			BIT(10)
+#define RCC_APB2RSTCLRR_USART6RST		BIT(13)
+#define RCC_APB2RSTCLRR_SAI1RST			BIT(16)
+#define RCC_APB2RSTCLRR_SAI2RST			BIT(17)
+#define RCC_APB2RSTCLRR_SAI3RST			BIT(18)
+#define RCC_APB2RSTCLRR_DFSDMRST		BIT(20)
+#define RCC_APB2RSTCLRR_FDCANRST		BIT(24)
+
+/* RCC_APB3RSTSETR register fields */
+#define RCC_APB3RSTSETR_LPTIM2RST		BIT(0)
+#define RCC_APB3RSTSETR_LPTIM3RST		BIT(1)
+#define RCC_APB3RSTSETR_LPTIM4RST		BIT(2)
+#define RCC_APB3RSTSETR_LPTIM5RST		BIT(3)
+#define RCC_APB3RSTSETR_SAI4RST			BIT(8)
+#define RCC_APB3RSTSETR_SYSCFGRST		BIT(11)
+#define RCC_APB3RSTSETR_VREFRST			BIT(13)
+#define RCC_APB3RSTSETR_TMPSENSRST		BIT(16)
+#define RCC_APB3RSTSETR_PMBCTRLRST		BIT(17)
+
+/* RCC_APB3RSTCLRR register fields */
+#define RCC_APB3RSTCLRR_LPTIM2RST		BIT(0)
+#define RCC_APB3RSTCLRR_LPTIM3RST		BIT(1)
+#define RCC_APB3RSTCLRR_LPTIM4RST		BIT(2)
+#define RCC_APB3RSTCLRR_LPTIM5RST		BIT(3)
+#define RCC_APB3RSTCLRR_SAI4RST			BIT(8)
+#define RCC_APB3RSTCLRR_SYSCFGRST		BIT(11)
+#define RCC_APB3RSTCLRR_VREFRST			BIT(13)
+#define RCC_APB3RSTCLRR_TMPSENSRST		BIT(16)
+#define RCC_APB3RSTCLRR_PMBCTRLRST		BIT(17)
+
+/* RCC_AHB2RSTSETR register fields */
+#define RCC_AHB2RSTSETR_DMA1RST			BIT(0)
+#define RCC_AHB2RSTSETR_DMA2RST			BIT(1)
+#define RCC_AHB2RSTSETR_DMAMUXRST		BIT(2)
+#define RCC_AHB2RSTSETR_ADC12RST		BIT(5)
+#define RCC_AHB2RSTSETR_USBORST			BIT(8)
+#define RCC_AHB2RSTSETR_SDMMC3RST		BIT(16)
+
+/* RCC_AHB2RSTCLRR register fields */
+#define RCC_AHB2RSTCLRR_DMA1RST			BIT(0)
+#define RCC_AHB2RSTCLRR_DMA2RST			BIT(1)
+#define RCC_AHB2RSTCLRR_DMAMUXRST		BIT(2)
+#define RCC_AHB2RSTCLRR_ADC12RST		BIT(5)
+#define RCC_AHB2RSTCLRR_USBORST			BIT(8)
+#define RCC_AHB2RSTCLRR_SDMMC3RST		BIT(16)
+
+/* RCC_AHB3RSTSETR register fields */
+#define RCC_AHB3RSTSETR_DCMIRST			BIT(0)
+#define RCC_AHB3RSTSETR_CRYP2RST		BIT(4)
+#define RCC_AHB3RSTSETR_HASH2RST		BIT(5)
+#define RCC_AHB3RSTSETR_RNG2RST			BIT(6)
+#define RCC_AHB3RSTSETR_CRC2RST			BIT(7)
+#define RCC_AHB3RSTSETR_HSEMRST			BIT(11)
+#define RCC_AHB3RSTSETR_IPCCRST			BIT(12)
+
+/* RCC_AHB3RSTCLRR register fields */
+#define RCC_AHB3RSTCLRR_DCMIRST			BIT(0)
+#define RCC_AHB3RSTCLRR_CRYP2RST		BIT(4)
+#define RCC_AHB3RSTCLRR_HASH2RST		BIT(5)
+#define RCC_AHB3RSTCLRR_RNG2RST			BIT(6)
+#define RCC_AHB3RSTCLRR_CRC2RST			BIT(7)
+#define RCC_AHB3RSTCLRR_HSEMRST			BIT(11)
+#define RCC_AHB3RSTCLRR_IPCCRST			BIT(12)
+
+/* RCC_AHB4RSTSETR register fields */
+#define RCC_AHB4RSTSETR_GPIOARST		BIT(0)
+#define RCC_AHB4RSTSETR_GPIOBRST		BIT(1)
+#define RCC_AHB4RSTSETR_GPIOCRST		BIT(2)
+#define RCC_AHB4RSTSETR_GPIODRST		BIT(3)
+#define RCC_AHB4RSTSETR_GPIOERST		BIT(4)
+#define RCC_AHB4RSTSETR_GPIOFRST		BIT(5)
+#define RCC_AHB4RSTSETR_GPIOGRST		BIT(6)
+#define RCC_AHB4RSTSETR_GPIOHRST		BIT(7)
+#define RCC_AHB4RSTSETR_GPIOIRST		BIT(8)
+#define RCC_AHB4RSTSETR_GPIOJRST		BIT(9)
+#define RCC_AHB4RSTSETR_GPIOKRST		BIT(10)
+
+/* RCC_AHB4RSTCLRR register fields */
+#define RCC_AHB4RSTCLRR_GPIOARST		BIT(0)
+#define RCC_AHB4RSTCLRR_GPIOBRST		BIT(1)
+#define RCC_AHB4RSTCLRR_GPIOCRST		BIT(2)
+#define RCC_AHB4RSTCLRR_GPIODRST		BIT(3)
+#define RCC_AHB4RSTCLRR_GPIOERST		BIT(4)
+#define RCC_AHB4RSTCLRR_GPIOFRST		BIT(5)
+#define RCC_AHB4RSTCLRR_GPIOGRST		BIT(6)
+#define RCC_AHB4RSTCLRR_GPIOHRST		BIT(7)
+#define RCC_AHB4RSTCLRR_GPIOIRST		BIT(8)
+#define RCC_AHB4RSTCLRR_GPIOJRST		BIT(9)
+#define RCC_AHB4RSTCLRR_GPIOKRST		BIT(10)
+
+/* RCC_MP_APB1ENSETR register fields */
+#define RCC_MP_APB1ENSETR_TIM2EN		BIT(0)
+#define RCC_MP_APB1ENSETR_TIM3EN		BIT(1)
+#define RCC_MP_APB1ENSETR_TIM4EN		BIT(2)
+#define RCC_MP_APB1ENSETR_TIM5EN		BIT(3)
+#define RCC_MP_APB1ENSETR_TIM6EN		BIT(4)
+#define RCC_MP_APB1ENSETR_TIM7EN		BIT(5)
+#define RCC_MP_APB1ENSETR_TIM12EN		BIT(6)
+#define RCC_MP_APB1ENSETR_TIM13EN		BIT(7)
+#define RCC_MP_APB1ENSETR_TIM14EN		BIT(8)
+#define RCC_MP_APB1ENSETR_LPTIM1EN		BIT(9)
+#define RCC_MP_APB1ENSETR_SPI2EN		BIT(11)
+#define RCC_MP_APB1ENSETR_SPI3EN		BIT(12)
+#define RCC_MP_APB1ENSETR_USART2EN		BIT(14)
+#define RCC_MP_APB1ENSETR_USART3EN		BIT(15)
+#define RCC_MP_APB1ENSETR_UART4EN		BIT(16)
+#define RCC_MP_APB1ENSETR_UART5EN		BIT(17)
+#define RCC_MP_APB1ENSETR_UART7EN		BIT(18)
+#define RCC_MP_APB1ENSETR_UART8EN		BIT(19)
+#define RCC_MP_APB1ENSETR_I2C1EN		BIT(21)
+#define RCC_MP_APB1ENSETR_I2C2EN		BIT(22)
+#define RCC_MP_APB1ENSETR_I2C3EN		BIT(23)
+#define RCC_MP_APB1ENSETR_I2C5EN		BIT(24)
+#define RCC_MP_APB1ENSETR_SPDIFEN		BIT(26)
+#define RCC_MP_APB1ENSETR_CECEN			BIT(27)
+#define RCC_MP_APB1ENSETR_DAC12EN		BIT(29)
+#define RCC_MP_APB1ENSETR_MDIOSEN		BIT(31)
+
+/* RCC_MP_APB1ENCLRR register fields */
+#define RCC_MP_APB1ENCLRR_TIM2EN		BIT(0)
+#define RCC_MP_APB1ENCLRR_TIM3EN		BIT(1)
+#define RCC_MP_APB1ENCLRR_TIM4EN		BIT(2)
+#define RCC_MP_APB1ENCLRR_TIM5EN		BIT(3)
+#define RCC_MP_APB1ENCLRR_TIM6EN		BIT(4)
+#define RCC_MP_APB1ENCLRR_TIM7EN		BIT(5)
+#define RCC_MP_APB1ENCLRR_TIM12EN		BIT(6)
+#define RCC_MP_APB1ENCLRR_TIM13EN		BIT(7)
+#define RCC_MP_APB1ENCLRR_TIM14EN		BIT(8)
+#define RCC_MP_APB1ENCLRR_LPTIM1EN		BIT(9)
+#define RCC_MP_APB1ENCLRR_SPI2EN		BIT(11)
+#define RCC_MP_APB1ENCLRR_SPI3EN		BIT(12)
+#define RCC_MP_APB1ENCLRR_USART2EN		BIT(14)
+#define RCC_MP_APB1ENCLRR_USART3EN		BIT(15)
+#define RCC_MP_APB1ENCLRR_UART4EN		BIT(16)
+#define RCC_MP_APB1ENCLRR_UART5EN		BIT(17)
+#define RCC_MP_APB1ENCLRR_UART7EN		BIT(18)
+#define RCC_MP_APB1ENCLRR_UART8EN		BIT(19)
+#define RCC_MP_APB1ENCLRR_I2C1EN		BIT(21)
+#define RCC_MP_APB1ENCLRR_I2C2EN		BIT(22)
+#define RCC_MP_APB1ENCLRR_I2C3EN		BIT(23)
+#define RCC_MP_APB1ENCLRR_I2C5EN		BIT(24)
+#define RCC_MP_APB1ENCLRR_SPDIFEN		BIT(26)
+#define RCC_MP_APB1ENCLRR_CECEN			BIT(27)
+#define RCC_MP_APB1ENCLRR_DAC12EN		BIT(29)
+#define RCC_MP_APB1ENCLRR_MDIOSEN		BIT(31)
+
+/* RCC_MP_APB2ENSETR register fields */
+#define RCC_MP_APB2ENSETR_TIM1EN		BIT(0)
+#define RCC_MP_APB2ENSETR_TIM8EN		BIT(1)
+#define RCC_MP_APB2ENSETR_TIM15EN		BIT(2)
+#define RCC_MP_APB2ENSETR_TIM16EN		BIT(3)
+#define RCC_MP_APB2ENSETR_TIM17EN		BIT(4)
+#define RCC_MP_APB2ENSETR_SPI1EN		BIT(8)
+#define RCC_MP_APB2ENSETR_SPI4EN		BIT(9)
+#define RCC_MP_APB2ENSETR_SPI5EN		BIT(10)
+#define RCC_MP_APB2ENSETR_USART6EN		BIT(13)
+#define RCC_MP_APB2ENSETR_SAI1EN		BIT(16)
+#define RCC_MP_APB2ENSETR_SAI2EN		BIT(17)
+#define RCC_MP_APB2ENSETR_SAI3EN		BIT(18)
+#define RCC_MP_APB2ENSETR_DFSDMEN		BIT(20)
+#define RCC_MP_APB2ENSETR_ADFSDMEN		BIT(21)
+#define RCC_MP_APB2ENSETR_FDCANEN		BIT(24)
+
+/* RCC_MP_APB2ENCLRR register fields */
+#define RCC_MP_APB2ENCLRR_TIM1EN		BIT(0)
+#define RCC_MP_APB2ENCLRR_TIM8EN		BIT(1)
+#define RCC_MP_APB2ENCLRR_TIM15EN		BIT(2)
+#define RCC_MP_APB2ENCLRR_TIM16EN		BIT(3)
+#define RCC_MP_APB2ENCLRR_TIM17EN		BIT(4)
+#define RCC_MP_APB2ENCLRR_SPI1EN		BIT(8)
+#define RCC_MP_APB2ENCLRR_SPI4EN		BIT(9)
+#define RCC_MP_APB2ENCLRR_SPI5EN		BIT(10)
+#define RCC_MP_APB2ENCLRR_USART6EN		BIT(13)
+#define RCC_MP_APB2ENCLRR_SAI1EN		BIT(16)
+#define RCC_MP_APB2ENCLRR_SAI2EN		BIT(17)
+#define RCC_MP_APB2ENCLRR_SAI3EN		BIT(18)
+#define RCC_MP_APB2ENCLRR_DFSDMEN		BIT(20)
+#define RCC_MP_APB2ENCLRR_ADFSDMEN		BIT(21)
+#define RCC_MP_APB2ENCLRR_FDCANEN		BIT(24)
+
+/* RCC_MP_APB3ENSETR register fields */
+#define RCC_MP_APB3ENSETR_LPTIM2EN		BIT(0)
+#define RCC_MP_APB3ENSETR_LPTIM3EN		BIT(1)
+#define RCC_MP_APB3ENSETR_LPTIM4EN		BIT(2)
+#define RCC_MP_APB3ENSETR_LPTIM5EN		BIT(3)
+#define RCC_MP_APB3ENSETR_SAI4EN		BIT(8)
+#define RCC_MP_APB3ENSETR_SYSCFGEN		BIT(11)
+#define RCC_MP_APB3ENSETR_VREFEN		BIT(13)
+#define RCC_MP_APB3ENSETR_TMPSENSEN		BIT(16)
+#define RCC_MP_APB3ENSETR_PMBCTRLEN		BIT(17)
+#define RCC_MP_APB3ENSETR_HDPEN			BIT(20)
+
+/* RCC_MP_APB3ENCLRR register fields */
+#define RCC_MP_APB3ENCLRR_LPTIM2EN		BIT(0)
+#define RCC_MP_APB3ENCLRR_LPTIM3EN		BIT(1)
+#define RCC_MP_APB3ENCLRR_LPTIM4EN		BIT(2)
+#define RCC_MP_APB3ENCLRR_LPTIM5EN		BIT(3)
+#define RCC_MP_APB3ENCLRR_SAI4EN		BIT(8)
+#define RCC_MP_APB3ENCLRR_SYSCFGEN		BIT(11)
+#define RCC_MP_APB3ENCLRR_VREFEN		BIT(13)
+#define RCC_MP_APB3ENCLRR_TMPSENSEN		BIT(16)
+#define RCC_MP_APB3ENCLRR_PMBCTRLEN		BIT(17)
+#define RCC_MP_APB3ENCLRR_HDPEN			BIT(20)
+
+/* RCC_MP_AHB2ENSETR register fields */
+#define RCC_MP_AHB2ENSETR_DMA1EN		BIT(0)
+#define RCC_MP_AHB2ENSETR_DMA2EN		BIT(1)
+#define RCC_MP_AHB2ENSETR_DMAMUXEN		BIT(2)
+#define RCC_MP_AHB2ENSETR_ADC12EN		BIT(5)
+#define RCC_MP_AHB2ENSETR_USBOEN		BIT(8)
+#define RCC_MP_AHB2ENSETR_SDMMC3EN		BIT(16)
+
+/* RCC_MP_AHB2ENCLRR register fields */
+#define RCC_MP_AHB2ENCLRR_DMA1EN		BIT(0)
+#define RCC_MP_AHB2ENCLRR_DMA2EN		BIT(1)
+#define RCC_MP_AHB2ENCLRR_DMAMUXEN		BIT(2)
+#define RCC_MP_AHB2ENCLRR_ADC12EN		BIT(5)
+#define RCC_MP_AHB2ENCLRR_USBOEN		BIT(8)
+#define RCC_MP_AHB2ENCLRR_SDMMC3EN		BIT(16)
+
+/* RCC_MP_AHB3ENSETR register fields */
+#define RCC_MP_AHB3ENSETR_DCMIEN		BIT(0)
+#define RCC_MP_AHB3ENSETR_CRYP2EN		BIT(4)
+#define RCC_MP_AHB3ENSETR_HASH2EN		BIT(5)
+#define RCC_MP_AHB3ENSETR_RNG2EN		BIT(6)
+#define RCC_MP_AHB3ENSETR_CRC2EN		BIT(7)
+#define RCC_MP_AHB3ENSETR_HSEMEN		BIT(11)
+#define RCC_MP_AHB3ENSETR_IPCCEN		BIT(12)
+
+/* RCC_MP_AHB3ENCLRR register fields */
+#define RCC_MP_AHB3ENCLRR_DCMIEN		BIT(0)
+#define RCC_MP_AHB3ENCLRR_CRYP2EN		BIT(4)
+#define RCC_MP_AHB3ENCLRR_HASH2EN		BIT(5)
+#define RCC_MP_AHB3ENCLRR_RNG2EN		BIT(6)
+#define RCC_MP_AHB3ENCLRR_CRC2EN		BIT(7)
+#define RCC_MP_AHB3ENCLRR_HSEMEN		BIT(11)
+#define RCC_MP_AHB3ENCLRR_IPCCEN		BIT(12)
+
+/* RCC_MP_AHB4ENSETR register fields */
+#define RCC_MP_AHB4ENSETR_GPIOAEN		BIT(0)
+#define RCC_MP_AHB4ENSETR_GPIOBEN		BIT(1)
+#define RCC_MP_AHB4ENSETR_GPIOCEN		BIT(2)
+#define RCC_MP_AHB4ENSETR_GPIODEN		BIT(3)
+#define RCC_MP_AHB4ENSETR_GPIOEEN		BIT(4)
+#define RCC_MP_AHB4ENSETR_GPIOFEN		BIT(5)
+#define RCC_MP_AHB4ENSETR_GPIOGEN		BIT(6)
+#define RCC_MP_AHB4ENSETR_GPIOHEN		BIT(7)
+#define RCC_MP_AHB4ENSETR_GPIOIEN		BIT(8)
+#define RCC_MP_AHB4ENSETR_GPIOJEN		BIT(9)
+#define RCC_MP_AHB4ENSETR_GPIOKEN		BIT(10)
+
+/* RCC_MP_AHB4ENCLRR register fields */
+#define RCC_MP_AHB4ENCLRR_GPIOAEN		BIT(0)
+#define RCC_MP_AHB4ENCLRR_GPIOBEN		BIT(1)
+#define RCC_MP_AHB4ENCLRR_GPIOCEN		BIT(2)
+#define RCC_MP_AHB4ENCLRR_GPIODEN		BIT(3)
+#define RCC_MP_AHB4ENCLRR_GPIOEEN		BIT(4)
+#define RCC_MP_AHB4ENCLRR_GPIOFEN		BIT(5)
+#define RCC_MP_AHB4ENCLRR_GPIOGEN		BIT(6)
+#define RCC_MP_AHB4ENCLRR_GPIOHEN		BIT(7)
+#define RCC_MP_AHB4ENCLRR_GPIOIEN		BIT(8)
+#define RCC_MP_AHB4ENCLRR_GPIOJEN		BIT(9)
+#define RCC_MP_AHB4ENCLRR_GPIOKEN		BIT(10)
+
+/* RCC_MP_MLAHBENSETR register fields */
+#define RCC_MP_MLAHBENSETR_RETRAMEN		BIT(4)
+
+/* RCC_MP_MLAHBENCLRR register fields */
+#define RCC_MP_MLAHBENCLRR_RETRAMEN		BIT(4)
+
+/* RCC_MC_APB1ENSETR register fields */
+#define RCC_MC_APB1ENSETR_TIM2EN		BIT(0)
+#define RCC_MC_APB1ENSETR_TIM3EN		BIT(1)
+#define RCC_MC_APB1ENSETR_TIM4EN		BIT(2)
+#define RCC_MC_APB1ENSETR_TIM5EN		BIT(3)
+#define RCC_MC_APB1ENSETR_TIM6EN		BIT(4)
+#define RCC_MC_APB1ENSETR_TIM7EN		BIT(5)
+#define RCC_MC_APB1ENSETR_TIM12EN		BIT(6)
+#define RCC_MC_APB1ENSETR_TIM13EN		BIT(7)
+#define RCC_MC_APB1ENSETR_TIM14EN		BIT(8)
+#define RCC_MC_APB1ENSETR_LPTIM1EN		BIT(9)
+#define RCC_MC_APB1ENSETR_SPI2EN		BIT(11)
+#define RCC_MC_APB1ENSETR_SPI3EN		BIT(12)
+#define RCC_MC_APB1ENSETR_USART2EN		BIT(14)
+#define RCC_MC_APB1ENSETR_USART3EN		BIT(15)
+#define RCC_MC_APB1ENSETR_UART4EN		BIT(16)
+#define RCC_MC_APB1ENSETR_UART5EN		BIT(17)
+#define RCC_MC_APB1ENSETR_UART7EN		BIT(18)
+#define RCC_MC_APB1ENSETR_UART8EN		BIT(19)
+#define RCC_MC_APB1ENSETR_I2C1EN		BIT(21)
+#define RCC_MC_APB1ENSETR_I2C2EN		BIT(22)
+#define RCC_MC_APB1ENSETR_I2C3EN		BIT(23)
+#define RCC_MC_APB1ENSETR_I2C5EN		BIT(24)
+#define RCC_MC_APB1ENSETR_SPDIFEN		BIT(26)
+#define RCC_MC_APB1ENSETR_CECEN			BIT(27)
+#define RCC_MC_APB1ENSETR_WWDG1EN		BIT(28)
+#define RCC_MC_APB1ENSETR_DAC12EN		BIT(29)
+#define RCC_MC_APB1ENSETR_MDIOSEN		BIT(31)
+
+/* RCC_MC_APB1ENCLRR register fields */
+#define RCC_MC_APB1ENCLRR_TIM2EN		BIT(0)
+#define RCC_MC_APB1ENCLRR_TIM3EN		BIT(1)
+#define RCC_MC_APB1ENCLRR_TIM4EN		BIT(2)
+#define RCC_MC_APB1ENCLRR_TIM5EN		BIT(3)
+#define RCC_MC_APB1ENCLRR_TIM6EN		BIT(4)
+#define RCC_MC_APB1ENCLRR_TIM7EN		BIT(5)
+#define RCC_MC_APB1ENCLRR_TIM12EN		BIT(6)
+#define RCC_MC_APB1ENCLRR_TIM13EN		BIT(7)
+#define RCC_MC_APB1ENCLRR_TIM14EN		BIT(8)
+#define RCC_MC_APB1ENCLRR_LPTIM1EN		BIT(9)
+#define RCC_MC_APB1ENCLRR_SPI2EN		BIT(11)
+#define RCC_MC_APB1ENCLRR_SPI3EN		BIT(12)
+#define RCC_MC_APB1ENCLRR_USART2EN		BIT(14)
+#define RCC_MC_APB1ENCLRR_USART3EN		BIT(15)
+#define RCC_MC_APB1ENCLRR_UART4EN		BIT(16)
+#define RCC_MC_APB1ENCLRR_UART5EN		BIT(17)
+#define RCC_MC_APB1ENCLRR_UART7EN		BIT(18)
+#define RCC_MC_APB1ENCLRR_UART8EN		BIT(19)
+#define RCC_MC_APB1ENCLRR_I2C1EN		BIT(21)
+#define RCC_MC_APB1ENCLRR_I2C2EN		BIT(22)
+#define RCC_MC_APB1ENCLRR_I2C3EN		BIT(23)
+#define RCC_MC_APB1ENCLRR_I2C5EN		BIT(24)
+#define RCC_MC_APB1ENCLRR_SPDIFEN		BIT(26)
+#define RCC_MC_APB1ENCLRR_CECEN			BIT(27)
+#define RCC_MC_APB1ENCLRR_DAC12EN		BIT(29)
+#define RCC_MC_APB1ENCLRR_MDIOSEN		BIT(31)
+
+/* RCC_MC_APB2ENSETR register fields */
+#define RCC_MC_APB2ENSETR_TIM1EN		BIT(0)
+#define RCC_MC_APB2ENSETR_TIM8EN		BIT(1)
+#define RCC_MC_APB2ENSETR_TIM15EN		BIT(2)
+#define RCC_MC_APB2ENSETR_TIM16EN		BIT(3)
+#define RCC_MC_APB2ENSETR_TIM17EN		BIT(4)
+#define RCC_MC_APB2ENSETR_SPI1EN		BIT(8)
+#define RCC_MC_APB2ENSETR_SPI4EN		BIT(9)
+#define RCC_MC_APB2ENSETR_SPI5EN		BIT(10)
+#define RCC_MC_APB2ENSETR_USART6EN		BIT(13)
+#define RCC_MC_APB2ENSETR_SAI1EN		BIT(16)
+#define RCC_MC_APB2ENSETR_SAI2EN		BIT(17)
+#define RCC_MC_APB2ENSETR_SAI3EN		BIT(18)
+#define RCC_MC_APB2ENSETR_DFSDMEN		BIT(20)
+#define RCC_MC_APB2ENSETR_ADFSDMEN		BIT(21)
+#define RCC_MC_APB2ENSETR_FDCANEN		BIT(24)
+
+/* RCC_MC_APB2ENCLRR register fields */
+#define RCC_MC_APB2ENCLRR_TIM1EN		BIT(0)
+#define RCC_MC_APB2ENCLRR_TIM8EN		BIT(1)
+#define RCC_MC_APB2ENCLRR_TIM15EN		BIT(2)
+#define RCC_MC_APB2ENCLRR_TIM16EN		BIT(3)
+#define RCC_MC_APB2ENCLRR_TIM17EN		BIT(4)
+#define RCC_MC_APB2ENCLRR_SPI1EN		BIT(8)
+#define RCC_MC_APB2ENCLRR_SPI4EN		BIT(9)
+#define RCC_MC_APB2ENCLRR_SPI5EN		BIT(10)
+#define RCC_MC_APB2ENCLRR_USART6EN		BIT(13)
+#define RCC_MC_APB2ENCLRR_SAI1EN		BIT(16)
+#define RCC_MC_APB2ENCLRR_SAI2EN		BIT(17)
+#define RCC_MC_APB2ENCLRR_SAI3EN		BIT(18)
+#define RCC_MC_APB2ENCLRR_DFSDMEN		BIT(20)
+#define RCC_MC_APB2ENCLRR_ADFSDMEN		BIT(21)
+#define RCC_MC_APB2ENCLRR_FDCANEN		BIT(24)
+
+/* RCC_MC_APB3ENSETR register fields */
+#define RCC_MC_APB3ENSETR_LPTIM2EN		BIT(0)
+#define RCC_MC_APB3ENSETR_LPTIM3EN		BIT(1)
+#define RCC_MC_APB3ENSETR_LPTIM4EN		BIT(2)
+#define RCC_MC_APB3ENSETR_LPTIM5EN		BIT(3)
+#define RCC_MC_APB3ENSETR_SAI4EN		BIT(8)
+#define RCC_MC_APB3ENSETR_SYSCFGEN		BIT(11)
+#define RCC_MC_APB3ENSETR_VREFEN		BIT(13)
+#define RCC_MC_APB3ENSETR_TMPSENSEN		BIT(16)
+#define RCC_MC_APB3ENSETR_PMBCTRLEN		BIT(17)
+#define RCC_MC_APB3ENSETR_HDPEN			BIT(20)
+
+/* RCC_MC_APB3ENCLRR register fields */
+#define RCC_MC_APB3ENCLRR_LPTIM2EN		BIT(0)
+#define RCC_MC_APB3ENCLRR_LPTIM3EN		BIT(1)
+#define RCC_MC_APB3ENCLRR_LPTIM4EN		BIT(2)
+#define RCC_MC_APB3ENCLRR_LPTIM5EN		BIT(3)
+#define RCC_MC_APB3ENCLRR_SAI4EN		BIT(8)
+#define RCC_MC_APB3ENCLRR_SYSCFGEN		BIT(11)
+#define RCC_MC_APB3ENCLRR_VREFEN		BIT(13)
+#define RCC_MC_APB3ENCLRR_TMPSENSEN		BIT(16)
+#define RCC_MC_APB3ENCLRR_PMBCTRLEN		BIT(17)
+#define RCC_MC_APB3ENCLRR_HDPEN			BIT(20)
+
+/* RCC_MC_AHB2ENSETR register fields */
+#define RCC_MC_AHB2ENSETR_DMA1EN		BIT(0)
+#define RCC_MC_AHB2ENSETR_DMA2EN		BIT(1)
+#define RCC_MC_AHB2ENSETR_DMAMUXEN		BIT(2)
+#define RCC_MC_AHB2ENSETR_ADC12EN		BIT(5)
+#define RCC_MC_AHB2ENSETR_USBOEN		BIT(8)
+#define RCC_MC_AHB2ENSETR_SDMMC3EN		BIT(16)
+
+/* RCC_MC_AHB2ENCLRR register fields */
+#define RCC_MC_AHB2ENCLRR_DMA1EN		BIT(0)
+#define RCC_MC_AHB2ENCLRR_DMA2EN		BIT(1)
+#define RCC_MC_AHB2ENCLRR_DMAMUXEN		BIT(2)
+#define RCC_MC_AHB2ENCLRR_ADC12EN		BIT(5)
+#define RCC_MC_AHB2ENCLRR_USBOEN		BIT(8)
+#define RCC_MC_AHB2ENCLRR_SDMMC3EN		BIT(16)
+
+/* RCC_MC_AHB3ENSETR register fields */
+#define RCC_MC_AHB3ENSETR_DCMIEN		BIT(0)
+#define RCC_MC_AHB3ENSETR_CRYP2EN		BIT(4)
+#define RCC_MC_AHB3ENSETR_HASH2EN		BIT(5)
+#define RCC_MC_AHB3ENSETR_RNG2EN		BIT(6)
+#define RCC_MC_AHB3ENSETR_CRC2EN		BIT(7)
+#define RCC_MC_AHB3ENSETR_HSEMEN		BIT(11)
+#define RCC_MC_AHB3ENSETR_IPCCEN		BIT(12)
+
+/* RCC_MC_AHB3ENCLRR register fields */
+#define RCC_MC_AHB3ENCLRR_DCMIEN		BIT(0)
+#define RCC_MC_AHB3ENCLRR_CRYP2EN		BIT(4)
+#define RCC_MC_AHB3ENCLRR_HASH2EN		BIT(5)
+#define RCC_MC_AHB3ENCLRR_RNG2EN		BIT(6)
+#define RCC_MC_AHB3ENCLRR_CRC2EN		BIT(7)
+#define RCC_MC_AHB3ENCLRR_HSEMEN		BIT(11)
+#define RCC_MC_AHB3ENCLRR_IPCCEN		BIT(12)
+
+/* RCC_MC_AHB4ENSETR register fields */
+#define RCC_MC_AHB4ENSETR_GPIOAEN		BIT(0)
+#define RCC_MC_AHB4ENSETR_GPIOBEN		BIT(1)
+#define RCC_MC_AHB4ENSETR_GPIOCEN		BIT(2)
+#define RCC_MC_AHB4ENSETR_GPIODEN		BIT(3)
+#define RCC_MC_AHB4ENSETR_GPIOEEN		BIT(4)
+#define RCC_MC_AHB4ENSETR_GPIOFEN		BIT(5)
+#define RCC_MC_AHB4ENSETR_GPIOGEN		BIT(6)
+#define RCC_MC_AHB4ENSETR_GPIOHEN		BIT(7)
+#define RCC_MC_AHB4ENSETR_GPIOIEN		BIT(8)
+#define RCC_MC_AHB4ENSETR_GPIOJEN		BIT(9)
+#define RCC_MC_AHB4ENSETR_GPIOKEN		BIT(10)
+
+/* RCC_MC_AHB4ENCLRR register fields */
+#define RCC_MC_AHB4ENCLRR_GPIOAEN		BIT(0)
+#define RCC_MC_AHB4ENCLRR_GPIOBEN		BIT(1)
+#define RCC_MC_AHB4ENCLRR_GPIOCEN		BIT(2)
+#define RCC_MC_AHB4ENCLRR_GPIODEN		BIT(3)
+#define RCC_MC_AHB4ENCLRR_GPIOEEN		BIT(4)
+#define RCC_MC_AHB4ENCLRR_GPIOFEN		BIT(5)
+#define RCC_MC_AHB4ENCLRR_GPIOGEN		BIT(6)
+#define RCC_MC_AHB4ENCLRR_GPIOHEN		BIT(7)
+#define RCC_MC_AHB4ENCLRR_GPIOIEN		BIT(8)
+#define RCC_MC_AHB4ENCLRR_GPIOJEN		BIT(9)
+#define RCC_MC_AHB4ENCLRR_GPIOKEN		BIT(10)
+
+/* RCC_MC_AXIMENSETR register fields */
+#define RCC_MC_AXIMENSETR_SYSRAMEN		BIT(0)
+
+/* RCC_MC_AXIMENCLRR register fields */
+#define RCC_MC_AXIMENCLRR_SYSRAMEN		BIT(0)
+
+/* RCC_MC_MLAHBENSETR register fields */
+#define RCC_MC_MLAHBENSETR_RETRAMEN		BIT(4)
+
+/* RCC_MC_MLAHBENCLRR register fields */
+#define RCC_MC_MLAHBENCLRR_RETRAMEN		BIT(4)
+
+/* RCC_MP_APB1LPENSETR register fields */
+#define RCC_MP_APB1LPENSETR_TIM2LPEN		BIT(0)
+#define RCC_MP_APB1LPENSETR_TIM3LPEN		BIT(1)
+#define RCC_MP_APB1LPENSETR_TIM4LPEN		BIT(2)
+#define RCC_MP_APB1LPENSETR_TIM5LPEN		BIT(3)
+#define RCC_MP_APB1LPENSETR_TIM6LPEN		BIT(4)
+#define RCC_MP_APB1LPENSETR_TIM7LPEN		BIT(5)
+#define RCC_MP_APB1LPENSETR_TIM12LPEN		BIT(6)
+#define RCC_MP_APB1LPENSETR_TIM13LPEN		BIT(7)
+#define RCC_MP_APB1LPENSETR_TIM14LPEN		BIT(8)
+#define RCC_MP_APB1LPENSETR_LPTIM1LPEN		BIT(9)
+#define RCC_MP_APB1LPENSETR_SPI2LPEN		BIT(11)
+#define RCC_MP_APB1LPENSETR_SPI3LPEN		BIT(12)
+#define RCC_MP_APB1LPENSETR_USART2LPEN		BIT(14)
+#define RCC_MP_APB1LPENSETR_USART3LPEN		BIT(15)
+#define RCC_MP_APB1LPENSETR_UART4LPEN		BIT(16)
+#define RCC_MP_APB1LPENSETR_UART5LPEN		BIT(17)
+#define RCC_MP_APB1LPENSETR_UART7LPEN		BIT(18)
+#define RCC_MP_APB1LPENSETR_UART8LPEN		BIT(19)
+#define RCC_MP_APB1LPENSETR_I2C1LPEN		BIT(21)
+#define RCC_MP_APB1LPENSETR_I2C2LPEN		BIT(22)
+#define RCC_MP_APB1LPENSETR_I2C3LPEN		BIT(23)
+#define RCC_MP_APB1LPENSETR_I2C5LPEN		BIT(24)
+#define RCC_MP_APB1LPENSETR_SPDIFLPEN		BIT(26)
+#define RCC_MP_APB1LPENSETR_CECLPEN		BIT(27)
+#define RCC_MP_APB1LPENSETR_DAC12LPEN		BIT(29)
+#define RCC_MP_APB1LPENSETR_MDIOSLPEN		BIT(31)
+
+/* RCC_MP_APB1LPENCLRR register fields */
+#define RCC_MP_APB1LPENCLRR_TIM2LPEN		BIT(0)
+#define RCC_MP_APB1LPENCLRR_TIM3LPEN		BIT(1)
+#define RCC_MP_APB1LPENCLRR_TIM4LPEN		BIT(2)
+#define RCC_MP_APB1LPENCLRR_TIM5LPEN		BIT(3)
+#define RCC_MP_APB1LPENCLRR_TIM6LPEN		BIT(4)
+#define RCC_MP_APB1LPENCLRR_TIM7LPEN		BIT(5)
+#define RCC_MP_APB1LPENCLRR_TIM12LPEN		BIT(6)
+#define RCC_MP_APB1LPENCLRR_TIM13LPEN		BIT(7)
+#define RCC_MP_APB1LPENCLRR_TIM14LPEN		BIT(8)
+#define RCC_MP_APB1LPENCLRR_LPTIM1LPEN		BIT(9)
+#define RCC_MP_APB1LPENCLRR_SPI2LPEN		BIT(11)
+#define RCC_MP_APB1LPENCLRR_SPI3LPEN		BIT(12)
+#define RCC_MP_APB1LPENCLRR_USART2LPEN		BIT(14)
+#define RCC_MP_APB1LPENCLRR_USART3LPEN		BIT(15)
+#define RCC_MP_APB1LPENCLRR_UART4LPEN		BIT(16)
+#define RCC_MP_APB1LPENCLRR_UART5LPEN		BIT(17)
+#define RCC_MP_APB1LPENCLRR_UART7LPEN		BIT(18)
+#define RCC_MP_APB1LPENCLRR_UART8LPEN		BIT(19)
+#define RCC_MP_APB1LPENCLRR_I2C1LPEN		BIT(21)
+#define RCC_MP_APB1LPENCLRR_I2C2LPEN		BIT(22)
+#define RCC_MP_APB1LPENCLRR_I2C3LPEN		BIT(23)
+#define RCC_MP_APB1LPENCLRR_I2C5LPEN		BIT(24)
+#define RCC_MP_APB1LPENCLRR_SPDIFLPEN		BIT(26)
+#define RCC_MP_APB1LPENCLRR_CECLPEN		BIT(27)
+#define RCC_MP_APB1LPENCLRR_DAC12LPEN		BIT(29)
+#define RCC_MP_APB1LPENCLRR_MDIOSLPEN		BIT(31)
+
+/* RCC_MP_APB2LPENSETR register fields */
+#define RCC_MP_APB2LPENSETR_TIM1LPEN		BIT(0)
+#define RCC_MP_APB2LPENSETR_TIM8LPEN		BIT(1)
+#define RCC_MP_APB2LPENSETR_TIM15LPEN		BIT(2)
+#define RCC_MP_APB2LPENSETR_TIM16LPEN		BIT(3)
+#define RCC_MP_APB2LPENSETR_TIM17LPEN		BIT(4)
+#define RCC_MP_APB2LPENSETR_SPI1LPEN		BIT(8)
+#define RCC_MP_APB2LPENSETR_SPI4LPEN		BIT(9)
+#define RCC_MP_APB2LPENSETR_SPI5LPEN		BIT(10)
+#define RCC_MP_APB2LPENSETR_USART6LPEN		BIT(13)
+#define RCC_MP_APB2LPENSETR_SAI1LPEN		BIT(16)
+#define RCC_MP_APB2LPENSETR_SAI2LPEN		BIT(17)
+#define RCC_MP_APB2LPENSETR_SAI3LPEN		BIT(18)
+#define RCC_MP_APB2LPENSETR_DFSDMLPEN		BIT(20)
+#define RCC_MP_APB2LPENSETR_ADFSDMLPEN		BIT(21)
+#define RCC_MP_APB2LPENSETR_FDCANLPEN		BIT(24)
+
+/* RCC_MP_APB2LPENCLRR register fields */
+#define RCC_MP_APB2LPENCLRR_TIM1LPEN		BIT(0)
+#define RCC_MP_APB2LPENCLRR_TIM8LPEN		BIT(1)
+#define RCC_MP_APB2LPENCLRR_TIM15LPEN		BIT(2)
+#define RCC_MP_APB2LPENCLRR_TIM16LPEN		BIT(3)
+#define RCC_MP_APB2LPENCLRR_TIM17LPEN		BIT(4)
+#define RCC_MP_APB2LPENCLRR_SPI1LPEN		BIT(8)
+#define RCC_MP_APB2LPENCLRR_SPI4LPEN		BIT(9)
+#define RCC_MP_APB2LPENCLRR_SPI5LPEN		BIT(10)
+#define RCC_MP_APB2LPENCLRR_USART6LPEN		BIT(13)
+#define RCC_MP_APB2LPENCLRR_SAI1LPEN		BIT(16)
+#define RCC_MP_APB2LPENCLRR_SAI2LPEN		BIT(17)
+#define RCC_MP_APB2LPENCLRR_SAI3LPEN		BIT(18)
+#define RCC_MP_APB2LPENCLRR_DFSDMLPEN		BIT(20)
+#define RCC_MP_APB2LPENCLRR_ADFSDMLPEN		BIT(21)
+#define RCC_MP_APB2LPENCLRR_FDCANLPEN		BIT(24)
+
+/* RCC_MP_APB3LPENSETR register fields */
+#define RCC_MP_APB3LPENSETR_LPTIM2LPEN		BIT(0)
+#define RCC_MP_APB3LPENSETR_LPTIM3LPEN		BIT(1)
+#define RCC_MP_APB3LPENSETR_LPTIM4LPEN		BIT(2)
+#define RCC_MP_APB3LPENSETR_LPTIM5LPEN		BIT(3)
+#define RCC_MP_APB3LPENSETR_SAI4LPEN		BIT(8)
+#define RCC_MP_APB3LPENSETR_SYSCFGLPEN		BIT(11)
+#define RCC_MP_APB3LPENSETR_VREFLPEN		BIT(13)
+#define RCC_MP_APB3LPENSETR_TMPSENSLPEN		BIT(16)
+#define RCC_MP_APB3LPENSETR_PMBCTRLLPEN		BIT(17)
+
+/* RCC_MP_APB3LPENCLRR register fields */
+#define RCC_MP_APB3LPENCLRR_LPTIM2LPEN		BIT(0)
+#define RCC_MP_APB3LPENCLRR_LPTIM3LPEN		BIT(1)
+#define RCC_MP_APB3LPENCLRR_LPTIM4LPEN		BIT(2)
+#define RCC_MP_APB3LPENCLRR_LPTIM5LPEN		BIT(3)
+#define RCC_MP_APB3LPENCLRR_SAI4LPEN		BIT(8)
+#define RCC_MP_APB3LPENCLRR_SYSCFGLPEN		BIT(11)
+#define RCC_MP_APB3LPENCLRR_VREFLPEN		BIT(13)
+#define RCC_MP_APB3LPENCLRR_TMPSENSLPEN		BIT(16)
+#define RCC_MP_APB3LPENCLRR_PMBCTRLLPEN		BIT(17)
+
+/* RCC_MP_AHB2LPENSETR register fields */
+#define RCC_MP_AHB2LPENSETR_DMA1LPEN		BIT(0)
+#define RCC_MP_AHB2LPENSETR_DMA2LPEN		BIT(1)
+#define RCC_MP_AHB2LPENSETR_DMAMUXLPEN		BIT(2)
+#define RCC_MP_AHB2LPENSETR_ADC12LPEN		BIT(5)
+#define RCC_MP_AHB2LPENSETR_USBOLPEN		BIT(8)
+#define RCC_MP_AHB2LPENSETR_SDMMC3LPEN		BIT(16)
+
+/* RCC_MP_AHB2LPENCLRR register fields */
+#define RCC_MP_AHB2LPENCLRR_DMA1LPEN		BIT(0)
+#define RCC_MP_AHB2LPENCLRR_DMA2LPEN		BIT(1)
+#define RCC_MP_AHB2LPENCLRR_DMAMUXLPEN		BIT(2)
+#define RCC_MP_AHB2LPENCLRR_ADC12LPEN		BIT(5)
+#define RCC_MP_AHB2LPENCLRR_USBOLPEN		BIT(8)
+#define RCC_MP_AHB2LPENCLRR_SDMMC3LPEN		BIT(16)
+
+/* RCC_MP_AHB3LPENSETR register fields */
+#define RCC_MP_AHB3LPENSETR_DCMILPEN		BIT(0)
+#define RCC_MP_AHB3LPENSETR_CRYP2LPEN		BIT(4)
+#define RCC_MP_AHB3LPENSETR_HASH2LPEN		BIT(5)
+#define RCC_MP_AHB3LPENSETR_RNG2LPEN		BIT(6)
+#define RCC_MP_AHB3LPENSETR_CRC2LPEN		BIT(7)
+#define RCC_MP_AHB3LPENSETR_HSEMLPEN		BIT(11)
+#define RCC_MP_AHB3LPENSETR_IPCCLPEN		BIT(12)
+
+/* RCC_MP_AHB3LPENCLRR register fields */
+#define RCC_MP_AHB3LPENCLRR_DCMILPEN		BIT(0)
+#define RCC_MP_AHB3LPENCLRR_CRYP2LPEN		BIT(4)
+#define RCC_MP_AHB3LPENCLRR_HASH2LPEN		BIT(5)
+#define RCC_MP_AHB3LPENCLRR_RNG2LPEN		BIT(6)
+#define RCC_MP_AHB3LPENCLRR_CRC2LPEN		BIT(7)
+#define RCC_MP_AHB3LPENCLRR_HSEMLPEN		BIT(11)
+#define RCC_MP_AHB3LPENCLRR_IPCCLPEN		BIT(12)
+
+/* RCC_MP_AHB4LPENSETR register fields */
+#define RCC_MP_AHB4LPENSETR_GPIOALPEN		BIT(0)
+#define RCC_MP_AHB4LPENSETR_GPIOBLPEN		BIT(1)
+#define RCC_MP_AHB4LPENSETR_GPIOCLPEN		BIT(2)
+#define RCC_MP_AHB4LPENSETR_GPIODLPEN		BIT(3)
+#define RCC_MP_AHB4LPENSETR_GPIOELPEN		BIT(4)
+#define RCC_MP_AHB4LPENSETR_GPIOFLPEN		BIT(5)
+#define RCC_MP_AHB4LPENSETR_GPIOGLPEN		BIT(6)
+#define RCC_MP_AHB4LPENSETR_GPIOHLPEN		BIT(7)
+#define RCC_MP_AHB4LPENSETR_GPIOILPEN		BIT(8)
+#define RCC_MP_AHB4LPENSETR_GPIOJLPEN		BIT(9)
+#define RCC_MP_AHB4LPENSETR_GPIOKLPEN		BIT(10)
+
+/* RCC_MP_AHB4LPENCLRR register fields */
+#define RCC_MP_AHB4LPENCLRR_GPIOALPEN		BIT(0)
+#define RCC_MP_AHB4LPENCLRR_GPIOBLPEN		BIT(1)
+#define RCC_MP_AHB4LPENCLRR_GPIOCLPEN		BIT(2)
+#define RCC_MP_AHB4LPENCLRR_GPIODLPEN		BIT(3)
+#define RCC_MP_AHB4LPENCLRR_GPIOELPEN		BIT(4)
+#define RCC_MP_AHB4LPENCLRR_GPIOFLPEN		BIT(5)
+#define RCC_MP_AHB4LPENCLRR_GPIOGLPEN		BIT(6)
+#define RCC_MP_AHB4LPENCLRR_GPIOHLPEN		BIT(7)
+#define RCC_MP_AHB4LPENCLRR_GPIOILPEN		BIT(8)
+#define RCC_MP_AHB4LPENCLRR_GPIOJLPEN		BIT(9)
+#define RCC_MP_AHB4LPENCLRR_GPIOKLPEN		BIT(10)
+
+/* RCC_MP_AXIMLPENSETR register fields */
+#define RCC_MP_AXIMLPENSETR_SYSRAMLPEN		BIT(0)
+
+/* RCC_MP_AXIMLPENCLRR register fields */
+#define RCC_MP_AXIMLPENCLRR_SYSRAMLPEN		BIT(0)
+
+/* RCC_MP_MLAHBLPENSETR register fields */
+#define RCC_MP_MLAHBLPENSETR_SRAM1LPEN		BIT(0)
+#define RCC_MP_MLAHBLPENSETR_SRAM2LPEN		BIT(1)
+#define RCC_MP_MLAHBLPENSETR_SRAM34LPEN		BIT(2)
+#define RCC_MP_MLAHBLPENSETR_RETRAMLPEN		BIT(4)
+
+/* RCC_MP_MLAHBLPENCLRR register fields */
+#define RCC_MP_MLAHBLPENCLRR_SRAM1LPEN		BIT(0)
+#define RCC_MP_MLAHBLPENCLRR_SRAM2LPEN		BIT(1)
+#define RCC_MP_MLAHBLPENCLRR_SRAM34LPEN		BIT(2)
+#define RCC_MP_MLAHBLPENCLRR_RETRAMLPEN		BIT(4)
+
+/* RCC_MC_APB1LPENSETR register fields */
+#define RCC_MC_APB1LPENSETR_TIM2LPEN		BIT(0)
+#define RCC_MC_APB1LPENSETR_TIM3LPEN		BIT(1)
+#define RCC_MC_APB1LPENSETR_TIM4LPEN		BIT(2)
+#define RCC_MC_APB1LPENSETR_TIM5LPEN		BIT(3)
+#define RCC_MC_APB1LPENSETR_TIM6LPEN		BIT(4)
+#define RCC_MC_APB1LPENSETR_TIM7LPEN		BIT(5)
+#define RCC_MC_APB1LPENSETR_TIM12LPEN		BIT(6)
+#define RCC_MC_APB1LPENSETR_TIM13LPEN		BIT(7)
+#define RCC_MC_APB1LPENSETR_TIM14LPEN		BIT(8)
+#define RCC_MC_APB1LPENSETR_LPTIM1LPEN		BIT(9)
+#define RCC_MC_APB1LPENSETR_SPI2LPEN		BIT(11)
+#define RCC_MC_APB1LPENSETR_SPI3LPEN		BIT(12)
+#define RCC_MC_APB1LPENSETR_USART2LPEN		BIT(14)
+#define RCC_MC_APB1LPENSETR_USART3LPEN		BIT(15)
+#define RCC_MC_APB1LPENSETR_UART4LPEN		BIT(16)
+#define RCC_MC_APB1LPENSETR_UART5LPEN		BIT(17)
+#define RCC_MC_APB1LPENSETR_UART7LPEN		BIT(18)
+#define RCC_MC_APB1LPENSETR_UART8LPEN		BIT(19)
+#define RCC_MC_APB1LPENSETR_I2C1LPEN		BIT(21)
+#define RCC_MC_APB1LPENSETR_I2C2LPEN		BIT(22)
+#define RCC_MC_APB1LPENSETR_I2C3LPEN		BIT(23)
+#define RCC_MC_APB1LPENSETR_I2C5LPEN		BIT(24)
+#define RCC_MC_APB1LPENSETR_SPDIFLPEN		BIT(26)
+#define RCC_MC_APB1LPENSETR_CECLPEN		BIT(27)
+#define RCC_MC_APB1LPENSETR_WWDG1LPEN		BIT(28)
+#define RCC_MC_APB1LPENSETR_DAC12LPEN		BIT(29)
+#define RCC_MC_APB1LPENSETR_MDIOSLPEN		BIT(31)
+
+/* RCC_MC_APB1LPENCLRR register fields */
+#define RCC_MC_APB1LPENCLRR_TIM2LPEN		BIT(0)
+#define RCC_MC_APB1LPENCLRR_TIM3LPEN		BIT(1)
+#define RCC_MC_APB1LPENCLRR_TIM4LPEN		BIT(2)
+#define RCC_MC_APB1LPENCLRR_TIM5LPEN		BIT(3)
+#define RCC_MC_APB1LPENCLRR_TIM6LPEN		BIT(4)
+#define RCC_MC_APB1LPENCLRR_TIM7LPEN		BIT(5)
+#define RCC_MC_APB1LPENCLRR_TIM12LPEN		BIT(6)
+#define RCC_MC_APB1LPENCLRR_TIM13LPEN		BIT(7)
+#define RCC_MC_APB1LPENCLRR_TIM14LPEN		BIT(8)
+#define RCC_MC_APB1LPENCLRR_LPTIM1LPEN		BIT(9)
+#define RCC_MC_APB1LPENCLRR_SPI2LPEN		BIT(11)
+#define RCC_MC_APB1LPENCLRR_SPI3LPEN		BIT(12)
+#define RCC_MC_APB1LPENCLRR_USART2LPEN		BIT(14)
+#define RCC_MC_APB1LPENCLRR_USART3LPEN		BIT(15)
+#define RCC_MC_APB1LPENCLRR_UART4LPEN		BIT(16)
+#define RCC_MC_APB1LPENCLRR_UART5LPEN		BIT(17)
+#define RCC_MC_APB1LPENCLRR_UART7LPEN		BIT(18)
+#define RCC_MC_APB1LPENCLRR_UART8LPEN		BIT(19)
+#define RCC_MC_APB1LPENCLRR_I2C1LPEN		BIT(21)
+#define RCC_MC_APB1LPENCLRR_I2C2LPEN		BIT(22)
+#define RCC_MC_APB1LPENCLRR_I2C3LPEN		BIT(23)
+#define RCC_MC_APB1LPENCLRR_I2C5LPEN		BIT(24)
+#define RCC_MC_APB1LPENCLRR_SPDIFLPEN		BIT(26)
+#define RCC_MC_APB1LPENCLRR_CECLPEN		BIT(27)
+#define RCC_MC_APB1LPENCLRR_WWDG1LPEN		BIT(28)
+#define RCC_MC_APB1LPENCLRR_DAC12LPEN		BIT(29)
+#define RCC_MC_APB1LPENCLRR_MDIOSLPEN		BIT(31)
+
+/* RCC_MC_APB2LPENSETR register fields */
+#define RCC_MC_APB2LPENSETR_TIM1LPEN		BIT(0)
+#define RCC_MC_APB2LPENSETR_TIM8LPEN		BIT(1)
+#define RCC_MC_APB2LPENSETR_TIM15LPEN		BIT(2)
+#define RCC_MC_APB2LPENSETR_TIM16LPEN		BIT(3)
+#define RCC_MC_APB2LPENSETR_TIM17LPEN		BIT(4)
+#define RCC_MC_APB2LPENSETR_SPI1LPEN		BIT(8)
+#define RCC_MC_APB2LPENSETR_SPI4LPEN		BIT(9)
+#define RCC_MC_APB2LPENSETR_SPI5LPEN		BIT(10)
+#define RCC_MC_APB2LPENSETR_USART6LPEN		BIT(13)
+#define RCC_MC_APB2LPENSETR_SAI1LPEN		BIT(16)
+#define RCC_MC_APB2LPENSETR_SAI2LPEN		BIT(17)
+#define RCC_MC_APB2LPENSETR_SAI3LPEN		BIT(18)
+#define RCC_MC_APB2LPENSETR_DFSDMLPEN		BIT(20)
+#define RCC_MC_APB2LPENSETR_ADFSDMLPEN		BIT(21)
+#define RCC_MC_APB2LPENSETR_FDCANLPEN		BIT(24)
+
+/* RCC_MC_APB2LPENCLRR register fields */
+#define RCC_MC_APB2LPENCLRR_TIM1LPEN		BIT(0)
+#define RCC_MC_APB2LPENCLRR_TIM8LPEN		BIT(1)
+#define RCC_MC_APB2LPENCLRR_TIM15LPEN		BIT(2)
+#define RCC_MC_APB2LPENCLRR_TIM16LPEN		BIT(3)
+#define RCC_MC_APB2LPENCLRR_TIM17LPEN		BIT(4)
+#define RCC_MC_APB2LPENCLRR_SPI1LPEN		BIT(8)
+#define RCC_MC_APB2LPENCLRR_SPI4LPEN		BIT(9)
+#define RCC_MC_APB2LPENCLRR_SPI5LPEN		BIT(10)
+#define RCC_MC_APB2LPENCLRR_USART6LPEN		BIT(13)
+#define RCC_MC_APB2LPENCLRR_SAI1LPEN		BIT(16)
+#define RCC_MC_APB2LPENCLRR_SAI2LPEN		BIT(17)
+#define RCC_MC_APB2LPENCLRR_SAI3LPEN		BIT(18)
+#define RCC_MC_APB2LPENCLRR_DFSDMLPEN		BIT(20)
+#define RCC_MC_APB2LPENCLRR_ADFSDMLPEN		BIT(21)
+#define RCC_MC_APB2LPENCLRR_FDCANLPEN		BIT(24)
+
+/* RCC_MC_APB3LPENSETR register fields */
+#define RCC_MC_APB3LPENSETR_LPTIM2LPEN		BIT(0)
+#define RCC_MC_APB3LPENSETR_LPTIM3LPEN		BIT(1)
+#define RCC_MC_APB3LPENSETR_LPTIM4LPEN		BIT(2)
+#define RCC_MC_APB3LPENSETR_LPTIM5LPEN		BIT(3)
+#define RCC_MC_APB3LPENSETR_SAI4LPEN		BIT(8)
+#define RCC_MC_APB3LPENSETR_SYSCFGLPEN		BIT(11)
+#define RCC_MC_APB3LPENSETR_VREFLPEN		BIT(13)
+#define RCC_MC_APB3LPENSETR_TMPSENSLPEN		BIT(16)
+#define RCC_MC_APB3LPENSETR_PMBCTRLLPEN		BIT(17)
+
+/* RCC_MC_APB3LPENCLRR register fields */
+#define RCC_MC_APB3LPENCLRR_LPTIM2LPEN		BIT(0)
+#define RCC_MC_APB3LPENCLRR_LPTIM3LPEN		BIT(1)
+#define RCC_MC_APB3LPENCLRR_LPTIM4LPEN		BIT(2)
+#define RCC_MC_APB3LPENCLRR_LPTIM5LPEN		BIT(3)
+#define RCC_MC_APB3LPENCLRR_SAI4LPEN		BIT(8)
+#define RCC_MC_APB3LPENCLRR_SYSCFGLPEN		BIT(11)
+#define RCC_MC_APB3LPENCLRR_VREFLPEN		BIT(13)
+#define RCC_MC_APB3LPENCLRR_TMPSENSLPEN		BIT(16)
+#define RCC_MC_APB3LPENCLRR_PMBCTRLLPEN		BIT(17)
+
+/* RCC_MC_AHB2LPENSETR register fields */
+#define RCC_MC_AHB2LPENSETR_DMA1LPEN		BIT(0)
+#define RCC_MC_AHB2LPENSETR_DMA2LPEN		BIT(1)
+#define RCC_MC_AHB2LPENSETR_DMAMUXLPEN		BIT(2)
+#define RCC_MC_AHB2LPENSETR_ADC12LPEN		BIT(5)
+#define RCC_MC_AHB2LPENSETR_USBOLPEN		BIT(8)
+#define RCC_MC_AHB2LPENSETR_SDMMC3LPEN		BIT(16)
+
+/* RCC_MC_AHB2LPENCLRR register fields */
+#define RCC_MC_AHB2LPENCLRR_DMA1LPEN		BIT(0)
+#define RCC_MC_AHB2LPENCLRR_DMA2LPEN		BIT(1)
+#define RCC_MC_AHB2LPENCLRR_DMAMUXLPEN		BIT(2)
+#define RCC_MC_AHB2LPENCLRR_ADC12LPEN		BIT(5)
+#define RCC_MC_AHB2LPENCLRR_USBOLPEN		BIT(8)
+#define RCC_MC_AHB2LPENCLRR_SDMMC3LPEN		BIT(16)
+
+/* RCC_MC_AHB3LPENSETR register fields */
+#define RCC_MC_AHB3LPENSETR_DCMILPEN		BIT(0)
+#define RCC_MC_AHB3LPENSETR_CRYP2LPEN		BIT(4)
+#define RCC_MC_AHB3LPENSETR_HASH2LPEN		BIT(5)
+#define RCC_MC_AHB3LPENSETR_RNG2LPEN		BIT(6)
+#define RCC_MC_AHB3LPENSETR_CRC2LPEN		BIT(7)
+#define RCC_MC_AHB3LPENSETR_HSEMLPEN		BIT(11)
+#define RCC_MC_AHB3LPENSETR_IPCCLPEN		BIT(12)
+
+/* RCC_MC_AHB3LPENCLRR register fields */
+#define RCC_MC_AHB3LPENCLRR_DCMILPEN		BIT(0)
+#define RCC_MC_AHB3LPENCLRR_CRYP2LPEN		BIT(4)
+#define RCC_MC_AHB3LPENCLRR_HASH2LPEN		BIT(5)
+#define RCC_MC_AHB3LPENCLRR_RNG2LPEN		BIT(6)
+#define RCC_MC_AHB3LPENCLRR_CRC2LPEN		BIT(7)
+#define RCC_MC_AHB3LPENCLRR_HSEMLPEN		BIT(11)
+#define RCC_MC_AHB3LPENCLRR_IPCCLPEN		BIT(12)
+
+/* RCC_MC_AHB4LPENSETR register fields */
+#define RCC_MC_AHB4LPENSETR_GPIOALPEN		BIT(0)
+#define RCC_MC_AHB4LPENSETR_GPIOBLPEN		BIT(1)
+#define RCC_MC_AHB4LPENSETR_GPIOCLPEN		BIT(2)
+#define RCC_MC_AHB4LPENSETR_GPIODLPEN		BIT(3)
+#define RCC_MC_AHB4LPENSETR_GPIOELPEN		BIT(4)
+#define RCC_MC_AHB4LPENSETR_GPIOFLPEN		BIT(5)
+#define RCC_MC_AHB4LPENSETR_GPIOGLPEN		BIT(6)
+#define RCC_MC_AHB4LPENSETR_GPIOHLPEN		BIT(7)
+#define RCC_MC_AHB4LPENSETR_GPIOILPEN		BIT(8)
+#define RCC_MC_AHB4LPENSETR_GPIOJLPEN		BIT(9)
+#define RCC_MC_AHB4LPENSETR_GPIOKLPEN		BIT(10)
+
+/* RCC_MC_AHB4LPENCLRR register fields */
+#define RCC_MC_AHB4LPENCLRR_GPIOALPEN		BIT(0)
+#define RCC_MC_AHB4LPENCLRR_GPIOBLPEN		BIT(1)
+#define RCC_MC_AHB4LPENCLRR_GPIOCLPEN		BIT(2)
+#define RCC_MC_AHB4LPENCLRR_GPIODLPEN		BIT(3)
+#define RCC_MC_AHB4LPENCLRR_GPIOELPEN		BIT(4)
+#define RCC_MC_AHB4LPENCLRR_GPIOFLPEN		BIT(5)
+#define RCC_MC_AHB4LPENCLRR_GPIOGLPEN		BIT(6)
+#define RCC_MC_AHB4LPENCLRR_GPIOHLPEN		BIT(7)
+#define RCC_MC_AHB4LPENCLRR_GPIOILPEN		BIT(8)
+#define RCC_MC_AHB4LPENCLRR_GPIOJLPEN		BIT(9)
+#define RCC_MC_AHB4LPENCLRR_GPIOKLPEN		BIT(10)
+
+/* RCC_MC_AXIMLPENSETR register fields */
+#define RCC_MC_AXIMLPENSETR_SYSRAMLPEN		BIT(0)
+
+/* RCC_MC_AXIMLPENCLRR register fields */
+#define RCC_MC_AXIMLPENCLRR_SYSRAMLPEN		BIT(0)
+
+/* RCC_MC_MLAHBLPENSETR register fields */
+#define RCC_MC_MLAHBLPENSETR_SRAM1LPEN		BIT(0)
+#define RCC_MC_MLAHBLPENSETR_SRAM2LPEN		BIT(1)
+#define RCC_MC_MLAHBLPENSETR_SRAM34LPEN		BIT(2)
+#define RCC_MC_MLAHBLPENSETR_RETRAMLPEN		BIT(4)
+
+/* RCC_MC_MLAHBLPENCLRR register fields */
+#define RCC_MC_MLAHBLPENCLRR_SRAM1LPEN		BIT(0)
+#define RCC_MC_MLAHBLPENCLRR_SRAM2LPEN		BIT(1)
+#define RCC_MC_MLAHBLPENCLRR_SRAM34LPEN		BIT(2)
+#define RCC_MC_MLAHBLPENCLRR_RETRAMLPEN		BIT(4)
+
+/* RCC_MC_RSTSCLRR register fields */
+#define RCC_MC_RSTSCLRR_PORRSTF			BIT(0)
+#define RCC_MC_RSTSCLRR_BORRSTF			BIT(1)
+#define RCC_MC_RSTSCLRR_PADRSTF			BIT(2)
+#define RCC_MC_RSTSCLRR_HCSSRSTF		BIT(3)
+#define RCC_MC_RSTSCLRR_VCORERSTF		BIT(4)
+#define RCC_MC_RSTSCLRR_MCURSTF			BIT(5)
+#define RCC_MC_RSTSCLRR_MPSYSRSTF		BIT(6)
+#define RCC_MC_RSTSCLRR_MCSYSRSTF		BIT(7)
+#define RCC_MC_RSTSCLRR_IWDG1RSTF		BIT(8)
+#define RCC_MC_RSTSCLRR_IWDG2RSTF		BIT(9)
+#define RCC_MC_RSTSCLRR_WWDG1RSTF		BIT(10)
+
+/* RCC_MC_CIER register fields */
+#define RCC_MC_CIER_LSIRDYIE			BIT(0)
+#define RCC_MC_CIER_LSERDYIE			BIT(1)
+#define RCC_MC_CIER_HSIRDYIE			BIT(2)
+#define RCC_MC_CIER_HSERDYIE			BIT(3)
+#define RCC_MC_CIER_CSIRDYIE			BIT(4)
+#define RCC_MC_CIER_PLL1DYIE			BIT(8)
+#define RCC_MC_CIER_PLL2DYIE			BIT(9)
+#define RCC_MC_CIER_PLL3DYIE			BIT(10)
+#define RCC_MC_CIER_PLL4DYIE			BIT(11)
+#define RCC_MC_CIER_LSECSSIE			BIT(16)
+#define RCC_MC_CIER_WKUPIE			BIT(20)
+
+/* RCC_MC_CIFR register fields */
+#define RCC_MC_CIFR_LSIRDYF			BIT(0)
+#define RCC_MC_CIFR_LSERDYF			BIT(1)
+#define RCC_MC_CIFR_HSIRDYF			BIT(2)
+#define RCC_MC_CIFR_HSERDYF			BIT(3)
+#define RCC_MC_CIFR_CSIRDYF			BIT(4)
+#define RCC_MC_CIFR_PLL1DYF			BIT(8)
+#define RCC_MC_CIFR_PLL2DYF			BIT(9)
+#define RCC_MC_CIFR_PLL3DYF			BIT(10)
+#define RCC_MC_CIFR_PLL4DYF			BIT(11)
+#define RCC_MC_CIFR_LSECSSF			BIT(16)
+#define RCC_MC_CIFR_WKUPF			BIT(20)
+
+/* RCC_VERR register fields */
+#define RCC_VERR_MINREV_MASK			GENMASK(3, 0)
+#define RCC_VERR_MINREV_SHIFT			0
+#define RCC_VERR_MAJREV_MASK			GENMASK(7, 4)
+#define RCC_VERR_MAJREV_SHIFT			4
+
+/* Used for RCC_OCENSETR and RCC_OCENCLRR registers */
+#define RCC_OCENR_HSION				BIT(0)
+#define RCC_OCENR_HSIKERON			BIT(1)
+#define RCC_OCENR_CSION				BIT(4)
+#define RCC_OCENR_CSIKERON			BIT(5)
+#define RCC_OCENR_DIGBYP			BIT(7)
+#define RCC_OCENR_HSEON				BIT(8)
+#define RCC_OCENR_HSEKERON			BIT(9)
+#define RCC_OCENR_HSEBYP			BIT(10)
+#define RCC_OCENR_HSECSSON			BIT(11)
+
+/* Offset between RCC_MP_xxxENSETR and RCC_MP_xxxENCLRR registers */
+#define RCC_MP_ENCLRR_OFFSET			U(4)
+
+/* Offset between RCC_xxxRSTSETR and RCC_xxxRSTCLRR registers */
+#define RCC_RSTCLRR_OFFSET			U(4)
+
+/* Used for most of DIVR register: max div for RTC */
+#define RCC_DIVR_DIV_MASK			GENMASK(5, 0)
+#define RCC_DIVR_DIVRDY				BIT(31)
+
+/* Masks for specific DIVR registers */
+#define RCC_APBXDIV_MASK			GENMASK(2, 0)
+#define RCC_MPUDIV_MASK				GENMASK(2, 0)
+#define RCC_AXIDIV_MASK				GENMASK(2, 0)
+#define RCC_MCUDIV_MASK				GENMASK(3, 0)
+
+/* Used for most of RCC_<x>SELR registers */
+#define RCC_SELR_SRC_MASK			GENMASK(2, 0)
+#define RCC_SELR_REFCLK_SRC_MASK		GENMASK(1, 0)
+#define RCC_SELR_SRCRDY				BIT(31)
+
+/* Used for all RCC_PLL<n>CR registers */
+#define RCC_PLLNCR_PLLON			BIT(0)
+#define RCC_PLLNCR_PLLRDY			BIT(1)
+#define RCC_PLLNCR_SSCG_CTRL			BIT(2)
+#define RCC_PLLNCR_DIVPEN			BIT(4)
+#define RCC_PLLNCR_DIVQEN			BIT(5)
+#define RCC_PLLNCR_DIVREN			BIT(6)
+#define RCC_PLLNCR_DIVEN_SHIFT			4
+
+/* Used for all RCC_PLL<n>CFGR1 registers */
+#define RCC_PLLNCFGR1_DIVM_MASK			GENMASK(21, 16)
+#define RCC_PLLNCFGR1_DIVM_SHIFT		16
+#define RCC_PLLNCFGR1_DIVN_MASK			GENMASK(8, 0)
+#define RCC_PLLNCFGR1_DIVN_SHIFT		0
+
+/* Only for PLL3 and PLL4 */
+#define RCC_PLLNCFGR1_IFRGE_MASK		GENMASK(25, 24)
+#define RCC_PLLNCFGR1_IFRGE_SHIFT		24
+
+/* Used for all RCC_PLL<n>CFGR2 registers */
+#define RCC_PLLNCFGR2_DIVX_MASK			GENMASK(6, 0)
+#define RCC_PLLNCFGR2_DIVP_MASK			GENMASK(6, 0)
+#define RCC_PLLNCFGR2_DIVP_SHIFT		0
+#define RCC_PLLNCFGR2_DIVQ_MASK			GENMASK(14, 8)
+#define RCC_PLLNCFGR2_DIVQ_SHIFT		8
+#define RCC_PLLNCFGR2_DIVR_MASK			GENMASK(22, 16)
+#define RCC_PLLNCFGR2_DIVR_SHIFT		16
+
+/* Used for all RCC_PLL<n>FRACR registers */
+#define RCC_PLLNFRACR_FRACV_SHIFT		3
+#define RCC_PLLNFRACR_FRACV_MASK		GENMASK(15, 3)
+#define RCC_PLLNFRACR_FRACLE			BIT(16)
+
+/* Used for all RCC_PLL<n>CSGR registers */
+#define RCC_PLLNCSGR_INC_STEP_SHIFT		16
+#define RCC_PLLNCSGR_INC_STEP_MASK		GENMASK(30, 16)
+#define RCC_PLLNCSGR_MOD_PER_SHIFT		0
+#define RCC_PLLNCSGR_MOD_PER_MASK		GENMASK(12, 0)
+#define RCC_PLLNCSGR_SSCG_MODE_SHIFT		15
+#define RCC_PLLNCSGR_SSCG_MODE_MASK		BIT(15)
+
+/* Used for TIMER Prescaler */
+#define RCC_TIMGXPRER_TIMGXPRE			BIT(0)
+
+/* Used for RCC_MCO related operations */
+#define RCC_MCOCFG_MCOON			BIT(12)
+#define RCC_MCOCFG_MCODIV_MASK			GENMASK(7, 4)
+#define RCC_MCOCFG_MCODIV_SHIFT			4
+#define RCC_MCOCFG_MCOSRC_MASK			GENMASK(2, 0)
+
 #endif /* STM32MP1_RCC_H */
diff --git a/include/drivers/st/stm32mp_clkfunc.h b/include/drivers/st/stm32mp_clkfunc.h
index c7e0b6e..c3329cc 100644
--- a/include/drivers/st/stm32mp_clkfunc.h
+++ b/include/drivers/st/stm32mp_clkfunc.h
@@ -19,7 +19,6 @@
 				     const char *prop_name,
 				     uint32_t dflt_value);
 
-int fdt_get_rcc_node(void *fdt);
 int fdt_rcc_read_uint32_array(const char *prop_name, uint32_t count,
 			      uint32_t *array);
 int fdt_rcc_subnode_offset(const char *name);
diff --git a/include/export/common/ep_info_exp.h b/include/export/common/ep_info_exp.h
index 9d2969f..a5bd10a 100644
--- a/include/export/common/ep_info_exp.h
+++ b/include/export/common/ep_info_exp.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -24,11 +24,23 @@
 #define ENTRY_POINT_INFO_ARGS_OFFSET	U(0x14)
 #endif
 
-/* Security state of the image. */
-#define EP_SECURITY_MASK	UL(0x1)
+/*
+ * Security state of the image. Bit 0 and
+ * bit 5 are used to determine the security
+ * state of the image as follows:
+ *
+ * ---------------------------------
+ *  Bit 5 | Bit 0 | Security state
+ * ---------------------------------
+ *   0        0      EP_SECURE
+ *   0        1      EP_NON_SECURE
+ *   1        1      EP_REALM
+ */
+#define EP_SECURITY_MASK	UL(0x21)
 #define EP_SECURITY_SHIFT	UL(0)
 #define EP_SECURE		UL(0x0)
 #define EP_NON_SECURE		UL(0x1)
+#define EP_REALM		UL(0x21)
 
 /* Endianness of the image. */
 #define EP_EE_MASK		U(0x2)
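/*
 * Illustrative sketch (not part of the patch above): with the widened
 * EP_SECURITY_MASK, code holding the entry-point attribute word could
 * classify an image as shown below. The helper names are hypothetical;
 * only the EP_* macros come from this header, and <stdbool.h>/<stdint.h>
 * are assumed to be available.
 */
static inline bool ep_attr_is_realm(uint32_t attr)
{
	/* Bit 5 and bit 0 both set => Realm image. */
	return (attr & EP_SECURITY_MASK) == EP_REALM;
}

static inline bool ep_attr_is_secure(uint32_t attr)
{
	/* Bit 5 and bit 0 both clear => Secure image. */
	return (attr & EP_SECURITY_MASK) == EP_SECURE;
}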
diff --git a/include/export/common/tbbr/tbbr_img_def_exp.h b/include/export/common/tbbr/tbbr_img_def_exp.h
index 2623c75..98544c0 100644
--- a/include/export/common/tbbr/tbbr_img_def_exp.h
+++ b/include/export/common/tbbr/tbbr_img_def_exp.h
@@ -101,7 +101,10 @@
  */
 #define BKUP_FWU_METADATA_IMAGE_ID	U(33)
 
+/* Realm Monitor Manager (RMM) */
+#define RMM_IMAGE_ID			U(34)
+
 /* Max Images */
-#define MAX_IMAGE_IDS			U(34)
+#define MAX_IMAGE_IDS			U(35)
 
 #endif /* ARM_TRUSTED_FIRMWARE_EXPORT_COMMON_TBBR_TBBR_IMG_DEF_EXP_H */
diff --git a/include/lib/cpus/aarch64/cortex_a710.h b/include/lib/cpus/aarch64/cortex_a710.h
index 8b011aa..d2bc146 100644
--- a/include/lib/cpus/aarch64/cortex_a710.h
+++ b/include/lib/cpus/aarch64/cortex_a710.h
@@ -13,7 +13,7 @@
  * CPU Extended Control register specific definitions
  ******************************************************************************/
 #define CORTEX_A710_CPUECTLR_EL1				S3_0_C15_C1_4
-#define CORTEX_A710_CPUECTLR_EL1_PFSTIDIS_BIT                   (ULL(1) << 8)
+#define CORTEX_A710_CPUECTLR_EL1_PFSTIDIS_BIT			(ULL(1) << 8)
 
 /*******************************************************************************
  * CPU Power Control register specific definitions
@@ -25,6 +25,20 @@
  * CPU Auxiliary Control register specific definitions.
  ******************************************************************************/
 #define CORTEX_A710_CPUACTLR_EL1 				S3_0_C15_C1_0
-#define CORTEX_A710_CPUACTLR_EL1_BIT_46 			(ULL(1) << 46)
+#define CORTEX_A710_CPUACTLR_EL1_BIT_46				(ULL(1) << 46)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register 5 specific definitions.
+ ******************************************************************************/
+#define CORTEX_A710_CPUACTLR5_EL1				S3_0_C15_C8_0
+#define CORTEX_A710_CPUACTLR5_EL1_BIT_13			(ULL(1) << 13)
+
+/*******************************************************************************
+ * CPU Extended Control register 2 specific definitions.
+ ******************************************************************************/
+#define CORTEX_A710_CPUECTLR2_EL1				S3_0_C15_C1_5
+#define CORTEX_A710_CPUECTLR2_EL1_PF_MODE_CNSRV			ULL(9)
+#define CPUECTLR2_EL1_PF_MODE_LSB				U(11)
+#define CPUECTLR2_EL1_PF_MODE_WIDTH				U(4)
 
 #endif /* CORTEX_A710_H */
diff --git a/include/lib/cpus/aarch64/cortex_a78.h b/include/lib/cpus/aarch64/cortex_a78.h
index 4bc49f3..42b0833 100644
--- a/include/lib/cpus/aarch64/cortex_a78.h
+++ b/include/lib/cpus/aarch64/cortex_a78.h
@@ -16,6 +16,9 @@
  ******************************************************************************/
 #define CORTEX_A78_CPUECTLR_EL1				S3_0_C15_C1_4
 #define CORTEX_A78_CPUECTLR_EL1_BIT_8			(ULL(1) << 8)
+#define CORTEX_A78_CPUECTLR_EL1_PF_MODE_CNSRV		ULL(3)
+#define CPUECTLR_EL1_PF_MODE_LSB				U(6)
+#define CPUECTLR_EL1_PF_MODE_WIDTH				U(2)
 
 /*******************************************************************************
  * CPU Power Control register specific definitions
diff --git a/include/lib/cpus/aarch64/cortex_hayes.h b/include/lib/cpus/aarch64/cortex_hayes.h
new file mode 100644
index 0000000..82022e9
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_hayes.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_HAYES_H
+#define CORTEX_HAYES_H
+
+#define CORTEX_HAYES_MIDR					U(0x410FD800)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_HAYES_CPUECTLR_EL1				S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_HAYES_CPUPWRCTLR_EL1				S3_0_C15_C2_7
+#define CORTEX_HAYES_CPUPWRCTLR_EL1_CORE_PWRDN_BIT		U(1)
+
+#endif /* CORTEX_HAYES_H */
diff --git a/include/lib/cpus/aarch64/neoverse_n2.h b/include/lib/cpus/aarch64/neoverse_n2.h
index 948f965..f414cb5 100644
--- a/include/lib/cpus/aarch64/neoverse_n2.h
+++ b/include/lib/cpus/aarch64/neoverse_n2.h
@@ -8,37 +8,45 @@
 #define NEOVERSE_N2_H
 
 /* Neoverse N2 ID register for revision r0p0 */
-#define NEOVERSE_N2_MIDR			U(0x410FD490)
+#define NEOVERSE_N2_MIDR				U(0x410FD490)
 
 /*******************************************************************************
  * CPU Power control register
  ******************************************************************************/
-#define NEOVERSE_N2_CPUPWRCTLR_EL1		S3_0_C15_C2_7
-#define NEOVERSE_N2_CORE_PWRDN_EN_BIT		(ULL(1) << 0)
+#define NEOVERSE_N2_CPUPWRCTLR_EL1			S3_0_C15_C2_7
+#define NEOVERSE_N2_CORE_PWRDN_EN_BIT			(ULL(1) << 0)
 
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
-#define NEOVERSE_N2_CPUECTLR_EL1		S3_0_C15_C1_4
-#define NEOVERSE_N2_CPUECTLR_EL1_EXTLLC_BIT	(ULL(1) << 0)
-#define NEOVERSE_N2_CPUECTLR_EL1_PFSTIDIS_BIT	(ULL(1) << 8)
+#define NEOVERSE_N2_CPUECTLR_EL1			S3_0_C15_C1_4
+#define NEOVERSE_N2_CPUECTLR_EL1_EXTLLC_BIT		(ULL(1) << 0)
+#define NEOVERSE_N2_CPUECTLR_EL1_PFSTIDIS_BIT		(ULL(1) << 8)
 
 /*******************************************************************************
  * CPU Auxiliary Control register specific definitions.
  ******************************************************************************/
-#define NEOVERSE_N2_CPUACTLR_EL1		S3_0_C15_C1_0
-#define NEOVERSE_N2_CPUACTLR_EL1_BIT_46	        (ULL(1) << 46)
+#define NEOVERSE_N2_CPUACTLR_EL1			S3_0_C15_C1_0
+#define NEOVERSE_N2_CPUACTLR_EL1_BIT_46			(ULL(1) << 46)
 
 /*******************************************************************************
  * CPU Auxiliary Control register 2 specific definitions.
  ******************************************************************************/
-#define NEOVERSE_N2_CPUACTLR2_EL1		S3_0_C15_C1_1
-#define NEOVERSE_N2_CPUACTLR2_EL1_BIT_2		(ULL(1) << 2)
+#define NEOVERSE_N2_CPUACTLR2_EL1			S3_0_C15_C1_1
+#define NEOVERSE_N2_CPUACTLR2_EL1_BIT_2			(ULL(1) << 2)
 
 /*******************************************************************************
  * CPU Auxiliary Control register 5 specific definitions.
  ******************************************************************************/
-#define NEOVERSE_N2_CPUACTLR5_EL1		S3_0_C15_C8_0
-#define NEOVERSE_N2_CPUACTLR5_EL1_BIT_44	(ULL(1) << 44)
+#define NEOVERSE_N2_CPUACTLR5_EL1			S3_0_C15_C8_0
+#define NEOVERSE_N2_CPUACTLR5_EL1_BIT_44		(ULL(1) << 44)
+
+/*******************************************************************************
+ * CPU Extended Control register 2 specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_N2_CPUECTLR2_EL1			S3_0_C15_C1_5
+#define NEOVERSE_N2_CPUECTLR2_EL1_PF_MODE_CNSRV		ULL(9)
+#define CPUECTLR2_EL1_PF_MODE_LSB			U(11)
+#define CPUECTLR2_EL1_PF_MODE_WIDTH			U(4)
 
 #endif /* NEOVERSE_N2_H */
diff --git a/include/lib/cpus/aarch64/neoverse_v1.h b/include/lib/cpus/aarch64/neoverse_v1.h
index cfb26ab..e43c907 100644
--- a/include/lib/cpus/aarch64/neoverse_v1.h
+++ b/include/lib/cpus/aarch64/neoverse_v1.h
@@ -15,6 +15,9 @@
 #define NEOVERSE_V1_CPUECTLR_EL1				S3_0_C15_C1_4
 #define NEOVERSE_V1_CPUECTLR_EL1_BIT_8				(ULL(1) << 8)
 #define NEOVERSE_V1_CPUECTLR_EL1_BIT_53				(ULL(1) << 53)
+#define NEOVERSE_V1_CPUECTLR_EL1_PF_MODE_CNSRV			ULL(3)
+#define CPUECTLR_EL1_PF_MODE_LSB				U(6)
+#define CPUECTLR_EL1_PF_MODE_WIDTH				U(2)
 
 /*******************************************************************************
  * CPU Power Control register specific definitions
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
index d449a65..698e208 100644
--- a/include/lib/el3_runtime/aarch64/context.h
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -228,6 +228,10 @@
 
 // Starting with Armv8.5
 #define CTX_SCXTNUM_EL2		U(0x1e0)
+
+// Register for FEAT_HCX
+#define CTX_HCRX_EL2		U(0x1e8)
+
 /* Align to the next 16 byte boundary */
 #define CTX_EL2_SYSREGS_END	U(0x1f0)
 
@@ -401,13 +405,12 @@
 					 = (uint64_t) (val))
 
 /*
- * Top-level context structure which is used by EL3 firmware to
- * preserve the state of a core at EL1 in one of the two security
- * states and save enough EL3 meta data to be able to return to that
- * EL and security state. The context management library will be used
- * to ensure that SP_EL3 always points to an instance of this
- * structure at exception entry and exit. Each instance will
- * correspond to either the secure or the non-secure state.
+ * Top-level context structure which is used by EL3 firmware to preserve
+ * the state of a core at the next lower EL in a given security state and
+ * save enough EL3 meta data to be able to return to that EL and security
+ * state. The context management library will be used to ensure that
+ * SP_EL3 always points to an instance of this structure at exception
+ * entry and exit.
  */
 typedef struct cpu_context {
 	gp_regs_t gpregs_ctx;
diff --git a/include/lib/el3_runtime/cpu_data.h b/include/lib/el3_runtime/cpu_data.h
index 3d57a5c..2c7b619 100644
--- a/include/lib/el3_runtime/cpu_data.h
+++ b/include/lib/el3_runtime/cpu_data.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -19,16 +19,25 @@
 /* 8-bytes aligned size of psci_cpu_data structure */
 #define PSCI_CPU_DATA_SIZE_ALIGNED	((PSCI_CPU_DATA_SIZE + 7) & ~7)
 
+#if ENABLE_RME
+/* Size of cpu_context array */
+#define CPU_DATA_CONTEXT_NUM		3
 /* Offset of cpu_ops_ptr, size 8 bytes */
+#define CPU_DATA_CPU_OPS_PTR		0x18
+#else /* ENABLE_RME */
+#define CPU_DATA_CONTEXT_NUM		2
 #define CPU_DATA_CPU_OPS_PTR		0x10
+#endif /* ENABLE_RME */
 
 #if ENABLE_PAUTH
 /* 8-bytes aligned offset of apiakey[2], size 16 bytes */
-#define	CPU_DATA_APIAKEY_OFFSET		(0x18 + PSCI_CPU_DATA_SIZE_ALIGNED)
-#define CPU_DATA_CRASH_BUF_OFFSET	(CPU_DATA_APIAKEY_OFFSET + 0x10)
-#else
-#define CPU_DATA_CRASH_BUF_OFFSET	(0x18 + PSCI_CPU_DATA_SIZE_ALIGNED)
-#endif	/* ENABLE_PAUTH */
+#define	CPU_DATA_APIAKEY_OFFSET		(0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \
+					     + CPU_DATA_CPU_OPS_PTR)
+#define CPU_DATA_CRASH_BUF_OFFSET	(0x10 + CPU_DATA_APIAKEY_OFFSET)
+#else /* ENABLE_PAUTH */
+#define CPU_DATA_CRASH_BUF_OFFSET	(0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \
+					     + CPU_DATA_CPU_OPS_PTR)
+#endif /* ENABLE_PAUTH */
 
 /* need enough space in crash buffer to save 8 registers */
 #define CPU_DATA_CRASH_BUF_SIZE		64
@@ -65,11 +74,14 @@
 
 #ifndef __ASSEMBLER__
 
+#include <assert.h>
+#include <stdint.h>
+
 #include <arch_helpers.h>
 #include <lib/cassert.h>
 #include <lib/psci/psci.h>
+
 #include <platform_def.h>
-#include <stdint.h>
 
 /* Offsets for the cpu_data structure */
 #define CPU_DATA_PSCI_LOCK_OFFSET	__builtin_offsetof\
@@ -80,27 +92,34 @@
 		(cpu_data_t, platform_cpu_data)
 #endif
 
+typedef enum context_pas {
+	CPU_CONTEXT_SECURE = 0,
+	CPU_CONTEXT_NS,
+#if ENABLE_RME
+	CPU_CONTEXT_REALM,
+#endif
+	CPU_CONTEXT_NUM
+} context_pas_t;
+
 /*******************************************************************************
  * Function & variable prototypes
  ******************************************************************************/
 
 /*******************************************************************************
  * Cache of frequently used per-cpu data:
- *   Pointers to non-secure and secure security state contexts
+ *   Pointers to non-secure, realm, and secure security state contexts
  *   Address of the crash stack
  * It is aligned to the cache line boundary to allow efficient concurrent
  * manipulation of these pointers on different cpus
  *
- * TODO: Add other commonly used variables to this (tf_issues#90)
- *
  * The data structure and the _cpu_data accessors should not be used directly
  * by components that have per-cpu members. The member access macros should be
  * used for this.
  ******************************************************************************/
 typedef struct cpu_data {
 #ifdef __aarch64__
-	void *cpu_context[2];
-#endif
+	void *cpu_context[CPU_DATA_CONTEXT_NUM];
+#endif /* __aarch64__ */
 	uintptr_t cpu_ops_ptr;
 	struct psci_cpu_data psci_svc_cpu_data;
 #if ENABLE_PAUTH
@@ -122,6 +141,11 @@
 
 extern cpu_data_t percpu_data[PLATFORM_CORE_COUNT];
 
+#ifdef __aarch64__
+CASSERT(CPU_DATA_CONTEXT_NUM == CPU_CONTEXT_NUM,
+		assert_cpu_data_context_num_mismatch);
+#endif
+
 #if ENABLE_PAUTH
 CASSERT(CPU_DATA_APIAKEY_OFFSET == __builtin_offsetof
 	(cpu_data_t, apiakey),
@@ -160,6 +184,31 @@
 struct cpu_data *_cpu_data(void);
 #endif
 
+/*
+ * Returns the index of the cpu_context array for the given security state.
+ * All accesses to cpu_context should be through this helper to make sure
+ * an access is not out-of-bounds. The function assumes security_state is
+ * valid.
+ */
+static inline context_pas_t get_cpu_context_index(uint32_t security_state)
+{
+	if (security_state == SECURE) {
+		return CPU_CONTEXT_SECURE;
+	} else {
+#if ENABLE_RME
+		if (security_state == NON_SECURE) {
+			return CPU_CONTEXT_NS;
+		} else {
+			assert(security_state == REALM);
+			return CPU_CONTEXT_REALM;
+		}
+#else
+		assert(security_state == NON_SECURE);
+		return CPU_CONTEXT_NS;
+#endif
+	}
+}
+
 /**************************************************************************
  * APIs for initialising and accessing per-cpu data
  *************************************************************************/
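/*
 * Illustrative sketch (not part of the patch above): how a per-cpu context
 * pointer would typically be fetched through the new helper. The function
 * name is hypothetical; cpu_data_t, the cpu_context[] array (AArch64 only)
 * and get_cpu_context_index() come from this header.
 */
static inline void *cpu_data_get_context(cpu_data_t *data,
					 uint32_t security_state)
{
	context_pas_t idx = get_cpu_context_index(security_state);

	/* idx is below CPU_CONTEXT_NUM, which the CASSERT ties to the array size. */
	return data->cpu_context[idx];
}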
diff --git a/include/lib/gpt_rme/gpt_rme.h b/include/lib/gpt_rme/gpt_rme.h
new file mode 100644
index 0000000..379b915
--- /dev/null
+++ b/include/lib/gpt_rme/gpt_rme.h
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef GPT_RME_H
+#define GPT_RME_H
+
+#include <stdint.h>
+
+#include <arch.h>
+
+/******************************************************************************/
+/* GPT helper macros and definitions                                          */
+/******************************************************************************/
+
+/*
+ * Structure for specifying a mapping range and its properties. This should not
+ * be initialized manually; using the GPT_MAP_REGION_x macros below is
+ * recommended to avoid potential incompatibilities in the future.
+ */
+typedef struct pas_region {
+	uintptr_t	base_pa;	/* Base address for PAS. */
+	size_t		size;		/* Size of the PAS. */
+	unsigned int	attrs;		/* PAS GPI and entry type. */
+} pas_region_t;
+
+/* GPT GPI definitions */
+#define GPT_GPI_NO_ACCESS		U(0x0)
+#define GPT_GPI_SECURE			U(0x8)
+#define GPT_GPI_NS			U(0x9)
+#define GPT_GPI_ROOT			U(0xA)
+#define GPT_GPI_REALM			U(0xB)
+#define GPT_GPI_ANY			U(0xF)
+#define GPT_GPI_VAL_MASK		UL(0xF)
+
+/* PAS attribute GPI definitions. */
+#define GPT_PAS_ATTR_GPI_SHIFT		U(0)
+#define GPT_PAS_ATTR_GPI_MASK		U(0xF)
+#define GPT_PAS_ATTR_GPI(_attrs)	(((_attrs)			\
+					>> GPT_PAS_ATTR_GPI_SHIFT)	\
+					& GPT_PAS_ATTR_GPI_MASK)
+
+/* PAS attribute mapping type definitions */
+#define GPT_PAS_ATTR_MAP_TYPE_BLOCK	U(0x0)
+#define GPT_PAS_ATTR_MAP_TYPE_GRANULE	U(0x1)
+#define GPT_PAS_ATTR_MAP_TYPE_SHIFT	U(4)
+#define GPT_PAS_ATTR_MAP_TYPE_MASK	U(0x1)
+#define GPT_PAS_ATTR_MAP_TYPE(_attrs)	(((_attrs)			\
+					>> GPT_PAS_ATTR_MAP_TYPE_SHIFT)	\
+					& GPT_PAS_ATTR_MAP_TYPE_MASK)
+
+/*
+ * Macro to initialize the attributes field in the pas_region_t structure.
+ * [31:5] Reserved
+ * [4]    Mapping type (GPT_PAS_ATTR_MAP_TYPE_x definitions)
+ * [3:0]  PAS GPI type (GPT_GPI_x definitions)
+ */
+#define GPT_PAS_ATTR(_type, _gpi)					\
+	((((_type) & GPT_PAS_ATTR_MAP_TYPE_MASK)			\
+	  << GPT_PAS_ATTR_MAP_TYPE_SHIFT) |				\
+	(((_gpi) & GPT_PAS_ATTR_GPI_MASK)				\
+	 << GPT_PAS_ATTR_GPI_SHIFT))
+
+/*
+ * Macro to create a GPT entry for this PAS range as a block descriptor. If this
+ * region does not fit the requirements for a block descriptor then GPT
+ * initialization will fail.
+ */
+#define GPT_MAP_REGION_BLOCK(_pa, _sz, _gpi)				\
+	{								\
+		.base_pa = (_pa),					\
+		.size = (_sz),						\
+		.attrs = GPT_PAS_ATTR(GPT_PAS_ATTR_MAP_TYPE_BLOCK, (_gpi)), \
+	}
+
+/*
+ * Macro to create a GPT entry for this PAS range as a table descriptor. If this
+ * region does not fit the requirements for a table descriptor then GPT
+ * initialization will fail.
+ */
+#define GPT_MAP_REGION_GRANULE(_pa, _sz, _gpi)				\
+	{								\
+		.base_pa = (_pa),					\
+		.size = (_sz),						\
+		.attrs = GPT_PAS_ATTR(GPT_PAS_ATTR_MAP_TYPE_GRANULE, (_gpi)), \
+	}
+
+/******************************************************************************/
+/* GPT register field definitions                                             */
+/******************************************************************************/
+
+/*
+ * Least significant address bits protected by each entry in level 0 GPT. This
+ * field is read-only.
+ */
+#define GPCCR_L0GPTSZ_SHIFT	U(20)
+#define GPCCR_L0GPTSZ_MASK	U(0xF)
+
+typedef enum {
+	GPCCR_L0GPTSZ_30BITS	= U(0x0),
+	GPCCR_L0GPTSZ_34BITS	= U(0x4),
+	GPCCR_L0GPTSZ_36BITS	= U(0x6),
+	GPCCR_L0GPTSZ_39BITS	= U(0x9)
+} gpccr_l0gptsz_e;
+
+/* Granule protection check priority bit definitions */
+#define GPCCR_GPCP_SHIFT	U(17)
+#define GPCCR_GPCP_BIT		(ULL(1) << GPCCR_GPCP_SHIFT)
+
+/* Granule protection check bit definitions */
+#define GPCCR_GPC_SHIFT		U(16)
+#define GPCCR_GPC_BIT		(ULL(1) << GPCCR_GPC_SHIFT)
+
+/* Physical granule size bit definitions */
+#define GPCCR_PGS_SHIFT		U(14)
+#define GPCCR_PGS_MASK		U(0x3)
+#define SET_GPCCR_PGS(x)	(((x) & GPCCR_PGS_MASK) << GPCCR_PGS_SHIFT)
+
+typedef enum {
+	GPCCR_PGS_4K		= U(0x0),
+	GPCCR_PGS_64K		= U(0x1),
+	GPCCR_PGS_16K		= U(0x2)
+} gpccr_pgs_e;
+
+/* GPT fetch shareability attribute bit definitions */
+#define GPCCR_SH_SHIFT		U(12)
+#define GPCCR_SH_MASK		U(0x3)
+#define SET_GPCCR_SH(x)		(((x) & GPCCR_SH_MASK) << GPCCR_SH_SHIFT)
+
+typedef enum {
+	GPCCR_SH_NS		= U(0x0),
+	GPCCR_SH_OS		= U(0x2),
+	GPCCR_SH_IS		= U(0x3)
+} gpccr_sh_e;
+
+/* GPT fetch outer cacheability attribute bit definitions */
+#define GPCCR_ORGN_SHIFT	U(10)
+#define GPCCR_ORGN_MASK		U(0x3)
+#define SET_GPCCR_ORGN(x)	(((x) & GPCCR_ORGN_MASK) << GPCCR_ORGN_SHIFT)
+
+typedef enum {
+	GPCCR_ORGN_NC		= U(0x0),
+	GPCCR_ORGN_WB_RA_WA	= U(0x1),
+	GPCCR_ORGN_WT_RA_NWA	= U(0x2),
+	GPCCR_ORGN_WB_RA_NWA	= U(0x3)
+} gpccr_orgn_e;
+
+/* GPT fetch inner cacheability attribute bit definitions */
+#define GPCCR_IRGN_SHIFT	U(8)
+#define GPCCR_IRGN_MASK		U(0x3)
+#define SET_GPCCR_IRGN(x)	(((x) & GPCCR_IRGN_MASK) << GPCCR_IRGN_SHIFT)
+
+typedef enum {
+	GPCCR_IRGN_NC		= U(0x0),
+	GPCCR_IRGN_WB_RA_WA	= U(0x1),
+	GPCCR_IRGN_WT_RA_NWA	= U(0x2),
+	GPCCR_IRGN_WB_RA_NWA	= U(0x3)
+} gpccr_irgn_e;
+
+/* Protected physical address size bit definitions */
+#define GPCCR_PPS_SHIFT		U(0)
+#define GPCCR_PPS_MASK		U(0x7)
+#define SET_GPCCR_PPS(x)	(((x) & GPCCR_PPS_MASK) << GPCCR_PPS_SHIFT)
+
+typedef enum {
+	GPCCR_PPS_4GB		= U(0x0),
+	GPCCR_PPS_64GB		= U(0x1),
+	GPCCR_PPS_1TB		= U(0x2),
+	GPCCR_PPS_4TB		= U(0x3),
+	GPCCR_PPS_16TB		= U(0x4),
+	GPCCR_PPS_256TB		= U(0x5),
+	GPCCR_PPS_4PB		= U(0x6)
+} gpccr_pps_e;
+
+/* Base Address for the GPT bit definitions */
+#define GPTBR_BADDR_SHIFT	U(0)
+#define GPTBR_BADDR_VAL_SHIFT	U(12)
+#define GPTBR_BADDR_MASK	ULL(0xffffffffff)
+
+/******************************************************************************/
+/* GPT public APIs                                                            */
+/******************************************************************************/
+
+/*
+ * Public API that initializes the entire protected space to GPT_GPI_ANY using
+ * the L0 tables (block descriptors).  Ideally, this function is invoked prior
+ * to DDR discovery and initialization.  The MMU must be initialized before
+ * calling this function.
+ *
+ * Parameters
+ *   pps		PPS value to use for table generation
+ *   l0_mem_base	Base address of L0 tables in memory.
+ *   l0_mem_size	Total size of memory available for L0 tables.
+ *
+ * Return
+ *   Negative Linux error code in the event of a failure, 0 for success.
+ */
+int gpt_init_l0_tables(gpccr_pps_e pps,
+		       uintptr_t l0_mem_base,
+		       size_t l0_mem_size);
+
+/*
+ * Public API that carves out PAS regions from the L0 tables and builds any L1
+ * tables that are needed.  This function ideally is run after DDR discovery and
+ * initialization.  The L0 tables must have already been initialized to GPI_ANY
+ * when this function is called.
+ *
+ * Parameters
+ *   pgs		PGS value to use for table generation.
+ *   l1_mem_base	Base address of memory used for L1 tables.
+ *   l1_mem_size	Total size of memory available for L1 tables.
+ *   *pas_regions	Pointer to PAS regions structure array.
+ *   pas_count		Total number of PAS regions.
+ *
+ * Return
+ *   Negative Linux error code in the event of a failure, 0 for success.
+ */
+int gpt_init_pas_l1_tables(gpccr_pgs_e pgs,
+			   uintptr_t l1_mem_base,
+			   size_t l1_mem_size,
+			   pas_region_t *pas_regions,
+			   unsigned int pas_count);
+
+/*
+ * Public API to initialize the runtime gpt_config structure based on the values
+ * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
+ * typically happens in a bootloader stage prior to setting up the EL3 runtime
+ * environment for the granule transition service so this function detects the
+ * initialization from a previous stage. Granule protection checks must be
+ * enabled already or this function will return an error.
+ *
+ * Return
+ *   Negative Linux error code in the event of a failure, 0 for success.
+ */
+int gpt_runtime_init(void);
+
+/*
+ * Public API to enable granule protection checks once the tables have all been
+ * initialized.  This function is called at first initialization and then again
+ * later during warm boots of CPU cores.
+ *
+ * Return
+ *   Negative Linux error code in the event of a failure, 0 for success.
+ */
+int gpt_enable(void);
+
+/*
+ * Public API to disable granule protection checks.
+ */
+void gpt_disable(void);
+
+/*
+ * This function is the core of the granule transition service. When a granule
+ * transition request occurs it is routed to this function where the request is
+ * validated then fulfilled if possible.
+ *
+ * TODO: implement support for transitioning multiple granules at once.
+ *
+ * Parameters
+ *   base: Base address of the region to transition, must be aligned to granule
+ *         size.
+ *   size: Size of region to transition, must be aligned to granule size.
+ *   src_sec_state: Security state of the caller.
+ *   target_pas: Target PAS of the specified memory region.
+ *
+ * Return
+ *    Negative Linux error code in the event of a failure, 0 for success.
+ */
+int gpt_transition_pas(uint64_t base,
+		       size_t size,
+		       unsigned int src_sec_state,
+		       unsigned int target_pas);
+
+#endif /* GPT_RME_H */
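/*
 * Illustrative sketch (not taken from the new header above): a plausible
 * cold-boot usage of this API. The function name, addresses and sizes are
 * placeholders; real platforms derive them from their memory map (see
 * arm_pas_def.h further down in this patch). ARRAY_SIZE is assumed to come
 * from <lib/utils_def.h>.
 */
static pas_region_t pas_regions[] = {
	/* Non-secure DRAM, granule-mapped so it can transition later. */
	GPT_MAP_REGION_GRANULE(0x80000000ULL, 0x7C000000ULL, GPT_GPI_NS),
	/* Realm carve-out for the RMM. */
	GPT_MAP_REGION_GRANULE(0xFC000000ULL, 0x02000000ULL, GPT_GPI_REALM),
	/* Remaining secure TZC DRAM. */
	GPT_MAP_REGION_GRANULE(0xFE000000ULL, 0x02000000ULL, GPT_GPI_SECURE),
};

static int plat_setup_gpt(uintptr_t l0_base, size_t l0_size,
			  uintptr_t l1_base, size_t l1_size)
{
	int ret;

	/* 1. Initialize the L0 tables to GPT_GPI_ANY (before DDR is known). */
	ret = gpt_init_l0_tables(GPCCR_PPS_4GB, l0_base, l0_size);
	if (ret != 0) {
		return ret;
	}

	/* 2. Carve out the PAS regions and build the L1 tables. */
	ret = gpt_init_pas_l1_tables(GPCCR_PGS_4K, l1_base, l1_size,
				     pas_regions, ARRAY_SIZE(pas_regions));
	if (ret != 0) {
		return ret;
	}

	/* 3. Turn on granule protection checks. */
	return gpt_enable();
}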
diff --git a/include/lib/smccc.h b/include/lib/smccc.h
index deaeb1d..1a39f24 100644
--- a/include/lib/smccc.h
+++ b/include/lib/smccc.h
@@ -108,9 +108,24 @@
 #define SMC_ARCH_CALL_NOT_REQUIRED	-2
 #define SMC_ARCH_CALL_INVAL_PARAM	-3
 
-/* Various flags passed to SMC handlers */
+/*
+ * Various flags passed to SMC handlers
+ *
+ * Bit 5 and bit 0 of the flag are used to
+ * determine the source security state as
+ * follows:
+ * ---------------------------------
+ *  Bit 5 | Bit 0 | Security state
+ * ---------------------------------
+ *   0        0      SMC_FROM_SECURE
+ *   0        1      SMC_FROM_NON_SECURE
+ *   1        1      SMC_FROM_REALM
+ */
+
 #define SMC_FROM_SECURE		(U(0) << 0)
 #define SMC_FROM_NON_SECURE	(U(1) << 0)
+#define SMC_FROM_REALM		U(0x21)
+#define SMC_FROM_MASK		U(0x21)
 
 #ifndef __ASSEMBLER__
 
@@ -118,8 +133,18 @@
 
 #include <lib/cassert.h>
 
+#if ENABLE_RME
+#define is_caller_non_secure(_f)	(((_f) & SMC_FROM_MASK) \
+					  == SMC_FROM_NON_SECURE)
+#define is_caller_secure(_f)		(((_f) & SMC_FROM_MASK) \
+					  == SMC_FROM_SECURE)
+#define is_caller_realm(_f)		(((_f) & SMC_FROM_MASK) \
+					  == SMC_FROM_REALM)
+#define caller_sec_state(_f)		((_f) & SMC_FROM_MASK)
+#else /* ENABLE_RME */
 #define is_caller_non_secure(_f)	(((_f) & SMC_FROM_NON_SECURE) != U(0))
 #define is_caller_secure(_f)		(!is_caller_non_secure(_f))
+#endif /* ENABLE_RME */
 
 /* The macro below is used to identify a Standard Service SMC call */
 #define is_std_svc_call(_fid)		(GET_SMC_OEN(_fid) == OEN_STD_START)
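/*
 * Illustrative sketch (not part of the patch above): how a handler might
 * branch on the caller's security state using the new helpers, which are
 * only available when ENABLE_RME=1. The handle_*_request() callees are
 * hypothetical and the signature is abbreviated relative to the rt_svc
 * conventions.
 */
static uintptr_t example_smc_handler(uint32_t smc_fid, u_register_t x1,
				     void *handle, u_register_t flags)
{
	if (is_caller_realm(flags)) {
		/* Bit 5 and bit 0 set: request came from the Realm world. */
		return handle_realm_request(smc_fid, x1, handle);
	}

	if (is_caller_secure(flags)) {
		/* Bit 5 and bit 0 clear: Secure world caller. */
		return handle_secure_request(smc_fid, x1, handle);
	}

	/* Bit 0 set, bit 5 clear: Non-secure caller. */
	return handle_ns_request(smc_fid, x1, handle);
}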
diff --git a/include/lib/xlat_mpu/xlat_mpu.h b/include/lib/xlat_mpu/xlat_mpu.h
new file mode 100644
index 0000000..252b92c
--- /dev/null
+++ b/include/lib/xlat_mpu/xlat_mpu.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_MPU_H
+#define XLAT_MPU_H
+
+#ifndef __ASSEMBLER__
+
+#include <lib/cassert.h>
+
+#define XLAT_TABLES_LIB_V2	1
+
+void enable_mpu_el2(unsigned int flags);
+void enable_mpu_direct_el2(unsigned int flags);
+
+/*
+ * Function to wipe clean and disable all MPU regions.  This function expects
+ * that the MPU has already been turned off, and caching concerns addressed,
+ * but it nevertheless also explicitly turns off the MPU.
+ */
+void clear_all_mpu_regions(void);
+
+#endif /* __ASSEMBLER__ */
+#endif /* XLAT_MPU_H */
diff --git a/include/lib/xlat_tables/xlat_tables.h b/include/lib/xlat_tables/xlat_tables.h
index 082bb5e..a156969 100644
--- a/include/lib/xlat_tables/xlat_tables.h
+++ b/include/lib/xlat_tables/xlat_tables.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -72,6 +72,13 @@
 #define MT_CODE			(MT_MEMORY | MT_RO | MT_EXECUTE)
 #define MT_RO_DATA		(MT_MEMORY | MT_RO | MT_EXECUTE_NEVER)
 
+/* Memory type for EL3 regions */
+#if ENABLE_RME
+#error FEAT_RME requires version 2 of the Translation Tables Library
+#else
+#define EL3_PAS			MT_SECURE
+#endif
+
 /*
  * Structure for specifying a single region of memory.
  */
diff --git a/include/lib/xlat_tables/xlat_tables_defs.h b/include/lib/xlat_tables/xlat_tables_defs.h
index 579d8d8..2d0949b 100644
--- a/include/lib/xlat_tables/xlat_tables_defs.h
+++ b/include/lib/xlat_tables/xlat_tables_defs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -142,6 +142,7 @@
 #define AP_NO_ACCESS_UNPRIVILEGED	(AP1_NO_ACCESS_UNPRIVILEGED << 4)
 #define AP_ONE_VA_RANGE_RES1		(AP1_RES1 << 4)
 #define NS				(U(0x1) << 3)
+#define EL3_S1_NSE			(U(0x1) << 9)
 #define ATTR_NON_CACHEABLE_INDEX	ULL(0x2)
 #define ATTR_DEVICE_INDEX		ULL(0x1)
 #define ATTR_IWBWA_OWBWA_NTR_INDEX	ULL(0x0)
diff --git a/include/lib/xlat_tables/xlat_tables_v2.h b/include/lib/xlat_tables/xlat_tables_v2.h
index 359b983..69ad027 100644
--- a/include/lib/xlat_tables/xlat_tables_v2.h
+++ b/include/lib/xlat_tables/xlat_tables_v2.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -60,17 +60,22 @@
 #define MT_TYPE(_attr)		((_attr) & MT_TYPE_MASK)
 /* Access permissions (RO/RW) */
 #define MT_PERM_SHIFT		U(3)
-/* Security state (SECURE/NS) */
-#define MT_SEC_SHIFT		U(4)
+
+/* Physical address space (SECURE/NS/Root/Realm) */
+#define	MT_PAS_SHIFT		U(4)
+#define MT_PAS_MASK		(U(3) << MT_PAS_SHIFT)
+#define MT_PAS(_attr)		((_attr) & MT_PAS_MASK)
+
 /* Access permissions for instruction execution (EXECUTE/EXECUTE_NEVER) */
-#define MT_EXECUTE_SHIFT	U(5)
+#define MT_EXECUTE_SHIFT	U(6)
 /* In the EL1&0 translation regime, User (EL0) or Privileged (EL1). */
-#define MT_USER_SHIFT		U(6)
+#define MT_USER_SHIFT		U(7)
 
 /* Shareability attribute for the memory region */
-#define MT_SHAREABILITY_SHIFT	U(7)
+#define MT_SHAREABILITY_SHIFT	U(8)
 #define MT_SHAREABILITY_MASK	(U(3) << MT_SHAREABILITY_SHIFT)
 #define MT_SHAREABILITY(_attr)	((_attr) & MT_SHAREABILITY_MASK)
+
 /* All other bits are reserved */
 
 /*
@@ -91,8 +96,10 @@
 #define MT_RO			(U(0) << MT_PERM_SHIFT)
 #define MT_RW			(U(1) << MT_PERM_SHIFT)
 
-#define MT_SECURE		(U(0) << MT_SEC_SHIFT)
-#define MT_NS			(U(1) << MT_SEC_SHIFT)
+#define MT_SECURE		(U(0) << MT_PAS_SHIFT)
+#define MT_NS			(U(1) << MT_PAS_SHIFT)
+#define MT_ROOT			(U(2) << MT_PAS_SHIFT)
+#define MT_REALM		(U(3) << MT_PAS_SHIFT)
 
 /*
  * Access permissions for instruction execution are only relevant for normal
@@ -149,6 +156,13 @@
 #define EL3_REGIME		3
 #define EL_REGIME_INVALID	-1
 
+/* Memory type for EL3 regions. With RME, EL3 is in ROOT PAS */
+#if ENABLE_RME
+#define EL3_PAS			MT_ROOT
+#else
+#define EL3_PAS			MT_SECURE
+#endif /* ENABLE_RME */
+
 /*
  * Declare the translation context type.
  * Its definition is private.
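/*
 * Illustrative sketch (not part of the patch above): with the widened PAS
 * field, Root and Realm regions are declared exactly like Secure/NS ones,
 * only the attribute differs. Addresses and sizes are placeholders.
 */
static const mmap_region_t rme_mmap_example[] = {
	/* EL3 (Root world) data carved out of DRAM. */
	MAP_REGION_FLAT(0xFFC00000ULL, 0x00300000ULL,
			MT_MEMORY | MT_RW | MT_ROOT),
	/* RMM image, owned by the Realm world. */
	MAP_REGION_FLAT(0xFD000000ULL, 0x02000000ULL,
			MT_MEMORY | MT_RW | MT_REALM),
	{0}
};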
diff --git a/include/plat/arm/board/common/v2m_def.h b/include/plat/arm/board/common/v2m_def.h
index 6a6979c..cb11dac 100644
--- a/include/plat/arm/board/common/v2m_def.h
+++ b/include/plat/arm/board/common/v2m_def.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -8,6 +8,13 @@
 
 #include <lib/utils_def.h>
 
+/* Base address offset applied to all V2M peripherals */
+#ifdef PLAT_V2M_OFFSET
+#define V2M_OFFSET			PLAT_V2M_OFFSET
+#else
+#define V2M_OFFSET			UL(0)
+#endif
+
 /* V2M motherboard system registers & offsets */
 #define V2M_SYSREGS_BASE		UL(0x1c010000)
 #define V2M_SYS_ID			UL(0x0)
@@ -69,18 +76,18 @@
 
 
 /* NOR Flash */
-#define V2M_FLASH0_BASE			UL(0x08000000)
+#define V2M_FLASH0_BASE			(V2M_OFFSET + UL(0x08000000))
 #define V2M_FLASH0_SIZE			UL(0x04000000)
-#define V2M_FLASH_BLOCK_SIZE		UL(0x00040000)	/* 256 KB */
+#define V2M_FLASH_BLOCK_SIZE		UL(0x00040000) /* 256 KB */
 
-#define V2M_IOFPGA_BASE			UL(0x1c000000)
+#define V2M_IOFPGA_BASE			(V2M_OFFSET + UL(0x1c000000))
 #define V2M_IOFPGA_SIZE			UL(0x03000000)
 
 /* PL011 UART related constants */
-#define V2M_IOFPGA_UART0_BASE		UL(0x1c090000)
-#define V2M_IOFPGA_UART1_BASE		UL(0x1c0a0000)
-#define V2M_IOFPGA_UART2_BASE		UL(0x1c0b0000)
-#define V2M_IOFPGA_UART3_BASE		UL(0x1c0c0000)
+#define V2M_IOFPGA_UART0_BASE		(V2M_OFFSET + UL(0x1c090000))
+#define V2M_IOFPGA_UART1_BASE		(V2M_OFFSET + UL(0x1c0a0000))
+#define V2M_IOFPGA_UART2_BASE		(V2M_OFFSET + UL(0x1c0b0000))
+#define V2M_IOFPGA_UART3_BASE		(V2M_OFFSET + UL(0x1c0c0000))
 
 #define V2M_IOFPGA_UART0_CLK_IN_HZ	24000000
 #define V2M_IOFPGA_UART1_CLK_IN_HZ	24000000
@@ -88,11 +95,11 @@
 #define V2M_IOFPGA_UART3_CLK_IN_HZ	24000000
 
 /* SP804 timer related constants */
-#define V2M_SP804_TIMER0_BASE		UL(0x1C110000)
-#define V2M_SP804_TIMER1_BASE		UL(0x1C120000)
+#define V2M_SP804_TIMER0_BASE		(V2M_OFFSET + UL(0x1C110000))
+#define V2M_SP804_TIMER1_BASE		(V2M_OFFSET + UL(0x1C120000))
 
 /* SP810 controller */
-#define V2M_SP810_BASE			UL(0x1c020000)
+#define V2M_SP810_BASE			(V2M_OFFSET + UL(0x1c020000))
 #define V2M_SP810_CTRL_TIM0_SEL		BIT_32(15)
 #define V2M_SP810_CTRL_TIM1_SEL		BIT_32(17)
 #define V2M_SP810_CTRL_TIM2_SEL		BIT_32(19)
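/*
 * Illustrative sketch (not part of the patch above): a platform whose V2M
 * peripherals sit at a non-default base would define the offset in its
 * platform_def.h before this header is pulled in. The value below is a
 * placeholder.
 */
#define PLAT_V2M_OFFSET		UL(0x80000000)

/*
 * With that in place, V2M_IOFPGA_UART0_BASE, for example, resolves to
 * PLAT_V2M_OFFSET + UL(0x1c090000).
 */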
diff --git a/include/plat/arm/board/fvp_r/fvp_r_bl1.h b/include/plat/arm/board/fvp_r/fvp_r_bl1.h
new file mode 100644
index 0000000..0b41e67
--- /dev/null
+++ b/include/plat/arm/board/fvp_r/fvp_r_bl1.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FVP_R_BL1_H
+#define FVP_R_BL1_H
+
+void bl1_load_bl33(void);
+void bl1_transfer_bl33(void);
+
+#endif /* FVP_R_BL1_H */
diff --git a/include/plat/arm/common/arm_def.h b/include/plat/arm/common/arm_def.h
index ae80628..1993cb4 100644
--- a/include/plat/arm/common/arm_def.h
+++ b/include/plat/arm/common/arm_def.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -58,8 +58,12 @@
 #define ARM_TRUSTED_DRAM_ID		1
 #define ARM_DRAM_ID			2
 
-/* The first 4KB of Trusted SRAM are used as shared memory */
+#ifdef PLAT_ARM_TRUSTED_SRAM_BASE
+#define ARM_TRUSTED_SRAM_BASE		PLAT_ARM_TRUSTED_SRAM_BASE
+#else
 #define ARM_TRUSTED_SRAM_BASE		UL(0x04000000)
+#endif /* PLAT_ARM_TRUSTED_SRAM_BASE */
+
 #define ARM_SHARED_RAM_BASE		ARM_TRUSTED_SRAM_BASE
 #define ARM_SHARED_RAM_SIZE		UL(0x00001000)	/* 4 KB */
 
@@ -70,38 +74,84 @@
 					 ARM_SHARED_RAM_SIZE)
 
 /*
- * The top 16MB of DRAM1 is configured as secure access only using the TZC
+ * The top 16MB (or 64MB if RME is enabled) of DRAM1 is configured as
+ * follows:
  *   - SCP TZC DRAM: If present, DRAM reserved for SCP use
+ *   - L1 GPT DRAM: Reserved for L1 GPT if RME is enabled
+ *   - REALM DRAM: Reserved for Realm world if RME is enabled
  *   - AP TZC DRAM: The remaining TZC secured DRAM reserved for AP use
+ *
+ *              RME enabled(64MB)                RME not enabled(16MB)
+ *              --------------------             -------------------
+ *              |                  |             |                 |
+ *              |  AP TZC (~28MB)  |             |  AP TZC (~14MB) |
+ *              --------------------             -------------------
+ *              |                  |             |                 |
+ *              |  REALM (32MB)    |             |  EL3 TZC (2MB)  |
+ *              --------------------             -------------------
+ *              |                  |             |                 |
+ *              |  EL3 TZC (3MB)   |             |    SCP TZC      |
+ *              --------------------  0xFFFF_FFFF-------------------
+ *              | L1 GPT + SCP TZC |
+ *              |       (~1MB)     |
+ *  0xFFFF_FFFF --------------------
+ */
+#if ENABLE_RME
+#define ARM_TZC_DRAM1_SIZE              UL(0x04000000) /* 64MB */
+/*
+ * Define a region within the TZC secured DRAM for use by EL3 runtime
+ * firmware. This region is meant to be NOLOAD and will not be zero
+ * initialized. Data sections with the attribute `arm_el3_tzc_dram` will be
+ * placed here. 3MB region is reserved if RME is enabled, 2MB otherwise.
  */
-#define ARM_TZC_DRAM1_SIZE		UL(0x01000000)
+#define ARM_EL3_TZC_DRAM1_SIZE		UL(0x00300000) /* 3MB */
+#define ARM_L1_GPT_SIZE			UL(0x00100000) /* 1MB */
+#define ARM_REALM_SIZE			UL(0x02000000) /* 32MB */
+#else
+#define ARM_TZC_DRAM1_SIZE		UL(0x01000000) /* 16MB */
+#define ARM_EL3_TZC_DRAM1_SIZE		UL(0x00200000) /* 2MB */
+#define ARM_L1_GPT_SIZE			UL(0)
+#define ARM_REALM_SIZE			UL(0)
+#endif /* ENABLE_RME */
 
 #define ARM_SCP_TZC_DRAM1_BASE		(ARM_DRAM1_BASE +		\
-					 ARM_DRAM1_SIZE -		\
-					 ARM_SCP_TZC_DRAM1_SIZE)
+					ARM_DRAM1_SIZE -		\
+					(ARM_SCP_TZC_DRAM1_SIZE +	\
+					ARM_L1_GPT_SIZE))
 #define ARM_SCP_TZC_DRAM1_SIZE		PLAT_ARM_SCP_TZC_DRAM1_SIZE
 #define ARM_SCP_TZC_DRAM1_END		(ARM_SCP_TZC_DRAM1_BASE +	\
-					 ARM_SCP_TZC_DRAM1_SIZE - 1U)
+					ARM_SCP_TZC_DRAM1_SIZE - 1U)
+#if ENABLE_RME
+#define ARM_L1_GPT_ADDR_BASE		(ARM_DRAM1_BASE +		\
+					ARM_DRAM1_SIZE -		\
+					ARM_L1_GPT_SIZE)
+#define ARM_L1_GPT_END			(ARM_L1_GPT_ADDR_BASE +		\
+					ARM_L1_GPT_SIZE - 1U)
 
-/*
- * Define a 2MB region within the TZC secured DRAM for use by EL3 runtime
- * firmware. This region is meant to be NOLOAD and will not be zero
- * initialized. Data sections with the attribute `arm_el3_tzc_dram` will be
- * placed here.
- */
-#define ARM_EL3_TZC_DRAM1_BASE		(ARM_SCP_TZC_DRAM1_BASE - ARM_EL3_TZC_DRAM1_SIZE)
-#define ARM_EL3_TZC_DRAM1_SIZE		UL(0x00200000) /* 2 MB */
+#define ARM_REALM_BASE			(ARM_DRAM1_BASE +		\
+					ARM_DRAM1_SIZE -		\
+					(ARM_SCP_TZC_DRAM1_SIZE +	\
+					ARM_EL3_TZC_DRAM1_SIZE +	\
+					ARM_REALM_SIZE +		\
+					ARM_L1_GPT_SIZE))
+#define ARM_REALM_END                   (ARM_REALM_BASE + ARM_REALM_SIZE - 1U)
+#endif /* ENABLE_RME */
+
+#define ARM_EL3_TZC_DRAM1_BASE		(ARM_SCP_TZC_DRAM1_BASE -	\
+					ARM_EL3_TZC_DRAM1_SIZE)
 #define ARM_EL3_TZC_DRAM1_END		(ARM_EL3_TZC_DRAM1_BASE +	\
 					ARM_EL3_TZC_DRAM1_SIZE - 1U)
 
 #define ARM_AP_TZC_DRAM1_BASE		(ARM_DRAM1_BASE +		\
-					 ARM_DRAM1_SIZE -		\
-					 ARM_TZC_DRAM1_SIZE)
+					ARM_DRAM1_SIZE -		\
+					ARM_TZC_DRAM1_SIZE)
 #define ARM_AP_TZC_DRAM1_SIZE		(ARM_TZC_DRAM1_SIZE -		\
-					 (ARM_SCP_TZC_DRAM1_SIZE +	\
-					 ARM_EL3_TZC_DRAM1_SIZE))
+					(ARM_SCP_TZC_DRAM1_SIZE +	\
+					ARM_EL3_TZC_DRAM1_SIZE +	\
+					ARM_REALM_SIZE +		\
+					ARM_L1_GPT_SIZE))
 #define ARM_AP_TZC_DRAM1_END		(ARM_AP_TZC_DRAM1_BASE +	\
-					 ARM_AP_TZC_DRAM1_SIZE - 1U)
+					ARM_AP_TZC_DRAM1_SIZE - 1U)
 
 /* Define the Access permissions for Secure peripherals to NS_DRAM */
 #if ARM_CRYPTOCELL_INTEG
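/*
 * Worked example (illustrative, not part of the patch): with the default
 * ARM_DRAM1_BASE = 0x8000_0000 and ARM_DRAM1_SIZE = 0x8000_0000 (DRAM1 ends
 * at 0xFFFF_FFFF), ENABLE_RME=1 and, purely for simplicity, a platform
 * PLAT_ARM_SCP_TZC_DRAM1_SIZE of 0, the macros above resolve to:
 *
 *   ARM_L1_GPT_ADDR_BASE    = 0xFFF0_0000  (top 1MB)
 *   ARM_SCP_TZC_DRAM1_BASE  = 0xFFF0_0000  (empty SCP region below L1 GPT)
 *   ARM_EL3_TZC_DRAM1_BASE  = 0xFFC0_0000  (3MB)
 *   ARM_REALM_BASE          = 0xFDC0_0000  (32MB)
 *   ARM_AP_TZC_DRAM1_BASE   = 0xFC00_0000  (remaining ~28MB of the 64MB TZC)
 *
 * which matches the "RME enabled" column of the diagram above.
 */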
@@ -149,8 +199,12 @@
 					 ARM_TZC_DRAM1_SIZE)
 #define ARM_NS_DRAM1_END		(ARM_NS_DRAM1_BASE +		\
 					 ARM_NS_DRAM1_SIZE - 1U)
-
+#ifdef PLAT_ARM_DRAM1_BASE
+#define ARM_DRAM1_BASE			PLAT_ARM_DRAM1_BASE
+#else
 #define ARM_DRAM1_BASE			ULL(0x80000000)
+#endif /* PLAT_ARM_DRAM1_BASE */
+
 #define ARM_DRAM1_SIZE			ULL(0x80000000)
 #define ARM_DRAM1_END			(ARM_DRAM1_BASE +		\
 					 ARM_DRAM1_SIZE - 1U)
@@ -198,45 +252,58 @@
 	INTR_PROP_DESC(ARM_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, (grp), \
 			GIC_INTR_CFG_EDGE)
 
-#define ARM_MAP_SHARED_RAM		MAP_REGION_FLAT(		\
-						ARM_SHARED_RAM_BASE,	\
-						ARM_SHARED_RAM_SIZE,	\
-						MT_DEVICE | MT_RW | MT_SECURE)
+#define ARM_MAP_SHARED_RAM	MAP_REGION_FLAT(			\
+					ARM_SHARED_RAM_BASE,		\
+					ARM_SHARED_RAM_SIZE,		\
+					MT_DEVICE | MT_RW | EL3_PAS)
 
-#define ARM_MAP_NS_DRAM1		MAP_REGION_FLAT(		\
-						ARM_NS_DRAM1_BASE,	\
-						ARM_NS_DRAM1_SIZE,	\
-						MT_MEMORY | MT_RW | MT_NS)
+#define ARM_MAP_NS_DRAM1	MAP_REGION_FLAT(			\
+					ARM_NS_DRAM1_BASE,		\
+					ARM_NS_DRAM1_SIZE,		\
+					MT_MEMORY | MT_RW | MT_NS)
 
-#define ARM_MAP_DRAM2			MAP_REGION_FLAT(		\
-						ARM_DRAM2_BASE,		\
-						ARM_DRAM2_SIZE,		\
-						MT_MEMORY | MT_RW | MT_NS)
+#define ARM_MAP_DRAM2		MAP_REGION_FLAT(			\
+					ARM_DRAM2_BASE,			\
+					ARM_DRAM2_SIZE,			\
+					MT_MEMORY | MT_RW | MT_NS)
 
-#define ARM_MAP_TSP_SEC_MEM		MAP_REGION_FLAT(		\
-						TSP_SEC_MEM_BASE,	\
-						TSP_SEC_MEM_SIZE,	\
-						MT_MEMORY | MT_RW | MT_SECURE)
+#define ARM_MAP_TSP_SEC_MEM	MAP_REGION_FLAT(			\
+					TSP_SEC_MEM_BASE,		\
+					TSP_SEC_MEM_SIZE,		\
+					MT_MEMORY | MT_RW | MT_SECURE)
 
 #if ARM_BL31_IN_DRAM
-#define ARM_MAP_BL31_SEC_DRAM		MAP_REGION_FLAT(		\
-						BL31_BASE,		\
-						PLAT_ARM_MAX_BL31_SIZE,	\
-						MT_MEMORY | MT_RW | MT_SECURE)
+#define ARM_MAP_BL31_SEC_DRAM	MAP_REGION_FLAT(			\
+					BL31_BASE,			\
+					PLAT_ARM_MAX_BL31_SIZE,		\
+					MT_MEMORY | MT_RW | MT_SECURE)
 #endif
 
-#define ARM_MAP_EL3_TZC_DRAM		MAP_REGION_FLAT(			\
-						ARM_EL3_TZC_DRAM1_BASE,	\
-						ARM_EL3_TZC_DRAM1_SIZE,	\
-						MT_MEMORY | MT_RW | MT_SECURE)
+#define ARM_MAP_EL3_TZC_DRAM	MAP_REGION_FLAT(			\
+					ARM_EL3_TZC_DRAM1_BASE,		\
+					ARM_EL3_TZC_DRAM1_SIZE,		\
+					MT_MEMORY | MT_RW | EL3_PAS)
 
 #if defined(SPD_spmd)
-#define ARM_MAP_TRUSTED_DRAM		MAP_REGION_FLAT(		    \
-						PLAT_ARM_TRUSTED_DRAM_BASE, \
-						PLAT_ARM_TRUSTED_DRAM_SIZE, \
-						MT_MEMORY | MT_RW | MT_SECURE)
+#define ARM_MAP_TRUSTED_DRAM	MAP_REGION_FLAT(			\
+					PLAT_ARM_TRUSTED_DRAM_BASE,	\
+					PLAT_ARM_TRUSTED_DRAM_SIZE,	\
+					MT_MEMORY | MT_RW | MT_SECURE)
 #endif
 
+#if ENABLE_RME
+#define ARM_MAP_RMM_DRAM	MAP_REGION_FLAT(			\
+					PLAT_ARM_RMM_BASE,		\
+					PLAT_ARM_RMM_SIZE,		\
+					MT_MEMORY | MT_RW | MT_REALM)
+
+
+#define ARM_MAP_GPT_L1_DRAM	MAP_REGION_FLAT(			\
+					ARM_L1_GPT_ADDR_BASE,		\
+					ARM_L1_GPT_SIZE,		\
+					MT_MEMORY | MT_RW | EL3_PAS)
+
+#endif /* ENABLE_RME */
 
 /*
  * Mapping for the BL1 RW region. This mapping is needed by BL2 in order to
@@ -247,7 +314,7 @@
 #define ARM_MAP_BL1_RW		MAP_REGION_FLAT(	\
 					BL1_RW_BASE,	\
 					BL1_RW_LIMIT - BL1_RW_BASE, \
-					MT_MEMORY | MT_RW | MT_SECURE)
+					MT_MEMORY | MT_RW | EL3_PAS)
 
 /*
  * If SEPARATE_CODE_AND_RODATA=1 we define a region for each section
@@ -257,35 +324,35 @@
 #define ARM_MAP_BL_RO			MAP_REGION_FLAT(			\
 						BL_CODE_BASE,			\
 						BL_CODE_END - BL_CODE_BASE,	\
-						MT_CODE | MT_SECURE),		\
+						MT_CODE | EL3_PAS),		\
 					MAP_REGION_FLAT(			\
 						BL_RO_DATA_BASE,		\
 						BL_RO_DATA_END			\
 							- BL_RO_DATA_BASE,	\
-						MT_RO_DATA | MT_SECURE)
+						MT_RO_DATA | EL3_PAS)
 #else
 #define ARM_MAP_BL_RO			MAP_REGION_FLAT(			\
 						BL_CODE_BASE,			\
 						BL_CODE_END - BL_CODE_BASE,	\
-						MT_CODE | MT_SECURE)
+						MT_CODE | EL3_PAS)
 #endif
 #if USE_COHERENT_MEM
 #define ARM_MAP_BL_COHERENT_RAM		MAP_REGION_FLAT(			\
 						BL_COHERENT_RAM_BASE,		\
 						BL_COHERENT_RAM_END		\
 							- BL_COHERENT_RAM_BASE, \
-						MT_DEVICE | MT_RW | MT_SECURE)
+						MT_DEVICE | MT_RW | EL3_PAS)
 #endif
 #if USE_ROMLIB
 #define ARM_MAP_ROMLIB_CODE		MAP_REGION_FLAT(			\
 						ROMLIB_RO_BASE,			\
 						ROMLIB_RO_LIMIT	- ROMLIB_RO_BASE,\
-						MT_CODE | MT_SECURE)
+						MT_CODE | EL3_PAS)
 
 #define ARM_MAP_ROMLIB_DATA		MAP_REGION_FLAT(			\
 						ROMLIB_RW_BASE,			\
 						ROMLIB_RW_END	- ROMLIB_RW_BASE,\
-						MT_MEMORY | MT_RW | MT_SECURE)
+						MT_MEMORY | MT_RW | EL3_PAS)
 #endif
 
 /*
@@ -300,7 +367,15 @@
 #define ARM_MAP_BL_CONFIG_REGION	MAP_REGION_FLAT(ARM_BL_RAM_BASE,	\
 						(ARM_FW_CONFIGS_LIMIT		\
 							- ARM_BL_RAM_BASE),	\
-						MT_MEMORY | MT_RW | MT_SECURE)
+						MT_MEMORY | MT_RW | EL3_PAS)
+/*
+ * Map L0_GPT with read and write permissions
+ */
+#if ENABLE_RME
+#define ARM_MAP_L0_GPT_REGION		MAP_REGION_FLAT(ARM_L0_GPT_ADDR_BASE,	\
+						ARM_L0_GPT_SIZE,		\
+						MT_MEMORY | MT_RW | MT_ROOT)
+#endif
 
 /*
  * The max number of regions like RO(code), coherent and data required by
@@ -312,16 +387,44 @@
 					 ARM_BL_REGIONS)
 
 /* Memory mapped Generic timer interfaces  */
+#ifdef PLAT_ARM_SYS_CNTCTL_BASE
+#define ARM_SYS_CNTCTL_BASE		PLAT_ARM_SYS_CNTCTL_BASE
+#else
 #define ARM_SYS_CNTCTL_BASE		UL(0x2a430000)
+#endif
+
+#ifdef PLAT_ARM_SYS_CNTREAD_BASE
+#define ARM_SYS_CNTREAD_BASE		PLAT_ARM_SYS_CNTREAD_BASE
+#else
 #define ARM_SYS_CNTREAD_BASE		UL(0x2a800000)
+#endif
+
+#ifdef PLAT_ARM_SYS_TIMCTL_BASE
+#define ARM_SYS_TIMCTL_BASE		PLAT_ARM_SYS_TIMCTL_BASE
+#else
 #define ARM_SYS_TIMCTL_BASE		UL(0x2a810000)
+#endif
+
+#ifdef PLAT_ARM_SYS_CNT_BASE_S
+#define ARM_SYS_CNT_BASE_S		PLAT_ARM_SYS_CNT_BASE_S
+#else
 #define ARM_SYS_CNT_BASE_S		UL(0x2a820000)
+#endif
+
+#ifdef PLAT_ARM_SYS_CNT_BASE_NS
+#define ARM_SYS_CNT_BASE_NS		PLAT_ARM_SYS_CNT_BASE_NS
+#else
 #define ARM_SYS_CNT_BASE_NS		UL(0x2a830000)
+#endif
 
 #define ARM_CONSOLE_BAUDRATE		115200
 
 /* Trusted Watchdog constants */
+#ifdef PLAT_ARM_SP805_TWDG_BASE
+#define ARM_SP805_TWDG_BASE		PLAT_ARM_SP805_TWDG_BASE
+#else
 #define ARM_SP805_TWDG_BASE		UL(0x2a490000)
+#endif
 #define ARM_SP805_TWDG_CLK_HZ		32768
 /* The TBBR document specifies a watchdog timeout of 256 seconds. SP805
  * asserts reset after two consecutive countdowns (2 x 128 = 256 sec) */
@@ -373,15 +476,32 @@
  */
 #define ARM_FW_CONFIGS_LIMIT		(ARM_BL_RAM_BASE + (PAGE_SIZE * 2))
 
+#if ENABLE_RME
+/*
+ * Store the L0 GPT on Trusted SRAM next to firmware
+ * configuration memory, 4KB aligned.
+ */
+#define ARM_L0_GPT_SIZE			(PAGE_SIZE)
+#define ARM_L0_GPT_ADDR_BASE		(ARM_FW_CONFIGS_LIMIT)
+#define ARM_L0_GPT_LIMIT		(ARM_L0_GPT_ADDR_BASE + ARM_L0_GPT_SIZE)
+#else
+#define ARM_L0_GPT_SIZE			U(0)
+#endif
+
 /*******************************************************************************
  * BL1 specific defines.
  * BL1 RW data is relocated from ROM to RAM at runtime so we need 2 sets of
  * addresses.
  ******************************************************************************/
 #define BL1_RO_BASE			PLAT_ARM_TRUSTED_ROM_BASE
+#ifdef PLAT_BL1_RO_LIMIT
+#define BL1_RO_LIMIT			PLAT_BL1_RO_LIMIT
+#else
 #define BL1_RO_LIMIT			(PLAT_ARM_TRUSTED_ROM_BASE	\
 					 + (PLAT_ARM_TRUSTED_ROM_SIZE - \
 					    PLAT_ARM_MAX_ROMLIB_RO_SIZE))
+#endif
+
 /*
  * Put BL1 RW at the top of the Trusted SRAM.
  */
@@ -460,6 +580,14 @@
 #endif
 #endif
 
+/******************************************************************************
+ * RMM specific defines
+ *****************************************************************************/
+#if ENABLE_RME
+#define RMM_BASE			(ARM_REALM_BASE)
+#define RMM_LIMIT			(RMM_BASE + ARM_REALM_SIZE)
+#endif
+
 #if !defined(__aarch64__) || JUNO_AARCH32_EL3_RUNTIME
 /*******************************************************************************
  * BL32 specific defines for EL3 runtime in AArch32 mode
diff --git a/include/plat/arm/common/arm_pas_def.h b/include/plat/arm/common/arm_pas_def.h
new file mode 100644
index 0000000..4fee41b
--- /dev/null
+++ b/include/plat/arm/common/arm_pas_def.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef ARM_PAS_DEF_H
+#define ARM_PAS_DEF_H
+
+#include <lib/gpt_rme/gpt_rme.h>
+#include <plat/arm/common/arm_def.h>
+
+/*****************************************************************************
+ * PAS regions used to initialize the Granule Protection Table (GPT)
+ ****************************************************************************/
+
+/*
+ * The PA space is initially mapped in the GPT as follows:
+ *
+ * ============================================================================
+ * Base Addr| Size        |L? GPT|PAS   |Content                 |Comment
+ * ============================================================================
+ * 0GB      | 1GB         |L0 GPT|ANY   |TBROM (EL3 code)        |Fixed mapping
+ *          |             |      |      |TSRAM (EL3 data)        |
+ *          |             |      |      |IO (incl.UARTs & GIC)   |
+ * ----------------------------------------------------------------------------
+ * 1GB      | 1GB         |L0 GPT|ANY   |IO                      |Fixed mapping
+ * ----------------------------------------------------------------------------
+ * 2GB      | 1GB         |L1 GPT|NS    |DRAM (NS Kernel)        |Use T.Descrip
+ * ----------------------------------------------------------------------------
+ * 3GB      |1GB-64MB     |L1 GPT|NS    |DRAM (NS Kernel)        |Use T.Descrip
+ * ----------------------------------------------------------------------------
+ * 4GB-64MB |64MB-32MB    |      |      |                        |
+ *          | -4MB        |L1 GPT|SECURE|DRAM TZC                |Use T.Descrip
+ * ----------------------------------------------------------------------------
+ * 4GB-32MB |             |      |      |                        |
+ * -3MB-1MB |32MB         |L1 GPT|REALM |RMM                     |Use T.Descrip
+ * ----------------------------------------------------------------------------
+ * 4GB-3MB  |             |      |      |                        |
+ * -1MB     |3MB          |L1 GPT|ROOT  |EL3 DRAM data           |Use T.Descrip
+ * ----------------------------------------------------------------------------
+ * 4GB-1MB  |1MB          |L1 GPT|ROOT  |DRAM (L1 GPTs, SCP TZC) |Fixed mapping
+ * ============================================================================
+ *
+ * - 4KB of L0 GPT reside in TSRAM, on top of the CONFIG section.
+ * - ~1MB of L1 GPTs reside at the top of DRAM1 (TZC area).
+ * - The first 1GB region has GPT_GPI_ANY and, therefore, is not protected by
+ *   the GPT.
+ * - The DRAM TZC area is split into three regions: the L1 GPT region plus
+ *   the 3MB directly below it are defined as GPT_GPI_ROOT, the 32MB Realm
+ *   region below that is defined as GPT_GPI_REALM, and the remainder is
+ *   defined as GPT_GPI_SECURE.
+ */
+
+/* TODO: This might not be the best way to map the PAS */
+
+/* Device memory 0 to 2GB */
+#define ARM_PAS_1_BASE			(U(0))
+#define ARM_PAS_1_SIZE			((ULL(1)<<31)) /* 2GB */
+
+/* NS memory 2GB to (end - 64MB) */
+#define ARM_PAS_2_BASE			(ARM_PAS_1_BASE + ARM_PAS_1_SIZE)
+#define ARM_PAS_2_SIZE			(ARM_NS_DRAM1_SIZE)
+
+/* Secure TZC region */
+#define ARM_PAS_3_BASE			(ARM_AP_TZC_DRAM1_BASE)
+#define ARM_PAS_3_SIZE			(ARM_AP_TZC_DRAM1_SIZE)
+
+#define ARM_PAS_GPI_ANY			MAP_GPT_REGION(ARM_PAS_1_BASE, \
+						       ARM_PAS_1_SIZE, \
+						       GPT_GPI_ANY)
+#define	ARM_PAS_KERNEL			GPT_MAP_REGION_GRANULE(ARM_PAS_2_BASE, \
+							       ARM_PAS_2_SIZE, \
+							       GPT_GPI_NS)
+
+#define ARM_PAS_SECURE			GPT_MAP_REGION_GRANULE(ARM_PAS_3_BASE, \
+							       ARM_PAS_3_SIZE, \
+							       GPT_GPI_SECURE)
+
+#define ARM_PAS_REALM			GPT_MAP_REGION_GRANULE(ARM_REALM_BASE, \
+							       ARM_REALM_SIZE, \
+							       GPT_GPI_REALM)
+
+#define ARM_PAS_EL3_DRAM		GPT_MAP_REGION_GRANULE(ARM_EL3_TZC_DRAM1_BASE, \
+							       ARM_EL3_TZC_DRAM1_SIZE, \
+							       GPT_GPI_ROOT)
+
+#define	ARM_PAS_GPTS			GPT_MAP_REGION_GRANULE(ARM_L1_GPT_ADDR_BASE, \
+							       ARM_L1_GPT_SIZE, \
+							       GPT_GPI_ROOT)
+
+/* GPT Configuration options */
+#define PLATFORM_L0GPTSZ		GPCCR_L0GPTSZ_30BITS
+
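+/*
+ * Illustrative usage sketch (informative, not part of the platform code):
+ * a platform could gather the PAS macros above into a pas_region_t array
+ * and hand it to the GPT library during boot, roughly as below. The names
+ * plat_pas_regions and plat_setup_gpt are hypothetical and the PPS/PGS
+ * constants are only examples; the ARM_PAS_*, ARM_L0_GPT_* and ARM_L1_GPT_*
+ * macros and the gpt_* APIs are the ones introduced by this patch.
+ *
+ *   static pas_region_t plat_pas_regions[] = {
+ *           ARM_PAS_GPI_ANY,
+ *           ARM_PAS_KERNEL,
+ *           ARM_PAS_SECURE,
+ *           ARM_PAS_REALM,
+ *           ARM_PAS_EL3_DRAM,
+ *           ARM_PAS_GPTS
+ *   };
+ *
+ *   void plat_setup_gpt(void)
+ *   {
+ *           gpt_init_l0_tables(GPCCR_PPS_4GB, ARM_L0_GPT_ADDR_BASE,
+ *                              ARM_L0_GPT_SIZE);
+ *           gpt_init_pas_l1_tables(GPCCR_PGS_4K, ARM_L1_GPT_ADDR_BASE,
+ *                                  ARM_L1_GPT_SIZE, plat_pas_regions,
+ *                                  ARRAY_SIZE(plat_pas_regions));
+ *           gpt_enable();
+ *   }
+ */
+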
+#endif /* ARM_PAS_DEF_H */
diff --git a/include/plat/arm/common/fconf_ethosn_getter.h b/include/plat/arm/common/fconf_ethosn_getter.h
index 0fd1f02..fcdc31f 100644
--- a/include/plat/arm/common/fconf_ethosn_getter.h
+++ b/include/plat/arm/common/fconf_ethosn_getter.h
@@ -14,7 +14,7 @@
 #define hw_config__ethosn_config_getter(prop) ethosn_config.prop
 #define hw_config__ethosn_core_addr_getter(idx) __extension__ ({	\
 	assert(idx < ethosn_config.num_cores);				\
-	ethosn_config.core_addr[idx];					\
+	ethosn_config.core[idx].addr;					\
 })
 
 #define ETHOSN_STATUS_DISABLED U(0)
@@ -22,10 +22,13 @@
 
 #define ETHOSN_CORE_NUM_MAX U(64)
 
+struct ethosn_core_t {
+	uint64_t addr;
+};
+
 struct ethosn_config_t {
-	uint8_t status;
 	uint32_t num_cores;
-	uint64_t core_addr[ETHOSN_CORE_NUM_MAX];
+	struct ethosn_core_t core[ETHOSN_CORE_NUM_MAX];
 };
 
 int fconf_populate_arm_ethosn(uintptr_t config);
diff --git a/include/plat/arm/common/plat_arm.h b/include/plat/arm/common/plat_arm.h
index 0a19d8b..1500ed3 100644
--- a/include/plat/arm/common/plat_arm.h
+++ b/include/plat/arm/common/plat_arm.h
@@ -41,7 +41,7 @@
  ******************************************************************************/
 #if SPM_MM
 #define ARM_TZC_REGIONS_DEF						\
-	{ARM_AP_TZC_DRAM1_BASE, ARM_EL3_TZC_DRAM1_END,			\
+	{ARM_AP_TZC_DRAM1_BASE, ARM_EL3_TZC_DRAM1_END + ARM_L1_GPT_SIZE,\
 		TZC_REGION_S_RDWR, 0},					\
 	{ARM_NS_DRAM1_BASE, ARM_NS_DRAM1_END, ARM_TZC_NS_DRAM_S_ACCESS, \
 		PLAT_ARM_TZC_NS_DEV_ACCESS}, 				\
@@ -51,9 +51,20 @@
 		PLAT_SP_IMAGE_NS_BUF_SIZE) - 1, TZC_REGION_S_NONE,	\
 		PLAT_ARM_TZC_NS_DEV_ACCESS}
 
+#elif ENABLE_RME
+#define ARM_TZC_REGIONS_DEF						    \
+	{ARM_AP_TZC_DRAM1_BASE, ARM_AP_TZC_DRAM1_END, TZC_REGION_S_RDWR, 0},\
+	{ARM_EL3_TZC_DRAM1_BASE, ARM_L1_GPT_END, TZC_REGION_S_RDWR, 0},	    \
+	{ARM_NS_DRAM1_BASE, ARM_NS_DRAM1_END, ARM_TZC_NS_DRAM_S_ACCESS,	    \
+		PLAT_ARM_TZC_NS_DEV_ACCESS},				    \
+	{ARM_REALM_BASE, ARM_REALM_END, ARM_TZC_NS_DRAM_S_ACCESS,	    \
+		PLAT_ARM_TZC_NS_DEV_ACCESS},				    \
+	{ARM_DRAM2_BASE, ARM_DRAM2_END, ARM_TZC_NS_DRAM_S_ACCESS,	    \
+		PLAT_ARM_TZC_NS_DEV_ACCESS}
+
 #else
 #define ARM_TZC_REGIONS_DEF						\
-	{ARM_AP_TZC_DRAM1_BASE, ARM_EL3_TZC_DRAM1_END,			\
+	{ARM_AP_TZC_DRAM1_BASE, ARM_EL3_TZC_DRAM1_END + ARM_L1_GPT_SIZE,\
 		TZC_REGION_S_RDWR, 0},					\
 	{ARM_NS_DRAM1_BASE, ARM_NS_DRAM1_END, ARM_TZC_NS_DRAM_S_ACCESS, \
 		PLAT_ARM_TZC_NS_DEV_ACCESS},	 			\
diff --git a/include/services/gtsi_svc.h b/include/services/gtsi_svc.h
new file mode 100644
index 0000000..cb942ed
--- /dev/null
+++ b/include/services/gtsi_svc.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef GTSI_SVC_H
+#define GTSI_SVC_H
+
+/* GTSI error codes. */
+#define GTSI_SUCCESS			0
+#define GTSI_ERROR_NOT_SUPPORTED	-1
+#define GTSI_ERROR_INVALID_ADDRESS	-2
+#define GTSI_ERROR_INVALID_PAS		-3
+
+/* The macros below are used to identify GTSI calls from the SMC function ID */
+#define GTSI_FNUM_MIN_VALUE	U(0x100)
+#define GTSI_FNUM_MAX_VALUE	U(0x101)
+#define is_gtsi_fid(fid) __extension__ ({		\
+	__typeof__(fid) _fid = (fid);			\
+	((GET_SMC_NUM(_fid) >= GTSI_FNUM_MIN_VALUE) &&	\
+	 (GET_SMC_NUM(_fid) <= GTSI_FNUM_MAX_VALUE)); })
+
+/* Get GTSI fastcall std FID from function number */
+#define GTSI_FID(smc_cc, func_num)			\
+	((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT)	|	\
+	 ((smc_cc) << FUNCID_CC_SHIFT)		|	\
+	 (OEN_STD_START << FUNCID_OEN_SHIFT)	|	\
+	 ((func_num) << FUNCID_NUM_SHIFT))
+
+#define GRAN_TRANS_TO_REALM_FNUM	U(0x100)
+#define GRAN_TRANS_TO_NS_FNUM		U(0x101)
+
+#define SMC_ASC_MARK_REALM	GTSI_FID(SMC_64, GRAN_TRANS_TO_REALM_FNUM)
+#define SMC_ASC_MARK_NONSECURE	GTSI_FID(SMC_64, GRAN_TRANS_TO_NS_FNUM)
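+
+/*
+ * Worked example (informative): with the usual SMCCC field layout (fast-call
+ * bit 31, calling convention bit 30, OEN in bits 29:24, function number in
+ * bits 15:0) and OEN_STD_START == 4, GTSI_FID(SMC_64, 0x100) expands to
+ * 0x80000000 | 0x40000000 | 0x04000000 | 0x100 = 0xC4000100, i.e. the value
+ * of SMC_ASC_MARK_REALM. The exact values depend on the SMCCC macros in
+ * lib/smccc.h.
+ */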
+
+#define GRAN_TRANS_RET_BAD_ADDR		-2
+#define GRAN_TRANS_RET_BAD_PAS		-3
+
+#endif /* GTSI_SVC_H */
diff --git a/include/services/rmi_svc.h b/include/services/rmi_svc.h
new file mode 100644
index 0000000..9106f08
--- /dev/null
+++ b/include/services/rmi_svc.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RMI_SVC_H
+#define RMI_SVC_H
+
+#include <lib/smccc.h>
+#include <lib/utils_def.h>
+
+/* RMI error codes. */
+#define RMI_SUCCESS			0
+#define RMI_ERROR_NOT_SUPPORTED		-1
+#define RMI_ERROR_INVALID_ADDRESS	-2
+#define RMI_ERROR_INVALID_PAS		-3
+
+/* The macros below are used to identify RMI calls from the SMC function ID */
+#define RMI_FNUM_MIN_VALUE	U(0x00)
+#define RMI_FNUM_MAX_VALUE	U(0x20)
+#define is_rmi_fid(fid) __extension__ ({		\
+	__typeof__(fid) _fid = (fid);			\
+	((GET_SMC_NUM(_fid) >= RMI_FNUM_MIN_VALUE) &&	\
+	 (GET_SMC_NUM(_fid) <= RMI_FNUM_MAX_VALUE) &&	\
+	 (GET_SMC_TYPE(_fid) == SMC_TYPE_FAST)	   &&	\
+	 (GET_SMC_CC(_fid) == SMC_64)              &&	\
+	 (GET_SMC_OEN(_fid) == OEN_ARM_START)      &&	\
+	 ((_fid & 0x00FE0000) == 0U)); })
+
+/* Get RMI fastcall std FID from function number */
+#define RMI_FID(smc_cc, func_num)			\
+	((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT)	|	\
+	((smc_cc) << FUNCID_CC_SHIFT)		|	\
+	(OEN_ARM_START << FUNCID_OEN_SHIFT)	|	\
+	((func_num) << FUNCID_NUM_SHIFT))
+
+/*
+ * SMC_RMM_INIT_COMPLETE is the only function in the RMI that originates from
+ * the Realm world and is handled by the RMMD. The remaining functions are
+ * always invoked by the Normal world, forwarded by RMMD and handled by the
+ * RMM.
+ */
+#define RMI_FNUM_REQ_COMPLETE		U(0x10)
+#define RMI_FNUM_VERSION_REQ		U(0x00)
+
+#define RMI_FNUM_GRAN_NS_REALM		U(0x01)
+#define RMI_FNUM_GRAN_REALM_NS		U(0x10)
+
+/* RMI SMC64 FIDs handled by the RMMD */
+#define RMI_RMM_REQ_COMPLETE		RMI_FID(SMC_64, RMI_FNUM_REQ_COMPLETE)
+#define RMI_RMM_REQ_VERSION		RMI_FID(SMC_64, RMI_FNUM_VERSION_REQ)
+
+#define RMI_RMM_GRANULE_DELEGATE	RMI_FID(SMC_64, RMI_FNUM_GRAN_NS_REALM)
+#define RMI_RMM_GRANULE_UNDELEGATE	RMI_FID(SMC_64, RMI_FNUM_GRAN_REALM_NS)
+
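+/*
+ * Worked example (informative): assuming the standard SMCCC layout and
+ * OEN_ARM_START == 0, RMI_FID(SMC_64, RMI_FNUM_GRAN_NS_REALM) expands to
+ * 0x80000000 | 0x40000000 | 0x00000000 | 0x01 = 0xC4000001, the value of
+ * RMI_RMM_GRANULE_DELEGATE. is_rmi_fid() accepts it because the function
+ * number lies within [0x00, 0x20] and bits [23:17] of the FID are zero.
+ */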
+
+#define RMI_ABI_VERSION_GET_MAJOR(_version) ((_version) >> 16)
+#define RMI_ABI_VERSION_GET_MINOR(_version) ((_version) & 0xFFFF)
+
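+/*
+ * For example (informative), a version word of 0x00030001 decodes to major
+ * version 3 and minor version 1 with the accessors above.
+ */
+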
+/* Reserve a special value for MBZ parameters. */
+#define RMI_PARAM_MBZ			U(0x0)
+
+#endif /* RMI_SVC_H */
diff --git a/include/services/rmmd_svc.h b/include/services/rmmd_svc.h
new file mode 100644
index 0000000..132973b
--- /dev/null
+++ b/include/services/rmmd_svc.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RMMD_SVC_H
+#define RMMD_SVC_H
+
+#ifndef __ASSEMBLER__
+#include <stdint.h>
+
+int rmmd_setup(void);
+uint64_t rmmd_rmi_handler(uint32_t smc_fid,
+		uint64_t x1,
+		uint64_t x2,
+		uint64_t x3,
+		uint64_t x4,
+		void *cookie,
+		void *handle,
+		uint64_t flags);
+
+uint64_t rmmd_gtsi_handler(uint32_t smc_fid,
+		uint64_t x1,
+		uint64_t x2,
+		uint64_t x3,
+		uint64_t x4,
+		void *cookie,
+		void *handle,
+		uint64_t flags);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* RMMD_SVC_H */
diff --git a/include/services/trp/platform_trp.h b/include/services/trp/platform_trp.h
new file mode 100644
index 0000000..b34da85
--- /dev/null
+++ b/include/services/trp/platform_trp.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_TRP_H
+#define PLATFORM_TRP_H
+
+/*******************************************************************************
+ * Mandatory TRP functions (only if platform contains a TRP)
+ ******************************************************************************/
+void trp_early_platform_setup(void);
+
+#endif /* PLATFORM_TRP_H */
diff --git a/include/tools_share/firmware_image_package.h b/include/tools_share/firmware_image_package.h
index dc65cc6..bd5b14b 100644
--- a/include/tools_share/firmware_image_package.h
+++ b/include/tools_share/firmware_image_package.h
@@ -38,6 +38,8 @@
 	{{0x8e,  0xa8, 0x7b, 0xb1}, {0xcf, 0xa2}, {0x3f, 0x4d}, 0x85, 0xfd, {0xe7, 0xbb, 0xa5, 0x02, 0x20, 0xd9} }
 #define UUID_NON_TRUSTED_FIRMWARE_BL33 \
 	{{0xd6,  0xd0, 0xee, 0xa7}, {0xfc, 0xea}, {0xd5, 0x4b}, 0x97, 0x82, {0x99, 0x34, 0xf2, 0x34, 0xb6, 0xe4} }
+#define UUID_REALM_MONITOR_MGMT_FIRMWARE \
+	{{0x6c,  0x07, 0x62, 0xa6}, {0x12, 0xf2}, {0x4b, 0x56}, 0x92, 0xcb, {0xba, 0x8f, 0x63, 0x36, 0x06, 0xd9} }
 /* Key certificates */
 #define UUID_ROT_KEY_CERT \
 	{{0x86,  0x2d, 0x1d, 0x72}, {0xf8, 0x60}, {0xe4, 0x11}, 0x92, 0x0b, {0x8b, 0xe7, 0x62, 0x16, 0x0f, 0x24} }
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
index b6f6c9d..6e4d1fc 100644
--- a/lib/aarch64/misc_helpers.S
+++ b/lib/aarch64/misc_helpers.S
@@ -15,6 +15,7 @@
 	.globl	zero_normalmem
 	.globl	zeromem
 	.globl	memcpy16
+	.globl	gpt_tlbi_by_pa
 
 	.globl	disable_mmu_el1
 	.globl	disable_mmu_el3
@@ -162,7 +163,8 @@
 	 * Check for M bit (MMU enabled) of the current SCTLR_EL(1|3)
 	 * register value and panic if the MMU is disabled.
 	 */
-#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
+#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && \
+	(BL2_AT_EL3 || ENABLE_RME))
 	mrs	tmp1, sctlr_el3
 #else
 	mrs	tmp1, sctlr_el1
@@ -592,3 +594,20 @@
 	b.lo	1b
 	ret
 endfunc fixup_gdt_reloc
+
+/*
+ * TODO: Currently only supports size of 4KB,
+ * support other sizes as well.
+ */
+func gpt_tlbi_by_pa
+#if ENABLE_ASSERTIONS
+	cmp	x1, #PAGE_SIZE_4KB
+	ASM_ASSERT(eq)
+	tst	x0, #(PAGE_SIZE_MASK)
+	ASM_ASSERT(eq)
+#endif
+	lsr	x0, x0, #FOUR_KB_SHIFT	/* 4KB size encoding is zero */
+	sys	#6, c8, c4, #3, x0 	/* TLBI RPAOS, <Xt> */
+	dsb	sy
+	ret
+endfunc gpt_tlbi_by_pa
diff --git a/lib/cpus/aarch64/cortex_a710.S b/lib/cpus/aarch64/cortex_a710.S
index 75b7647..7d7fbd8 100644
--- a/lib/cpus/aarch64/cortex_a710.S
+++ b/lib/cpus/aarch64/cortex_a710.S
@@ -160,6 +160,62 @@
 	b       cpu_rev_var_ls
 endfunc check_errata_2017096
 
+
+/* ---------------------------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 2083908.
+ * This applies to revision r2p0 of Cortex-A710 and is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------------------------
+ */
+func errata_a710_2083908_wa
+	/* Compare x0 against revision r2p0 */
+	mov	x17, x30
+	bl	check_errata_2083908
+	cbz	x0, 1f
+	mrs	x1, CORTEX_A710_CPUACTLR5_EL1
+	orr	x1, x1, CORTEX_A710_CPUACTLR5_EL1_BIT_13
+	msr	CORTEX_A710_CPUACTLR5_EL1, x1
+1:
+	ret	x17
+endfunc errata_a710_2083908_wa
+
+func check_errata_2083908
+	/* Applies to r2p0 */
+	mov	x1, #CPU_REV(2, 0)
+	mov	x2, #CPU_REV(2, 0)
+	b	cpu_rev_var_range
+endfunc check_errata_2083908
+
+/* ---------------------------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 2058056.
+ * This applies to revisions r0p0, r1p0 and r2p0 of Cortex-A710 and is still
+ * open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------------------------
+ */
+func errata_a710_2058056_wa
+	/* Compare x0 against revision r2p0 */
+	mov	x17, x30
+	bl	check_errata_2058056
+	cbz	x0, 1f
+	mrs	x1, CORTEX_A710_CPUECTLR2_EL1
+	mov	x0, #CORTEX_A710_CPUECTLR2_EL1_PF_MODE_CNSRV
+	bfi	x1, x0, #CPUECTLR2_EL1_PF_MODE_LSB, #CPUECTLR2_EL1_PF_MODE_WIDTH
+	msr	CORTEX_A710_CPUECTLR2_EL1, x1
+1:
+	ret	x17
+endfunc errata_a710_2058056_wa
+
+func check_errata_2058056
+	/* Applies to r0p0, r1p0 and r2p0 */
+	mov	x1, #0x20
+	b	cpu_rev_var_ls
+endfunc check_errata_2058056
+
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ----------------------------------------------------
@@ -194,6 +250,8 @@
 	report_errata ERRATA_A710_2081180, cortex_a710, 2081180
 	report_errata ERRATA_A710_2055002, cortex_a710, 2055002
 	report_errata ERRATA_A710_2017096, cortex_a710, 2017096
+	report_errata ERRATA_A710_2083908, cortex_a710, 2083908
+	report_errata ERRATA_A710_2058056, cortex_a710, 2058056
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -225,8 +283,18 @@
 #endif
 
 #if ERRATA_A710_2017096
-	mov     x0, x18
-	bl      errata_a710_2017096_wa
+	mov	x0, x18
+	bl	errata_a710_2017096_wa
+#endif
+
+#if ERRATA_A710_2083908
+	mov	x0, x18
+	bl	errata_a710_2083908_wa
+#endif
+
+#if ERRATA_A710_2058056
+	mov	x0, x18
+	bl	errata_a710_2058056_wa
 #endif
 	isb
 	ret	x19
diff --git a/lib/cpus/aarch64/cortex_a78.S b/lib/cpus/aarch64/cortex_a78.S
index 3a74571..4e8a228 100644
--- a/lib/cpus/aarch64/cortex_a78.S
+++ b/lib/cpus/aarch64/cortex_a78.S
@@ -198,6 +198,35 @@
 	b	cpu_rev_var_ls
 endfunc check_errata_1952683
 
+/* --------------------------------------------------
+ * Errata Workaround for Cortex-A78 Erratum 2132060.
+ * This applies to revisions r0p0, r1p0, r1p1, and r1p2.
+ * It is still open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * --------------------------------------------------
+ */
+func errata_a78_2132060_wa
+	/* Check revision. */
+	mov	x17, x30
+	bl	check_errata_2132060
+	cbz	x0, 1f
+
+	/* Apply the workaround. */
+	mrs	x1, CORTEX_A78_CPUECTLR_EL1
+	mov	x0, #CORTEX_A78_CPUECTLR_EL1_PF_MODE_CNSRV
+	bfi	x1, x0, #CPUECTLR_EL1_PF_MODE_LSB, #CPUECTLR_EL1_PF_MODE_WIDTH
+	msr	CORTEX_A78_CPUECTLR_EL1, x1
+1:
+	ret	x17
+endfunc errata_a78_2132060_wa
+
+func check_errata_2132060
+	/* Applies to r0p0, r1p0, r1p1, and r1p2 */
+	mov	x1, #0x12
+	b	cpu_rev_var_ls
+endfunc check_errata_2132060
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A78
 	 * -------------------------------------------------
@@ -232,6 +261,11 @@
 	bl	errata_a78_1952683_wa
 #endif
 
+#if ERRATA_A78_2132060
+	mov	x0, x18
+	bl	errata_a78_2132060_wa
+#endif
+
 #if ENABLE_AMU
 	/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
 	mrs	x0, actlr_el3
@@ -291,6 +325,7 @@
 	report_errata ERRATA_A78_1951500, cortex_a78, 1951500
 	report_errata ERRATA_A78_1821534, cortex_a78, 1821534
 	report_errata ERRATA_A78_1952683, cortex_a78, 1952683
+	report_errata ERRATA_A78_2132060, cortex_a78, 2132060
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/cortex_hayes.S b/lib/cpus/aarch64/cortex_hayes.S
new file mode 100644
index 0000000..445a691
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_hayes.S
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_hayes.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex Hayes must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex Hayes supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+	/* ----------------------------------------------------
+	 * HW will do the cache maintenance while powering down
+	 * ----------------------------------------------------
+	 */
+func cortex_hayes_core_pwr_dwn
+	/* ---------------------------------------------------
+	 * Enable CPU power down bit in power control register
+	 * ---------------------------------------------------
+	 */
+	mrs	x0, CORTEX_HAYES_CPUPWRCTLR_EL1
+	orr	x0, x0, #CORTEX_HAYES_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+	msr	CORTEX_HAYES_CPUPWRCTLR_EL1, x0
+	isb
+	ret
+endfunc cortex_hayes_core_pwr_dwn
+
+	/*
+	 * Errata printing function for Cortex Hayes. Must follow AAPCS.
+	 */
+#if REPORT_ERRATA
+func cortex_hayes_errata_report
+	ret
+endfunc cortex_hayes_errata_report
+#endif
+
+func cortex_hayes_reset_func
+	/* Disable speculative loads */
+	msr	SSBS, xzr
+	isb
+	ret
+endfunc cortex_hayes_reset_func
+
+	/* ---------------------------------------------
+	 * This function provides Cortex Hayes specific
+	 * register information for crash reporting.
+	 * It needs to return with x6 pointing to
+	 * a list of register names in ascii and
+	 * x8 - x15 having values of registers to be
+	 * reported.
+	 * ---------------------------------------------
+	 */
+.section .rodata.cortex_hayes_regs, "aS"
+cortex_hayes_regs:  /* The ascii list of register names to be reported */
+	.asciz	"cpuectlr_el1", ""
+
+func cortex_hayes_cpu_reg_dump
+	adr	x6, cortex_hayes_regs
+	mrs	x8, CORTEX_HAYES_CPUECTLR_EL1
+	ret
+endfunc cortex_hayes_cpu_reg_dump
+
+declare_cpu_ops cortex_hayes, CORTEX_HAYES_MIDR, \
+	cortex_hayes_reset_func, \
+	cortex_hayes_core_pwr_dwn
diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S
index 9e7bbf7..330cc59 100644
--- a/lib/cpus/aarch64/neoverse_n2.S
+++ b/lib/cpus/aarch64/neoverse_n2.S
@@ -183,6 +183,35 @@
 	b	cpu_rev_var_ls
 endfunc check_errata_2138956
 
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N2 Erratum 2138953.
+ * This applies to revision r0p0 of Neoverse N2. It is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * --------------------------------------------------
+ */
+func errata_n2_2138953_wa
+	/* Check revision. */
+	mov	x17, x30
+	bl	check_errata_2138953
+	cbz	x0, 1f
+
+	/* Apply instruction patching sequence */
+	mrs	x1, NEOVERSE_N2_CPUECTLR2_EL1
+	mov	x0, #NEOVERSE_N2_CPUECTLR2_EL1_PF_MODE_CNSRV
+	bfi	x1, x0, #CPUECTLR2_EL1_PF_MODE_LSB, #CPUECTLR2_EL1_PF_MODE_WIDTH
+	msr	NEOVERSE_N2_CPUECTLR2_EL1, x1
+1:
+	ret	x17
+endfunc errata_n2_2138953_wa
+
+func check_errata_2138953
+	/* Applies to r0p0 */
+	mov	x1, #0x00
+	b	cpu_rev_var_ls
+endfunc check_errata_2138953
+
 	/* -------------------------------------------
 	 * The CPU Ops reset function for Neoverse N2.
 	 * -------------------------------------------
@@ -224,6 +253,11 @@
 	bl	errata_n2_2138956_wa
 #endif
 
+#if ERRATA_N2_2138953
+	mov	x0, x18
+	bl	errata_n2_2138953_wa
+#endif
+
 #if ENABLE_AMU
 	/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
 	mrs	x0, cptr_el3
@@ -287,8 +321,9 @@
 	report_errata ERRATA_N2_2002655, neoverse_n2, 2002655
 	report_errata ERRATA_N2_2067956, neoverse_n2, 2067956
 	report_errata ERRATA_N2_2025414, neoverse_n2, 2025414
-        report_errata ERRATA_N2_2189731, neoverse_n2, 2189731
+	report_errata ERRATA_N2_2189731, neoverse_n2, 2189731
 	report_errata ERRATA_N2_2138956, neoverse_n2, 2138956
+	report_errata ERRATA_N2_2138953, neoverse_n2, 2138953
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/neoverse_v1.S b/lib/cpus/aarch64/neoverse_v1.S
index 0bcf52a..200f67d 100644
--- a/lib/cpus/aarch64/neoverse_v1.S
+++ b/lib/cpus/aarch64/neoverse_v1.S
@@ -259,6 +259,35 @@
 	b	cpu_rev_var_ls
 endfunc check_errata_2139242
 
+	/* --------------------------------------------------
+	 * Errata Workaround for Neoverse V1 Erratum 2108267.
+	 * This applies to revisions r0p0, r1p0, and r1p1, and
+	 * is still open.
+	 * x0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: x0-x1, x17
+	 * --------------------------------------------------
+	 */
+func errata_neoverse_v1_2108267_wa
+	/* Check workaround compatibility. */
+	mov	x17, x30
+	bl	check_errata_2108267
+	cbz	x0, 1f
+
+	/* Apply the workaround. */
+	mrs	x1, NEOVERSE_V1_CPUECTLR_EL1
+	mov	x0, #NEOVERSE_V1_CPUECTLR_EL1_PF_MODE_CNSRV
+	bfi	x1, x0, #CPUECTLR_EL1_PF_MODE_LSB, #CPUECTLR_EL1_PF_MODE_WIDTH
+	msr	NEOVERSE_V1_CPUECTLR_EL1, x1
+1:
+	ret	x17
+endfunc errata_neoverse_v1_2108267_wa
+
+func check_errata_2108267
+	/* Applies to r0p0, r1p0, r1p1 */
+	mov	x1, #0x11
+	b	cpu_rev_var_ls
+endfunc check_errata_2108267
+
 	/* ---------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ---------------------------------------------
@@ -296,6 +325,7 @@
 	report_errata ERRATA_V1_1940577, neoverse_v1, 1940577
 	report_errata ERRATA_V1_1966096, neoverse_v1, 1966096
 	report_errata ERRATA_V1_2139242, neoverse_v1, 2139242
+	report_errata ERRATA_V1_2108267, neoverse_v1, 2108267
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -344,6 +374,11 @@
 	bl	errata_neoverse_v1_2139242_wa
 #endif
 
+#if ERRATA_V1_2108267
+	mov	x0, x18
+	bl	errata_neoverse_v1_2108267_wa
+#endif
+
 	ret	x19
 endfunc neoverse_v1_reset_func
 
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 6103a5a..138f7a5 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -327,6 +327,10 @@
 # to revision r0p0 of the A78 cpu and was fixed in the revision r1p0.
 ERRATA_A78_1952683	?=0
 
+# Flag to apply erratum 2132060 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r1p1, and r1p2 of the A78 cpu. It is still open.
+ERRATA_A78_2132060	?=0
+
 # Flag to apply T32 CLREX workaround during reset. This erratum applies
 # only to r0p0 and r1p0 of the Neoverse N1 cpu.
 ERRATA_N1_1043202	?=0
@@ -411,11 +415,15 @@
 # Flag to apply erratum 1966096 workaround during reset. This erratum applies
 # to revisions r1p0 and r1p1 of the Neoverse V1 CPU and is open.  This issue
 # exists in r0p0 as well but there is no workaround for that revision.
-ERRATA_V1_1966096   ?=0
+ERRATA_V1_1966096	?=0
 
 # Flag to apply erratum 2139242 workaround during reset. This erratum applies
 # to revisions r0p0, r1p0, and r1p1 of the Neoverse V1 cpu and is still open.
+ERRATA_V1_2139242	?=0
+
+# Flag to apply erratum 2108267 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, and r1p1 of the Neoverse V1 cpu and is still open.
-ERRATA_V1_2139242   ?=0
+ERRATA_V1_2108267	?=0
 
 # Flag to apply erratum 1987031 workaround during reset. This erratum applies
 # to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is still open.
@@ -425,6 +433,14 @@
 # to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is still open.
 ERRATA_A710_2081180	?=0
 
+# Flag to apply erratum 2083908 workaround during reset. This erratum applies
+# to revision r2p0 of the Cortex-A710 cpu and is still open.
+ERRATA_A710_2083908	?=0
+
+# Flag to apply erratum 2058056 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is still open.
+ERRATA_A710_2058056	?=0
+
 # Flag to apply erratum 2067956 workaround during reset. This erratum applies
 # to revision r0p0 of the Neoverse N2 cpu and is still open.
 ERRATA_N2_2067956	?=0
@@ -441,6 +457,10 @@
 # to revision r0p0 of the Neoverse N2 cpu and is still open.
 ERRATA_N2_2138956	?=0
 
+# Flag to apply erratum 2138953 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is still open.
+ERRATA_N2_2138953	?=0
+
 # Flag to apply erratum 2055002 workaround during reset. This erratum applies
 # to revision r1p0, r2p0 of the Cortex-A710 cpu and is still open.
 ERRATA_A710_2055002	?=0
@@ -710,6 +730,10 @@
 $(eval $(call assert_boolean,ERRATA_A78_1952683))
 $(eval $(call add_define,ERRATA_A78_1952683))
 
+# Process ERRATA_A78_2132060 flag
+$(eval $(call assert_boolean,ERRATA_A78_2132060))
+$(eval $(call add_define,ERRATA_A78_2132060))
+
 # Process ERRATA_N1_1043202 flag
 $(eval $(call assert_boolean,ERRATA_N1_1043202))
 $(eval $(call add_define,ERRATA_N1_1043202))
@@ -798,6 +822,10 @@
 $(eval $(call assert_boolean,ERRATA_V1_2139242))
 $(eval $(call add_define,ERRATA_V1_2139242))
 
+# Process ERRATA_V1_2108267 flag
+$(eval $(call assert_boolean,ERRATA_V1_2108267))
+$(eval $(call add_define,ERRATA_V1_2108267))
+
 # Process ERRATA_A710_1987031 flag
 $(eval $(call assert_boolean,ERRATA_A710_1987031))
 $(eval $(call add_define,ERRATA_A710_1987031))
@@ -806,6 +834,14 @@
 $(eval $(call assert_boolean,ERRATA_A710_2081180))
 $(eval $(call add_define,ERRATA_A710_2081180))
 
+# Process ERRATA_A710_2083908 flag
+$(eval $(call assert_boolean,ERRATA_A710_2083908))
+$(eval $(call add_define,ERRATA_A710_2083908))
+
+# Process ERRATA_A710_2058056 flag
+$(eval $(call assert_boolean,ERRATA_A710_2058056))
+$(eval $(call add_define,ERRATA_A710_2058056))
+
 # Process ERRATA_N2_2067956 flag
 $(eval $(call assert_boolean,ERRATA_N2_2067956))
 $(eval $(call add_define,ERRATA_N2_2067956))
@@ -822,6 +858,10 @@
 $(eval $(call assert_boolean,ERRATA_N2_2138956))
 $(eval $(call add_define,ERRATA_N2_2138956))
 
+# Process ERRATA_N2_2138953 flag
+$(eval $(call assert_boolean,ERRATA_N2_2138953))
+$(eval $(call add_define,ERRATA_N2_2138953))
+
 # Process ERRATA_A710_2055002 flag
 $(eval $(call assert_boolean,ERRATA_A710_2055002))
 $(eval $(call add_define,ERRATA_A710_2055002))
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 40e7ddf..e270ad0 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -193,6 +193,11 @@
 	str	x13, [x0, #CTX_SCXTNUM_EL2]
 #endif
 
+#if ENABLE_FEAT_HCX
+	mrs	x14, hcrx_el2
+	str	x14, [x0, #CTX_HCRX_EL2]
+#endif
+
 	ret
 endfunc el2_sysregs_context_save
 
@@ -362,6 +367,11 @@
 	msr	scxtnum_el2, x13
 #endif
 
+#if ENABLE_FEAT_HCX
+	ldr	x14, [x0, #CTX_HCRX_EL2]
+	msr	hcrx_el2, x14
+#endif
+
 	ret
 endfunc el2_sysregs_context_restore
 
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 52102dd..0ec7e7e 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -93,24 +93,49 @@
 	scr_el3 = read_scr();
 	scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT |
 			SCR_ST_BIT | SCR_HCE_BIT);
+
+#if ENABLE_RME
+	/* When RME support is enabled, clear the NSE bit as well. */
+	scr_el3 &= ~SCR_NSE_BIT;
+#endif /* ENABLE_RME */
+
 	/*
 	 * SCR_NS: Set the security state of the next EL.
 	 */
-	if (security_state != SECURE)
+	if (security_state == NON_SECURE) {
 		scr_el3 |= SCR_NS_BIT;
+	}
+
+#if ENABLE_RME
+	/* Check for realm state if RME support enabled. */
+	if (security_state == REALM) {
+		scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT | SCR_EnSCXT_BIT;
+	}
+#endif /* ENABLE_RME */
+
 	/*
 	 * SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for next
 	 *  Exception level as specified by SPSR.
 	 */
-	if (GET_RW(ep->spsr) == MODE_RW_64)
+	if (GET_RW(ep->spsr) == MODE_RW_64) {
 		scr_el3 |= SCR_RW_BIT;
+	}
 	/*
 	 * SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical
 	 *  Secure timer registers to EL3, from AArch64 state only, if specified
 	 *  by the entrypoint attributes.
 	 */
-	if (EP_GET_ST(ep->h.attr) != 0U)
+	if (EP_GET_ST(ep->h.attr) != 0U) {
 		scr_el3 |= SCR_ST_BIT;
+	}
+
+	/*
+	 * If FEAT_HCX is enabled, enable access to HCRX_EL2 by setting
+	 * SCR_EL3.HXEn.
+	 */
+#if ENABLE_FEAT_HCX
+	scr_el3 |= SCR_HXEn_BIT;
+#endif
 
 #if RAS_TRAP_LOWER_EL_ERR_ACCESS
 	/*
@@ -144,8 +169,9 @@
 	 * If the Secure world wants to use pointer authentication,
 	 * CTX_INCLUDE_PAUTH_REGS must be set to 1.
 	 */
-	if (security_state == NON_SECURE)
+	if (security_state == NON_SECURE) {
 		scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
+	}
 #endif /* !CTX_INCLUDE_PAUTH_REGS */
 
 #if !CTX_INCLUDE_MTE_REGS || ENABLE_ASSERTIONS
@@ -180,8 +206,14 @@
 	/*
 	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
 	 *  indicated by the interrupt routing model for BL31.
+	 *
+	 * TODO: The interrupt routing model code is not updated for REALM
+	 * state. Use the default values of IRQ = FIQ = 0 for REALM security
+	 * state for now.
 	 */
-	scr_el3 |= get_scr_el3_from_routing_model(security_state);
+	if (security_state != REALM) {
+		scr_el3 |= get_scr_el3_from_routing_model(security_state);
+	}
 #endif
 
 	/* Save the initialized value of CPTR_EL3 register */
@@ -248,9 +280,9 @@
 	 *  required by PSCI specification)
 	 */
 	sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0U;
-	if (GET_RW(ep->spsr) == MODE_RW_64)
+	if (GET_RW(ep->spsr) == MODE_RW_64) {
 		sctlr_elx |= SCTLR_EL1_RES1;
-	else {
+	} else {
 		/*
 		 * If the target execution state is AArch32 then the following
 		 * fields need to be set.
@@ -405,7 +437,8 @@
 }
 
 /*******************************************************************************
- * Prepare the CPU system registers for first entry into secure or normal world
+ * Prepare the CPU system registers for first entry into realm, secure, or
+ * normal world.
  *
  * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized
  * If execution is requested to non-secure EL1 or svc mode, and the CPU supports
@@ -489,7 +522,7 @@
 			 * architecturally UNKNOWN on reset and are set to zero
 			 * except for field(s) listed below.
 			 *
-			 * CNTHCTL_EL2.EL1PCEN: Set to one to disable traps to
+			 * CNTHCTL_EL2.EL1PTEN: Set to one to disable traps to
 			 *  Hyp mode of Non-secure EL0 and EL1 accesses to the
 			 *  physical timer registers.
 			 *
@@ -637,10 +670,10 @@
 	u_register_t scr_el3 = read_scr();
 
 	/*
-	 * Always save the non-secure EL2 context, only save the
+	 * Always save the non-secure and realm EL2 context, only save the
 	 * S-EL2 context if S-EL2 is enabled.
 	 */
-	if ((security_state == NON_SECURE) ||
+	if ((security_state != SECURE) ||
 	    ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
 		cpu_context_t *ctx;
 
@@ -659,10 +692,10 @@
 	u_register_t scr_el3 = read_scr();
 
 	/*
-	 * Always restore the non-secure EL2 context, only restore the
+	 * Always restore the non-secure and realm EL2 context, only restore the
 	 * S-EL2 context if S-EL2 is enabled.
 	 */
-	if ((security_state == NON_SECURE) ||
+	if ((security_state != SECURE) ||
 	    ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
 		cpu_context_t *ctx;
 
diff --git a/lib/gpt_rme/gpt_rme.c b/lib/gpt_rme/gpt_rme.c
new file mode 100644
index 0000000..1f90e64
--- /dev/null
+++ b/lib/gpt_rme/gpt_rme.c
@@ -0,0 +1,1112 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdint.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include "gpt_rme_private.h"
+#include <lib/gpt_rme/gpt_rme.h>
+#include <lib/smccc.h>
+#include <lib/spinlock.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#if !ENABLE_RME
+#error "ENABLE_RME must be enabled to use the GPT library."
+#endif
+
+/*
+ * Lookup T from PPS
+ *
+ *   PPS    Size    T
+ *   0b000  4GB     32
+ *   0b001  64GB    36
+ *   0b010  1TB     40
+ *   0b011  4TB     42
+ *   0b100  16TB    44
+ *   0b101  256TB   48
+ *   0b110  4PB     52
+ *
+ * See section 15.1.27 of the RME specification.
+ */
+static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T,
+					   PPS_1TB_T, PPS_4TB_T,
+					   PPS_16TB_T, PPS_256TB_T,
+					   PPS_4PB_T};
+
+/*
+ * Lookup P from PGS
+ *
+ *   PGS    Size    P
+ *   0b00   4KB     12
+ *   0b10   16KB    14
+ *   0b01   64KB    16
+ *
+ * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB; this is not a typo.
+ *
+ * See section 15.1.27 of the RME specification.
+ */
+static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P};
+
+/*
+ * This structure contains GPT configuration data.
+ */
+typedef struct {
+	uintptr_t plat_gpt_l0_base;
+	gpccr_pps_e pps;
+	gpt_t_val_e t;
+	gpccr_pgs_e pgs;
+	gpt_p_val_e p;
+} gpt_config_t;
+
+static gpt_config_t gpt_config;
+
+/* These variables are used during initialization of the L1 tables. */
+static unsigned int gpt_next_l1_tbl_idx;
+static uintptr_t gpt_l1_tbl;
+
+/*
+ * This function checks to see if a GPI value is valid.
+ *
+ * These are valid GPI values.
+ *   GPT_GPI_NO_ACCESS   U(0x0)
+ *   GPT_GPI_SECURE      U(0x8)
+ *   GPT_GPI_NS          U(0x9)
+ *   GPT_GPI_ROOT        U(0xA)
+ *   GPT_GPI_REALM       U(0xB)
+ *   GPT_GPI_ANY         U(0xF)
+ *
+ * Parameters
+ *   gpi		GPI to check for validity.
+ *
+ * Return
+ *   true for a valid GPI, false for an invalid one.
+ */
+static bool gpt_is_gpi_valid(unsigned int gpi)
+{
+	if ((gpi == GPT_GPI_NO_ACCESS) || (gpi == GPT_GPI_ANY) ||
+	    ((gpi >= GPT_GPI_SECURE) && (gpi <= GPT_GPI_REALM))) {
+		return true;
+	} else {
+		return false;
+	}
+}
+
+/*
+ * This function checks to see if two PAS regions overlap.
+ *
+ * Parameters
+ *   base_1: base address of first PAS
+ *   size_1: size of first PAS
+ *   base_2: base address of second PAS
+ *   size_2: size of second PAS
+ *
+ * Return
+ *   True if PAS regions overlap, false if they do not.
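+ *
+ * Example (informative): base_1 = 0x1000, size_1 = 0x1000 and base_2 = 0x1800,
+ * size_2 = 0x1000 overlap because 0x2000 > 0x1800 and 0x2800 > 0x1000.
+ * Adjacent regions such as [0x1000, 0x2000) and [0x2000, 0x3000) do not
+ * overlap, since the first comparison fails on equality.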
+ */
+static bool gpt_check_pas_overlap(uintptr_t base_1, size_t size_1,
+				  uintptr_t base_2, size_t size_2)
+{
+	if (((base_1 + size_1) > base_2) && ((base_2 + size_2) > base_1)) {
+		return true;
+	} else {
+		return false;
+	}
+}
+
+/*
+ * This helper function checks to see if a PAS region from index 0 to
+ * (pas_idx - 1) occupies the L0 region at index l0_idx in the L0 table.
+ *
+ * Parameters
+ *   l0_idx:      Index of the L0 entry to check
+ *   pas_regions: PAS region array
+ *   pas_idx:     Upper bound of the PAS array index.
+ *
+ * Return
+ *   True if a PAS region occupies the L0 region in question, false if not.
+ */
+static bool gpt_does_previous_pas_exist_here(unsigned int l0_idx,
+					     pas_region_t *pas_regions,
+					     unsigned int pas_idx)
+{
+	/* Iterate over PAS regions up to pas_idx. */
+	for (unsigned int i = 0U; i < pas_idx; i++) {
+		if (gpt_check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx),
+		    GPT_L0GPTSZ_ACTUAL_SIZE,
+		    pas_regions[i].base_pa, pas_regions[i].size)) {
+			return true;
+		}
+	}
+	return false;
+}
+
+/*
+ * This function iterates over all of the PAS regions and checks them to ensure
+ * proper alignment of base and size, that the GPI is valid, and that no regions
+ * overlap. As a part of the overlap checks, this function checks existing L0
+ * mappings against the new PAS regions in the event that gpt_init_pas_l1_tables
+ * is called multiple times to place L1 tables in different areas of memory. It
+ * also counts the number of L1 tables needed and returns it on success.
+ *
+ * Parameters
+ *   *pas_regions	Pointer to array of PAS region structures.
+ *   pas_region_cnt	Total number of PAS regions in the array.
+ *
+ * Return
+ *   Negative Linux error code in the event of a failure, number of L1 regions
+ *   required when successful.
+ */
+static int gpt_validate_pas_mappings(pas_region_t *pas_regions,
+				     unsigned int pas_region_cnt)
+{
+	unsigned int idx;
+	unsigned int l1_cnt = 0U;
+	unsigned int pas_l1_cnt;
+	uint64_t *l0_desc = (uint64_t *)gpt_config.plat_gpt_l0_base;
+
+	assert(pas_regions != NULL);
+	assert(pas_region_cnt != 0U);
+
+	for (idx = 0U; idx < pas_region_cnt; idx++) {
+		/* Check for arithmetic overflow in region. */
+		if ((ULONG_MAX - pas_regions[idx].base_pa) <
+		    pas_regions[idx].size) {
+			ERROR("[GPT] Address overflow in PAS[%u]!\n", idx);
+			return -EOVERFLOW;
+		}
+
+		/* Initial checks for PAS validity. */
+		if (((pas_regions[idx].base_pa + pas_regions[idx].size) >
+		    GPT_PPS_ACTUAL_SIZE(gpt_config.t)) ||
+		    !gpt_is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
+			ERROR("[GPT] PAS[%u] is invalid!\n", idx);
+			return -EFAULT;
+		}
+
+		/*
+		 * Make sure this PAS does not overlap with another one. We
+		 * start from idx + 1 instead of 0 since prior PAS mappings will
+		 * have already checked themselves against this one.
+		 */
+		for (unsigned int i = idx + 1; i < pas_region_cnt; i++) {
+			if (gpt_check_pas_overlap(pas_regions[idx].base_pa,
+			    pas_regions[idx].size,
+			    pas_regions[i].base_pa,
+			    pas_regions[i].size)) {
+				ERROR("[GPT] PAS[%u] overlaps with PAS[%u]\n",
+					i, idx);
+				return -EFAULT;
+			}
+		}
+
+		/*
+		 * Since this function can be called multiple times with
+		 * separate L1 tables we need to check the existing L0 mapping
+		 * to see if this PAS would fall into one that has already been
+		 * initialized.
+		 */
+		for (unsigned int i = GPT_L0_IDX(pas_regions[idx].base_pa);
+		     i <= GPT_L0_IDX(pas_regions[idx].base_pa + pas_regions[idx].size - 1);
+		     i++) {
+			if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) &&
+			    (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) {
+				/* This descriptor is unused so continue. */
+				continue;
+			}
+
+			/*
+			 * This descriptor has been initialized in a previous
+			 * call to this function so cannot be initialized again.
+			 */
+			ERROR("[GPT] PAS[%u] overlaps with previous L0[%d]!\n",
+			      idx, i);
+			return -EFAULT;
+		}
+
+		/* Check for block mapping (L0) type. */
+		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
+		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
+			/* Make sure base and size are block-aligned. */
+			if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) ||
+			    !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) {
+				ERROR("[GPT] PAS[%u] is not block-aligned!\n",
+				      idx);
+				return -EFAULT;
+			}
+
+			continue;
+		}
+
+		/* Check for granule mapping (L1) type. */
+		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
+		    GPT_PAS_ATTR_MAP_TYPE_GRANULE) {
+			/* Make sure base and size are granule-aligned. */
+			if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) ||
+			    !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) {
+				ERROR("[GPT] PAS[%u] is not granule-aligned!\n",
+				      idx);
+				return -EFAULT;
+			}
+
+			/* Find how many L1 tables this PAS occupies. */
+			pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa +
+				     pas_regions[idx].size - 1) -
+				     GPT_L0_IDX(pas_regions[idx].base_pa) + 1);
+
+			/*
+			 * This creates a situation where, if multiple PAS
+			 * regions occupy the same table descriptor, we can get
+			 * an artificially high total L1 table count. The way we
+			 * handle this is by checking each PAS against those
+			 * before it in the array, and if they both occupy the
+			 * same L0 region we subtract from pas_l1_cnt and only the
+			 * first PAS in the array gets to count it.
+			 */
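+
+			/*
+			 * Example (informative): with 1GB L0 regions, two
+			 * granule-mapped PAS regions based at 0xE0000000 and
+			 * 0xF0000000 fall in the same L0 region, so they
+			 * share one L1 table; the second PAS decrements its
+			 * pas_l1_cnt so that table is only counted once.
+			 */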
+
+			/*
+			 * If L1 count is greater than 1 we know the start and
+			 * end PAs are in different L0 regions so we must check
+			 * both for overlap against other PAS.
+			 */
+			if (pas_l1_cnt > 1) {
+				if (gpt_does_previous_pas_exist_here(
+				    GPT_L0_IDX(pas_regions[idx].base_pa +
+				    pas_regions[idx].size - 1),
+				    pas_regions, idx)) {
+					pas_l1_cnt = pas_l1_cnt - 1;
+				}
+			}
+
+			if (gpt_does_previous_pas_exist_here(
+			    GPT_L0_IDX(pas_regions[idx].base_pa),
+			    pas_regions, idx)) {
+				pas_l1_cnt = pas_l1_cnt - 1;
+			}
+
+			l1_cnt += pas_l1_cnt;
+			continue;
+		}
+
+		/* If execution reaches this point, mapping type is invalid. */
+		ERROR("[GPT] PAS[%u] has invalid mapping type 0x%x.\n", idx,
+		      GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
+		return -EINVAL;
+	}
+
+	return l1_cnt;
+}
+
+/*
+ * This function validates L0 initialization parameters.
+ *
+ * Parameters
+ *   pps		PPS value to use for table generation.
+ *   l0_mem_base	Base address of memory used for L0 tables.
+ *   l0_mem_size	Size of memory available for L0 tables.
+ *
+ * Return
+ *   Negative Linux error code in the event of a failure, 0 for success.
+ */
+static int gpt_validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
+				  size_t l0_mem_size)
+{
+	size_t l0_alignment;
+
+	/*
+	 * Make sure PPS is valid and then store it since macros need this value
+	 * to work.
+	 */
+	if (pps > GPT_PPS_MAX) {
+		ERROR("[GPT] Invalid PPS: 0x%x\n", pps);
+		return -EINVAL;
+	}
+	gpt_config.pps = pps;
+	gpt_config.t = gpt_t_lookup[pps];
+
+	/* Alignment must be the greater of 4k or l0 table size. */
+	l0_alignment = PAGE_SIZE_4KB;
+	if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) {
+		l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t);
+	}
+
+	/* Check base address. */
+	if ((l0_mem_base == 0U) || ((l0_mem_base & (l0_alignment - 1)) != 0U)) {
+		ERROR("[GPT] Invalid L0 base address: 0x%lx\n", l0_mem_base);
+		return -EFAULT;
+	}
+
+	/* Check size. */
+	if (l0_mem_size < GPT_L0_TABLE_SIZE(gpt_config.t)) {
+		ERROR("[GPT] Inadequate L0 memory: need 0x%lx, have 0x%lx)\n",
+		      GPT_L0_TABLE_SIZE(gpt_config.t),
+		      l0_mem_size);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/*
+ * In the event that L1 tables are needed, this function validates
+ * the L1 table generation parameters.
+ *
+ * Parameters
+ *   l1_mem_base	Base address of memory used for L1 table allocation.
+ *   l1_mem_size	Total size of memory available for L1 tables.
+ *   l1_gpt_cnt		Number of L1 tables needed.
+ *
+ * Return
+ *   Negative Linux error code in the event of a failure, 0 for success.
+ */
+static int gpt_validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
+				  unsigned int l1_gpt_cnt)
+{
+	size_t l1_gpt_mem_sz;
+
+	/* Check if the granularity is supported */
+	if (!xlat_arch_is_granule_size_supported(
+	    GPT_PGS_ACTUAL_SIZE(gpt_config.p))) {
+		return -EPERM;
+	}
+
+	/* Make sure L1 tables are aligned to their size. */
+	if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1)) != 0U) {
+		ERROR("[GPT] Unaligned L1 GPT base address: 0x%lx\n",
+		      l1_mem_base);
+		return -EFAULT;
+	}
+
+	/* Get total memory needed for L1 tables. */
+	l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p);
+
+	/* Check for overflow. */
+	if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) {
+		ERROR("[GPT] Overflow calculating L1 memory size.\n");
+		return -ENOMEM;
+	}
+
+	/* Make sure enough space was supplied. */
+	if (l1_mem_size < l1_gpt_mem_sz) {
+		ERROR("[GPT] Inadequate memory for L1 GPTs. ");
+		ERROR("      Expected 0x%lx bytes. Got 0x%lx bytes\n",
+		      l1_gpt_mem_sz, l1_mem_size);
+		return -ENOMEM;
+	}
+
+	VERBOSE("[GPT] Requested 0x%lx bytes for L1 GPTs.\n", l1_gpt_mem_sz);
+	return 0;
+}
+
+/*
+ * This function initializes L0 block descriptors (regions that cannot be
+ * transitioned at the granule level) according to the provided PAS.
+ *
+ * Parameters
+ *   *pas		Pointer to the structure defining the PAS region to
+ *			initialize.
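+ *
+ * Example (informative): with 1GB L0 regions, a block PAS at base 0x0
+ * covering 2GB yields idx = 0 and end_idx = 2, so the same block descriptor
+ * is written to l0_gpt_arr[0] and l0_gpt_arr[1].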
+ */
+static void gpt_generate_l0_blk_desc(pas_region_t *pas)
+{
+	uint64_t gpt_desc;
+	unsigned int end_idx;
+	unsigned int idx;
+	uint64_t *l0_gpt_arr;
+
+	assert(gpt_config.plat_gpt_l0_base != 0U);
+	assert(pas != NULL);
+
+	/*
+	 * Checking of PAS parameters has already been done in
+	 * gpt_validate_pas_mappings so no need to check the same things again.
+	 */
+
+	l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base;
+
+	/* Create the GPT Block descriptor for this PAS region */
+	gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs));
+
+	/* Start index of this region in L0 GPTs */
+	idx = pas->base_pa >> GPT_L0_IDX_SHIFT;
+
+	/*
+	 * Determine number of L0 GPT descriptors covered by
+	 * this PAS region and use the count to populate these
+	 * descriptors.
+	 */
+	end_idx = (pas->base_pa + pas->size) >> GPT_L0_IDX_SHIFT;
+
+	/* Generate the needed block descriptors. */
+	for (; idx < end_idx; idx++) {
+		l0_gpt_arr[idx] = gpt_desc;
+		VERBOSE("[GPT] L0 entry (BLOCK) index %u [%p]: GPI = 0x%llx (0x%llx)\n",
+			idx, &l0_gpt_arr[idx],
+			(gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
+			GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
+	}
+}
+
+/*
+ * Helper function to determine if the end physical address lies in the same L0
+ * region as the current physical address. If true, the end physical address is
+ * returned else, the start address of the next region is returned.
+ *
+ * Parameters
+ *   cur_pa		Physical address of the current PA in the loop through
+ *			the range.
+ *   end_pa		Physical address of the end PA in a PAS range.
+ *
+ * Return
+ *   The PA of the end of the current range.
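+ *
+ * Example (informative): with 1GB L0 regions (GPT_L0_IDX_SHIFT == 30),
+ * cur_pa = 0x80100000 (L0 index 2) and end_pa = 0xF0000000 (L0 index 3)
+ * give 0xC0000000, the start of the next L0 region; once both addresses
+ * share an L0 index, end_pa itself is returned.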
+ */
+static uintptr_t gpt_get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
+{
+	uintptr_t cur_idx;
+	uintptr_t end_idx;
+
+	cur_idx = cur_pa >> GPT_L0_IDX_SHIFT;
+	end_idx = end_pa >> GPT_L0_IDX_SHIFT;
+
+	assert(cur_idx <= end_idx);
+
+	if (cur_idx == end_idx) {
+		return end_pa;
+	}
+
+	return (cur_idx + 1U) << GPT_L0_IDX_SHIFT;
+}
+
+/*
+ * Helper function to fill out GPI entries in a single L1 table. This function
+ * fills out entire L1 descriptors at a time to save memory writes.
+ *
+ * Parameters
+ *   gpi		GPI to set this range to
+ *   l1			Pointer to L1 table to fill out
+ *   first		Address of first granule in range.
+ *   last		Address of last granule in range (inclusive).
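+ *
+ * Example (informative): each 64-bit L1 entry packs 16 four-bit GPI fields.
+ * If the range starts at the fourth GPI slot of an entry
+ * (GPT_L1_GPI_IDX == 3), the initial mask is shifted left by 3 << 2 = 12
+ * bits to 0xFFFFFFFFFFFFF000, preserving the three lower GPI fields already
+ * in that entry; an analogous right shift trims the mask for the last entry.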
+ */
+static void gpt_fill_l1_tbl(uint64_t gpi, uint64_t *l1, uintptr_t first,
+			    uintptr_t last)
+{
+	uint64_t gpi_field = GPT_BUILD_L1_DESC(gpi);
+	uint64_t gpi_mask = 0xFFFFFFFFFFFFFFFF;
+
+	assert(first <= last);
+	assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U);
+	assert((last & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U);
+	assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));
+	assert(l1 != NULL);
+
+	/* Shift the mask if we're starting in the middle of an L1 entry. */
+	gpi_mask = gpi_mask << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2);
+
+	/* Fill out each L1 entry for this region. */
+	for (unsigned int i = GPT_L1_IDX(gpt_config.p, first);
+	     i <= GPT_L1_IDX(gpt_config.p, last); i++) {
+		/* Account for stopping in the middle of an L1 entry. */
+		if (i == GPT_L1_IDX(gpt_config.p, last)) {
+			gpi_mask &= (gpi_mask >> ((15 -
+				    GPT_L1_GPI_IDX(gpt_config.p, last)) << 2));
+		}
+
+		/* Write GPI values. */
+		assert((l1[i] & gpi_mask) ==
+		       (GPT_BUILD_L1_DESC(GPT_GPI_ANY) & gpi_mask));
+		l1[i] = (l1[i] & ~gpi_mask) | (gpi_mask & gpi_field);
+
+		/* Reset mask. */
+		gpi_mask = 0xFFFFFFFFFFFFFFFF;
+	}
+}
+
+/*
+ * This function finds the next available unused L1 table and initializes all
+ * granule descriptor entries to GPI_ANY. This ensures that there are no chunks
+ * of GPI_NO_ACCESS (0b0000) memory floating around in the system in the
+ * event that a PAS region stops midway through an L1 table, thus guaranteeing
+ * that all memory not explicitly assigned is GPI_ANY. This function does not
+ * check for overflow conditions; that should be done by the caller.
+ *
+ * Return
+ *   Pointer to the next available L1 table.
+ */
+static uint64_t *gpt_get_new_l1_tbl(void)
+{
+	/* Retrieve the next L1 table. */
+	uint64_t *l1 = (uint64_t *)((uint64_t)(gpt_l1_tbl) +
+		       (GPT_L1_TABLE_SIZE(gpt_config.p) *
+		       gpt_next_l1_tbl_idx));
+
+	/* Increment L1 counter. */
+	gpt_next_l1_tbl_idx++;
+
+	/* Initialize all GPIs to GPT_GPI_ANY */
+	for (unsigned int i = 0U; i < GPT_L1_ENTRY_COUNT(gpt_config.p); i++) {
+		l1[i] = GPT_BUILD_L1_DESC(GPT_GPI_ANY);
+	}
+
+	return l1;
+}
+
+/*
+ * When L1 tables are needed, this function creates the necessary L0 table
+ * descriptors and fills out the L1 table entries according to the supplied
+ * PAS range.
+ *
+ * Parameters
+ *   *pas		Pointer to the structure defining the PAS region.
+ */
+static void gpt_generate_l0_tbl_desc(pas_region_t *pas)
+{
+	uintptr_t end_pa;
+	uintptr_t cur_pa;
+	uintptr_t last_gran_pa;
+	uint64_t *l0_gpt_base;
+	uint64_t *l1_gpt_arr;
+	unsigned int l0_idx;
+
+	assert(gpt_config.plat_gpt_l0_base != 0U);
+	assert(pas != NULL);
+
+	/*
+	 * Checking of PAS parameters has already been done in
+	 * gpt_validate_pas_mappings so no need to check the same things again.
+	 */
+
+	end_pa = pas->base_pa + pas->size;
+	l0_gpt_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
+
+	/* We start working from the granule at base PA */
+	cur_pa = pas->base_pa;
+
+	/* Iterate over each L0 region in this memory range. */
+	for (l0_idx = GPT_L0_IDX(pas->base_pa);
+	     l0_idx <= GPT_L0_IDX(end_pa - 1U);
+	     l0_idx++) {
+
+		/*
+		 * See if the L0 entry is already a table descriptor or if we
+		 * need to create one.
+		 */
+		if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) {
+			/* Get the L1 array from the L0 entry. */
+			l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]);
+		} else {
+			/* Get a new L1 table from the L1 memory space. */
+			l1_gpt_arr = gpt_get_new_l1_tbl();
+
+			/* Fill out the L0 descriptor and flush it. */
+			l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr);
+		}
+
+		VERBOSE("[GPT] L0 entry (TABLE) index %u [%p] ==> L1 Addr 0x%llx (0x%llx)\n",
+			l0_idx, &l0_gpt_base[l0_idx],
+			(unsigned long long)(l1_gpt_arr),
+			l0_gpt_base[l0_idx]);
+
+		/*
+		 * Determine the PA of the last granule in this L0 descriptor.
+		 */
+		last_gran_pa = gpt_get_l1_end_pa(cur_pa, end_pa) -
+			       GPT_PGS_ACTUAL_SIZE(gpt_config.p);
+
+		/*
+		 * Fill up L1 GPT entries between these two addresses. This
+		 * function needs the addresses of the first granule and last
+		 * granule in the range.
+		 */
+		gpt_fill_l1_tbl(GPT_PAS_ATTR_GPI(pas->attrs), l1_gpt_arr,
+				cur_pa, last_gran_pa);
+
+		/* Advance cur_pa to first granule in next L0 region. */
+		cur_pa = gpt_get_l1_end_pa(cur_pa, end_pa);
+	}
+}
+
+/*
+ * This function flushes a range of L0 descriptors used by a given PAS region
+ * array. There is a chance that some unmodified L0 descriptors would be flushed
+ * in the case that there are "holes" in an array of PAS regions but overall
+ * this should be faster than individually flushing each modified L0 descriptor
+ * as they are created.
+ *
+ * Parameters
+ *   *pas		Pointer to an array of PAS regions.
+ *   pas_count		Number of entries in the PAS array.
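+ *
+ * Example (informative): with 1GB L0 regions, a PAS array covering
+ * 0x80000000 up to 0xFFFFFFFF spans L0 indices 2 and 3, so 16 bytes
+ * (two descriptors) starting at &l0[2] are flushed.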
+ */
+static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count)
+{
+	unsigned int idx;
+	unsigned int start_idx;
+	unsigned int end_idx;
+	uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base;
+
+	assert(pas != NULL);
+	assert(pas_count > 0);
+
+	/* Initial start and end values. */
+	start_idx = GPT_L0_IDX(pas[0].base_pa);
+	end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1);
+
+	/* Find lowest and highest L0 indices used in this PAS array. */
+	for (idx = 1; idx < pas_count; idx++) {
+		if (GPT_L0_IDX(pas[idx].base_pa) < start_idx) {
+			start_idx = GPT_L0_IDX(pas[idx].base_pa);
+		}
+		if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1) > end_idx) {
+			end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1);
+		}
+	}
+
+	/*
+	 * Flush all covered L0 descriptors; add 1 because we need to include
+	 * the end index value.
+	 */
+	flush_dcache_range((uintptr_t)&l0[start_idx],
+			   ((end_idx + 1) - start_idx) * sizeof(uint64_t));
+}
+
+/*
+ * Public API to enable granule protection checks once the tables have all been
+ * initialized. This function is called at first initialization and then again
+ * later during warm boots of CPU cores.
+ *
+ * Return
+ *   Negative Linux error code in the event of a failure, 0 for success.
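+ *
+ * A typical call order on the boot CPU, following the requirements stated
+ * in this file, is gpt_init_l0_tables(), then gpt_init_pas_l1_tables(),
+ * then gpt_enable(); warm-booted cores only need to call gpt_enable() again.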
+ */
+int gpt_enable(void)
+{
+	u_register_t gpccr_el3;
+
+	/*
+	 * Granule tables must be initialized before enabling
+	 * granule protection.
+	 */
+	if (gpt_config.plat_gpt_l0_base == 0U) {
+		ERROR("[GPT] Tables have not been initialized!\n");
+		return -EPERM;
+	}
+
+	/* Invalidate any stale TLB entries */
+	tlbipaallos();
+	dsb();
+
+	/* Write the base address of the L0 tables into GPTBR */
+	write_gptbr_el3(((gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT)
+			>> GPTBR_BADDR_SHIFT) & GPTBR_BADDR_MASK);
+
+	/* GPCCR_EL3.PPS */
+	gpccr_el3 = SET_GPCCR_PPS(gpt_config.pps);
+
+	/* GPCCR_EL3.PGS */
+	gpccr_el3 |= SET_GPCCR_PGS(gpt_config.pgs);
+
+	/* Set shareability attribute to Outer Shareable */
+	gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_OS);
+
+	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA. */
+	gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
+	gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);
+
+	/* Enable GPT */
+	gpccr_el3 |= GPCCR_GPC_BIT;
+
+	/* TODO: Configure GPCCR_EL3_GPCP for Fault control. */
+	write_gpccr_el3(gpccr_el3);
+	tlbipaallos();
+	dsb();
+	isb();
+
+	return 0;
+}
+
+/*
+ * Public API to disable granule protection checks.
+ */
+void gpt_disable(void)
+{
+	u_register_t gpccr_el3 = read_gpccr_el3();
+
+	write_gpccr_el3(gpccr_el3 & ~GPCCR_GPC_BIT);
+	dsbsy();
+	isb();
+}
+
+/*
+ * Public API that initializes the entire protected space to GPT_GPI_ANY using
+ * the L0 tables (block descriptors). Ideally, this function is invoked prior
+ * to DDR discovery and initialization. The MMU must be initialized before
+ * calling this function.
+ *
+ * Parameters
+ *   pps		PPS value to use for table generation
+ *   l0_mem_base	Base address of L0 tables in memory.
+ *   l0_mem_size	Total size of memory available for L0 tables.
+ *
+ * Return
+ *   Negative Linux error code in the event of a failure, 0 for success.
+ */
+int gpt_init_l0_tables(unsigned int pps, uintptr_t l0_mem_base,
+		       size_t l0_mem_size)
+{
+	int ret;
+	uint64_t gpt_desc;
+
+	/* Ensure that MMU and caches are enabled. */
+	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
+
+	/* Validate other parameters. */
+	ret = gpt_validate_l0_params(pps, l0_mem_base, l0_mem_size);
+	if (ret < 0) {
+		return ret;
+	}
+
+	/* Create the descriptor to initialize L0 entries with. */
+	gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY);
+
+	/* Iterate through all L0 entries */
+	for (unsigned int i = 0U; i < GPT_L0_REGION_COUNT(gpt_config.t); i++) {
+		((uint64_t *)l0_mem_base)[i] = gpt_desc;
+	}
+
+	/* Flush updated L0 tables to memory. */
+	flush_dcache_range((uintptr_t)l0_mem_base,
+			   (size_t)GPT_L0_TABLE_SIZE(gpt_config.t));
+
+	/* Stash the L0 base address once initial setup is complete. */
+	gpt_config.plat_gpt_l0_base = l0_mem_base;
+
+	return 0;
+}
+
+/*
+ * Public API that carves out PAS regions from the L0 tables and builds any L1
+ * tables that are needed. This function ideally is run after DDR discovery and
+ * initialization. The L0 tables must have already been initialized to GPI_ANY
+ * when this function is called.
+ *
+ * This function can be called multiple times with different L1 memory ranges
+ * and PAS regions if it is desirable to place L1 tables in different locations
+ * in memory. (ex: you have multiple DDR banks and want to place the L1 tables
+ * in the DDR bank that they control)
+ *
+ * Parameters
+ *   pgs		PGS value to use for table generation.
+ *   l1_mem_base	Base address of memory used for L1 tables.
+ *   l1_mem_size	Total size of memory available for L1 tables.
+ *   *pas_regions	Pointer to PAS regions structure array.
+ *   pas_count		Total number of PAS regions.
+ *
+ * Return
+ *   Negative Linux error code in the event of a failure, 0 for success.
+ */
+int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base,
+			   size_t l1_mem_size, pas_region_t *pas_regions,
+			   unsigned int pas_count)
+{
+	int ret;
+	int l1_gpt_cnt;
+
+	/* Ensure that MMU and caches are enabled. */
+	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
+
+	/* PGS is needed for gpt_validate_pas_mappings so check it now. */
+	if (pgs > GPT_PGS_MAX) {
+		ERROR("[GPT] Invalid PGS: 0x%x\n", pgs);
+		return -EINVAL;
+	}
+	gpt_config.pgs = pgs;
+	gpt_config.p = gpt_p_lookup[pgs];
+
+	/* Make sure L0 tables have been initialized. */
+	if (gpt_config.plat_gpt_l0_base == 0U) {
+		ERROR("[GPT] L0 tables must be initialized first!\n");
+		return -EPERM;
+	}
+
+	/* Check if L1 GPTs are required and how many. */
+	l1_gpt_cnt = gpt_validate_pas_mappings(pas_regions, pas_count);
+	if (l1_gpt_cnt < 0) {
+		return l1_gpt_cnt;
+	}
+
+	VERBOSE("[GPT] %d L1 GPTs requested.\n", l1_gpt_cnt);
+
+	/* If L1 tables are needed then validate the L1 parameters. */
+	if (l1_gpt_cnt > 0) {
+		ret = gpt_validate_l1_params(l1_mem_base, l1_mem_size,
+		      l1_gpt_cnt);
+		if (ret < 0) {
+			return ret;
+		}
+
+		/* Set up parameters for L1 table generation. */
+		gpt_l1_tbl = l1_mem_base;
+		gpt_next_l1_tbl_idx = 0U;
+	}
+
+	INFO("[GPT] Boot Configuration\n");
+	INFO("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
+	INFO("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
+	INFO("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
+	INFO("  PAS count: 0x%x\n", pas_count);
+	INFO("  L0 base:   0x%lx\n", gpt_config.plat_gpt_l0_base);
+
+	/* Generate the tables in memory. */
+	for (unsigned int idx = 0U; idx < pas_count; idx++) {
+		INFO("[GPT] PAS[%u]: base 0x%lx, size 0x%lx, GPI 0x%x, type 0x%x\n",
+		     idx, pas_regions[idx].base_pa, pas_regions[idx].size,
+		     GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
+		     GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
+
+		/* Check if a block or table descriptor is required */
+		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
+		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
+			gpt_generate_l0_blk_desc(&pas_regions[idx]);
+
+		} else {
+			gpt_generate_l0_tbl_desc(&pas_regions[idx]);
+		}
+	}
+
+	/* Flush modified L0 tables. */
+	flush_l0_for_pas_array(pas_regions, pas_count);
+
+	/* Flush L1 tables if needed. */
+	if (l1_gpt_cnt > 0) {
+		flush_dcache_range(l1_mem_base,
+				   GPT_L1_TABLE_SIZE(gpt_config.p) *
+				   l1_gpt_cnt);
+	}
+
+	/* Make sure that all the entries are written to the memory. */
+	dsbishst();
+
+	return 0;
+}
+
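A minimal sketch of how a platform boot stage might drive the three public init calls above, in order. PLAT_L0_GPT_BASE/SIZE, PLAT_L1_GPT_BASE/SIZE, plat_pas_regions[] and plat_pas_region_count are hypothetical platform symbols, and the GPCCR_PPS_4GB/GPCCR_PGS_4K enumerators are assumed to come from the public gpt_rme.h header rather than from this hunk:

#include <common/debug.h>
#include <lib/gpt_rme/gpt_rme.h>

/* Hypothetical: the platform describes its PAS layout elsewhere. */
extern pas_region_t plat_pas_regions[];
extern const unsigned int plat_pas_region_count;

static void plat_setup_gpt(void)
{
	/* 1. Fill the L0 table with GPT_GPI_ANY block descriptors. */
	if (gpt_init_l0_tables(GPCCR_PPS_4GB, PLAT_L0_GPT_BASE,
			       PLAT_L0_GPT_SIZE) < 0) {
		panic();
	}

	/* 2. Carve out the PAS regions, creating L1 tables where needed. */
	if (gpt_init_pas_l1_tables(GPCCR_PGS_4K, PLAT_L1_GPT_BASE,
				   PLAT_L1_GPT_SIZE, plat_pas_regions,
				   plat_pas_region_count) < 0) {
		panic();
	}

	/* 3. Enable granule protection checks on the boot CPU. */
	if (gpt_enable() < 0) {
		panic();
	}
}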
+/*
+ * Public API to initialize the runtime gpt_config structure based on the values
+ * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
+ * typically happens in a bootloader stage prior to setting up the EL3 runtime
+ * environment for the granule transition service so this function detects the
+ * initialization from a previous stage. Granule protection checks must be
+ * enabled already or this function will return an error.
+ *
+ * Return
+ *   Negative Linux error code in the event of a failure, 0 for success.
+ */
+int gpt_runtime_init(void)
+{
+	u_register_t reg;
+
+	/* Ensure that MMU and caches are enabled. */
+	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
+
+	/* Ensure GPC are already enabled. */
+	if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0U) {
+		ERROR("[GPT] Granule protection checks are not enabled!\n");
+		return -EPERM;
+	}
+
+	/*
+	 * Read the L0 table address from GPTBR; we don't need the L1 base
+	 * addresses since those are included in the L0 tables as needed.
+	 */
+	reg = read_gptbr_el3();
+	gpt_config.plat_gpt_l0_base = ((reg >> GPTBR_BADDR_SHIFT) &
+				      GPTBR_BADDR_MASK) <<
+				      GPTBR_BADDR_VAL_SHIFT;
+
+	/* Read GPCCR to get PGS and PPS values. */
+	reg = read_gpccr_el3();
+	gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK;
+	gpt_config.t = gpt_t_lookup[gpt_config.pps];
+	gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK;
+	gpt_config.p = gpt_p_lookup[gpt_config.pgs];
+
+	VERBOSE("[GPT] Runtime Configuration\n");
+	VERBOSE("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
+	VERBOSE("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
+	VERBOSE("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
+	VERBOSE("  L0 base:   0x%lx\n", gpt_config.plat_gpt_l0_base);
+
+	return 0;
+}
+
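A hedged sketch of the BL31-side counterpart: recover the configuration once during runtime setup, then re-enable checks from each core's warm-boot path, as the descriptions of gpt_runtime_init() and gpt_enable() above require. Both hook names are hypothetical:

#include <common/debug.h>
#include <lib/gpt_rme/gpt_rme.h>

void bl31_plat_gpt_runtime_setup(void)
{
	/* Pick up PPS/PGS/L0 base programmed by the earlier boot stage. */
	if (gpt_runtime_init() < 0) {
		panic();
	}
}

void plat_cpu_on_finish_hook(void)
{
	/* gpt_enable() must run again on every warm-booted core. */
	if (gpt_enable() < 0) {
		panic();
	}
}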
+/*
+ * The L1 descriptors are protected by a spinlock to ensure that multiple
+ * CPUs do not attempt to change the descriptors at once. In the future it
+ * would be better to have separate spinlocks for each L1 descriptor.
+ */
+static spinlock_t gpt_lock;
+
+/*
+ * Check if caller is allowed to transition a PAS.
+ *
+ * - Secure world caller can only request S <-> NS transitions on a
+ *   granule that is already in either S or NS PAS.
+ *
+ * - Realm world caller can only request R <-> NS transitions on a
+ *   granule that is already in either R or NS PAS.
+ *
+ * Parameters
+ *   src_sec_state	Security state of the caller.
+ *   current_gpi	Current GPI of the granule.
+ *   target_gpi		Requested new GPI for the granule.
+ *
+ * Return
+ *   Negative Linux error code in the event of a failure, 0 for success.
+ */
+static int gpt_check_transition_gpi(unsigned int src_sec_state,
+				    unsigned int current_gpi,
+				    unsigned int target_gpi)
+{
+	unsigned int check_gpi;
+
+	/* Cannot transition a granule to the state it is already in. */
+	if (current_gpi == target_gpi) {
+		return -EINVAL;
+	}
+
+	/* Check security state, only secure and realm can transition. */
+	if (src_sec_state == SMC_FROM_REALM) {
+		check_gpi = GPT_GPI_REALM;
+	} else if (src_sec_state == SMC_FROM_SECURE) {
+		check_gpi = GPT_GPI_SECURE;
+	} else {
+		return -EINVAL;
+	}
+
+	/* Make sure security state is allowed to make the transition. */
+	if ((target_gpi != check_gpi) && (target_gpi != GPT_GPI_NS)) {
+		return -EINVAL;
+	}
+	if ((current_gpi != check_gpi) && (current_gpi != GPT_GPI_NS)) {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
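Summarizing the checks above in one place (illustration only, not part of the patch):

/*
 * Caller (src_sec_state)   Allowed transitions
 * ----------------------   ------------------------------
 * SMC_FROM_SECURE          GPT_GPI_SECURE <-> GPT_GPI_NS
 * SMC_FROM_REALM           GPT_GPI_REALM  <-> GPT_GPI_NS
 *
 * Any other caller, any other GPI pair, or a no-op request
 * (current GPI == target GPI) is rejected with -EINVAL.
 */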
+/*
+ * This function is the core of the granule transition service. When a granule
+ * transition request occurs it is routed to this function where the request is
+ * validated then fulfilled if possible.
+ *
+ * TODO: implement support for transitioning multiple granules at once.
+ *
+ * Parameters
+ *   base		Base address of the region to transition, must be
+ *			aligned to granule size.
+ *   size		Size of region to transition, must be aligned to granule
+ *			size.
+ *   src_sec_state	Security state of the caller.
+ *   target_pas		Target PAS of the specified memory region.
+ *
+ * Return
+ *    Negative Linux error code in the event of a failure, 0 for success.
+ */
+int gpt_transition_pas(uint64_t base, size_t size, unsigned int src_sec_state,
+	unsigned int target_pas)
+{
+	int idx;
+	unsigned int gpi_shift;
+	unsigned int gpi;
+	uint64_t gpt_l0_desc;
+	uint64_t gpt_l1_desc;
+	uint64_t *gpt_l1_addr;
+	uint64_t *gpt_l0_base;
+
+	/* Ensure that the tables have been set up before taking requests. */
+	assert(gpt_config.plat_gpt_l0_base != 0U);
+
+	/* Check for address range overflow. */
+	if ((ULONG_MAX - base) < size) {
+		VERBOSE("[GPT] Transition request address overflow!\n");
+		VERBOSE("      Base=0x%llx\n", base);
+		VERBOSE("      Size=0x%lx\n", size);
+		return -EINVAL;
+	}
+
+	/* Make sure base and size are valid. */
+	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0U) ||
+	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0U) ||
+	    (size == 0U) ||
+	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
+		VERBOSE("[GPT] Invalid granule transition address range!\n");
+		VERBOSE("      Base=0x%llx\n", base);
+		VERBOSE("      Size=0x%lx\n", size);
+		return -EINVAL;
+	}
+
+	/* See if this is a single granule transition or a range of granules. */
+	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
+		/*
+		 * TODO: Add support for transitioning multiple granules with a
+		 * single call to this function.
+		 */
+		panic();
+	}
+
+	/* Get the L0 descriptor and make sure it is for a table. */
+	gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
+	gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
+	if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
+		VERBOSE("[GPT] Granule is not covered by a table descriptor!\n");
+		VERBOSE("      Base=0x%llx\n", base);
+		return -EINVAL;
+	}
+
+	/* Get the table index and GPI shift from PA. */
+	gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
+	idx = GPT_L1_IDX(gpt_config.p, base);
+	gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;
+
+	/*
+	 * Access to L1 tables is controlled by a global lock to ensure
+	 * that no more than one CPU is allowed to make changes at any
+	 * given time.
+	 */
+	spin_lock(&gpt_lock);
+	gpt_l1_desc = gpt_l1_addr[idx];
+	gpi = (gpt_l1_desc >> gpi_shift) & GPT_L1_GRAN_DESC_GPI_MASK;
+
+	/* Make sure caller state and source/target PAS are allowed. */
+	if (gpt_check_transition_gpi(src_sec_state, gpi, target_pas) < 0) {
+		spin_unlock(&gpt_lock);
+		VERBOSE("[GPT] Invalid caller state and PAS combo!\n");
+		VERBOSE("      Caller: %u, Current GPI: %u, Target GPI: %u\n",
+			src_sec_state, gpi, target_pas);
+		return -EPERM;
+	}
+
+	/* Clear existing GPI encoding and transition granule. */
+	gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
+	gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
+	gpt_l1_addr[idx] = gpt_l1_desc;
+
+	/* Ensure that the write operation happens before the unlock. */
+	dmbishst();
+
+	/* Unlock access to the L1 tables. */
+	spin_unlock(&gpt_lock);
+
+	/* Cache maintenance. */
+	clean_dcache_range((uintptr_t)&gpt_l1_addr[idx],
+			   sizeof(uint64_t));
+	gpt_tlbi_by_pa(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+	dsbishst();
+
+	VERBOSE("[GPT] Granule 0x%llx, GPI 0x%x->0x%x\n", base, gpi,
+		target_pas);
+
+	return 0;
+}
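A sketch of how a Granule Transition Service handler could delegate a single granule from NS to Realm PAS on behalf of a Realm caller. The handler name and the 4KB granule-size constant are assumptions; gpt_transition_pas(), SMC_FROM_REALM and GPT_GPI_REALM are used as in the code above:

#include <lib/gpt_rme/gpt_rme.h>
#include <lib/smccc.h>		/* assumed location of the SMC_FROM_* flags */

#define GTS_GRANULE_SIZE	UL(0x1000)	/* assumes PGS = 4KB */

static int gts_delegate_granule(uint64_t pa)
{
	/* One granule only: size must equal the configured granule size. */
	return gpt_transition_pas(pa, GTS_GRANULE_SIZE, SMC_FROM_REALM,
				  GPT_GPI_REALM);
}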
diff --git a/lib/gpt_rme/gpt_rme.mk b/lib/gpt_rme/gpt_rme.mk
new file mode 100644
index 0000000..60176f4
--- /dev/null
+++ b/lib/gpt_rme/gpt_rme.mk
@@ -0,0 +1,8 @@
+#
+# Copyright (c) 2021, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+GPT_LIB_SRCS	:=	$(addprefix lib/gpt_rme/,        \
+			gpt_rme.c)
diff --git a/lib/gpt_rme/gpt_rme_private.h b/lib/gpt_rme/gpt_rme_private.h
new file mode 100644
index 0000000..5770bf7
--- /dev/null
+++ b/lib/gpt_rme/gpt_rme_private.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef GPT_RME_PRIVATE_H
+#define GPT_RME_PRIVATE_H
+
+#include <arch.h>
+#include <lib/gpt_rme/gpt_rme.h>
+#include <lib/utils_def.h>
+
+/******************************************************************************/
+/* GPT descriptor definitions                                                 */
+/******************************************************************************/
+
+/* GPT level 0 descriptor bit definitions. */
+#define GPT_L0_TYPE_MASK		UL(0xF)
+#define GPT_L0_TYPE_SHIFT		U(0)
+
+/* For now, we don't support contiguous descriptors, only table and block. */
+#define GPT_L0_TYPE_TBL_DESC		UL(0x3)
+#define GPT_L0_TYPE_BLK_DESC		UL(0x1)
+
+#define GPT_L0_TBL_DESC_L1ADDR_MASK	UL(0xFFFFFFFFFF)
+#define GPT_L0_TBL_DESC_L1ADDR_SHIFT	U(12)
+
+#define GPT_L0_BLK_DESC_GPI_MASK	UL(0xF)
+#define GPT_L0_BLK_DESC_GPI_SHIFT	U(4)
+
+/* GPT level 1 descriptor bit definitions */
+#define GPT_L1_GRAN_DESC_GPI_MASK	UL(0xF)
+
+/*
+ * This macro fills out every GPI entry in a granule descriptor with the same
+ * value.
+ */
+#define GPT_BUILD_L1_DESC(_gpi)		(((uint64_t)(_gpi) << 4*0) | \
+					 ((uint64_t)(_gpi) << 4*1) | \
+					 ((uint64_t)(_gpi) << 4*2) | \
+					 ((uint64_t)(_gpi) << 4*3) | \
+					 ((uint64_t)(_gpi) << 4*4) | \
+					 ((uint64_t)(_gpi) << 4*5) | \
+					 ((uint64_t)(_gpi) << 4*6) | \
+					 ((uint64_t)(_gpi) << 4*7) | \
+					 ((uint64_t)(_gpi) << 4*8) | \
+					 ((uint64_t)(_gpi) << 4*9) | \
+					 ((uint64_t)(_gpi) << 4*10) | \
+					 ((uint64_t)(_gpi) << 4*11) | \
+					 ((uint64_t)(_gpi) << 4*12) | \
+					 ((uint64_t)(_gpi) << 4*13) | \
+					 ((uint64_t)(_gpi) << 4*14) | \
+					 ((uint64_t)(_gpi) << 4*15))
+
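For illustration, the macro simply replicates the 4-bit GPI into all sixteen nibbles of an L1 granule descriptor; for example a GPI of 0xA yields 0xAAAAAAAAAAAAAAAA, which could be verified at build time with the existing CASSERT() helper (worked example only, not part of the patch):

#include <lib/cassert.h>

CASSERT(GPT_BUILD_L1_DESC(0xAUL) == 0xAAAAAAAAAAAAAAAAULL,
	assert_gpt_build_l1_desc_replicates_gpi);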
+/******************************************************************************/
+/* GPT platform configuration                                                 */
+/******************************************************************************/
+
+/* This value comes from GPCCR_EL3 so no externally supplied definition. */
+#define GPT_L0GPTSZ		((unsigned int)((read_gpccr_el3() >> \
+				GPCCR_L0GPTSZ_SHIFT) & GPCCR_L0GPTSZ_MASK))
+
+/* The "S" value is directly related to L0GPTSZ */
+#define GPT_S_VAL		(GPT_L0GPTSZ + 30U)
+
+/*
+ * Map PPS values to T values.
+ *
+ *   PPS    Size    T
+ *   0b000  4GB     32
+ *   0b001  64GB    36
+ *   0b010  1TB     40
+ *   0b011  4TB     42
+ *   0b100  16TB    44
+ *   0b101  256TB   48
+ *   0b110  4PB     52
+ *
+ * See section 15.1.27 of the RME specification.
+ */
+typedef enum {
+	PPS_4GB_T =	32U,
+	PPS_64GB_T =	36U,
+	PPS_1TB_T =	40U,
+	PPS_4TB_T =	42U,
+	PPS_16TB_T =	44U,
+	PPS_256TB_T =	48U,
+	PPS_4PB_T =	52U
+} gpt_t_val_e;
+
+/*
+ * Map PGS values to P values.
+ *
+ *   PGS    Size    P
+ *   0b00   4KB     12
+ *   0b10   16KB    14
+ *   0b01   64KB    16
+ *
+ * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB; this is not a typo.
+ *
+ * See section 15.1.27 of the RME specification.
+ */
+typedef enum {
+	PGS_4KB_P =	12U,
+	PGS_16KB_P =	14U,
+	PGS_64KB_P =	16U
+} gpt_p_val_e;
+
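The .c file above indexes two lookup arrays (gpt_t_lookup[pps] and gpt_p_lookup[pgs]) whose definitions live in gpt_rme.c rather than in this header; a sketch consistent with the two tables above (note the swapped 16KB/64KB order for PGS):

/* Sketch only; the authoritative definitions are in gpt_rme.c. */
static const gpt_t_val_e gpt_t_lookup[] = {
	PPS_4GB_T, PPS_64GB_T, PPS_1TB_T, PPS_4TB_T,
	PPS_16TB_T, PPS_256TB_T, PPS_4PB_T
};

static const gpt_p_val_e gpt_p_lookup[] = {
	PGS_4KB_P, PGS_64KB_P, PGS_16KB_P
};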
+/* Max valid value for PGS. */
+#define GPT_PGS_MAX			(2U)
+
+/* Max valid value for PPS. */
+#define GPT_PPS_MAX			(6U)
+
+/******************************************************************************/
+/* L0 address attribute macros                                                */
+/******************************************************************************/
+
+/*
+ * If S is greater than or equal to T then there is a single L0 region covering
+ * the entire protected space, so there is no L0 index and the width (and the
+ * derived mask value) is zero. If this special case is not handled explicitly,
+ * the width would evaluate to a nonsensical negative value.
+ */
+#define GPT_L0_IDX_WIDTH(_t)		(((_t) > GPT_S_VAL) ? \
+					((_t) - GPT_S_VAL) : (0U))
+
+/* Bit shift for the L0 index field in a PA. */
+#define GPT_L0_IDX_SHIFT		(GPT_S_VAL)
+
+/* Mask for the L0 index field, must be shifted. */
+#define GPT_L0_IDX_MASK(_t)		(0xFFFFFFFFFFFFFFFFUL >> \
+					(64U - (GPT_L0_IDX_WIDTH(_t))))
+
+/* Total number of L0 regions. */
+#define GPT_L0_REGION_COUNT(_t)		((GPT_L0_IDX_MASK(_t)) + 1U)
+
+/* Total size of each GPT L0 region in bytes. */
+#define GPT_L0_REGION_SIZE		(1UL << (GPT_L0_IDX_SHIFT))
+
+/* Total size in bytes of the whole L0 table. */
+#define GPT_L0_TABLE_SIZE(_t)		((GPT_L0_REGION_COUNT(_t)) << 3U)
+
+/******************************************************************************/
+/* L1 address attribute macros                                                */
+/******************************************************************************/
+
+/* Width of the L1 index field. */
+#define GPT_L1_IDX_WIDTH(_p)		((GPT_S_VAL - 1U) - ((_p) + 3U))
+
+/* Bit shift for the L1 index field. */
+#define GPT_L1_IDX_SHIFT(_p)		((_p) + 4U)
+
+/* Mask for the L1 index field, must be shifted. */
+#define GPT_L1_IDX_MASK(_p)		(0xFFFFFFFFFFFFFFFFUL >> \
+					(64U - (GPT_L1_IDX_WIDTH(_p))))
+
+/* Bit shift for the index of the L1 GPI in a PA. */
+#define GPT_L1_GPI_IDX_SHIFT(_p)	(_p)
+
+/* Mask for the index of the L1 GPI in a PA. */
+#define GPT_L1_GPI_IDX_MASK		(0xF)
+
+/* Total number of entries in each L1 table. */
+#define GPT_L1_ENTRY_COUNT(_p)		((GPT_L1_IDX_MASK(_p)) + 1U)
+
+/* Total size in bytes of each L1 table. */
+#define GPT_L1_TABLE_SIZE(_p)		((GPT_L1_ENTRY_COUNT(_p)) << 3U)
+
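A worked example of the L0/L1 sizing macros above, using illustrative values (PPS = 1TB, PGS = 4KB, L0GPTSZ = 0, i.e. S = 30 and 1GB L0 regions):

/*
 * Worked example (illustrative values, not mandated by this patch):
 *
 *   T = 40 (PPS = 1TB), P = 12 (PGS = 4KB), S = 30 (L0GPTSZ = 0)
 *
 *   GPT_L0_IDX_WIDTH(40)  = 40 - 30             = 10 -> 1024 L0 entries
 *   GPT_L0_TABLE_SIZE(40) = 1024 * 8            = 8KB
 *   GPT_L1_IDX_WIDTH(12)  = (30 - 1) - (12 + 3) = 14 -> 16384 L1 entries
 *   GPT_L1_TABLE_SIZE(12) = 16384 * 8           = 128KB
 *
 * Each 64-bit L1 entry holds 16 GPIs, so one L1 table covers
 * 16384 * 16 * 4KB = 1GB, i.e. exactly one 2^30-byte L0 region.
 */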
+/******************************************************************************/
+/* General helper macros                                                      */
+/******************************************************************************/
+
+/* Protected space actual size in bytes. */
+#define GPT_PPS_ACTUAL_SIZE(_t)	(1UL << (_t))
+
+/* Granule actual size in bytes. */
+#define GPT_PGS_ACTUAL_SIZE(_p)	(1UL << (_p))
+
+/* L0 GPT region size in bytes. */
+#define GPT_L0GPTSZ_ACTUAL_SIZE	(1UL << GPT_S_VAL)
+
+/* Get the index of the L0 entry from a physical address. */
+#define GPT_L0_IDX(_pa)		((_pa) >> GPT_L0_IDX_SHIFT)
+
+/*
+ * This definition is used to determine if a physical address lies on an L0
+ * region boundary.
+ */
+#define GPT_IS_L0_ALIGNED(_pa)	(((_pa) & (GPT_L0_REGION_SIZE - U(1))) == U(0))
+
+/* Get the type field from an L0 descriptor. */
+#define GPT_L0_TYPE(_desc)	(((_desc) >> GPT_L0_TYPE_SHIFT) & \
+				GPT_L0_TYPE_MASK)
+
+/* Create an L0 block descriptor. */
+#define GPT_L0_BLK_DESC(_gpi)	(GPT_L0_TYPE_BLK_DESC | \
+				(((_gpi) & GPT_L0_BLK_DESC_GPI_MASK) << \
+				GPT_L0_BLK_DESC_GPI_SHIFT))
+
+/* Create an L0 table descriptor with an L1 table address. */
+#define GPT_L0_TBL_DESC(_pa)	(GPT_L0_TYPE_TBL_DESC | ((uint64_t)(_pa) & \
+				(GPT_L0_TBL_DESC_L1ADDR_MASK << \
+				GPT_L0_TBL_DESC_L1ADDR_SHIFT)))
+
+/* Get the GPI from an L0 block descriptor. */
+#define GPT_L0_BLKD_GPI(_desc)	(((_desc) >> GPT_L0_BLK_DESC_GPI_SHIFT) & \
+				GPT_L0_BLK_DESC_GPI_MASK)
+
+/* Get the L1 address from an L0 table descriptor. */
+#define GPT_L0_TBLD_ADDR(_desc)	((uint64_t *)(((_desc) & \
+				(GPT_L0_TBL_DESC_L1ADDR_MASK << \
+				GPT_L0_TBL_DESC_L1ADDR_SHIFT))))
+
+/* Get the index into the L1 table from a physical address. */
+#define GPT_L1_IDX(_p, _pa)	(((_pa) >> GPT_L1_IDX_SHIFT(_p)) & \
+				GPT_L1_IDX_MASK(_p))
+
+/* Get the index of the GPI within an L1 table entry from a physical address. */
+#define GPT_L1_GPI_IDX(_p, _pa)	(((_pa) >> GPT_L1_GPI_IDX_SHIFT(_p)) & \
+				GPT_L1_GPI_IDX_MASK)
+
+/* Determine if an address is granule-aligned. */
+#define GPT_IS_L1_ALIGNED(_p, _pa) (((_pa) & (GPT_PGS_ACTUAL_SIZE(_p) - U(1))) \
+				   == U(0))
+
+#endif /* GPT_RME_PRIVATE_H */
diff --git a/lib/xlat_mpu/aarch64/enable_mpu.S b/lib/xlat_mpu/aarch64/enable_mpu.S
new file mode 100644
index 0000000..3791f2d
--- /dev/null
+++ b/lib/xlat_mpu/aarch64/enable_mpu.S
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <platform_def.h>
+
+	.global	enable_mpu_direct_el2
+
+	/* void enable_mpu_direct_el2(unsigned int flags) */
+func enable_mpu_direct_el2
+#if ENABLE_ASSERTIONS
+	mrs	x1, sctlr_el2
+	tst	x1, #SCTLR_M_BIT
+	ASM_ASSERT(eq)
+#endif
+	mov	x7, x0
+	adrp	x0, mmu_cfg_params
+	add	x0, x0, :lo12:mmu_cfg_params
+
+	/* (MAIRs are already set up) */
+
+	/* TCR */
+	ldr	x2, [x0, #(MMU_CFG_TCR << 3)]
+	msr	tcr_el2, x2
+
+	/*
+	 * Ensure the MPU configuration register writes above have completed
+	 * and are committed before enabling the MPU.
+	 */
+	dsb	ish
+	isb
+
+	/* Set and clear required fields of SCTLR */
+	mrs	x4, sctlr_el2
+	mov_imm	x5, SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT
+	orr	x4, x4, x5
+
+	/* Additionally, amend SCTLR fields based on flags */
+	bic	x5, x4, #SCTLR_C_BIT
+	tst	x7, #DISABLE_DCACHE
+	csel	x4, x5, x4, ne
+
+	msr	sctlr_el2, x4
+	isb
+
+	ret
+endfunc enable_mpu_direct_el2
diff --git a/lib/xlat_mpu/aarch64/xlat_mpu_arch.c b/lib/xlat_mpu/aarch64/xlat_mpu_arch.c
new file mode 100644
index 0000000..5068eb8
--- /dev/null
+++ b/lib/xlat_mpu/aarch64/xlat_mpu_arch.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "../xlat_mpu_private.h"
+#include <arch.h>
+#include <arch_features.h>
+#include <lib/cassert.h>
+#include <lib/utils_def.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#include <fvp_r_arch_helpers.h>
+
+#warning "xlat_mpu library is currently experimental and its API may change in future."
+
+#if ENABLE_ASSERTIONS
+/*
+ * Return minimum virtual address space size supported by the architecture
+ */
+uintptr_t xlat_get_min_virt_addr_space_size(void)
+{
+	uintptr_t ret;
+
+	if (is_armv8_4_ttst_present()) {
+		ret = MIN_VIRT_ADDR_SPACE_SIZE_TTST;
+	} else {
+		ret = MIN_VIRT_ADDR_SPACE_SIZE;
+	}
+	return ret;
+}
+#endif /* ENABLE_ASSERTIONS */
+
+bool is_mpu_enabled_ctx(const xlat_ctx_t *ctx)
+{
+	if (ctx->xlat_regime == EL1_EL0_REGIME) {
+		assert(xlat_arch_current_el() >= 1U);
+		return (read_sctlr_el1() & SCTLR_M_BIT) != 0U;
+	} else {
+		assert(xlat_arch_current_el() >= 2U);
+		return (read_sctlr_el2() & SCTLR_M_BIT) != 0U;
+	}
+}
+
+bool is_dcache_enabled(void)
+{
+	unsigned int el = get_current_el();
+
+	if (el == 1U) {
+		return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
+	} else {  /* must be EL2 */
+		return (read_sctlr_el2() & SCTLR_C_BIT) != 0U;
+	}
+}
+
+unsigned int xlat_arch_current_el(void)
+{
+	unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
+
+	assert(el > 0U);
+
+	return el;
+}
+
diff --git a/lib/xlat_mpu/ro_xlat_mpu.mk b/lib/xlat_mpu/ro_xlat_mpu.mk
new file mode 100644
index 0000000..23f1d46
--- /dev/null
+++ b/lib/xlat_mpu/ro_xlat_mpu.mk
@@ -0,0 +1,14 @@
+#
+# Copyright (c) 2021, ARM Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${USE_DEBUGFS}, 1)
+    $(error "Debugfs requires functionality from the dynamic translation \
+             library and is incompatible with ALLOW_RO_XLAT_TABLES.")
+endif
+
+ifeq (${ARCH},aarch32)
+    $(error "The xlat_mpu library does not currently support AArch32.")
+endif
diff --git a/lib/xlat_mpu/xlat_mpu.mk b/lib/xlat_mpu/xlat_mpu.mk
new file mode 100644
index 0000000..041b91c
--- /dev/null
+++ b/lib/xlat_mpu/xlat_mpu.mk
@@ -0,0 +1,19 @@
+#
+# Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+XLAT_MPU_LIB_V1_SRCS	:=	$(addprefix lib/xlat_mpu/,		\
+				${ARCH}/enable_mpu.S			\
+				${ARCH}/xlat_mpu_arch.c			\
+				xlat_mpu_context.c			\
+				xlat_mpu_core.c				\
+				xlat_mpu_utils.c)
+
+XLAT_MPU_LIB_V1	:=	1
+$(eval $(call add_define,XLAT_MPU_LIB_V1))
+
+ifeq (${ALLOW_XLAT_MPU}, 1)
+    include lib/xlat_mpu/ro_xlat_mpu.mk
+endif
diff --git a/lib/xlat_mpu/xlat_mpu_context.c b/lib/xlat_mpu/xlat_mpu_context.c
new file mode 100644
index 0000000..28c463b
--- /dev/null
+++ b/lib/xlat_mpu/xlat_mpu_context.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <common/debug.h>
+
+#include "lib/xlat_mpu/xlat_mpu.h"
+#include <lib/xlat_tables/xlat_tables_defs.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include "xlat_mpu_private.h"
+
+#include <fvp_r_arch_helpers.h>
+#include <platform_def.h>
+
+#warning "xlat_mpu library is currently experimental and its API may change in future."
+
+
+/*
+ * MMU configuration register values for the active translation context. Used
+ * from the MMU assembly helpers.
+ */
+uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
+/*
+ * Allocate and initialise the default translation context for the BL image
+ * currently executing.
+ */
+REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
+			PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
+
+void mmap_add(const mmap_region_t *mm)
+{
+	mmap_add_ctx(&tf_xlat_ctx, mm);
+}
+
+void __init init_xlat_tables(void)
+{
+	assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);
+
+	unsigned int current_el = xlat_arch_current_el();
+
+	if (current_el == 1U) {
+		tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
+	} else {
+		assert(current_el == 2U);
+		tf_xlat_ctx.xlat_regime = EL2_REGIME;
+	}
+	/* Note:  If EL3 is supported in future v8-R64, add EL3 assignment */
+	init_xlat_tables_ctx(&tf_xlat_ctx);
+}
+
+int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
+{
+	return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
+}
+
+void enable_mpu_el2(unsigned int flags)
+{
+	/* EL2 is strictly MPU on v8-R64, so no need for setup_mpu_cfg() */
+	enable_mpu_direct_el2(flags);
+}
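A hedged sketch of how a v8-R64 platform image might use this context API end to end. The region list, its addresses and the xlat_mpu.h include path are assumptions; MAP_REGION_FLAT() and the MT_* attributes come from the existing xlat_tables_v2.h helpers:

#include "lib/xlat_mpu/xlat_mpu.h"	/* assumed location of enable_mpu_el2() */
#include <lib/xlat_tables/xlat_tables_v2.h>

/* Placeholder regions: identity-mapped and 64-byte aligned, as required. */
static const mmap_region_t plat_regions[] = {
	MAP_REGION_FLAT(0x00000000, 0x00080000, MT_CODE | MT_SECURE),
	MAP_REGION_FLAT(0x00080000, 0x00080000, MT_MEMORY | MT_RW | MT_SECURE),
	MAP_REGION_FLAT(0x9c000000, 0x00100000, MT_DEVICE | MT_RW | MT_SECURE),
	{0}
};

static void plat_setup_mpu(void)
{
	mmap_add(plat_regions);		/* queue regions in the context */
	init_xlat_tables();		/* program one MPU region per entry */
	enable_mpu_el2(0U);		/* turn the EL2 MPU on */
}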
diff --git a/lib/xlat_mpu/xlat_mpu_core.c b/lib/xlat_mpu/xlat_mpu_core.c
new file mode 100644
index 0000000..6b4b0c2
--- /dev/null
+++ b/lib/xlat_mpu/xlat_mpu_core.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <arch_features.h>
+#include <common/debug.h>
+#include <lib/utils_def.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include "xlat_mpu_private.h"
+
+#include <fvp_r_arch_helpers.h>
+#include <platform_def.h>
+
+#warning "xlat_mpu library is currently experimental and its API may change in future."
+
+
+/* Helper function that cleans the data cache only if it is enabled. */
+static inline __attribute__((unused))
+	void xlat_clean_dcache_range(uintptr_t addr, size_t size)
+{
+	if (is_dcache_enabled()) {
+		clean_dcache_range(addr, size);
+	}
+}
+
+
+
+/* Calculate region-attributes byte for PRBAR part of MPU-region descriptor: */
+uint64_t prbar_attr_value(uint32_t attr)
+{
+	uint64_t retValue = UL(0);
+	uint64_t extract;  /* temp var holding bit extracted from attr */
+
+	/* Extract and stuff SH: */
+	extract = (uint64_t) ((attr >> MT_SHAREABILITY_SHIFT)
+				& MT_SHAREABILITY_MASK);
+	retValue |= (extract << PRBAR_SH_SHIFT);
+
+	/* Extract and stuff AP: */
+	extract = (uint64_t) ((attr >> MT_PERM_SHIFT) & MT_PERM_MASK);
+	if (extract == 0U) {
+		retValue |= (UL(2) << PRBAR_AP_SHIFT);
+	} else /* extract == 1 */ {
+		retValue |= (UL(0) << PRBAR_AP_SHIFT);
+	}
+
+	/* Extract and stuff XN: */
+	extract = (uint64_t) ((attr >> MT_EXECUTE_SHIFT) & MT_EXECUTE_MASK);
+	retValue |= (extract << PRBAR_XN_SHIFT);
+	/* However, also don't execute in peripheral space: */
+	extract = (uint64_t) ((attr >> MT_TYPE_SHIFT) & MT_TYPE_MASK);
+	if (extract == 0U) {
+		retValue |= (UL(1) << PRBAR_XN_SHIFT);
+	}
+	return retValue;
+}
+
+/* Calculate region-attributes byte for PRLAR part of MPU-region descriptor: */
+uint64_t prlar_attr_value(uint32_t attr)
+{
+	uint64_t retValue = UL(0);
+	uint64_t extract;  /* temp var holding bit extracted from attr */
+
+	/* Extract and stuff AttrIndx: */
+	extract = (uint64_t) ((attr >> MT_TYPE_SHIFT)
+				& MT_TYPE_MASK);
+	switch (extract) {
+	case UL(0):
+		retValue |= (UL(1) << PRLAR_ATTR_SHIFT);
+		break;
+	case UL(2):
+		/* 0, so OR in nothing */
+		break;
+	case UL(3):
+		retValue |= (UL(2) << PRLAR_ATTR_SHIFT);
+		break;
+	default:
+		retValue |= (extract << PRLAR_ATTR_SHIFT);
+		break;
+	}
+
+	/* Stuff EN: */
+	retValue |= (UL(1) << PRLAR_EN_SHIFT);
+
+	/* Force NS to 0 (Secure);  v8-R64 only supports Secure: */
+	extract = ~(1U << PRLAR_NS_SHIFT);
+	retValue &= extract;
+
+	return retValue;
+}
+
+/*
+ * Function that writes an MPU "translation" into the MPU registers. If not
+ * possible (e.g., if no more MPU regions available) boot is aborted.
+ */
+static void mpu_map_region(mmap_region_t *mm)
+{
+	uint64_t prenr_el2_value = 0UL;
+	uint64_t prbar_attrs = 0UL;
+	uint64_t prlar_attrs = 0UL;
+	int region_to_use = 0;
+
+	/* If all MPU regions in use, then abort boot: */
+	prenr_el2_value = read_prenr_el2();
+	assert(prenr_el2_value != 0xffffffff);
+
+	/*
+	 * Find and select the first available MPU region (PRENR has an enable
+	 * bit for each MPU region: 1 for in-use, 0 for unused):
+	 */
+	for (region_to_use = 0;  region_to_use < N_MPU_REGIONS;
+	     region_to_use++) {
+		if (((prenr_el2_value >> region_to_use) & 1) == 0) {
+			break;
+		}
+	}
+	write_prselr_el2((uint64_t) (region_to_use));
+	isb();
+
+	/* Set base and limit addresses: */
+	write_prbar_el2(mm->base_pa & PRBAR_PRLAR_ADDR_MASK);
+	write_prlar_el2((mm->base_pa + mm->size - 1UL)
+			& PRBAR_PRLAR_ADDR_MASK);
+	dsbsy();
+	isb();
+
+	/* Set attributes: */
+	prbar_attrs = prbar_attr_value(mm->attr);
+	write_prbar_el2(read_prbar_el2() | prbar_attrs);
+	prlar_attrs = prlar_attr_value(mm->attr);
+	write_prlar_el2(read_prlar_el2() | prlar_attrs);
+	dsbsy();
+	isb();
+
+	/* Mark this MPU region as used: */
+	prenr_el2_value |= (1 << region_to_use);
+	write_prenr_el2(prenr_el2_value);
+	isb();
+}
+
+/*
+ * Function that verifies that a region can be mapped.
+ * Returns:
+ *        0: Success, the mapping is allowed.
+ *   EINVAL: Invalid values were used as arguments.
+ *   ERANGE: The memory limits were surpassed.
+ *   ENOMEM: There is not enough memory in the mmap array.
+ *    EPERM: Region overlaps another one in an invalid way.
+ */
+static int mmap_add_region_check(const xlat_ctx_t *ctx, const mmap_region_t *mm)
+{
+	unsigned long long base_pa = mm->base_pa;
+	uintptr_t base_va = mm->base_va;
+	size_t size = mm->size;
+
+	unsigned long long end_pa = base_pa + size - 1U;
+	uintptr_t end_va = base_va + size - 1U;
+
+	if (base_pa != base_va) {
+		return -EINVAL;  /* MPU does not perform address translation */
+	}
+	if ((base_pa % 64ULL) != 0ULL) {
+		return -EINVAL;  /* MPU requires 64-byte alignment */
+	}
+	/* Check for overflows */
+	if ((base_pa > end_pa) || (base_va > end_va)) {
+		return -ERANGE;
+	}
+	if (end_pa > ctx->pa_max_address) {
+		return -ERANGE;
+	}
+	/* Check that there is space in the ctx->mmap array */
+	if (ctx->mmap[ctx->mmap_num - 1].size != 0U) {
+		return -ENOMEM;
+	}
+	/* Check for PAs and VAs overlaps with all other regions */
+	for (const mmap_region_t *mm_cursor = ctx->mmap;
+	     mm_cursor->size != 0U; ++mm_cursor) {
+
+		uintptr_t mm_cursor_end_va =
+			mm_cursor->base_va + mm_cursor->size - 1U;
+
+		/*
+		 * Check if one of the regions is completely inside the other
+		 * one.
+		 */
+		bool fully_overlapped_va =
+			((base_va >= mm_cursor->base_va) &&
+					(end_va <= mm_cursor_end_va)) ||
+			((mm_cursor->base_va >= base_va) &&
+						(mm_cursor_end_va <= end_va));
+
+		/*
+		 * Full VA overlaps are only allowed if both regions are
+		 * identity mapped (zero offset) or have the same VA to PA
+		 * offset. Also, make sure that it's not the exact same area.
+		 * This can only be done with static regions.
+		 */
+		if (fully_overlapped_va) {
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+			if (((mm->attr & MT_DYNAMIC) != 0U) ||
+			    ((mm_cursor->attr & MT_DYNAMIC) != 0U)) {
+				return -EPERM;
+			}
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+			if ((mm_cursor->base_va - mm_cursor->base_pa)
+					!= (base_va - base_pa)) {
+				return -EPERM;
+			}
+			if ((base_va == mm_cursor->base_va) &&
+					(size == mm_cursor->size)) {
+				return -EPERM;
+			}
+		} else {
+			/*
+			 * If the regions do not have fully overlapping VAs,
+			 * then they must have fully separated VAs and PAs.
+			 * Partial overlaps are not allowed
+			 */
+
+			unsigned long long mm_cursor_end_pa =
+				     mm_cursor->base_pa + mm_cursor->size - 1U;
+
+			bool separated_pa = (end_pa < mm_cursor->base_pa) ||
+				(base_pa > mm_cursor_end_pa);
+			bool separated_va = (end_va < mm_cursor->base_va) ||
+				(base_va > mm_cursor_end_va);
+
+			if (!separated_va || !separated_pa) {
+				return -EPERM;
+			}
+		}
+	}
+
+	return 0;
+}
+
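To make the return codes above concrete (illustration only, addresses are placeholders):

/*
 * Examples of how the checks above classify regions:
 *
 *   base_pa 0x1000, base_va 0x2000              -> -EINVAL (MPU cannot translate)
 *   base_pa == base_va == 0x1010                -> -EINVAL (not 64-byte aligned)
 *   partial PA/VA overlap with another region   -> -EPERM
 *   identity-mapped, aligned, non-overlapping   -> 0 (accepted)
 */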
+void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
+{
+	mmap_region_t *mm_cursor = ctx->mmap, *mm_destination;
+	const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num;
+	const mmap_region_t *mm_last;
+	unsigned long long end_pa = mm->base_pa + mm->size - 1U;
+	uintptr_t end_va = mm->base_va + mm->size - 1U;
+	int ret;
+
+	/* Ignore empty regions */
+	if (mm->size == 0U) {
+		return;
+	}
+
+	/* Static regions must be added before initializing the xlat tables. */
+	assert(!ctx->initialized);
+
+	ret = mmap_add_region_check(ctx, mm);
+	if (ret != 0) {
+		ERROR("mmap_add_region_check() failed. error %d\n", ret);
+		assert(false);
+		return;
+	}
+
+	/*
+	 * Find the last entry marker in the mmap
+	 */
+	mm_last = ctx->mmap;
+	while ((mm_last->size != 0U) && (mm_last < mm_end)) {
+		++mm_last;
+	}
+
+	/*
+	 * Check if we have enough space in the memory mapping table.
+	 * This shouldn't happen as we have checked in mmap_add_region_check
+	 * that there is free space.
+	 */
+	assert(mm_last->size == 0U);
+
+	/* Make room for new region by moving other regions up by one place */
+	mm_destination = mm_cursor + 1;
+	(void)memmove(mm_destination, mm_cursor,
+		(uintptr_t)mm_last - (uintptr_t)mm_cursor);
+
+	/*
+	 * Check we haven't lost the empty sentinel from the end of the array.
+	 * This shouldn't happen as we have checked in mmap_add_region_check
+	 * that there is free space.
+	 */
+	assert(mm_end->size == 0U);
+
+	*mm_cursor = *mm;
+
+	if (end_pa > ctx->max_pa) {
+		ctx->max_pa = end_pa;
+	}
+	if (end_va > ctx->max_va) {
+		ctx->max_va = end_va;
+	}
+}
+
+void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
+{
+	const mmap_region_t *mm_cursor = mm;
+
+	while (mm_cursor->granularity != 0U) {
+		mmap_add_region_ctx(ctx, mm_cursor);
+		mm_cursor++;
+	}
+}
+
+void __init init_xlat_tables_ctx(xlat_ctx_t *ctx)
+{
+	uint64_t mair = UL(0);
+
+	assert(ctx != NULL);
+	assert(!ctx->initialized);
+	assert((ctx->xlat_regime == EL2_REGIME) ||
+		(ctx->xlat_regime == EL1_EL0_REGIME));
+	/* Note:  Add EL3_REGIME if EL3 is supported in future v8-R64 cores. */
+	assert(!is_mpu_enabled_ctx(ctx));
+
+	mmap_region_t *mm = ctx->mmap;
+
+	assert(ctx->va_max_address >=
+		(xlat_get_min_virt_addr_space_size() - 1U));
+	assert(ctx->va_max_address <= (MAX_VIRT_ADDR_SPACE_SIZE - 1U));
+	assert(IS_POWER_OF_TWO(ctx->va_max_address + 1U));
+
+	xlat_mmap_print(mm);
+
+	/* All tables must be zeroed before mapping any region. */
+
+	for (unsigned int i = 0U; i < ctx->base_table_entries; i++) {
+		ctx->base_table[i] = INVALID_DESC;
+	}
+
+	/* Also mark all MPU regions as invalid in the MPU hardware itself: */
+	write_prenr_el2(0);
+	/* Sufficient for current, max-32-region implementations. */
+	dsbsy();
+	isb();
+	while (mm->size != 0U) {
+		if (read_prenr_el2() == ALL_MPU_EL2_REGIONS_USED) {
+			ERROR("Not enough MPU regions to map region:\n"
+				" VA:0x%lx  PA:0x%llx  size:0x%zx  attr:0x%x\n",
+				mm->base_va, mm->base_pa, mm->size, mm->attr);
+			panic();
+		} else {
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+			xlat_clean_dcache_range((uintptr_t)mm->base_va,
+				mm->size);
+#endif
+			mpu_map_region(mm);
+		}
+		mm++;
+	}
+
+	ctx->initialized = true;
+
+	xlat_tables_print(ctx);
+
+	/* Set attributes in the right indices of the MAIR */
+	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
+			ATTR_IWBWA_OWBWA_NTR_INDEX);
+	mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,
+			ATTR_NON_CACHEABLE_INDEX);
+	write_mair_el2(mair);
+	dsbsy();
+	isb();
+}
+
+/*
+ * Function to wipe clean and disable all MPU regions.  This function expects
+ * that the MPU has already been turned off, and caching concerns addressed,
+ * but it nevertheless also explicitly turns off the MPU.
+ */
+void clear_all_mpu_regions(void)
+{
+	uint64_t sctlr_el2_value = 0UL;
+	uint64_t region_n = 0UL;
+
+	/*
+	 * MPU should already be disabled, but explicitly disable it
+	 * nevertheless:
+	 */
+	sctlr_el2_value = read_sctlr_el2() & ~(1UL);
+	write_sctlr_el2(sctlr_el2_value);
+
+	/* Disable all regions: */
+	write_prenr_el2(0UL);
+
+	/* Sequence through all regions, zeroing them out and turning off: */
+	for (region_n = 0UL;  region_n < N_MPU_REGIONS;  region_n++) {
+		write_prselr_el2(region_n);
+		isb();
+		write_prbar_el2((uint64_t) 0);
+		write_prlar_el2((uint64_t) 0);
+		dsbsy();
+		isb();
+	}
+}
diff --git a/lib/xlat_mpu/xlat_mpu_private.h b/lib/xlat_mpu/xlat_mpu_private.h
new file mode 100644
index 0000000..e0e479d
--- /dev/null
+++ b/lib/xlat_mpu/xlat_mpu_private.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_MPU_PRIVATE_H
+#define XLAT_MPU_PRIVATE_H
+
+#include <stdbool.h>
+
+#include <lib/xlat_tables/xlat_tables_defs.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#include <platform_def.h>
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+/*
+ * Private shifts and masks to access fields of an mmap attribute
+ */
+/* Dynamic or static */
+#define MT_DYN_SHIFT		U(31)
+
+/*
+ * Memory mapping private attributes
+ *
+ * Private attributes not exposed in the public header.
+ */
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/* Calculate region-attributes byte for PRBAR part of MPU-region descriptor: */
+uint64_t prbar_attr_value(uint32_t attr);
+/* Calculate region-attributes byte for PRLAR part of MPU-region descriptor: */
+uint64_t prlar_attr_value(uint32_t attr);
+/* Calculates the attr value for a given PRBAR and PRLAR entry value: */
+uint32_t region_attr(uint64_t prbar_attr, uint64_t prlar_attr);
+
+#define PRBAR_PRLAR_ADDR_MASK	UL(0xffffffffffc0)
+	/* mask for PRBAR & PRLAR MPU-region field */
+/* MPU region attribute bit fields: */
+#define PRBAR_SH_SHIFT		UL(4)
+#define PRBAR_SH_MASK		UL(0x3)
+#define PRBAR_AP_SHIFT		UL(2)
+#define PRBAR_AP_MASK		UL(0x3)
+#define PRBAR_XN_SHIFT		UL(1)
+#define PRBAR_XN_MASK		UL(0x3)
+#define PRLAR_NS_SHIFT		UL(4)
+#define PRLAR_NS_MASK		UL(0x3)
+#define PRBAR_ATTR_SHIFT	UL(0)
+#define PRBAR_ATTR_MASK		UL(0x3f)
+#define PRLAR_ATTR_SHIFT	UL(1)
+#define PRLAR_ATTR_MASK		UL(0x7)
+#define PRLAR_EN_SHIFT		UL(0)
+#define PRLAR_EN_MASK		UL(0x1)
+/* Aspects of the source attributes not defined elsewhere: */
+#define MT_PERM_MASK		UL(0x1)
+#define MT_SEC_MASK		UL(0x1)
+#define MT_EXECUTE_MASK		UL(0x3)
+#define MT_TYPE_SHIFT		UL(0)
+
+extern uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
+/*
+ * Return the execute-never mask that will prevent instruction fetch at the
+ * given translation regime.
+ */
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime);
+
+/* Print VA, PA, size and attributes of all regions in the mmap array. */
+void xlat_mmap_print(const mmap_region_t *mmap);
+
+/*
+ * Print the current state of the translation tables by reading them from
+ * memory.
+ */
+void xlat_tables_print(xlat_ctx_t *ctx);
+
+/*
+ * Returns a block/page table descriptor for the given level and attributes.
+ */
+uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
+		   unsigned long long addr_pa, unsigned int level);
+
+/*
+ * Architecture-specific initialization code.
+ */
+
+/* Returns the current Exception Level. The returned EL must be 1 or higher. */
+unsigned int xlat_arch_current_el(void);
+
+/*
+ * Returns true if the MPU of the translation regime managed by the given
+ * xlat_ctx_t is enabled, false otherwise.
+ */
+bool is_mpu_enabled_ctx(const xlat_ctx_t *ctx);
+
+/*
+ * Returns minimum virtual address space size supported by the architecture
+ */
+uintptr_t xlat_get_min_virt_addr_space_size(void);
+
+#endif /* XLAT_MPU_PRIVATE_H */
diff --git a/lib/xlat_mpu/xlat_mpu_utils.c b/lib/xlat_mpu/xlat_mpu_utils.c
new file mode 100644
index 0000000..f305632
--- /dev/null
+++ b/lib/xlat_mpu/xlat_mpu_utils.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <common/debug.h>
+#include <lib/utils_def.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include "xlat_mpu_private.h"
+
+#include <fvp_r_arch_helpers.h>
+#include <platform_def.h>
+
+#warning "xlat_mpu library is currently experimental and its API may change in future."
+
+
+#if LOG_LEVEL < LOG_LEVEL_VERBOSE
+
+void xlat_mmap_print(__unused const mmap_region_t *mmap)
+{
+	/* Empty */
+}
+
+void xlat_tables_print(__unused xlat_ctx_t *ctx)
+{
+	/* Empty */
+}
+
+#else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+static const char *invalid_descriptors_omitted =
+		"%s(%d invalid descriptors omitted)\n";
+
+void xlat_tables_print(xlat_ctx_t *ctx)
+{
+	const char *xlat_regime_str;
+	int used_page_tables;
+
+	if (ctx->xlat_regime == EL1_EL0_REGIME) {
+		xlat_regime_str = "1&0";
+	} else if (ctx->xlat_regime == EL2_REGIME) {
+		xlat_regime_str = "2";
+	} else {
+		assert(ctx->xlat_regime == EL3_REGIME);
+		xlat_regime_str = "3";
+		/* v8-R64 has no EL3; flag it if EL3 tables were generated. */
+	}
+	VERBOSE("Translation tables state:\n");
+	VERBOSE("  Xlat regime:     EL%s\n", xlat_regime_str);
+	VERBOSE("  Max allowed PA:  0x%llx\n", ctx->pa_max_address);
+	VERBOSE("  Max allowed VA:  0x%lx\n", ctx->va_max_address);
+	VERBOSE("  Max mapped PA:   0x%llx\n", ctx->max_pa);
+	VERBOSE("  Max mapped VA:   0x%lx\n", ctx->max_va);
+
+	VERBOSE("  Initial lookup level: %u\n", ctx->base_level);
+	VERBOSE("  Entries @initial lookup level: %u\n",
+		ctx->base_table_entries);
+
+	xlat_tables_print_internal(ctx, 0U, ctx->base_table,
+				   ctx->base_table_entries, ctx->base_level);
+}
+
+#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
diff --git a/lib/xlat_tables/aarch64/xlat_tables.c b/lib/xlat_tables/aarch64/xlat_tables.c
index c86412c..dc167e3 100644
--- a/lib/xlat_tables/aarch64/xlat_tables.c
+++ b/lib/xlat_tables/aarch64/xlat_tables.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -147,7 +147,7 @@
  *			exception level
  ******************************************************************************/
 #define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
-	void enable_mmu_el##_el(unsigned int flags)				\
+	void enable_mmu_el##_el(unsigned int flags)			\
 	{								\
 		uint64_t mair, tcr, ttbr;				\
 		uint32_t sctlr;						\
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index b69c670..a1a44af 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -39,6 +39,23 @@
 	return PAGE_SIZE_4KB;
 }
 
+/*
+ * Determine the physical address space encoded in the 'attr' parameter.
+ *
+ * The physical address will fall into one of two spaces; secure or
+ * nonsecure.
+ */
+uint32_t xlat_arch_get_pas(uint32_t attr)
+{
+	uint32_t pas = MT_PAS(attr);
+
+	if (pas == MT_NS) {
+		return LOWER_ATTRS(NS);
+	} else { /* MT_SECURE */
+		return 0U;
+	}
+}
+
 #if ENABLE_ASSERTIONS
 unsigned long long xlat_arch_get_max_supported_pa(void)
 {
@@ -203,8 +220,6 @@
 
 		assert(virtual_addr_space_size >=
 			xlat_get_min_virt_addr_space_size());
-		assert(virtual_addr_space_size <=
-			MAX_VIRT_ADDR_SPACE_SIZE);
 		assert(IS_POWER_OF_TWO(virtual_addr_space_size));
 
 		/*
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 3832b07..719110a 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -53,6 +53,33 @@
 	}
 }
 
+/*
+ * Determine the physical address space encoded in the 'attr' parameter.
+ *
+ * The physical address will fall into one of four spaces; secure,
+ * nonsecure, root, or realm if RME is enabled, or one of two spaces;
+ * secure and nonsecure otherwise.
+ */
+uint32_t xlat_arch_get_pas(uint32_t attr)
+{
+	uint32_t pas = MT_PAS(attr);
+
+	switch (pas) {
+#if ENABLE_RME
+	/* TTD.NSE = 1 and TTD.NS = 1 for Realm PAS */
+	case MT_REALM:
+		return LOWER_ATTRS(EL3_S1_NSE | NS);
+	/* TTD.NSE = 1 and TTD.NS = 0 for Root PAS */
+	case MT_ROOT:
+		return LOWER_ATTRS(EL3_S1_NSE);
+#endif
+	case MT_NS:
+		return LOWER_ATTRS(NS);
+	default: /* MT_SECURE */
+		return 0U;
+	}
+}
+
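With RME enabled, the PAS of a region can now be selected through its mmap attribute; a minimal sketch, assuming hypothetical GPT_L0_BASE/GPT_L0_SIZE symbols for memory that should live in the Root PAS:

#include <lib/xlat_tables/xlat_tables_v2.h>

static void map_gpt_l0_in_root_pas(void)
{
	/* Illustration only: GPT_L0_BASE/GPT_L0_SIZE are placeholders. */
	mmap_add_region(GPT_L0_BASE, GPT_L0_BASE, GPT_L0_SIZE,
			MT_MEMORY | MT_RW | MT_ROOT);
}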
 unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr)
 {
 	/* Physical address can't exceed 48 bits */
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
index bb6d184..de57184 100644
--- a/lib/xlat_tables_v2/xlat_tables_core.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -125,11 +125,14 @@
 	 * faults aren't managed.
 	 */
 	desc |= LOWER_ATTRS(ACCESS_FLAG);
+
+	/* Determine the physical address space this region belongs to. */
+	desc |= xlat_arch_get_pas(attr);
+
 	/*
-	 * Deduce other fields of the descriptor based on the MT_NS and MT_RW
-	 * memory region attributes.
+	 * Deduce other fields of the descriptor based on the MT_RW memory
+	 * region attributes.
 	 */
-	desc |= ((attr & MT_NS) != 0U) ? LOWER_ATTRS(NS) : 0U;
 	desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
 
 	/*
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index 863470c..42c9a43 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -40,6 +40,9 @@
 
 extern uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
 
+/* Determine the physical address space encoded in the 'attr' parameter. */
+uint32_t xlat_arch_get_pas(uint32_t attr);
+
 /*
  * Return the execute-never mask that will prevent instruction fetch at the
  * given translation regime.
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
index 9fae7e9..df17386 100644
--- a/lib/xlat_tables_v2/xlat_tables_utils.c
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -95,7 +95,23 @@
 			  ? "-USER" : "-PRIV");
 	}
 
+#if ENABLE_RME
+	switch (desc & LOWER_ATTRS(EL3_S1_NSE | NS)) {
+	case 0ULL:
+		printf("-S");
+		break;
+	case LOWER_ATTRS(NS):
+		printf("-NS");
+		break;
+	case LOWER_ATTRS(EL3_S1_NSE):
+		printf("-RT");
+		break;
+	default: /* LOWER_ATTRS(EL3_S1_NSE | NS) */
+		printf("-RL");
+	}
+#else
 	printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S");
+#endif
 
 #ifdef __aarch64__
 	/* Check Guarded Page bit */
diff --git a/make_helpers/build_macros.mk b/make_helpers/build_macros.mk
index 8655028..12aaee6 100644
--- a/make_helpers/build_macros.mk
+++ b/make_helpers/build_macros.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -98,41 +98,41 @@
 endef
 
 # IMG_LINKERFILE defines the linker script corresponding to a BL stage
-#   $(1) = BL stage (1, 2, 2u, 31, 32)
+#   $(1) = BL stage
 define IMG_LINKERFILE
-    ${BUILD_DIR}/bl$(1).ld
+    ${BUILD_DIR}/$(1).ld
 endef
 
 # IMG_MAPFILE defines the output file describing the memory map corresponding
 # to a BL stage
-#   $(1) = BL stage (1, 2, 2u, 31, 32)
+#   $(1) = BL stage
 define IMG_MAPFILE
-    ${BUILD_DIR}/bl$(1).map
+    ${BUILD_DIR}/$(1).map
 endef
 
 # IMG_ELF defines the elf file corresponding to a BL stage
-#   $(1) = BL stage (1, 2, 2u, 31, 32)
+#   $(1) = BL stage
 define IMG_ELF
-    ${BUILD_DIR}/bl$(1).elf
+    ${BUILD_DIR}/$(1).elf
 endef
 
 # IMG_DUMP defines the symbols dump file corresponding to a BL stage
-#   $(1) = BL stage (1, 2, 2u, 31, 32)
+#   $(1) = BL stage
 define IMG_DUMP
-    ${BUILD_DIR}/bl$(1).dump
+    ${BUILD_DIR}/$(1).dump
 endef
 
 # IMG_BIN defines the default image file corresponding to a BL stage
-#   $(1) = BL stage (1, 2, 2u, 31, 32)
+#   $(1) = BL stage
 define IMG_BIN
-    ${BUILD_PLAT}/bl$(1).bin
+    ${BUILD_PLAT}/$(1).bin
 endef
 
 # IMG_ENC_BIN defines the default encrypted image file corresponding to a
 # BL stage
-#   $(1) = BL stage (2, 30, 31, 32, 33)
+#   $(1) = BL stage
 define IMG_ENC_BIN
-    ${BUILD_PLAT}/bl$(1)_enc.bin
+    ${BUILD_PLAT}/$(1)_enc.bin
 endef
 
 # ENCRYPT_FW invokes enctool to encrypt firmware binary
@@ -294,15 +294,15 @@
 # MAKE_C builds a C source file and generates the dependency file
 #   $(1) = output directory
 #   $(2) = source file (%.c)
-#   $(3) = BL stage (1, 2, 2u, 31, 32)
+#   $(3) = BL stage
 define MAKE_C
 
 $(eval OBJ := $(1)/$(patsubst %.c,%.o,$(notdir $(2))))
 $(eval DEP := $(patsubst %.o,%.d,$(OBJ)))
-$(eval BL_CPPFLAGS := $(BL$(call uppercase,$(3))_CPPFLAGS) -DIMAGE_BL$(call uppercase,$(3)))
-$(eval BL_CFLAGS := $(BL$(call uppercase,$(3))_CFLAGS))
+$(eval BL_CPPFLAGS := $($(call uppercase,$(3))_CPPFLAGS) -DIMAGE_$(call uppercase,$(3)))
+$(eval BL_CFLAGS := $($(call uppercase,$(3))_CFLAGS))
 
-$(OBJ): $(2) $(filter-out %.d,$(MAKEFILE_LIST)) | bl$(3)_dirs
+$(OBJ): $(2) $(filter-out %.d,$(MAKEFILE_LIST)) | $(3)_dirs
 	$$(ECHO) "  CC      $$<"
 	$$(Q)$$(CC) $$(LTO_CFLAGS) $$(TF_CFLAGS) $$(CFLAGS) $(BL_CPPFLAGS) $(BL_CFLAGS) $(MAKE_DEP) -c $$< -o $$@
 
@@ -314,15 +314,15 @@
 # MAKE_S builds an assembly source file and generates the dependency file
 #   $(1) = output directory
 #   $(2) = assembly file (%.S)
-#   $(3) = BL stage (1, 2, 2u, 31, 32)
+#   $(3) = BL stage
 define MAKE_S
 
 $(eval OBJ := $(1)/$(patsubst %.S,%.o,$(notdir $(2))))
 $(eval DEP := $(patsubst %.o,%.d,$(OBJ)))
-$(eval BL_CPPFLAGS := $(BL$(call uppercase,$(3))_CPPFLAGS) -DIMAGE_BL$(call uppercase,$(3)))
-$(eval BL_ASFLAGS := $(BL$(call uppercase,$(3))_ASFLAGS))
+$(eval BL_CPPFLAGS := $($(call uppercase,$(3))_CPPFLAGS) -DIMAGE_$(call uppercase,$(3)))
+$(eval BL_ASFLAGS := $($(call uppercase,$(3))_ASFLAGS))
 
-$(OBJ): $(2) $(filter-out %.d,$(MAKEFILE_LIST)) | bl$(3)_dirs
+$(OBJ): $(2) $(filter-out %.d,$(MAKEFILE_LIST)) | $(3)_dirs
 	$$(ECHO) "  AS      $$<"
 	$$(Q)$$(AS) $$(ASFLAGS) $(BL_CPPFLAGS) $(BL_ASFLAGS) $(MAKE_DEP) -c $$< -o $$@
 
@@ -334,13 +334,13 @@
 # MAKE_LD generate the linker script using the C preprocessor
 #   $(1) = output linker script
 #   $(2) = input template
-#   $(3) = BL stage (1, 2, 2u, 31, 32)
+#   $(3) = BL stage
 define MAKE_LD
 
 $(eval DEP := $(1).d)
-$(eval BL_CPPFLAGS := $(BL$(call uppercase,$(3))_CPPFLAGS) -DIMAGE_BL$(call uppercase,$(3)))
+$(eval BL_CPPFLAGS := $($(call uppercase,$(3))_CPPFLAGS) -DIMAGE_$(call uppercase,$(3)))
 
-$(1): $(2) $(filter-out %.d,$(MAKEFILE_LIST)) | bl$(3)_dirs
+$(1): $(2) $(filter-out %.d,$(MAKEFILE_LIST)) | $(3)_dirs
 	$$(ECHO) "  PP      $$<"
 	$$(Q)$$(CPP) $$(CPPFLAGS) $(BL_CPPFLAGS) $(TF_CFLAGS_$(ARCH)) -P -x assembler-with-cpp -D__LINKER__ $(MAKE_DEP) -o $$@ $$<
 
@@ -368,7 +368,7 @@
 # MAKE_OBJS builds both C and assembly source files
 #   $(1) = output directory
 #   $(2) = list of source files (both C and assembly)
-#   $(3) = BL stage (1, 2, 2u, 31, 32)
+#   $(3) = BL stage
 define MAKE_OBJS
         $(eval C_OBJS := $(filter %.c,$(2)))
         $(eval REMAIN := $(filter-out %.c,$(2)))
@@ -445,13 +445,13 @@
 
 # MAKE_BL macro defines the targets and options to build each BL image.
 # Arguments:
-#   $(1) = BL stage (1, 2, 2u, 31, 32)
+#   $(1) = BL stage
 #   $(2) = FIP command line option (if empty, image will not be included in the FIP)
 #   $(3) = FIP prefix (optional) (if FWU_, target is fwu_fip instead of fip)
 #   $(4) = BL encryption flag (optional) (0, 1)
 define MAKE_BL
-        $(eval BUILD_DIR  := ${BUILD_PLAT}/bl$(1))
-        $(eval BL_SOURCES := $(BL$(call uppercase,$(1))_SOURCES))
+        $(eval BUILD_DIR  := ${BUILD_PLAT}/$(1))
+        $(eval BL_SOURCES := $($(call uppercase,$(1))_SOURCES))
         $(eval SOURCES    := $(BL_SOURCES) $(BL_COMMON_SOURCES) $(PLAT_BL_COMMON_SOURCES))
         $(eval OBJS       := $(addprefix $(BUILD_DIR)/,$(call SOURCES_TO_OBJS,$(SOURCES))))
         $(eval LINKERFILE := $(call IMG_LINKERFILE,$(1)))
@@ -460,8 +460,8 @@
         $(eval DUMP       := $(call IMG_DUMP,$(1)))
         $(eval BIN        := $(call IMG_BIN,$(1)))
         $(eval ENC_BIN    := $(call IMG_ENC_BIN,$(1)))
-        $(eval BL_LINKERFILE := $(BL$(call uppercase,$(1))_LINKERFILE))
-        $(eval BL_LIBS    := $(BL$(call uppercase,$(1))_LIBS))
+        $(eval BL_LINKERFILE := $($(call uppercase,$(1))_LINKERFILE))
+        $(eval BL_LIBS    := $($(call uppercase,$(1))_LIBS))
         # We use sort only to get a list of unique object directory names.
         # ordering is not relevant but sort removes duplicates.
         $(eval TEMP_OBJ_DIRS := $(sort $(dir ${OBJS} ${LINKERFILE})))
@@ -475,21 +475,21 @@
 
 $(eval $(foreach objd,${OBJ_DIRS},$(call MAKE_PREREQ_DIR,${objd},${BUILD_DIR})))
 
-.PHONY : bl${1}_dirs
+.PHONY : ${1}_dirs
 
 # We use order-only prerequisites to ensure that directories are created,
 # but do not cause re-builds every time a file is written.
-bl${1}_dirs: | ${OBJ_DIRS}
+${1}_dirs: | ${OBJ_DIRS}
 
 $(eval $(call MAKE_OBJS,$(BUILD_DIR),$(SOURCES),$(1)))
 $(eval $(call MAKE_LD,$(LINKERFILE),$(BL_LINKERFILE),$(1)))
-$(eval BL_LDFLAGS := $(BL$(call uppercase,$(1))_LDFLAGS))
+$(eval BL_LDFLAGS := $($(call uppercase,$(1))_LDFLAGS))
 
 ifeq ($(USE_ROMLIB),1)
 $(ELF): romlib.bin
 endif
 
-$(ELF): $(OBJS) $(LINKERFILE) | bl$(1)_dirs libraries $(BL_LIBS)
+$(ELF): $(OBJS) $(LINKERFILE) | $(1)_dirs libraries $(BL_LIBS)
 	$$(ECHO) "  LD      $$@"
 ifdef MAKE_BUILD_STRINGS
 	$(call MAKE_BUILD_STRINGS, $(BUILD_DIR)/build_message.o)
@@ -499,10 +499,10 @@
 		$$(CC) $$(TF_CFLAGS) $$(CFLAGS) -xc -c - -o $(BUILD_DIR)/build_message.o
 endif
 ifneq ($(findstring armlink,$(notdir $(LD))),)
-	$$(Q)$$(LD) -o $$@ $$(TF_LDFLAGS) $$(LDFLAGS) $(BL_LDFLAGS) --entry=bl${1}_entrypoint \
+	$$(Q)$$(LD) -o $$@ $$(TF_LDFLAGS) $$(LDFLAGS) $(BL_LDFLAGS) --entry=${1}_entrypoint \
 		--predefine="-D__LINKER__=$(__LINKER__)" \
 		--predefine="-DTF_CFLAGS=$(TF_CFLAGS)" \
-		--map --list="$(MAPFILE)" --scatter=${PLAT_DIR}/scat/bl${1}.scat \
+		--map --list="$(MAPFILE)" --scatter=${PLAT_DIR}/scat/${1}.scat \
 		$(LDPATHS) $(LIBWRAPPER) $(LDLIBS) $(BL_LIBS) \
 		$(BUILD_DIR)/build_message.o $(OBJS)
 else ifneq ($(findstring gcc,$(notdir $(LD))),)
@@ -531,21 +531,21 @@
 	@echo "Built $$@ successfully"
 	@${ECHO_BLANK_LINE}
 
-.PHONY: bl$(1)
+.PHONY: $(1)
 ifeq ($(DISABLE_BIN_GENERATION),1)
-bl$(1): $(ELF) $(DUMP)
+$(1): $(ELF) $(DUMP)
 else
-bl$(1): $(BIN) $(DUMP)
+$(1): $(BIN) $(DUMP)
 endif
 
-all: bl$(1)
+all: $(1)
 
 ifeq ($(4),1)
 $(call ENCRYPT_FW,$(BIN),$(ENC_BIN))
-$(if $(2),$(call TOOL_ADD_IMG_PAYLOAD,bl$(1),$(BIN),--$(2),$(ENC_BIN),$(3), \
+$(if $(2),$(call TOOL_ADD_IMG_PAYLOAD,$(1),$(BIN),--$(2),$(ENC_BIN),$(3), \
 		$(ENC_BIN)))
 else
-$(if $(2),$(call TOOL_ADD_IMG_PAYLOAD,bl$(1),$(BIN),--$(2),$(BIN),$(3)))
+$(if $(2),$(call TOOL_ADD_IMG_PAYLOAD,$(1),$(BIN),--$(2),$(BIN),$(3)))
 endif
 
 endef
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index c188621..819c536 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2016-2021, ARM Limited. All rights reserved.
+# Copyright (c) 2016-2021, Arm Limited. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -32,6 +32,9 @@
 # Execute BL2 at EL3
 BL2_AT_EL3			:= 0
 
+# Only use SP packages if SP layout JSON is defined
+BL2_ENABLE_SP_LOAD		:= 0
+
 # BL2 image is stored in XIP memory, for now, this option is only supported
 # when BL2_AT_EL3 is 1.
 BL2_IN_XIP_MEM			:= 0
@@ -102,6 +105,9 @@
 # Flag to enable PSCI STATs functionality
 ENABLE_PSCI_STAT		:= 0
 
+# Flag to enable Realm Management Extension (FEAT_RME)
+ENABLE_RME			:= 0
+
 # Flag to enable runtime instrumentation using PMF
 ENABLE_RUNTIME_INSTRUMENTATION	:= 0
 
@@ -121,6 +127,9 @@
 # Use BRANCH_PROTECTION to enable PAUTH.
 ENABLE_PAUTH			:= 0
 
+# Flag to enable access to the HCRX_EL2 register by setting SCR_EL3.HXEn.
+ENABLE_FEAT_HCX                 := 0
+
 # By default BL31 encryption disabled
 ENCRYPT_BL31			:= 0
 
diff --git a/make_helpers/tbbr/tbbr_tools.mk b/make_helpers/tbbr/tbbr_tools.mk
index f7cced4..0a280b4 100644
--- a/make_helpers/tbbr/tbbr_tools.mk
+++ b/make_helpers/tbbr/tbbr_tools.mk
@@ -11,6 +11,7 @@
 # Expected environment:
 #
 #   BUILD_PLAT: output directory
+#   NEED_BL2: indicates whether BL2 is needed by the platform
 #   NEED_BL32: indicates whether BL32 is needed by the platform
 #   BL2: image filename (optional). Default is IMG_BIN(2) (see macro IMG_BIN)
 #   SCP_BL2: image filename (optional). Default is IMG_BIN(30)
@@ -67,9 +68,11 @@
 
 
 # Add the BL2 CoT (image cert)
+ifeq (${NEED_BL2},yes)
 ifeq (${BL2_AT_EL3}, 0)
 $(eval $(call TOOL_ADD_PAYLOAD,${BUILD_PLAT}/tb_fw.crt,--tb-fw-cert))
 endif
+endif
 
 # Add the SCP_BL2 CoT (key cert + img cert)
 ifneq (${SCP_BL2},)
diff --git a/plat/arm/board/arm_fpga/build_axf.ld.S b/plat/arm/board/arm_fpga/build_axf.ld.S
index d7cd008..b4bc7d8 100644
--- a/plat/arm/board/arm_fpga/build_axf.ld.S
+++ b/plat/arm/board/arm_fpga/build_axf.ld.S
@@ -17,6 +17,7 @@
 
 INPUT(./bl31/bl31.elf)
 INPUT(./rom_trampoline.o)
+INPUT(./kernel_trampoline.o)
 
 TARGET(binary)
 INPUT(./fdts/arm_fpga.dtb)
@@ -33,7 +34,6 @@
 	.bl31 (BL31_BASE): {
 		ASSERT(. == ALIGN(PAGE_SIZE), "BL31_BASE is not page aligned");
 		*bl31.elf(.text* .data* .rodata* ro* .bss*)
-		*bl31.elf(.stack)
 	}
 
 	.dtb (FPGA_PRELOADED_DTB_BASE): {
@@ -41,6 +41,12 @@
 		*arm_fpga.dtb
 	}
 
+	.kern_tramp (PRELOADED_BL33_BASE): {
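+		/* Kernel trampoline, placed at the preloaded BL33 (kernel) address. */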
+		*kernel_trampoline.o(.text*)
+		KEEP(*(.kern_tramp))
+	}
+
+	/DISCARD/ : { *(stacks) }
 	/DISCARD/ : { *(.debug_*) }
 	/DISCARD/ : { *(.note*) }
 	/DISCARD/ : { *(.comment*) }
diff --git a/plat/arm/board/arm_fpga/fpga_bl31_setup.c b/plat/arm/board/arm_fpga/fpga_bl31_setup.c
index 81d040c..2b5ca4a 100644
--- a/plat/arm/board/arm_fpga/fpga_bl31_setup.c
+++ b/plat/arm/board/arm_fpga/fpga_bl31_setup.c
@@ -144,6 +144,12 @@
 		panic();
 	}
 
+	/* Reserve memory used by Trusted Firmware. */
+	if (fdt_add_reserved_memory(fdt, "tf-a@80000000", BL31_BASE,
+				    BL31_LIMIT - BL31_BASE)) {
+		WARN("Failed to add reserved memory node to DT\n");
+	}
+
 	/* Check for the command line signature. */
 	if (!strncmp(cmdline, "CMD:", 4)) {
 		int chosen;
diff --git a/plat/arm/board/arm_fpga/include/platform_def.h b/plat/arm/board/arm_fpga/include/platform_def.h
index 411b936..2350d87 100644
--- a/plat/arm/board/arm_fpga/include/platform_def.h
+++ b/plat/arm/board/arm_fpga/include/platform_def.h
@@ -30,7 +30,7 @@
 
 #if !ENABLE_PIE
 #define BL31_BASE			UL(0x80000000)
-#define BL31_LIMIT			UL(0x80100000)
+#define BL31_LIMIT			UL(0x80070000)
 #else
 #define BL31_BASE			UL(0x0)
 #define BL31_LIMIT			UL(0x01000000)
diff --git a/plat/arm/board/arm_fpga/kernel_trampoline.S b/plat/arm/board/arm_fpga/kernel_trampoline.S
new file mode 100644
index 0000000..f4c08ef
--- /dev/null
+++ b/plat/arm/board/arm_fpga/kernel_trampoline.S
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2021, ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * The traditional arm64 Linux kernel load address is 512KiB from the
+ * beginning of DRAM, caused by this having been the default value of the
+ * kernel's CONFIG_TEXT_OFFSET Kconfig option.
+ * However kernel version 5.8 changed the default offset (within a 2MiB page)
+ * to 0, so TF-A's default assumption is no longer true. Fortunately the
+ * kernel got more relaxed about this offset at the same time, so it
+ * tolerates the wrong offset, but issues a warning:
+ * [Firmware Bug]: Kernel image misaligned at boot, please fix your bootloader!
+ *
+ * We cannot easily change the load address offset in TF-A to be 2MiB, because
+ * this would break older kernels - and they are not as forgiving in this
+ * respect.
+ *
+ * But we can allow users to load the kernel at the right offset, and
+ * offer this trampoline here to transition to this new load address.
+ * Any older kernel, or a newer kernel loaded at the old offset, will
+ * overwrite this code, so the trampoline does no harm in either case.
+ */
+
+#include <asm_macros.S>
+#include <common/bl_common.ld.h>
+
+.text
+.global _tramp_start
+
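+/*
+ * Round the trampoline's own runtime address up to the next 2MiB boundary,
+ * where a v5.8+ kernel is expected to have been loaded, and branch there.
+ */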
+_tramp_start:
+	adr	x4, _tramp_start
+	orr	x4, x4, #0x1fffff
+	add	x4, x4, #1			/* align up to 2MB */
+	br	x4
diff --git a/plat/arm/board/arm_fpga/platform.mk b/plat/arm/board/arm_fpga/platform.mk
index baffbcf..1217425 100644
--- a/plat/arm/board/arm_fpga/platform.mk
+++ b/plat/arm/board/arm_fpga/platform.mk
@@ -118,10 +118,11 @@
 				${FPGA_GIC_SOURCES}
 
 $(eval $(call MAKE_S,$(BUILD_PLAT),plat/arm/board/arm_fpga/rom_trampoline.S,31))
+$(eval $(call MAKE_S,$(BUILD_PLAT),plat/arm/board/arm_fpga/kernel_trampoline.S,31))
 $(eval $(call MAKE_LD,$(BUILD_PLAT)/build_axf.ld,plat/arm/board/arm_fpga/build_axf.ld.S,31))
 
-bl31.axf: bl31 dtbs ${BUILD_PLAT}/rom_trampoline.o ${BUILD_PLAT}/build_axf.ld
+bl31.axf: bl31 dtbs ${BUILD_PLAT}/rom_trampoline.o ${BUILD_PLAT}/kernel_trampoline.o ${BUILD_PLAT}/build_axf.ld
 	$(ECHO) "  LD      $@"
-	$(Q)$(LD) -T ${BUILD_PLAT}/build_axf.ld -L ${BUILD_PLAT} --strip-debug -o ${BUILD_PLAT}/bl31.axf
+	$(Q)$(LD) -T ${BUILD_PLAT}/build_axf.ld -L ${BUILD_PLAT} --strip-debug -s -n -o ${BUILD_PLAT}/bl31.axf
 
 all: bl31.axf
diff --git a/plat/arm/board/fvp/fdts/optee_sp_manifest.dts b/plat/arm/board/fvp/fdts/optee_sp_manifest.dts
index 07235b0..551efe6 100644
--- a/plat/arm/board/fvp/fdts/optee_sp_manifest.dts
+++ b/plat/arm/board/fvp/fdts/optee_sp_manifest.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  *
@@ -16,7 +16,7 @@
 	/* Properties */
 	description = "op-tee";
 	ffa-version = <0x00010000>; /* 31:16 - Major, 15:0 - Minor */
-	uuid = <0x486178e0 0xe7f811e3 0xbc5e0002 0xa5d5c51b>;
+	uuid = <0xe0786148 0xe311f8e7 0x02005ebc 0x1bc5d5a5>;
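+	/* Same OP-TEE UUID as before, with each 32-bit word byte-reversed. */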
 	id = <1>;
 	execution-ctx-count = <8>;
 	exception-level = <2>; /* S-EL1 */
@@ -25,8 +25,9 @@
 	entrypoint-offset = <0x1000>;
 	xlat-granule = <0>; /* 4KiB */
 	boot-order = <0>;
-	messaging-method = <3>; /* Direct messaging only */
-	run-time-model = <1>; /* Run to completion */
+	messaging-method = <0x3>; /* Direct request/response supported. */
+	managed-exit;
+	run-time-model = <1>; /* SP pre-emptible. */
 
 	/* Boot protocol */
 	gp-register-num = <0x0>;
diff --git a/plat/arm/board/fvp/fvp_common.c b/plat/arm/board/fvp/fvp_common.c
index 9d3c031..e7a28ac 100644
--- a/plat/arm/board/fvp/fvp_common.c
+++ b/plat/arm/board/fvp/fvp_common.c
@@ -107,6 +107,10 @@
 #if defined(SPD_spmd)
 	ARM_MAP_TRUSTED_DRAM,
 #endif
+#if ENABLE_RME
+	ARM_MAP_RMM_DRAM,
+	ARM_MAP_GPT_L1_DRAM,
+#endif /* ENABLE_RME */
 #ifdef SPD_tspd
 	ARM_MAP_TSP_SEC_MEM,
 #endif
@@ -159,6 +163,9 @@
 #endif
 	/* Required by fconf APIs to read HW_CONFIG dtb loaded into DRAM */
 	ARM_DTB_DRAM_NS,
+#if ENABLE_RME
+	ARM_MAP_GPT_L1_DRAM,
+#endif
 	{0}
 };
 
@@ -191,6 +198,15 @@
 };
 #endif
 
+#ifdef IMAGE_RMM
+const mmap_region_t plat_arm_mmap[] = {
+	V2M_MAP_IOFPGA,
+	MAP_DEVICE0,
+	MAP_DEVICE1,
+	{0}
+};
+#endif
+
 ARM_CASSERT_MMAP
 
 #if FVP_INTERCONNECT_DRIVER != FVP_CCN
diff --git a/plat/arm/board/fvp/fvp_pm.c b/plat/arm/board/fvp/fvp_pm.c
index 333d892..6b9d618 100644
--- a/plat/arm/board/fvp/fvp_pm.c
+++ b/plat/arm/board/fvp/fvp_pm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -138,21 +138,37 @@
 	fvp_pwrc_clr_wen(mpidr);
 }
 
-
 /*******************************************************************************
  * FVP handler called when a CPU is about to enter standby.
  ******************************************************************************/
 static void fvp_cpu_standby(plat_local_state_t cpu_state)
 {
+	u_register_t scr = read_scr_el3();
 
 	assert(cpu_state == ARM_LOCAL_STATE_RET);
 
 	/*
+	 * Enable the Non-secure interrupt to wake the CPU.
+	 * In GICv3 affinity routing mode, the Non-secure Group 1 interrupts
+	 * use Physical FIQ at EL3 whereas in GICv2, Physical IRQ is used.
+	 * Enabling both the bits works for both GICv2 mode and GICv3 affinity
+	 * routing mode.
+	 */
+	write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
+	isb();
+
+	/*
-	 * Enter standby state
-	 * dsb is good practice before using wfi to enter low power states
+	 * Enter standby state.
+	 * dsb is good practice before using wfi to enter low power states.
 	 */
 	dsb();
 	wfi();
+
+	/*
+	 * Restore SCR_EL3 to the original value, synchronisation of SCR_EL3
+	 * is done by eret in el3_exit() to save some execution cycles.
+	 */
+	write_scr_el3(scr);
 }
 
 /*******************************************************************************
diff --git a/plat/arm/board/fvp/include/platform_def.h b/plat/arm/board/fvp/include/platform_def.h
index 8b25a54..96574e5 100644
--- a/plat/arm/board/fvp/include/platform_def.h
+++ b/plat/arm/board/fvp/include/platform_def.h
@@ -43,6 +43,11 @@
 #define PLAT_ARM_TRUSTED_DRAM_BASE	UL(0x06000000)
 #define PLAT_ARM_TRUSTED_DRAM_SIZE	UL(0x02000000)	/* 32 MB */
 
+#if ENABLE_RME
+#define PLAT_ARM_RMM_BASE		(RMM_BASE)
+#define PLAT_ARM_RMM_SIZE		(RMM_LIMIT - RMM_BASE)
+#endif
+
 /*
  * Max size of SPMC is 2MB for fvp. With SPMD enabled this value corresponds to
  * max size of BL32 image.
@@ -61,12 +66,13 @@
 #define PLAT_ARM_DRAM2_BASE		ULL(0x880000000)
 #define PLAT_ARM_DRAM2_SIZE		UL(0x80000000)
 
-#define PLAT_HW_CONFIG_DTB_BASE		ULL(0x82000000)
-#define PLAT_HW_CONFIG_DTB_SIZE		ULL(0x8000)
+/* Range of kernel DTB load address */
+#define FVP_DTB_DRAM_MAP_START		ULL(0x82000000)
+#define FVP_DTB_DRAM_MAP_SIZE		ULL(0x02000000)	/* 32 MB */
 
 #define ARM_DTB_DRAM_NS			MAP_REGION_FLAT(		\
-					PLAT_HW_CONFIG_DTB_BASE,	\
-					PLAT_HW_CONFIG_DTB_SIZE,	\
+					FVP_DTB_DRAM_MAP_START,		\
+					FVP_DTB_DRAM_MAP_SIZE,		\
 					MT_MEMORY | MT_RO | MT_NS)
 /*
  * Load address of BL33 for this platform port
@@ -80,15 +86,27 @@
 #if defined(IMAGE_BL31)
 # if SPM_MM
 #  define PLAT_ARM_MMAP_ENTRIES		10
-#  define MAX_XLAT_TABLES		9
+#  if ENABLE_RME
+#   define MAX_XLAT_TABLES		10
+#  else
+#   define MAX_XLAT_TABLES		9
+#  endif
 #  define PLAT_SP_IMAGE_MMAP_REGIONS	30
 #  define PLAT_SP_IMAGE_MAX_XLAT_TABLES	10
 # else
 #  define PLAT_ARM_MMAP_ENTRIES		9
 #  if USE_DEBUGFS
-#   define MAX_XLAT_TABLES		8
+#   if ENABLE_RME
+#    define MAX_XLAT_TABLES		9
+#   else
+#    define MAX_XLAT_TABLES		8
+#   endif
 #  else
-#   define MAX_XLAT_TABLES		7
+#   if ENABLE_RME
+#    define MAX_XLAT_TABLES		8
+#   else
+#    define MAX_XLAT_TABLES		7
+#   endif
 #  endif
 # endif
 #elif defined(IMAGE_BL32)
@@ -137,16 +155,17 @@
 #endif
 
 #if RESET_TO_BL31
-/* Size of Trusted SRAM - the first 4KB of shared memory */
+/* Size of Trusted SRAM - the first 4KB of shared memory - GPT L0 Tables */
 #define PLAT_ARM_MAX_BL31_SIZE		(PLAT_ARM_TRUSTED_SRAM_SIZE - \
-					 ARM_SHARED_RAM_SIZE)
+					 ARM_SHARED_RAM_SIZE - \
+					 ARM_L0_GPT_SIZE)
 #else
 /*
  * Since BL31 NOBITS overlays BL2 and BL1-RW, PLAT_ARM_MAX_BL31_SIZE is
  * calculated using the current BL31 PROGBITS debug size plus the sizes of
  * BL2 and BL1-RW
  */
-#define PLAT_ARM_MAX_BL31_SIZE		UL(0x3D000)
+#define PLAT_ARM_MAX_BL31_SIZE		(UL(0x3D000) - ARM_L0_GPT_SIZE)
 #endif /* RESET_TO_BL31 */
 
 #ifndef __aarch64__
@@ -177,7 +196,7 @@
 # if TRUSTED_BOARD_BOOT
 #  define PLATFORM_STACK_SIZE		UL(0x1000)
 # else
-#  define PLATFORM_STACK_SIZE		UL(0x440)
+#  define PLATFORM_STACK_SIZE		UL(0x600)
 # endif
 #elif defined(IMAGE_BL2U)
 # define PLATFORM_STACK_SIZE		UL(0x400)
@@ -185,6 +204,8 @@
 #  define PLATFORM_STACK_SIZE		UL(0x800)
 #elif defined(IMAGE_BL32)
 # define PLATFORM_STACK_SIZE		UL(0x440)
+#elif defined(IMAGE_RMM)
+# define PLATFORM_STACK_SIZE		UL(0x440)
 #endif
 
 #define MAX_IO_DEVICES			3
@@ -222,6 +243,9 @@
 #define PLAT_ARM_TSP_UART_BASE		V2M_IOFPGA_UART2_BASE
 #define PLAT_ARM_TSP_UART_CLK_IN_HZ	V2M_IOFPGA_UART2_CLK_IN_HZ
 
+#define PLAT_ARM_TRP_UART_BASE		V2M_IOFPGA_UART3_BASE
+#define PLAT_ARM_TRP_UART_CLK_IN_HZ	V2M_IOFPGA_UART3_CLK_IN_HZ
+
 #define PLAT_FVP_SMMUV3_BASE		UL(0x2b400000)
 
 /* CCI related constants */
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index 2f8a65e..b375146 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -138,7 +138,8 @@
 					lib/cpus/aarch64/cortex_demeter.S	\
 					lib/cpus/aarch64/cortex_a65.S		\
 					lib/cpus/aarch64/cortex_a65ae.S		\
-					lib/cpus/aarch64/cortex_a78c.S
+					lib/cpus/aarch64/cortex_a78c.S		\
+					lib/cpus/aarch64/cortex_hayes.S
 	endif
 	# AArch64/AArch32 cores
 	FVP_CPU_LIBS	+=	lib/cpus/aarch64/cortex_a55.S		\
@@ -185,6 +186,10 @@
 BL2_SOURCES		+=	plat/arm/common/fconf/fconf_nv_cntr_getter.c
 endif
 
+ifeq (${ENABLE_RME},1)
+BL2_SOURCES		+=	plat/arm/board/fvp/aarch64/fvp_helpers.S
+endif
+
 ifeq (${BL2_AT_EL3},1)
 BL2_SOURCES		+=	plat/arm/board/fvp/${ARCH}/fvp_helpers.S	\
 				plat/arm/board/fvp/fvp_bl2_el3_setup.c		\
diff --git a/plat/arm/board/fvp/trp/trp-fvp.mk b/plat/arm/board/fvp/trp/trp-fvp.mk
new file mode 100644
index 0000000..a450541
--- /dev/null
+++ b/plat/arm/board/fvp/trp/trp-fvp.mk
@@ -0,0 +1,12 @@
+#
+# Copyright (c) 2021, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# TRP source files specific to FVP platform
+
+RMM_SOURCES		+=	plat/arm/board/fvp/aarch64/fvp_helpers.S
+
+include plat/arm/common/trp/arm_trp.mk
+
diff --git a/plat/arm/board/fvp_r/fvp_r_bl1_arch_setup.c b/plat/arm/board/fvp_r/fvp_r_bl1_arch_setup.c
new file mode 100644
index 0000000..ae6af6c
--- /dev/null
+++ b/plat/arm/board/fvp_r/fvp_r_bl1_arch_setup.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "../../../../bl1/bl1_private.h"
+#include <arch.h>
+
+#include <fvp_r_arch_helpers.h>
+
+/*******************************************************************************
+ * Function that does the first bit of architectural setup that affects
+ * execution in the non-secure address space.
+ ******************************************************************************/
+void bl1_arch_setup(void)
+{
+	/* v8-R64 does not include SCRs. */
+}
+
+/*******************************************************************************
+ * Set the Secure EL1 required architectural state
+ ******************************************************************************/
+void bl1_arch_next_el_setup(void)
+{
+	u_register_t next_sctlr;
+
+	/* Use the same endianness as the current BL */
+	next_sctlr = (read_sctlr_el2() & SCTLR_EE_BIT);
+
+	/* Set SCTLR Secure EL1 */
+	next_sctlr |= SCTLR_EL1_RES1;
+
+	write_sctlr_el1(next_sctlr);
+}
diff --git a/plat/arm/board/fvp_r/fvp_r_bl1_context_mgmt.c b/plat/arm/board/fvp_r/fvp_r_bl1_context_mgmt.c
new file mode 100644
index 0000000..c6544b4
--- /dev/null
+++ b/plat/arm/board/fvp_r/fvp_r_bl1_context_mgmt.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include "../../../../bl1/bl1_private.h"
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <context.h>
+#include <lib/el3_runtime/context_mgmt.h>
+
+#include <plat/common/platform.h>
+
+
+void cm_prepare_el2_exit(void);
+
+/* Following contains the cpu context pointers. */
+static void *bl1_cpu_context_ptr[2];
+
+void *cm_get_context(uint32_t security_state)
+{
+	assert(sec_state_is_valid(security_state));
+	return bl1_cpu_context_ptr[security_state];
+}
+
+void cm_set_context(void *context, uint32_t security_state)
+{
+	assert(sec_state_is_valid(security_state));
+	bl1_cpu_context_ptr[security_state] = context;
+}
+
+/*******************************************************************************
+ * This function prepares the context for Secure/Normal world images.
+ * Normal world images are transitioned to EL2 (if supported), else EL1.
+ ******************************************************************************/
+void bl1_prepare_next_image(unsigned int image_id)
+{
+	/*
+	 * Following array will be used for context management.
+	 * There are 2 instances, for the Secure and Non-Secure contexts.
+	 */
+	static cpu_context_t bl1_cpu_context[2];
+
+	unsigned int security_state, mode = MODE_EL1;
+	image_desc_t *desc;
+	entry_point_info_t *next_bl_ep;
+
+#if CTX_INCLUDE_AARCH32_REGS
+	/*
+	 * Ensure that the build flag to save AArch32 system registers in CPU
+	 * context is not set for AArch64-only platforms.
+	 */
+	if (el_implemented(1) == EL_IMPL_A64ONLY) {
+		ERROR("EL1 supports AArch64-only. Please set build flag %s",
+				"CTX_INCLUDE_AARCH32_REGS = 0\n");
+		panic();
+	}
+#endif
+
+	/* Get the image descriptor. */
+	desc = bl1_plat_get_image_desc(image_id);
+	assert(desc != NULL);
+
+	/* Get the entry point info. */
+	next_bl_ep = &desc->ep_info;
+
+	/* Get the image security state. */
+	security_state = GET_SECURITY_STATE(next_bl_ep->h.attr);
+
+	/* Setup the Secure/Non-Secure context if not done already. */
+	if (cm_get_context(security_state) == NULL) {
+		cm_set_context(&bl1_cpu_context[security_state], security_state);
+	}
+	/* Prepare the SPSR for the next BL image. */
+	next_bl_ep->spsr = (uint32_t)SPSR_64((uint64_t) mode,
+		(uint64_t)MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+
+	/* Allow platform to make change */
+	bl1_plat_set_ep_info(image_id, next_bl_ep);
+
+	/* Prepare context for the next EL */
+	cm_prepare_el2_exit();
+
+	/* Indicate that image is in execution state. */
+	desc->state = IMAGE_STATE_EXECUTED;
+
+	print_entry_point_info(next_bl_ep);
+}
diff --git a/plat/arm/board/fvp_r/fvp_r_bl1_entrypoint.S b/plat/arm/board/fvp_r/fvp_r_bl1_entrypoint.S
new file mode 100644
index 0000000..19a685c
--- /dev/null
+++ b/plat/arm/board/fvp_r/fvp_r_bl1_entrypoint.S
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <el2_common_macros.S>
+#include <lib/xlat_mpu/xlat_mpu.h>
+
+	.globl	bl1_entrypoint
+	.globl	bl1_run_next_image
+
+
+	/* -----------------------------------------------------
+	 * bl1_entrypoint() is the entry point into the trusted
+	 * firmware code when a cpu is released from warm or
+	 * cold reset.
+	 * -----------------------------------------------------
+	 */
+
+func bl1_entrypoint
+	/* ---------------------------------------------------------------------
+	 * If the reset address is programmable then bl1_entrypoint() is
+	 * executed only on the cold boot path. Therefore, we can skip the warm
+	 * boot mailbox mechanism.
+	 * ---------------------------------------------------------------------
+	 */
+	el2_entrypoint_common					\
+		_init_sctlr=1					\
+		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
+		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
+		_init_memory=1					\
+		_init_c_runtime=1				\
+		_exception_vectors=bl1_exceptions		\
+		_pie_fixup_size=0
+
+	/* --------------------------------------------------------------------
+	 * Perform BL1 setup
+	 * --------------------------------------------------------------------
+	 */
+	bl	bl1_setup
+
+#if ENABLE_PAUTH
+	/* --------------------------------------------------------------------
+	 * Program APIAKey_EL1 and enable pointer authentication.
+	 * --------------------------------------------------------------------
+	 */
+	bl	pauth_init_enable_el2
+#endif /* ENABLE_PAUTH */
+
+	/* --------------------------------------------------------------------
+	 * Initialize platform and jump to our c-entry point
+	 * for this type of reset.
+	 * --------------------------------------------------------------------
+	 */
+	bl	bl1_main
+
+	/* ---------------------------------------------
+	 * Should never reach this point.
+	 * ---------------------------------------------
+	 */
+	no_ret	plat_panic_handler
+endfunc bl1_entrypoint
+
+func bl1_run_next_image
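+	/* Preserve the entry_point_info pointer passed in x0. */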
+	mov	x20, x0
+
+	/* ---------------------------------------------
+	 * MPU needs to be disabled because both BL1 and BL33 execute
+	 * in EL2, and therefore share the same address space.
+	 * BL33 will initialize the address space according to its
+	 * own requirement.
+	 * ---------------------------------------------
+	 */
+	bl	disable_mpu_icache_el2
+
+	/* ---------------------------------------------
+	 * Wipe clean and disable all MPU regions. This function expects the
+	 * MPU to have already been turned off and caching concerns addressed,
+	 * but it also explicitly turns the MPU off.
+	 * ---------------------------------------------
+	 */
+	bl	clear_all_mpu_regions
+
+#if ENABLE_PAUTH
+	/* ---------------------------------------------
+	 * Disable pointer authentication before jumping
+	 * to next boot image.
+	 * ---------------------------------------------
+	 */
+	bl	pauth_disable_el2
+#endif /* ENABLE_PAUTH */
+
+	/* --------------------------------------------------
+	 * Do the transition to next boot image.
+	 * --------------------------------------------------
+	 */
+	ldp	x0, x1, [x20, #ENTRY_POINT_INFO_PC_OFFSET]
+	msr	elr_el2, x0
+	msr	spsr_el2, x1
+
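+	/*
+	 * Load the boot arguments x0-x7 from the entry_point_info args
+	 * structure before handing control to the next image.
+	 */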
+	ldp	x6, x7, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x30)]
+	ldp	x4, x5, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x20)]
+	ldp	x2, x3, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x10)]
+	ldp	x0, x1, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x0)]
+	exception_return
+endfunc bl1_run_next_image
diff --git a/plat/arm/board/fvp_r/fvp_r_bl1_exceptions.S b/plat/arm/board/fvp_r/fvp_r_bl1_exceptions.S
new file mode 100644
index 0000000..43c2e01
--- /dev/null
+++ b/plat/arm/board/fvp_r/fvp_r_bl1_exceptions.S
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl1/bl1.h>
+#include <common/bl_common.h>
+#include <context.h>
+
+/* -----------------------------------------------------------------------------
+ * File contains an EL2 equivalent of the EL3 vector table from:
+ * 	.../bl1/aarch64/bl1_exceptions.S
+ * -----------------------------------------------------------------------------
+ */
+
+/* -----------------------------------------------------------------------------
+ * Very simple stackless exception handlers used by BL1.
+ * -----------------------------------------------------------------------------
+ */
+	.globl	bl1_exceptions
+
+vector_base bl1_exceptions
+
+	/* -----------------------------------------------------
+	 * Current EL with SP0 : 0x0 - 0x200
+	 * -----------------------------------------------------
+	 */
+vector_entry SynchronousExceptionSP0
+	mov	x0, #SYNC_EXCEPTION_SP_EL0
+	bl	plat_report_exception
+	no_ret	plat_panic_handler
+end_vector_entry SynchronousExceptionSP0
+
+vector_entry IrqSP0
+	mov	x0, #IRQ_SP_EL0
+	bl	plat_report_exception
+	no_ret	plat_panic_handler
+end_vector_entry IrqSP0
+
+vector_entry FiqSP0
+	mov	x0, #FIQ_SP_EL0
+	bl	plat_report_exception
+	no_ret	plat_panic_handler
+end_vector_entry FiqSP0
+
+vector_entry SErrorSP0
+	mov	x0, #SERROR_SP_EL0
+	bl	plat_report_exception
+	no_ret	plat_panic_handler
+end_vector_entry SErrorSP0
+
+	/* -----------------------------------------------------
+	 * Current EL with SPx: 0x200 - 0x400
+	 * -----------------------------------------------------
+	 */
+vector_entry SynchronousExceptionSPx
+	mov	x0, #SYNC_EXCEPTION_SP_ELX
+	bl	plat_report_exception
+	no_ret	plat_panic_handler
+end_vector_entry SynchronousExceptionSPx
+
+vector_entry IrqSPx
+	mov	x0, #IRQ_SP_ELX
+	bl	plat_report_exception
+	no_ret	plat_panic_handler
+end_vector_entry IrqSPx
+
+vector_entry FiqSPx
+	mov	x0, #FIQ_SP_ELX
+	bl	plat_report_exception
+	no_ret	plat_panic_handler
+end_vector_entry FiqSPx
+
+vector_entry SErrorSPx
+	mov	x0, #SERROR_SP_ELX
+	bl	plat_report_exception
+	no_ret	plat_panic_handler
+end_vector_entry SErrorSPx
+
+	/* -----------------------------------------------------
+	 * Lower EL using AArch64 : 0x400 - 0x600
+	 * -----------------------------------------------------
+	 */
+vector_entry SynchronousExceptionA64
+	/* The current v8-R64 implementation does not support conduit calls */
+	b	el2_panic
+end_vector_entry SynchronousExceptionA64
+
+vector_entry IrqA64
+	mov	x0, #IRQ_AARCH64
+	bl	plat_report_exception
+	no_ret	plat_panic_handler
+end_vector_entry IrqA64
+
+vector_entry FiqA64
+	mov	x0, #FIQ_AARCH64
+	bl	plat_report_exception
+	no_ret	plat_panic_handler
+end_vector_entry FiqA64
+
+vector_entry SErrorA64
+	mov	x0, #SERROR_AARCH64
+	bl	plat_report_exception
+	no_ret	plat_panic_handler
+end_vector_entry SErrorA64
+
+
+unexpected_sync_exception:
+	mov	x0, #SYNC_EXCEPTION_AARCH64
+	bl	plat_report_exception
+	no_ret	plat_panic_handler
+
+	/* -----------------------------------------------------
+	 * Save Secure/Normal world context and jump to
+	 * BL1 SMC handler.
+	 * -----------------------------------------------------
+	 */
diff --git a/plat/arm/board/fvp_r/fvp_r_bl1_main.c b/plat/arm/board/fvp_r/fvp_r_bl1_main.c
new file mode 100644
index 0000000..2fd0e97
--- /dev/null
+++ b/plat/arm/board/fvp_r/fvp_r_bl1_main.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include "../../../../bl1/bl1_private.h"
+#include <arch.h>
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <bl1/bl1.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <drivers/auth/auth_mod.h>
+#include <drivers/console.h>
+#include <lib/cpus/errata_report.h>
+#include <lib/utils.h>
+#include <smccc_helpers.h>
+#include <tools_share/uuid.h>
+#include <plat/arm/common/plat_arm.h>
+#include <plat/common/platform.h>
+
+#include <platform_def.h>
+
+
+void bl1_run_next_image(const struct entry_point_info *bl_ep_info);
+
+/*******************************************************************************
+ * Function that queries the platform for the next image and transfers
+ * execution to it, disabling PAuth and the trusted watchdog beforehand.
+ * Only called by the primary cpu after a cold boot.
+ ******************************************************************************/
+void bl1_transfer_bl33(void)
+{
+	unsigned int image_id;
+
+	/* Get the image id of next image to load and run. */
+	image_id = bl1_plat_get_next_image_id();
+
+#if ENABLE_PAUTH
+	/*
+	 * Disable pointer authentication before running next boot image
+	 */
+	pauth_disable_el2();
+#endif /* ENABLE_PAUTH */
+
+#if !ARM_DISABLE_TRUSTED_WDOG
+	/* Disable watchdog before leaving BL1 */
+	plat_arm_secure_wdt_stop();
+#endif
+
+	bl1_run_next_image(&bl1_plat_get_image_desc(image_id)->ep_info);
+}
+
+/*******************************************************************************
+ * This function locates and loads the BL33 raw binary image in the trusted SRAM.
+ * Called by the primary cpu after a cold boot.
+ * TODO: Add support for alternative image load mechanism e.g using virtio/elf
+ * loader etc.
+ ******************************************************************************/
+void bl1_load_bl33(void)
+{
+	image_desc_t *desc;
+	image_info_t *info;
+	int err;
+
+	/* Get the image descriptor */
+	desc = bl1_plat_get_image_desc(BL33_IMAGE_ID);
+	assert(desc != NULL);
+
+	/* Get the image info */
+	info = &desc->image_info;
+	INFO("BL1: Loading BL33\n");
+
+	err = bl1_plat_handle_pre_image_load(BL33_IMAGE_ID);
+	if (err != 0) {
+		ERROR("Failure in pre image load handling of BL33 (%d)\n", err);
+		plat_error_handler(err);
+	}
+
+	err = load_auth_image(BL33_IMAGE_ID, info);
+	if (err != 0) {
+		ERROR("Failed to load BL33 firmware.\n");
+		plat_error_handler(err);
+	}
+
+	/* Allow platform to handle image information. */
+	err = bl1_plat_handle_post_image_load(BL33_IMAGE_ID);
+	if (err != 0) {
+		ERROR("Failure in post image load handling of BL33 (%d)\n", err);
+		plat_error_handler(err);
+	}
+
+	NOTICE("BL1: Booting BL33\n");
+}
+
+static void bl1_load_bl2(void);
+
+#if ENABLE_PAUTH
+uint64_t bl1_apiakey[2];
+#endif
+
+/*******************************************************************************
+ * Helper utility to calculate the BL2 memory layout taking into consideration
+ * the BL1 RW data assuming that it is at the top of the memory layout.
+ ******************************************************************************/
+void bl1_calc_bl2_mem_layout(const meminfo_t *bl1_mem_layout,
+			meminfo_t *bl2_mem_layout)
+{
+	assert(bl1_mem_layout != NULL);
+	assert(bl2_mem_layout != NULL);
+
+	/*
+	 * Remove BL1 RW data from the scope of memory visible to BL2.
+	 * This is assuming BL1 RW data is at the top of bl1_mem_layout.
+	 */
+	assert(bl1_mem_layout->total_base < BL1_RW_BASE);
+	bl2_mem_layout->total_base = bl1_mem_layout->total_base;
+	bl2_mem_layout->total_size = BL1_RW_BASE - bl1_mem_layout->total_base;
+
+	flush_dcache_range((uintptr_t)bl2_mem_layout, sizeof(meminfo_t));
+}
+
+/*******************************************************************************
+ * Setup function for BL1.
+ ******************************************************************************/
+void bl1_setup(void)
+{
+	/* Perform early platform-specific setup */
+	bl1_early_platform_setup();
+
+	/* Perform late platform-specific setup */
+	bl1_plat_arch_setup();
+
+#if CTX_INCLUDE_PAUTH_REGS
+	/*
+	 * Assert that the ARMv8.3-PAuth registers are present or an access
+	 * fault will be triggered when they are being saved or restored.
+	 */
+	assert(is_armv8_3_pauth_present());
+#endif /* CTX_INCLUDE_PAUTH_REGS */
+}
+
+/*******************************************************************************
+ * Function to perform late architectural and platform specific initialization.
+ * It also queries the platform to load and run next BL image. Only called
+ * by the primary cpu after a cold boot.
+ ******************************************************************************/
+void bl1_main(void)
+{
+	unsigned int image_id;
+
+	/* Announce our arrival */
+	NOTICE(FIRMWARE_WELCOME_STR);
+	NOTICE("BL1: %s\n", version_string);
+	NOTICE("BL1: %s\n", build_message);
+
+	INFO("BL1: RAM %p - %p\n", (void *)BL1_RAM_BASE, (void *)BL1_RAM_LIMIT);
+
+	print_errata_status();
+
+#if ENABLE_ASSERTIONS
+	u_register_t val;
+	/*
+	 * Ensure that MMU/Caches and coherency are turned on
+	 */
+	val = read_sctlr_el2();
+
+	assert((val & SCTLR_M_BIT) != 0U);
+	assert((val & SCTLR_C_BIT) != 0U);
+	assert((val & SCTLR_I_BIT) != 0U);
+	/*
+	 * Check that Cache Writeback Granule (CWG) in CTR_EL0 matches the
+	 * provided platform value
+	 */
+	val = (read_ctr_el0() >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
+	/*
+	 * If CWG is zero, then no CWG information is available but we can
+	 * at least check the platform value is less than the architectural
+	 * maximum.
+	 */
+	if (val != 0) {
+		assert(SIZE_FROM_LOG2_WORDS(val) == CACHE_WRITEBACK_GRANULE);
+	} else {
+		assert(MAX_CACHE_LINE_SIZE >= CACHE_WRITEBACK_GRANULE);
+	}
+#endif /* ENABLE_ASSERTIONS */
+
+	/* Perform remaining generic architectural setup from ELmax */
+	bl1_arch_setup();
+
+#if TRUSTED_BOARD_BOOT
+	/* Initialize authentication module */
+	auth_mod_init();
+#endif /* TRUSTED_BOARD_BOOT */
+
+	/* Perform platform setup in BL1. */
+	bl1_platform_setup();
+
+#if ENABLE_PAUTH
+	/* Store APIAKey_EL1 key */
+	bl1_apiakey[0] = read_apiakeylo_el1();
+	bl1_apiakey[1] = read_apiakeyhi_el1();
+#endif /* ENABLE_PAUTH */
+
+	/* Get the image id of next image to load and run. */
+	image_id = bl1_plat_get_next_image_id();
+
+	/*
+	 * We currently interpret any image id other than BL2_IMAGE_ID or
+	 * BL33_IMAGE_ID as the start of firmware update.
+	 */
+	if (image_id == BL2_IMAGE_ID) {
+		bl1_load_bl2();
+	} else if (image_id == BL33_IMAGE_ID) {
+		bl1_load_bl33();
+	} else {
+		NOTICE("BL1-FWU: *******FWU Process Started*******\n");
+	}
+
+	bl1_prepare_next_image(image_id);
+
+	console_flush();
+
+	bl1_transfer_bl33();
+}
+
+/*******************************************************************************
+ * This function locates and loads the BL2 raw binary image in the trusted SRAM.
+ * Called by the primary cpu after a cold boot.
+ * TODO: Add support for alternative image load mechanism e.g using virtio/elf
+ * loader etc.
+ ******************************************************************************/
+static void bl1_load_bl2(void)
+{
+	image_desc_t *desc;
+	image_info_t *info;
+	int err;
+
+	/* Get the image descriptor */
+	desc = bl1_plat_get_image_desc(BL2_IMAGE_ID);
+	assert(desc != NULL);
+
+	/* Get the image info */
+	info = &desc->image_info;
+	INFO("BL1: Loading BL2\n");
+
+	err = bl1_plat_handle_pre_image_load(BL2_IMAGE_ID);
+	if (err != 0) {
+		ERROR("Failure in pre image load handling of BL2 (%d)\n", err);
+		plat_error_handler(err);
+	}
+
+	err = load_auth_image(BL2_IMAGE_ID, info);
+	if (err != 0) {
+		ERROR("Failed to load BL2 firmware.\n");
+		plat_error_handler(err);
+	}
+
+	/* Allow platform to handle image information. */
+	err = bl1_plat_handle_post_image_load(BL2_IMAGE_ID);
+	if (err != 0) {
+		ERROR("Failure in post image load handling of BL2 (%d)\n", err);
+		plat_error_handler(err);
+	}
+
+	NOTICE("BL1: Booting BL2\n");
+}
+
+/*******************************************************************************
+ * Function called just before handing over to the next BL to inform the user
+ * about the boot progress. In debug mode, also print details about the BL
+ * image's execution context.
+ ******************************************************************************/
+void bl1_print_next_bl_ep_info(const entry_point_info_t *bl_ep_info)
+{
+	NOTICE("BL1: Booting BL31\n");
+	print_entry_point_info(bl_ep_info);
+}
+
+#if SPIN_ON_BL1_EXIT
+void print_debug_loop_message(void)
+{
+	NOTICE("BL1: Debug loop, spinning forever\n");
+	NOTICE("BL1: Please connect the debugger to continue\n");
+}
+#endif
+
diff --git a/plat/arm/board/fvp_r/fvp_r_bl1_setup.c b/plat/arm/board/fvp_r/fvp_r_bl1_setup.c
new file mode 100644
index 0000000..3f3fd5e
--- /dev/null
+++ b/plat/arm/board/fvp_r/fvp_r_bl1_setup.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* Use the xlat_tables_v2 data structures: */
+#define XLAT_TABLES_LIB_V2	1
+
+#include <assert.h>
+
+#include <bl1/bl1.h>
+#include <common/tbbr/tbbr_img_def.h>
+#include <drivers/arm/sp805.h>
+#include <lib/fconf/fconf.h>
+#include <lib/fconf/fconf_dyn_cfg_getter.h>
+#include <lib/xlat_mpu/xlat_mpu.h>
+
+#include "fvp_r_private.h"
+#include <plat/arm/common/arm_config.h>
+#include <plat/arm/common/arm_def.h>
+#include <plat/arm/common/plat_arm.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+
+#define MAP_BL1_TOTAL		MAP_REGION_FLAT(			\
+					bl1_tzram_layout.total_base,	\
+					bl1_tzram_layout.total_size,	\
+					MT_MEMORY | MT_RW | MT_SECURE)
+/*
+ * If SEPARATE_CODE_AND_RODATA=1 we define a region for each section
+ * otherwise one region is defined containing both
+ */
+#if SEPARATE_CODE_AND_RODATA
+#define MAP_BL1_RO		MAP_REGION_FLAT(			\
+					BL_CODE_BASE,			\
+					BL1_CODE_END - BL_CODE_BASE,	\
+					MT_CODE | MT_SECURE),		\
+				MAP_REGION_FLAT(			\
+					BL1_RO_DATA_BASE,		\
+					BL1_RO_DATA_END			\
+						- BL_RO_DATA_BASE,	\
+					MT_RO_DATA | MT_SECURE)
+#else
+#define MAP_BL1_RO		MAP_REGION_FLAT(			\
+					BL_CODE_BASE,			\
+					BL1_CODE_END - BL_CODE_BASE,	\
+					MT_CODE | MT_SECURE)
+#endif
+
+/* Data structure which holds the extents of the trusted SRAM for BL1*/
+static meminfo_t bl1_tzram_layout;
+
+struct meminfo *bl1_plat_sec_mem_layout(void)
+{
+	return &bl1_tzram_layout;
+}
+
+void arm_bl1_early_platform_setup(void)
+{
+
+#if !ARM_DISABLE_TRUSTED_WDOG
+	/* Enable watchdog */
+	plat_arm_secure_wdt_start();
+#endif
+
+	/* Initialize the console to provide early debug support */
+	arm_console_boot_init();
+
+	/* Allow BL1 to see the whole Trusted RAM */
+	bl1_tzram_layout.total_base = ARM_BL_RAM_BASE;
+	bl1_tzram_layout.total_size = ARM_BL_RAM_SIZE;
+}
+
+/* Boolean variable to hold condition whether firmware update needed or not */
+static bool is_fwu_needed;
+
+/*******************************************************************************
+ * Perform any BL1 specific platform actions.
+ ******************************************************************************/
+void bl1_early_platform_setup(void)
+{
+	arm_bl1_early_platform_setup();
+
+	/* Initialize the platform config for future decision making */
+	fvp_config_setup();
+
+	/*
+	 * Initialize Interconnect for this cluster during cold boot.
+	 * No need for locks as no other CPU is active.
+	 */
+	fvp_interconnect_init();
+	/*
+	 * Enable coherency in Interconnect for the primary CPU's cluster.
+	 */
+	fvp_interconnect_enable();
+}
+
+void arm_bl1_plat_arch_setup(void)
+{
+	const mmap_region_t bl_regions[] = {
+		MAP_BL1_TOTAL,
+		MAP_BL1_RO,
+#if USE_ROMLIB
+		ARM_MAP_ROMLIB_CODE,
+		ARM_MAP_ROMLIB_DATA,
+#endif
+#if ARM_CRYPTOCELL_INTEG
+		ARM_MAP_BL_COHERENT_RAM,
+#endif
+		/* DRAM1_region: */
+		MAP_REGION_FLAT(					\
+			PLAT_ARM_DRAM1_BASE,				\
+			PLAT_ARM_DRAM1_SIZE,				\
+			MT_MEMORY | MT_SECURE | MT_EXECUTE		\
+			| MT_RW | MT_NON_CACHEABLE),
+		/* NULL terminator: */
+		{0}
+	};
+
+	setup_page_tables(bl_regions, plat_arm_get_mmap());
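+	/* Enable the EL2 MPU, covering the regions set up above. */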
+	enable_mpu_el2(0);
+
+	arm_setup_romlib();
+}
+
+void plat_arm_secure_wdt_start(void)
+{
+	sp805_start(ARM_SP805_TWDG_BASE, ARM_TWDG_LOAD_VAL);
+}
+
+void plat_arm_secure_wdt_stop(void)
+{
+	sp805_stop(ARM_SP805_TWDG_BASE);
+}
+
+/*
+ * Perform the platform specific architecture setup shared between
+ * ARM standard platforms.
+ */
+void arm_bl1_platform_setup(void)
+{
+	image_desc_t *desc;
+	uint32_t fw_config_max_size;
+
+	/* Initialise the IO layer and register platform IO devices */
+	plat_arm_io_setup();
+
+	/* Check if we need FWU before further processing */
+	is_fwu_needed = plat_arm_bl1_fwu_needed();
+	if (is_fwu_needed) {
+		ERROR("Skip platform setup as FWU detected\n");
+		return;
+	}
+
+	/* Set global DTB info for fixed fw_config information */
+	fw_config_max_size = ARM_FW_CONFIG_LIMIT - ARM_FW_CONFIG_BASE;
+	set_config_info(ARM_FW_CONFIG_BASE, fw_config_max_size, FW_CONFIG_ID);
+
+	desc = bl1_plat_get_image_desc(BL33_IMAGE_ID);
+	assert(desc != NULL);
+
+	/*
+	 * Allow access to the System counter timer module and program
+	 * counter frequency for non secure images during FWU
+	 */
+#ifdef ARM_SYS_TIMCTL_BASE
+	arm_configure_sys_timer();
+#endif
+#if (ARM_ARCH_MAJOR > 7) || defined(ARMV7_SUPPORTS_GENERIC_TIMER)
+	write_cntfrq_el0(plat_get_syscnt_freq2());
+#endif
+}
+
+void bl1_platform_setup(void)
+{
+	arm_bl1_platform_setup();
+
+	/* Initialize System level generic or SP804 timer */
+	fvp_timer_init();
+}
+
+__dead2 void bl1_plat_fwu_done(void *client_cookie, void *reserved)
+{
+	/* Setup the watchdog to reset the system as soon as possible */
+	sp805_refresh(ARM_SP805_TWDG_BASE, 1U);
+
+	while (true) {
+		wfi();
+	}
+}
+
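+/* Enter firmware update (NS_BL1U) if requested; otherwise hand over to BL33. */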
+unsigned int bl1_plat_get_next_image_id(void)
+{
+	return  is_fwu_needed ? NS_BL1U_IMAGE_ID : BL33_IMAGE_ID;
+}
+
+/*
+ * Returns BL33 image details.
+ */
+struct image_desc *bl1_plat_get_image_desc(unsigned int image_id)
+{
+	static image_desc_t bl33_img_desc = BL33_IMAGE_DESC;
+
+	return &bl33_img_desc;
+}
+
+/*
+ * This function populates the default arguments to BL33.
+ * The BL33 memory layout structure is allocated and the
+ * calculated layout is passed to BL33 in arg1.
+ */
+int bl1_plat_handle_post_image_load(unsigned int image_id)
+{
+	meminfo_t *bl33_secram_layout;
+	meminfo_t *bl1_secram_layout;
+	image_desc_t *image_desc;
+	entry_point_info_t *ep_info;
+
+	if (image_id != BL33_IMAGE_ID) {
+		return 0;
+	}
+	/* Get the image descriptor */
+	image_desc = bl1_plat_get_image_desc(BL33_IMAGE_ID);
+	assert(image_desc != NULL);
+
+	/* Get the entry point info */
+	ep_info = &image_desc->ep_info;
+
+	/* Find out how much free trusted ram remains after BL1 load */
+	bl1_secram_layout = bl1_plat_sec_mem_layout();
+
+	/*
+	 * Create a new layout of memory for BL33 as seen by BL1 i.e.
+	 * tell it the amount of total and free memory available.
+	 * This layout is created at the first free address visible
+	 * to BL33. BL33 will read the memory layout before using its
+	 * memory for other purposes.
+	 */
+	bl33_secram_layout = (meminfo_t *) bl1_secram_layout->total_base;
+
+	bl1_calc_bl2_mem_layout(bl1_secram_layout, bl33_secram_layout);
+
+	ep_info->args.arg1 = (uintptr_t)bl33_secram_layout;
+
+	VERBOSE("BL1: BL33 memory layout address = %p\n",
+		(void *) bl33_secram_layout);
+	return 0;
+}
diff --git a/plat/arm/board/fvp_r/fvp_r_common.c b/plat/arm/board/fvp_r/fvp_r_common.c
new file mode 100644
index 0000000..bce943d
--- /dev/null
+++ b/plat/arm/board/fvp_r/fvp_r_common.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* This uses xlat_mpu, but tables are set up using V2 mmap_region_t */
+#define XLAT_TABLES_LIB_V2	1
+
+#include <assert.h>
+#include <common/debug.h>
+
+#include <drivers/arm/cci.h>
+#include <drivers/arm/ccn.h>
+#include <drivers/arm/gicv2.h>
+#include <drivers/arm/sp804_delay_timer.h>
+#include <drivers/generic_delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/smccc.h>
+#include <lib/xlat_tables/xlat_tables_compat.h>
+#include <services/arm_arch_svc.h>
+
+#include "fvp_r_private.h"
+#include <plat/arm/common/arm_config.h>
+#include <plat/arm/common/plat_arm.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+
+
+/* Defines for GIC Driver build time selection */
+#define FVP_R_GICV3		2
+
+/*******************************************************************************
+ * arm_config holds the characteristics of the differences between the FVP_R
+ * platforms. It will be populated during cold boot at each boot stage by the
+ * primary before enabling the MPU (to allow interconnect configuration) &
+ * used thereafter. Each BL will have its own copy to allow independent
+ * operation.
+ ******************************************************************************/
+arm_config_t arm_config;
+
+#define MAP_DEVICE0	MAP_REGION_FLAT(DEVICE0_BASE,			\
+					DEVICE0_SIZE,			\
+					MT_DEVICE | MT_RW | MT_SECURE)
+
+#define MAP_DEVICE1	MAP_REGION_FLAT(DEVICE1_BASE,			\
+					DEVICE1_SIZE,			\
+					MT_DEVICE | MT_RW | MT_SECURE)
+
+/*
+ * Need to be mapped with write permissions in order to set a new non-volatile
+ * counter value.
+ */
+#define MAP_DEVICE2	MAP_REGION_FLAT(DEVICE2_BASE,			\
+					DEVICE2_SIZE,			\
+					MT_DEVICE | MT_RW | MT_SECURE)
+
+/*
+ * Table of memory regions for various BL stages to map using the MPU.
+ * This doesn't include Trusted SRAM as setup_page_tables() already takes care
+ * of mapping it.
+ *
+ * The flash needs to be mapped as writable in order to erase the FIP's Table of
+ * Contents in case of unrecoverable error (see plat_error_handler()).
+ */
+#ifdef IMAGE_BL1
+const mmap_region_t plat_arm_mmap[] = {
+	ARM_MAP_SHARED_RAM,
+	V2M_MAP_FLASH0_RW,
+	V2M_MAP_IOFPGA,
+	MAP_DEVICE0,
+	MAP_DEVICE1,
+#if TRUSTED_BOARD_BOOT
+	/* To access the Root of Trust Public Key registers. */
+	MAP_DEVICE2,
+#endif
+	{0}
+};
+#endif
+
+ARM_CASSERT_MMAP
+
+#if FVP_R_INTERCONNECT_DRIVER != FVP_R_CCN
+static const int fvp_cci400_map[] = {
+	PLAT_FVP_R_CCI400_CLUS0_SL_PORT,
+	PLAT_FVP_R_CCI400_CLUS1_SL_PORT,
+};
+
+static const int fvp_cci5xx_map[] = {
+	PLAT_FVP_R_CCI5XX_CLUS0_SL_PORT,
+	PLAT_FVP_R_CCI5XX_CLUS1_SL_PORT,
+};
+
+static unsigned int get_interconnect_master(void)
+{
+	unsigned int master;
+	u_register_t mpidr;
+
+	mpidr = read_mpidr_el1();
+	master = ((arm_config.flags & ARM_CONFIG_FVP_SHIFTED_AFF) != 0U) ?
+		MPIDR_AFFLVL2_VAL(mpidr) : MPIDR_AFFLVL1_VAL(mpidr);
+
+	assert(master < FVP_R_CLUSTER_COUNT);
+	return master;
+}
+#endif
+
+/*******************************************************************************
+ * Initialize the platform config for future decision making
+ ******************************************************************************/
+void __init fvp_config_setup(void)
+{
+	unsigned int rev, hbi, bld, arch, sys_id;
+
+	arm_config.flags |= ARM_CONFIG_BASE_MMAP;
+	sys_id = mmio_read_32(V2M_FVP_R_SYSREGS_BASE + V2M_SYS_ID);
+	rev = (sys_id >> V2M_SYS_ID_REV_SHIFT) & V2M_SYS_ID_REV_MASK;
+	hbi = (sys_id >> V2M_SYS_ID_HBI_SHIFT) & V2M_SYS_ID_HBI_MASK;
+	bld = (sys_id >> V2M_SYS_ID_BLD_SHIFT) & V2M_SYS_ID_BLD_MASK;
+	arch = (sys_id >> V2M_SYS_ID_ARCH_SHIFT) & V2M_SYS_ID_ARCH_MASK;
+
+	if (arch != ARCH_MODEL) {
+		ERROR("This firmware is for FVP_R models\n");
+		panic();
+	}
+
+	/*
+	 * The build field in the SYS_ID tells which variant of the GIC
+	 * memory map is implemented by the model.
+	 */
+	switch (bld) {
+	case BLD_GIC_VE_MMAP:
+		ERROR("Legacy Versatile Express memory map for GIC %s",
+				"peripheral is not supported\n");
+		panic();
+		break;
+	case BLD_GIC_A53A57_MMAP:
+		break;
+	default:
+		ERROR("Unsupported board build %x\n", bld);
+		panic();
+	}
+
+	/*
+	 * The hbi field in the SYS_ID is 0x020 for the Base FVP_R & 0x010
+	 * for the Foundation FVP_R.
+	 */
+	switch (hbi) {
+	case HBI_FOUNDATION_FVP_R:
+		arm_config.flags = 0;
+
+		/*
+		 * Check for supported revisions of Foundation FVP_R
+		 * Allow future revisions to run but emit warning diagnostic
+		 */
+		switch (rev) {
+		case REV_FOUNDATION_FVP_R_V2_0:
+		case REV_FOUNDATION_FVP_R_V2_1:
+		case REV_FOUNDATION_FVP_R_v9_1:
+		case REV_FOUNDATION_FVP_R_v9_6:
+			break;
+		default:
+			WARN("Unrecognized Foundation FVP_R revision %x\n", rev);
+			break;
+		}
+		break;
+	case HBI_BASE_FVP_R:
+		arm_config.flags |= (ARM_CONFIG_BASE_MMAP | ARM_CONFIG_HAS_TZC);
+
+		/*
+		 * Check for supported revisions
+		 * Allow future revisions to run but emit warning diagnostic
+		 */
+		switch (rev) {
+		case REV_BASE_FVP_R_V0:
+			arm_config.flags |= ARM_CONFIG_FVP_HAS_CCI400;
+			break;
+		default:
+			WARN("Unrecognized Base FVP_R revision %x\n", rev);
+			break;
+		}
+		break;
+	default:
+		ERROR("Unsupported board HBI number 0x%x\n", hbi);
+		panic();
+	}
+
+	/*
+	 * We assume that the presence of MT bit, and therefore shifted
+	 * affinities, is uniform across the platform: either all CPUs, or no
+	 * CPUs implement it.
+	 */
+	if ((read_mpidr_el1() & MPIDR_MT_MASK) != 0U) {
+		arm_config.flags |= ARM_CONFIG_FVP_SHIFTED_AFF;
+	}
+}
+
+
+void __init fvp_interconnect_init(void)
+{
+#if FVP_R_INTERCONNECT_DRIVER == FVP_R_CCN
+	if (ccn_get_part0_id(PLAT_ARM_CCN_BASE) != CCN_502_PART0_ID) {
+		ERROR("Unrecognized CCN variant detected. Only CCN-502 is supported");
+		panic();
+	}
+
+	plat_arm_interconnect_init();
+#else
+	uintptr_t cci_base = 0U;
+	const int *cci_map = NULL;
+	unsigned int map_size = 0U;
+
+	/* Initialize the right interconnect */
+	if ((arm_config.flags & ARM_CONFIG_FVP_HAS_CCI5XX) != 0U) {
+		cci_base = PLAT_FVP_R_CCI5XX_BASE;
+		cci_map = fvp_cci5xx_map;
+		map_size = ARRAY_SIZE(fvp_cci5xx_map);
+	} else if ((arm_config.flags & ARM_CONFIG_FVP_HAS_CCI400) != 0U) {
+		cci_base = PLAT_FVP_R_CCI400_BASE;
+		cci_map = fvp_cci400_map;
+		map_size = ARRAY_SIZE(fvp_cci400_map);
+	} else {
+		return;
+	}
+
+	assert(cci_base != 0U);
+	assert(cci_map != NULL);
+	cci_init(cci_base, cci_map, map_size);
+#endif
+}
+
+void fvp_interconnect_enable(void)
+{
+#if FVP_R_INTERCONNECT_DRIVER == FVP_R_CCN
+	plat_arm_interconnect_enter_coherency();
+#else
+	unsigned int master;
+
+	if ((arm_config.flags & (ARM_CONFIG_FVP_HAS_CCI400 |
+				 ARM_CONFIG_FVP_HAS_CCI5XX)) != 0U) {
+		master = get_interconnect_master();
+		cci_enable_snoop_dvm_reqs(master);
+	}
+#endif
+}
+
+void fvp_interconnect_disable(void)
+{
+#if FVP_R_INTERCONNECT_DRIVER == FVP_R_CCN
+	plat_arm_interconnect_exit_coherency();
+#else
+	unsigned int master;
+
+	if ((arm_config.flags & (ARM_CONFIG_FVP_HAS_CCI400 |
+				 ARM_CONFIG_FVP_HAS_CCI5XX)) != 0U) {
+		master = get_interconnect_master();
+		cci_disable_snoop_dvm_reqs(master);
+	}
+#endif
+}
+
+#if TRUSTED_BOARD_BOOT
+int plat_get_mbedtls_heap(void **heap_addr, size_t *heap_size)
+{
+	assert(heap_addr != NULL);
+	assert(heap_size != NULL);
+
+	return arm_get_mbedtls_heap(heap_addr, heap_size);
+}
+#endif
+
+void fvp_timer_init(void)
+{
+#if USE_SP804_TIMER
+	/* Enable the clock override for SP804 timer 0, which means that no
+	 * clock dividers are applied and the raw (35MHz) clock will be used.
+	 */
+	mmio_write_32(V2M_SP810_BASE, FVP_R_SP810_CTRL_TIM0_OV);
+
+	/* Initialize delay timer driver using SP804 dual timer 0 */
+	sp804_timer_init(V2M_SP804_TIMER0_BASE,
+			SP804_TIMER_CLKMULT, SP804_TIMER_CLKDIV);
+#else
+	generic_delay_timer_init();
+
+	/* Enable System level generic timer */
+	mmio_write_32(ARM_SYS_CNTCTL_BASE + CNTCR_OFF,
+			CNTCR_FCREQ(0U) | CNTCR_EN);
+#endif /* USE_SP804_TIMER */
+}
+
+/* Get SOC version */
+int32_t plat_get_soc_version(void)
+{
+	return (int32_t)
+		((ARM_SOC_IDENTIFICATION_CODE << ARM_SOC_IDENTIFICATION_SHIFT)
+		 | (ARM_SOC_CONTINUATION_CODE << ARM_SOC_CONTINUATION_SHIFT)
+		 | FVP_R_SOC_ID);
+}
+
+/* Get SOC revision */
+int32_t plat_get_soc_revision(void)
+{
+	unsigned int sys_id;
+
+	sys_id = mmio_read_32(V2M_SYSREGS_BASE + V2M_SYS_ID);
+	return (int32_t)((sys_id >> V2M_SYS_ID_REV_SHIFT) &
+			V2M_SYS_ID_REV_MASK);
+}
diff --git a/plat/arm/board/fvp_r/fvp_r_context.S b/plat/arm/board/fvp_r/fvp_r_context.S
new file mode 100644
index 0000000..2746c2e
--- /dev/null
+++ b/plat/arm/board/fvp_r/fvp_r_context.S
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+	.global	el2_exit
+
+/* ------------------------------------------------------------------
+ * The mechanism, from el3_exit, is not used in this v8-R64 implementation.
+ * ------------------------------------------------------------------
+ */
+func el2_exit
+	exception_return
+endfunc el2_exit
diff --git a/plat/arm/board/fvp_r/fvp_r_context_mgmt.c b/plat/arm/board/fvp_r/fvp_r_context_mgmt.c
new file mode 100644
index 0000000..d172d2d
--- /dev/null
+++ b/plat/arm/board/fvp_r/fvp_r_context_mgmt.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+
+/************************************************************
+ * For R-class everything is in secure world.
+ * Prepare the CPU system registers for first entry into EL1
+ ************************************************************/
+void cm_prepare_el2_exit(void)
+{
+	uint64_t hcr_el2 = 0U;
+
+	/*
+	 * The use of ARMv8.3 pointer authentication (PAuth) is governed
+	 * by fields in HCR_EL2, which trigger a 'trap to EL2' if not
+	 * enabled. This register is initialized at boot, so only the PAuth
+	 * bits are updated here.
+	 *
+	 * HCR_API_BIT: Set to one to disable traps to EL2 if lower ELs
+	 * access PAuth registers
+	 *
+	 * HCR_APK_BIT: Set to one to disable traps to EL2 if lower ELs
+	 * access PAuth instructions
+	 */
+	hcr_el2 = read_hcr_el2();
+	write_hcr_el2(hcr_el2 | HCR_API_BIT | HCR_APK_BIT);
+
+	/*
+	 * Initialise CNTHCTL_EL2. All fields are architecturally UNKNOWN
+	 * on reset and are set to zero except for field(s) listed below.
+	 *
+	 * CNTHCTL_EL2.EL1PCEN: Set to one to disable traps to EL2
+	 * when lower ELs access the physical timer registers.
+	 *
+	 * CNTHCTL_EL2.EL1PCTEN: Set to one to disable traps to EL2
+	 * when lower ELs access the physical counter registers.
+	 */
+	write_cnthctl_el2(CNTHCTL_RESET_VAL | EL1PCEN_BIT | EL1PCTEN_BIT);
+
+	/*
+	 * On Armv8-R, the EL1&0 memory system architecture is configurable
+	 * as a VMSA or PMSA. All the fields are architecturally UNKNOWN on
+	 * reset and are set to zero except for the field listed below.
+	 *
+	 * VTCR_EL2.MSA: Set to one to ensure the VMSA is enabled so that a
+	 * rich OS can boot.
+	 */
+	write_vtcr_el2(VTCR_RESET_VAL | VTCR_EL2_MSA);
+}
diff --git a/plat/arm/board/fvp_r/fvp_r_debug.S b/plat/arm/board/fvp_r/fvp_r_debug.S
new file mode 100644
index 0000000..8db1b09
--- /dev/null
+++ b/plat/arm/board/fvp_r/fvp_r_debug.S
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/debug.h>
+
+	.globl el2_panic
+
+	/***********************************************************
+	 * The common implementation of do_panic for all BL stages
+	 ***********************************************************/
+
+.section .rodata.panic_str, "aS"
+	panic_msg: .asciz "PANIC at PC : 0x"
+
+/*
+ * el2_panic will be redefined by the
+ * crash reporting mechanism (if enabled)
+ */
+el2_panic:
+	mov	x6, x30
+	bl	plat_crash_console_init
+
+	/* Check if the console is initialized */
+	cbz	x0, _panic_handler
+
+	/* The console is initialized */
+	adr	x4, panic_msg
+	bl	asm_print_str
+	mov	x4, x6
+
+	/* The panic location is lr -4 */
+	sub	x4, x4, #4
+	bl	asm_print_hex
+
+	bl	plat_crash_console_flush
+
+_panic_handler:
+	/* Pass to plat_panic_handler the address from where el2_panic was
+	 * called, not the address of the call from el2_panic. */
+	mov	x30, x6
+	b	plat_panic_handler
diff --git a/plat/arm/board/fvp_r/fvp_r_def.h b/plat/arm/board/fvp_r/fvp_r_def.h
new file mode 100644
index 0000000..b9f6989
--- /dev/null
+++ b/plat/arm/board/fvp_r/fvp_r_def.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FVP_R_DEF_H
+#define FVP_R_DEF_H
+
+#include <lib/utils_def.h>
+
+#ifndef FVP_R_CLUSTER_COUNT
+#error "FVP_R_CLUSTER_COUNT is not set in makefile"
+#endif
+
+#ifndef FVP_R_MAX_CPUS_PER_CLUSTER
+#error "FVP_R_MAX_CPUS_PER_CLUSTER is not set in makefile"
+#endif
+
+#ifndef FVP_R_MAX_PE_PER_CPU
+#error "FVP_R_MAX_PE_PER_CPU is not set in makefile"
+#endif
+
+#define FVP_R_PRIMARY_CPU			0x0
+
+/* Defines for the Interconnect build selection */
+#define FVP_R_CCI			1
+#define FVP_R_CCN			2
+
+/******************************************************************************
+ * Definition of platform soc id
+ *****************************************************************************/
+#define FVP_R_SOC_ID      0
+
+/*******************************************************************************
+ * FVP_R memory map related constants
+ ******************************************************************************/
+
+#define FLASH1_BASE			UL(0x8c000000)
+#define FLASH1_SIZE			UL(0x04000000)
+
+#define PSRAM_BASE			UL(0x94000000)
+#define PSRAM_SIZE			UL(0x04000000)
+
+#define VRAM_BASE			UL(0x98000000)
+#define VRAM_SIZE			UL(0x02000000)
+
+/* Aggregate of all devices in the first GB */
+#define DEVICE0_BASE			UL(0xa0000000)
+#define DEVICE0_SIZE			UL(0x0c200000)
+
+/*
+ *  In case of FVP_R models with CCN, the CCN register space overlaps into
+ *  the NSRAM area.
+ */
+#define DEVICE1_BASE			UL(0xae000000)
+#define DEVICE1_SIZE			UL(0x1A00000)
+
+#define NSRAM_BASE			UL(0xae000000)
+#define NSRAM_SIZE			UL(0x10000)
+/* Devices in the second GB */
+#define DEVICE2_BASE			UL(0xffe00000)
+#define DEVICE2_SIZE			UL(0x00200000)
+
+#define PCIE_EXP_BASE			UL(0xc0000000)
+#define TZRNG_BASE			UL(0x7fe60000)
+
+/* Non-volatile counters */
+#define TRUSTED_NVCTR_BASE		UL(0xffe70000)
+#define TFW_NVCTR_BASE			(TRUSTED_NVCTR_BASE + UL(0x0000))
+#define TFW_NVCTR_SIZE			UL(4)
+#define NTFW_CTR_BASE			(TRUSTED_NVCTR_BASE + UL(0x0004))
+#define NTFW_CTR_SIZE			UL(4)
+
+/* Keys */
+#define SOC_KEYS_BASE			UL(0xffe80000)
+#define TZ_PUB_KEY_HASH_BASE		(SOC_KEYS_BASE + UL(0x0000))
+#define TZ_PUB_KEY_HASH_SIZE		UL(32)
+#define HU_KEY_BASE			(SOC_KEYS_BASE + UL(0x0020))
+#define HU_KEY_SIZE			UL(16)
+#define END_KEY_BASE			(SOC_KEYS_BASE + UL(0x0044))
+#define END_KEY_SIZE			UL(32)
+
+/* Constants to distinguish FVP_R type */
+#define HBI_BASE_FVP_R			U(0x020)
+#define REV_BASE_FVP_R_V0		U(0x0)
+#define REV_BASE_FVP_R_REVC		U(0x2)
+
+#define HBI_FOUNDATION_FVP_R		U(0x010)
+#define REV_FOUNDATION_FVP_R_V2_0	U(0x0)
+#define REV_FOUNDATION_FVP_R_V2_1	U(0x1)
+#define REV_FOUNDATION_FVP_R_v9_1	U(0x2)
+#define REV_FOUNDATION_FVP_R_v9_6	U(0x3)
+
+#define BLD_GIC_VE_MMAP			U(0x0)
+#define BLD_GIC_A53A57_MMAP		U(0x1)
+
+#define ARCH_MODEL			U(0x1)
+
+/* FVP_R Power controller base address */
+#define PWRC_BASE			UL(0x1c100000)
+
+/* FVP_R SP804 timer frequency is 35 MHz */
+#define SP804_TIMER_CLKMULT		1
+#define SP804_TIMER_CLKDIV		35
+
+/* SP810 controller. FVP_R specific flags */
+#define FVP_R_SP810_CTRL_TIM0_OV		BIT_32(16)
+#define FVP_R_SP810_CTRL_TIM1_OV		BIT_32(18)
+#define FVP_R_SP810_CTRL_TIM2_OV		BIT_32(20)
+#define FVP_R_SP810_CTRL_TIM3_OV		BIT_32(22)
+
+#endif /* FVP_R_DEF_H */
diff --git a/plat/arm/board/fvp_r/fvp_r_err.c b/plat/arm/board/fvp_r/fvp_r_err.c
new file mode 100644
index 0000000..7ee752b
--- /dev/null
+++ b/plat/arm/board/fvp_r/fvp_r_err.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <errno.h>
+
+#include <common/debug.h>
+#include <drivers/arm/sp805.h>
+#include <drivers/cfi/v2m_flash.h>
+#include <plat/arm/common/plat_arm.h>
+#include <platform_def.h>
+
+/*
+ * FVP_R error handler
+ */
+__dead2 void plat_arm_error_handler(int err)
+{
+	int ret;
+
+	switch (err) {
+	case -ENOENT:
+	case -EAUTH:
+		/* Image load or authentication error. Erase the ToC */
+		INFO("Erasing FIP ToC from flash...\n");
+		(void)nor_unlock(PLAT_ARM_FLASH_IMAGE_BASE);
+		ret = nor_word_program(PLAT_ARM_FLASH_IMAGE_BASE, 0);
+		if (ret != 0) {
+			ERROR("Cannot erase ToC\n");
+		} else {
+			INFO("Done\n");
+		}
+		break;
+	default:
+		/* Unexpected error */
+		break;
+	}
+
+	(void)console_flush();
+
+	/* Set up the watchdog to reset the system as soon as possible */
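+	/* A reload value of 1 tick makes the watchdog expire almost immediately. */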
+	sp805_refresh(ARM_SP805_TWDG_BASE, 1U);
+
+	while (true) {
+		wfi();
+	}
+}
diff --git a/plat/arm/board/fvp_r/fvp_r_helpers.S b/plat/arm/board/fvp_r/fvp_r_helpers.S
new file mode 100644
index 0000000..f7a04d8
--- /dev/null
+++ b/plat/arm/board/fvp_r/fvp_r_helpers.S
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <drivers/arm/fvp/fvp_pwrc.h>
+#include <drivers/arm/gicv2.h>
+#include <drivers/arm/gicv3.h>
+
+#include <platform_def.h>
+
+
+	.globl	plat_secondary_cold_boot_setup
+	.globl	plat_get_my_entrypoint
+	.globl	plat_is_my_cpu_primary
+	.globl	plat_arm_calc_core_pos
+
+	/* -----------------------------------------------------
+	 * void plat_secondary_cold_boot_setup (void);
+	 *
+	 * This function performs any platform specific actions
+	 * This function performs any platform-specific actions
+	 * needed by a secondary CPU after a cold reset, e.g.
+	 * marking the CPU's presence or placing it in a
+	 * holding pen.
+	 * TODO: Should we read the PSYS register to make sure
+	 * that the request has gone through?
+	 */
+func plat_secondary_cold_boot_setup
+	/* ---------------------------------------------
+	 * Power down this cpu.
+	 * TODO: Do we need to worry about powering the
+	 * TODO: Do we need to worry about powering the
+	 * cluster down as well here? That will need
+	 * locks, which we won't have unless an ELF
+	 * loader zeroes out the ZI section.
+	 */
+	mrs	x0, mpidr_el1
+	mov_imm	x1, PWRC_BASE
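+	/* Request power-down by writing this core's MPIDR to PPOFFR */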
+	str	w0, [x1, #PPOFFR_OFF]
+
+	/* ---------------------------------------------
+	 * There is no sane reason to come out of this
+	 * wfi, so panic if we do. This CPU will be
+	 * powered on and reset by the cpu_on PM API.
+	 * ---------------------------------------------
+	 */
+	dsb	sy
+	wfi
+	no_ret	plat_panic_handler
+endfunc plat_secondary_cold_boot_setup
+
+	/* ---------------------------------------------------------------------
+	 * uintptr_t plat_get_my_entrypoint (void);
+	 *
+	 * Main job of this routine is to distinguish between a cold and warm
+	 * boot. On FVP_R, this information can be queried from the power
+	 * controller. The Power Control SYS Status Register (PSYSR) indicates
+	 * the wake-up reason for the CPU.
+	 *
+	 * For a cold boot, return 0.
+	 * For a warm boot, read the mailbox and return the address it contains.
+	 *
+	 * TODO: PSYSR is a common register and should be
+	 * 	accessed using locks. Since it is not possible
+	 * 	to use locks immediately after a cold reset,
+	 * 	we rely on the fact that after a cold reset
+	 * 	all CPUs will read the same WK field.
+	 * ---------------------------------------------------------------------
+	 */
+func plat_get_my_entrypoint
+	/* ---------------------------------------------------------------------
+	 * When the PSYSR.WK field indicates either "Wake by PPONR" or "Wake by GIC
+	 * WakeRequest signal" then it is a warm boot.
+	 * ---------------------------------------------------------------------
+	 */
+	mrs	x2, mpidr_el1
+	mov_imm	x1, PWRC_BASE
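+	/* Writing the MPIDR to PSYSR selects which core's status is read back */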
+	str	w2, [x1, #PSYSR_OFF]
+	ldr	w2, [x1, #PSYSR_OFF]
+	ubfx	w2, w2, #PSYSR_WK_SHIFT, #PSYSR_WK_WIDTH
+	cmp	w2, #WKUP_PPONR
+	beq	warm_reset
+	cmp	w2, #WKUP_GICREQ
+	beq	warm_reset
+
+	/* Cold reset */
+	mov	x0, #0
+	ret
+
+warm_reset:
+	/* ---------------------------------------------------------------------
+	 * A mailbox is maintained in the trusted SRAM. It is flushed out of the
+	 * caches after every update made through normal memory, so it is safe
+	 * to read it here with SO attributes.
+	 * ---------------------------------------------------------------------
+	 */
+	mov_imm	x0, PLAT_ARM_TRUSTED_MAILBOX_BASE
+	ldr	x0, [x0]
+	cbz	x0, _panic_handler
+	ret
+
+	/* ---------------------------------------------------------------------
+	 * The power controller indicates this is a warm reset but the mailbox
+	 * is empty. This should never happen!
+	 * ---------------------------------------------------------------------
+	 */
+_panic_handler:
+	no_ret	plat_panic_handler
+endfunc plat_get_my_entrypoint
+
+	/* -----------------------------------------------------
+	 * unsigned int plat_is_my_cpu_primary (void);
+	 *
+	 * Find out whether the current cpu is the primary
+	 * cpu.
+	 * -----------------------------------------------------
+	 */
+func plat_is_my_cpu_primary
+	mrs	x0, mpidr_el1
+	mov_imm	x1, MPIDR_AFFINITY_MASK
+	and	x0, x0, x1
+	cmp	x0, #FVP_R_PRIMARY_CPU
+	cset	w0, eq
+	ret
+endfunc plat_is_my_cpu_primary
+
+	/* ---------------------------------------------------------------------
+	 * unsigned int plat_arm_calc_core_pos(u_register_t mpidr)
+	 *
+	 * Function to calculate the core position on FVP_R.
+	 *
+	 * (ClusterId * FVP_R_MAX_CPUS_PER_CLUSTER * FVP_R_MAX_PE_PER_CPU) +
+	 * (CPUId * FVP_R_MAX_PE_PER_CPU) +
+	 * ThreadId
+	 *
+	 * which can be simplified as:
+	 *
+	 * ((ClusterId * FVP_R_MAX_CPUS_PER_CLUSTER + CPUId) * FVP_R_MAX_PE_PER_CPU)
+	 * + ThreadId
+	 * ---------------------------------------------------------------------
+	 */
+func plat_arm_calc_core_pos
+	/*
+	 * Check the MT bit in MPIDR. If it is not set, shift MPIDR left so that
+	 * it looks like the MPIDR of a multi-threaded implementation.
+	 */
+	tst	x0, #MPIDR_MT_MASK
+	lsl	x3, x0, #MPIDR_AFFINITY_BITS
+	csel	x3, x3, x0, eq
+
+	/* Extract individual affinity fields from MPIDR */
+	ubfx	x0, x3, #MPIDR_AFF0_SHIFT, #MPIDR_AFFINITY_BITS
+	ubfx	x1, x3, #MPIDR_AFF1_SHIFT, #MPIDR_AFFINITY_BITS
+	ubfx	x2, x3, #MPIDR_AFF2_SHIFT, #MPIDR_AFFINITY_BITS
+
+	/* Compute linear position */
+	mov	x4, #FVP_R_MAX_CPUS_PER_CLUSTER
+	madd	x1, x2, x4, x1
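+	/* x1 = (ClusterId * FVP_R_MAX_CPUS_PER_CLUSTER) + CPUId */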
+	mov	x5, #FVP_R_MAX_PE_PER_CPU
+	madd	x0, x1, x5, x0
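+	/* x0 = (x1 * FVP_R_MAX_PE_PER_CPU) + ThreadId */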
+	ret
+endfunc plat_arm_calc_core_pos
diff --git a/plat/arm/board/fvp_r/fvp_r_io_storage.c b/plat/arm/board/fvp_r/fvp_r_io_storage.c
new file mode 100644
index 0000000..3b44828
--- /dev/null
+++ b/plat/arm/board/fvp_r/fvp_r_io_storage.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <common/debug.h>
+#include <drivers/io/io_driver.h>
+#include <drivers/io/io_semihosting.h>
+#include <drivers/io/io_storage.h>
+#include <lib/semihosting.h>
+#include <plat/arm/common/plat_arm.h>
+#include <plat/common/common_def.h>
+
+/* Semihosting filenames */
+#define BL33_IMAGE_NAME			"bl33.bin"
+
+#if TRUSTED_BOARD_BOOT
+#define TRUSTED_KEY_CERT_NAME		"trusted_key.crt"
+#define NT_FW_KEY_CERT_NAME		"nt_fw_key.crt"
+#define NT_FW_CONTENT_CERT_NAME		"nt_fw_content.crt"
+#endif /* TRUSTED_BOARD_BOOT */
+
+/* IO devices */
+static const io_dev_connector_t *sh_dev_con;
+static uintptr_t sh_dev_handle;
+
+static const io_file_spec_t sh_file_spec[] = {
+	[BL33_IMAGE_ID] = {
+		.path = BL33_IMAGE_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+#if TRUSTED_BOARD_BOOT
+	[TRUSTED_KEY_CERT_ID] = {
+		.path = TRUSTED_KEY_CERT_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[NON_TRUSTED_FW_KEY_CERT_ID] = {
+		.path = NT_FW_KEY_CERT_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[NON_TRUSTED_FW_CONTENT_CERT_ID] = {
+		.path = NT_FW_CONTENT_CERT_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+#endif /* TRUSTED_BOARD_BOOT */
+};
+
+
+static int open_semihosting(const uintptr_t spec)
+{
+	int result;
+	uintptr_t local_image_handle;
+
+	/* See if the file exists on semi-hosting. */
+	result = io_dev_init(sh_dev_handle, (uintptr_t)NULL);
+	if (result == 0) {
+		result = io_open(sh_dev_handle, spec, &local_image_handle);
+		if (result == 0) {
+			VERBOSE("Using Semi-hosting IO\n");
+			io_close(local_image_handle);
+		}
+	}
+	return result;
+}
+
+void plat_arm_io_setup(void)
+{
+	int io_result;
+
+	io_result = arm_io_setup();
+	if (io_result < 0) {
+		panic();
+	}
+
+	/* Register the additional IO devices on this platform */
+	io_result = register_io_dev_sh(&sh_dev_con);
+	if (io_result < 0) {
+		panic();
+	}
+
+	/* Open connections to devices and cache the handles */
+	io_result = io_dev_open(sh_dev_con, (uintptr_t)NULL, &sh_dev_handle);
+	if (io_result < 0) {
+		panic();
+	}
+}
+
+/*
+ * FVP_R provides semihosting as an alternative means of loading images.
+ */
+int plat_arm_get_alt_image_source(unsigned int image_id, uintptr_t *dev_handle,
+				  uintptr_t *image_spec)
+{
+	int result = open_semihosting((const uintptr_t)&sh_file_spec[image_id]);
+
+	if (result == 0) {
+		*dev_handle = sh_dev_handle;
+		*image_spec = (uintptr_t)&sh_file_spec[image_id];
+	}
+
+	return result;
+}
diff --git a/plat/arm/board/fvp_r/fvp_r_misc_helpers.S b/plat/arm/board/fvp_r/fvp_r_misc_helpers.S
new file mode 100644
index 0000000..67ad164
--- /dev/null
+++ b/plat/arm/board/fvp_r/fvp_r_misc_helpers.S
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+	.globl	disable_mpu_el2
+	.globl	disable_mpu_icache_el2
+
+/* ---------------------------------------------------------------------------
+ * Disable the MPU at EL2.
+ * ---------------------------------------------------------------------------
+ */
+
+func disable_mpu_el2
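+	/* Clear the MPU enable (M) and data cache (C) bits in SCTLR_EL2 */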
+	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
+do_disable_mpu_el2:
+	mrs	x0, sctlr_el2
+	bic	x0, x0, x1
+	msr	sctlr_el2, x0
+	isb	/* ensure MPU is off */
+	dsb	sy
+	ret
+endfunc disable_mpu_el2
+
+
+func disable_mpu_icache_el2
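+	/* Additionally clear the instruction cache enable (I) bit */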
+	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
+	b	do_disable_mpu_el2
+endfunc disable_mpu_icache_el2
diff --git a/plat/arm/board/fvp_r/fvp_r_pauth_helpers.S b/plat/arm/board/fvp_r/fvp_r_pauth_helpers.S
new file mode 100644
index 0000000..7e6bc3d
--- /dev/null
+++ b/plat/arm/board/fvp_r/fvp_r_pauth_helpers.S
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <lib/el3_runtime/cpu_data.h>
+
+	.global	pauth_init_enable_el2
+	.global	pauth_disable_el2
+
+/* -------------------------------------------------------------
+ * This file contains EL2 versions of the EL3 functions in:
+ * 	.../lib/extensions/pauth/pauth_helpers.S
+ * -------------------------------------------------------------
+ */
+
+/* -------------------------------------------------------------
+ * Program APIAKey_EL1 and enable pointer authentication in EL2
+ * -------------------------------------------------------------
+ */
+func pauth_init_enable_el2
+	stp	x29, x30, [sp, #-16]!
+
+	/* Initialize platform key */
+	bl	plat_init_apkey
+
+	/* Program instruction key A used by the Trusted Firmware */
+	msr	APIAKeyLo_EL1, x0
+	msr	APIAKeyHi_EL1, x1
+
+	/* Enable pointer authentication */
+	mrs	x0, sctlr_el2
+	orr	x0, x0, #SCTLR_EnIA_BIT
+
+#if ENABLE_BTI
+	 /* Enable PAC branch type compatibility */
+	bic	x0, x0, #SCTLR_BT_BIT
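+	/* With SCTLR_EL2.BT clear, PACIASP/PACIBSP act as branch target instructions */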
+#endif
+	msr	sctlr_el2, x0
+	isb
+
+	ldp	x29, x30, [sp], #16
+	ret
+endfunc pauth_init_enable_el2
+
+/* -------------------------------------------------------------
+ * Disable pointer authentication in EL2
+ * -------------------------------------------------------------
+ */
+func pauth_disable_el2
+	mrs	x0, sctlr_el2
+	bic	x0, x0, #SCTLR_EnIA_BIT
+	msr	sctlr_el2, x0
+	isb
+	ret
+endfunc pauth_disable_el2
diff --git a/plat/arm/board/fvp_r/fvp_r_private.h b/plat/arm/board/fvp_r/fvp_r_private.h
new file mode 100644
index 0000000..48f6e89
--- /dev/null
+++ b/plat/arm/board/fvp_r/fvp_r_private.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FVP_R_PRIVATE_H
+#define FVP_R_PRIVATE_H
+
+#include <plat/arm/common/plat_arm.h>
+
+/*******************************************************************************
+ * Function and variable prototypes
+ ******************************************************************************/
+
+void fvp_config_setup(void);
+
+void fvp_interconnect_init(void);
+void fvp_interconnect_enable(void);
+void fvp_interconnect_disable(void);
+void fvp_timer_init(void);
+
+#endif /* FVP_R_PRIVATE_H */
diff --git a/plat/arm/board/fvp_r/fvp_r_stack_protector.c b/plat/arm/board/fvp_r/fvp_r_stack_protector.c
new file mode 100644
index 0000000..69b6312
--- /dev/null
+++ b/plat/arm/board/fvp_r/fvp_r_stack_protector.c
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdint.h>
+
+#include <fvp_r_arch_helpers.h>
+#include <plat/common/platform.h>
+
+#define RANDOM_CANARY_VALUE ((u_register_t) 8092347823957523895ULL)
+
+u_register_t plat_get_stack_protector_canary(void)
+{
+	/*
+	 * Ideally, a random number should be returned instead of the
+	 * combination of a timer's value and a compile-time constant. As the
+	 * FVP_R does not have any random number generator, this is better than
+	 * nothing but not necessarily really secure.
+	 */
+	return RANDOM_CANARY_VALUE ^ read_cntpct_el0();
+}
+
diff --git a/plat/arm/board/fvp_r/fvp_r_trusted_boot.c b/plat/arm/board/fvp_r/fvp_r_trusted_boot.c
new file mode 100644
index 0000000..de0b28f
--- /dev/null
+++ b/plat/arm/board/fvp_r/fvp_r_trusted_boot.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <lib/fconf/fconf.h>
+#include <lib/mmio.h>
+#include <tools_share/tbbr_oid.h>
+
+#include <plat/arm/common/fconf_nv_cntr_getter.h>
+#include <plat/arm/common/plat_arm.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+
+
+/*
+ * Return the ROTPK hash in the following ASN.1 structure in DER format:
+ *
+ * AlgorithmIdentifier  ::=  SEQUENCE  {
+ *     algorithm	OBJECT IDENTIFIER,
+ *     parameters	ANY DEFINED BY algorithm OPTIONAL
+ * }
+ *
+ * DigestInfo ::= SEQUENCE {
+ *     digestAlgorithm	AlgorithmIdentifier,
+ *     digest		OCTET STRING
+ * }
+ */
+int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len,
+			unsigned int *flags)
+{
+	return arm_get_rotpk_info(cookie, key_ptr, key_len, flags);
+}
+
+/*
+ * Store a new non-volatile counter value.
+ *
+ * On some FVP_R versions, the non-volatile counters are read-only, so this
+ * function will always fail.
+ *
+ * Return: 0 = success, Otherwise = error
+ */
+int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr)
+{
+	const char *oid;
+	uintptr_t nv_ctr_addr;
+
+	assert(cookie != NULL);
+
+	oid = (const char *)cookie;
+	if (strcmp(oid, TRUSTED_FW_NVCOUNTER_OID) == 0) {
+		nv_ctr_addr = FCONF_GET_PROPERTY(cot, nv_cntr_addr,
+						TRUSTED_NV_CTR_ID);
+	} else if (strcmp(oid, NON_TRUSTED_FW_NVCOUNTER_OID) == 0) {
+		nv_ctr_addr = FCONF_GET_PROPERTY(cot, nv_cntr_addr,
+						NON_TRUSTED_NV_CTR_ID);
+	} else {
+		return 1;
+	}
+
+	mmio_write_32(nv_ctr_addr, nv_ctr);
+
+	/*
+	 * If the FVP_R models a locked counter then its value cannot be updated
+	 * and the above write operation has been silently ignored.
+	 */
+	return (mmio_read_32(nv_ctr_addr) == nv_ctr) ? 0 : 1;
+}
diff --git a/plat/arm/board/fvp_r/include/fvp_r_arch_helpers.h b/plat/arm/board/fvp_r/include/fvp_r_arch_helpers.h
new file mode 100644
index 0000000..92bf484
--- /dev/null
+++ b/plat/arm/board/fvp_r/include/fvp_r_arch_helpers.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FVP_R_ARCH_HELPERS_H
+#define FVP_R_ARCH_HELPERS_H
+
+#include <arch_helpers.h>
+
+/*******************************************************************************
+ * MPU register definitions
+ ******************************************************************************/
+#define MPUIR_EL2		S3_4_C0_C0_4
+#define PRBAR_EL2		S3_4_C6_C8_0
+#define PRLAR_EL2		S3_4_C6_C8_1
+#define PRSELR_EL2		S3_4_C6_C2_1
+#define PRENR_EL2		S3_4_C6_C1_1
+
+/* v8-R64 MPU registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpuir_el2, MPUIR_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(prenr_el2, PRENR_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(prselr_el2, PRSELR_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(prbar_el2, PRBAR_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(prlar_el2, PRLAR_EL2)
+
+#endif /* FVP_R_ARCH_HELPERS_H */
diff --git a/plat/arm/board/fvp_r/include/plat.ld.S b/plat/arm/board/fvp_r/include/plat.ld.S
new file mode 100644
index 0000000..e91a5a0
--- /dev/null
+++ b/plat/arm/board/fvp_r/include/plat.ld.S
@@ -0,0 +1,11 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef PLAT_LD_S
+#define PLAT_LD_S
+
+#include <plat/arm/common/arm_tzc_dram.ld.S>
+
+#endif /* PLAT_LD_S */
diff --git a/plat/arm/board/fvp_r/include/platform_def.h b/plat/arm/board/fvp_r/include/platform_def.h
new file mode 100644
index 0000000..4a6b441
--- /dev/null
+++ b/plat/arm/board/fvp_r/include/platform_def.h
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FVP_R_PLATFORM_DEF_H
+#define FVP_R_PLATFORM_DEF_H
+
+#define PLAT_V2M_OFFSET			0x80000000
+
+#define BL33_IMAGE_DESC {				\
+	.image_id = BL33_IMAGE_ID,			\
+	SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,	\
+		VERSION_2, image_info_t, 0),		\
+	.image_info.image_base = PLAT_ARM_DRAM1_BASE + 0x1000,		\
+	.image_info.image_max_size = UL(0x3ffff000), \
+	SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,	\
+		VERSION_2, entry_point_info_t, SECURE | EXECUTABLE),\
+	.ep_info.pc = PLAT_ARM_DRAM1_BASE + 0x1000,				\
+	.ep_info.spsr = SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS),	\
+}
+
+#include "../fvp_r_def.h"
+#include <drivers/arm/tzc400.h>
+#include <lib/utils_def.h>
+#include <plat/arm/board/common/v2m_def.h>
+
+/* These are referenced by arm_def.h (#included below), so #define them first. */
+#define PLAT_ARM_TRUSTED_ROM_BASE	UL(0x80000000)
+#define PLAT_ARM_TRUSTED_SRAM_BASE	UL(0x84000000)
+#define PLAT_ARM_TRUSTED_DRAM_BASE	UL(0x86000000)
+#define PLAT_ARM_DRAM1_BASE		ULL(0x0)
+#define PLAT_ARM_DRAM2_BASE		ULL(0x080000000)
+
+#define PLAT_HW_CONFIG_DTB_BASE		ULL(0x12000000)
+#define PLAT_ARM_SYS_CNTCTL_BASE	UL(0xaa430000)
+#define PLAT_ARM_SYS_CNTREAD_BASE	UL(0xaa800000)
+#define PLAT_ARM_SYS_TIMCTL_BASE	UL(0xaa810000)
+#define PLAT_ARM_SYS_CNT_BASE_S		UL(0xaa820000)
+#define PLAT_ARM_SYS_CNT_BASE_NS	UL(0xaa830000)
+#define PLAT_ARM_SP805_TWDG_BASE	UL(0xaa490000)
+
+#include <plat/arm/common/arm_def.h>
+#include <plat/common/common_def.h>
+
+
+/* Required to create plat_regions: */
+#define MIN_LVL_BLOCK_DESC	U(1)
+
+/* Required platform porting definitions */
+#define PLATFORM_CORE_COUNT  (U(FVP_R_CLUSTER_COUNT) * \
+			      U(FVP_R_MAX_CPUS_PER_CLUSTER) * \
+			      U(FVP_R_MAX_PE_PER_CPU))
+
+#define PLAT_NUM_PWR_DOMAINS (U(FVP_R_CLUSTER_COUNT) + \
+			      PLATFORM_CORE_COUNT + U(1))
+
+#define PLAT_MAX_PWR_LVL		ARM_PWR_LVL2
+
+/*
+ * Other platform porting definitions are provided by included headers
+ */
+
+/*
+ * Required ARM standard platform porting definitions
+ */
+#define PLAT_ARM_CLUSTER_COUNT		U(FVP_R_CLUSTER_COUNT)
+#define PLAT_ARM_DRAM1_SIZE		ULL(0x7fffffff)
+#define PLAT_ARM_TRUSTED_SRAM_SIZE	UL(0x00040000)	/* 256 KB */
+#define PLAT_ARM_TRUSTED_ROM_SIZE	UL(0x04000000)	/* 64 MB */
+#define PLAT_ARM_TRUSTED_DRAM_SIZE	UL(0x02000000)	/* 32 MB */
+
+/* These are defined this way in arm_def.h, but don't seem to be picked up here... */
+#define PLAT_BL1_RO_LIMIT               (BL1_RO_BASE \
+					+ PLAT_ARM_TRUSTED_ROM_SIZE)
+
+#define PLAT_ARM_SYS_CNTCTL_BASE	UL(0xaa430000)
+#define PLAT_ARM_SYS_CNTREAD_BASE	UL(0xaa800000)
+#define PLAT_ARM_SYS_TIMCTL_BASE	UL(0xaa810000)
+#define PLAT_ARM_SYS_CNT_BASE_S		UL(0xaa820000)
+#define PLAT_ARM_SYS_CNT_BASE_NS	UL(0xaa830000)
+#define PLAT_ARM_SP805_TWDG_BASE	UL(0xaa490000)
+
+/* virtual address used by dynamic mem_protect for chunk_base */
+#define PLAT_ARM_MEM_PROTEC_VA_FRAME	UL(0xc0000000)
+
+/* No SCP in FVP_R */
+#define PLAT_ARM_SCP_TZC_DRAM1_SIZE	UL(0x0)
+
+#define PLAT_ARM_DRAM2_SIZE		UL(0x80000000)
+
+#define PLAT_HW_CONFIG_DTB_SIZE		ULL(0x8000)
+
+#define ARM_DTB_DRAM_NS			MAP_REGION_FLAT(		\
+					PLAT_HW_CONFIG_DTB_BASE,	\
+					PLAT_HW_CONFIG_DTB_SIZE,	\
+					MT_MEMORY | MT_RO | MT_NS)
+
+#define V2M_FVP_R_SYSREGS_BASE		UL(0x9c010000)
+
+/*
+ * Load address of BL33 for this platform port;
+ * U-Boot specifically must be loaded at a 4K-aligned address.
+ */
+#define PLAT_ARM_NS_IMAGE_BASE		(PLAT_ARM_DRAM1_BASE + 0x1000)
+
+/*
+ * PLAT_ARM_MMAP_ENTRIES depends on the number of entries in the
+ * plat_arm_mmap array defined for each BL stage.
+ */
+#if !USE_ROMLIB
+# define PLAT_ARM_MMAP_ENTRIES		11
+# define MAX_XLAT_TABLES		5
+#else
+# define PLAT_ARM_MMAP_ENTRIES		12
+# define MAX_XLAT_TABLES		6
+#endif
+# define N_MPU_REGIONS			16  /* number of MPU regions */
+# define ALL_MPU_EL2_REGIONS_USED	0xffffffff
+	/* this is the PRENR_EL2 value if all MPU regions are in use */
+
+/*
+ * PLAT_ARM_MAX_BL1_RW_SIZE is calculated using the current BL1 RW debug size
+ * plus a little space for growth.
+ */
+#define PLAT_ARM_MAX_BL1_RW_SIZE	UL(0xB000)
+
+/*
+ * PLAT_ARM_MAX_ROMLIB_RW_SIZE is defined to use a full page.
+ */
+
+#if USE_ROMLIB
+#define PLAT_ARM_MAX_ROMLIB_RW_SIZE	UL(0x1000)
+#define PLAT_ARM_MAX_ROMLIB_RO_SIZE	UL(0xe000)
+#define FVP_R_BL2_ROMLIB_OPTIMIZATION UL(0x6000)
+#else
+#define PLAT_ARM_MAX_ROMLIB_RW_SIZE	UL(0)
+#define PLAT_ARM_MAX_ROMLIB_RO_SIZE	UL(0)
+#define FVP_R_BL2_ROMLIB_OPTIMIZATION UL(0)
+#endif
+
+/*
+ * PLAT_ARM_MAX_BL2_SIZE is calculated using the current BL2 debug size plus a
+ * little space for growth.
+ */
+#if TRUSTED_BOARD_BOOT
+#if COT_DESC_IN_DTB
+# define PLAT_ARM_MAX_BL2_SIZE	(UL(0x1E000) - FVP_R_BL2_ROMLIB_OPTIMIZATION)
+#else
+# define PLAT_ARM_MAX_BL2_SIZE	(UL(0x1D000) - FVP_R_BL2_ROMLIB_OPTIMIZATION)
+#endif
+#else
+# define PLAT_ARM_MAX_BL2_SIZE	(UL(0x13000) - FVP_R_BL2_ROMLIB_OPTIMIZATION)
+#endif
+
+/*
+ * Since BL31 NOBITS overlays BL2 and BL1-RW, PLAT_ARM_MAX_BL31_SIZE is
+ * calculated using the current BL31 PROGBITS debug size plus the sizes of
+ * BL2 and BL1-RW
+ */
+#define PLAT_ARM_MAX_BL31_SIZE		UL(0x3D000)
+
+/*
+ * Size of cacheable stacks
+ */
+#if defined(IMAGE_BL1)
+# if TRUSTED_BOARD_BOOT
+#  define PLATFORM_STACK_SIZE		UL(0x1000)
+# else
+#  define PLATFORM_STACK_SIZE		UL(0x500)
+# endif
+#endif
+
+#define MAX_IO_DEVICES			3
+#define MAX_IO_HANDLES			4
+
+/*
+ * These would nominally reserve the last block of flash for the PSCI MEM
+ * PROTECT flag, but there is no PSCI on the FVP_R platform, so nothing is
+ * reserved:
+ */
+#define PLAT_ARM_FLASH_IMAGE_BASE	(PLAT_ARM_DRAM1_BASE + UL(0x40000000))
+#define PLAT_ARM_FLASH_IMAGE_MAX_SIZE	(PLAT_ARM_DRAM1_SIZE - UL(0x40000000))
+
+#define PLAT_ARM_NVM_BASE		V2M_FLASH0_BASE
+#define PLAT_ARM_NVM_SIZE		(V2M_FLASH0_SIZE - V2M_FLASH_BLOCK_SIZE)
+
+/*
+ * PL011 related constants
+ */
+#define PLAT_ARM_BOOT_UART_BASE		V2M_IOFPGA_UART0_BASE
+#define PLAT_ARM_BOOT_UART_CLK_IN_HZ	V2M_IOFPGA_UART0_CLK_IN_HZ
+
+#define PLAT_ARM_RUN_UART_BASE		V2M_IOFPGA_UART1_BASE
+#define PLAT_ARM_RUN_UART_CLK_IN_HZ	V2M_IOFPGA_UART1_CLK_IN_HZ
+
+#define PLAT_ARM_CRASH_UART_BASE	PLAT_ARM_RUN_UART_BASE
+#define PLAT_ARM_CRASH_UART_CLK_IN_HZ	PLAT_ARM_RUN_UART_CLK_IN_HZ
+
+#define PLAT_ARM_TSP_UART_BASE		V2M_IOFPGA_UART2_BASE
+#define PLAT_ARM_TSP_UART_CLK_IN_HZ	V2M_IOFPGA_UART2_CLK_IN_HZ
+
+/* CCI related constants */
+#define PLAT_FVP_R_CCI400_BASE		UL(0xac090000)
+#define PLAT_FVP_R_CCI400_CLUS0_SL_PORT	3
+#define PLAT_FVP_R_CCI400_CLUS1_SL_PORT	4
+
+/* CCI-500/CCI-550 on Base platform */
+#define PLAT_FVP_R_CCI5XX_BASE		UL(0xaa000000)
+#define PLAT_FVP_R_CCI5XX_CLUS0_SL_PORT	5
+#define PLAT_FVP_R_CCI5XX_CLUS1_SL_PORT	6
+
+/* CCN related constants. Only CCN 502 is currently supported */
+#define PLAT_ARM_CCN_BASE		UL(0xae000000)
+#define PLAT_ARM_CLUSTER_TO_CCN_ID_MAP	1, 5, 7, 11
+
+/* System timer related constants */
+#define PLAT_ARM_NSTIMER_FRAME_ID	U(1)
+
+/* Mailbox base address */
+#define PLAT_ARM_TRUSTED_MAILBOX_BASE	ARM_TRUSTED_SRAM_BASE
+
+
+/* TrustZone controller related constants
+ *
+ * Currently only filters 0 and 2 are connected on Base FVP_R.
+ * Filter 0 : CPU clusters (no access to DRAM by default)
+ * Filter 1 : not connected
+ * Filter 2 : LCDs (access to VRAM allowed by default)
+ * Filter 3 : not connected
+ * Programming unconnected filters will have no effect at the
+ * moment. These filters could, however, be connected in the future,
+ * so care should be taken not to configure the unused filters.
+ *
+ * Allow only non-secure access to all DRAM for the supported devices.
+ * Give access to the CPUs and Virtio. Some devices
+ * would normally use the default ID, so allow that too.
+ */
+#define PLAT_ARM_TZC_BASE		UL(0xaa4a0000)
+#define PLAT_ARM_TZC_FILTERS		TZC_400_REGION_ATTR_FILTER_BIT(0)
+
+#define PLAT_ARM_TZC_NS_DEV_ACCESS	(				\
+		TZC_REGION_ACCESS_RDWR(FVP_R_NSAID_DEFAULT)	|	\
+		TZC_REGION_ACCESS_RDWR(FVP_R_NSAID_PCI)		|	\
+		TZC_REGION_ACCESS_RDWR(FVP_R_NSAID_AP)		|	\
+		TZC_REGION_ACCESS_RDWR(FVP_R_NSAID_VIRTIO)	|	\
+		TZC_REGION_ACCESS_RDWR(FVP_R_NSAID_VIRTIO_OLD))
+
+/*
+ * GIC related constants to cater for both GICv2 and GICv3 instances of an
+ * FVP_R. They could be overridden at runtime in case the FVP_R implements the
+ * legacy VE memory map.
+ */
+#define PLAT_ARM_GICD_BASE		BASE_GICD_BASE
+#define PLAT_ARM_GICR_BASE		BASE_GICR_BASE
+#define PLAT_ARM_GICC_BASE		BASE_GICC_BASE
+
+#define PLAT_ARM_SP_IMAGE_STACK_BASE	(PLAT_SP_IMAGE_NS_BUF_BASE +	\
+					 PLAT_SP_IMAGE_NS_BUF_SIZE)
+
+#define PLAT_SP_PRI			PLAT_RAS_PRI
+
+/*
+ * Physical and virtual address space limits for the MPU in AArch64 & AArch32 modes
+ */
+#define PLAT_PHY_ADDR_SPACE_SIZE	(1ULL << 36)
+#define PLAT_VIRT_ADDR_SPACE_SIZE	(1ULL << 36)
+
+#define ARM_SOC_CONTINUATION_SHIFT	U(24)
+#define ARM_SOC_IDENTIFICATION_SHIFT	U(16)
+
+#endif /* FVP_R_PLATFORM_DEF_H */
diff --git a/plat/arm/board/fvp_r/platform.mk b/plat/arm/board/fvp_r/platform.mk
new file mode 100644
index 0000000..8f5878f
--- /dev/null
+++ b/plat/arm/board/fvp_r/platform.mk
@@ -0,0 +1,146 @@
+#
+# Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Only aarch64 ARCH supported for FVP_R
+ARCH	:= aarch64
+
+# Override to exclude BL2, BL2U, BL31, and BL33 for FVP_R
+override NEED_BL2	:= no
+override NEED_BL2U	:= no
+override NEED_BL31	:= no
+NEED_BL32		:= no
+
+override CTX_INCLUDE_AARCH32_REGS	:=	0
+
+# Default cluster count for FVP_R
+FVP_R_CLUSTER_COUNT	:= 2
+
+# Default number of CPUs per cluster on FVP_R
+FVP_R_MAX_CPUS_PER_CLUSTER	:= 4
+
+# Default number of threads per CPU on FVP_R
+FVP_R_MAX_PE_PER_CPU	:= 1
+
+# Use MPU-based memory management:
+XLAT_MPU_LIB_V1		:=	1
+
+# Pass FVP_R_CLUSTER_COUNT to the build system.
+$(eval $(call add_define,FVP_R_CLUSTER_COUNT))
+
+# Pass FVP_R_MAX_CPUS_PER_CLUSTER to the build system.
+$(eval $(call add_define,FVP_R_MAX_CPUS_PER_CLUSTER))
+
+# Pass FVP_R_MAX_PE_PER_CPU to the build system.
+$(eval $(call add_define,FVP_R_MAX_PE_PER_CPU))
+
+# Sanity-check the cluster count; if FVP_R_CLUSTER_COUNT <= 2,
+# choose the CCI driver, else the CCN driver
+ifeq ($(FVP_R_CLUSTER_COUNT), 0)
+$(error "Incorrect cluster count specified for FVP_R port")
+else ifeq ($(FVP_R_CLUSTER_COUNT),$(filter $(FVP_R_CLUSTER_COUNT),1 2))
+FVP_R_INTERCONNECT_DRIVER := FVP_R_CCI
+else
+FVP_R_INTERCONNECT_DRIVER := FVP_R_CCN
+endif
+
+$(eval $(call add_define,FVP_R_INTERCONNECT_DRIVER))
+
+ifeq (${FVP_R_INTERCONNECT_DRIVER}, FVP_R_CCI)
+FVP_R_INTERCONNECT_SOURCES	:= 	drivers/arm/cci/cci.c
+else ifeq (${FVP_R_INTERCONNECT_DRIVER}, FVP_R_CCN)
+FVP_R_INTERCONNECT_SOURCES	:= 	drivers/arm/ccn/ccn.c		\
+					plat/arm/common/arm_ccn.c
+else
+$(error "Incorrect CCN driver chosen on FVP_R port")
+endif
+
+include plat/arm/board/common/board_common.mk
+include plat/arm/common/arm_common.mk
+
+PLAT_INCLUDES		:=	-Iplat/arm/board/fvp_r/include
+
+FVP_R_BL_COMMON_SOURCES	:=	plat/arm/board/fvp_r/fvp_r_common.c		\
+				plat/arm/board/fvp_r/fvp_r_context_mgmt.c	\
+				plat/arm/board/fvp_r/fvp_r_context.S		\
+				plat/arm/board/fvp_r/fvp_r_debug.S		\
+				plat/arm/board/fvp_r/fvp_r_err.c		\
+				plat/arm/board/fvp_r/fvp_r_helpers.S		\
+				plat/arm/board/fvp_r/fvp_r_misc_helpers.S	\
+				plat/arm/board/fvp_r/fvp_r_pauth_helpers.S
+
+FVP_R_BL1_SOURCES	:=	plat/arm/board/fvp_r/fvp_r_bl1_arch_setup.c	\
+				plat/arm/board/fvp_r/fvp_r_bl1_setup.c		\
+				plat/arm/board/fvp_r/fvp_r_io_storage.c		\
+				plat/arm/board/fvp_r/fvp_r_bl1_context_mgmt.c	\
+				plat/arm/board/fvp_r/fvp_r_bl1_entrypoint.S	\
+				plat/arm/board/fvp_r/fvp_r_bl1_exceptions.S	\
+				plat/arm/board/fvp_r/fvp_r_bl1_main.c
+
+FVP_R_CPU_LIBS		:=	lib/cpus/${ARCH}/aem_generic.S
+
+FVP_R_DYNC_CFG_SOURCES	:=	common/fdt_wrappers.c				\
+				common/uuid.c					\
+				plat/arm/common/arm_dyn_cfg.c			\
+				plat/arm/common/arm_dyn_cfg_helpers.c
+
+ifeq (${TRUSTED_BOARD_BOOT},1)
+FVP_R_AUTH_SOURCES	:=	drivers/auth/auth_mod.c				\
+				drivers/auth/crypto_mod.c			\
+				drivers/auth/img_parser_mod.c			\
+				lib/fconf/fconf_tbbr_getter.c			\
+				bl1/tbbr/tbbr_img_desc.c			\
+				plat/arm/common/arm_bl1_fwu.c			\
+				plat/common/tbbr/plat_tbbr.c			\
+				drivers/auth/tbbr/tbbr_cot_bl1_r64.c		\
+				drivers/auth/tbbr/tbbr_cot_common.c		\
+				plat/arm/board/common/board_arm_trusted_boot.c	\
+				plat/arm/board/common/rotpk/arm_dev_rotpk.S	\
+				plat/arm/board/fvp_r/fvp_r_trusted_boot.c
+
+FVP_R_BL1_SOURCES	+=	${MBEDTLS_SOURCES}				\
+				${FVP_R_AUTH_SOURCES}
+endif
+
+ifeq (${USE_SP804_TIMER},1)
+FVP_R_BL_COMMON_SOURCES		+=	drivers/arm/sp804/sp804_delay_timer.c
+else
+FVP_R_BL_COMMON_SOURCES		+=	drivers/delay_timer/generic_delay_timer.c
+endif
+
+# Enable Activity Monitor Unit extensions by default
+ENABLE_AMU			:=	1
+
+ifneq (${ENABLE_STACK_PROTECTOR},0)
+FVP_R_BL_COMMON_SOURCES	+=	plat/arm/board/fvp_r/fvp_r_stack_protector.c
+endif
+
+override BL1_SOURCES	:=	drivers/arm/sp805/sp805.c			\
+				drivers/cfi/v2m/v2m_flash.c			\
+				drivers/delay_timer/delay_timer.c		\
+				drivers/io/io_fip.c				\
+				drivers/io/io_memmap.c				\
+				drivers/io/io_storage.c				\
+				drivers/io/io_semihosting.c			\
+				lib/cpus/aarch64/cpu_helpers.S			\
+				lib/cpus/errata_report.c			\
+				lib/cpus/aarch64/dsu_helpers.S			\
+				lib/el3_runtime/aarch64/context.S		\
+				lib/el3_runtime/aarch64/context_mgmt.c		\
+				lib/fconf/fconf.c				\
+				lib/fconf/fconf_dyn_cfg_getter.c		\
+				lib/semihosting/semihosting.c			\
+				lib/semihosting/${ARCH}/semihosting_call.S	\
+				plat/arm/common/arm_bl1_setup.c			\
+				plat/arm/common/arm_err.c			\
+				plat/arm/common/arm_io_storage.c		\
+				plat/arm/common/fconf/arm_fconf_io.c		\
+				plat/common/plat_bl1_common.c			\
+				plat/common/aarch64/platform_up_stack.S		\
+				${FVP_R_BL1_SOURCES}				\
+				${FVP_R_BL_COMMON_SOURCES}			\
+				${FVP_R_CPU_LIBS}				\
+				${FVP_R_DYNC_CFG_SOURCES}			\
+				${FVP_R_INTERCONNECT_SOURCES}
diff --git a/plat/arm/board/juno/include/platform_def.h b/plat/arm/board/juno/include/platform_def.h
index 5299a7b..d61ba5d 100644
--- a/plat/arm/board/juno/include/platform_def.h
+++ b/plat/arm/board/juno/include/platform_def.h
@@ -53,12 +53,13 @@
 #define PLAT_ARM_DRAM2_BASE		ULL(0x880000000)
 #define PLAT_ARM_DRAM2_SIZE		ULL(0x180000000)
 
-#define PLAT_HW_CONFIG_DTB_BASE		ULL(0x82000000)
-#define PLAT_HW_CONFIG_DTB_SIZE		ULL(0x00008000) /* 32KB */
+/* Range of kernel DTB load address */
+#define JUNO_DTB_DRAM_MAP_START		ULL(0x82000000)
+#define JUNO_DTB_DRAM_MAP_SIZE		ULL(0x00008000) /* 32KB */
 
 #define ARM_DTB_DRAM_NS			MAP_REGION_FLAT(		\
-					PLAT_HW_CONFIG_DTB_BASE,	\
-					PLAT_HW_CONFIG_DTB_SIZE,	\
+					JUNO_DTB_DRAM_MAP_START,	\
+					JUNO_DTB_DRAM_MAP_SIZE,		\
 					MT_MEMORY | MT_RO | MT_NS)
 
 /* virtual address used by dynamic mem_protect for chunk_base */
diff --git a/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c b/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c
index 6a8943d..0666e57 100644
--- a/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c
+++ b/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -75,8 +75,10 @@
 	    .image_info.image_base = BL31_BASE,
 	    .image_info.image_max_size = BL31_LIMIT - BL31_BASE,
 
-# ifdef BL32_BASE
+# if defined(BL32_BASE)
 	    .next_handoff_image_id = BL32_IMAGE_ID,
+# elif ENABLE_RME
+	    .next_handoff_image_id = RMM_IMAGE_ID,
 # else
 	    .next_handoff_image_id = BL33_IMAGE_ID,
 # endif
@@ -99,6 +101,22 @@
 		    VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
 	    .next_handoff_image_id = INVALID_IMAGE_ID,
     },
+
+# if ENABLE_RME
+	/* Fill RMM related information */
+    {
+	    .image_id = RMM_IMAGE_ID,
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t, EP_REALM | EXECUTABLE),
+	    .ep_info.pc = RMM_BASE,
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, 0),
+	    .image_info.image_base = RMM_BASE,
+	    .image_info.image_max_size = RMM_LIMIT - RMM_BASE,
+	    .next_handoff_image_id = BL33_IMAGE_ID,
+    },
+# endif
+
 # ifdef BL32_BASE
 	/* Fill BL32 related information */
     {
@@ -113,7 +131,11 @@
 	    .image_info.image_base = BL32_BASE,
 	    .image_info.image_max_size = BL32_LIMIT - BL32_BASE,
 
+# if ENABLE_RME
+	    .next_handoff_image_id = RMM_IMAGE_ID,
+# else
 	    .next_handoff_image_id = BL33_IMAGE_ID,
+# endif
     },
 
 	/*
diff --git a/plat/arm/common/arm_bl1_fwu.c b/plat/arm/common/arm_bl1_fwu.c
index 124c1af..ce2c356 100644
--- a/plat/arm/common/arm_bl1_fwu.c
+++ b/plat/arm/common/arm_bl1_fwu.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -16,6 +16,8 @@
 #include <plat/arm/common/plat_arm.h>
 #include <plat/common/platform.h>
 
+#pragma weak bl1_plat_get_image_desc
+
 /* Struct to keep track of usable memory */
 typedef struct bl1_mem_info {
 	uintptr_t mem_base;
diff --git a/plat/arm/common/arm_bl1_setup.c b/plat/arm/common/arm_bl1_setup.c
index 4b2a062..320bb82 100644
--- a/plat/arm/common/arm_bl1_setup.c
+++ b/plat/arm/common/arm_bl1_setup.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -22,14 +22,17 @@
 #pragma weak bl1_early_platform_setup
 #pragma weak bl1_plat_arch_setup
 #pragma weak bl1_plat_sec_mem_layout
+#pragma weak arm_bl1_early_platform_setup
 #pragma weak bl1_plat_prepare_exit
 #pragma weak bl1_plat_get_next_image_id
 #pragma weak plat_arm_bl1_fwu_needed
+#pragma weak arm_bl1_plat_arch_setup
+#pragma weak arm_bl1_platform_setup
 
 #define MAP_BL1_TOTAL		MAP_REGION_FLAT(			\
 					bl1_tzram_layout.total_base,	\
 					bl1_tzram_layout.total_size,	\
-					MT_MEMORY | MT_RW | MT_SECURE)
+					MT_MEMORY | MT_RW | EL3_PAS)
 /*
  * If SEPARATE_CODE_AND_RODATA=1 we define a region for each section
  * otherwise one region is defined containing both
@@ -38,17 +41,17 @@
 #define MAP_BL1_RO		MAP_REGION_FLAT(			\
 					BL_CODE_BASE,			\
 					BL1_CODE_END - BL_CODE_BASE,	\
-					MT_CODE | MT_SECURE),		\
+					MT_CODE | EL3_PAS),		\
 				MAP_REGION_FLAT(			\
 					BL1_RO_DATA_BASE,		\
 					BL1_RO_DATA_END			\
 						- BL_RO_DATA_BASE,	\
-					MT_RO_DATA | MT_SECURE)
+					MT_RO_DATA | EL3_PAS)
 #else
 #define MAP_BL1_RO		MAP_REGION_FLAT(			\
 					BL_CODE_BASE,			\
 					BL1_CODE_END - BL_CODE_BASE,	\
-					MT_CODE | MT_SECURE)
+					MT_CODE | EL3_PAS)
 #endif
 
 /* Data structure which holds the extents of the trusted SRAM for BL1*/
diff --git a/plat/arm/common/arm_bl2_setup.c b/plat/arm/common/arm_bl2_setup.c
index 26af383..2871b1b 100644
--- a/plat/arm/common/arm_bl2_setup.c
+++ b/plat/arm/common/arm_bl2_setup.c
@@ -9,6 +9,7 @@
 
 #include <platform_def.h>
 
+#include <arch_features.h>
 #include <arch_helpers.h>
 #include <common/bl_common.h>
 #include <common/debug.h>
@@ -17,10 +18,16 @@
 #include <drivers/partition/partition.h>
 #include <lib/fconf/fconf.h>
 #include <lib/fconf/fconf_dyn_cfg_getter.h>
+#if ENABLE_RME
+#include <lib/gpt_rme/gpt_rme.h>
+#endif /* ENABLE_RME */
 #ifdef SPD_opteed
 #include <lib/optee_utils.h>
 #endif
 #include <lib/utils.h>
+#if ENABLE_RME
+#include <plat/arm/common/arm_pas_def.h>
+#endif /* ENABLE_RME */
 #include <plat/arm/common/plat_arm.h>
 #include <plat/common/platform.h>
 
@@ -45,11 +52,17 @@
 #pragma weak bl2_plat_get_hash
 #endif
 
+#if ENABLE_RME
 #define MAP_BL2_TOTAL		MAP_REGION_FLAT(			\
 					bl2_tzram_layout.total_base,	\
 					bl2_tzram_layout.total_size,	\
+					MT_MEMORY | MT_RW | MT_ROOT)
+#else
+#define MAP_BL2_TOTAL		MAP_REGION_FLAT(			\
+					bl2_tzram_layout.total_base,	\
+					bl2_tzram_layout.total_size,	\
 					MT_MEMORY | MT_RW | MT_SECURE)
-
+#endif /* ENABLE_RME */
 
 #pragma weak arm_bl2_plat_handle_post_image_load
 
@@ -105,8 +118,10 @@
  */
 void arm_bl2_platform_setup(void)
 {
+#if !ENABLE_RME
 	/* Initialize the secure environment */
 	plat_arm_security_setup();
+#endif
 
 #if defined(PLAT_ARM_MEM_PROT_ADDR)
 	arm_nor_psci_do_static_mem_protect();
@@ -118,9 +133,54 @@
 	arm_bl2_platform_setup();
 }
 
+#if ENABLE_RME
+
+static void arm_bl2_plat_gpt_setup(void)
+{
+	/*
+	 * The GPT library might modify the gpt regions structure to optimize
+	 * the layout, so the array cannot be constant.
+	 */
+	pas_region_t pas_regions[] = {
+		ARM_PAS_KERNEL,
+		ARM_PAS_SECURE,
+		ARM_PAS_REALM,
+		ARM_PAS_EL3_DRAM,
+		ARM_PAS_GPTS
+	};
+
+	/* Initialize entire protected space to GPT_GPI_ANY. */
+	if (gpt_init_l0_tables(GPCCR_PPS_4GB, ARM_L0_GPT_ADDR_BASE,
+		ARM_L0_GPT_SIZE) < 0) {
+		ERROR("gpt_init_l0_tables() failed!\n");
+		panic();
+	}
+
+	/* Carve out defined PAS ranges. */
+	if (gpt_init_pas_l1_tables(GPCCR_PGS_4K,
+				   ARM_L1_GPT_ADDR_BASE,
+				   ARM_L1_GPT_SIZE,
+				   pas_regions,
+				   (unsigned int)(sizeof(pas_regions) /
+				   sizeof(pas_region_t))) < 0) {
+		ERROR("gpt_init_pas_l1_tables() failed!\n");
+		panic();
+	}
+
+	INFO("Enabling Granule Protection Checks\n");
+	if (gpt_enable() < 0) {
+		ERROR("gpt_enable() failed!\n");
+		panic();
+	}
+}
+
+#endif /* ENABLE_RME */
+
 /*******************************************************************************
- * Perform the very early platform specific architectural setup here. At the
- * moment this is only initializes the mmu in a quick and dirty way.
+ * Perform the very early platform specific architectural setup here.
+ * When RME is enabled, the secure environment is initialised before
+ * Granule Protection is initialised and enabled.
+ * This function initialises the MMU in a quick and dirty way.
  ******************************************************************************/
 void arm_bl2_plat_arch_setup(void)
 {
@@ -143,13 +203,29 @@
 		ARM_MAP_BL_COHERENT_RAM,
 #endif
 		ARM_MAP_BL_CONFIG_REGION,
+#if ENABLE_RME
+		ARM_MAP_L0_GPT_REGION,
+#endif
 		{0}
 	};
 
+#if ENABLE_RME
+	/* Initialise the secure environment */
+	plat_arm_security_setup();
+#endif
 	setup_page_tables(bl_regions, plat_arm_get_mmap());
 
 #ifdef __aarch64__
+#if ENABLE_RME
+	/* BL2 runs in EL3 when RME enabled. */
+	assert(get_armv9_2_feat_rme_support() != 0U);
+	enable_mmu_el3(0);
+
+	/* Initialise and enable granule protection after MMU. */
+	arm_bl2_plat_gpt_setup();
+#else
 	enable_mmu_el1(0);
+#endif
 #else
 	enable_mmu_svc_mon(0);
 #endif
@@ -233,7 +309,7 @@
  ******************************************************************************/
 int arm_bl2_plat_handle_post_image_load(unsigned int image_id)
 {
-#if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
+#if defined(SPD_spmd) && BL2_ENABLE_SP_LOAD
 	/* For Secure Partitions we don't need post processing */
 	if ((image_id >= (MAX_NUMBER_IDS - MAX_SP_IDS)) &&
 		(image_id < MAX_NUMBER_IDS)) {
diff --git a/plat/arm/common/arm_bl31_setup.c b/plat/arm/common/arm_bl31_setup.c
index b819888..6472590 100644
--- a/plat/arm/common/arm_bl31_setup.c
+++ b/plat/arm/common/arm_bl31_setup.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -13,6 +13,9 @@
 #include <drivers/console.h>
 #include <lib/debugfs.h>
 #include <lib/extensions/ras.h>
+#if ENABLE_RME
+#include <lib/gpt_rme/gpt_rme.h>
+#endif
 #include <lib/mmio.h>
 #include <lib/xlat_tables/xlat_tables_compat.h>
 #include <plat/arm/common/plat_arm.h>
@@ -25,6 +28,9 @@
  */
 static entry_point_info_t bl32_image_ep_info;
 static entry_point_info_t bl33_image_ep_info;
+#if ENABLE_RME
+static entry_point_info_t rmm_image_ep_info;
+#endif
 
 #if !RESET_TO_BL31
 /*
@@ -43,7 +49,7 @@
 #define MAP_BL31_TOTAL		MAP_REGION_FLAT(			\
 					BL31_START,			\
 					BL31_END - BL31_START,		\
-					MT_MEMORY | MT_RW | MT_SECURE)
+					MT_MEMORY | MT_RW | EL3_PAS)
 #if RECLAIM_INIT_CODE
 IMPORT_SYM(unsigned long, __INIT_CODE_START__, BL_INIT_CODE_BASE);
 IMPORT_SYM(unsigned long, __INIT_CODE_END__, BL_CODE_END_UNALIGNED);
@@ -58,7 +64,7 @@
 					BL_INIT_CODE_BASE,		\
 					BL_INIT_CODE_END		\
 						- BL_INIT_CODE_BASE,	\
-					MT_CODE | MT_SECURE)
+					MT_CODE | EL3_PAS)
 #endif
 
 #if SEPARATE_NOBITS_REGION
@@ -66,7 +72,7 @@
 					BL31_NOBITS_BASE,		\
 					BL31_NOBITS_LIMIT 		\
 						- BL31_NOBITS_BASE,	\
-					MT_MEMORY | MT_RW | MT_SECURE)
+					MT_MEMORY | MT_RW | EL3_PAS)
 
 #endif
 /*******************************************************************************
@@ -80,8 +86,18 @@
 	entry_point_info_t *next_image_info;
 
 	assert(sec_state_is_valid(type));
-	next_image_info = (type == NON_SECURE)
-			? &bl33_image_ep_info : &bl32_image_ep_info;
+	if (type == NON_SECURE) {
+		next_image_info = &bl33_image_ep_info;
+	}
+#if ENABLE_RME
+	else if (type == REALM) {
+		next_image_info = &rmm_image_ep_info;
+	}
+#endif
+	else {
+		next_image_info = &bl32_image_ep_info;
+	}
+
 	/*
 	 * None of the images on the ARM development platforms can have 0x0
 	 * as the entrypoint
@@ -169,21 +185,31 @@
 	bl_params_node_t *bl_params = params_from_bl2->head;
 
 	/*
-	 * Copy BL33 and BL32 (if present), entry point information.
+	 * Copy BL33, BL32 and RMM (if present), entry point information.
 	 * They are stored in Secure RAM, in BL2's address space.
 	 */
 	while (bl_params != NULL) {
-		if (bl_params->image_id == BL32_IMAGE_ID)
+		if (bl_params->image_id == BL32_IMAGE_ID) {
 			bl32_image_ep_info = *bl_params->ep_info;
-
-		if (bl_params->image_id == BL33_IMAGE_ID)
+		}
+#if ENABLE_RME
+		else if (bl_params->image_id == RMM_IMAGE_ID) {
+			rmm_image_ep_info = *bl_params->ep_info;
+		}
+#endif
+		else if (bl_params->image_id == BL33_IMAGE_ID) {
 			bl33_image_ep_info = *bl_params->ep_info;
+		}
 
 		bl_params = bl_params->next_params_info;
 	}
 
 	if (bl33_image_ep_info.pc == 0U)
 		panic();
+#if ENABLE_RME
+	if (rmm_image_ep_info.pc == 0U)
+		panic();
+#endif
 #endif /* RESET_TO_BL31 */
 
 # if ARM_LINUX_KERNEL_AS_BL33
@@ -193,7 +219,11 @@
 	 * tree blob (DTB) in x0, while x1-x3 are reserved for future use and
 	 * must be 0.
 	 */
+#if RESET_TO_BL31
 	bl33_image_ep_info.args.arg0 = (u_register_t)ARM_PRELOADED_DTB_BASE;
+#else
+	bl33_image_ep_info.args.arg0 = (u_register_t)hw_config;
+#endif
 	bl33_image_ep_info.args.arg1 = 0U;
 	bl33_image_ep_info.args.arg2 = 0U;
 	bl33_image_ep_info.args.arg3 = 0U;
@@ -355,6 +385,9 @@
 {
 	const mmap_region_t bl_regions[] = {
 		MAP_BL31_TOTAL,
+#if ENABLE_RME
+		ARM_MAP_L0_GPT_REGION,
+#endif
 #if RECLAIM_INIT_CODE
 		MAP_BL_INIT_CODE,
 #endif
@@ -376,6 +409,19 @@
 
 	enable_mmu_el3(0);
 
+#if ENABLE_RME
+	/*
+	 * Initialise Granule Protection library and enable GPC for the primary
+	 * processor. The tables have already been initialized by a previous BL
+	 * stage, so there is no need to provide any PAS here. This function
+	 * sets up pointers to those tables.
+	 */
+	if (gpt_runtime_init() < 0) {
+		ERROR("gpt_runtime_init() failed!\n");
+		panic();
+	}
+#endif /* ENABLE_RME */
+
 	arm_setup_romlib();
 }
 
diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk
index 4d5e8b4..f82392c 100644
--- a/plat/arm/common/arm_common.mk
+++ b/plat/arm/common/arm_common.mk
@@ -52,9 +52,10 @@
 $(eval $(call add_define,ARM_RECOM_STATE_ID_ENC))
 
 # Process ARM_DISABLE_TRUSTED_WDOG flag
-# By default, Trusted Watchdog is always enabled unless SPIN_ON_BL1_EXIT is set
+# By default, Trusted Watchdog is always enabled unless
+# SPIN_ON_BL1_EXIT or ENABLE_RME is set
 ARM_DISABLE_TRUSTED_WDOG	:=	0
-ifeq (${SPIN_ON_BL1_EXIT}, 1)
+ifneq ($(filter 1,${SPIN_ON_BL1_EXIT} ${ENABLE_RME}),)
 ARM_DISABLE_TRUSTED_WDOG	:=	1
 endif
 $(eval $(call assert_boolean,ARM_DISABLE_TRUSTED_WDOG))
@@ -94,10 +95,13 @@
   ifndef PRELOADED_BL33_BASE
     $(error "PRELOADED_BL33_BASE must be set if ARM_LINUX_KERNEL_AS_BL33 is used.")
   endif
-  ifndef ARM_PRELOADED_DTB_BASE
-    $(error "ARM_PRELOADED_DTB_BASE must be set if ARM_LINUX_KERNEL_AS_BL33 is used.")
+  ifeq (${RESET_TO_BL31},1)
+    ifndef ARM_PRELOADED_DTB_BASE
+      $(error "ARM_PRELOADED_DTB_BASE must be set if ARM_LINUX_KERNEL_AS_BL33 is
+       used with RESET_TO_BL31.")
+    endif
+    $(eval $(call add_define,ARM_PRELOADED_DTB_BASE))
   endif
-  $(eval $(call add_define,ARM_PRELOADED_DTB_BASE))
 endif
 
 # Arm Ethos-N NPU SiP service
@@ -206,18 +210,22 @@
 				plat/arm/common/arm_console.c
 
 ifeq (${ARM_XLAT_TABLES_LIB_V1}, 1)
-PLAT_BL_COMMON_SOURCES	+=	lib/xlat_tables/xlat_tables_common.c		\
+PLAT_BL_COMMON_SOURCES 	+=	lib/xlat_tables/xlat_tables_common.c	      \
 				lib/xlat_tables/${ARCH}/xlat_tables.c
 else
+ifeq (${XLAT_MPU_LIB_V1}, 1)
+include lib/xlat_mpu/xlat_mpu.mk
+PLAT_BL_COMMON_SOURCES	+=	${XLAT_MPU_LIB_V1_SRCS}
+else
 include lib/xlat_tables_v2/xlat_tables.mk
-
-PLAT_BL_COMMON_SOURCES	+=	${XLAT_TABLES_LIB_SRCS}
+PLAT_BL_COMMON_SOURCES	+=      ${XLAT_TABLES_LIB_SRCS}
+endif
 endif
 
 ARM_IO_SOURCES		+=	plat/arm/common/arm_io_storage.c		\
 				plat/arm/common/fconf/arm_fconf_io.c
 ifeq (${SPD},spmd)
-    ifeq (${SPMD_SPM_AT_SEL2},1)
+    ifeq (${BL2_ENABLE_SP_LOAD},1)
          ARM_IO_SOURCES		+=	plat/arm/common/fconf/arm_fconf_sp.c
     endif
 endif
@@ -351,7 +359,7 @@
 
     # Include the selected chain of trust sources.
     ifeq (${COT},tbbr)
-	BL1_SOURCES     +=      drivers/auth/tbbr/tbbr_cot_common.c		\
+            BL1_SOURCES	+=	drivers/auth/tbbr/tbbr_cot_common.c		\
 				drivers/auth/tbbr/tbbr_cot_bl1.c
         ifneq (${COT_DESC_IN_DTB},0)
             BL2_SOURCES	+=	lib/fconf/fconf_cot_getter.c
diff --git a/plat/arm/common/arm_image_load.c b/plat/arm/common/arm_image_load.c
index ebf6dff..c411c6c 100644
--- a/plat/arm/common/arm_image_load.c
+++ b/plat/arm/common/arm_image_load.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -32,7 +32,7 @@
 		next_bl_params_cpy_ptr);
 }
 
-#if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
+#if defined(SPD_spmd) && BL2_ENABLE_SP_LOAD
 /*******************************************************************************
  * This function appends Secure Partitions to list of loadable images.
  ******************************************************************************/
@@ -76,7 +76,7 @@
  ******************************************************************************/
 struct bl_load_info *plat_get_bl_image_load_info(void)
 {
-#if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
+#if defined(SPD_spmd) && BL2_ENABLE_SP_LOAD
 	bl_load_info_t *bl_load_info;
 
 	bl_load_info = get_bl_load_info_from_mem_params_desc();
diff --git a/plat/arm/common/fconf/arm_fconf_io.c b/plat/arm/common/fconf/arm_fconf_io.c
index 86fd6d5..aea2f38 100644
--- a/plat/arm/common/fconf/arm_fconf_io.c
+++ b/plat/arm/common/fconf/arm_fconf_io.c
@@ -67,6 +67,7 @@
 	[SOC_FW_CONFIG_ID] = {UUID_SOC_FW_CONFIG},
 	[TOS_FW_CONFIG_ID] = {UUID_TOS_FW_CONFIG},
 	[NT_FW_CONFIG_ID] = {UUID_NT_FW_CONFIG},
+	[RMM_IMAGE_ID] = {UUID_REALM_MONITOR_MGMT_FIRMWARE},
 #endif /* ARM_IO_IN_DTB */
 #if TRUSTED_BOARD_BOOT
 	[TRUSTED_BOOT_FW_CERT_ID] = {UUID_TRUSTED_BOOT_FW_CERT},
@@ -162,6 +163,11 @@
 		(uintptr_t)&arm_uuid_spec[BL33_IMAGE_ID],
 		open_fip
 	},
+	[RMM_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&arm_uuid_spec[RMM_IMAGE_ID],
+		open_fip
+	},
 	[HW_CONFIG_ID] = {
 		&fip_dev_handle,
 		(uintptr_t)&arm_uuid_spec[HW_CONFIG_ID],
diff --git a/plat/arm/common/fconf/fconf_ethosn_getter.c b/plat/arm/common/fconf/fconf_ethosn_getter.c
index 1ba9f3a..0af1a20 100644
--- a/plat/arm/common/fconf/fconf_ethosn_getter.c
+++ b/plat/arm/common/fconf/fconf_ethosn_getter.c
@@ -12,7 +12,7 @@
 #include <libfdt.h>
 #include <plat/arm/common/fconf_ethosn_getter.h>
 
-struct ethosn_config_t ethosn_config;
+struct ethosn_config_t ethosn_config = {.num_cores = 0};
 
 static uint8_t fdt_node_get_status(const void *fdt, int node)
 {
@@ -33,74 +33,86 @@
 int fconf_populate_ethosn_config(uintptr_t config)
 {
 	int ethosn_node;
-	int sub_node;
-	uint8_t ethosn_status;
-	uint32_t core_count = 0U;
-	uint32_t core_addr_idx = 0U;
 	const void *hw_conf_dtb = (const void *)config;
 
 	/* Find offset to node with 'ethosn' compatible property */
-	ethosn_node = fdt_node_offset_by_compatible(hw_conf_dtb, -1, "ethosn");
-	if (ethosn_node < 0) {
-		ERROR("FCONF: Can't find 'ethosn' compatible node in dtb\n");
-		return ethosn_node;
-	}
-
-	/* If the Arm Ethos-N NPU is disabled the core check can be skipped */
-	ethosn_status = fdt_node_get_status(hw_conf_dtb, ethosn_node);
-	if (ethosn_status == ETHOSN_STATUS_DISABLED) {
-		return 0;
-	}
+	INFO("Probing Arm Ethos-N NPU\n");
+	uint32_t total_core_count = 0U;
 
-	fdt_for_each_subnode(sub_node, hw_conf_dtb, ethosn_node) {
-		int err;
-		uintptr_t addr;
-		uint8_t status;
+	fdt_for_each_compatible_node(hw_conf_dtb, ethosn_node, "ethosn") {
+		int sub_node;
+		uint8_t ethosn_status;
+		uint32_t device_core_count = 0U;
 
-		/* Check that the sub node is "ethosn-core" compatible */
-		if (fdt_node_check_compatible(hw_conf_dtb, sub_node,
-					      "ethosn-core") != 0) {
-			/* Ignore incompatible sub node */
+		/* If the Arm Ethos-N NPU is disabled the core check can be skipped */
+		ethosn_status = fdt_node_get_status(hw_conf_dtb, ethosn_node);
+		if (ethosn_status == ETHOSN_STATUS_DISABLED) {
 			continue;
 		}
 
-		/* Including disabled cores */
-		if (core_addr_idx >= ETHOSN_CORE_NUM_MAX) {
-			ERROR("FCONF: Reached max number of Arm Ethos-N NPU cores\n");
-			return -1;
-		}
+		fdt_for_each_subnode(sub_node, hw_conf_dtb, ethosn_node) {
+			int err;
+			uintptr_t core_addr;
+			uint8_t core_status;
 
-		status = fdt_node_get_status(hw_conf_dtb, ethosn_node);
-		if (status == ETHOSN_STATUS_DISABLED) {
-			++core_addr_idx;
-			continue;
+			if (total_core_count >= ETHOSN_CORE_NUM_MAX) {
+				ERROR("FCONF: Reached max number of Arm Ethos-N NPU cores\n");
+				return -FDT_ERR_BADSTRUCTURE;
+			}
+
+			/* Check that the sub node is "ethosn-core" compatible */
+			if (fdt_node_check_compatible(hw_conf_dtb,
+						      sub_node,
+						      "ethosn-core") != 0) {
+				/* Ignore incompatible sub node */
+				continue;
+			}
+
+			core_status = fdt_node_get_status(hw_conf_dtb, sub_node);
+			if (core_status == ETHOSN_STATUS_DISABLED) {
+				continue;
+			}
+
+			err = fdt_get_reg_props_by_index(hw_conf_dtb,
+							 ethosn_node,
+							 device_core_count,
+							 &core_addr,
+							 NULL);
+			if (err < 0) {
+				ERROR(
+				"FCONF: Failed to read reg property for Arm Ethos-N NPU core %u\n",
+						device_core_count);
+				return err;
+			}
+
+			INFO("NPU core probed at address 0x%lx\n", core_addr);
+			ethosn_config.core[total_core_count].addr = core_addr;
+			total_core_count++;
+			device_core_count++;
 		}
 
-		err = fdt_get_reg_props_by_index(hw_conf_dtb, ethosn_node,
-						 core_addr_idx, &addr, NULL);
-		if (err < 0) {
-			ERROR("FCONF: Failed to read reg property for Arm Ethos-N NPU core %u\n",
-			      core_addr_idx);
-			return err;
+		if ((sub_node < 0) && (sub_node != -FDT_ERR_NOTFOUND)) {
+			ERROR("FCONF: Failed to parse sub nodes\n");
+			return -FDT_ERR_BADSTRUCTURE;
 		}
 
-		ethosn_config.core_addr[core_count++] = addr;
-		++core_addr_idx;
+		if (device_core_count == 0U) {
+			ERROR(
+			"FCONF: Enabled Arm Ethos-N NPU device must have at least one enabled core\n");
+			return -FDT_ERR_BADSTRUCTURE;
+		}
 	}
 
-	if ((sub_node < 0) && (sub_node != -FDT_ERR_NOTFOUND)) {
-		ERROR("FCONF: Failed to parse sub nodes\n");
-		return sub_node;
+	if (total_core_count == 0U) {
+		ERROR("FCONF: Can't find 'ethosn' compatible node in dtb\n");
+		return -FDT_ERR_BADSTRUCTURE;
 	}
 
-	/* The Arm Ethos-N NPU can't be used if no cores were found */
-	if (core_count == 0) {
-		ERROR("FCONF: No Arm Ethos-N NPU cores found\n");
-		return -1;
-	}
+	ethosn_config.num_cores = total_core_count;
 
-	ethosn_config.num_cores = core_count;
-	ethosn_config.status = ethosn_status;
+	INFO("%d NPU core%s probed\n",
+	     ethosn_config.num_cores,
+	     ethosn_config.num_cores > 1 ? "s" : "");
 
 	return 0;
 }
diff --git a/plat/arm/common/trp/arm_trp.mk b/plat/arm/common/trp/arm_trp.mk
new file mode 100644
index 0000000..997111f
--- /dev/null
+++ b/plat/arm/common/trp/arm_trp.mk
@@ -0,0 +1,10 @@
+#
+# Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# TRP source files common to ARM standard platforms
+RMM_SOURCES		+=	plat/arm/common/trp/arm_trp_setup.c	\
+				plat/arm/common/arm_topology.c		\
+				plat/common/aarch64/platform_mp_stack.S
diff --git a/plat/arm/common/trp/arm_trp_setup.c b/plat/arm/common/trp/arm_trp_setup.c
new file mode 100644
index 0000000..8e48293
--- /dev/null
+++ b/plat/arm/common/trp/arm_trp_setup.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <drivers/arm/pl011.h>
+#include <drivers/console.h>
+#include <plat/arm/common/plat_arm.h>
+#include <platform_def.h>
+
+/*******************************************************************************
+ * Initialize the UART
+ ******************************************************************************/
+static console_t arm_trp_runtime_console;
+
+void arm_trp_early_platform_setup(void)
+{
+	/*
+	 * Initialize a console different from the one already in use to
+	 * display messages from the TRP.
+	 */
+	int rc = console_pl011_register(PLAT_ARM_TRP_UART_BASE,
+					PLAT_ARM_TRP_UART_CLK_IN_HZ,
+					ARM_CONSOLE_BAUDRATE,
+					&arm_trp_runtime_console);
+	if (rc == 0) {
+		panic();
+	}
+
+	console_set_scope(&arm_trp_runtime_console,
+			  CONSOLE_FLAG_BOOT | CONSOLE_FLAG_RUNTIME);
+}
+
+void trp_early_platform_setup(void)
+{
+	arm_trp_early_platform_setup();
+}
diff --git a/plat/arm/css/sgi/aarch64/sgi_helper.S b/plat/arm/css/sgi/aarch64/sgi_helper.S
index 04bfb77..ced59e8 100644
--- a/plat/arm/css/sgi/aarch64/sgi_helper.S
+++ b/plat/arm/css/sgi/aarch64/sgi_helper.S
@@ -9,6 +9,8 @@
 #include <platform_def.h>
 #include <cortex_a75.h>
 #include <neoverse_n1.h>
+#include <neoverse_v1.h>
+#include <neoverse_n2.h>
 #include <cpu_macros.S>
 
 	.globl	plat_arm_calc_core_pos
@@ -66,6 +68,8 @@
 func plat_reset_handler
 	jump_if_cpu_midr CORTEX_A75_MIDR, A75
 	jump_if_cpu_midr NEOVERSE_N1_MIDR, N1
+	jump_if_cpu_midr NEOVERSE_V1_MIDR, V1
+	jump_if_cpu_midr NEOVERSE_N2_MIDR, N2
 	ret
 
 	/* -----------------------------------------------------
@@ -85,4 +89,18 @@
 	msr	NEOVERSE_N1_CPUPWRCTLR_EL1, x0
 	isb
 	ret
+
+V1:
+	mrs	x0, NEOVERSE_V1_CPUPWRCTLR_EL1
+	bic	x0, x0, #NEOVERSE_V1_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+	msr	NEOVERSE_V1_CPUPWRCTLR_EL1, x0
+	isb
+	ret
+
+N2:
+	mrs	x0, NEOVERSE_N2_CPUPWRCTLR_EL1
+	bic	x0, x0, #NEOVERSE_N2_CORE_PWRDN_EN_BIT
+	msr	NEOVERSE_N2_CPUPWRCTLR_EL1, x0
+	isb
+	ret
 endfunc plat_reset_handler
diff --git a/plat/imx/imx8m/imx8mm/imx8mm_io_storage.c b/plat/imx/common/imx_io_storage.c
similarity index 93%
rename from plat/imx/imx8m/imx8mm/imx8mm_io_storage.c
rename to plat/imx/common/imx_io_storage.c
index ff6687e..bb35662 100644
--- a/plat/imx/imx8m/imx8mm/imx8mm_io_storage.c
+++ b/plat/imx/common/imx_io_storage.c
@@ -6,10 +6,10 @@
 
 #include <assert.h>
 
+#include <common/debug.h>
 #include <drivers/io/io_block.h>
 #include <drivers/io/io_driver.h>
 #include <drivers/io/io_fip.h>
-#include <drivers/io/io_driver.h>
 #include <drivers/io/io_memmap.h>
 #include <drivers/mmc.h>
 #include <lib/utils_def.h>
@@ -21,21 +21,21 @@
 static const io_dev_connector_t *fip_dev_con;
 static uintptr_t fip_dev_handle;
 
-#ifndef IMX8MM_FIP_MMAP
+#ifndef IMX_FIP_MMAP
 static const io_dev_connector_t *mmc_dev_con;
 static uintptr_t mmc_dev_handle;
 
 static const io_block_spec_t mmc_fip_spec = {
-	.offset = IMX8MM_FIP_MMC_BASE,
-	.length = IMX8MM_FIP_SIZE
+	.offset = IMX_FIP_MMC_BASE,
+	.length = IMX_FIP_SIZE
 };
 
 static const io_block_dev_spec_t mmc_dev_spec = {
 	/* It's used as temp buffer in block driver. */
 	.buffer		= {
-		.offset	= IMX8MM_FIP_BASE,
+		.offset	= IMX_FIP_BASE,
 		/* do we need a new value? */
-		.length = IMX8MM_FIP_SIZE
+		.length = IMX_FIP_SIZE
 	},
 	.ops		= {
 		.read	= mmc_read_blocks,
@@ -51,8 +51,8 @@
 static uintptr_t memmap_dev_handle;
 
 static const io_block_spec_t fip_block_spec = {
-	.offset = IMX8MM_FIP_BASE,
-	.length = IMX8MM_FIP_SIZE
+	.offset = IMX_FIP_BASE,
+	.length = IMX_FIP_SIZE
 };
 static int open_memmap(const uintptr_t spec);
 #endif
@@ -113,6 +113,7 @@
 };
 #endif /* TRUSTED_BOARD_BOOT */
 
+/* TODO: this structure is replicated multiple times. Rationalize it! */
 struct plat_io_policy {
 	uintptr_t *dev_handle;
 	uintptr_t image_spec;
@@ -120,7 +121,7 @@
 };
 
 static const struct plat_io_policy policies[] = {
-#ifndef IMX8MM_FIP_MMAP
+#ifndef IMX_FIP_MMAP
 	[FIP_IMAGE_ID] = {
 		&mmc_dev_handle,
 		(uintptr_t)&mmc_fip_spec,
@@ -219,7 +220,7 @@
 	return result;
 }
 
-#ifndef IMX8MM_FIP_MMAP
+#ifndef IMX_FIP_MMAP
 static int open_mmc(const uintptr_t spec)
 {
 	int result;
@@ -270,11 +271,11 @@
 	return result;
 }
 
-void plat_imx8mm_io_setup(void)
+void plat_imx_io_setup(void)
 {
 	int result __unused;
 
-#ifndef IMX8MM_FIP_MMAP
+#ifndef IMX_FIP_MMAP
 	result = register_io_dev_block(&mmc_dev_con);
 	assert(result == 0);
 
diff --git a/plat/imx/imx7/common/imx7.mk b/plat/imx/imx7/common/imx7.mk
index 3a95772..fdde9a9 100644
--- a/plat/imx/imx7/common/imx7.mk
+++ b/plat/imx/imx7/common/imx7.mk
@@ -16,6 +16,7 @@
 				-Iplat/imx/imx7/include			\
 				-Idrivers/imx/timer			\
 				-Idrivers/imx/usdhc			\
+				-Iinclude/common/tbbr
 
 # Translation tables library
 include lib/xlat_tables_v2/xlat_tables.mk
@@ -46,7 +47,7 @@
 				plat/imx/imx7/common/imx7_bl2_el3_common.c	\
 				plat/imx/imx7/common/imx7_helpers.S		\
 				plat/imx/imx7/common/imx7_image_load.c		\
-				plat/imx/imx7/common/imx7_io_storage.c		\
+				plat/imx/common/imx_io_storage.c		\
 				plat/imx/common/aarch32/imx_uart_console.S	\
 				${XLAT_TABLES_LIB_SRCS}
 
diff --git a/plat/imx/imx7/common/imx7_bl2_el3_common.c b/plat/imx/imx7/common/imx7_bl2_el3_common.c
index 7f156e3..4e5028c 100644
--- a/plat/imx/imx7/common/imx7_bl2_el3_common.c
+++ b/plat/imx/imx7/common/imx7_bl2_el3_common.c
@@ -173,7 +173,7 @@
 	console_set_scope(&console, console_scope);
 
 	/* Open handles to persistent storage */
-	plat_imx7_io_setup();
+	plat_imx_io_setup();
 
 	/* Setup higher-level functionality CAAM, RTC etc */
 	imx_caam_init();
@@ -183,7 +183,7 @@
 	VERBOSE("\tOPTEE       0x%08x-0x%08x\n", IMX7_OPTEE_BASE, IMX7_OPTEE_LIMIT);
 	VERBOSE("\tATF/BL2     0x%08x-0x%08x\n", BL2_RAM_BASE, BL2_RAM_LIMIT);
 	VERBOSE("\tSHRAM       0x%08x-0x%08x\n", SHARED_RAM_BASE, SHARED_RAM_LIMIT);
-	VERBOSE("\tFIP         0x%08x-0x%08x\n", IMX7_FIP_BASE, IMX7_FIP_LIMIT);
+	VERBOSE("\tFIP         0x%08x-0x%08x\n", IMX_FIP_BASE, IMX_FIP_LIMIT);
 	VERBOSE("\tDTB-OVERLAY 0x%08x-0x%08x\n", IMX7_DTB_OVERLAY_BASE, IMX7_DTB_OVERLAY_LIMIT);
 	VERBOSE("\tDTB         0x%08x-0x%08x\n", IMX7_DTB_BASE, IMX7_DTB_LIMIT);
 	VERBOSE("\tUBOOT/BL33  0x%08x-0x%08x\n", IMX7_UBOOT_BASE, IMX7_UBOOT_LIMIT);
diff --git a/plat/imx/imx7/common/imx7_io_storage.c b/plat/imx/imx7/common/imx7_io_storage.c
deleted file mode 100644
index 977181d..0000000
--- a/plat/imx/imx7/common/imx7_io_storage.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <assert.h>
-
-#include <platform_def.h>
-
-#include <common/debug.h>
-#include <drivers/io/io_block.h>
-#include <drivers/io/io_driver.h>
-#include <drivers/io/io_fip.h>
-#include <drivers/io/io_memmap.h>
-#include <drivers/mmc.h>
-#include <tools_share/firmware_image_package.h>
-
-static const io_dev_connector_t *fip_dev_con;
-static uintptr_t fip_dev_handle;
-
-#ifndef IMX7_FIP_MMAP
-static const io_dev_connector_t *mmc_dev_con;
-static uintptr_t mmc_dev_handle;
-
-static const io_block_spec_t mmc_fip_spec = {
-	.offset = IMX7_FIP_MMC_BASE,
-	.length = IMX7_FIP_SIZE
-};
-
-static const io_block_dev_spec_t mmc_dev_spec = {
-	/* It's used as temp buffer in block driver. */
-	.buffer		= {
-		.offset	= IMX7_FIP_BASE,
-		/* do we need a new value? */
-		.length = IMX7_FIP_SIZE
-	},
-	.ops		= {
-		.read	= mmc_read_blocks,
-		.write	= mmc_write_blocks,
-	},
-	.block_size	= MMC_BLOCK_SIZE,
-};
-
-static int open_mmc(const uintptr_t spec);
-
-#else
-static const io_dev_connector_t *memmap_dev_con;
-static uintptr_t memmap_dev_handle;
-
-static const io_block_spec_t fip_block_spec = {
-	.offset = IMX7_FIP_BASE,
-	.length = IMX7_FIP_SIZE
-};
-static int open_memmap(const uintptr_t spec);
-#endif
-static int open_fip(const uintptr_t spec);
-
-static const io_uuid_spec_t bl32_uuid_spec = {
-	.uuid = UUID_SECURE_PAYLOAD_BL32,
-};
-
-static const io_uuid_spec_t bl32_extra1_uuid_spec = {
-	.uuid = UUID_SECURE_PAYLOAD_BL32_EXTRA1,
-};
-
-static const io_uuid_spec_t bl32_extra2_uuid_spec = {
-	.uuid = UUID_SECURE_PAYLOAD_BL32_EXTRA2,
-};
-
-static const io_uuid_spec_t bl33_uuid_spec = {
-	.uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
-};
-
-#if TRUSTED_BOARD_BOOT
-static const io_uuid_spec_t tb_fw_cert_uuid_spec = {
-	.uuid = UUID_TRUSTED_BOOT_FW_CERT,
-};
-
-static const io_uuid_spec_t trusted_key_cert_uuid_spec = {
-	.uuid = UUID_TRUSTED_KEY_CERT,
-};
-
-static const io_uuid_spec_t tos_fw_key_cert_uuid_spec = {
-	.uuid = UUID_TRUSTED_OS_FW_KEY_CERT,
-};
-
-static const io_uuid_spec_t tos_fw_cert_uuid_spec = {
-	.uuid = UUID_TRUSTED_OS_FW_CONTENT_CERT,
-};
-
-static const io_uuid_spec_t nt_fw_key_cert_uuid_spec = {
-	.uuid = UUID_NON_TRUSTED_FW_KEY_CERT,
-};
-
-static const io_uuid_spec_t nt_fw_cert_uuid_spec = {
-	.uuid = UUID_NON_TRUSTED_FW_CONTENT_CERT,
-};
-#endif /* TRUSTED_BOARD_BOOT */
-
-/* TODO: this structure is replicated multiple times. rationalize it ! */
-struct plat_io_policy {
-	uintptr_t *dev_handle;
-	uintptr_t image_spec;
-	int (*check)(const uintptr_t spec);
-};
-
-static const struct plat_io_policy policies[] = {
-#ifndef IMX7_FIP_MMAP
-	[FIP_IMAGE_ID] = {
-		&mmc_dev_handle,
-		(uintptr_t)&mmc_fip_spec,
-		open_mmc
-	},
-#else
-	[FIP_IMAGE_ID] = {
-		&memmap_dev_handle,
-		(uintptr_t)&fip_block_spec,
-		open_memmap
-	},
-#endif
-	[BL32_IMAGE_ID] = {
-		&fip_dev_handle,
-		(uintptr_t)&bl32_uuid_spec,
-		open_fip
-	},
-	[BL32_EXTRA1_IMAGE_ID] = {
-		&fip_dev_handle,
-		(uintptr_t)&bl32_extra1_uuid_spec,
-		open_fip
-	},
-	[BL32_EXTRA2_IMAGE_ID] = {
-		&fip_dev_handle,
-		(uintptr_t)&bl32_extra2_uuid_spec,
-		open_fip
-	},
-	[BL33_IMAGE_ID] = {
-		&fip_dev_handle,
-		(uintptr_t)&bl33_uuid_spec,
-		open_fip
-	},
-#if TRUSTED_BOARD_BOOT
-	[TRUSTED_BOOT_FW_CERT_ID] = {
-		&fip_dev_handle,
-		(uintptr_t)&tb_fw_cert_uuid_spec,
-		open_fip
-	},
-	[TRUSTED_KEY_CERT_ID] = {
-		&fip_dev_handle,
-		(uintptr_t)&trusted_key_cert_uuid_spec,
-		open_fip
-	},
-	[TRUSTED_OS_FW_KEY_CERT_ID] = {
-		&fip_dev_handle,
-		(uintptr_t)&tos_fw_key_cert_uuid_spec,
-		open_fip
-	},
-	[NON_TRUSTED_FW_KEY_CERT_ID] = {
-		&fip_dev_handle,
-		(uintptr_t)&nt_fw_key_cert_uuid_spec,
-		open_fip
-	},
-	[TRUSTED_OS_FW_CONTENT_CERT_ID] = {
-		&fip_dev_handle,
-		(uintptr_t)&tos_fw_cert_uuid_spec,
-		open_fip
-	},
-	[NON_TRUSTED_FW_CONTENT_CERT_ID] = {
-		&fip_dev_handle,
-		(uintptr_t)&nt_fw_cert_uuid_spec,
-		open_fip
-	},
-#endif /* TRUSTED_BOARD_BOOT */
-};
-
-static int open_fip(const uintptr_t spec)
-{
-	int result;
-	uintptr_t local_image_handle;
-
-	/* See if a Firmware Image Package is available */
-	result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
-	if (result == 0) {
-		result = io_open(fip_dev_handle, spec, &local_image_handle);
-		if (result == 0) {
-			VERBOSE("Using FIP\n");
-			io_close(local_image_handle);
-		}
-	}
-	return result;
-}
-
-#ifndef IMX7_FIP_MMAP
-static int open_mmc(const uintptr_t spec)
-{
-	int result;
-	uintptr_t local_handle;
-
-	result = io_dev_init(mmc_dev_handle, (uintptr_t)NULL);
-	if (result == 0) {
-		result = io_open(mmc_dev_handle, spec, &local_handle);
-		if (result == 0)
-			io_close(local_handle);
-	}
-	return result;
-}
-#else
-static int open_memmap(const uintptr_t spec)
-{
-	int result;
-	uintptr_t local_image_handle;
-
-	result = io_dev_init(memmap_dev_handle, (uintptr_t)NULL);
-	if (result == 0) {
-		result = io_open(memmap_dev_handle, spec, &local_image_handle);
-		if (result == 0) {
-			VERBOSE("Using Memmap\n");
-			io_close(local_image_handle);
-		}
-	}
-	return result;
-}
-#endif
-
-int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
-			  uintptr_t *image_spec)
-{
-	int result;
-	const struct plat_io_policy *policy;
-
-	assert(image_id < ARRAY_SIZE(policies));
-
-	policy = &policies[image_id];
-	result = policy->check(policy->image_spec);
-	assert(result == 0);
-
-	*image_spec = policy->image_spec;
-	*dev_handle = *policy->dev_handle;
-
-	return result;
-}
-
-void plat_imx7_io_setup(void)
-{
-	int result __unused;
-
-#ifndef IMX7_FIP_MMAP
-	result = register_io_dev_block(&mmc_dev_con);
-	assert(result == 0);
-
-	result = io_dev_open(mmc_dev_con, (uintptr_t)&mmc_dev_spec,
-			     &mmc_dev_handle);
-	assert(result == 0);
-
-#else
-	result = register_io_dev_memmap(&memmap_dev_con);
-	assert(result == 0);
-
-	result = io_dev_open(memmap_dev_con, (uintptr_t)NULL,
-			     &memmap_dev_handle);
-	assert(result == 0);
-
-#endif
-	result = register_io_dev_fip(&fip_dev_con);
-	assert(result == 0);
-
-	result = io_dev_open(fip_dev_con, (uintptr_t)NULL,
-			     &fip_dev_handle);
-	assert(result == 0);
-}
diff --git a/plat/imx/imx7/include/imx7_def.h b/plat/imx/imx7/include/imx7_def.h
index 77a8ca3..d92a2d1 100644
--- a/plat/imx/imx7/include/imx7_def.h
+++ b/plat/imx/imx7/include/imx7_def.h
@@ -13,7 +13,7 @@
 /*******************************************************************************
  * Function and variable prototypes
  ******************************************************************************/
-void plat_imx7_io_setup(void);
+void plat_imx_io_setup(void);
 void imx7_platform_setup(u_register_t arg1, u_register_t arg2,
 			 u_register_t arg3, u_register_t arg4);
 
diff --git a/plat/imx/imx7/picopi/include/platform_def.h b/plat/imx/imx7/picopi/include/platform_def.h
index 141571c..5f2975d 100644
--- a/plat/imx/imx7/picopi/include/platform_def.h
+++ b/plat/imx/imx7/picopi/include/platform_def.h
@@ -92,12 +92,12 @@
 #define IMX7_UBOOT_LIMIT		(IMX7_UBOOT_BASE + IMX7_UBOOT_SIZE)
 
 /* Define FIP image absolute location 0x80000000 - 0x80100000 */
-#define IMX7_FIP_SIZE			0x00100000
-#define IMX7_FIP_BASE			(DRAM_BASE)
-#define IMX7_FIP_LIMIT			(IMX7_FIP_BASE + IMX7_FIP_SIZE)
+#define IMX_FIP_SIZE			0x00100000
+#define IMX_FIP_BASE			(DRAM_BASE)
+#define IMX_FIP_LIMIT			(IMX_FIP_BASE + IMX_FIP_SIZE)
 
 /* Define FIP image location at 1MB offset */
-#define IMX7_FIP_MMC_BASE		(1024 * 1024)
+#define IMX_FIP_MMC_BASE		(1024 * 1024)
 
 /* Define the absolute location of DTB 0x83000000 - 0x83100000 */
 #define IMX7_DTB_SIZE			0x00100000
diff --git a/plat/imx/imx7/warp7/include/platform_def.h b/plat/imx/imx7/warp7/include/platform_def.h
index 4afcb54..683e50d 100644
--- a/plat/imx/imx7/warp7/include/platform_def.h
+++ b/plat/imx/imx7/warp7/include/platform_def.h
@@ -94,12 +94,12 @@
 #define IMX7_UBOOT_LIMIT		(IMX7_UBOOT_BASE + IMX7_UBOOT_SIZE)
 
 /* Define FIP image absolute location 0x80000000 - 0x80100000 */
-#define IMX7_FIP_SIZE			0x00100000
-#define IMX7_FIP_BASE			(DRAM_BASE)
-#define IMX7_FIP_LIMIT			(IMX7_FIP_BASE + IMX7_FIP_SIZE)
+#define IMX_FIP_SIZE			0x00100000
+#define IMX_FIP_BASE			(DRAM_BASE)
+#define IMX_FIP_LIMIT			(IMX_FIP_BASE + IMX_FIP_SIZE)
 
 /* Define FIP image location at 1MB offset */
-#define IMX7_FIP_MMC_BASE		(1024 * 1024)
+#define IMX_FIP_MMC_BASE		(1024 * 1024)
 
 /* Define the absolute location of DTB 0x83000000 - 0x83100000 */
 #define IMX7_DTB_SIZE			0x00100000
diff --git a/plat/imx/imx8m/imx8mm/imx8mm_image_load.c b/plat/imx/imx8m/imx8m_image_load.c
similarity index 100%
rename from plat/imx/imx8m/imx8mm/imx8mm_image_load.c
rename to plat/imx/imx8m/imx8m_image_load.c
diff --git a/plat/imx/imx8m/imx8mm/imx8mm_bl2_el3_setup.c b/plat/imx/imx8m/imx8mm/imx8mm_bl2_el3_setup.c
index 937774c..c39dd93 100644
--- a/plat/imx/imx8m/imx8mm/imx8mm_bl2_el3_setup.c
+++ b/plat/imx/imx8m/imx8mm/imx8mm_bl2_el3_setup.c
@@ -88,7 +88,7 @@
 	imx8mm_usdhc_setup();
 
 	/* Open handles to a FIP image */
-	plat_imx8mm_io_setup();
+	plat_imx_io_setup();
 }
 
 void bl2_el3_plat_arch_setup(void)
diff --git a/plat/imx/imx8m/imx8mm/include/imx8mm_private.h b/plat/imx/imx8m/imx8mm/include/imx8mm_private.h
index 52d13f0..5e0ef97 100644
--- a/plat/imx/imx8m/imx8mm/include/imx8mm_private.h
+++ b/plat/imx/imx8m/imx8mm/include/imx8mm_private.h
@@ -10,6 +10,6 @@
 /*******************************************************************************
  * Function and variable prototypes
  ******************************************************************************/
-void plat_imx8mm_io_setup(void);
+void plat_imx_io_setup(void);
 
 #endif /* IMX8MM_PRIVATE_H */
diff --git a/plat/imx/imx8m/imx8mm/include/platform_def.h b/plat/imx/imx8m/imx8mm/include/platform_def.h
index 940d22b..e86938b 100644
--- a/plat/imx/imx8m/imx8mm/include/platform_def.h
+++ b/plat/imx/imx8m/imx8mm/include/platform_def.h
@@ -41,12 +41,12 @@
 #define BL2_LIMIT			U(0x940000)
 #define BL31_BASE			U(0x900000)
 #define BL31_LIMIT			U(0x920000)
-#define IMX8MM_FIP_BASE			U(0x40310000)
-#define IMX8MM_FIP_SIZE			U(0x000300000)
-#define IMX8MM_FIP_LIMIT		U(FIP_BASE + FIP_SIZE)
+#define IMX_FIP_BASE			U(0x40310000)
+#define IMX_FIP_SIZE			U(0x000300000)
+#define IMX_FIP_LIMIT			U(FIP_BASE + FIP_SIZE)
 
 /* Define FIP image location on eMMC */
-#define IMX8MM_FIP_MMC_BASE		U(0x100000)
+#define IMX_FIP_MMC_BASE		U(0x100000)
 
 #define PLAT_IMX8MM_BOOT_MMC_BASE	U(0x30B50000) /* SD */
 #else
diff --git a/plat/imx/imx8m/imx8mm/platform.mk b/plat/imx/imx8m/imx8mm/platform.mk
index 1863233..ac5a809 100644
--- a/plat/imx/imx8m/imx8mm/platform.mk
+++ b/plat/imx/imx8m/imx8mm/platform.mk
@@ -63,8 +63,8 @@
 				drivers/io/io_storage.c				\
 				drivers/imx/usdhc/imx_usdhc.c			\
 				plat/imx/imx8m/imx8mm/imx8mm_bl2_mem_params_desc.c	\
-				plat/imx/imx8m/imx8mm/imx8mm_io_storage.c		\
-				plat/imx/imx8m/imx8mm/imx8mm_image_load.c		\
+				plat/imx/common/imx_io_storage.c		\
+				plat/imx/imx8m/imx8m_image_load.c		\
 				lib/optee/optee_utils.c
 endif
 
diff --git a/plat/imx/imx8m/imx8mp/imx8mp_bl2_el3_setup.c b/plat/imx/imx8m/imx8mp/imx8mp_bl2_el3_setup.c
new file mode 100644
index 0000000..08cbeeb
--- /dev/null
+++ b/plat/imx/imx8m/imx8mp/imx8mp_bl2_el3_setup.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/desc_image_load.h>
+#include <common/tbbr/tbbr_img_def.h>
+#include <context.h>
+#include <drivers/arm/tzc380.h>
+#include <drivers/console.h>
+#include <drivers/generic_delay_timer.h>
+#include <drivers/mmc.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/mmio.h>
+#include <lib/optee_utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#include <imx8m_caam.h>
+#include "imx8mp_private.h"
+#include <imx_aipstz.h>
+#include <imx_rdc.h>
+#include <imx_uart.h>
+#include <plat/common/platform.h>
+#include <plat_imx8.h>
+#include <platform_def.h>
+
+
+static const struct aipstz_cfg aipstz[] = {
+	{IMX_AIPSTZ1, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, },
+	{IMX_AIPSTZ2, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, },
+	{IMX_AIPSTZ3, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, },
+	{IMX_AIPSTZ4, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, },
+	{0},
+};
+
+void bl2_el3_early_platform_setup(u_register_t arg0, u_register_t arg1,
+		u_register_t arg2, u_register_t arg3)
+{
+	static console_t console;
+	unsigned int i;
+
+	/* Enable CSU NS access permission */
+	for (i = 0U; i < 64; i++) {
+		mmio_write_32(IMX_CSU_BASE + i * 4, 0x00ff00ff);
+	}
+
+	imx_aipstz_init(aipstz);
+
+	console_imx_uart_register(IMX_BOOT_UART_BASE, IMX_BOOT_UART_CLK_IN_HZ,
+		IMX_CONSOLE_BAUDRATE, &console);
+
+	generic_delay_timer_init();
+
+	/* select the CKIL source to 32K OSC */
+	mmio_write_32(IMX_ANAMIX_BASE + ANAMIX_MISC_CTL, 0x1);
+
+	/* Open handles to a FIP image */
+	plat_imx_io_setup();
+}
+
+void bl2_el3_plat_arch_setup(void)
+{
+}
+
+void bl2_platform_setup(void)
+{
+}
+
+int bl2_plat_handle_post_image_load(unsigned int image_id)
+{
+	int err = 0;
+	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
+	bl_mem_params_node_t *pager_mem_params = NULL;
+	bl_mem_params_node_t *paged_mem_params = NULL;
+
+	assert(bl_mem_params);
+
+	switch (image_id) {
+	case BL32_IMAGE_ID:
+		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
+		assert(pager_mem_params);
+
+		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
+		assert(paged_mem_params);
+
+		err = parse_optee_header(&bl_mem_params->ep_info,
+					 &pager_mem_params->image_info,
+					 &paged_mem_params->image_info);
+		if (err != 0) {
+			WARN("OPTEE header parse error.\n");
+		}
+
+		break;
+	default:
+		/* Do nothing in default case */
+		break;
+	}
+
+	return err;
+}
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+	return COUNTER_FREQUENCY;
+}
+
+void bl2_plat_runtime_setup(void)
+{
+	return;
+}
diff --git a/plat/imx/imx8m/imx8mp/imx8mp_bl2_mem_params_desc.c b/plat/imx/imx8m/imx8mp/imx8mp_bl2_mem_params_desc.c
new file mode 100644
index 0000000..f2f6808
--- /dev/null
+++ b/plat/imx/imx8m/imx8mp/imx8mp_bl2_mem_params_desc.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <common/desc_image_load.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+
+static bl_mem_params_node_t bl2_mem_params_descs[] = {
+	{
+		.image_id = BL31_IMAGE_ID,
+		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
+				      entry_point_info_t,
+				      SECURE | EXECUTABLE | EP_FIRST_EXE),
+		.ep_info.pc = BL31_BASE,
+		.ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+					DISABLE_ALL_EXCEPTIONS),
+		SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2, image_info_t,
+				      IMAGE_ATTRIB_PLAT_SETUP),
+		.image_info.image_base = BL31_BASE,
+		.image_info.image_max_size = BL31_LIMIT - BL31_BASE,
+		.next_handoff_image_id = INVALID_IMAGE_ID,
+	},
+	{
+		.image_id = BL32_IMAGE_ID,
+
+		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
+				      entry_point_info_t,
+				      SECURE | EXECUTABLE),
+		.ep_info.pc = BL32_BASE,
+
+		SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2,
+				      image_info_t, 0),
+
+		.image_info.image_base = BL32_BASE,
+		.image_info.image_max_size = BL32_SIZE,
+
+		.next_handoff_image_id = BL33_IMAGE_ID,
+	},
+	{
+		.image_id = BL32_EXTRA1_IMAGE_ID,
+
+		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
+				      entry_point_info_t,
+				      SECURE | NON_EXECUTABLE),
+
+		SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2,
+				      image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+		.image_info.image_base = BL32_BASE,
+		.image_info.image_max_size =  BL32_SIZE,
+
+		.next_handoff_image_id = INVALID_IMAGE_ID,
+	},
+	{
+		/* This is a zero sized image so we don't set base or size */
+		.image_id = BL32_EXTRA2_IMAGE_ID,
+
+		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+				      VERSION_2, entry_point_info_t,
+				      SECURE | NON_EXECUTABLE),
+
+		SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+				      VERSION_2, image_info_t,
+				      IMAGE_ATTRIB_SKIP_LOADING),
+		.next_handoff_image_id = INVALID_IMAGE_ID,
+	},
+	{
+		.image_id = BL33_IMAGE_ID,
+		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
+				      entry_point_info_t,
+				      NON_SECURE | EXECUTABLE),
+		# ifdef PRELOADED_BL33_BASE
+			.ep_info.pc = PLAT_NS_IMAGE_OFFSET,
+
+			SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+					      VERSION_2, image_info_t,
+					      IMAGE_ATTRIB_SKIP_LOADING),
+		# else
+			.ep_info.pc = PLAT_NS_IMAGE_OFFSET,
+
+			SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+					      VERSION_2, image_info_t, 0),
+			.image_info.image_base = PLAT_NS_IMAGE_OFFSET,
+			.image_info.image_max_size = PLAT_NS_IMAGE_SIZE,
+		# endif /* PRELOADED_BL33_BASE */
+
+		.next_handoff_image_id = INVALID_IMAGE_ID,
+	}
+};
+
+REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs);
diff --git a/plat/imx/imx8m/imx8mp/imx8mp_rotpk.S b/plat/imx/imx8m/imx8mp/imx8mp_rotpk.S
new file mode 100644
index 0000000..a4c7ce1
--- /dev/null
+++ b/plat/imx/imx8m/imx8mp/imx8mp_rotpk.S
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+	.global imx8mp_rotpk_hash
+	.global imx8mp_rotpk_hash_end
+imx8mp_rotpk_hash:
+	/* DER header */
+	.byte 0x30, 0x31, 0x30, 0x0D, 0x06, 0x09, 0x60, 0x86, 0x48
+	.byte 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20
+	/* SHA256 */
+	.incbin ROTPK_HASH
+imx8mp_rotpk_hash_end:
diff --git a/plat/imx/imx8m/imx8mp/imx8mp_trusted_boot.c b/plat/imx/imx8m/imx8mp/imx8mp_trusted_boot.c
new file mode 100644
index 0000000..5d1a6c2
--- /dev/null
+++ b/plat/imx/imx8m/imx8mp/imx8mp_trusted_boot.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <plat/common/platform.h>
+
+extern char imx8mp_rotpk_hash[], imx8mp_rotpk_hash_end[];
+
+int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len,
+			unsigned int *flags)
+{
+	*key_ptr = imx8mp_rotpk_hash;
+	*key_len = imx8mp_rotpk_hash_end - imx8mp_rotpk_hash;
+	*flags = ROTPK_IS_HASH;
+
+	return 0;
+}
+
+int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr)
+{
+	*nv_ctr = 0;
+
+	return 0;
+}
+
+int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr)
+{
+	return 1;
+}
+
+int plat_get_mbedtls_heap(void **heap_addr, size_t *heap_size)
+{
+	return get_mbedtls_heap_helper(heap_addr, heap_size);
+}
diff --git a/plat/imx/imx8m/imx8mp/include/imx8mp_private.h b/plat/imx/imx8m/imx8mp/include/imx8mp_private.h
new file mode 100644
index 0000000..0a02334
--- /dev/null
+++ b/plat/imx/imx8m/imx8mp/include/imx8mp_private.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef IMX8MP_PRIVATE_H
+#define IMX8MP_PRIVATE_H
+
+/*******************************************************************************
+ * Function and variable prototypes
+ ******************************************************************************/
+void plat_imx_io_setup(void);
+
+#endif /* IMX8MP_PRIVATE_H */
diff --git a/plat/imx/imx8m/imx8mp/include/platform_def.h b/plat/imx/imx8m/imx8mp/include/platform_def.h
index 832bed1..14b7ea0 100644
--- a/plat/imx/imx8m/imx8mp/include/platform_def.h
+++ b/plat/imx/imx8m/imx8mp/include/platform_def.h
@@ -6,6 +6,7 @@
 #ifndef PLATFORM_DEF_H
 #define PLATFORM_DEF_H
 
+#include <common/tbbr/tbbr_img_def.h>
 #include <lib/utils_def.h>
 #include <lib/xlat_tables/xlat_tables_v2.h>
 
@@ -34,8 +35,23 @@
 #define PLAT_WAIT_RET_STATE		U(1)
 #define PLAT_STOP_OFF_STATE		U(3)
 
+#if defined(NEED_BL2)
+#define BL2_BASE			U(0x960000)
+#define BL2_LIMIT			U(0x980000)
+#define BL31_BASE			U(0x940000)
+#define BL31_LIMIT			U(0x960000)
+#define IMX_FIP_BASE			U(0x40310000)
+#define IMX_FIP_SIZE			U(0x000300000)
+#define IMX_FIP_LIMIT			U(IMX_FIP_BASE + IMX_FIP_SIZE)
+
+/* Define FIP image location on eMMC */
+#define IMX_FIP_MMC_BASE		U(0x100000)
+
+#define PLAT_IMX8MP_BOOT_MMC_BASE	U(0x30B50000) /* SD */
+#else
 #define BL31_BASE			U(0x960000)
 #define BL31_LIMIT			U(0x980000)
+#endif
 
 #define PLAT_PRI_BITS			U(3)
 #define PLAT_SDEI_CRITICAL_PRI		0x10
@@ -44,6 +60,7 @@
 
 /* non-secure uboot base */
 #define PLAT_NS_IMAGE_OFFSET		U(0x40200000)
+#define PLAT_NS_IMAGE_SIZE		U(0x00200000)
 
 /* GICv3 base address */
 #define PLAT_GICD_BASE			U(0x38800000)
@@ -150,6 +167,10 @@
 
 #define IMX_WDOG_B_RESET
 
+#define MAX_IO_HANDLES			3U
+#define MAX_IO_DEVICES			2U
+#define MAX_IO_BLOCK_DEVICES		1U
+
 #define GIC_MAP		MAP_REGION_FLAT(IMX_GIC_BASE, IMX_GIC_SIZE, MT_DEVICE | MT_RW)
 #define AIPS_MAP	MAP_REGION_FLAT(IMX_AIPS_BASE, IMX_AIPS_SIZE, MT_DEVICE | MT_RW) /* AIPS map */
 #define OCRAM_S_MAP	MAP_REGION_FLAT(OCRAM_S_BASE, OCRAM_S_SIZE, MT_MEMORY | MT_RW) /* OCRAM_S */
diff --git a/plat/imx/imx8m/imx8mp/platform.mk b/plat/imx/imx8m/imx8mp/platform.mk
index 6be2f98..823b5d6 100644
--- a/plat/imx/imx8m/imx8mp/platform.mk
+++ b/plat/imx/imx8m/imx8mp/platform.mk
@@ -6,7 +6,9 @@
 
 PLAT_INCLUDES		:=	-Iplat/imx/common/include		\
 				-Iplat/imx/imx8m/include		\
-				-Iplat/imx/imx8m/imx8mp/include
+				-Iplat/imx/imx8m/imx8mp/include		\
+				-Idrivers/imx/usdhc			\
+				-Iinclude/common/tbbr
 # Translation tables library
 include lib/xlat_tables_v2/xlat_tables.mk
 
@@ -40,6 +42,96 @@
 				${IMX_GIC_SOURCES}				\
 				${XLAT_TABLES_LIB_SRCS}
 
+ifeq (${NEED_BL2},yes)
+BL2_SOURCES		+=	common/desc_image_load.c			\
+				plat/imx/common/imx8_helpers.S			\
+				plat/imx/common/imx_uart_console.S		\
+				plat/imx/imx8m/imx8mp/imx8mp_bl2_el3_setup.c	\
+				plat/imx/imx8m/imx8mp/gpc.c			\
+				plat/imx/imx8m/imx_aipstz.c			\
+				plat/imx/imx8m/imx_rdc.c			\
+				plat/imx/imx8m/imx8m_caam.c			\
+				plat/common/plat_psci_common.c			\
+				lib/cpus/aarch64/cortex_a53.S			\
+				drivers/arm/tzc/tzc380.c			\
+				drivers/delay_timer/delay_timer.c		\
+				drivers/delay_timer/generic_delay_timer.c	\
+				${PLAT_GIC_SOURCES}				\
+				${PLAT_DRAM_SOURCES}				\
+				${XLAT_TABLES_LIB_SRCS}				\
+				drivers/mmc/mmc.c				\
+				drivers/io/io_block.c				\
+				drivers/io/io_fip.c				\
+				drivers/io/io_memmap.c				\
+				drivers/io/io_storage.c				\
+				drivers/imx/usdhc/imx_usdhc.c			\
+				plat/imx/imx8m/imx8mp/imx8mp_bl2_mem_params_desc.c	\
+				plat/imx/common/imx_io_storage.c		\
+				plat/imx/imx8m/imx8m_image_load.c		\
+				lib/optee/optee_utils.c
+endif
+
+# Add the build options to pack BLx images and kernel device tree
+# in the FIP if the platform requires it.
+ifneq ($(BL2),)
+RESET_TO_BL31		:=	0
+$(eval $(call TOOL_ADD_PAYLOAD,${BUILD_PLAT}/tb_fw.crt,--tb-fw-cert))
+endif
+ifneq ($(BL32_EXTRA1),)
+$(eval $(call TOOL_ADD_IMG,BL32_EXTRA1,--tos-fw-extra1))
+endif
+ifneq ($(BL32_EXTRA2),)
+$(eval $(call TOOL_ADD_IMG,BL32_EXTRA2,--tos-fw-extra2))
+endif
+ifneq ($(HW_CONFIG),)
+$(eval $(call TOOL_ADD_IMG,HW_CONFIG,--hw-config))
+endif
+
+ifeq (${NEED_BL2},yes)
+$(eval $(call add_define,NEED_BL2))
+LOAD_IMAGE_V2		:=	1
+# Non-TF Boot ROM
+BL2_AT_EL3		:=	1
+endif
+
+ifneq (${TRUSTED_BOARD_BOOT},0)
+
+include drivers/auth/mbedtls/mbedtls_crypto.mk
+include drivers/auth/mbedtls/mbedtls_x509.mk
+
+AUTH_SOURCES	:=	drivers/auth/auth_mod.c			\
+			drivers/auth/crypto_mod.c		\
+			drivers/auth/img_parser_mod.c		\
+			drivers/auth/tbbr/tbbr_cot_common.c     \
+			drivers/auth/tbbr/tbbr_cot_bl2.c
+
+BL2_SOURCES		+=	${AUTH_SOURCES}					\
+				plat/common/tbbr/plat_tbbr.c			\
+				plat/imx/imx8m/imx8mp/imx8mp_trusted_boot.c	\
+				plat/imx/imx8m/imx8mp/imx8mp_rotpk.S
+
+ROT_KEY             = $(BUILD_PLAT)/rot_key.pem
+ROTPK_HASH          = $(BUILD_PLAT)/rotpk_sha256.bin
+
+$(eval $(call add_define_val,ROTPK_HASH,'"$(ROTPK_HASH)"'))
+$(eval $(call MAKE_LIB_DIRS))
+
+$(BUILD_PLAT)/bl2/imx8mp_rotpk.o: $(ROTPK_HASH)
+
+certificates: $(ROT_KEY)
+
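+# Generate the ROT key (if not already present) and the SHA-256 hash of its
+# public part used as ROTPK_HASH.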
+$(ROT_KEY): | $(BUILD_PLAT)
+	@echo "  OPENSSL $@"
+	@if [ ! -f $(ROT_KEY) ]; then \
+		openssl genrsa 2048 > $@ 2>/dev/null; \
+	fi
+
+$(ROTPK_HASH): $(ROT_KEY)
+	@echo "  OPENSSL $@"
+	$(Q)openssl rsa -in $< -pubout -outform DER 2>/dev/null |\
+	openssl dgst -sha256 -binary > $@ 2>/dev/null
+endif
+
 USE_COHERENT_MEM	:=	1
 RESET_TO_BL31		:=	1
 A53_DISABLE_NON_TEMPORAL_HINT := 0
diff --git a/plat/marvell/armada/a8k/common/a8k_common.mk b/plat/marvell/armada/a8k/common/a8k_common.mk
index 30e6280..9474d08 100644
--- a/plat/marvell/armada/a8k/common/a8k_common.mk
+++ b/plat/marvell/armada/a8k/common/a8k_common.mk
@@ -166,7 +166,7 @@
 BLE_PATH	?=  $(PLAT_COMMON_BASE)/ble
 
 include ${BLE_PATH}/ble.mk
-$(eval $(call MAKE_BL,e))
+$(eval $(call MAKE_BL,ble))
 
 clean realclean distclean: mrvl_clean
 
diff --git a/plat/renesas/common/aarch64/plat_helpers.S b/plat/renesas/common/aarch64/plat_helpers.S
index ec21f25..21c3bed 100644
--- a/plat/renesas/common/aarch64/plat_helpers.S
+++ b/plat/renesas/common/aarch64/plat_helpers.S
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
- * Copyright (c) 2015-2019, Renesas Electronics Corporation. All rights reserved.
+ * Copyright (c) 2015-2021, Renesas Electronics Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -284,7 +284,11 @@
 	str	x3, [sp, #-16]!
 	str	x4, [sp, #-16]!
 	str	x5, [sp, #-16]!
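+	/* Also preserve x6 and x7 across the console_rcar_putc call */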
+	str	x6, [sp, #-16]!
+	str	x7, [sp, #-16]!
 	bl	console_rcar_putc
+	ldr	x7, [sp], #16
+	ldr	x6, [sp], #16
 	ldr	x5, [sp], #16
 	ldr	x4, [sp], #16
 	ldr	x3, [sp], #16
diff --git a/plat/renesas/common/bl2_cpg_init.c b/plat/renesas/common/bl2_cpg_init.c
index ba8e53b..a545f71 100644
--- a/plat/renesas/common/bl2_cpg_init.c
+++ b/plat/renesas/common/bl2_cpg_init.c
@@ -40,7 +40,6 @@
 #endif
 
 #if (RCAR_LSI == RCAR_AUTO) || (RCAR_LSI == RCAR_D3)
-static void bl2_realtime_cpg_init_d3(void);
 static void bl2_system_cpg_init_d3(void);
 #endif
 
@@ -140,7 +139,7 @@
 	cpg_write(SMSTPCR1, 0xFFFFFFFFU);
 	cpg_write(SMSTPCR2, 0x040E2FDCU);
 	cpg_write(SMSTPCR3, 0xFFFFFBDFU);
-	cpg_write(SMSTPCR4, 0x80000004U);
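+	/* Preserve the current state of SMSTPCR4 bit 2 instead of forcing it */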
+	cpg_write(SMSTPCR4, 0x80000000U | (mmio_read_32(SMSTPCR4) & 0x4));
 	cpg_write(SMSTPCR5, 0xC3FFFFFFU);
 	cpg_write(SMSTPCR6, 0xFFFFFFFFU);
 	cpg_write(SMSTPCR7, 0xFFFFFFFFU);
@@ -176,7 +175,7 @@
 	cpg_write(SMSTPCR1, 0xFFFFFFFFU);
 	cpg_write(SMSTPCR2, 0x040E2FDCU);
 	cpg_write(SMSTPCR3, 0xFFFFFBDFU);
-	cpg_write(SMSTPCR4, 0x80000004U);
+	cpg_write(SMSTPCR4, 0x80000000U | (mmio_read_32(SMSTPCR4) & 0x4));
 	cpg_write(SMSTPCR5, 0xC3FFFFFFU);
 	cpg_write(SMSTPCR6, 0xFFFFFFFFU);
 	cpg_write(SMSTPCR7, 0xFFFFFFFFU);
@@ -212,7 +211,7 @@
 	cpg_write(SMSTPCR1, 0xFFFFFFFFU);
 	cpg_write(SMSTPCR2, 0x040E2FDCU);
 	cpg_write(SMSTPCR3, 0xFFFFFBDFU);
-	cpg_write(SMSTPCR4, 0x80000004U);
+	cpg_write(SMSTPCR4, 0x80000000U | (mmio_read_32(SMSTPCR4) & 0x4));
 	cpg_write(SMSTPCR5, 0xC3FFFFFFU);
 	cpg_write(SMSTPCR6, 0xFFFFFFFFU);
 	cpg_write(SMSTPCR7, 0xFFFFFFFFU);
@@ -246,7 +245,7 @@
 	cpg_write(SMSTPCR1, 0xFFFFFFFFU);
 	cpg_write(SMSTPCR2, 0x340E2FDCU);
 	cpg_write(SMSTPCR3, 0xFFFFFBDFU);
-	cpg_write(SMSTPCR4, 0x80000004U);
+	cpg_write(SMSTPCR4, 0x80000000U | (mmio_read_32(SMSTPCR4) & 0x4));
 	cpg_write(SMSTPCR5, 0xC3FFFFFFU);
 	cpg_write(SMSTPCR6, 0xFFFFFFFFU);
 	cpg_write(SMSTPCR7, 0xFFFFFFFFU);
@@ -280,7 +279,7 @@
 	cpg_write(SMSTPCR1, 0xFFFFFFFFU);
 	cpg_write(SMSTPCR2, 0x000E2FDCU);
 	cpg_write(SMSTPCR3, 0xFFFFFBDFU);
-	cpg_write(SMSTPCR4, 0x80000004U);
+	cpg_write(SMSTPCR4, 0x80000000U | (mmio_read_32(SMSTPCR4) & 0x4));
 	cpg_write(SMSTPCR5, 0xC3FFFFFFU);
 	cpg_write(SMSTPCR6, 0xFFFFFFFFU);
 	cpg_write(SMSTPCR7, 0xFFFFFFFFU);
@@ -292,23 +291,6 @@
 #endif
 
 #if (RCAR_LSI == RCAR_AUTO) || (RCAR_LSI == RCAR_D3)
-static void bl2_realtime_cpg_init_d3(void)
-{
-	/* Realtime Module Stop Control Registers */
-	cpg_write(RMSTPCR0, 0x00010000U);
-	cpg_write(RMSTPCR1, 0xFFFFFFFFU);
-	cpg_write(RMSTPCR2, 0x00060FDCU);
-	cpg_write(RMSTPCR3, 0xFFFFFFDFU);
-	cpg_write(RMSTPCR4, 0x80000184U);
-	cpg_write(RMSTPCR5, 0x83FFFFFFU);
-	cpg_write(RMSTPCR6, 0xFFFFFFFFU);
-	cpg_write(RMSTPCR7, 0xFFFFFFFFU);
-	cpg_write(RMSTPCR8, 0x00F1FFF7U);
-	cpg_write(RMSTPCR9, 0xF3F5E016U);
-	cpg_write(RMSTPCR10, 0xFFFEFFE0U);
-	cpg_write(RMSTPCR11, 0x000000B7U);
-}
-
 static void bl2_system_cpg_init_d3(void)
 {
 	/* System Module Stop Control Registers */
@@ -316,7 +298,7 @@
 	cpg_write(SMSTPCR1, 0xFFFFFFFFU);
 	cpg_write(SMSTPCR2, 0x00060FDCU);
 	cpg_write(SMSTPCR3, 0xFFFFFBDFU);
-	cpg_write(SMSTPCR4, 0x00000084U);
+	cpg_write(SMSTPCR4, 0x00000080U | (mmio_read_32(SMSTPCR4) & 0x4));
 	cpg_write(SMSTPCR5, 0x83FFFFFFU);
 	cpg_write(SMSTPCR6, 0xFFFFFFFFU);
 	cpg_write(SMSTPCR7, 0xFFFFFFFFU);
@@ -356,7 +338,7 @@
 			bl2_realtime_cpg_init_e3();
 			break;
 		case PRR_PRODUCT_D3:
-			bl2_realtime_cpg_init_d3();
+			/* no need */
 			break;
 		default:
 			panic();
@@ -373,7 +355,7 @@
 #elif RCAR_LSI == RCAR_E3 || RCAR_LSI == RZ_G2E
 		bl2_realtime_cpg_init_e3();
 #elif RCAR_LSI == RCAR_D3
-		bl2_realtime_cpg_init_d3();
+		/* no need */
 #else
 #error "Don't have CPG initialize routine(unknown)."
 #endif
diff --git a/plat/renesas/common/bl2_secure_setting.c b/plat/renesas/common/bl2_secure_setting.c
index 095d1f6..2f8b001 100644
--- a/plat/renesas/common/bl2_secure_setting.c
+++ b/plat/renesas/common/bl2_secure_setting.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2020, Renesas Electronics Corporation. All rights reserved.
+ * Copyright (c) 2015-2021, Renesas Electronics Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -49,10 +49,10 @@
 	/*
 	 * Bit13: SCEG PKA (secure APB) slave ports
 	 *        0: registers accessed from secure resource only
-	 *        1: Reserved[R-Car E3]
+	 *        1: Reserved[R-Car E3/D3]
 	 * Bit12: SCEG PKA (public APB) slave ports
 	 *	  0: registers accessed from secure resource only
-	 *	  1: Reserved[R-Car E3]
+	 *	  1: Reserved[R-Car E3/D3]
 	 * Bit10: SCEG Secure Core slave ports
 	 *	  0: registers accessed from secure resource only
 	 */
@@ -152,14 +152,14 @@
 	 * Security group 1 attribute setting for slave ports 6
 	 * Bit13: SCEG PKA (secure APB) slave ports
 	 *	  SecurityGroup3
-	 *	  Reserved[R-Car E3]
+	 *	  Reserved[R-Car E3/D3]
 	 * Bit12: SCEG PKA (public APB) slave ports
 	 *	  SecurityGroup3
-	 *	  Reserved[R-Car E3]
+	 *	  Reserved[R-Car E3/D3]
 	 * Bit10: SCEG Secure Core slave ports
 	 *	  SecurityGroup3
 	 */
-#if RCAR_LSI == RCAR_E3
+#if RCAR_LSI == RCAR_E3 || RCAR_LSI == RCAR_D3
 	{ SEC_GRP0COND6, 0x00000400U },
 	{ SEC_GRP1COND6, 0x00000400U },
 #else /* RCAR_LSI == RCAR_E3 */
diff --git a/plat/renesas/common/common.mk b/plat/renesas/common/common.mk
index fafce98..0d88d65 100644
--- a/plat/renesas/common/common.mk
+++ b/plat/renesas/common/common.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2018-2020, Renesas Electronics Corporation. All rights reserved.
+# Copyright (c) 2018-2021, Renesas Electronics Corporation. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -65,10 +65,12 @@
 ERRATA_A53_835769  := 1
 ERRATA_A53_843419  := 1
 ERRATA_A53_855873  := 1
+ERRATA_A53_1530924 := 1
 
 # Enable workarounds for selected Cortex-A57 erratas.
 ERRATA_A57_859972  := 1
 ERRATA_A57_813419  := 1
+ERRATA_A57_1319537 := 1
 
 PLAT_INCLUDES	:=	-Iplat/renesas/common/include/registers	\
 			-Iplat/renesas/common/include		\
@@ -77,9 +79,8 @@
 PLAT_BL_COMMON_SOURCES	:=	drivers/renesas/common/iic_dvfs/iic_dvfs.c \
 				plat/renesas/common/rcar_common.c
 
-RCAR_GIC_SOURCES	:=	drivers/arm/gic/common/gic_common.c	\
-				drivers/arm/gic/v2/gicv2_main.c		\
-				drivers/arm/gic/v2/gicv2_helpers.c	\
+include drivers/arm/gic/v2/gicv2.mk
+RCAR_GIC_SOURCES	:=	${GICV2_SOURCES} \
 				plat/common/plat_gicv2.c
 
 BL2_SOURCES	+=	${RCAR_GIC_SOURCES}				\
diff --git a/plat/renesas/common/include/platform_def.h b/plat/renesas/common/include/platform_def.h
index 72c7688..1213a3c 100644
--- a/plat/renesas/common/include/platform_def.h
+++ b/plat/renesas/common/include/platform_def.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2020, Renesas Electronics Corporation. All rights reserved.
+ * Copyright (c) 2015-2021, Renesas Electronics Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -144,7 +144,7 @@
  ******************************************************************************/
 #ifndef SPD_NONE
 #define BL32_BASE		U(0x44100000)
-#define BL32_LIMIT		(BL32_BASE + U(0x100000))
+#define BL32_LIMIT		(BL32_BASE + U(0x200000))
 #endif
 
 /*******************************************************************************
diff --git a/plat/renesas/common/include/rcar_def.h b/plat/renesas/common/include/rcar_def.h
index 93a65f1..2cd26ed 100644
--- a/plat/renesas/common/include/rcar_def.h
+++ b/plat/renesas/common/include/rcar_def.h
@@ -148,9 +148,13 @@
 #define RCAR_PWRER5		U(0xE61801D4)	/* shutoff/resume error */
 #define RCAR_SYSCISR		U(0xE6180004)	/* Interrupt status     */
 #define RCAR_SYSCISCR		U(0xE6180008)	/* Interrupt stat clear */
+#define RCAR_SYSCEXTMASK	U(0xE61802F8)	/* External Request Mask */
+						/* H3/H3-N, M3 v3.0, M3-N, E3 */
 /* Product register */
 #define RCAR_PRR			U(0xFFF00044)
 #define RCAR_M3_CUT_VER11		U(0x00000010)	/* M3 Ver.1.1/Ver.1.2 */
+#define RCAR_D3_CUT_VER10		U(0x00000000)	/* D3 Ver.1.0 */
+#define RCAR_D3_CUT_VER11		U(0x00000010)	/* D3 Ver.1.1 */
 #define RCAR_MAJOR_MASK			U(0x000000F0)
 #define RCAR_MINOR_MASK			U(0x0000000F)
 #define PRR_PRODUCT_SHIFT		U(8)
diff --git a/plat/renesas/rcar/bl2_plat_setup.c b/plat/renesas/rcar/bl2_plat_setup.c
index 41b2d11..e07b96f 100644
--- a/plat/renesas/rcar/bl2_plat_setup.c
+++ b/plat/renesas/rcar/bl2_plat_setup.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, Renesas Electronics Corporation. All rights reserved.
+ * Copyright (c) 2018-2021, Renesas Electronics Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -263,9 +263,6 @@
 	if (product == PRR_PRODUCT_H3 && PRR_PRODUCT_20 > cut)
 		goto tlb;
 
-	if (product == PRR_PRODUCT_D3)
-		goto tlb;
-
 	/* Disable MFIS write protection */
 	mmio_write_32(MFISWPCNTR, MFISWPCNTR_PASSWORD | 1);
 
@@ -708,6 +705,7 @@
 		[4] = 0x600000000ULL,
 		[6] = 0x700000000ULL,
 	};
+	uint32_t cut = mmio_read_32(RCAR_PRR) & PRR_CUT_MASK;
 
 	switch (product) {
 	case PRR_PRODUCT_H3:
@@ -733,15 +731,21 @@
 		break;
 
 	case PRR_PRODUCT_M3:
+		if (cut < PRR_PRODUCT_30) {
 #if (RCAR_GEN3_ULCB == 1)
-		/* 2GB(1GBx2 2ch split) */
-		dram_config[1] = 0x40000000ULL;
-		dram_config[5] = 0x40000000ULL;
+			/* 2GB(1GBx2 2ch split) */
+			dram_config[1] = 0x40000000ULL;
+			dram_config[5] = 0x40000000ULL;
 #else
-		/* 4GB(2GBx2 2ch split) */
-		dram_config[1] = 0x80000000ULL;
-		dram_config[5] = 0x80000000ULL;
+			/* 4GB(2GBx2 2ch split) */
+			dram_config[1] = 0x80000000ULL;
+			dram_config[5] = 0x80000000ULL;
 #endif
+		} else {
+			/* 8GB(2GBx4 2ch split) */
+			dram_config[1] = 0x100000000ULL;
+			dram_config[5] = 0x100000000ULL;
+		}
 		break;
 
 	case PRR_PRODUCT_M3N:
@@ -897,6 +901,14 @@
 				str,
 				(reg & RCAR_MINOR_MASK) + RCAR_M3_MINOR_OFFSET);
 		}
+	} else if (product == PRR_PRODUCT_D3) {
+		if (RCAR_D3_CUT_VER10 == (reg & PRR_CUT_MASK)) {
+			NOTICE("BL2: PRR is R-Car %s Ver.1.0\n", str);
+		} else if (RCAR_D3_CUT_VER11 == (reg & PRR_CUT_MASK)) {
+			NOTICE("BL2: PRR is R-Car %s Ver.1.1\n", str);
+		} else {
+			NOTICE("BL2: PRR is R-Car %s Ver.X.X\n", str);
+		}
 	} else {
 		major = (reg & RCAR_MAJOR_MASK) >> RCAR_MAJOR_SHIFT;
 		major = major + RCAR_MAJOR_OFFSET;
@@ -904,7 +916,7 @@
 		NOTICE("BL2: PRR is R-Car %s Ver.%d.%d\n", str, major, minor);
 	}
 
-	if (product == PRR_PRODUCT_E3) {
+	if (PRR_PRODUCT_E3 == product || PRR_PRODUCT_D3 == product) {
 		reg = mmio_read_32(RCAR_MODEMR);
 		sscg = reg & RCAR_SSCG_MASK;
 		str = sscg == RCAR_SSCG_ENABLE ? sscg_on : sscg_off;
@@ -968,10 +980,6 @@
 		str = boot_emmc25x1;
 		break;
 	case MODEMR_BOOT_DEV_EMMC_50X8:
-#if RCAR_LSI == RCAR_D3
-		ERROR("BL2: Failed to Initialize. eMMC is not supported.\n");
-		panic();
-#endif
 		str = boot_emmc50x8;
 		break;
 	default:
diff --git a/plat/st/stm32mp1/stm32mp1_def.h b/plat/st/stm32mp1/stm32mp1_def.h
index 0d36660..469c2d9 100644
--- a/plat/st/stm32mp1/stm32mp1_def.h
+++ b/plat/st/stm32mp1/stm32mp1_def.h
@@ -205,6 +205,8 @@
 #define DEBUG_UART_TX_CLKSRC		RCC_UART24CKSELR_HSI
 #define DEBUG_UART_TX_EN_REG		RCC_MP_APB1ENSETR
 #define DEBUG_UART_TX_EN		RCC_MP_APB1ENSETR_UART4EN
+#define DEBUG_UART_RST_REG		RCC_APB1RSTSETR
+#define DEBUG_UART_RST_BIT		RCC_APB1RSTSETR_UART4RST
 
 /*******************************************************************************
  * STM32MP1 ETZPC
diff --git a/plat/st/stm32mp1/stm32mp1_helper.S b/plat/st/stm32mp1/stm32mp1_helper.S
index 84e9e8d..cac9752 100644
--- a/plat/st/stm32mp1/stm32mp1_helper.S
+++ b/plat/st/stm32mp1/stm32mp1_helper.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -148,6 +148,19 @@
 	 * ---------------------------------------------
 	 */
 func plat_crash_console_init
+	/* Reset UART peripheral */
+	ldr	r1, =(RCC_BASE + DEBUG_UART_RST_REG)
+	ldr	r2, =DEBUG_UART_RST_BIT
+	str	r2, [r1]
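+	/* Wait for the reset bit to read back as set before releasing it */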
+1:
+	ldr	r0, [r1]
+	ands	r2, r0, r2
+	beq	1b
+	str	r2, [r1, #4] /* RSTCLR register */
+2:
+	ldr	r0, [r1]
+	ands	r2, r0, r2
+	bne	2b
 	/* Enable GPIOs for UART TX */
 	ldr	r1, =(RCC_BASE + DEBUG_UART_TX_GPIO_BANK_CLK_REG)
 	ldr	r2, [r1]
diff --git a/services/arm_arch_svc/arm_arch_svc_setup.c b/services/arm_arch_svc/arm_arch_svc_setup.c
index 37bfc62..1d4423c 100644
--- a/services/arm_arch_svc/arm_arch_svc_setup.c
+++ b/services/arm_arch_svc/arm_arch_svc_setup.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -11,9 +11,19 @@
 #include <lib/cpus/wa_cve_2018_3639.h>
 #include <lib/smccc.h>
 #include <services/arm_arch_svc.h>
+#include <services/rmi_svc.h>
+#include <services/rmmd_svc.h>
 #include <smccc_helpers.h>
 #include <plat/common/platform.h>
 
+#if ENABLE_RME
+/* Setup Arm architecture Services */
+static int32_t arm_arch_svc_setup(void)
+{
+	return rmmd_setup();
+}
+#endif
+
 static int32_t smccc_version(void)
 {
 	return MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION);
@@ -133,6 +143,16 @@
 		SMC_RET0(handle);
 #endif
 	default:
+#if ENABLE_RME
+		/*
+		 * RMI functions are allocated from the Arch service range. Call
+		 * the RMM dispatcher to handle RMI calls.
+		 */
+		if (is_rmi_fid(smc_fid)) {
+			return rmmd_rmi_handler(smc_fid, x1, x2, x3, x4, cookie,
+						handle, flags);
+		}
+#endif
 		WARN("Unimplemented Arm Architecture Service Call: 0x%x \n",
 			smc_fid);
 		SMC_RET1(handle, SMC_UNK);
@@ -145,6 +165,10 @@
 		OEN_ARM_START,
 		OEN_ARM_END,
 		SMC_TYPE_FAST,
+#if ENABLE_RME
+		arm_arch_svc_setup,
+#else
 		NULL,
+#endif
 		arm_arch_svc_smc_handler
 );
diff --git a/services/std_svc/rmmd/aarch64/rmmd_helpers.S b/services/std_svc/rmmd/aarch64/rmmd_helpers.S
new file mode 100644
index 0000000..6229baf
--- /dev/null
+++ b/services/std_svc/rmmd/aarch64/rmmd_helpers.S
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "../rmmd_private.h"
+#include <asm_macros.S>
+
+	.global rmmd_rmm_enter
+	.global rmmd_rmm_exit
+
+	/* ---------------------------------------------------------------------
+	 * This function is called with SP_EL0 as stack. Here we stash our EL3
+	 * callee-saved registers on to the stack as a part of saving the C
+	 * runtime and enter the secure payload.
+	 * 'x0' contains a pointer to the memory where the address of the C
+	 *  runtime context is to be saved.
+	 * ---------------------------------------------------------------------
+	 */
+func rmmd_rmm_enter
+	/* Make space for the registers that we're going to save */
+	mov	x3, sp
+	str	x3, [x0, #0]
+	sub	sp, sp, #RMMD_C_RT_CTX_SIZE
+
+	/* Save callee-saved registers on to the stack */
+	stp	x19, x20, [sp, #RMMD_C_RT_CTX_X19]
+	stp	x21, x22, [sp, #RMMD_C_RT_CTX_X21]
+	stp	x23, x24, [sp, #RMMD_C_RT_CTX_X23]
+	stp	x25, x26, [sp, #RMMD_C_RT_CTX_X25]
+	stp	x27, x28, [sp, #RMMD_C_RT_CTX_X27]
+	stp	x29, x30, [sp, #RMMD_C_RT_CTX_X29]
+
+	/* ---------------------------------------------------------------------
+	 * Everything is set up now. el3_exit() will use the realm context to
+	 * restore the general-purpose and EL3 system registers and ERET
+	 * into the RMM.
+	 * ---------------------------------------------------------------------
+	 */
+	b	el3_exit
+endfunc rmmd_rmm_enter
+
+	/* ---------------------------------------------------------------------
+	 * This function is called with 'x0' pointing to a C runtime context.
+	 * It restores the saved registers and jumps to that runtime with 'x0'
+	 * as the new SP register. This destroys the C runtime context that had
+	 * been built on the stack below the saved context by the caller. Later
+	 * the second parameter 'x1' is passed as a return value to the caller.
+	 * ---------------------------------------------------------------------
+	 */
+func rmmd_rmm_exit
+	/* Restore the previous stack */
+	mov	sp, x0
+
+	/* Restore callee-saved registers from the stack */
+	ldp	x19, x20, [x0, #(RMMD_C_RT_CTX_X19 - RMMD_C_RT_CTX_SIZE)]
+	ldp	x21, x22, [x0, #(RMMD_C_RT_CTX_X21 - RMMD_C_RT_CTX_SIZE)]
+	ldp	x23, x24, [x0, #(RMMD_C_RT_CTX_X23 - RMMD_C_RT_CTX_SIZE)]
+	ldp	x25, x26, [x0, #(RMMD_C_RT_CTX_X25 - RMMD_C_RT_CTX_SIZE)]
+	ldp	x27, x28, [x0, #(RMMD_C_RT_CTX_X27 - RMMD_C_RT_CTX_SIZE)]
+	ldp	x29, x30, [x0, #(RMMD_C_RT_CTX_X29 - RMMD_C_RT_CTX_SIZE)]
+
+	/* ---------------------------------------------------------------------
+	 * This should take us back to the instruction after the call to the
+	 * last rmmd_rmm_enter(). Place the second parameter in x0
+	 * so that the caller will see it as the return value from the original
+	 * entry call.
+	 * ---------------------------------------------------------------------
+	 */
+	mov	x0, x1
+	ret
+endfunc rmmd_rmm_exit
diff --git a/services/std_svc/rmmd/rmmd.mk b/services/std_svc/rmmd/rmmd.mk
new file mode 100644
index 0000000..bac0a9f
--- /dev/null
+++ b/services/std_svc/rmmd/rmmd.mk
@@ -0,0 +1,18 @@
+#
+# Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${ARCH},aarch64)
+        $(error "Error: RMMD is only supported on aarch64.")
+endif
+
+include services/std_svc/rmmd/trp/trp.mk
+
+RMMD_SOURCES	+=	$(addprefix services/std_svc/rmmd/,	\
+			${ARCH}/rmmd_helpers.S			\
+			rmmd_main.c)
+
+# Let the top-level Makefile know that we intend to include RMM image
+NEED_RMM	:=	yes
diff --git a/services/std_svc/rmmd/rmmd_initial_context.h b/services/std_svc/rmmd/rmmd_initial_context.h
new file mode 100644
index 0000000..d7a743d
--- /dev/null
+++ b/services/std_svc/rmmd/rmmd_initial_context.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RMMD_INITIAL_CONTEXT_H
+#define RMMD_INITIAL_CONTEXT_H
+
+#include <arch.h>
+
+/*
+ * SPSR_EL2
+ *   M=0x9 (0b1001 EL2h)
+ *   M[4]=0
+ *   DAIF=0xF Exceptions masked on entry.
+ *   BTYPE=0  BTI not yet supported.
+ *   SSBS=0   Not yet supported.
+ *   IL=0     Not an illegal exception return.
+ *   SS=0     Not single stepping.
+ *   PAN=1    RMM shouldn't access realm memory.
+ *   UAO=0
+ *   DIT=0
+ *   TCO=0
+ *   NZCV=0
+ */
+#define REALM_SPSR_EL2		(					\
+					SPSR_M_EL2H			| \
+					(0xF << SPSR_DAIF_SHIFT)	| \
+					SPSR_PAN_BIT			\
+				)
+
+#endif /* RMMD_INITIAL_CONTEXT_H */
diff --git a/services/std_svc/rmmd/rmmd_main.c b/services/std_svc/rmmd/rmmd_main.c
new file mode 100644
index 0000000..dacd150
--- /dev/null
+++ b/services/std_svc/rmmd/rmmd_main.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <arch_features.h>
+#include <bl31/bl31.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <context.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/el3_runtime/pubsub.h>
+#include <lib/gpt_rme/gpt_rme.h>
+
+#include <lib/spinlock.h>
+#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/common_def.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+#include <services/gtsi_svc.h>
+#include <services/rmi_svc.h>
+#include <services/rmmd_svc.h>
+#include <smccc_helpers.h>
+#include "rmmd_initial_context.h"
+#include "rmmd_private.h"
+
+/*******************************************************************************
+ * RMM context information.
+ ******************************************************************************/
+rmmd_rmm_context_t rmm_context[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * RMM entry point information. Discovered on the primary core and reused
+ * on secondary cores.
+ ******************************************************************************/
+static entry_point_info_t *rmm_ep_info;
+
+/*******************************************************************************
+ * Static function declaration.
+ ******************************************************************************/
+static int32_t rmm_init(void);
+static uint64_t rmmd_smc_forward(uint32_t smc_fid, uint32_t src_sec_state,
+					uint32_t dst_sec_state, uint64_t x1,
+					uint64_t x2, uint64_t x3, uint64_t x4,
+					void *handle);
+
+/*******************************************************************************
+ * This function takes an RMM context pointer and performs a synchronous entry
+ * into it.
+ ******************************************************************************/
+uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx)
+{
+	uint64_t rc;
+
+	assert(rmm_ctx != NULL);
+
+	cm_set_context(&(rmm_ctx->cpu_ctx), REALM);
+
+	/* Save the current el1/el2 context before loading realm context. */
+	cm_el1_sysregs_context_save(NON_SECURE);
+	cm_el2_sysregs_context_save(NON_SECURE);
+
+	/* Restore the realm context assigned above */
+	cm_el1_sysregs_context_restore(REALM);
+	cm_el2_sysregs_context_restore(REALM);
+	cm_set_next_eret_context(REALM);
+
+	/* Enter RMM */
+	rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx);
+
+	/* Save realm context */
+	cm_el1_sysregs_context_save(REALM);
+	cm_el2_sysregs_context_save(REALM);
+
+	/* Restore the el1/el2 context again. */
+	cm_el1_sysregs_context_restore(NON_SECURE);
+	cm_el2_sysregs_context_restore(NON_SECURE);
+
+	return rc;
+}
+
+/*******************************************************************************
+ * This function returns to the place where rmmd_rmm_sync_entry() was
+ * called originally.
+ ******************************************************************************/
+__dead2 void rmmd_rmm_sync_exit(uint64_t rc)
+{
+	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];
+
+	/* Get context of the RMM in use by this CPU. */
+	assert(cm_get_context(REALM) == &(ctx->cpu_ctx));
+
+	/*
+	 * The RMMD must have initiated the original request through a
+	 * synchronous entry into RMM. Jump back to the original C runtime
+	 * context with the value of rc in x0.
+	 */
+	rmmd_rmm_exit(ctx->c_rt_ctx, rc);
+
+	panic();
+}
+
+static void rmm_el2_context_init(el2_sysregs_t *regs)
+{
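+	/*
+	 * ctx_regs[] holds 64-bit entries while the CTX_* constants are byte
+	 * offsets, hence the shift by 3 to convert to an array index.
+	 */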
+	regs->ctx_regs[CTX_SPSR_EL2 >> 3] = REALM_SPSR_EL2;
+	regs->ctx_regs[CTX_SCTLR_EL2 >> 3] = SCTLR_EL2_RES1;
+}
+
+/*******************************************************************************
+ * Jump to the RMM for the first time.
+ ******************************************************************************/
+static int32_t rmm_init(void)
+{
+
+	uint64_t rc;
+
+	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];
+
+	INFO("RMM init start.\n");
+	ctx->state = RMM_STATE_RESET;
+
+	/* Initialize RMM EL2 context. */
+	rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);
+
+	rc = rmmd_rmm_sync_entry(ctx);
+	if (rc != 0ULL) {
+		ERROR("RMM initialisation failed 0x%llx\n", rc);
+		panic();
+	}
+
+	ctx->state = RMM_STATE_IDLE;
+	INFO("RMM init end.\n");
+
+	return 1;
+}
+
+/*******************************************************************************
+ * Load and read RMM manifest, setup RMM.
+ ******************************************************************************/
+int rmmd_setup(void)
+{
+	uint32_t ep_attr;
+	unsigned int linear_id = plat_my_core_pos();
+	rmmd_rmm_context_t *rmm_ctx = &rmm_context[linear_id];
+
+	/* Make sure RME is supported. */
+	assert(get_armv9_2_feat_rme_support() != 0U);
+
+	rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM);
+	if (rmm_ep_info == NULL) {
+		WARN("No RMM image provided by BL2 boot loader. Booting "
+		     "device without RMM initialization. SMCs destined for "
+		     "RMM will return SMC_UNK\n");
+		return -ENOENT;
+	}
+
+	/* Under no circumstances will this parameter be 0 */
+	assert(rmm_ep_info->pc == RMM_BASE);
+
+	/* Initialise an entrypoint to set up the CPU context */
+	ep_attr = EP_REALM;
+	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) {
+		ep_attr |= EP_EE_BIG;
+	}
+
+	SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr);
+	rmm_ep_info->spsr = SPSR_64(MODE_EL2,
+					MODE_SP_ELX,
+					DISABLE_ALL_EXCEPTIONS);
+
+	/* Initialise RMM context with this entry point information */
+	cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info);
+
+	INFO("RMM setup done.\n");
+
+	/* Register init function for deferred init.  */
+	bl31_register_rmm_init(&rmm_init);
+
+	return 0;
+}
+
+/*******************************************************************************
+ * Forward SMC to the other security state
+ ******************************************************************************/
+static uint64_t	rmmd_smc_forward(uint32_t smc_fid, uint32_t src_sec_state,
+					uint32_t dst_sec_state, uint64_t x1,
+					uint64_t x2, uint64_t x3, uint64_t x4,
+					void *handle)
+{
+	/* Save incoming security state */
+	cm_el1_sysregs_context_save(src_sec_state);
+	cm_el2_sysregs_context_save(src_sec_state);
+
+	/* Restore outgoing security state */
+	cm_el1_sysregs_context_restore(dst_sec_state);
+	cm_el2_sysregs_context_restore(dst_sec_state);
+	cm_set_next_eret_context(dst_sec_state);
+
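+	/* Return to the destination world with the arguments in x0-x7 */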
+	SMC_RET8(cm_get_context(dst_sec_state), smc_fid, x1, x2, x3, x4,
+			SMC_GET_GP(handle, CTX_GPREG_X5),
+			SMC_GET_GP(handle, CTX_GPREG_X6),
+			SMC_GET_GP(handle, CTX_GPREG_X7));
+}
+
+/*******************************************************************************
+ * This function handles all SMCs in the range reserved for RMI. Each call is
+ * either forwarded to the other security state or handled by the RMM dispatcher
+ ******************************************************************************/
+uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
+				uint64_t x3, uint64_t x4, void *cookie,
+				void *handle, uint64_t flags)
+{
+	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];
+	uint32_t src_sec_state;
+
+	/* Determine which security state this SMC originated from */
+	src_sec_state = caller_sec_state(flags);
+
+	/* RMI must not be invoked by the Secure world */
+	if (src_sec_state == SMC_FROM_SECURE) {
+		WARN("RMM: RMI invoked by secure world.\n");
+		SMC_RET1(handle, SMC_UNK);
+	}
+
+	/*
+	 * Forward an RMI call from the Normal world to the Realm world as it
+	 * is.
+	 */
+	if (src_sec_state == SMC_FROM_NON_SECURE) {
+		VERBOSE("RMM: RMI call from non-secure world.\n");
+		return rmmd_smc_forward(smc_fid, NON_SECURE, REALM,
+					x1, x2, x3, x4, handle);
+	}
+
+	assert(src_sec_state == SMC_FROM_REALM);
+
+	switch (smc_fid) {
+	case RMI_RMM_REQ_COMPLETE:
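+		/*
+		 * During boot, REQ_COMPLETE terminates the synchronous entry
+		 * made by rmm_init() or the PSCI CPU-on hook; at run time it
+		 * carries the result of a previously forwarded RMI call back
+		 * to the Normal world.
+		 */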
+		if (ctx->state == RMM_STATE_RESET) {
+			VERBOSE("RMM: running rmmd_rmm_sync_exit\n");
+			rmmd_rmm_sync_exit(x1);
+		}
+
+		return rmmd_smc_forward(x1, REALM, NON_SECURE,
+					x2, x3, x4, 0, handle);
+
+	default:
+		WARN("RMM: Unsupported RMM call 0x%08x\n", smc_fid);
+		SMC_RET1(handle, SMC_UNK);
+	}
+}
+
+/*******************************************************************************
+ * This CPU has been turned on. Enter the RMM to initialise R-EL2. Entry into RMM
+ * is done after initialising minimal architectural state that guarantees safe
+ * execution.
+ ******************************************************************************/
+static void *rmmd_cpu_on_finish_handler(const void *arg)
+{
+	int32_t rc;
+	uint32_t linear_id = plat_my_core_pos();
+	rmmd_rmm_context_t *ctx = &rmm_context[linear_id];
+
+	ctx->state = RMM_STATE_RESET;
+
+	/* Initialise RMM context with this entry point information */
+	cm_setup_context(&ctx->cpu_ctx, rmm_ep_info);
+
+	/* Initialize RMM EL2 context. */
+	rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);
+
+	rc = rmmd_rmm_sync_entry(ctx);
+	if (rc != 0) {
+		ERROR("RMM initialisation failed (%d) on CPU%d\n", rc,
+		      linear_id);
+		panic();
+	}
+
+	ctx->state = RMM_STATE_IDLE;
+	return NULL;
+}
+
+/* Subscribe to the PSCI CPU-on event to initialise the RMM on secondary CPUs */
+SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler);
+
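+/*
+ * Transition a single 4KB granule to the requested PAS and map the TF-A
+ * error codes returned by gpt_transition_pas() onto GTSI return codes.
+ */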
+static int gtsi_transition_granule(uint64_t pa,
+					unsigned int src_sec_state,
+					unsigned int target_pas)
+{
+	int ret;
+
+	ret = gpt_transition_pas(pa, PAGE_SIZE_4KB, src_sec_state, target_pas);
+
+	/* Convert TF-A error codes into GTSI error codes */
+	if (ret == -EINVAL) {
+		ERROR("[GTSI] Transition failed: invalid %s\n", "address");
+		ERROR("       PA: 0x%llx, SRC: %d, PAS: %d\n", pa,
+		      src_sec_state, target_pas);
+		ret = GRAN_TRANS_RET_BAD_ADDR;
+	} else if (ret == -EPERM) {
+		ERROR("[GTSI] Transition failed: invalid %s\n", "caller/PAS");
+		ERROR("       PA: 0x%llx, SRC: %d, PAS: %d\n", pa,
+		      src_sec_state, target_pas);
+		ret = GRAN_TRANS_RET_BAD_PAS;
+	}
+
+	return ret;
+}
+
+/*******************************************************************************
+ * This function handles all SMCs in the range reserved for GTF.
+ ******************************************************************************/
+uint64_t rmmd_gtsi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
+				uint64_t x3, uint64_t x4, void *cookie,
+				void *handle, uint64_t flags)
+{
+	uint32_t src_sec_state;
+
+	/* Determine which security state this SMC originated from */
+	src_sec_state = caller_sec_state(flags);
+
+	if (src_sec_state != SMC_FROM_REALM) {
+		WARN("RMM: GTF call originated from secure or normal world\n");
+		SMC_RET1(handle, SMC_UNK);
+	}
+
+	switch (smc_fid) {
+	case SMC_ASC_MARK_REALM:
+		SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM,
+								GPT_GPI_REALM));
+	case SMC_ASC_MARK_NONSECURE:
+		SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM,
+								GPT_GPI_NS));
+	default:
+		WARN("RMM: Unsupported GTF call 0x%08x\n", smc_fid);
+		SMC_RET1(handle, SMC_UNK);
+	}
+}
diff --git a/services/std_svc/rmmd/rmmd_private.h b/services/std_svc/rmmd/rmmd_private.h
new file mode 100644
index 0000000..d170bcd
--- /dev/null
+++ b/services/std_svc/rmmd/rmmd_private.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RMMD_PRIVATE_H
+#define RMMD_PRIVATE_H
+
+#include <context.h>
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define RMMD_C_RT_CTX_X19		0x0
+#define RMMD_C_RT_CTX_X20		0x8
+#define RMMD_C_RT_CTX_X21		0x10
+#define RMMD_C_RT_CTX_X22		0x18
+#define RMMD_C_RT_CTX_X23		0x20
+#define RMMD_C_RT_CTX_X24		0x28
+#define RMMD_C_RT_CTX_X25		0x30
+#define RMMD_C_RT_CTX_X26		0x38
+#define RMMD_C_RT_CTX_X27		0x40
+#define RMMD_C_RT_CTX_X28		0x48
+#define RMMD_C_RT_CTX_X29		0x50
+#define RMMD_C_RT_CTX_X30		0x58
+
+#define RMMD_C_RT_CTX_SIZE		0x60
+#define RMMD_C_RT_CTX_ENTRIES		(RMMD_C_RT_CTX_SIZE >> DWORD_SHIFT)
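+
+/*
+ * The accompanying assembly helpers (rmmd_rmm_enter/rmmd_rmm_exit) are
+ * expected to save and restore the callee-saved registers at these offsets,
+ * e.g. "stp x19, x20, [x0, #RMMD_C_RT_CTX_X19]".
+ */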
+
+#ifndef __ASSEMBLER__
+#include <stdint.h>
+#include <services/rmi_svc.h>
+
+typedef enum rmm_state {
+	RMM_STATE_RESET = 0,
+	RMM_STATE_IDLE
+} rmm_state_t;
+
+/*
+ * Data structure used by the RMM dispatcher (RMMD) in EL3 to track context of
+ * the RMM at R-EL2.
+ */
+typedef struct rmmd_rmm_context {
+	uint64_t c_rt_ctx;
+	cpu_context_t cpu_ctx;
+	rmm_state_t state;
+} rmmd_rmm_context_t;
+
+/* Functions used to enter/exit the RMM synchronously */
+uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *ctx);
+__dead2 void rmmd_rmm_sync_exit(uint64_t rc);
+
+/* Assembly helpers */
+uint64_t rmmd_rmm_enter(uint64_t *c_rt_ctx);
+void __dead2 rmmd_rmm_exit(uint64_t c_rt_ctx, uint64_t ret);
+
+/* Reference to PM ops for the RMMD */
+extern const spd_pm_ops_t rmmd_pm;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* RMMD_PRIVATE_H */
diff --git a/services/std_svc/rmmd/trp/linker.lds b/services/std_svc/rmmd/trp/linker.lds
new file mode 100644
index 0000000..2b7f383
--- /dev/null
+++ b/services/std_svc/rmmd/trp/linker.lds
@@ -0,0 +1,71 @@
+/*
+ * (C) COPYRIGHT 2021 Arm Limited or its affiliates.
+ * ALL RIGHTS RESERVED
+ */
+
+#include <common/bl_common.ld.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+
+/*
+ * The image is mapped using 4K pages, so sections with different memory
+ * attributes must be aligned at that granularity.
+ */
+PAGE_SIZE_4K = 4096;
+
+OUTPUT_FORMAT("elf64-littleaarch64")
+OUTPUT_ARCH(aarch64)
+ENTRY(trp_head)
+
+MEMORY {
+	RAM (rwx): ORIGIN = RMM_BASE, LENGTH = RMM_LIMIT - RMM_BASE
+}
+
+SECTIONS
+{
+	. = RMM_BASE;
+
+	.text : {
+		*(.head.text)
+		. = ALIGN(8);
+		*(.text*)
+	} >RAM
+
+	. = ALIGN(PAGE_SIZE_4K);
+
+	.rodata : {
+		*(.rodata*)
+	} >RAM
+
+	. = ALIGN(PAGE_SIZE_4K);
+
+	 __RW_START__ = . ;
+
+	.data : {
+		*(.data*)
+	} >RAM
+
+	.bss (NOLOAD) : {
+		__BSS_START__ = .;
+		*(.bss*)
+		__BSS_END__ = .;
+	} >RAM
+	__BSS_SIZE__ = SIZEOF(.bss);
+
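+	/* Per-CPU stacks; STACK_SECTION is provided by bl_common.ld.h. */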
+	STACK_SECTION >RAM
+
+	/*
+	* Define a linker symbol to mark the end of the RW memory area for this
+	* image.
+	*/
+	__RW_END__ = .;
+	__RMM_END__ = .;
+
+	/DISCARD/ : { *(.dynstr*) }
+	/DISCARD/ : { *(.dynamic*) }
+	/DISCARD/ : { *(.plt*) }
+	/DISCARD/ : { *(.interp*) }
+	/DISCARD/ : { *(.gnu*) }
+	/DISCARD/ : { *(.note*) }
+}
diff --git a/services/std_svc/rmmd/trp/trp.mk b/services/std_svc/rmmd/trp/trp.mk
new file mode 100644
index 0000000..a4f6e03
--- /dev/null
+++ b/services/std_svc/rmmd/trp/trp.mk
@@ -0,0 +1,20 @@
+#
+# Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+RMM_SOURCES		+=	services/std_svc/rmmd/trp/trp_entry.S	\
+				services/std_svc/rmmd/trp/trp_main.c
+
+RMM_LINKERFILE		:=	services/std_svc/rmmd/trp/linker.lds
+
+# Include the platform-specific TRP Makefile
+# If no platform-specific TRP Makefile exists, it means TRP is not supported
+# on this platform.
+TRP_PLAT_MAKEFILE := $(wildcard ${PLAT_DIR}/trp/trp-${PLAT}.mk)
+ifeq (,${TRP_PLAT_MAKEFILE})
+  $(error TRP is not supported on platform ${PLAT})
+else
+  include ${TRP_PLAT_MAKEFILE}
+endif
diff --git a/services/std_svc/rmmd/trp/trp_entry.S b/services/std_svc/rmmd/trp/trp_entry.S
new file mode 100644
index 0000000..23b48fb
--- /dev/null
+++ b/services/std_svc/rmmd/trp/trp_entry.S
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <services/gtsi_svc.h>
+#include <services/rmi_svc.h>
+#include "trp_private.h"
+
+.global trp_head
+.global trp_smc
+
+.section ".head.text", "ax"
+
+	/* ---------------------------------------------
+	 * Populate the params in x0-x7 from the pointer
+	 * to the smc args structure in x0.
+	 * ---------------------------------------------
+	 */
+	.macro restore_args_call_smc
+	ldp	x6, x7, [x0, #TRP_ARG6]
+	ldp	x4, x5, [x0, #TRP_ARG4]
+	ldp	x2, x3, [x0, #TRP_ARG2]
+	ldp	x0, x1, [x0, #TRP_ARG0]
+	smc	#0
+	.endm
+
+	/* ---------------------------------------------
+	 * Entry point for TRP
+	 * ---------------------------------------------
+	 */
+trp_head:
+	bl	plat_set_my_stack
+	bl	plat_is_my_cpu_primary
+	cbz	x0, trp_secondary_cpu_entry
+
+	/* ---------------------------------------------
+	 * Zero out BSS section
+	 * ---------------------------------------------
+	 */
+	ldr	x0, =__BSS_START__
+	ldr	x1, =__BSS_SIZE__
+	bl	zeromem
+
+	bl	trp_setup
+
+	bl	trp_main
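+
+	/* ---------------------------------------------
+	 * Both the primary CPU (falling through after
+	 * trp_main) and secondary CPUs report boot
+	 * completion to the RMMD and then enter the
+	 * RMI handling loop in trp_handler.
+	 * ---------------------------------------------
+	 */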
+trp_secondary_cpu_entry:
+	mov_imm	x0, RMI_RMM_REQ_COMPLETE
+	mov	x1, xzr
+	smc	#0
+	b	trp_handler
+
+	/* ---------------------------------------------
+	 * Direct SMC call to BL31 service provided by
+	 * RMM Dispatcher
+	 * ---------------------------------------------
+	 */
+func trp_smc
+	restore_args_call_smc
+	ret
+endfunc trp_smc
+
+	/* ---------------------------------------------
+	 * RMI call handler
+	 * ---------------------------------------------
+	 */
+func trp_handler
+	bl	trp_rmi_handler
+	restore_args_call_smc
+	b	trp_handler
+endfunc trp_handler
diff --git a/services/std_svc/rmmd/trp/trp_main.c b/services/std_svc/rmmd/trp/trp_main.c
new file mode 100644
index 0000000..2ab9ecc
--- /dev/null
+++ b/services/std_svc/rmmd/trp/trp_main.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <common/debug.h>
+#include <plat/common/platform.h>
+#include <services/gtsi_svc.h>
+#include <services/rmi_svc.h>
+#include <services/trp/platform_trp.h>
+
+#include <platform_def.h>
+#include "trp_private.h"
+
+/*******************************************************************************
+ * Per-CPU data structure used to hold SMC parameters set up in C code; a
+ * pointer to this structure is used by the assembler code to populate x0-x7
+ ******************************************************************************/
+static trp_args_t trp_smc_args[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * Set the arguments for SMC call
+ ******************************************************************************/
+static trp_args_t *set_smc_args(uint64_t arg0,
+				uint64_t arg1,
+				uint64_t arg2,
+				uint64_t arg3,
+				uint64_t arg4,
+				uint64_t arg5,
+				uint64_t arg6,
+				uint64_t arg7)
+{
+	uint32_t linear_id;
+	trp_args_t *pcpu_smc_args;
+
+	/*
+	 * Return to Secure Monitor by raising an SMC. The results of the
+	 * service are passed as arguments to the SMC.
+	 */
+	linear_id = plat_my_core_pos();
+	pcpu_smc_args = &trp_smc_args[linear_id];
+	write_trp_arg(pcpu_smc_args, TRP_ARG0, arg0);
+	write_trp_arg(pcpu_smc_args, TRP_ARG1, arg1);
+	write_trp_arg(pcpu_smc_args, TRP_ARG2, arg2);
+	write_trp_arg(pcpu_smc_args, TRP_ARG3, arg3);
+	write_trp_arg(pcpu_smc_args, TRP_ARG4, arg4);
+	write_trp_arg(pcpu_smc_args, TRP_ARG5, arg5);
+	write_trp_arg(pcpu_smc_args, TRP_ARG6, arg6);
+	write_trp_arg(pcpu_smc_args, TRP_ARG7, arg7);
+
+	return pcpu_smc_args;
+}
+
+/*******************************************************************************
+ * Setup function for TRP.
+ ******************************************************************************/
+void trp_setup(void)
+{
+	/* Perform early platform-specific setup */
+	trp_early_platform_setup();
+}
+
+/* Main function for TRP */
+void trp_main(void)
+{
+	NOTICE("TRP: %s\n", version_string);
+	NOTICE("TRP: %s\n", build_message);
+	INFO("TRP: Memory base : 0x%lx\n", (unsigned long)RMM_BASE);
+	INFO("TRP: Total size : 0x%lx bytes\n", (unsigned long)(RMM_END
+								- RMM_BASE));
+}
+
+/*******************************************************************************
+ * Returning RMI version back to Normal World
+ ******************************************************************************/
+static trp_args_t *trp_ret_rmi_version(void)
+{
+	VERBOSE("RMM version is %u.%u\n", RMI_ABI_VERSION_MAJOR,
+					  RMI_ABI_VERSION_MINOR);
+	return set_smc_args(RMI_RMM_REQ_COMPLETE, RMI_ABI_VERSION,
+			    0, 0, 0, 0, 0, 0);
+}
+
+/*******************************************************************************
+ * Transitioning granule of NON-SECURE type to REALM type
+ ******************************************************************************/
+static trp_args_t *trp_asc_mark_realm(unsigned long long x1)
+{
+	unsigned long long ret;
+
+	VERBOSE("Delegating granule 0x%llx\n", x1);
+	ret = trp_smc(set_smc_args(SMC_ASC_MARK_REALM, x1, 0, 0, 0, 0, 0, 0));
+
+	if (ret != 0ULL) {
+		ERROR("Granule transition from NON-SECURE type to REALM type "
+			"failed 0x%llx\n", ret);
+	}
+	return set_smc_args(RMI_RMM_REQ_COMPLETE, ret, 0, 0, 0, 0, 0, 0);
+}
+
+/*******************************************************************************
+ * Transitioning granule of REALM type to NON-SECURE type
+ ******************************************************************************/
+static trp_args_t *trp_asc_mark_nonsecure(unsigned long long x1)
+{
+	unsigned long long ret;
+
+	VERBOSE("Undelegating granule 0x%llx\n", x1);
+	ret = trp_smc(set_smc_args(SMC_ASC_MARK_NONSECURE, x1, 0, 0, 0, 0, 0, 0));
+
+	if (ret != 0ULL) {
+		ERROR("Granule transition from REALM type to NON-SECURE type "
+			"failed 0x%llx\n", ret);
+	}
+	return set_smc_args(RMI_RMM_REQ_COMPLETE, ret, 0, 0, 0, 0, 0, 0);
+}
+
+/*******************************************************************************
+ * Main RMI SMC handler function
+ ******************************************************************************/
+trp_args_t *trp_rmi_handler(unsigned long fid, unsigned long long x1)
+{
+	switch (fid) {
+	case RMI_RMM_REQ_VERSION:
+		return trp_ret_rmi_version();
+	case RMI_RMM_GRANULE_DELEGATE:
+		return trp_asc_mark_realm(x1);
+	case RMI_RMM_GRANULE_UNDELEGATE:
+		return trp_asc_mark_nonsecure(x1);
+	default:
+		ERROR("Invalid SMC code to %s, FID %lu\n", __func__, fid);
+	}
+	return set_smc_args(SMC_UNK, 0, 0, 0, 0, 0, 0, 0);
+}
diff --git a/services/std_svc/rmmd/trp/trp_private.h b/services/std_svc/rmmd/trp/trp_private.h
new file mode 100644
index 0000000..9231390
--- /dev/null
+++ b/services/std_svc/rmmd/trp/trp_private.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TRP_PRIVATE_H
+#define TRP_PRIVATE_H
+
+/* Definitions to help the assembler access the SMC/ERET args structure */
+#define TRP_ARGS_SIZE		TRP_ARGS_END
+#define TRP_ARG0		0x0
+#define TRP_ARG1		0x8
+#define TRP_ARG2		0x10
+#define TRP_ARG3		0x18
+#define TRP_ARG4		0x20
+#define TRP_ARG5		0x28
+#define TRP_ARG6		0x30
+#define TRP_ARG7		0x38
+#define TRP_ARGS_END		0x40
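+
+/*
+ * trp_entry.S uses these byte offsets (see restore_args_call_smc) to load
+ * x0-x7 from a trp_args_t before issuing the SMC back to the RMMD.
+ */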
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+/* Data structure to hold SMC arguments */
+typedef struct trp_args {
+	uint64_t regs[TRP_ARGS_END >> 3];
+} __aligned(CACHE_WRITEBACK_GRANULE) trp_args_t;
+
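+/* Store an SMC argument at the given byte offset within a trp_args_t. */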
+#define write_trp_arg(args, offset, val) (((args)->regs[(offset) >> 3])	\
+					 = (val))
+
+/* Definitions for RMI VERSION */
+#define RMI_ABI_VERSION_MAJOR		U(0x0)
+#define RMI_ABI_VERSION_MINOR		U(0x0)
+#define RMI_ABI_VERSION			((RMI_ABI_VERSION_MAJOR << 16) | \
+					RMI_ABI_VERSION_MINOR)
+
+/* Helper to issue SMC calls to BL31 */
+uint64_t trp_smc(trp_args_t *);
+
+/* The main function, to be executed only by the primary CPU */
+void trp_main(void);
+
+/* Set up the TRP. Executed only by the primary CPU */
+void trp_setup(void);
+
+#endif /* __ASSEMBLER__ */
+#endif /* TRP_PRIVATE_H */
diff --git a/services/std_svc/std_svc_setup.c b/services/std_svc/std_svc_setup.c
index 1917d0a..39db429 100644
--- a/services/std_svc/std_svc_setup.c
+++ b/services/std_svc/std_svc_setup.c
@@ -13,7 +13,9 @@
 #include <lib/pmf/pmf.h>
 #include <lib/psci/psci.h>
 #include <lib/runtime_instr.h>
+#include <services/gtsi_svc.h>
 #include <services/pci_svc.h>
+#include <services/rmmd_svc.h>
 #include <services/sdei.h>
 #include <services/spm_mm_svc.h>
 #include <services/spmd_svc.h>
@@ -158,6 +160,16 @@
 				flags);
 	}
 #endif
+#if ENABLE_RME
+	/*
+	 * Granule Transition Service Interface (GTSI) functions are allocated
+	 * from the Std service range. Call the RMM dispatcher to handle them.
+	 */
+	if (is_gtsi_fid(smc_fid)) {
+		return rmmd_gtsi_handler(smc_fid, x1, x2, x3, x4, cookie,
+						handle, flags);
+	}
+#endif
 
 #if SMC_PCI_SUPPORT
 	if (is_pci_fid(smc_fid)) {
diff --git a/tools/fiptool/tbbr_config.c b/tools/fiptool/tbbr_config.c
index c1e5217..4998bb2 100644
--- a/tools/fiptool/tbbr_config.c
+++ b/tools/fiptool/tbbr_config.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -67,6 +67,11 @@
 		.uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
 		.cmdline_name = "nt-fw"
 	},
+	{
+		.name = "Realm Monitor Management Firmware",
+		.uuid = UUID_REALM_MONITOR_MGMT_FIRMWARE,
+		.cmdline_name = "rmm-fw"
+	},
 	/* Dynamic Configs */
 	{
 		.name = "FW_CONFIG",
diff --git a/tools/renesas/rcar_layout_create/sa6.c b/tools/renesas/rcar_layout_create/sa6.c
index fa828b9..8fafdad 100644
--- a/tools/renesas/rcar_layout_create/sa6.c
+++ b/tools/renesas/rcar_layout_create/sa6.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, Renesas Electronics Corporation. All rights reserved.
+ * Copyright (c) 2015-2021, Renesas Electronics Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -96,7 +96,7 @@
 #define RCAR_BL32DST_ADDRESS		(0x44100000U)
 #define RCAR_BL32DST_ADDRESSH		(0x00000000U)
 /* Destination size for BL32 */
-#define RCAR_BL32DST_SIZE		(0x00040000U)
+#define RCAR_BL32DST_SIZE		(0x00080000U)
 /* Destination address for BL33 */
 #define RCAR_BL33DST_ADDRESS		(0x50000000U)
 #define RCAR_BL33DST_ADDRESSH		(0x00000000U)
diff --git a/tools/sptool/sp_mk_generator.py b/tools/sptool/sp_mk_generator.py
index a37e702..f983ff3 100755
--- a/tools/sptool/sp_mk_generator.py
+++ b/tools/sptool/sp_mk_generator.py
@@ -1,5 +1,5 @@
 #!/usr/bin/python3
-# Copyright (c) 2020, Arm Limited. All rights reserved.
+# Copyright (c) 2020-2021, Arm Limited. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 
@@ -110,24 +110,36 @@
         Extract uuid from partition manifest
         """
         pm_file = open(dts)
-        uuid_key = "uuid"
-
         for line in pm_file:
-            if uuid_key in line:
-                uuid_hex = re.findall(r'\<(.+?)\>', line)[0];
+            if "uuid" in line:
+                # re.findall returns a list of string tuples.
+                # uuid_hex is the first item in this list representing the four
+                # uuid hex integers from the manifest uuid field. The heading
+                # '0x' of the hexadecimal representation is stripped out.
+                # e.g. uuid = <0x1e67b5b4 0xe14f904a 0x13fb1fb8 0xcbdae1da>;
+                # uuid_hex = ('1e67b5b4', 'e14f904a', '13fb1fb8', 'cbdae1da')
+                uuid_hex = re.findall(r'0x([0-9a-f]+) 0x([0-9a-f]+) 0x([0-9a-f]+) 0x([0-9a-f]+)', line)[0]
+
+        # uuid_hex is a tuple of the four hex strings matched above
+        if len(uuid_hex) != 4:
+            print("ERROR: malformed UUID")
+            exit(-1)
 
-        # PM has uuid in format 0xABC... 0x... 0x... 0x...
-        # Get rid of '0x' and spaces and convert to string of hex digits
-        uuid_hex = uuid_hex.replace('0x','').replace(' ','')
-        # make UUID from a string of hex digits
-        uuid_std = uuid.UUID(uuid_hex)
-        # convert UUID to a string of hex digits in standard form
-        uuid_std = str(uuid_std)
+        # The uuid field in SP manifest is the little endian representation
+        # mapped to arguments as described in SMCCC section 5.3.
+        # Convert each unsigned integer value to a big endian representation
+        # required by fiptool.
+        y=list(map(bytearray.fromhex, uuid_hex))
+        z=(int.from_bytes(y[0], byteorder='little', signed=False),
+        int.from_bytes(y[1], byteorder='little', signed=False),
+        int.from_bytes(y[2], byteorder='little', signed=False),
+        int.from_bytes(y[3], byteorder='little', signed=False))
+        uuid_std = uuid.UUID(f'{z[0]:08x}{z[1]:08x}{z[2]:08x}{z[3]:08x}')
 
         """
         Append FIP_ARGS
         """
-        out_file.write("FIP_ARGS += --blob uuid=" + uuid_std + ",file=" + dst + "\n")
+        out_file.write("FIP_ARGS += --blob uuid=" + str(uuid_std) + ",file=" + dst + "\n")
 
         """
         Append CRT_ARGS