Merge "fix(spm): add device-regions used in tf-a-tests" into integration
diff --git a/include/arch/aarch64/arch_helpers.h b/include/arch/aarch64/arch_helpers.h
index da7b162..ab32b18 100644
--- a/include/arch/aarch64/arch_helpers.h
+++ b/include/arch/aarch64/arch_helpers.h
@@ -712,16 +712,88 @@
 }
 
 /*
- * TLBIPAALLOS instruction
- * (TLB Inivalidate GPT Information by PA,
- * All Entries, Outer Shareable)
+ * TLBI PAALLOS instruction
+ * (TLB Invalidate GPT Information by PA, All Entries, Outer Shareable)
  */
 static inline void tlbipaallos(void)
 {
-	__asm__("SYS #6,c8,c1,#4");
+	__asm__("sys #6, c8, c1, #4");
 }
 
 /*
+ * TLBI RPALOS instructions
+ * (TLB Range Invalidate GPT Information by PA, Last level, Outer Shareable)
+ *
+ * command SIZE field, bits [47:44]:
+ * 0b0000	4KB
+ * 0b0001	16KB
+ * 0b0010	64KB
+ * 0b0011	2MB
+ * 0b0100	32MB
+ * 0b0101	512MB
+ * 0b0110	1GB
+ * 0b0111	16GB
+ * 0b1000	64GB
+ * 0b1001	512GB
+ */
+#define TLBI_SZ_4K		0UL
+#define TLBI_SZ_16K		1UL
+#define TLBI_SZ_64K		2UL
+#define TLBI_SZ_2M		3UL
+#define TLBI_SZ_32M		4UL
+#define TLBI_SZ_512M		5UL
+#define TLBI_SZ_1G		6UL
+#define TLBI_SZ_16G		7UL
+#define TLBI_SZ_64G		8UL
+#define TLBI_SZ_512G		9UL
+
+#define	TLBI_ADDR_SHIFT		U(12)
+#define	TLBI_SIZE_SHIFT		U(44)
+
+#define TLBIRPALOS(_addr, _size)				\
+{								\
+	u_register_t arg = ((_addr) >> TLBI_ADDR_SHIFT) |	\
+			   ((_size) << TLBI_SIZE_SHIFT);	\
+	__asm__("sys #6, c8, c4, #7, %0" : : "r" (arg));	\
+}
+
+/* Note: addr must be aligned to 4KB */
+static inline void tlbirpalos_4k(uintptr_t addr)
+{
+	TLBIRPALOS(addr, TLBI_SZ_4K);
+}
+
+/* Note: addr must be aligned to 16KB */
+static inline void tlbirpalos_16k(uintptr_t addr)
+{
+	TLBIRPALOS(addr, TLBI_SZ_16K);
+}
+
+/* Note: addr must be aligned to 64KB */
+static inline void tlbirpalos_64k(uintptr_t addr)
+{
+	TLBIRPALOS(addr, TLBI_SZ_64K);
+}
+
+/* Note: addr must be aligned to 2MB */
+static inline void tlbirpalos_2m(uintptr_t addr)
+{
+	TLBIRPALOS(addr, TLBI_SZ_2M);
+}
+
+/* Note: addr must be aligned to 32MB */
+static inline void tlbirpalos_32m(uintptr_t addr)
+{
+	TLBIRPALOS(addr, TLBI_SZ_32M);
+}
+
+/* Note: addr must be aligned to 512MB */
+static inline void tlbirpalos_512m(uintptr_t addr)
+{
+	TLBIRPALOS(addr, TLBI_SZ_512M);
+}
+
+/*
  * Invalidate TLBs of GPT entries by Physical address, last level.
  *
  * @pa: the starting address for the range
@@ -730,7 +802,6 @@
  */
 void gpt_tlbi_by_pa_ll(uint64_t pa, size_t size);
 
-
 /* Previously defined accessor functions with incomplete register names  */
 
 #define read_current_el()	read_CurrentEl()
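A minimal usage sketch of the new helpers (the base address is hypothetical; dsb() is the barrier this patch itself uses after TLB maintenance in lib/gpt_rme):

/*
 * Invalidate cached GPT information for one 2MB region. TLBIRPALOS() packs
 * the PA (bits [43:0], in 4KB units) and the SIZE field (bits [47:44]) into
 * the single register operand of "sys #6, c8, c4, #7".
 */
static void invalidate_gpt_2m_region(void)
{
	uintptr_t base = 0x880000000UL;	/* hypothetical, must be 2MB-aligned */

	tlbirpalos_2m(base);	/* arg = (base >> 12) | (TLBI_SZ_2M << 44) */
	dsb();			/* wait for the invalidation to complete */
}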
diff --git a/include/lib/spinlock.h b/include/lib/spinlock.h
index 9fd3fc6..055a911 100644
--- a/include/lib/spinlock.h
+++ b/include/lib/spinlock.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2018, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -15,15 +15,21 @@
 	volatile uint32_t lock;
 } spinlock_t;
 
+typedef struct bitlock {
+	volatile uint8_t lock;
+} bitlock_t;
+
 void spin_lock(spinlock_t *lock);
 void spin_unlock(spinlock_t *lock);
 
+void bit_lock(bitlock_t *lock, uint8_t mask);
+void bit_unlock(bitlock_t *lock, uint8_t mask);
+
 #else
 
 /* Spin lock definitions for use in assembly */
 #define SPINLOCK_ASM_ALIGN	2
 #define SPINLOCK_ASM_SIZE	4
 
-#endif
-
+#endif /* __ASSEMBLER__ */
 #endif /* SPINLOCK_H */
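A bitlock_t packs up to eight independent locks into a single byte. A minimal usage sketch with a hypothetical assignment of bit 0 (the lock and mask names below are illustrative, not part of this patch):

#include <stdint.h>

#include <lib/spinlock.h>

static bitlock_t gpt_bl;			/* one byte, eight possible locks */

#define L1_LOCK		((uint8_t)(1U << 0))	/* hypothetical lock bit */

static void update_protected_entry(void)
{
	bit_lock(&gpt_bl, L1_LOCK);	/* waits in WFE until bit 0 is acquired */
	/* ... critical section ... */
	bit_unlock(&gpt_bl, L1_LOCK);	/* store-release clears bit 0 */
}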
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
index e96156d..301c60c 100644
--- a/lib/el3_runtime/aarch32/context_mgmt.c
+++ b/lib/el3_runtime/aarch32/context_mgmt.c
@@ -149,11 +149,9 @@
 		trf_init_el3();
 	}
 
-	/*
-	 * Also applies to PMU < v3. The PMU is only disabled for EL3 and Secure
-	 * state execution. This does not affect lower NS ELs.
-	 */
-	pmuv3_init_el3();
+	if (read_feat_pmuv3_id_field() >= 3U) {
+		pmuv3_init_el3();
+	}
 #endif /*  IMAGE_BL32 */
 }
 
diff --git a/lib/extensions/pmuv3/aarch32/pmuv3.c b/lib/extensions/pmuv3/aarch32/pmuv3.c
index effb7e0..456a48e 100644
--- a/lib/extensions/pmuv3/aarch32/pmuv3.c
+++ b/lib/extensions/pmuv3/aarch32/pmuv3.c
@@ -25,10 +25,6 @@
 	return sdcr;
 }
 
-/*
- * Applies to all PMU versions. Name is PMUv3 for compatibility with aarch64 and
- * to not clash with platforms which reuse the PMU name
- */
 void pmuv3_init_el3(void)
 {
 	u_register_t sdcr = read_sdcr();
diff --git a/lib/gpt_rme/gpt_rme.c b/lib/gpt_rme/gpt_rme.c
index 36f7a51..72e905e 100644
--- a/lib/gpt_rme/gpt_rme.c
+++ b/lib/gpt_rme/gpt_rme.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, Arm Limited. All rights reserved.
+ * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -21,7 +21,7 @@
 #include <lib/xlat_tables/xlat_tables_v2.h>
 
 #if !ENABLE_RME
-#error "ENABLE_RME must be enabled to use the GPT library."
+#error "ENABLE_RME must be enabled to use the GPT library"
 #endif
 
 /*
@@ -58,7 +58,7 @@
 static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P};
 
 /*
- * This structure contains GPT configuration data.
+ * This structure contains GPT configuration data
  */
 typedef struct {
 	uintptr_t plat_gpt_l0_base;
@@ -70,7 +70,7 @@
 
 static gpt_config_t gpt_config;
 
-/* These variables are used during initialization of the L1 tables. */
+/* These variables are used during initialization of the L1 tables */
 static unsigned int gpt_next_l1_tbl_idx;
 static uintptr_t gpt_l1_tbl;
 
@@ -91,7 +91,7 @@
  * Return
  *   true for a valid GPI, false for an invalid one.
  */
-static bool gpt_is_gpi_valid(unsigned int gpi)
+static bool is_gpi_valid(unsigned int gpi)
 {
 	if ((gpi == GPT_GPI_NO_ACCESS) || (gpi == GPT_GPI_ANY) ||
 	    ((gpi >= GPT_GPI_SECURE) && (gpi <= GPT_GPI_REALM))) {
@@ -112,8 +112,8 @@
  * Return
  *   True if PAS regions overlap, false if they do not.
  */
-static bool gpt_check_pas_overlap(uintptr_t base_1, size_t size_1,
-				  uintptr_t base_2, size_t size_2)
+static bool check_pas_overlap(uintptr_t base_1, size_t size_1,
+			      uintptr_t base_2, size_t size_2)
 {
 	if (((base_1 + size_1) > base_2) && ((base_2 + size_2) > base_1)) {
 		return true;
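For intuition, check_pas_overlap() treats each PAS as the half-open interval [base, base + size): two regions overlap exactly when each one starts below the other's end. A standalone restatement with made-up values:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool overlaps(uintptr_t b1, size_t s1, uintptr_t b2, size_t s2)
{
	return ((b1 + s1) > b2) && ((b2 + s2) > b1);
}

int main(void)
{
	assert(overlaps(0x0000, 0x3000, 0x2000, 0x2000));	/* share [0x2000, 0x3000) */
	assert(!overlaps(0x0000, 0x2000, 0x2000, 0x1000));	/* adjacent only */
	return 0;
}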
@@ -133,13 +133,13 @@
  * Return
  *   True if a PAS region occupies the L0 region in question, false if not.
  */
-static bool gpt_does_previous_pas_exist_here(unsigned int l0_idx,
-					     pas_region_t *pas_regions,
-					     unsigned int pas_idx)
+static bool does_previous_pas_exist_here(unsigned int l0_idx,
+					 pas_region_t *pas_regions,
+					 unsigned int pas_idx)
 {
-	/* Iterate over PAS regions up to pas_idx. */
+	/* Iterate over PAS regions up to pas_idx */
 	for (unsigned int i = 0U; i < pas_idx; i++) {
-		if (gpt_check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx),
+		if (check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx),
 		    GPT_L0GPTSZ_ACTUAL_SIZE,
 		    pas_regions[i].base_pa, pas_regions[i].size)) {
 			return true;
@@ -164,8 +164,8 @@
  *   Negative Linux error code in the event of a failure, number of L1 regions
  *   required when successful.
  */
-static int gpt_validate_pas_mappings(pas_region_t *pas_regions,
-				     unsigned int pas_region_cnt)
+static int validate_pas_mappings(pas_region_t *pas_regions,
+				 unsigned int pas_region_cnt)
 {
 	unsigned int idx;
 	unsigned int l1_cnt = 0U;
@@ -176,18 +176,18 @@
 	assert(pas_region_cnt != 0U);
 
 	for (idx = 0U; idx < pas_region_cnt; idx++) {
-		/* Check for arithmetic overflow in region. */
+		/* Check for arithmetic overflow in region */
 		if ((ULONG_MAX - pas_regions[idx].base_pa) <
 		    pas_regions[idx].size) {
-			ERROR("[GPT] Address overflow in PAS[%u]!\n", idx);
+			ERROR("GPT: Address overflow in PAS[%u]!\n", idx);
 			return -EOVERFLOW;
 		}
 
-		/* Initial checks for PAS validity. */
+		/* Initial checks for PAS validity */
 		if (((pas_regions[idx].base_pa + pas_regions[idx].size) >
 		    GPT_PPS_ACTUAL_SIZE(gpt_config.t)) ||
-		    !gpt_is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
-			ERROR("[GPT] PAS[%u] is invalid!\n", idx);
+		    !is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
+			ERROR("GPT: PAS[%u] is invalid!\n", idx);
 			return -EFAULT;
 		}
 
@@ -196,12 +196,12 @@
 		 * start from idx + 1 instead of 0 since prior PAS mappings will
 		 * have already checked themselves against this one.
 		 */
-		for (unsigned int i = idx + 1; i < pas_region_cnt; i++) {
-			if (gpt_check_pas_overlap(pas_regions[idx].base_pa,
+		for (unsigned int i = idx + 1U; i < pas_region_cnt; i++) {
+			if (check_pas_overlap(pas_regions[idx].base_pa,
 			    pas_regions[idx].size,
 			    pas_regions[i].base_pa,
 			    pas_regions[i].size)) {
-				ERROR("[GPT] PAS[%u] overlaps with PAS[%u]\n",
+				ERROR("GPT: PAS[%u] overlaps with PAS[%u]\n",
 					i, idx);
 				return -EFAULT;
 			}
@@ -214,11 +214,12 @@
 		 * initialized.
 		 */
 		for (unsigned int i = GPT_L0_IDX(pas_regions[idx].base_pa);
-		     i <= GPT_L0_IDX(pas_regions[idx].base_pa + pas_regions[idx].size - 1);
+		     i <= GPT_L0_IDX(pas_regions[idx].base_pa +
+						pas_regions[idx].size - 1UL);
 		     i++) {
 			if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) &&
 			    (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) {
-				/* This descriptor is unused so continue. */
+				/* This descriptor is unused so continue */
 				continue;
 			}
 
@@ -226,18 +227,18 @@
 			 * This descriptor has been initialized in a previous
 			 * call to this function so cannot be initialized again.
 			 */
-			ERROR("[GPT] PAS[%u] overlaps with previous L0[%d]!\n",
+			ERROR("GPT: PAS[%u] overlaps with previous L0[%d]!\n",
 			      idx, i);
 			return -EFAULT;
 		}
 
-		/* Check for block mapping (L0) type. */
+		/* Check for block mapping (L0) type */
 		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
 		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
-			/* Make sure base and size are block-aligned. */
+			/* Make sure base and size are block-aligned */
 			if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) ||
 			    !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) {
-				ERROR("[GPT] PAS[%u] is not block-aligned!\n",
+				ERROR("GPT: PAS[%u] is not block-aligned!\n",
 				      idx);
 				return -EFAULT;
 			}
@@ -245,21 +246,21 @@
 			continue;
 		}
 
-		/* Check for granule mapping (L1) type. */
+		/* Check for granule mapping (L1) type */
 		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
 		    GPT_PAS_ATTR_MAP_TYPE_GRANULE) {
-			/* Make sure base and size are granule-aligned. */
+			/* Make sure base and size are granule-aligned */
 			if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) ||
 			    !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) {
-				ERROR("[GPT] PAS[%u] is not granule-aligned!\n",
+				ERROR("GPT: PAS[%u] is not granule-aligned!\n",
 				      idx);
 				return -EFAULT;
 			}
 
-			/* Find how many L1 tables this PAS occupies. */
+			/* Find how many L1 tables this PAS occupies */
 			pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa +
-				     pas_regions[idx].size - 1) -
-				     GPT_L0_IDX(pas_regions[idx].base_pa) + 1);
+				     pas_regions[idx].size - 1UL) -
+				     GPT_L0_IDX(pas_regions[idx].base_pa) + 1U);
 
 			/*
 			 * This creates a situation where, if multiple PAS
@@ -277,26 +278,26 @@
 			 * both for overlap against other PAS.
 			 */
 			if (pas_l1_cnt > 1) {
-				if (gpt_does_previous_pas_exist_here(
+				if (does_previous_pas_exist_here(
 				    GPT_L0_IDX(pas_regions[idx].base_pa +
-				    pas_regions[idx].size - 1),
+				    pas_regions[idx].size - 1UL),
 				    pas_regions, idx)) {
-					pas_l1_cnt = pas_l1_cnt - 1;
+					pas_l1_cnt--;
 				}
 			}
 
-			if (gpt_does_previous_pas_exist_here(
+			if (does_previous_pas_exist_here(
 			    GPT_L0_IDX(pas_regions[idx].base_pa),
 			    pas_regions, idx)) {
-				pas_l1_cnt = pas_l1_cnt - 1;
+				pas_l1_cnt--;
 			}
 
 			l1_cnt += pas_l1_cnt;
 			continue;
 		}
 
-		/* If execution reaches this point, mapping type is invalid. */
-		ERROR("[GPT] PAS[%u] has invalid mapping type 0x%x.\n", idx,
+		/* If execution reaches this point, mapping type is invalid */
+		ERROR("GPT: PAS[%u] has invalid mapping type 0x%x.\n", idx,
 		      GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
 		return -EINVAL;
 	}
@@ -314,8 +315,8 @@
  * Return
  *   Negative Linux error code in the event of a failure, 0 for success.
  */
-static int gpt_validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
-				  size_t l0_mem_size)
+static int validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
+				size_t l0_mem_size)
 {
 	size_t l0_alignment;
 
@@ -324,27 +325,29 @@
 	 * to work.
 	 */
 	if (pps > GPT_PPS_MAX) {
-		ERROR("[GPT] Invalid PPS: 0x%x\n", pps);
+		ERROR("GPT: Invalid PPS: 0x%x\n", pps);
 		return -EINVAL;
 	}
 	gpt_config.pps = pps;
 	gpt_config.t = gpt_t_lookup[pps];
 
-	/* Alignment must be the greater of 4k or l0 table size. */
+	/* Alignment must be the greater of 4KB or l0 table size */
 	l0_alignment = PAGE_SIZE_4KB;
 	if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) {
 		l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t);
 	}
 
-	/* Check base address. */
-	if ((l0_mem_base == 0U) || ((l0_mem_base & (l0_alignment - 1)) != 0U)) {
-		ERROR("[GPT] Invalid L0 base address: 0x%lx\n", l0_mem_base);
+	/* Check base address */
+	if ((l0_mem_base == 0UL) ||
+	   ((l0_mem_base & (l0_alignment - 1UL)) != 0UL)) {
+		ERROR("GPT: Invalid L0 base address: 0x%lx\n", l0_mem_base);
 		return -EFAULT;
 	}
 
-	/* Check size. */
+	/* Check size */
 	if (l0_mem_size < GPT_L0_TABLE_SIZE(gpt_config.t)) {
-		ERROR("[GPT] Inadequate L0 memory: need 0x%lx, have 0x%lx)\n",
+		ERROR("%sL0%s", "GPT: Inadequate ", " memory\n");
+		ERROR("      Expected 0x%lx bytes, got 0x%lx bytes\n",
 		      GPT_L0_TABLE_SIZE(gpt_config.t),
 		      l0_mem_size);
 		return -ENOMEM;
@@ -365,8 +368,8 @@
  * Return
  *   Negative Linux error code in the event of a failure, 0 for success.
  */
-static int gpt_validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
-				  unsigned int l1_gpt_cnt)
+static int validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
+				unsigned int l1_gpt_cnt)
 {
 	size_t l1_gpt_mem_sz;
 
@@ -376,31 +379,31 @@
 		return -EPERM;
 	}
 
-	/* Make sure L1 tables are aligned to their size. */
-	if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1)) != 0U) {
-		ERROR("[GPT] Unaligned L1 GPT base address: 0x%lx\n",
+	/* Make sure L1 tables are aligned to their size */
+	if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1UL)) != 0UL) {
+		ERROR("GPT: Unaligned L1 GPT base address: 0x%"PRIxPTR"\n",
 		      l1_mem_base);
 		return -EFAULT;
 	}
 
-	/* Get total memory needed for L1 tables. */
+	/* Get total memory needed for L1 tables */
 	l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p);
 
-	/* Check for overflow. */
+	/* Check for overflow */
 	if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) {
-		ERROR("[GPT] Overflow calculating L1 memory size.\n");
+		ERROR("GPT: Overflow calculating L1 memory size\n");
 		return -ENOMEM;
 	}
 
-	/* Make sure enough space was supplied. */
+	/* Make sure enough space was supplied */
 	if (l1_mem_size < l1_gpt_mem_sz) {
-		ERROR("[GPT] Inadequate memory for L1 GPTs. ");
-		ERROR("      Expected 0x%lx bytes. Got 0x%lx bytes\n",
+		ERROR("%sL1 GPTs%s", "GPT: Inadequate ", " memory\n");
+		ERROR("      Expected 0x%lx bytes, got 0x%lx bytes\n",
 		      l1_gpt_mem_sz, l1_mem_size);
 		return -ENOMEM;
 	}
 
-	VERBOSE("[GPT] Requested 0x%lx bytes for L1 GPTs.\n", l1_gpt_mem_sz);
+	VERBOSE("GPT: Requested 0x%lx bytes for L1 GPTs\n", l1_gpt_mem_sz);
 	return 0;
 }
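Worked numbers for the sizing check above, assuming S = 30 (1GB L0 regions) and p = 12 (4KB granules): each L1 table holds 0x4000 eight-byte entries, so GPT_L1_TABLE_SIZE(p) is 128KB and, for example, l1_gpt_cnt = 5 needs l1_gpt_mem_sz = 640KB. Dividing the product back by the table size catches a multiplication that wrapped.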
 
@@ -412,7 +415,7 @@
  *   *pas		Pointer to the structure defining the PAS region to
  *			initialize.
  */
-static void gpt_generate_l0_blk_desc(pas_region_t *pas)
+static void generate_l0_blk_desc(pas_region_t *pas)
 {
 	uint64_t gpt_desc;
 	unsigned int end_idx;
@@ -424,7 +427,7 @@
 
 	/*
 	 * Checking of PAS parameters has already been done in
-	 * gpt_validate_pas_mappings so no need to check the same things again.
+	 * validate_pas_mappings so no need to check the same things again.
 	 */
 
 	l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base;
@@ -442,10 +445,10 @@
 	 */
 	end_idx = GPT_L0_IDX(pas->base_pa + pas->size);
 
-	/* Generate the needed block descriptors. */
+	/* Generate the needed block descriptors */
 	for (; idx < end_idx; idx++) {
 		l0_gpt_arr[idx] = gpt_desc;
-		VERBOSE("[GPT] L0 entry (BLOCK) index %u [%p]: GPI = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
+		VERBOSE("GPT: L0 entry (BLOCK) index %u [%p]: GPI = 0x%"PRIx64" (0x%"PRIx64")\n",
 			idx, &l0_gpt_arr[idx],
 			(gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
 			GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
@@ -465,7 +468,7 @@
  * Return
  *   The PA of the end of the current range.
  */
-static uintptr_t gpt_get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
+static uintptr_t get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
 {
 	uintptr_t cur_idx;
 	uintptr_t end_idx;
@@ -492,11 +495,11 @@
  *   first		Address of first granule in range.
  *   last		Address of last granule in range (inclusive).
  */
-static void gpt_fill_l1_tbl(uint64_t gpi, uint64_t *l1, uintptr_t first,
+static void fill_l1_tbl(uint64_t gpi, uint64_t *l1, uintptr_t first,
 			    uintptr_t last)
 {
 	uint64_t gpi_field = GPT_BUILD_L1_DESC(gpi);
-	uint64_t gpi_mask = 0xFFFFFFFFFFFFFFFF;
+	uint64_t gpi_mask = ULONG_MAX;
 
 	assert(first <= last);
 	assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U);
@@ -504,25 +507,25 @@
 	assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));
 	assert(l1 != NULL);
 
-	/* Shift the mask if we're starting in the middle of an L1 entry. */
+	/* Shift the mask if we're starting in the middle of an L1 entry */
 	gpi_mask = gpi_mask << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2);
 
-	/* Fill out each L1 entry for this region. */
+	/* Fill out each L1 entry for this region */
 	for (unsigned int i = GPT_L1_IDX(gpt_config.p, first);
 	     i <= GPT_L1_IDX(gpt_config.p, last); i++) {
-		/* Account for stopping in the middle of an L1 entry. */
+		/* Account for stopping in the middle of an L1 entry */
 		if (i == GPT_L1_IDX(gpt_config.p, last)) {
-			gpi_mask &= (gpi_mask >> ((15 -
+			gpi_mask &= (gpi_mask >> ((15U -
 				    GPT_L1_GPI_IDX(gpt_config.p, last)) << 2));
 		}
 
-		/* Write GPI values. */
+		/* Write GPI values */
 		assert((l1[i] & gpi_mask) ==
 		       (GPT_BUILD_L1_DESC(GPT_GPI_ANY) & gpi_mask));
 		l1[i] = (l1[i] & ~gpi_mask) | (gpi_mask & gpi_field);
 
-		/* Reset mask. */
-		gpi_mask = 0xFFFFFFFFFFFFFFFF;
+		/* Reset mask */
+		gpi_mask = ULONG_MAX;
 	}
 }
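Each 64-bit L1 entry holds 16 four-bit GPI fields, so the masking in fill_l1_tbl() selects a run of nibbles within an entry. A standalone sketch of the same arithmetic, with illustrative start/stop indices:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = UINT64_MAX;
	unsigned int first_gpi_idx = 3U;	/* start at nibble 3 of the entry */
	unsigned int last_gpi_idx = 10U;	/* stop at nibble 10, inclusive */

	mask <<= (first_gpi_idx << 2);			/* drop nibbles 0..2 */
	mask &= (mask >> ((15U - last_gpi_idx) << 2));	/* drop nibbles 11..15 */

	/* Prints 0x00000ffffffff000: nibbles 3..10 selected */
	printf("0x%016llx\n", (unsigned long long)mask);
	return 0;
}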
 
@@ -537,14 +540,14 @@
  * Return
  *   Pointer to the next available L1 table.
  */
-static uint64_t *gpt_get_new_l1_tbl(void)
+static uint64_t *get_new_l1_tbl(void)
 {
-	/* Retrieve the next L1 table. */
+	/* Retrieve the next L1 table */
 	uint64_t *l1 = (uint64_t *)((uint64_t)(gpt_l1_tbl) +
 		       (GPT_L1_TABLE_SIZE(gpt_config.p) *
 		       gpt_next_l1_tbl_idx));
 
-	/* Increment L1 counter. */
+	/* Increment L1 counter */
 	gpt_next_l1_tbl_idx++;
 
 	/* Initialize all GPIs to GPT_GPI_ANY */
@@ -563,7 +566,7 @@
  * Parameters
  *   *pas		Pointer to the structure defining the PAS region.
  */
-static void gpt_generate_l0_tbl_desc(pas_region_t *pas)
+static void generate_l0_tbl_desc(pas_region_t *pas)
 {
 	uintptr_t end_pa;
 	uintptr_t cur_pa;
@@ -577,7 +580,7 @@
 
 	/*
 	 * Checking of PAS parameters has already been done in
-	 * gpt_validate_pas_mappings so no need to check the same things again.
+	 * validate_pas_mappings so no need to check the same things again.
 	 */
 
 	end_pa = pas->base_pa + pas->size;
@@ -586,7 +589,7 @@
 	/* We start working from the granule at base PA */
 	cur_pa = pas->base_pa;
 
-	/* Iterate over each L0 region in this memory range. */
+	/* Iterate over each L0 region in this memory range */
 	for (l0_idx = GPT_L0_IDX(pas->base_pa);
 	     l0_idx <= GPT_L0_IDX(end_pa - 1U);
 	     l0_idx++) {
@@ -596,25 +599,23 @@
 		 * need to create one.
 		 */
 		if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) {
-			/* Get the L1 array from the L0 entry. */
+			/* Get the L1 array from the L0 entry */
 			l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]);
 		} else {
-			/* Get a new L1 table from the L1 memory space. */
-			l1_gpt_arr = gpt_get_new_l1_tbl();
+			/* Get a new L1 table from the L1 memory space */
+			l1_gpt_arr = get_new_l1_tbl();
 
-			/* Fill out the L0 descriptor and flush it. */
+			/* Fill out the L0 descriptor and flush it */
 			l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr);
 		}
 
-		VERBOSE("[GPT] L0 entry (TABLE) index %u [%p] ==> L1 Addr 0x%llx (0x%" PRIx64 ")\n",
-			l0_idx, &l0_gpt_base[l0_idx],
-			(unsigned long long)(l1_gpt_arr),
-			l0_gpt_base[l0_idx]);
+		VERBOSE("GPT: L0 entry (TABLE) index %u [%p] ==> L1 Addr %p (0x%"PRIx64")\n",
+			l0_idx, &l0_gpt_base[l0_idx], l1_gpt_arr, l0_gpt_base[l0_idx]);
 
 		/*
 		 * Determine the PA of the last granule in this L0 descriptor.
 		 */
-		last_gran_pa = gpt_get_l1_end_pa(cur_pa, end_pa) -
+		last_gran_pa = get_l1_end_pa(cur_pa, end_pa) -
 			       GPT_PGS_ACTUAL_SIZE(gpt_config.p);
 
 		/*
@@ -622,11 +623,11 @@
 		 * function needs the addresses of the first granule and last
 		 * granule in the range.
 		 */
-		gpt_fill_l1_tbl(GPT_PAS_ATTR_GPI(pas->attrs), l1_gpt_arr,
+		fill_l1_tbl(GPT_PAS_ATTR_GPI(pas->attrs), l1_gpt_arr,
 				cur_pa, last_gran_pa);
 
-		/* Advance cur_pa to first granule in next L0 region. */
-		cur_pa = gpt_get_l1_end_pa(cur_pa, end_pa);
+		/* Advance cur_pa to first granule in next L0 region */
+		cur_pa = get_l1_end_pa(cur_pa, end_pa);
 	}
 }
 
@@ -649,19 +650,19 @@
 	uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base;
 
 	assert(pas != NULL);
-	assert(pas_count > 0);
+	assert(pas_count != 0U);
 
-	/* Initial start and end values. */
+	/* Initial start and end values */
 	start_idx = GPT_L0_IDX(pas[0].base_pa);
-	end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1);
+	end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1UL);
 
-	/* Find lowest and highest L0 indices used in this PAS array. */
-	for (idx = 1; idx < pas_count; idx++) {
+	/* Find lowest and highest L0 indices used in this PAS array */
+	for (idx = 1U; idx < pas_count; idx++) {
 		if (GPT_L0_IDX(pas[idx].base_pa) < start_idx) {
 			start_idx = GPT_L0_IDX(pas[idx].base_pa);
 		}
-		if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1) > end_idx) {
-			end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1);
+		if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL) > end_idx) {
+			end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL);
 		}
 	}
 
@@ -670,7 +671,7 @@
 	 * the end index value.
 	 */
 	flush_dcache_range((uintptr_t)&l0[start_idx],
-			   ((end_idx + 1) - start_idx) * sizeof(uint64_t));
+			   ((end_idx + 1U) - start_idx) * sizeof(uint64_t));
 }
 
 /*
@@ -689,8 +690,8 @@
 	 * Granule tables must be initialised before enabling
 	 * granule protection.
 	 */
-	if (gpt_config.plat_gpt_l0_base == 0U) {
-		ERROR("[GPT] Tables have not been initialized!\n");
+	if (gpt_config.plat_gpt_l0_base == 0UL) {
+		ERROR("GPT: Tables have not been initialized!\n");
 		return -EPERM;
 	}
 
@@ -711,7 +712,7 @@
 	 */
 	gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_IS);
 
-	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA. */
+	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA */
 	gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
 	gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);
 
@@ -727,7 +728,7 @@
 	/* Enable GPT */
 	gpccr_el3 |= GPCCR_GPC_BIT;
 
-	/* TODO: Configure GPCCR_EL3_GPCP for Fault control. */
+	/* TODO: Configure GPCCR_EL3_GPCP for Fault control */
 	write_gpccr_el3(gpccr_el3);
 	isb();
 	tlbipaallos();
@@ -769,16 +770,16 @@
 	int ret;
 	uint64_t gpt_desc;
 
-	/* Ensure that MMU and Data caches are enabled. */
+	/* Ensure that MMU and Data caches are enabled */
 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
 
-	/* Validate other parameters. */
-	ret = gpt_validate_l0_params(pps, l0_mem_base, l0_mem_size);
+	/* Validate other parameters */
+	ret = validate_l0_params(pps, l0_mem_base, l0_mem_size);
 	if (ret != 0) {
 		return ret;
 	}
 
-	/* Create the descriptor to initialize L0 entries with. */
+	/* Create the descriptor to initialize L0 entries with */
 	gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY);
 
 	/* Iterate through all L0 entries */
@@ -786,11 +787,11 @@
 		((uint64_t *)l0_mem_base)[i] = gpt_desc;
 	}
 
-	/* Flush updated L0 tables to memory. */
+	/* Flush updated L0 tables to memory */
 	flush_dcache_range((uintptr_t)l0_mem_base,
 			   (size_t)GPT_L0_TABLE_SIZE(gpt_config.t));
 
-	/* Stash the L0 base address once initial setup is complete. */
+	/* Stash the L0 base address once initial setup is complete */
 	gpt_config.plat_gpt_l0_base = l0_mem_base;
 
 	return 0;
@@ -824,79 +825,79 @@
 	int ret;
 	int l1_gpt_cnt;
 
-	/* Ensure that MMU and Data caches are enabled. */
+	/* Ensure that MMU and Data caches are enabled */
 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
 
-	/* PGS is needed for gpt_validate_pas_mappings so check it now. */
+	/* PGS is needed for validate_pas_mappings so check it now */
 	if (pgs > GPT_PGS_MAX) {
-		ERROR("[GPT] Invalid PGS: 0x%x\n", pgs);
+		ERROR("GPT: Invalid PGS: 0x%x\n", pgs);
 		return -EINVAL;
 	}
 	gpt_config.pgs = pgs;
 	gpt_config.p = gpt_p_lookup[pgs];
 
-	/* Make sure L0 tables have been initialized. */
+	/* Make sure L0 tables have been initialized */
 	if (gpt_config.plat_gpt_l0_base == 0U) {
-		ERROR("[GPT] L0 tables must be initialized first!\n");
+		ERROR("GPT: L0 tables must be initialized first!\n");
 		return -EPERM;
 	}
 
-	/* Check if L1 GPTs are required and how many. */
-	l1_gpt_cnt = gpt_validate_pas_mappings(pas_regions, pas_count);
+	/* Check if L1 GPTs are required and how many */
+	l1_gpt_cnt = validate_pas_mappings(pas_regions, pas_count);
 	if (l1_gpt_cnt < 0) {
 		return l1_gpt_cnt;
 	}
 
-	VERBOSE("[GPT] %u L1 GPTs requested.\n", l1_gpt_cnt);
+	VERBOSE("GPT: %i L1 GPTs requested\n", l1_gpt_cnt);
 
-	/* If L1 tables are needed then validate the L1 parameters. */
+	/* If L1 tables are needed then validate the L1 parameters */
 	if (l1_gpt_cnt > 0) {
-		ret = gpt_validate_l1_params(l1_mem_base, l1_mem_size,
-		      l1_gpt_cnt);
+		ret = validate_l1_params(l1_mem_base, l1_mem_size,
+					(unsigned int)l1_gpt_cnt);
 		if (ret != 0) {
 			return ret;
 		}
 
-		/* Set up parameters for L1 table generation. */
+		/* Set up parameters for L1 table generation */
 		gpt_l1_tbl = l1_mem_base;
 		gpt_next_l1_tbl_idx = 0U;
 	}
 
-	INFO("[GPT] Boot Configuration\n");
+	INFO("GPT: Boot Configuration\n");
 	INFO("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
 	INFO("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
 	INFO("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
-	INFO("  PAS count: 0x%x\n", pas_count);
-	INFO("  L0 base:   0x%lx\n", gpt_config.plat_gpt_l0_base);
+	INFO("  PAS count: %u\n", pas_count);
+	INFO("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);
 
-	/* Generate the tables in memory. */
+	/* Generate the tables in memory */
 	for (unsigned int idx = 0U; idx < pas_count; idx++) {
-		INFO("[GPT] PAS[%u]: base 0x%lx, size 0x%lx, GPI 0x%x, type 0x%x\n",
-		     idx, pas_regions[idx].base_pa, pas_regions[idx].size,
-		     GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
-		     GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
+		VERBOSE("GPT: PAS[%u]: base 0x%"PRIxPTR"\tsize 0x%lx\tGPI 0x%x\ttype 0x%x\n",
+			idx, pas_regions[idx].base_pa, pas_regions[idx].size,
+			GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
+			GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
 
 		/* Check if a block or table descriptor is required */
 		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
 		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
-			gpt_generate_l0_blk_desc(&pas_regions[idx]);
+			generate_l0_blk_desc(&pas_regions[idx]);
 
 		} else {
-			gpt_generate_l0_tbl_desc(&pas_regions[idx]);
+			generate_l0_tbl_desc(&pas_regions[idx]);
 		}
 	}
 
-	/* Flush modified L0 tables. */
+	/* Flush modified L0 tables */
 	flush_l0_for_pas_array(pas_regions, pas_count);
 
-	/* Flush L1 tables if needed. */
+	/* Flush L1 tables if needed */
 	if (l1_gpt_cnt > 0) {
 		flush_dcache_range(l1_mem_base,
 				   GPT_L1_TABLE_SIZE(gpt_config.p) *
 				   l1_gpt_cnt);
 	}
 
-	/* Make sure that all the entries are written to the memory. */
+	/* Make sure that all the entries are written to the memory */
 	dsbishst();
 	tlbipaallos();
 	dsb();
@@ -920,12 +921,12 @@
 {
 	u_register_t reg;
 
-	/* Ensure that MMU and Data caches are enabled. */
+	/* Ensure that MMU and Data caches are enabled */
 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
 
-	/* Ensure GPC are already enabled. */
+	/* Ensure GPC are already enabled */
 	if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0U) {
-		ERROR("[GPT] Granule protection checks are not enabled!\n");
+		ERROR("GPT: Granule protection checks are not enabled!\n");
 		return -EPERM;
 	}
 
@@ -938,18 +939,18 @@
 				      GPTBR_BADDR_MASK) <<
 				      GPTBR_BADDR_VAL_SHIFT;
 
-	/* Read GPCCR to get PGS and PPS values. */
+	/* Read GPCCR to get PGS and PPS values */
 	reg = read_gpccr_el3();
 	gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK;
 	gpt_config.t = gpt_t_lookup[gpt_config.pps];
 	gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK;
 	gpt_config.p = gpt_p_lookup[gpt_config.pgs];
 
-	VERBOSE("[GPT] Runtime Configuration\n");
+	VERBOSE("GPT: Runtime Configuration\n");
 	VERBOSE("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
 	VERBOSE("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
 	VERBOSE("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
-	VERBOSE("  L0 base:   0x%lx\n", gpt_config.plat_gpt_l0_base);
+	VERBOSE("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);
 
 	return 0;
 }
@@ -963,7 +964,7 @@
 
 /*
  * A helper to write the value (target_pas << gpi_shift) to the index of
- * the gpt_l1_addr
+ * the gpt_l1_addr.
  */
 static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
 			     unsigned int gpi_shift, unsigned int idx,
@@ -976,7 +977,7 @@
 
 /*
  * Helper to retrieve the gpt_l1_* information from the base address
- * returned in gpi_info
+ * returned in gpi_info.
  */
 static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
 {
@@ -985,12 +986,12 @@
 	gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
 	gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
 	if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
-		VERBOSE("[GPT] Granule is not covered by a table descriptor!\n");
-		VERBOSE("      Base=0x%" PRIx64 "\n", base);
+		VERBOSE("GPT: Granule is not covered by a table descriptor!\n");
+		VERBOSE("      Base=0x%"PRIx64"\n", base);
 		return -EINVAL;
 	}
 
-	/* Get the table index and GPI shift from PA. */
+	/* Get the table index and GPI shift from PA */
 	gpi_info->gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
 	gpi_info->idx = GPT_L1_IDX(gpt_config.p, base);
 	gpi_info->gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;
@@ -1025,36 +1026,36 @@
 	int res;
 	unsigned int target_pas;
 
-	/* Ensure that the tables have been set up before taking requests. */
+	/* Ensure that the tables have been set up before taking requests */
 	assert(gpt_config.plat_gpt_l0_base != 0UL);
 
-	/* Ensure that caches are enabled. */
+	/* Ensure that caches are enabled */
 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);
 
 	/* Delegate request can only come from REALM or SECURE */
 	assert(src_sec_state == SMC_FROM_REALM ||
 	       src_sec_state == SMC_FROM_SECURE);
 
-	/* See if this is a single or a range of granule transition. */
+	/* See if this is a single or a range of granule transition */
 	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
 		return -EINVAL;
 	}
 
 	/* Check that base and size are valid */
 	if ((ULONG_MAX - base) < size) {
-		VERBOSE("[GPT] Transition request address overflow!\n");
-		VERBOSE("      Base=0x%" PRIx64 "\n", base);
+		VERBOSE("GPT: Transition request address overflow!\n");
+		VERBOSE("      Base=0x%"PRIx64"\n", base);
 		VERBOSE("      Size=0x%lx\n", size);
 		return -EINVAL;
 	}
 
-	/* Make sure base and size are valid. */
-	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
-	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
+	/* Make sure base and size are valid */
+	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
+	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
 	    (size == 0UL) ||
 	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
-		VERBOSE("[GPT] Invalid granule transition address range!\n");
-		VERBOSE("      Base=0x%" PRIx64 "\n", base);
+		VERBOSE("GPT: Invalid granule transition address range!\n");
+		VERBOSE("      Base=0x%"PRIx64"\n", base);
 		VERBOSE("      Size=0x%lx\n", size);
 		return -EINVAL;
 	}
@@ -1078,7 +1079,7 @@
 
 	/* Check that the current address is in NS state */
 	if (gpi_info.gpi != GPT_GPI_NS) {
-		VERBOSE("[GPT] Only Granule in NS state can be delegated.\n");
+		VERBOSE("GPT: Only Granule in NS state can be delegated.\n");
 		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
 			gpi_info.gpi);
 		spin_unlock(&gpt_lock);
@@ -1094,7 +1095,7 @@
 	/*
 	 * In order to maintain mutual distrust between Realm and Secure
 	 * states, remove any data speculatively fetched into the target
-	 * physical address space. Issue DC CIPAPA over address range
+	 * physical address space. Issue DC CIPAPA over address range.
 	 */
 	if (is_feat_mte2_supported()) {
 		flush_dcache_to_popa_range_mte2(nse | base,
@@ -1121,14 +1122,14 @@
 					   GPT_PGS_ACTUAL_SIZE(gpt_config.p));
 	}
 
-	/* Unlock access to the L1 tables. */
+	/* Unlock access to the L1 tables */
 	spin_unlock(&gpt_lock);
 
 	/*
 	 * The isb() will be done as part of context
-	 * synchronization when returning to lower EL
+	 * synchronization when returning to lower EL.
 	 */
-	VERBOSE("[GPT] Granule 0x%" PRIx64 ", GPI 0x%x->0x%x\n",
+	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
 		base, gpi_info.gpi, target_pas);
 
 	return 0;
@@ -1157,36 +1158,36 @@
 	uint64_t nse;
 	int res;
 
-	/* Ensure that the tables have been set up before taking requests. */
+	/* Ensure that the tables have been set up before taking requests */
 	assert(gpt_config.plat_gpt_l0_base != 0UL);
 
-	/* Ensure that MMU and caches are enabled. */
+	/* Ensure that MMU and caches are enabled */
 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);
 
 	/* Delegate request can only come from REALM or SECURE */
 	assert(src_sec_state == SMC_FROM_REALM ||
 	       src_sec_state == SMC_FROM_SECURE);
 
-	/* See if this is a single or a range of granule transition. */
+	/* See if this is a single or a range of granule transition */
 	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
 		return -EINVAL;
 	}
 
 	/* Check that base and size are valid */
 	if ((ULONG_MAX - base) < size) {
-		VERBOSE("[GPT] Transition request address overflow!\n");
-		VERBOSE("      Base=0x%" PRIx64 "\n", base);
+		VERBOSE("GPT: Transition request address overflow!\n");
+		VERBOSE("      Base=0x%"PRIx64"\n", base);
 		VERBOSE("      Size=0x%lx\n", size);
 		return -EINVAL;
 	}
 
-	/* Make sure base and size are valid. */
-	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
-	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
+	/* Make sure base and size are valid */
+	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
+	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
 	    (size == 0UL) ||
 	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
-		VERBOSE("[GPT] Invalid granule transition address range!\n");
-		VERBOSE("      Base=0x%" PRIx64 "\n", base);
+		VERBOSE("GPT: Invalid granule transition address range!\n");
+		VERBOSE("      Base=0x%"PRIx64"\n", base);
 		VERBOSE("      Size=0x%lx\n", size);
 		return -EINVAL;
 	}
@@ -1209,8 +1210,8 @@
 	     gpi_info.gpi != GPT_GPI_REALM) ||
 	    (src_sec_state == SMC_FROM_SECURE &&
 	     gpi_info.gpi != GPT_GPI_SECURE)) {
-		VERBOSE("[GPT] Only Granule in REALM or SECURE state can be undelegated.\n");
-		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
+		VERBOSE("GPT: Only Granule in REALM or SECURE state can be undelegated.\n");
+		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
 			gpi_info.gpi);
 		spin_unlock(&gpt_lock);
 		return -EPERM;
@@ -1272,9 +1273,9 @@
 
 	/*
 	 * The isb() will be done as part of context
-	 * synchronization when returning to lower EL
+	 * synchronization when returning to lower EL.
 	 */
-	VERBOSE("[GPT] Granule 0x%" PRIx64 ", GPI 0x%x->0x%x\n",
+	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
 		base, gpi_info.gpi, GPT_GPI_NS);
 
 	return 0;
diff --git a/lib/gpt_rme/gpt_rme_private.h b/lib/gpt_rme/gpt_rme_private.h
index 3c817f3..b2a5dae 100644
--- a/lib/gpt_rme/gpt_rme_private.h
+++ b/lib/gpt_rme/gpt_rme_private.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, Arm Limited. All rights reserved.
+ * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -15,13 +15,13 @@
 /* GPT descriptor definitions                                                 */
 /******************************************************************************/
 
-/* GPT level 0 descriptor bit definitions. */
+/* GPT level 0 descriptor bit definitions */
 #define GPT_L0_TYPE_MASK		UL(0xF)
 #define GPT_L0_TYPE_SHIFT		U(0)
 
-/* For now, we don't support contiguous descriptors, only table and block. */
-#define GPT_L0_TYPE_TBL_DESC		UL(0x3)
-#define GPT_L0_TYPE_BLK_DESC		UL(0x1)
+/* For now, we don't support contiguous descriptors, only table and block */
+#define GPT_L0_TYPE_TBL_DESC		UL(3)
+#define GPT_L0_TYPE_BLK_DESC		UL(1)
 
 #define GPT_L0_TBL_DESC_L1ADDR_MASK	UL(0xFFFFFFFFFF)
 #define GPT_L0_TBL_DESC_L1ADDR_SHIFT	U(12)
@@ -57,7 +57,7 @@
 /* GPT platform configuration                                                 */
 /******************************************************************************/
 
-/* This value comes from GPCCR_EL3 so no externally supplied definition. */
+/* This value comes from GPCCR_EL3 so no externally supplied definition */
 #define GPT_L0GPTSZ		((unsigned int)((read_gpccr_el3() >> \
 				GPCCR_L0GPTSZ_SHIFT) & GPCCR_L0GPTSZ_MASK))
 
@@ -117,10 +117,10 @@
 	unsigned int gpi;
 } gpi_info_t;
 
-/* Max valid value for PGS. */
+/* Max valid value for PGS */
 #define GPT_PGS_MAX			(2U)
 
-/* Max valid value for PPS. */
+/* Max valid value for PPS */
 #define GPT_PPS_MAX			(6U)
 
 /******************************************************************************/
@@ -139,7 +139,7 @@
 #define GPT_L0_IDX_WIDTH(_t)		(((_t) > GPT_S_VAL) ? \
 					((_t) - GPT_S_VAL) : (0U))
 
-/* Bit shift for the L0 index field in a PA. */
+/* Bit shift for the L0 index field in a PA */
 #define GPT_L0_IDX_SHIFT		(GPT_S_VAL)
 
 /*
@@ -153,13 +153,13 @@
 #define GPT_L0_IDX_MASK(_t)		(0x3FFFFFUL >> (22U - \
 					(GPT_L0_IDX_WIDTH(_t))))
 
-/* Total number of L0 regions. */
+/* Total number of L0 regions */
 #define GPT_L0_REGION_COUNT(_t)		((GPT_L0_IDX_MASK(_t)) + 1U)
 
-/* Total size of each GPT L0 region in bytes. */
+/* Total size of each GPT L0 region in bytes */
 #define GPT_L0_REGION_SIZE		(1UL << (GPT_L0_IDX_SHIFT))
 
-/* Total size in bytes of the whole L0 table. */
+/* Total size in bytes of the whole L0 table */
 #define GPT_L0_TABLE_SIZE(_t)		((GPT_L0_REGION_COUNT(_t)) << 3U)
 
 /******************************************************************************/
@@ -175,7 +175,7 @@
  */
 #define GPT_L1_IDX_WIDTH(_p)		((GPT_S_VAL - 1U) - ((_p) + 3U))
 
-/* Bit shift for the L1 index field. */
+/* Bit shift for the L1 index field */
 #define GPT_L1_IDX_SHIFT(_p)		((_p) + 4U)
 
 /*
@@ -183,38 +183,38 @@
  *
  * The value 0x7FFFFF is 23 bits wide and is the maximum possible width of the
  * L1 index within a physical address. It is calculated by
- * ((s_max - 1) - (p_min + 4) + 1) where s_max is 39 for 512gb, the largest
+ * ((s_max - 1) - (p_min + 4) + 1) where s_max is 39 for 512GB, the largest
  * L0GPTSZ, and p_min is 12 for 4KB granules, the smallest PGS.
  */
 #define GPT_L1_IDX_MASK(_p)		(0x7FFFFFUL >> (23U - \
 					(GPT_L1_IDX_WIDTH(_p))))
 
-/* Bit shift for the index of the L1 GPI in a PA. */
+/* Bit shift for the index of the L1 GPI in a PA */
 #define GPT_L1_GPI_IDX_SHIFT(_p)	(_p)
 
-/* Mask for the index of the L1 GPI in a PA. */
+/* Mask for the index of the L1 GPI in a PA */
 #define GPT_L1_GPI_IDX_MASK		(0xF)
 
-/* Total number of entries in each L1 table. */
+/* Total number of entries in each L1 table */
 #define GPT_L1_ENTRY_COUNT(_p)		((GPT_L1_IDX_MASK(_p)) + 1U)
 
-/* Total size in bytes of each L1 table. */
+/* Total size in bytes of each L1 table */
 #define GPT_L1_TABLE_SIZE(_p)		((GPT_L1_ENTRY_COUNT(_p)) << 3U)
 
 /******************************************************************************/
 /* General helper macros                                                      */
 /******************************************************************************/
 
-/* Protected space actual size in bytes. */
+/* Protected space actual size in bytes */
 #define GPT_PPS_ACTUAL_SIZE(_t)	(1UL << (_t))
 
-/* Granule actual size in bytes. */
+/* Granule actual size in bytes */
 #define GPT_PGS_ACTUAL_SIZE(_p)	(1UL << (_p))
 
-/* L0 GPT region size in bytes. */
+/* L0 GPT region size in bytes */
 #define GPT_L0GPTSZ_ACTUAL_SIZE	(1UL << GPT_S_VAL)
 
-/* Get the index of the L0 entry from a physical address. */
+/* Get the index of the L0 entry from a physical address */
 #define GPT_L0_IDX(_pa)		((_pa) >> GPT_L0_IDX_SHIFT)
 
 /*
@@ -223,38 +223,38 @@
  */
 #define GPT_IS_L0_ALIGNED(_pa)	(((_pa) & (GPT_L0_REGION_SIZE - U(1))) == U(0))
 
-/* Get the type field from an L0 descriptor. */
+/* Get the type field from an L0 descriptor */
 #define GPT_L0_TYPE(_desc)	(((_desc) >> GPT_L0_TYPE_SHIFT) & \
 				GPT_L0_TYPE_MASK)
 
-/* Create an L0 block descriptor. */
+/* Create an L0 block descriptor */
 #define GPT_L0_BLK_DESC(_gpi)	(GPT_L0_TYPE_BLK_DESC | \
 				(((_gpi) & GPT_L0_BLK_DESC_GPI_MASK) << \
 				GPT_L0_BLK_DESC_GPI_SHIFT))
 
-/* Create an L0 table descriptor with an L1 table address. */
+/* Create an L0 table descriptor with an L1 table address */
 #define GPT_L0_TBL_DESC(_pa)	(GPT_L0_TYPE_TBL_DESC | ((uint64_t)(_pa) & \
 				(GPT_L0_TBL_DESC_L1ADDR_MASK << \
 				GPT_L0_TBL_DESC_L1ADDR_SHIFT)))
 
-/* Get the GPI from an L0 block descriptor. */
+/* Get the GPI from an L0 block descriptor */
 #define GPT_L0_BLKD_GPI(_desc)	(((_desc) >> GPT_L0_BLK_DESC_GPI_SHIFT) & \
 				GPT_L0_BLK_DESC_GPI_MASK)
 
-/* Get the L1 address from an L0 table descriptor. */
+/* Get the L1 address from an L0 table descriptor */
 #define GPT_L0_TBLD_ADDR(_desc)	((uint64_t *)(((_desc) & \
 				(GPT_L0_TBL_DESC_L1ADDR_MASK << \
 				GPT_L0_TBL_DESC_L1ADDR_SHIFT))))
 
-/* Get the index into the L1 table from a physical address. */
+/* Get the index into the L1 table from a physical address */
 #define GPT_L1_IDX(_p, _pa)	(((_pa) >> GPT_L1_IDX_SHIFT(_p)) & \
 				GPT_L1_IDX_MASK(_p))
 
-/* Get the index of the GPI within an L1 table entry from a physical address. */
+/* Get the index of the GPI within an L1 table entry from a physical address */
 #define GPT_L1_GPI_IDX(_p, _pa)	(((_pa) >> GPT_L1_GPI_IDX_SHIFT(_p)) & \
 				GPT_L1_GPI_IDX_MASK)
 
-/* Determine if an address is granule-aligned. */
+/* Determine if an address is granule-aligned */
 #define GPT_IS_L1_ALIGNED(_p, _pa) (((_pa) & (GPT_PGS_ACTUAL_SIZE(_p) - U(1))) \
 				   == U(0))
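To make the shift/mask plumbing concrete, a sketch of how a PA decomposes into the three indices, under assumed values S = 30 (1GB L0 regions) and p = 12 (4KB granules); the address is made up:

#include <stdint.h>
#include <stdio.h>

#define S	30U	/* assumed L0GPTSZ: 1GB L0 regions */
#define P	12U	/* assumed PGS: 4KB granules */

int main(void)
{
	uint64_t pa = 0x47E56A000ULL;	/* hypothetical physical address */

	uint64_t l0_idx  = pa >> S;			/* GPT_L0_IDX     -> 0x11  */
	/* 0x3FFF is GPT_L1_IDX_MASK(P) for these S and P values */
	uint64_t l1_idx  = (pa >> (P + 4U)) & 0x3FFFULL;/* GPT_L1_IDX     -> 0x7E5 */
	uint64_t gpi_idx = (pa >> P) & 0xFULL;		/* GPT_L1_GPI_IDX -> 0xA   */

	printf("L0 0x%llx, L1 0x%llx, GPI 0x%llx\n",
	       (unsigned long long)l0_idx, (unsigned long long)l1_idx,
	       (unsigned long long)gpi_idx);
	return 0;
}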
 
diff --git a/lib/locks/exclusive/aarch64/spinlock.S b/lib/locks/exclusive/aarch64/spinlock.S
index 5144bf7..77bb7fe 100644
--- a/lib/locks/exclusive/aarch64/spinlock.S
+++ b/lib/locks/exclusive/aarch64/spinlock.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2019, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -8,6 +8,8 @@
 
 	.globl	spin_lock
 	.globl	spin_unlock
+	.globl	bit_lock
+	.globl	bit_unlock
 
 #if USE_SPINLOCK_CAS
 #if !ARM_ARCH_AT_LEAST(8, 1)
@@ -73,3 +75,43 @@
 	stlr	wzr, [x0]
 	ret
 endfunc spin_unlock
+
+/*
+ * Atomic bit clear and set instructions require FEAT_LSE, which is
+ * mandatory from Armv8.1.
+ */
+#if ARM_ARCH_AT_LEAST(8, 1)
+
+/*
+ * Acquire the bitlock using an atomic bit set on the byte. If the value
+ * read back already has the bit set, monitor the address using load
+ * exclusive semantics and wait in WFE until the bit is cleared.
+ *
+ * void bit_lock(bitlock_t *lock, uint8_t mask);
+ */
+func bit_lock
+1:	ldsetab	w1, w2, [x0]	/* atomically set the bit, w2 = old value */
+	tst	w2, w1		/* was the lock bit already set? */
+	b.eq	2f		/* no: lock acquired */
+	ldxrb	w2, [x0]	/* monitor the lock byte */
+	tst	w2, w1
+	b.eq	1b		/* bit cleared meanwhile: retry the set */
+	wfe			/* wait for the unlocking store's event */
+	b	1b
+2:
+	ret
+endfunc bit_lock
+
+/*
+ * Release the bitlock by unconditionally clearing the bit with an atomic
+ * bit clear that has store-release semantics. The store generates an event
+ * for all cores waiting in WFE while the address is tracked by the global
+ * monitor.
+ *
+ * void bit_unlock(bitlock_t *lock, uint8_t mask);
+ */
+func bit_unlock
+	stclrlb	w1, [x0]
+	ret
+endfunc bit_unlock
+
+#endif /* ARM_ARCH_AT_LEAST(8, 1) */
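A C11 model of the two routines above, useful for reasoning about the acquire/release pairing (a sketch only; the firmware uses the assembly, not this):

#include <stdatomic.h>
#include <stdint.h>

typedef struct { _Atomic uint8_t lock; } c_bitlock_t;

static void c_bit_lock(c_bitlock_t *l, uint8_t mask)
{
	/* LDSETAB behaves as a fetch-or with acquire semantics */
	while ((atomic_fetch_or_explicit(&l->lock, mask,
					 memory_order_acquire) & mask) != 0U) {
		/* the assembly waits in WFE here instead of spinning */
	}
}

static void c_bit_unlock(c_bitlock_t *l, uint8_t mask)
{
	/* STCLRLB behaves as a fetch-and of ~mask with release semantics */
	atomic_fetch_and_explicit(&l->lock, (uint8_t)~mask,
				  memory_order_release);
}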