feat(rme): add register definitions and helper functions for FEAT_RME
This patch adds new register and bit definitions for the Armv9-A
Realm Management Extension (RME) as described in the Arm
document DDI0615 (https://developer.arm.com/documentation/ddi0615/latest).
The patch also adds TLB maintenance functions and a function to
detect the presence of the RME feature.
Signed-off-by: Zelalem Aweke <zelalem.aweke@arm.com>
Change-Id: I03d2af7ea41a20a9e8a362a36b8099e3b4d18a11
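For illustration, the pieces added here are intended to compose roughly as
follows. This is a sketch only: plat_init_gpt() is a hypothetical platform
hook invented for this example and is not part of the patch:

    if (get_armv9_2_feat_rme_support() !=
        ID_AA64PFR0_FEAT_RME_NOT_SUPPORTED) {
            plat_init_gpt();   /* hypothetical: program GPTBR_EL3/GPCCR_EL3 */
            tlbipaallos();     /* discard stale GPT information in TLBs */
            dsbsy();
            isb();
    }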
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index 1b3ae02..1053006 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -182,6 +182,11 @@
#define ID_AA64PFR0_CSV2_SHIFT U(56)
#define ID_AA64PFR0_CSV2_MASK ULL(0xf)
#define ID_AA64PFR0_CSV2_LENGTH U(4)
+#define ID_AA64PFR0_FEAT_RME_SHIFT U(52)
+#define ID_AA64PFR0_FEAT_RME_MASK ULL(0xf)
+#define ID_AA64PFR0_FEAT_RME_LENGTH U(4)
+#define ID_AA64PFR0_FEAT_RME_NOT_SUPPORTED U(0)
+#define ID_AA64PFR0_FEAT_RME_V1 U(1)
/* Exception level handling */
#define EL_IMPL_NONE ULL(0)
@@ -432,6 +437,9 @@
/* SCR definitions */
#define SCR_RES1_BITS ((U(1) << 4) | (U(1) << 5))
+#define SCR_NSE_SHIFT U(62)
+#define SCR_NSE_BIT (ULL(1) << SCR_NSE_SHIFT)
+#define SCR_GPF_BIT (UL(1) << 48)
#define SCR_TWEDEL_SHIFT U(30)
#define SCR_TWEDEL_MASK ULL(0xf)
#define SCR_HXEn_BIT (UL(1) << 38)
@@ -1093,6 +1101,90 @@
#define AMEVCNTVOFF1F_EL2 S3_4_C13_C11_7
/*******************************************************************************
+ * Realm management extension register definitions
+ ******************************************************************************/
+
+/* GPCCR_EL3 definitions */
+#define GPCCR_EL3 S3_6_C2_C1_6
+
+/* Least significant address bits protected by each entry in level 0 GPT */
+#define GPCCR_L0GPTSZ_SHIFT U(20)
+#define GPCCR_L0GPTSZ_MASK U(0xF)
+#define GPCCR_L0GPTSZ_30BITS U(0x0)
+#define GPCCR_L0GPTSZ_34BITS U(0x4)
+#define GPCCR_L0GPTSZ_36BITS U(0x6)
+#define GPCCR_L0GPTSZ_39BITS U(0x9)
+#define SET_GPCCR_L0GPTSZ(x) \
+ (((x) & GPCCR_L0GPTSZ_MASK) << GPCCR_L0GPTSZ_SHIFT)
+
+/* Granule protection check priority bit definitions */
+#define GPCCR_GPCP_SHIFT U(17)
+#define GPCCR_GPCP_BIT (ULL(1) << GPCCR_GPCP_SHIFT)
+
+/* Granule protection check bit definitions */
+#define GPCCR_GPC_SHIFT U(16)
+#define GPCCR_GPC_BIT (ULL(1) << GPCCR_GPC_SHIFT)
+
+/* Physical granule size bit definitions */
+#define GPCCR_PGS_SHIFT U(14)
+#define GPCCR_PGS_MASK U(0x3)
+#define GPCCR_PGS_4K U(0x0)
+#define GPCCR_PGS_16K U(0x2)
+#define GPCCR_PGS_64K U(0x1)
+#define SET_GPCCR_PGS(x) \
+ (((x) & GPCCR_PGS_MASK) << GPCCR_PGS_SHIFT)
+
+/* GPT fetch shareability attribute bit definitions */
+#define GPCCR_SH_SHIFT U(12)
+#define GPCCR_SH_MASK U(0x3)
+#define GPCCR_SH_NS U(0x0)
+#define GPCCR_SH_OS U(0x2)
+#define GPCCR_SH_IS U(0x3)
+#define SET_GPCCR_SH(x) \
+ (((x) & GPCCR_SH_MASK) << GPCCR_SH_SHIFT)
+
+/* GPT fetch outer cacheability attribute bit definitions */
+#define GPCCR_ORGN_SHIFT U(10)
+#define GPCCR_ORGN_MASK U(0x3)
+#define GPCCR_ORGN_NC U(0x0)
+#define GPCCR_ORGN_WB_RA_WA U(0x1)
+#define GPCCR_ORGN_WT_RA_NWA U(0x2)
+#define GPCCR_ORGN_WB_RA_NWA U(0x3)
+#define SET_GPCCR_ORGN(x) \
+ (((x) & GPCCR_ORGN_MASK) << GPCCR_ORGN_SHIFT)
+
+/* GPT fetch inner cacheability attribute bit definitions */
+#define GPCCR_IRGN_SHIFT U(8)
+#define GPCCR_IRGN_MASK U(0x3)
+#define GPCCR_IRGN_NC U(0x0)
+#define GPCCR_IRGN_WB_RA_WA U(0x1)
+#define GPCCR_IRGN_WT_RA_NWA U(0x2)
+#define GPCCR_IRGN_WB_RA_NWA U(0x3)
+#define SET_GPCCR_IRGN(x) \
+ (((x) & GPCCR_IRGN_MASK) << GPCCR_IRGN_SHIFT)
+
+/* Protected physical address size bit definitions */
+#define GPCCR_PPS_SHIFT U(0)
+#define GPCCR_PPS_MASK U(0x7)
+#define GPCCR_PPS_4GB U(0x0)
+#define GPCCR_PPS_64GB U(0x1)
+#define GPCCR_PPS_1TB U(0x2)
+#define GPCCR_PPS_4TB U(0x3)
+#define GPCCR_PPS_16TB U(0x4)
+#define GPCCR_PPS_256TB U(0x5)
+#define GPCCR_PPS_4PB U(0x6)
+#define SET_GPCCR_PPS(x) \
+ (((x) & GPCCR_PPS_MASK) << GPCCR_PPS_SHIFT)
+
+/* GPTBR_EL3 definitions */
+#define GPTBR_EL3 S3_6_C2_C1_4
+
+/* Base Address for the GPT bit definitions */
+#define GPTBR_BADDR_SHIFT U(0)
+#define GPTBR_BADDR_VAL_SHIFT U(12)
+#define GPTBR_BADDR_MASK ULL(0xffffffffff)
+
+/*******************************************************************************
* RAS system registers
******************************************************************************/
#define DISR_EL1 S3_0_C12_C1_1
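For illustration, the SET_GPCCR_* accessors are meant to be OR-ed together
when composing a GPCCR_EL3 value. A sketch with arbitrary example field
values (write_gpccr_el3() is the accessor this patch defines in
arch_helpers.h; the configuration shown is not a recommendation):

    /* 1TB protected address space, 4KB granules, inner-shareable,
     * write-back/read-allocate/write-allocate GPT fetches, GPC enabled. */
    u_register_t gpccr = SET_GPCCR_PPS(GPCCR_PPS_1TB) |
                         SET_GPCCR_PGS(GPCCR_PGS_4K) |
                         SET_GPCCR_SH(GPCCR_SH_IS) |
                         SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA) |
                         SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA) |
                         SET_GPCCR_L0GPTSZ(GPCCR_L0GPTSZ_30BITS) |
                         GPCCR_GPC_BIT;

    write_gpccr_el3(gpccr);
    isb();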
diff --git a/include/arch/aarch64/arch_features.h b/include/arch/aarch64/arch_features.h
index 3ff67e5..46cd1c9 100644
--- a/include/arch/aarch64/arch_features.h
+++ b/include/arch/aarch64/arch_features.h
@@ -123,4 +123,15 @@
ID_AA64MMFR1_EL1_HCX_MASK) == ID_AA64MMFR1_EL1_HCX_SUPPORTED);
}
+static inline unsigned int get_armv9_2_feat_rme_support(void)
+{
+ /*
+ * Return the RME version, zero if not supported. The return value can
+ * thus be used both as an integer giving the RME version and compared
+ * to zero to detect the presence of RME.
+ */
+ return (unsigned int)(read_id_aa64pfr0_el1() >>
+ ID_AA64PFR0_FEAT_RME_SHIFT) & ID_AA64PFR0_FEAT_RME_MASK;
+}
+
#endif /* ARCH_FEATURES_H */
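A sketch of both intended uses of the helper, version query and presence
check:

    unsigned int rme_ver = get_armv9_2_feat_rme_support();

    if (rme_ver == ID_AA64PFR0_FEAT_RME_NOT_SUPPORTED) {
            return;         /* RME not implemented */
    }
    if (rme_ver >= ID_AA64PFR0_FEAT_RME_V1) {
            /* RMEv1 (or later) behaviour can be relied upon here. */
    }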
diff --git a/include/arch/aarch64/arch_helpers.h b/include/arch/aarch64/arch_helpers.h
index 72b87c8..1aadf0b 100644
--- a/include/arch/aarch64/arch_helpers.h
+++ b/include/arch/aarch64/arch_helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -540,6 +540,10 @@
/* DynamIQ Shared Unit power management */
DEFINE_RENAME_SYSREG_RW_FUNCS(clusterpwrdn_el1, CLUSTERPWRDN_EL1)
+/* Armv9.2 RME Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(gptbr_el3, GPTBR_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(gpccr_el3, GPCCR_EL3)
+
#define IS_IN_EL(x) \
(GET_EL(read_CurrentEl()) == MODE_EL##x)
@@ -583,7 +587,28 @@
}
}
+/*
+ * TLBIPAALLOS instruction
+ * (TLB Invalidate GPT Information by PA,
+ * All Entries, Outer Shareable)
+ */
+static inline void tlbipaallos(void)
+{
+ __asm__("SYS #6,c8,c1,#4");
+}
+
-/* Previously defined accesor functions with incomplete register names */
+/*
+ * Invalidate cached copies of GPT entries from TLBs
+ * by physical address
+ *
+ * @pa: the starting physical address of the range
+ *      of invalidation
+ * @size: size of the range of invalidation
+ */
+void gpt_tlbi_by_pa(uint64_t pa, size_t size);
+
+/* Previously defined accessor functions with incomplete register names */
#define read_current_el() read_CurrentEl()
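A hedged usage sketch for the two maintenance helpers; base_pa is a
hypothetical 4KB-aligned physical address whose GPT entry has just been
rewritten:

    gpt_tlbi_by_pa(base_pa, PAGE_SIZE_4KB);  /* issues its own DSB */
    isb();

    /* After wholesale GPT changes, the broader form: */
    tlbipaallos();
    dsbsy();
    isb();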
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
index b6f6c9d..cc5c575 100644
--- a/lib/aarch64/misc_helpers.S
+++ b/lib/aarch64/misc_helpers.S
@@ -15,6 +15,7 @@
.globl zero_normalmem
.globl zeromem
.globl memcpy16
+ .globl gpt_tlbi_by_pa
.globl disable_mmu_el1
.globl disable_mmu_el3
@@ -592,3 +593,20 @@
b.lo 1b
ret
endfunc fixup_gdt_reloc
+
+/*
+ * TODO: Currently only supports size of 4KB,
+ * support other sizes as well.
+ */
+func gpt_tlbi_by_pa
+#if ENABLE_ASSERTIONS
+ cmp x1, #PAGE_SIZE_4KB
+ ASM_ASSERT(eq)
+ tst x0, #(PAGE_SIZE_MASK)
+ ASM_ASSERT(eq)
+#endif
+ lsr x0, x0, #FOUR_KB_SHIFT /* 4KB size encoding is zero */
+ sys #6, c8, c4, #3, x0 /* TLBI RPAOS, <Xt> */
+ dsb sy
+ ret
+endfunc gpt_tlbi_by_pa
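
The assertions encode the caller contract: until other sizes are supported,
pa must be 4KB-aligned and size must be exactly PAGE_SIZE_4KB. A conforming
call, with a made-up address:

    gpt_tlbi_by_pa(UL(0x880000000), PAGE_SIZE_4KB);    /* OK */
    /* gpt_tlbi_by_pa(UL(0x880000001), PAGE_SIZE_4KB) would assert */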