feat(cpufeat): add support for FEAT_PAUTH_LR
Enable FEAT_PAUTH_LR at EL3 on systems that support it when the new
ENABLE_FEAT_PAUTH_LR build flag is set.

Currently, FEAT_PAUTH_LR is only supported by the arm-clang compiler and
not by GCC.
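
For example (illustrative only; FVP is used as an example platform and the
Arm Compiler toolchain is assumed), the feature could be enabled with a
build invocation such as:

    make CC=armclang ARCH=aarch64 PLAT=fvp BRANCH_PROTECTION=1 \
        ENABLE_FEAT_SCTLR2=2 ENABLE_FEAT_PAUTH_LR=2 all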
Change-Id: I7db1e34b661ed95cad75850b62878ac5d98466ea
Signed-off-by: John Powell <john.powell@arm.com>
diff --git a/Makefile b/Makefile
index 6d5a0c3..6e4737f 100644
--- a/Makefile
+++ b/Makefile
@@ -652,7 +652,7 @@
################################################################################
include ${MAKE_HELPERS_DIRECTORY}march.mk
-TF_CFLAGS += $(march-directive)
+TF_CFLAGS += $(march-directive)
ASFLAGS += $(march-directive)
# This internal flag is common option which is set to 1 for scenarios
@@ -938,6 +938,34 @@
endif
endif #(CTX_INCLUDE_PAUTH_REGS)
+# Check ENABLE_FEAT_PAUTH_LR
+ifneq (${ENABLE_FEAT_PAUTH_LR},0)
+
+# Make sure PAUTH is enabled
+ifeq (${ENABLE_PAUTH},0)
+ $(error Error: PAUTH_LR cannot be used without PAUTH (see BRANCH_PROTECTION))
+endif
+
+# Make sure SCTLR2 is enabled
+ifeq (${ENABLE_FEAT_SCTLR2},0)
+ $(error Error: PAUTH_LR cannot be used without ENABLE_FEAT_SCTLR2)
+endif
+
+# FEAT_PAUTH_LR is only supported in aarch64 state
+ifneq (${ARCH},aarch64)
+ $(error ENABLE_FEAT_PAUTH_LR requires AArch64)
+endif
+
+# Currently, FEAT_PAUTH_LR is only supported by the arm-clang compiler
+# TODO: implement for GCC when support is added
+ifeq ($($(ARCH)-cc-id),arm-clang)
+ arch-features := $(arch-features)+pauth-lr
+else
+	$(error Error: ENABLE_FEAT_PAUTH_LR is currently only supported when building with arm-clang)
+endif
+
+endif # ${ENABLE_FEAT_PAUTH_LR}
+
ifeq ($(FEATURE_DETECTION),1)
$(info FEATURE_DETECTION is an experimental feature)
endif #(FEATURE_DETECTION)
@@ -1324,6 +1352,7 @@
ENABLE_TRBE_FOR_NS \
ENABLE_BTI \
ENABLE_PAUTH \
+ ENABLE_FEAT_PAUTH_LR \
ENABLE_FEAT_AMU \
ENABLE_FEAT_AMUv1p1 \
ENABLE_FEAT_CSV2_2 \
@@ -1410,6 +1439,7 @@
ENABLE_FEAT_DEBUGV8P9 \
ENABLE_FEAT_MPAM \
ENABLE_PAUTH \
+ ENABLE_FEAT_PAUTH_LR \
ENABLE_PIE \
ENABLE_PMF \
ENABLE_PSCI_STAT \
diff --git a/bl31/bl31_traps.c b/bl31/bl31_traps.c
index 984fdaa..114a57d 100644
--- a/bl31/bl31_traps.c
+++ b/bl31/bl31_traps.c
@@ -90,19 +90,17 @@
* Explicitly create all bits of SPSR to get PSTATE at exception return.
*
* The code is based on "Aarch64.exceptions.takeexception" described in
- * DDI0602 revision 2023-06.
- * "https://developer.arm.com/documentation/ddi0602/2023-06/Shared-Pseudocode/
+ * DDI0602 revision 2025-03.
+ * "https://developer.arm.com/documentation/ddi0602/2025-03/Shared-Pseudocode/
* aarch64-exceptions-takeexception"
*
- * NOTE: This piece of code must be reviewed every release to ensure that
- * we keep up with new ARCH features which introduces a new SPSR bit.
+ * NOTE: This piece of code must be reviewed every release against the latest
+ * AArch64.TakeException pseudocode to ensure that we keep up with new
+ * architectural features that affect PSTATE.
*
- * TF-A 2.12 release review
- * The latest version available is 2024-09, which has two extra features which
- * impacts generation of SPSR, since these features are not implemented in TF-A
- * at the time of release, just log the feature names here to be taken up when
- * feature support is introduced.
- * - FEAT_PAuth_LR (2023 extension)
+ * TF-A 2.13 release review
+ *
+ * Review of version 2025-03 indicates we are missing support for one feature:
* - FEAT_UINJ (2024 extension)
*/
u_register_t create_spsr(u_register_t old_spsr, unsigned int target_el)
@@ -204,6 +202,12 @@
new_spsr |= (gcscr & GCSCR_EXLOCK_EN_BIT) ? SPSR_EXLOCK_BIT_AARCH64 : 0;
}
+	/*
+	 * Preserve PSTATE.PACM from the old SPSR, then clear it when
+	 * FEAT_PAUTH_LR is implemented, since PACM is set to zero on
+	 * exception entry (the bit is RES0 when the feature is absent).
+	 */
+ new_spsr |= old_spsr & SPSR_PACM_BIT_AARCH64;
+ if (is_feat_pauth_lr_present()) {
+ new_spsr &= ~SPSR_PACM_BIT_AARCH64;
+ }
+
return new_spsr;
}
diff --git a/common/feat_detect.c b/common/feat_detect.c
index 4d285d3..a1f68a2 100644
--- a/common/feat_detect.c
+++ b/common/feat_detect.c
@@ -429,6 +429,7 @@
/* v9.4 features */
check_feature(ENABLE_FEAT_GCS, read_feat_gcs_id_field(), "GCS", 1, 1);
check_feature(ENABLE_RME, read_feat_rme_id_field(), "RME", 1, 1);
+ check_feature(ENABLE_FEAT_PAUTH_LR, is_feat_pauth_lr_present(), "PAUTH_LR", 1, 1);
if (tainted) {
panic();
diff --git a/docs/getting_started/build-options.rst b/docs/getting_started/build-options.rst
index e5f7b30..32daf1e 100644
--- a/docs/getting_started/build-options.rst
+++ b/docs/getting_started/build-options.rst
@@ -405,6 +405,12 @@
flag can take values 0 to 2, to align with the ``ENABLE_FEAT``
mechanism. Default value is ``0``.
+- ``ENABLE_FEAT_PAUTH_LR``: Numeric value to enable the ``FEAT_PAUTH_LR``
+  extension. ``FEAT_PAUTH_LR`` is an optional feature available from Arm v9.4
+  onwards. It requires PAUTH (via the ``BRANCH_PROTECTION`` flag) and
+  ``ENABLE_FEAT_SCTLR2`` to be enabled. This flag can take the values 0 to 2,
+  to align with the ``ENABLE_FEAT`` mechanism. Default value is ``0``.
+
- ``ENABLE_FEAT_RNG``: Numeric value to enable the ``FEAT_RNG`` extension.
``FEAT_RNG`` is an optional feature available on Arm v8.5 onwards. This
flag can take the values 0 to 2, to align with the ``ENABLE_FEAT``
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index 3707520..83e5867 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -606,6 +606,11 @@
#define SCTLR_EPAN_BIT (ULL(1) << 57)
#define SCTLR_RESET_VAL SCTLR_EL3_RES1
+#define SCTLR2_EnPACM_BIT (ULL(1) << 7)
+
+/* SCTLR2 currently has no RES1 fields so reset to 0 */
+#define SCTLR2_RESET_VAL ULL(0)
+
/* CPACR_EL1 definitions */
#define CPACR_EL1_FPEN(x) ((x) << 20)
#define CPACR_EL1_FP_TRAP_EL0 UL(0x1)
@@ -855,6 +860,7 @@
#define SPSR_PPEND_BIT BIT(33)
#define SPSR_EXLOCK_BIT_AARCH64 BIT_64(34)
#define SPSR_NZCV (SPSR_V_BIT | SPSR_C_BIT | SPSR_Z_BIT | SPSR_N_BIT)
+#define SPSR_PACM_BIT_AARCH64 BIT_64(35)
#define DISABLE_ALL_EXCEPTIONS \
(DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
@@ -1531,6 +1537,7 @@
/*******************************************************************************
* FEAT_SCTLR2 - Extension to SCTLR_ELx Registers
******************************************************************************/
+#define SCTLR2_EL3 S3_6_C1_C0_3
#define SCTLR2_EL2 S3_4_C1_C0_3
#define SCTLR2_EL1 S3_0_C1_C0_3
diff --git a/include/arch/aarch64/arch_features.h b/include/arch/aarch64/arch_features.h
index e3068d1..757ce06 100644
--- a/include/arch/aarch64/arch_features.h
+++ b/include/arch/aarch64/arch_features.h
@@ -146,6 +146,8 @@
* +----------------------------+
* | FEAT_MOPS |
* +----------------------------+
+ * | FEAT_PAUTH_LR |
+ * +----------------------------+
*/
__attribute__((always_inline))
@@ -196,6 +198,35 @@
CREATE_FEATURE_SUPPORTED(feat_pauth, is_feat_pauth_present, ENABLE_PAUTH)
CREATE_FEATURE_SUPPORTED(ctx_pauth, is_feat_pauth_present, CTX_INCLUDE_PAUTH_REGS)
+/*
+ * FEAT_PAUTH_LR
+ * This feature has a non-standard discovery method, so define the presence
+ * check manually and then use the CREATE_FEATURE_SUPPORTED macro with it.
+ * When present, this feature is enabled together with PAUTH.
+ */
+__attribute__((always_inline))
+static inline bool is_feat_pauth_lr_present(void)
+{
+	/*
+	 * FEAT_PAUTH_LR support is indicated by up to three ID fields. If one
+	 * or more of these reads 0b0110, the feature is present:
+	 * 1) id_aa64isar1_el1.api
+	 * 2) id_aa64isar1_el1.apa
+	 * 3) id_aa64isar2_el1.apa3
+	 */
+ if (ISOLATE_FIELD(read_id_aa64isar1_el1(), ID_AA64ISAR1_API_SHIFT, ID_AA64ISAR1_API_MASK) == 0b0110) {
+ return true;
+ }
+ if (ISOLATE_FIELD(read_id_aa64isar1_el1(), ID_AA64ISAR1_APA_SHIFT, ID_AA64ISAR1_APA_MASK) == 0b0110) {
+ return true;
+ }
+ if (ISOLATE_FIELD(read_id_aa64isar2_el1(), ID_AA64ISAR2_APA3_SHIFT, ID_AA64ISAR2_APA3_MASK) == 0b0110) {
+ return true;
+ }
+ return false;
+}
+CREATE_FEATURE_SUPPORTED(feat_pauth_lr, is_feat_pauth_lr_present, ENABLE_FEAT_PAUTH_LR)
+
/* FEAT_TTST: Small translation tables */
CREATE_FEATURE_PRESENT(feat_ttst, id_aa64mmfr2_el1, ID_AA64MMFR2_EL1_ST_SHIFT,
ID_AA64MMFR2_EL1_ST_MASK, 1U)
diff --git a/include/arch/aarch64/arch_helpers.h b/include/arch/aarch64/arch_helpers.h
index 9419583..50a5832 100644
--- a/include/arch/aarch64/arch_helpers.h
+++ b/include/arch/aarch64/arch_helpers.h
@@ -733,6 +733,7 @@
/* FEAT_SCTLR2 Registers */
DEFINE_RENAME_SYSREG_RW_FUNCS(sctlr2_el1, SCTLR2_EL1)
DEFINE_RENAME_SYSREG_RW_FUNCS(sctlr2_el2, SCTLR2_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(sctlr2_el3, SCTLR2_EL3)
/* FEAT_LS64_ACCDATA Registers */
DEFINE_RENAME_SYSREG_RW_FUNCS(accdata_el1, ACCDATA_EL1)
diff --git a/include/arch/aarch64/asm_macros.S b/include/arch/aarch64/asm_macros.S
index da51bf8..078c414 100644
--- a/include/arch/aarch64/asm_macros.S
+++ b/include/arch/aarch64/asm_macros.S
@@ -351,6 +351,11 @@
tst \reg, \clobber
.endm
+ .macro is_feat_sctlr2_present_asm reg:req
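+	/*
+	 * Set the Z flag when FEAT_SCTLR2 is not implemented, so a following
+	 * b.eq can branch around SCTLR2 accesses. Clobbers the register.
+	 */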
+ mrs \reg, ID_AA64MMFR3_EL1
+ ands \reg, \reg, #(ID_AA64MMFR3_EL1_SCTLR2_MASK << ID_AA64MMFR3_EL1_SCTLR2_SHIFT)
+ .endm
+
.macro call_reset_handler
#if !(defined(IMAGE_BL2) && ENABLE_RME)
/* ---------------------------------------------------------------------
diff --git a/include/arch/aarch64/el3_common_macros.S b/include/arch/aarch64/el3_common_macros.S
index 07dffb1..fce0f2c 100644
--- a/include/arch/aarch64/el3_common_macros.S
+++ b/include/arch/aarch64/el3_common_macros.S
@@ -45,6 +45,16 @@
msr sctlr_el3, x0
isb
+#if ENABLE_FEAT_SCTLR2
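+	/* ---------------------------------------------------------------------
+	 * Reset SCTLR2_EL3 to its architecturally defined reset value (zero,
+	 * as it currently has no RES1 bits). When ENABLE_FEAT_SCTLR2 > 1 the
+	 * presence of the feature is checked at runtime and the register
+	 * write is skipped if FEAT_SCTLR2 is not implemented.
+	 * ---------------------------------------------------------------------
+	 */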
+#if ENABLE_FEAT_SCTLR2 > 1
+ is_feat_sctlr2_present_asm x1
+ beq feat_sctlr2_not_supported\@
+#endif
+ mov x1, #SCTLR2_RESET_VAL
+ msr SCTLR2_EL3, x1
+feat_sctlr2_not_supported\@:
+#endif
+
#ifdef IMAGE_BL31
/* ---------------------------------------------------------------------
* Initialise the per-cpu cache pointer to the CPU.
diff --git a/lib/extensions/pauth/pauth.c b/lib/extensions/pauth/pauth.c
index 2dd0d28..fbbcaa2 100644
--- a/lib/extensions/pauth/pauth.c
+++ b/lib/extensions/pauth/pauth.c
@@ -62,16 +62,25 @@
void __no_pauth pauth_enable_el3(void)
{
write_sctlr_el3(read_sctlr_el3() | SCTLR_EnIA_BIT);
+
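+	/* Allow PSTATE.PACM to take effect at EL3 (FEAT_PAUTH_LR). */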
+ if (is_feat_pauth_lr_supported()) {
+ write_sctlr2_el3(read_sctlr2_el3() | SCTLR2_EnPACM_BIT);
+ }
+
isb();
}
void __no_pauth pauth_enable_el1(void)
{
write_sctlr_el1(read_sctlr_el1() | SCTLR_EnIA_BIT);
+
+ if (is_feat_pauth_lr_supported()) {
+ write_sctlr2_el1(read_sctlr2_el1() | SCTLR2_EnPACM_BIT);
+ }
+
isb();
}
-/* Enable PAuth for EL2 */
void pauth_enable_el2(void)
{
u_register_t hcr_el2 = read_hcr_el2();
diff --git a/make_helpers/arch_features.mk b/make_helpers/arch_features.mk
index 56bfb64..1561a59 100644
--- a/make_helpers/arch_features.mk
+++ b/make_helpers/arch_features.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2022-2024, Arm Limited. All rights reserved.
+# Copyright (c) 2022-2025, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -160,6 +160,11 @@
# direct setting. Use BRANCH_PROTECTION to enable PAUTH.
ENABLE_PAUTH ?= 0
+# FEAT_PAUTH_LR is an optional architectural feature, so this flag must be set
+# manually in addition to the BRANCH_PROTECTION flag, which controls the other
+# branch protection and pointer authentication features.
+ENABLE_FEAT_PAUTH_LR ?= 0
+
# Include pointer authentication (ARMv8.3-PAuth) registers in cpu context. This
# must be set to 1 if the platform wants to use this feature in the Secure
# world. It is not necessary for use in the Non-secure world.