feat(smccc): implement SMCCC_ARCH_FEATURE_AVAILABILITY

SMCCC_ARCH_FEATURE_AVAILABILITY [1] is a call to query firmware about
the features it is aware of and has enabled. This is useful when a
feature is not enabled at EL3, e.g. due to an older FW image, even
though it is present in hardware. In those cases, the EL1 ID registers
do not reflect the usable feature set and this call provides the
information needed to remedy that.

The call itself is very lightweight: effectively a sanitised read of
the relevant system register. Bits that are not relevant to feature
enablement are masked out and active-low bits are converted to
active-high.
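
As a rough caller-side sketch (the smc64() conduit and its return
struct are hypothetical stand-ins for whatever SMC mechanism the
caller already has; the function ID and opcode values come from this
patch):

    #include <stdint.h>

    /* SMCCC_ARCH_FEATURE_AVAILABILITY | (SMC_64 << FUNCID_CC_SHIFT) */
    #define ARCH_FEAT_AVAIL_SMC64 0xC0000003U
    #define SCR_EL3_OPCODE        0x1E1100U

    struct smc_ret { uint64_t x0, x1; };
    /* assumed conduit: issues an SMC and returns X0/X1 */
    extern struct smc_ret smc64(uint32_t fid, uint64_t x1);

    static uint64_t scr_feature_bitmask(void)
    {
        struct smc_ret ret = smc64(ARCH_FEAT_AVAIL_SMC64,
                                   SCR_EL3_OPCODE);

        if ((int64_t)ret.x0 != 0) { /* SMC_ARCH_CALL_SUCCESS == 0 */
            return 0; /* unknown register or call unsupported */
        }
        return ret.x1; /* sanitised, active-high SCR_EL3 bits */
    }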

The implementation is also very simple. All relevant, irrelevant, and
inverted bits are combined into bitmasks at build time. Then, at
runtime, the masks are unconditionally applied to produce the right
result. This assumes that context management ensures that disabled
features do not have their bits set, and that the registers are
context switched if any fields in them would make enablement
ambiguous.
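
For illustration, the runtime step amounts to the following (with toy
mask values, not the real SCR_EL3 layout; the real masks are assembled
in arm_arch_svc.h from the ENABLE_FEAT_* build options):

    #include <stdint.h>

    #define FEATS   0x0cULL /* bits that gate features */
    #define IGNORED 0x03ULL /* bits callers must not see */
    #define FLIPPED 0x08ULL /* active-low bits within FEATS */

    static uint64_t sanitise(uint64_t reg)
    {
        reg &= ~IGNORED; /* drop irrelevant bits */
        reg &= FEATS;    /* keep only feature-enable bits */
        reg ^= FLIPPED;  /* convert active-low to active-high */
        return reg;
    }

    /* e.g. sanitise(0x7) == 0xc: the active-high bit 2 is set and
     * the active-low bit 3 is clear, so both report as enabled */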

Features that are not yet supported in TF-A have not been added. On
debug builds, calling this function will fail an assert if any
unexpected bits are set. In combination with CI, this should allow
this feature to stay up to date as new architectural features are
added.

If a call for MPAM3_EL3 is made when MPAM is not enabled at build
time, the call will return INVALID_PARAM, while if it is built with
FEAT_STATE_CHECK, it will return zero. This should be fairly
consistent with feature detection.

The bitmask is meant to be interpreted as the logical AND of the
relevant ID registers. It would be permissible for this call to return
1 where the ID register reads 0; despite this, the implementation
takes steps not to. In the general case, the two should match exactly.
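
For example, a caller deciding whether SME is usable might combine
both sources (hw_has_sme() is a hypothetical ID-register helper;
scr_feature_bitmask() is the query sketched earlier; EnTP2 is SCR_EL3
bit 41):

    bool sme_usable = hw_has_sme() &&
                      ((scr_feature_bitmask() & (1ULL << 41)) != 0ULL);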

Finally, it is not entirely clear whether this call should reply to
SMC32 requests. This implementation does not, as the return values are
all 64 bits wide.

[1]: https://developer.arm.com/documentation/den0028/galp1/?lang=en

Co-developed-by: Charlie Bareham <charlie.bareham@arm.com>
Signed-off-by: Boyan Karatotev <boyan.karatotev@arm.com>
Change-Id: I1a74e7d0b3459b1396961b8fa27f84e3f0ad6a6f
diff --git a/Makefile b/Makefile
index 6f080b4..45b0b21 100644
--- a/Makefile
+++ b/Makefile
@@ -959,6 +959,10 @@
 	ifneq (${ENABLE_FEAT_FPMR},0)
                 $(error "ENABLE_FEAT_FPMR cannot be used with ARCH=aarch32")
 	endif
+
+	ifeq (${ARCH_FEATURE_AVAILABILITY},1)
+                $(error "ARCH_FEATURE_AVAILABILITY cannot be used with ARCH=aarch32")
+	endif
 endif #(ARCH=aarch32)
 
 ifneq (${ENABLE_FEAT_FPMR},0)
@@ -1207,6 +1211,7 @@
 	PROGRAMMABLE_RESET_ADDRESS \
 	PSCI_EXTENDED_STATE_ID \
 	PSCI_OS_INIT_MODE \
+	ARCH_FEATURE_AVAILABILITY \
 	RESET_TO_BL31 \
 	SAVE_KEYS \
 	SEPARATE_CODE_AND_RODATA \
@@ -1388,6 +1393,7 @@
 	PROGRAMMABLE_RESET_ADDRESS \
 	PSCI_EXTENDED_STATE_ID \
 	PSCI_OS_INIT_MODE \
+	ARCH_FEATURE_AVAILABILITY \
 	RESET_TO_BL31 \
 	RME_GPT_BITLOCK_BLOCK \
 	RME_GPT_MAX_BLOCK \
diff --git a/docs/getting_started/build-options.rst b/docs/getting_started/build-options.rst
index d2d2eb5..a8184e6 100644
--- a/docs/getting_started/build-options.rst
+++ b/docs/getting_started/build-options.rst
@@ -858,6 +858,11 @@
 -  ``PSCI_OS_INIT_MODE``: Boolean flag to enable support for optional PSCI
    OS-initiated mode. This option defaults to 0.
 
+-  ``ARCH_FEATURE_AVAILABILITY``: Boolean flag to enable support for the
+   optional ``SMCCC_ARCH_FEATURE_AVAILABILITY`` call. This option implicitly
+   interacts with ``IMPDEF_SYSREG_TRAP`` and platform system register
+   emulation. This option defaults to 0.
+
 -  ``ENABLE_FEAT_RAS``: Boolean flag to enable Armv8.2 RAS features. RAS features
    are an optional extension for pre-Armv8.2 CPUs, but are mandatory for Armv8.2
    or later CPUs. This flag can take the values 0 or 1. The default value is 0.
diff --git a/docs/porting-guide.rst b/docs/porting-guide.rst
index 5cb20fd..0f37368 100644
--- a/docs/porting-guide.rst
+++ b/docs/porting-guide.rst
@@ -3553,7 +3553,10 @@
 This function is invoked by BL31's exception handler when there is a synchronous
 system register trap caused by access to the implementation defined registers.
 It allows platforms enabling ``IMPDEF_SYSREG_TRAP`` to emulate those system
-registers choosing to program bits of their choice.
+registers choosing to program bits of their choice. If used in combination with
+``ARCH_FEATURE_AVAILABILITY``, the macros
+``{SCR,MDCR,CPTR}_PLAT_{FEATS,IGNORED,FLIPPED}`` should be defined to report
+correct results.
 
 The first parameter (``uint64_t esr_el3``) contains the content of the ESR_EL3
 syndrome register, which encodes the instruction that was trapped.
diff --git a/include/services/arm_arch_svc.h b/include/services/arm_arch_svc.h
index 645b388..c2b1f41 100644
--- a/include/services/arm_arch_svc.h
+++ b/include/services/arm_arch_svc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -13,8 +13,310 @@
 #define SMCCC_ARCH_WORKAROUND_1		U(0x80008000)
 #define SMCCC_ARCH_WORKAROUND_2		U(0x80007FFF)
 #define SMCCC_ARCH_WORKAROUND_3		U(0x80003FFF)
+#define SMCCC_ARCH_FEATURE_AVAILABILITY		U(0x80000003)
 
 #define SMCCC_GET_SOC_VERSION		U(0)
 #define SMCCC_GET_SOC_REVISION		U(1)
 
+#ifndef __ASSEMBLER__
+#if ARCH_FEATURE_AVAILABILITY
+#include <lib/cassert.h>
+
+#if ENABLE_FEAT_FGT2
+#define SCR_FEAT_FGT2 SCR_FGTEN2_BIT
+#else
+#define SCR_FEAT_FGT2 (0)
+#endif
+
+#if ENABLE_FEAT_FPMR
+#define SCR_FEAT_FPMR SCR_EnFPM_BIT
+#else
+#define SCR_FEAT_FPMR (0)
+#endif
+
+#if ENABLE_FEAT_D128
+#define SCR_FEAT_D128 SCR_D128En_BIT
+#else
+#define SCR_FEAT_D128 (0)
+#endif
+
+#if ENABLE_FEAT_S1PIE
+#define SCR_FEAT_S1PIE SCR_PIEN_BIT
+#else
+#define SCR_FEAT_S1PIE (0)
+#endif
+
+#if ENABLE_FEAT_SCTLR2
+#define SCR_FEAT_SCTLR2 SCR_SCTLR2En_BIT
+#else
+#define SCR_FEAT_SCTLR2 (0)
+#endif
+
+#if ENABLE_FEAT_TCR2
+#define SCR_FEAT_TCR2 SCR_TCR2EN_BIT
+#else
+#define SCR_FEAT_TCR2 (0)
+#endif
+
+#if ENABLE_FEAT_THE
+#define SCR_FEAT_THE SCR_RCWMASKEn_BIT
+#else
+#define SCR_FEAT_THE (0)
+#endif
+
+#if ENABLE_SME_FOR_NS
+#define SCR_FEAT_SME SCR_ENTP2_BIT
+#else
+#define SCR_FEAT_SME (0)
+#endif
+
+#if ENABLE_FEAT_GCS
+#define SCR_FEAT_GCS SCR_GCSEn_BIT
+#else
+#define SCR_FEAT_GCS (0)
+#endif
+
+#if ENABLE_FEAT_HCX
+#define SCR_FEAT_HCX SCR_HXEn_BIT
+#else
+#define SCR_FEAT_HCX (0)
+#endif
+
+#if ENABLE_FEAT_LS64_ACCDATA
+#define SCR_FEAT_LS64_ACCDATA (SCR_ADEn_BIT | SCR_EnAS0_BIT)
+#else
+#define SCR_FEAT_LS64_ACCDATA (0)
+#endif
+
+#if ENABLE_FEAT_AMUv1p1
+#define SCR_FEAT_AMUv1p1 SCR_AMVOFFEN_BIT
+#else
+#define SCR_FEAT_AMUv1p1 (0)
+#endif
+
+#if ENABLE_FEAT_ECV
+#define SCR_FEAT_ECV SCR_ECVEN_BIT
+#else
+#define SCR_FEAT_ECV (0)
+#endif
+
+#if ENABLE_FEAT_FGT
+#define SCR_FEAT_FGT SCR_FGTEN_BIT
+#else
+#define SCR_FEAT_FGT (0)
+#endif
+
+#if ENABLE_FEAT_MTE2
+#define SCR_FEAT_MTE2 SCR_ATA_BIT
+#else
+#define SCR_FEAT_MTE2 (0)
+#endif
+
+#if ENABLE_FEAT_CSV2_2
+#define SCR_FEAT_CSV2_2 SCR_EnSCXT_BIT
+#else
+#define SCR_FEAT_CSV2_2 (0)
+#endif
+
+#if ENABLE_FEAT_RAS
+#define SCR_FEAT_RAS SCR_TERR_BIT
+#else
+#define SCR_FEAT_RAS (0)
+#endif
+
+#ifndef SCR_PLAT_FEATS
+#define SCR_PLAT_FEATS (0)
+#endif
+#ifndef SCR_PLAT_FLIPPED
+#define SCR_PLAT_FLIPPED (0)
+#endif
+#ifndef SCR_PLAT_IGNORED
+#define SCR_PLAT_IGNORED (0)
+#endif
+
+#ifndef CPTR_PLAT_FEATS
+#define CPTR_PLAT_FEATS (0)
+#endif
+#ifndef CPTR_PLAT_FLIPPED
+#define CPTR_PLAT_FLIPPED (0)
+#endif
+
+#ifndef MDCR_PLAT_FEATS
+#define MDCR_PLAT_FEATS (0)
+#endif
+#ifndef MDCR_PLAT_FLIPPED
+#define MDCR_PLAT_FLIPPED (0)
+#endif
+#ifndef MDCR_PLAT_IGNORED
+#define MDCR_PLAT_IGNORED (0)
+#endif
+/*
+ * XYZ_EL3_FEATS - list all bits that are relevant for feature enablement. It's
+ * a constant list based on what features are expected. This relies on the fact
+ * that if the feature is in any way disabled, then the relevant bit will not be
+ * written by context management.
+ *
+ * XYZ_EL3_FLIPPED - bits with an active 0, rather than the usual active 1. The
+ * spec always uses active 1 to mean that the feature will not trap.
+ *
+ * XYZ_EL3_IGNORED - list of all bits that are not relevant for feature
+ * enablement and should not be reported to lower ELs
+ */
+#define SCR_EL3_FEATS (								\
+	SCR_FEAT_FGT2		|						\
+	SCR_FEAT_FPMR		|						\
+	SCR_FEAT_D128		|						\
+	SCR_FEAT_S1PIE		|						\
+	SCR_FEAT_SCTLR2		|						\
+	SCR_FEAT_TCR2		|						\
+	SCR_FEAT_THE		|						\
+	SCR_FEAT_SME		|						\
+	SCR_FEAT_GCS		|						\
+	SCR_FEAT_HCX		|						\
+	SCR_FEAT_LS64_ACCDATA	|						\
+	SCR_FEAT_AMUv1p1	|						\
+	SCR_FEAT_ECV		|						\
+	SCR_FEAT_FGT		|						\
+	SCR_FEAT_MTE2		|						\
+	SCR_FEAT_CSV2_2		|						\
+	SCR_APK_BIT		| /* FEAT_Pauth */				\
+	SCR_FEAT_RAS		|						\
+	SCR_PLAT_FEATS)
+#define SCR_EL3_FLIPPED (							\
+	SCR_FEAT_RAS		|						\
+	SCR_PLAT_FLIPPED)
+#define SCR_EL3_IGNORED (							\
+	SCR_API_BIT		|						\
+	SCR_RW_BIT		|						\
+	SCR_SIF_BIT		|						\
+	SCR_HCE_BIT		|						\
+	SCR_FIQ_BIT		|						\
+	SCR_IRQ_BIT		|						\
+	SCR_NS_BIT		|						\
+	SCR_RES1_BITS		|						\
+	SCR_PLAT_IGNORED)
+CASSERT((SCR_EL3_FEATS & SCR_EL3_IGNORED) == 0, scr_feat_is_ignored);
+CASSERT((SCR_EL3_FLIPPED & SCR_EL3_FEATS) == SCR_EL3_FLIPPED, scr_flipped_not_a_feat);
+
+#if ENABLE_SYS_REG_TRACE_FOR_NS
+#define CPTR_SYS_REG_TRACE (TCPAC_BIT | TTA_BIT)
+#else
+#define CPTR_SYS_REG_TRACE (0)
+#endif
+
+#if ENABLE_FEAT_AMU
+#define CPTR_FEAT_AMU TAM_BIT
+#else
+#define CPTR_FEAT_AMU (0)
+#endif
+
+#if ENABLE_SME_FOR_NS
+#define CPTR_FEAT_SME ESM_BIT
+#else
+#define CPTR_FEAT_SME (0)
+#endif
+
+#if ENABLE_SVE_FOR_NS
+#define CPTR_FEAT_SVE CPTR_EZ_BIT
+#else
+#define CPTR_FEAT_SVE (0)
+#endif
+
+#define CPTR_EL3_FEATS (							\
+	CPTR_SYS_REG_TRACE	|						\
+	CPTR_FEAT_AMU		|						\
+	CPTR_FEAT_SME		|						\
+	TFP_BIT			|						\
+	CPTR_FEAT_SVE		|						\
+	CPTR_PLAT_FEATS)
+#define CPTR_EL3_FLIPPED (							\
+	CPTR_SYS_REG_TRACE	|						\
+	CPTR_FEAT_AMU		|						\
+	TFP_BIT			|						\
+	CPTR_PLAT_FLIPPED)
+CASSERT((CPTR_EL3_FLIPPED & CPTR_EL3_FEATS) == CPTR_EL3_FLIPPED, cptr_flipped_not_a_feat);
+
+/*
+ * Some features enables are expressed with more than 1 bit in order to cater
+ * for multi world enablement. In those cases (BRB, TRB, SPE) only the last bit
+ * is used and reported. This (ab)uses the convenient fact that the last bit
+ * always means "enabled for this world" when context switched correctly.
+ * The per-world values have been adjusted such that this is always true.
+ */
+#if ENABLE_BRBE_FOR_NS
+#define MDCR_FEAT_BRBE MDCR_SBRBE(1UL)
+#else
+#define MDCR_FEAT_BRBE (0)
+#endif
+
+#if ENABLE_FEAT_FGT
+#define MDCR_FEAT_FGT MDCR_TDCC_BIT
+#else
+#define MDCR_FEAT_FGT (0)
+#endif
+
+#if ENABLE_TRBE_FOR_NS
+#define MDCR_FEAT_TRBE MDCR_NSTB(1UL)
+#else
+#define MDCR_FEAT_TRBE (0)
+#endif
+
+#if ENABLE_TRF_FOR_NS
+#define MDCR_FEAT_TRF MDCR_TTRF_BIT
+#else
+#define MDCR_FEAT_TRF (0)
+#endif
+
+#if ENABLE_SPE_FOR_NS
+#define MDCR_FEAT_SPE MDCR_NSPB(1UL)
+#else
+#define MDCR_FEAT_SPE (0)
+#endif
+
+#define MDCR_EL3_FEATS (							\
+	MDCR_FEAT_BRBE		|						\
+	MDCR_FEAT_FGT		|						\
+	MDCR_FEAT_TRBE		|						\
+	MDCR_FEAT_TRF		|						\
+	MDCR_FEAT_SPE		|						\
+	MDCR_TDOSA_BIT		|						\
+	MDCR_TDA_BIT		|						\
+	MDCR_TPM_BIT		| /* FEAT_PMUv3 */				\
+	MDCR_PLAT_FEATS)
+#define MDCR_EL3_FLIPPED (							\
+	MDCR_FEAT_FGT		|						\
+	MDCR_FEAT_TRF		|						\
+	MDCR_TDOSA_BIT		|						\
+	MDCR_TDA_BIT		|						\
+	MDCR_TPM_BIT		|						\
+	MDCR_PLAT_FLIPPED)
+#define MDCR_EL3_IGNORED (							\
+	MDCR_EBWE_BIT		|						\
+	MDCR_EnPMSN_BIT		|						\
+	MDCR_SBRBE(2UL)		|						\
+	MDCR_MTPME_BIT		|						\
+	MDCR_NSTBE_BIT		|						\
+	MDCR_NSTB(2UL)		|						\
+	MDCR_SDD_BIT		|						\
+	MDCR_SPD32(3UL)		|						\
+	MDCR_NSPB(2UL)		|						\
+	MDCR_NSPBE_BIT		|						\
+	MDCR_PLAT_IGNORED)
+CASSERT((MDCR_EL3_FEATS & MDCR_EL3_IGNORED) == 0, mdcr_feat_is_ignored);
+CASSERT((MDCR_EL3_FLIPPED & MDCR_EL3_FEATS) == MDCR_EL3_FLIPPED, mdcr_flipped_not_a_feat);
+
+#define MPAM3_EL3_FEATS		(MPAM3_EL3_TRAPLOWER_BIT)
+#define MPAM3_EL3_FLIPPED	(MPAM3_EL3_TRAPLOWER_BIT)
+#define MPAM3_EL3_IGNORED	(MPAM3_EL3_MPAMEN_BIT)
+CASSERT((MPAM3_EL3_FEATS & MPAM3_EL3_IGNORED) == 0, mpam3_feat_is_ignored);
+CASSERT((MPAM3_EL3_FLIPPED & MPAM3_EL3_FEATS) == MPAM3_EL3_FLIPPED, mpam3_flipped_not_a_feat);
+
+/* The hex representations of these registers' S3 encoding */
+#define SCR_EL3_OPCODE  			U(0x1E1100)
+#define CPTR_EL3_OPCODE 			U(0x1E1140)
+#define MDCR_EL3_OPCODE 			U(0x1E1320)
+#define MPAM3_EL3_OPCODE 			U(0x1EA500)
+
+#endif /* ARCH_FEATURE_AVAILABILITY */
+#endif /* __ASSEMBLER__ */
 #endif /* ARM_ARCH_SVC_H */
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index 8a0975b..4985c0c 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -210,6 +210,9 @@
 # Enable PSCI OS-initiated mode support
 PSCI_OS_INIT_MODE		:= 0
 
+# SMCCC_ARCH_FEATURE_AVAILABILITY support
+ARCH_FEATURE_AVAILABILITY	:= 0
+
 # By default, BL1 acts as the reset handler, not BL31
 RESET_TO_BL31			:= 0
 
diff --git a/services/arm_arch_svc/arm_arch_svc_setup.c b/services/arm_arch_svc/arm_arch_svc_setup.c
index 5456164..6acd1b6 100644
--- a/services/arm_arch_svc/arm_arch_svc_setup.c
+++ b/services/arm_arch_svc/arm_arch_svc_setup.c
@@ -14,6 +14,9 @@
 #include <services/arm_arch_svc.h>
 #include <smccc_helpers.h>
 #include <plat/common/platform.h>
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <lib/el3_runtime/context_mgmt.h>
 
 static int32_t smccc_version(void)
 {
@@ -90,6 +93,12 @@
 		}
 		return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
 #endif
+
+#if ARCH_FEATURE_AVAILABILITY
+	case SMCCC_ARCH_FEATURE_AVAILABILITY:
+		return SMC_ARCH_CALL_SUCCESS;
+#endif /* ARCH_FEATURE_AVAILABILITY */
+
 #endif /* __aarch64__ */
 
 	/* Fallthrough */
@@ -111,6 +120,92 @@
 	}
 	return SMC_ARCH_CALL_INVAL_PARAM;
 }
+
+/*
+ * Reads a system register, sanitises its value, and returns a bitmask
+ * representing which feature in that sysreg has been enabled by firmware. The
+ * bitmask is a 1:1 mapping to the register's fields.
+ */
+#if ARCH_FEATURE_AVAILABILITY
+static uintptr_t smccc_arch_feature_availability(u_register_t reg,
+						 void *handle,
+						 u_register_t flags)
+{
+	cpu_context_t *caller_context;
+	per_world_context_t *caller_per_world_context;
+	el3_state_t *state;
+	u_register_t bitmask, check;
+
+	/* check the caller security state */
+	if (is_caller_secure(flags)) {
+		caller_context = cm_get_context(SECURE);
+		caller_per_world_context = &per_world_context[CPU_CONTEXT_SECURE];
+	} else if (is_caller_non_secure(flags)) {
+		caller_context = cm_get_context(NON_SECURE);
+		caller_per_world_context = &per_world_context[CPU_CONTEXT_NS];
+	} else {
+#if ENABLE_RME
+		caller_context = cm_get_context(REALM);
+		caller_per_world_context = &per_world_context[CPU_CONTEXT_REALM];
+#else /* !ENABLE_RME */
+		assert(0); /* shouldn't be possible */
+#endif /* ENABLE_RME */
+	}
+
+	state = get_el3state_ctx(caller_context);
+
+	switch (reg) {
+	case SCR_EL3_OPCODE:
+		bitmask  = read_ctx_reg(state, CTX_SCR_EL3);
+		bitmask &= ~SCR_EL3_IGNORED;
+		check    = bitmask & ~SCR_EL3_FEATS;
+		bitmask &= SCR_EL3_FEATS;
+		bitmask ^= SCR_EL3_FLIPPED;
+		/* will only report 0 if neither is implemented */
+		if (is_feat_rng_trap_supported() || is_feat_rng_present()) {
+			bitmask |= SCR_TRNDR_BIT;
+		}
+		break;
+	case CPTR_EL3_OPCODE:
+		bitmask  = caller_per_world_context->ctx_cptr_el3;
+		check    = bitmask & ~CPTR_EL3_FEATS;
+		bitmask &= CPTR_EL3_FEATS;
+		bitmask ^= CPTR_EL3_FLIPPED;
+		break;
+	case MDCR_EL3_OPCODE:
+		bitmask  = read_ctx_reg(state, CTX_MDCR_EL3);
+		bitmask &= ~MDCR_EL3_IGNORED;
+		check    = bitmask & ~MDCR_EL3_FEATS;
+		bitmask &= MDCR_EL3_FEATS;
+		bitmask ^= MDCR_EL3_FLIPPED;
+		break;
+#if ENABLE_FEAT_MPAM
+	case MPAM3_EL3_OPCODE:
+		bitmask  = caller_per_world_context->ctx_mpam3_el3;
+		bitmask &= ~MPAM3_EL3_IGNORED;
+		check    = bitmask & ~MPAM3_EL3_FEATS;
+		bitmask &= MPAM3_EL3_FEATS;
+		bitmask ^= MPAM3_EL3_FLIPPED;
+		break;
+#endif /* ENABLE_FEAT_MPAM */
+	default:
+		SMC_RET2(handle, SMC_INVALID_PARAM, ULL(0));
+	}
+
+	/*
+	 * Failing this means that the requested register has a bit set that
+	 * hasn't been declared as a known feature bit or an ignore bit. This is
+	 * likely to happen when support for a new feature is added but the
+	 * bitmask macros are not updated.
+	 */
+	if (ENABLE_ASSERTIONS && check != 0) {
+		ERROR("Unexpected bits 0x%lx were set in register %lx!\n", check, reg);
+		assert(0);
+	}
+
+	SMC_RET2(handle, SMC_ARCH_CALL_SUCCESS, bitmask);
+}
+#endif /* ARCH_FEATURE_AVAILABILITY */
 
 /*
  * Top-level Arm Architectural Service SMC handler.
@@ -161,6 +255,11 @@
 		SMC_RET0(handle);
 #endif
 #endif /* __aarch64__ */
+#if ARCH_FEATURE_AVAILABILITY
+	/* return is 64 bit so only reply on SMC64 requests */
+	case SMCCC_ARCH_FEATURE_AVAILABILITY | (SMC_64 << FUNCID_CC_SHIFT):
+		return smccc_arch_feature_availability(x1, handle, flags);
+#endif /* ARCH_FEATURE_AVAILABILITY */
 	default:
 		WARN("Unimplemented Arm Architecture Service Call: 0x%x \n",
 			smc_fid);