Merge changes Iccfa7ec6,Ide9a7af4 into integration
* changes:
feat(intel): add macro to switch between different UART PORT
feat(intel): add SMC support for ROM Patch SHA384 mailbox
diff --git a/Makefile b/Makefile
index 95c9075..3a8a522 100644
--- a/Makefile
+++ b/Makefile
@@ -263,24 +263,6 @@
# Determine if FEAT_SB is supported
ENABLE_FEAT_SB = $(if $(findstring sb,${arch-features}),1,0)
-ifeq "8.5" "$(word 1, $(sort 8.5 $(ARM_ARCH_MAJOR).$(ARM_ARCH_MINOR)))"
-ENABLE_FEAT_SB = 1
-endif
-
-# Determine and enable FEAT_FGT to access HDFGRTR_EL2 register for v8.6 and higher versions.
-ifeq "8.6" "$(word 1, $(sort 8.6 $(ARM_ARCH_MAJOR).$(ARM_ARCH_MINOR)))"
-ENABLE_FEAT_FGT = 1
-endif
-
-# Determine and enable FEAT_ECV to access CNTPOFF_EL2 register for v8.6 and higher versions.
-ifeq "8.6" "$(word 1, $(sort 8.6 $(ARM_ARCH_MAJOR).$(ARM_ARCH_MINOR)))"
-ENABLE_FEAT_ECV = 1
-endif
-
-ifeq "8.4" "$(word 1, $(sort 8.4 $(ARM_ARCH_MAJOR).$(ARM_ARCH_MINOR)))"
-ENABLE_FEAT_DIT = 1
-endif
-
ifneq ($(findstring armclang,$(notdir $(CC))),)
TF_CFLAGS_aarch32 = -target arm-arm-none-eabi $(march32-directive)
TF_CFLAGS_aarch64 = -target aarch64-arm-none-eabi $(march64-directive)
@@ -467,13 +449,10 @@
DTC_CPPFLAGS += -P -nostdinc -Iinclude -Ifdts -undef \
-x assembler-with-cpp $(DEFINES)
-ifeq ($(MEASURED_BOOT),1)
-DTC_CPPFLAGS += -DMEASURED_BOOT -DBL2_HASH_SIZE=${TCG_DIGEST_SIZE}
-endif
-
################################################################################
# Common sources and include directories
################################################################################
+include ${MAKE_HELPERS_DIRECTORY}arch_features.mk
include lib/compiler-rt/compiler-rt.mk
BL_COMMON_SOURCES += common/bl_common.c \
@@ -544,6 +523,9 @@
ifeq ($(CTX_INCLUDE_EL2_REGS),0)
$(error SPMD with SPM at S-EL2 requires CTX_INCLUDE_EL2_REGS option)
endif
+ ifeq ($(SPMC_AT_EL3),1)
+ $(error SPM cannot be enabled in both S-EL2 and EL3.)
+ endif
endif
ifeq ($(findstring optee_sp,$(ARM_SPMC_MANIFEST_DTS)),optee_sp)
@@ -594,6 +576,9 @@
ifneq (${ARCH},aarch64)
$(error ENABLE_RME requires AArch64)
endif
+ifeq ($(SPMC_AT_EL3),1)
+ $(error SPMC_AT_EL3 and ENABLE_RME cannot both be enabled.)
+endif
include services/std_svc/rmmd/rmmd.mk
$(warning "RME is an experimental feature")
endif
@@ -776,6 +761,10 @@
$(info PSA_FWU_SUPPORT is an experimental feature)
endif
+ifeq ($(FEATURE_DETECTION),1)
+ $(info FEATURE_DETECTION is an experimental feature)
+endif
+
ifeq (${ARM_XLAT_TABLES_LIB_V1}, 1)
ifeq (${ALLOW_RO_XLAT_TABLES}, 1)
$(error "ALLOW_RO_XLAT_TABLES requires translation tables library v2")
@@ -980,10 +969,7 @@
CREATE_KEYS \
CTX_INCLUDE_AARCH32_REGS \
CTX_INCLUDE_FPREGS \
- CTX_INCLUDE_PAUTH_REGS \
- CTX_INCLUDE_MTE_REGS \
CTX_INCLUDE_EL2_REGS \
- CTX_INCLUDE_NEVE_REGS \
DEBUG \
DISABLE_MTPMU \
DYN_DISABLE_AUTH \
@@ -993,11 +979,9 @@
ENABLE_AMU_FCONF \
AMU_RESTRICT_COUNTERS \
ENABLE_ASSERTIONS \
- ENABLE_MPAM_FOR_LOWER_ELS \
ENABLE_PIE \
ENABLE_PMF \
ENABLE_PSCI_STAT \
- ENABLE_RME \
ENABLE_RUNTIME_INSTRUMENTATION \
ENABLE_SME_FOR_NS \
ENABLE_SME_FOR_SWD \
@@ -1017,7 +1001,6 @@
PL011_GENERIC_UART \
PROGRAMMABLE_RESET_ADDRESS \
PSCI_EXTENDED_STATE_ID \
- RAS_EXTENSION \
RESET_TO_BL31 \
SAVE_KEYS \
SEPARATE_CODE_AND_RODATA \
@@ -1025,6 +1008,7 @@
SEPARATE_NOBITS_REGION \
SPIN_ON_BL1_EXIT \
SPM_MM \
+ SPMC_AT_EL3 \
SPMD_SPM_AT_SEL2 \
TRUSTED_BOARD_BOOT \
CRYPTO_SUPPORT \
@@ -1046,20 +1030,13 @@
RAS_TRAP_LOWER_EL_ERR_ACCESS \
COT_DESC_IN_DTB \
USE_SP804_TIMER \
- ENABLE_FEAT_RNG \
- ENABLE_FEAT_SB \
- ENABLE_FEAT_DIT \
PSA_FWU_SUPPORT \
ENABLE_TRBE_FOR_NS \
ENABLE_SYS_REG_TRACE_FOR_NS \
- ENABLE_TRF_FOR_NS \
- ENABLE_FEAT_HCX \
ENABLE_MPMM \
ENABLE_MPMM_FCONF \
- ENABLE_FEAT_FGT \
- ENABLE_FEAT_AMUv1 \
- ENABLE_FEAT_ECV \
SIMICS_BUILD \
+ FEATURE_DETECTION \
)))
$(eval $(call assert_numerics,\
@@ -1067,9 +1044,30 @@
ARM_ARCH_MAJOR \
ARM_ARCH_MINOR \
BRANCH_PROTECTION \
+ CTX_INCLUDE_PAUTH_REGS \
+ CTX_INCLUDE_MTE_REGS \
+ CTX_INCLUDE_NEVE_REGS \
+ ENABLE_BTI \
+ ENABLE_PAUTH \
+ ENABLE_FEAT_AMUv1 \
+ ENABLE_FEAT_AMUv1p1 \
+ ENABLE_FEAT_CSV2_2 \
+ ENABLE_FEAT_DIT \
+ ENABLE_FEAT_ECV \
+ ENABLE_FEAT_FGT \
+ ENABLE_FEAT_HCX \
+ ENABLE_FEAT_PAN \
+ ENABLE_FEAT_RNG \
+ ENABLE_FEAT_SB \
+ ENABLE_FEAT_SEL2 \
+ ENABLE_FEAT_VHE \
+ ENABLE_MPAM_FOR_LOWER_ELS \
+ ENABLE_RME \
+ ENABLE_TRF_FOR_NS \
FW_ENC_STATUS \
NR_OF_FW_BANKS \
NR_OF_IMAGES_IN_FW_BANK \
+ RAS_EXTENSION \
)))
ifdef KEY_SIZE
@@ -1143,6 +1141,7 @@
SPD_${SPD} \
SPIN_ON_BL1_EXIT \
SPM_MM \
+ SPMC_AT_EL3 \
SPMD_SPM_AT_SEL2 \
TRUSTED_BOARD_BOOT \
CRYPTO_SUPPORT \
@@ -1179,6 +1178,12 @@
ENABLE_FEAT_AMUv1 \
ENABLE_FEAT_ECV \
SIMICS_BUILD \
+ ENABLE_FEAT_AMUv1p1 \
+ ENABLE_FEAT_SEL2 \
+ ENABLE_FEAT_VHE \
+ ENABLE_FEAT_CSV2_2 \
+ ENABLE_FEAT_PAN \
+ FEATURE_DETECTION \
)))
ifeq (${SANITIZE_UB},trap)
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index e751824..214cf2f 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -18,12 +18,21 @@
$(error EL3_EXCEPTION_HANDLING must be 1 for SPM-MM support)
else
$(info Including SPM Management Mode (MM) makefile)
- include services/std_svc/spm_mm/spm_mm.mk
+ include services/std_svc/spm/common/spm.mk
+ include services/std_svc/spm/spm_mm/spm_mm.mk
endif
endif
include lib/extensions/amu/amu.mk
include lib/mpmm/mpmm.mk
+
+ifeq (${SPMC_AT_EL3},1)
+ $(warning "EL3 SPMC is an experimental feature")
+ $(info Including EL3 SPMC makefile)
+ include services/std_svc/spm/common/spm.mk
+ include services/std_svc/spm/el3_spmc/spmc.mk
+endif
+
include lib/psci/psci_lib.mk
BL31_SOURCES += bl31/bl31_main.c \
@@ -40,6 +49,8 @@
services/std_svc/std_svc_setup.c \
${PSCI_LIB_SOURCES} \
${SPMD_SOURCES} \
+ ${SPM_MM_SOURCES} \
+ ${SPMC_SOURCES} \
${SPM_SOURCES}
ifeq (${DISABLE_MTPMU},1)
@@ -128,6 +139,10 @@
${RMMD_SOURCES}
endif
+ifeq ($(FEATURE_DETECTION),1)
+BL31_SOURCES += common/feat_detect.c
+endif
+
BL31_LINKERFILE := bl31/bl31.ld.S
# Flag used to indicate if Crash reporting via console should be included
diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c
index 9ac10e2..2a3d838 100644
--- a/bl31/bl31_main.c
+++ b/bl31/bl31_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -14,6 +14,7 @@
#include <bl31/ehf.h>
#include <common/bl_common.h>
#include <common/debug.h>
+#include <common/feat_detect.h>
#include <common/runtime_svc.h>
#include <drivers/console.h>
#include <lib/el3_runtime/context_mgmt.h>
@@ -123,6 +124,11 @@
NOTICE("BL31: %s\n", version_string);
NOTICE("BL31: %s\n", build_message);
+#if FEATURE_DETECTION
+ /* Detect if features enabled at compile time are supported by the PE. */
+ detect_arch_features();
+#endif /* FEATURE_DETECTION */
+
#ifdef SUPPORT_UNKNOWN_MPID
if (unsupported_mpid_flag == 0) {
NOTICE("Unsupported MPID detected!\n");
@@ -253,7 +259,16 @@
(image_type == SECURE) ? "secure" : "normal");
print_entry_point_info(next_image_info);
cm_init_my_context(next_image_info);
- cm_prepare_el3_exit(image_type);
+
+ /*
+ * If we are entering the Non-secure world, use
+ * 'cm_prepare_el3_exit_ns' to exit.
+ */
+ if (image_type == NON_SECURE) {
+ cm_prepare_el3_exit_ns();
+ } else {
+ cm_prepare_el3_exit(image_type);
+ }
}
/*******************************************************************************
diff --git a/changelog.yaml b/changelog.yaml
index 939fb65..add81ef 100644
--- a/changelog.yaml
+++ b/changelog.yaml
@@ -442,6 +442,13 @@
deprecated:
- plat/st/stm32mp1
+ - title: Texas Instruments
+ scope: ti
+
+ subsections:
+ - title: K3
+ scope: k3
+
- title: Xilinx
scope: xilinx
diff --git a/common/feat_detect.c b/common/feat_detect.c
new file mode 100644
index 0000000..ef09b86
--- /dev/null
+++ b/common/feat_detect.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/feat_detect.h>
+
+/*******************************************************************************
+ * This section lists the wrapper modules for each feature. They evaluate the
+ * feature state (FEAT_STATE_1 or FEAT_STATE_2) and take the necessary action,
+ * as below:
+ *
+ * Each wrapper verifies whether FEAT_XXX (e.g. FEAT_SB) is supported by the PE.
+ * Without this check, an exception would occur during the context save/restore
+ * routines if the feature is enabled but not supported by the PE.
+ ******************************************************************************/
+
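The wrappers below funnel every check through a feat_detect_panic() helper that is not shown in this hunk and is assumed to come from common/feat_detect.h. A minimal sketch of such a helper, with the signature inferred from the calls in this file and the ERROR()/panic() helpers from common/debug.h assumed, could look like:

    /* Illustrative sketch only, not part of this patch. */
    static inline void feat_detect_panic(bool is_present, const char *feat_name)
    {
        if (!is_present) {
            ERROR("FEAT_%s not supported by the PE\n", feat_name);
            panic();    /* stop the boot at an early phase */
        }
    }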
+/******************************************
+ * Feature : FEAT_SB (Speculation Barrier)
+ *****************************************/
+static void read_feat_sb(void)
+{
+#if (ENABLE_FEAT_SB == FEAT_STATE_1)
+ feat_detect_panic(is_armv8_0_feat_sb_present(), "SB");
+#endif
+}
+
+/******************************************************
+ * Feature : FEAT_CSV2_2 (Cache Speculation Variant 2)
+ *****************************************************/
+static void read_feat_csv2_2(void)
+{
+#if (ENABLE_FEAT_CSV2_2 == FEAT_STATE_1)
+ feat_detect_panic(is_armv8_0_feat_csv2_2_present(), "CSV2_2");
+#endif
+}
+
+/***********************************************
+ * Feature : FEAT_PAN (Privileged Access Never)
+ **********************************************/
+static void read_feat_pan(void)
+{
+#if (ENABLE_FEAT_PAN == FEAT_STATE_1)
+ feat_detect_panic(is_armv8_1_pan_present(), "PAN");
+#endif
+}
+
+/******************************************************
+ * Feature : FEAT_VHE (Virtualization Host Extensions)
+ *****************************************************/
+static void read_feat_vhe(void)
+{
+#if (ENABLE_FEAT_VHE == FEAT_STATE_1)
+ feat_detect_panic(is_armv8_1_vhe_present(), "VHE");
+#endif
+}
+
+/*******************************************************************************
+ * Feature : FEAT_RAS (Reliability, Availability, and Serviceability Extension)
+ ******************************************************************************/
+static void read_feat_ras(void)
+{
+#if (RAS_EXTENSION == FEAT_STATE_1)
+ feat_detect_panic(is_armv8_2_feat_ras_present(), "RAS");
+#endif
+}
+
+/************************************************
+ * Feature : FEAT_PAUTH (Pointer Authentication)
+ ***********************************************/
+static void read_feat_pauth(void)
+{
+#if (ENABLE_PAUTH == FEAT_STATE_1) || (CTX_INCLUDE_PAUTH_REGS == FEAT_STATE_1)
+ feat_detect_panic(is_armv8_3_pauth_present(), "PAUTH");
+#endif
+}
+
+/************************************************************
+ * Feature : FEAT_DIT (Data Independent Timing Instructions)
+ ***********************************************************/
+static void read_feat_dit(void)
+{
+#if (ENABLE_FEAT_DIT == FEAT_STATE_1)
+ feat_detect_panic(is_armv8_4_feat_dit_present(), "DIT");
+#endif
+}
+
+/*********************************************************
+ * Feature : FEAT_AMUv1 (Activity Monitors Extensions v1)
+ ********************************************************/
+static void read_feat_amuv1(void)
+{
+#if (ENABLE_FEAT_AMUv1 == FEAT_STATE_1)
+ feat_detect_panic(is_armv8_4_feat_amuv1_present(), "AMUv1");
+#endif
+}
+
+/****************************************************************************
+ * Feature : FEAT_MPAM (Memory Partitioning and Monitoring (MPAM) Extension)
+ ***************************************************************************/
+static void read_feat_mpam(void)
+{
+#if (ENABLE_MPAM_FOR_LOWER_ELS == FEAT_STATE_1)
+ feat_detect_panic(get_mpam_version() != 0U, "MPAM");
+#endif
+}
+
+/**************************************************************
+ * Feature : FEAT_NV2 (Enhanced Nested Virtualization Support)
+ *************************************************************/
+static void read_feat_nv2(void)
+{
+#if (CTX_INCLUDE_NEVE_REGS == FEAT_STATE_1)
+ unsigned int nv = get_armv8_4_feat_nv_support();
+
+ feat_detect_panic((nv == ID_AA64MMFR2_EL1_NV2_SUPPORTED), "NV2");
+#endif
+}
+
+/***********************************
+ * Feature : FEAT_SEL2 (Secure EL2)
+ **********************************/
+static void read_feat_sel2(void)
+{
+#if (ENABLE_FEAT_SEL2 == FEAT_STATE_1)
+ feat_detect_panic(is_armv8_4_sel2_present(), "SEL2");
+#endif
+}
+
+/****************************************************
+ * Feature : FEAT_TRF (Self-hosted Trace Extensions)
+ ***************************************************/
+static void read_feat_trf(void)
+{
+#if (ENABLE_TRF_FOR_NS == FEAT_STATE_1)
+ feat_detect_panic(is_arm8_4_feat_trf_present(), "TRF");
+#endif
+}
+
+/************************************************
+ * Feature : FEAT_MTE (Memory Tagging Extension)
+ ***********************************************/
+static void read_feat_mte(void)
+{
+#if (CTX_INCLUDE_MTE_REGS == FEAT_STATE_1)
+ unsigned int mte = get_armv8_5_mte_support();
+
+ feat_detect_panic((mte != MTE_UNIMPLEMENTED), "MTE");
+#endif
+}
+
+/***********************************************
+ * Feature : FEAT_RNG (Random Number Generator)
+ **********************************************/
+static void read_feat_rng(void)
+{
+#if (ENABLE_FEAT_RNG == FEAT_STATE_1)
+ feat_detect_panic(is_armv8_5_rng_present(), "RNG");
+#endif
+}
+
+/****************************************************
+ * Feature : FEAT_BTI (Branch Target Identification)
+ ***************************************************/
+static void read_feat_bti(void)
+{
+#if (ENABLE_BTI == FEAT_STATE_1)
+ feat_detect_panic(is_armv8_5_bti_present(), "BTI");
+#endif
+}
+
+/****************************************
+ * Feature : FEAT_FGT (Fine Grain Traps)
+ ***************************************/
+static void read_feat_fgt(void)
+{
+#if (ENABLE_FEAT_FGT == FEAT_STATE_1)
+ feat_detect_panic(is_armv8_6_fgt_present(), "FGT");
+#endif
+}
+
+/***********************************************
+ * Feature : FEAT_AMUv1p1 (AMU Extensions v1.1)
+ **********************************************/
+static void read_feat_amuv1p1(void)
+{
+#if (ENABLE_FEAT_AMUv1p1 == FEAT_STATE_1)
+ feat_detect_panic(is_armv8_6_feat_amuv1p1_present(), "AMUv1p1");
+#endif
+}
+
+/*******************************************************
+ * Feature : FEAT_ECV (Enhanced Counter Virtualization)
+ ******************************************************/
+static void read_feat_ecv(void)
+{
+#if (ENABLE_FEAT_ECV == FEAT_STATE_1)
+ unsigned int ecv = get_armv8_6_ecv_support();
+
+ feat_detect_panic(((ecv == ID_AA64MMFR0_EL1_ECV_SUPPORTED) ||
+ (ecv == ID_AA64MMFR0_EL1_ECV_SELF_SYNCH)), "ECV");
+#endif
+}
+
+/******************************************************************
+ * Feature : FEAT_HCX (Extended Hypervisor Configuration Register)
+ *****************************************************************/
+static void read_feat_hcx(void)
+{
+#if (ENABLE_FEAT_HCX == FEAT_STATE_1)
+ feat_detect_panic(is_feat_hcx_present(), "HCX");
+#endif
+}
+
+/**************************************************
+ * Feature : FEAT_RME (Realm Management Extension)
+ *************************************************/
+static void read_feat_rme(void)
+{
+#if (ENABLE_RME == FEAT_STATE_1)
+ feat_detect_panic((get_armv9_2_feat_rme_support() !=
+ ID_AA64PFR0_FEAT_RME_NOT_SUPPORTED), "RME");
+#endif
+}
+
+/***********************************************************************************
+ * TF-A supports many Arm architectural features, from Armv8.0 up to v8.7+.
+ * These features are mostly enabled through build flags. This mechanism helps
+ * validate the build flags in the early boot phase, either in BL1 or BL31
+ * depending on the platform, and assists in identifying and reporting the
+ * features which are enabled but not supported by the PE.
+ *
+ * It reads the ID registers for every enabled feature and ensures the features
+ * are supported by the PE.
+ * If they are not, it stops booting at an early phase and logs error messages,
+ * notifying the platform about the features that are not supported.
+ *
+ * The procedure is implemented with a tri-state approach for each feature:
+ * ENABLE_FEAT_xxx = 0 : The feature is disabled statically at compile time.
+ * ENABLE_FEAT_xxx = 1 : The feature is enabled and must be present in hardware;
+ *                       cold boot panics if the feature is not present.
+ * ENABLE_FEAT_xxx = 2 : The feature is enabled and is used dynamically at runtime,
+ *                       depending on the hardware capability.
+ *
+ * For better readability, the state values are defined with the macros
+ * FEAT_STATE_0, FEAT_STATE_1 and FEAT_STATE_2, matching their numeric values.
+ **********************************************************************************/
+void detect_arch_features(void)
+{
+ /* v8.0 features */
+ read_feat_sb();
+ read_feat_csv2_2();
+
+ /* v8.1 features */
+ read_feat_pan();
+ read_feat_vhe();
+
+ /* v8.2 features */
+ read_feat_ras();
+
+ /* v8.3 features */
+ read_feat_pauth();
+
+ /* v8.4 features */
+ read_feat_dit();
+ read_feat_amuv1();
+ read_feat_mpam();
+ read_feat_nv2();
+ read_feat_sel2();
+ read_feat_trf();
+
+ /* v8.5 features */
+ read_feat_mte();
+ read_feat_rng();
+ read_feat_bti();
+
+ /* v8.6 features */
+ read_feat_amuv1p1();
+ read_feat_fgt();
+ read_feat_ecv();
+
+ /* v8.7 features */
+ read_feat_hcx();
+
+ /* v9.2 features */
+ read_feat_rme();
+}
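The FEAT_STATE_2 (runtime detection) case is reserved for phase 2 and is not implemented by this patch. The intent, per the comment above, is that callers query the hardware at runtime instead of panicking at boot. A purely illustrative sketch, reusing the is_feat_hcx_present() helper added elsewhere in this patch and the FEAT_STATE_2 macro assumed from common/feat_detect.h (the function name runtime_enable_hcx is hypothetical):

    static void runtime_enable_hcx(void)
    {
    #if (ENABLE_FEAT_HCX == FEAT_STATE_2)
        /* Use the feature only if this PE actually implements it. */
        if (is_feat_hcx_present()) {
            /* ... enable HCRX_EL2 save/restore dynamically ... */
        }
    #endif
    }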
diff --git a/docs/about/contact.rst b/docs/about/contact.rst
index 4440a37..4f482bd 100644
--- a/docs/about/contact.rst
+++ b/docs/about/contact.rst
@@ -47,10 +47,10 @@
via their partner managers.
.. _`issue tracker`: https://developer.trustedfirmware.org
-.. _`TF-A development`: https://lists.trustedfirmware.org/pipermail/tf-a/
-.. _`TF-A-Tests development`: https://lists.trustedfirmware.org/pipermail/tf-a-tests/
-.. _`summary of all the lists`: https://lists.trustedfirmware.org
+.. _`TF-A development`: https://lists.trustedfirmware.org/mailman3/lists/tf-a.lists.trustedfirmware.org/
+.. _`TF-A-Tests development`: https://lists.trustedfirmware.org/mailman3/lists/tf-a-tests.lists.trustedfirmware.org/
+.. _`summary of all the lists`: https://lists.trustedfirmware.org/mailman3/lists/
--------------
-*Copyright (c) 2019-2020, Arm Limited. All rights reserved.*
+*Copyright (c) 2019-2022, Arm Limited. All rights reserved.*
diff --git a/docs/components/secure-partition-manager.rst b/docs/components/secure-partition-manager.rst
index af298e3..2eaae75 100644
--- a/docs/components/secure-partition-manager.rst
+++ b/docs/components/secure-partition-manager.rst
@@ -127,14 +127,18 @@
This section explains the TF-A build options involved in building with
support for an FF-A based SPM where the SPMD is located at EL3 and the
-SPMC located at S-EL1 or S-EL2:
+SPMC located at S-EL1, S-EL2 or EL3:
- **SPD=spmd**: this option selects the SPMD component to relay the FF-A
protocol from NWd to SWd back and forth. It is not possible to
enable another Secure Payload Dispatcher when this option is chosen.
- **SPMD_SPM_AT_SEL2**: this option adjusts the SPMC exception
- level to being S-EL1 or S-EL2. It defaults to enabled (value 1) when
+ level to being at S-EL2. It defaults to enabled (value 1) when
SPD=spmd is chosen.
+- **SPMC_AT_EL3**: this option adjusts the SPMC exception level to being
+ at EL3.
+- If neither **SPMD_SPM_AT_SEL2** nor **SPMC_AT_EL3** is enabled, the SPMC
+ exception level is set to S-EL1.
- **CTX_INCLUDE_EL2_REGS**: this option permits saving (resp.
restoring) the EL2 system register context before entering (resp.
after leaving) the SPMC. It is mandatorily enabled when
@@ -146,14 +150,16 @@
is required when ``SPMD_SPM_AT_SEL2`` is enabled hence when multiple
secure partitions are to be loaded on behalf of the SPMC.
-+---------------+----------------------+------------------+
-| | CTX_INCLUDE_EL2_REGS | SPMD_SPM_AT_SEL2 |
-+---------------+----------------------+------------------+
-| SPMC at S-EL1 | 0 | 0 |
-+---------------+----------------------+------------------+
-| SPMC at S-EL2 | 1 | 1 (default when |
-| | | SPD=spmd) |
-+---------------+----------------------+------------------+
++---------------+----------------------+------------------+-------------+
+| | CTX_INCLUDE_EL2_REGS | SPMD_SPM_AT_SEL2 | SPMC_AT_EL3 |
++---------------+----------------------+------------------+-------------+
+| SPMC at S-EL1 | 0 | 0 | 0 |
++---------------+----------------------+------------------+-------------+
+| SPMC at S-EL2 | 1 | 1 (default when | 0 |
+| | | SPD=spmd) | |
++---------------+----------------------+------------------+-------------+
+| SPMC at EL3 | 0 | 0 | 1 |
++---------------+----------------------+------------------+-------------+
Other combinations of such build options either break the build or are not
supported.
@@ -229,6 +235,20 @@
GENERATE_COT=1 \
all fip
+Sample TF-A build command line when SPMC is located at EL3:
+
+.. code:: shell
+
+ make \
+ CROSS_COMPILE=aarch64-none-elf- \
+ SPD=spmd \
+ SPMD_SPM_AT_SEL2=0 \
+ SPMC_AT_EL3=1 \
+ BL32=<path-to-tee-binary> \
+ BL33=<path-to-bl33-binary> \
+ PLAT=fvp \
+ all fip
+
FVP model invocation
====================
@@ -1280,7 +1300,7 @@
.. _[8]:
-[8] https://lists.trustedfirmware.org/pipermail/tf-a/2020-February/000296.html
+[8] https://lists.trustedfirmware.org/archives/list/tf-a@lists.trustedfirmware.org/thread/CFQFGU6H2D5GZYMUYGTGUSXIU3OYZP6U/
.. _[9]:
diff --git a/docs/design/cpu-specific-build-macros.rst b/docs/design/cpu-specific-build-macros.rst
index af0e769..3029458 100644
--- a/docs/design/cpu-specific-build-macros.rst
+++ b/docs/design/cpu-specific-build-macros.rst
@@ -490,6 +490,10 @@
Cortex-X2 CPU. This needs to be enabled only for revisions r0p0, r1p0 and
r2p0 of the CPU, it is fixed in r2p1.
+- ``ERRATA_X2_2147715``: This applies errata 2147715 workaround to
+ Cortex-X2 CPU. This needs to be enabled only for revision r2p0 of the CPU,
+ it is fixed in r2p1.
+
For Cortex-A510, the following errata build flags are defined :
- ``ERRATA_A510_1922240``: This applies errata 1922240 workaround to
diff --git a/docs/getting_started/build-options.rst b/docs/getting_started/build-options.rst
index adc05e6..d30e22f 100644
--- a/docs/getting_started/build-options.rst
+++ b/docs/getting_started/build-options.rst
@@ -174,14 +174,23 @@
registers to be included when saving and restoring the CPU context. Default
is 0.
-- ``CTX_INCLUDE_NEVE_REGS``: Boolean option that, when set to 1, will cause the
- Armv8.4-NV registers to be saved/restored when entering/exiting an EL2
- execution context. Default value is 0.
+- ``CTX_INCLUDE_MTE_REGS``: Numeric value to include Memory Tagging Extension
+ registers in the CPU context. This must be enabled if the platform wants to use
+ this feature in the Secure world and MTE is enabled at ELX. This flag can
+ take values 0 to 2, to align with the ``FEATURE_DETECTION`` mechanism.
+ Default value is 0.
-- ``CTX_INCLUDE_PAUTH_REGS``: Boolean option that, when set to 1, enables
- Pointer Authentication for Secure world. This will cause the ARMv8.3-PAuth
- registers to be included when saving and restoring the CPU context as
- part of world switch. Default value is 0.
+- ``CTX_INCLUDE_NEVE_REGS``: Numeric value which, when set, causes the Armv8.4-NV
+ registers to be saved/restored when entering/exiting an EL2 execution
+ context. This flag can take values 0 to 2, to align with the
+ ``FEATURE_DETECTION`` mechanism. Default value is 0.
+
+- ``CTX_INCLUDE_PAUTH_REGS``: Numeric value to enable Pointer Authentication
+ for the Secure world. This will cause the ARMv8.3-PAuth registers to be
+ included when saving and restoring the CPU context as part of a world
+ switch. This flag can take values 0 to 2, to align with the ``FEATURE_DETECTION``
+ mechanism. Default value is 0.
+
Note that Pointer Authentication is enabled for Non-secure world irrespective
of the value of this flag if the CPU supports it.
@@ -246,42 +255,101 @@
builds, but this behaviour can be overridden in each platform's Makefile or
in the build command line.
-- ``ENABLE_FEAT_AMUv1``: Boolean option to enable access to the HAFGRTR_EL2
+- ``ENABLE_FEAT_AMUv1``: Numeric value to enable access to the HAFGRTR_EL2
(Hypervisor Activity Monitors Fine-Grained Read Trap Register) during EL2
- to EL3 context save/restore operations. It is an optional feature available
- on v8.4 and onwards and must be set to 1 alongside ``ENABLE_FEAT_FGT``, to
- access the HAFGRTR_EL2 register. Defaults to ``0``.
+ to EL3 context save/restore operations. This flag can take the values 0 to 2,
+ to align with the ``FEATURE_DETECTION`` mechanism. It is an optional feature
+ available on v8.4 and onwards and must be set to either 1 or 2 alongside
+ ``ENABLE_FEAT_FGT``, to access the HAFGRTR_EL2 register.
+ Default value is ``0``.
+
+- ``ENABLE_FEAT_AMUv1p1``: Numeric value to enable the ``FEAT_AMUv1p1``
+ extension. ``FEAT_AMUv1p1`` is an optional feature available on Arm v8.6
+ onwards. This flag can take the values 0 to 2, to align with the
+ ``FEATURE_DETECTION`` mechanism. Default value is ``0``.
-- ``ENABLE_FEAT_ECV``: Boolean option to enable support for the Enhanced Counter
+- ``ENABLE_FEAT_CSV2_2``: Numeric value to enable the ``FEAT_CSV2_2``
+ extension. It allows access to the SCXTNUM_EL2 (Software Context Number)
+ register during EL2 context save/restore operations. ``FEAT_CSV2_2`` is an
+ optional feature available on Arm v8.0 onwards. This flag can take values
+ 0 to 2, to align with the ``FEATURE_DETECTION`` mechanism.
+ Default value is ``0``.
+
+- ``ENABLE_FEAT_DIT``: Numeric value to enable ``FEAT_DIT`` (Data Independent
+ Timing) extension. It allows setting the ``DIT`` bit of PSTATE in EL3.
+ ``FEAT_DIT`` is a mandatory architectural feature and is enabled from v8.4
+ and upwards. This flag can take the values 0 to 2, to align with the
+ ``FEATURE_DETECTION`` mechanism. Default value is ``0``.
+
+- ``ENABLE_FEAT_ECV``: Numeric value to enable support for the Enhanced Counter
Virtualization feature, allowing for access to the CNTPOFF_EL2 (Counter-timer
Physical Offset register) during EL2 to EL3 context save/restore operations.
- Its a mandatory architectural feature in Armv8.6 and defaults to ``1`` for
- v8.6 or later CPUs.
+ It is a mandatory architectural feature and is enabled from v8.6 and upwards.
+ This flag can take the values 0 to 2, to align with the ``FEATURE_DETECTION``
+ mechanism. Default value is ``0``.
-- ``ENABLE_FEAT_FGT``: Boolean option to enable support for FGT (Fine Grain Traps)
+- ``ENABLE_FEAT_FGT``: Numeric value to enable support for FGT (Fine Grain Traps)
feature allowing for access to the HDFGRTR_EL2 (Hypervisor Debug Fine-Grained
- Read Trap Register) during EL2 to EL3 context save/restore operations.
- Its a mandatory architectural feature in Armv8.6 and defaults to ``1`` for
- v8.6 or later CPUs.
+ Read Trap Register) during EL2 to EL3 context save/restore operations.
+ It is a mandatory architectural feature and is enabled from v8.6 and upwards.
+ This flag can take the values 0 to 2, to align with the ``FEATURE_DETECTION``
+ mechanism. Default value is ``0``.
-- ``ENABLE_FEAT_HCX``: This option sets the bit SCR_EL3.HXEn in EL3 to allow
- access to HCRX_EL2 (extended hypervisor control register) from EL2 as well as
- adding HCRX_EL2 to the EL2 context save/restore operations.
+- ``ENABLE_FEAT_HCX``: Numeric value to set the bit SCR_EL3.HXEn in EL3 to
+ allow access to HCRX_EL2 (extended hypervisor control register) from EL2 as
+ well as adding HCRX_EL2 to the EL2 context save/restore operations. It is a
+ mandatory architectural feature and is enabled from v8.7 and upwards. This
+ flag can take the values 0 to 2, to align with the ``FEATURE_DETECTION``
+ mechanism. Default value is ``0``.
+
+- ``ENABLE_FEAT_PAN``: Numeric value to enable the ``FEAT_PAN`` (Privileged
+ Access Never) extension. ``FEAT_PAN`` adds a bit to PSTATE, generating a
+ permission fault for any privileged data access from EL1/EL2 to a virtual
+ memory address accessible at EL0 (provided HCR_EL2.E2H=1). It is a
+ mandatory architectural feature and is enabled from v8.1 and upwards. This
+ flag can take values 0 to 2, to align with the ``FEATURE_DETECTION``
+ mechanism. Default value is ``0``.
+
+- ``ENABLE_FEAT_RNG``: Numeric value to enable the ``FEAT_RNG`` extension.
+ ``FEAT_RNG`` is an optional feature available on Arm v8.5 onwards. This
+ flag can take the values 0 to 2, to align with the ``FEATURE_DETECTION``
+ mechanism. Default is ``0``.
+
+- ``ENABLE_FEAT_SB``: Numeric value to enable the ``FEAT_SB`` (Speculation
+ Barrier) extension, allowing access to the ``sb`` instruction. ``FEAT_SB`` is
+ an optional feature that defaults to ``0`` for pre-Armv8.5 CPUs but is mandatory
+ for Armv8.5 or later CPUs. This flag can take values 0 to 2, to align with the
+ ``FEATURE_DETECTION`` mechanism. It is enabled from v8.5 and upwards and, if
+ needed, can be overridden explicitly by platforms. Default value is ``0``.
+
+- ``ENABLE_FEAT_SEL2``: Numeric value to enable the ``FEAT_SEL2`` (Secure EL2)
+ extension. ``FEAT_SEL2`` is a mandatory feature available on Arm v8.4.
+ This flag can take values 0 to 2, to align with the ``FEATURE_DETECTION``
+ mechanism. Default is ``0``.
+
+- ``ENABLE_FEAT_VHE``: Numeric value to enable the ``FEAT_VHE`` (Virtualization
+ Host Extensions) extension. It allows access to CONTEXTIDR_EL2 register
+ during EL2 context save/restore operations. ``FEAT_VHE`` is a mandatory
+ architectural feature and is enabled from v8.1 and upwards. It can take
+ values 0 to 2, to align with the ``FEATURE_DETECTION`` mechanism.
+ Default value is ``0``.
- ``ENABLE_LTO``: Boolean option to enable Link Time Optimization (LTO)
support in GCC for TF-A. This option is currently only supported for
AArch64. Default is 0.
-- ``ENABLE_MPAM_FOR_LOWER_ELS``: Boolean option to enable lower ELs to use MPAM
+- ``ENABLE_MPAM_FOR_LOWER_ELS``: Numeric value to enable lower ELs to use MPAM
feature. MPAM is an optional Armv8.4 extension that enables various memory
system components and resources to define partitions; software running at
various ELs can assign themselves to desired partition to control their
performance aspects.
- When this option is set to ``1``, EL3 allows lower ELs to access their own
- MPAM registers without trapping into EL3. This option doesn't make use of
- partitioning in EL3, however. Platform initialisation code should configure
- and use partitions in EL3 as required. This option defaults to ``0``.
+ This flag can take values 0 to 2, to align with the ``FEATURE_DETECTION``
+ mechanism. When this option is set to ``1`` or ``2``, EL3 allows lower ELs to
+ access their own MPAM registers without trapping into EL3. This option
+ doesn't make use of partitioning in EL3, however. Platform initialisation
+ code should configure and use partitions in EL3 as required. This option
+ defaults to ``0``.
- ``ENABLE_MPMM``: Boolean option to enable support for the Maximum Power
Mitigation Mechanism supported by certain Arm cores, which allows the SoC
@@ -307,9 +375,10 @@
be enabled. If ``ENABLE_PMF`` is set, the residency statistics are tracked in
software.
-- ``ENABLE_RME``: Boolean option to enable support for the ARMv9 Realm
- Management Extension. Default value is 0. This is currently an experimental
- feature.
+- ``ENABLE_RME``: Numeric value to enable support for the ARMv9 Realm
+ Management Extension. This flag can take the values 0 to 2, to align with
+ the ``FEATURE_DETECTION`` mechanism. Default value is 0. This is currently
+ an experimental feature.
- ``ENABLE_RUNTIME_INSTRUMENTATION``: Boolean option to enable runtime
instrumentation which injects timestamp collection points into TF-A to
@@ -352,8 +421,8 @@
- ``ENABLE_SVE_FOR_SWD``: Boolean option to enable SVE for the Secure world.
SVE is an optional architectural feature for AArch64. Note that this option
- requires ENABLE_SVE_FOR_NS to be enabled. The default is 0 and it is
- automatically disabled when the target architecture is AArch32.
+ requires ENABLE_SVE_FOR_NS to be enabled. The default is 0 and it
+ is automatically disabled when the target architecture is AArch32.
- ``ENABLE_STACK_PROTECTOR``: String option to enable the stack protection
checks in GCC. Allowed values are "all", "strong", "default" and "none". The
@@ -399,6 +468,43 @@
This feature is intended for testing purposes only, and is advisable to keep
disabled for production images.
+- ``FEATURE_DETECTION``: Boolean option to enable the architectural feature
+ detection mechanism. It checks whether the architectural features that are
+ enabled through feature-specific build flags are supported by the PE,
+ validating them either at boot time or at runtime depending on the value of
+ the feature flag (0 to 2), and reports error messages at an early
+ stage.
+
+ This prevents EL3 runtime exceptions in the context save and restore routines
+ guarded by these build flags. Validating the flags before use thus provides
+ more control over the actions taken under them.
+
+ The mechanism permits the build flags to take values 0, 1 or 2 and
+ evaluates them accordingly.
+
+ Consider ``ENABLE_FEAT_HCX``, the build flag for ``FEAT_HCX``, as an example:
+
+ ::
+
+ ENABLE_FEAT_HCX = 0: Feature disabled statically at compile time.
+ ENABLE_FEAT_HCX = 1: Feature enabled; the flag is validated at boot time.
+ ENABLE_FEAT_HCX = 2: Feature enabled; the flag is validated at runtime.
+
+ In the above example, if the build flag ``ENABLE_FEAT_HCX`` is set to 0, the
+ feature is disabled statically during compilation. If it is defined as 1, the
+ feature is validated: FEAT_HCX is detected at boot time and, if it is not
+ implemented by the PE, a hard panic is generated. Finally, if the flag is set
+ to 2, the feature is validated at runtime.
+
+ Note that the implementation is divided into two phases: as part of phase 1,
+ the values 0 and 1 are supported. Value 2 is currently not supported and is
+ planned to be handled explicitly in the phase 2 implementation.
+
+ The ``FEATURE_DETECTION`` macro is disabled by default and is currently an
+ experimental feature. Platforms can explicitly make use of this mechanism by
+ enabling it, to validate at an early phase whether they have set their build
+ flags properly.
+
- ``FIP_NAME``: This is an optional build option which specifies the FIP
filename for the ``fip`` target. Default is ``fip.bin``.
@@ -588,9 +694,10 @@
enabled on Arm platforms, the option ``ARM_RECOM_STATE_ID_ENC`` needs to be
set to 1 as well.
-- ``RAS_EXTENSION``: When set to ``1``, enable Armv8.2 RAS features. RAS features
+- ``RAS_EXTENSION``: Numeric value to enable Armv8.2 RAS features. RAS features
are an optional extension for pre-Armv8.2 CPUs, but are mandatory for Armv8.2
- or later CPUs.
+ or later CPUs. This flag can take the values 0 to 2, to align with the
+ ``FEATURE_DETECTION`` mechanism.
When ``RAS_EXTENSION`` is set to ``1``, ``HANDLE_EA_EL3_FIRST`` must also be
set to ``1``.
@@ -673,13 +780,20 @@
firmware images have been loaded in memory, and the MMU and caches are
turned off. Refer to the "Debugging options" section for more details.
-- ``SPMD_SPM_AT_SEL2`` : this boolean option is used jointly with the SPM
+- ``SPMC_AT_EL3`` : This boolean option is used jointly with the SPM
Dispatcher option (``SPD=spmd``). When enabled (1) it indicates the SPMC
- component runs at the S-EL2 execution state provided by the Armv8.4-SecEL2
+ component runs at the EL3 exception level. The default value is ``0``
+ (disabled). This configuration supports pre-Armv8.4 platforms (aka not
+ implementing the ``FEAT_SEL2`` extension). This is an experimental feature.
+
+- ``SPMD_SPM_AT_SEL2`` : This boolean option is used jointly with the SPM
+ Dispatcher option (``SPD=spmd``). When enabled (1) it indicates the SPMC
+ component runs at the S-EL2 exception level provided by the ``FEAT_SEL2``
extension. This is the default when enabling the SPM Dispatcher. When
disabled (0) it indicates the SPMC component runs at the S-EL1 execution
- state. This latter configuration supports pre-Armv8.4 platforms (aka not
- implementing the Armv8.4-SecEL2 extension).
+ state or at EL3 if ``SPMC_AT_EL3`` is enabled. The latter configurations
+ support pre-Armv8.4 platforms (aka not implementing the ``FEAT_SEL2``
+ extension).
- ``SPM_MM`` : Boolean option to enable the Management Mode (MM)-based Secure
Partition Manager (SPM) implementation. The default value is ``0``
@@ -851,9 +965,10 @@
but unused). This feature is available if trace unit such as ETMv4.x, and
ETE(extending ETM feature) is implemented. This flag is disabled by default.
-- ``ENABLE_TRF_FOR_NS``: Boolean option to enable trace filter control registers
+- ``ENABLE_TRF_FOR_NS``: Numeric value to enable trace filter control registers
access from NS ELs, NS-EL2 or NS-EL1 (when NS-EL2 is implemented but unused),
- if FEAT_TRF is implemented. This flag is disabled by default.
+ if FEAT_TRF is implemented. This flag can take the values 0 to 2, to align
+ with the ``FEATURE_DETECTION`` mechanism. This flag is disabled by default.
GICv3 driver options
--------------------
diff --git a/docs/getting_started/prerequisites.rst b/docs/getting_started/prerequisites.rst
index ee30128..bf207a7 100644
--- a/docs/getting_started/prerequisites.rst
+++ b/docs/getting_started/prerequisites.rst
@@ -161,7 +161,7 @@
*Copyright (c) 2021, Arm Limited. All rights reserved.*
-.. _Arm Developer website: https://developer.arm.com/open-source/gnu-toolchain/gnu-a/downloads
+.. _Arm Developer website: https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/downloads
.. _Gerrit Code Review: https://www.gerritcodereview.com/
.. _Linaro Release Notes: https://community.arm.com/dev-platforms/w/docs/226/old-release-notes
.. _Linaro instructions: https://community.arm.com/dev-platforms/w/docs/304/arm-reference-platforms-deliverables
diff --git a/docs/process/contributing.rst b/docs/process/contributing.rst
index f80389d..ef9ebd3 100644
--- a/docs/process/contributing.rst
+++ b/docs/process/contributing.rst
@@ -299,6 +299,6 @@
.. _TF-A Tests: https://trustedfirmware-a-tests.readthedocs.io
.. _Trusted Firmware binary repository: https://review.trustedfirmware.org/admin/repos/tf-binaries
.. _tf-binaries-readme: https://git.trustedfirmware.org/tf-binaries.git/tree/readme.rst
-.. _TF-A mailing list: https://lists.trustedfirmware.org/mailman/listinfo/tf-a
+.. _TF-A mailing list: https://lists.trustedfirmware.org/mailman3/lists/tf-a.lists.trustedfirmware.org/
.. _tf-a-ci-scripts repository: https://git.trustedfirmware.org/ci/tf-a-ci-scripts.git/
.. _tf-cov-make: https://git.trustedfirmware.org/ci/tf-a-ci-scripts.git/tree/script/tf-coverity/tf-cov-make
diff --git a/docs/process/platform-compatibility-policy.rst b/docs/process/platform-compatibility-policy.rst
index be1f9ba..a10236c 100644
--- a/docs/process/platform-compatibility-policy.rst
+++ b/docs/process/platform-compatibility-policy.rst
@@ -31,6 +31,6 @@
--------------
-*Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.*
+*Copyright (c) 2018-2022, Arm Limited and Contributors. All rights reserved.*
-.. _TF-A public mailing list: https://lists.trustedfirmware.org/mailman/listinfo/tf-a
+.. _TF-A public mailing list: https://lists.trustedfirmware.org/mailman3/lists/tf-a.lists.trustedfirmware.org/
diff --git a/docs/process/security.rst b/docs/process/security.rst
index a3b9971..e15783b 100644
--- a/docs/process/security.rst
+++ b/docs/process/security.rst
@@ -71,7 +71,7 @@
+-----------+------------------------------------------------------------------+
.. _issue tracker: https://developer.trustedfirmware.org/project/board/1/
-.. _mailing list: https://lists.trustedfirmware.org/mailman/listinfo/tf-a
+.. _mailing list: https://lists.trustedfirmware.org/mailman3/lists/tf-a.lists.trustedfirmware.org/
.. |TFV-1| replace:: :ref:`Advisory TFV-1 (CVE-2016-10319)`
.. |TFV-2| replace:: :ref:`Advisory TFV-2 (CVE-2017-7564)`
@@ -86,4 +86,4 @@
--------------
-*Copyright (c) 2019-2020, Arm Limited. All rights reserved.*
+*Copyright (c) 2019-2022, Arm Limited. All rights reserved.*
diff --git a/drivers/arm/gic/v3/gic600ae_fmu.c b/drivers/arm/gic/v3/gic600ae_fmu.c
index 13979fa..0262f48 100644
--- a/drivers/arm/gic/v3/gic600ae_fmu.c
+++ b/drivers/arm/gic/v3/gic600ae_fmu.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,6 +9,7 @@
*/
#include <assert.h>
+#include <inttypes.h>
#include <arch_helpers.h>
#include <common/debug.h>
@@ -112,6 +113,135 @@
"Wake-GICD AXI4-Stream interface error"
};
+/* Helper function to find detailed information for a specific IERR */
+static char __unused *ras_ierr_to_str(unsigned int blkid, unsigned int ierr)
+{
+ char *str = NULL;
+
+ /* Find the correct record */
+ switch (blkid) {
+ case FMU_BLK_GICD:
+ assert(ierr < ARRAY_SIZE(gicd_sm_info));
+ str = gicd_sm_info[ierr];
+ break;
+
+ case FMU_BLK_SPICOL:
+ assert(ierr < ARRAY_SIZE(spicol_sm_info));
+ str = spicol_sm_info[ierr];
+ break;
+
+ case FMU_BLK_WAKERQ:
+ assert(ierr < ARRAY_SIZE(wkrqst_sm_info));
+ str = wkrqst_sm_info[ierr];
+ break;
+
+ case FMU_BLK_ITS0...FMU_BLK_ITS7:
+ assert(ierr < ARRAY_SIZE(its_sm_info));
+ str = its_sm_info[ierr];
+ break;
+
+ case FMU_BLK_PPI0...FMU_BLK_PPI31:
+ assert(ierr < ARRAY_SIZE(ppi_sm_info));
+ str = ppi_sm_info[ierr];
+ break;
+
+ default:
+ assert(false);
+ break;
+ }
+
+ return str;
+}
+
+/*
+ * Probe for error in memory-mapped registers containing error records.
+ * Upon detecting an error, set probe data to the index of the record
+ * in error, and return 1; otherwise, return 0.
+ */
+int gic600_fmu_probe(uint64_t base, int *probe_data)
+{
+ uint64_t gsr;
+
+ assert(base != 0UL);
+
+ /*
+ * Read ERR_GSR to find the error record 'M'
+ */
+ gsr = gic_fmu_read_errgsr(base);
+ if (gsr == U(0)) {
+ return 0;
+ }
+
+ /* Return the index of the record in error */
+ if (probe_data != NULL) {
+ *probe_data = (int)__builtin_ctzll(gsr);
+ }
+
+ return 1;
+}
+
+/*
+ * The handler function to read RAS records and find the safety
+ * mechanism with the error.
+ */
+int gic600_fmu_ras_handler(uint64_t base, int probe_data)
+{
+ uint64_t errstatus;
+ unsigned int blkid = (unsigned int)probe_data, ierr, serr;
+
+ assert(base != 0UL);
+
+ /*
+ * FMU_ERRGSR indicates the ID of the GIC
+ * block that faulted.
+ */
+ assert(blkid <= FMU_BLK_PPI31);
+
+ /*
+ * Find more information by reading FMU_ERR<M>STATUS
+ * register
+ */
+ errstatus = gic_fmu_read_errstatus(base, blkid);
+
+ /*
+ * If FMU_ERR<M>STATUS.V is set to 0, no RAS records
+ * need to be scanned.
+ */
+ if ((errstatus & FMU_ERRSTATUS_V_BIT) == U(0)) {
+ return 0;
+ }
+
+ /*
+ * FMU_ERR<M>STATUS.IERR indicates which Safety Mechanism
+ * reported the error.
+ */
+ ierr = (errstatus >> FMU_ERRSTATUS_IERR_SHIFT) &
+ FMU_ERRSTATUS_IERR_MASK;
+
+ /*
+ * FMU_ERR<M>STATUS.SERR indicates architecturally
+ * defined primary error code.
+ */
+ serr = errstatus & FMU_ERRSTATUS_SERR_MASK;
+
+ ERROR("**************************************\n");
+ ERROR("RAS %s Error detected by GIC600 AE FMU\n",
+ ((errstatus & FMU_ERRSTATUS_UE_BIT) != 0U) ?
+ "Uncorrectable" : "Corrected");
+ ERROR("\tStatus = 0x%lx \n", errstatus);
+ ERROR("\tBlock ID = 0x%x\n", blkid);
+ ERROR("\tSafety Mechanism ID = 0x%x (%s)\n", ierr,
+ ras_ierr_to_str(blkid, ierr));
+ ERROR("\tArchitecturally defined primary error code = 0x%x\n",
+ serr);
+ ERROR("**************************************\n");
+
+ /* Clear FMU_ERR<M>STATUS */
+ gic_fmu_write_errstatus(base, probe_data, errstatus);
+
+ return 0;
+}
+
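A platform would typically call these two routines from its RAS or error-polling glue. A hypothetical wiring sketch, where GICFMU_BASE and plat_poll_gic600_fmu() are illustrative names only:

    static void plat_poll_gic600_fmu(void)
    {
        int record;

        /* Probe FMU_ERRGSR; on error, decode and clear the faulting record. */
        if (gic600_fmu_probe(GICFMU_BASE, &record) == 1) {
            (void)gic600_fmu_ras_handler(GICFMU_BASE, record);
        }
    }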
/*
* Initialization sequence for the FMU
*
@@ -138,8 +268,12 @@
/* Enable error detection for all error records */
for (unsigned int i = 0U; i < num_blk; i++) {
- /* Skip next steps if the block is not present */
+ /*
+ * Disable all safety mechanisms for blocks that are not
+ * present and skip the next steps.
+ */
if ((blk_present_mask & BIT(i)) == 0U) {
+ gic_fmu_disable_all_sm_blkid(base, i);
continue;
}
@@ -168,22 +302,26 @@
*/
if ((blk_present_mask & BIT(FMU_BLK_GICD)) != 0U) {
smen = (GICD_MBIST_REQ_ERROR << FMU_SMEN_SMID_SHIFT) |
- (FMU_BLK_GICD << FMU_SMEN_BLK_SHIFT);
+ (FMU_BLK_GICD << FMU_SMEN_BLK_SHIFT) |
+ FMU_SMEN_EN_BIT;
gic_fmu_write_smen(base, smen);
smen = (GICD_FMU_CLKGATE_ERROR << FMU_SMEN_SMID_SHIFT) |
- (FMU_BLK_GICD << FMU_SMEN_BLK_SHIFT);
+ (FMU_BLK_GICD << FMU_SMEN_BLK_SHIFT) |
+ FMU_SMEN_EN_BIT;
gic_fmu_write_smen(base, smen);
}
for (unsigned int i = FMU_BLK_PPI0; i < FMU_BLK_PPI31; i++) {
if ((blk_present_mask & BIT(i)) != 0U) {
smen = (PPI_MBIST_REQ_ERROR << FMU_SMEN_SMID_SHIFT) |
- (i << FMU_SMEN_BLK_SHIFT);
+ (i << FMU_SMEN_BLK_SHIFT) |
+ FMU_SMEN_EN_BIT;
gic_fmu_write_smen(base, smen);
smen = (PPI_FMU_CLKGATE_ERROR << FMU_SMEN_SMID_SHIFT) |
- (i << FMU_SMEN_BLK_SHIFT);
+ (i << FMU_SMEN_BLK_SHIFT) |
+ FMU_SMEN_EN_BIT;
gic_fmu_write_smen(base, smen);
}
}
@@ -191,11 +329,13 @@
for (unsigned int i = FMU_BLK_ITS0; i < FMU_BLK_ITS7; i++) {
if ((blk_present_mask & BIT(i)) != 0U) {
smen = (ITS_MBIST_REQ_ERROR << FMU_SMEN_SMID_SHIFT) |
- (i << FMU_SMEN_BLK_SHIFT);
+ (i << FMU_SMEN_BLK_SHIFT) |
+ FMU_SMEN_EN_BIT;
gic_fmu_write_smen(base, smen);
smen = (ITS_FMU_CLKGATE_ERROR << FMU_SMEN_SMID_SHIFT) |
- (i << FMU_SMEN_BLK_SHIFT);
+ (i << FMU_SMEN_BLK_SHIFT) |
+ FMU_SMEN_EN_BIT;
gic_fmu_write_smen(base, smen);
}
}
diff --git a/drivers/arm/gic/v3/gic600ae_fmu_helpers.c b/drivers/arm/gic/v3/gic600ae_fmu_helpers.c
index 4aa0efb..09806dc 100644
--- a/drivers/arm/gic/v3/gic600ae_fmu_helpers.c
+++ b/drivers/arm/gic/v3/gic600ae_fmu_helpers.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -258,3 +258,47 @@
{
GIC_FMU_WRITE_64(base, GICFMU_PINGMASK, 0, val);
}
+
+/*
+ * Helper function to disable all safety mechanisms for a given block
+ */
+void gic_fmu_disable_all_sm_blkid(uintptr_t base, unsigned int blkid)
+{
+ uint32_t smen, max_smid = U(0);
+
+ /* Sanity check block ID */
+ assert((blkid >= FMU_BLK_GICD) && (blkid <= FMU_BLK_PPI31));
+
+ /* Find the max safety mechanism ID for the block */
+ switch (blkid) {
+ case FMU_BLK_GICD:
+ max_smid = FMU_SMID_GICD_MAX;
+ break;
+
+ case FMU_BLK_SPICOL:
+ max_smid = FMU_SMID_SPICOL_MAX;
+ break;
+
+ case FMU_BLK_WAKERQ:
+ max_smid = FMU_SMID_WAKERQ_MAX;
+ break;
+
+ case FMU_BLK_ITS0...FMU_BLK_ITS7:
+ max_smid = FMU_SMID_ITS_MAX;
+ break;
+
+ case FMU_BLK_PPI0...FMU_BLK_PPI31:
+ max_smid = FMU_SMID_PPI_MAX;
+ break;
+
+ default:
+ assert(false);
+ break;
+ }
+
+ /* Disable all Safety Mechanisms for a given block id */
+ for (unsigned int i = 0U; i < max_smid; i++) {
+ smen = (blkid << FMU_SMEN_BLK_SHIFT) | (i << FMU_SMEN_SMID_SHIFT);
+ gic_fmu_write_smen(base, smen);
+ }
+}
diff --git a/drivers/ufs/ufs.c b/drivers/ufs/ufs.c
index 3c27aff..15d80ae 100644
--- a/drivers/ufs/ufs.c
+++ b/drivers/ufs/ufs.c
@@ -356,7 +356,6 @@
hd->prdto = (utrd->size_upiu + utrd->size_resp_upiu) >> 2;
}
- flush_dcache_range((uintptr_t)utrd, sizeof(utp_utrd_t));
flush_dcache_range((uintptr_t)utrd->header, UFS_DESC_SIZE);
return 0;
}
@@ -415,7 +414,6 @@
assert(0);
break;
}
- flush_dcache_range((uintptr_t)utrd, sizeof(utp_utrd_t));
flush_dcache_range((uintptr_t)utrd->header, UFS_DESC_SIZE);
return 0;
}
@@ -439,7 +437,6 @@
nop_out->trans_type = 0;
nop_out->task_tag = utrd->task_tag;
- flush_dcache_range((uintptr_t)utrd, sizeof(utp_utrd_t));
flush_dcache_range((uintptr_t)utrd->header, UFS_DESC_SIZE);
}
@@ -473,7 +470,6 @@
hd = (utrd_header_t *)utrd->header;
resp = (resp_upiu_t *)utrd->resp_upiu;
- inv_dcache_range((uintptr_t)hd, UFS_DESC_SIZE);
do {
data = mmio_read_32(ufs_params.reg_base + IS);
if ((data & ~(UFS_INT_UCCS | UFS_INT_UTRCS)) != 0)
@@ -483,6 +479,12 @@
data = mmio_read_32(ufs_params.reg_base + UTRLDBR);
assert((data & (1 << slot)) == 0);
+ /*
+ * Invalidate the header after the DMA read operation has
+ * completed, to avoid the CPU referring to prefetched
+ * data brought in before DMA completion.
+ */
+ inv_dcache_range((uintptr_t)hd, UFS_DESC_SIZE);
assert(hd->ocs == OCS_SUCCESS);
assert((resp->trans_type & TRANS_TYPE_CODE_MASK) == trans_type);
(void)resp;
@@ -656,7 +658,6 @@
sense_data_t *sense;
unsigned char data[CACHE_WRITEBACK_GRANULE << 1];
uintptr_t buf;
- int result;
int retry;
assert((ufs_params.reg_base != 0) &&
@@ -668,8 +669,6 @@
buf = (uintptr_t)data;
buf = (buf + CACHE_WRITEBACK_GRANULE - 1) &
~(CACHE_WRITEBACK_GRANULE - 1);
- memset((void *)buf, 0, CACHE_WRITEBACK_GRANULE);
- flush_dcache_range(buf, CACHE_WRITEBACK_GRANULE);
do {
ufs_send_cmd(&utrd, CDBCMD_READ_CAPACITY_10, lun, 0,
buf, READ_CAPACITY_LENGTH);
@@ -693,14 +692,12 @@
/* logical block length in bytes */
*size = be32toh(*(unsigned int *)(buf + 4));
} while (retry);
- (void)result;
}
size_t ufs_read_blocks(int lun, int lba, uintptr_t buf, size_t size)
{
utp_utrd_t utrd;
resp_upiu_t *resp;
- int result;
assert((ufs_params.reg_base != 0) &&
(ufs_params.desc_base != 0) &&
@@ -710,8 +707,12 @@
#ifdef UFS_RESP_DEBUG
dump_upiu(&utrd);
#endif
+ /*
+ * Invalidate prefetched cache contents before the CPU
+ * accesses the buffer.
+ */
+ inv_dcache_range(buf, size);
resp = (resp_upiu_t *)utrd.resp_upiu;
- (void)result;
return size - resp->res_trans_cnt;
}
@@ -719,7 +720,6 @@
{
utp_utrd_t utrd;
resp_upiu_t *resp;
- int result;
assert((ufs_params.reg_base != 0) &&
(ufs_params.desc_base != 0) &&
@@ -730,7 +730,6 @@
dump_upiu(&utrd);
#endif
resp = (resp_upiu_t *)utrd.resp_upiu;
- (void)result;
return size - resp->res_trans_cnt;
}
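Since ufs_read_blocks() now invalidates the destination range once the DMA has completed, callers should keep the buffer cache-line aligned (and a multiple of the cache line in size) so the invalidate cannot discard unrelated neighbouring data. A hypothetical caller sketch, where io_buf and read_first_block() are illustrative names and __aligned/CACHE_WRITEBACK_GRANULE are the existing TF-A helpers:

    static unsigned char io_buf[512] __aligned(CACHE_WRITEBACK_GRANULE);

    static size_t read_first_block(int lun)
    {
        /* Read one 512-byte logical block from LBA 0 into io_buf. */
        return ufs_read_blocks(lun, 0, (uintptr_t)io_buf, sizeof(io_buf));
    }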
diff --git a/fdts/stm32mp131.dtsi b/fdts/stm32mp131.dtsi
index dff1b33..adf7a91 100644
--- a/fdts/stm32mp131.dtsi
+++ b/fdts/stm32mp131.dtsi
@@ -26,26 +26,6 @@
};
};
- nvmem_layout: nvmem_layout@0 {
- compatible = "st,stm32-nvmem-layout";
-
- nvmem-cells = <&cfg0_otp>,
- <&part_number_otp>,
- <&monotonic_otp>,
- <&nand_otp>,
- <&nand2_otp>,
- <&uid_otp>,
- <&hw2_otp>;
-
- nvmem-cell-names = "cfg0_otp",
- "part_number_otp",
- "monotonic_otp",
- "nand_otp",
- "nand2_otp",
- "uid_otp",
- "hw2_otp";
- };
-
clocks {
clk_csi: clk-csi {
#clock-cells = <0>;
diff --git a/fdts/stm32mp135f-dk.dts b/fdts/stm32mp135f-dk.dts
index 0fa064b..6240381 100644
--- a/fdts/stm32mp135f-dk.dts
+++ b/fdts/stm32mp135f-dk.dts
@@ -175,28 +175,6 @@
status = "okay";
};
-&nvmem_layout {
- nvmem-cells = <&cfg0_otp>,
- <&part_number_otp>,
- <&monotonic_otp>,
- <&nand_otp>,
- <&nand2_otp>,
- <&uid_otp>,
- <&hw2_otp>,
- <&pkh_otp>,
- <&board_id>;
-
- nvmem-cell-names = "cfg0_otp",
- "part_number_otp",
- "monotonic_otp",
- "nand_otp",
- "nand2_otp",
- "uid_otp",
- "hw2_otp",
- "pkh_otp",
- "board_id";
-};
-
&pka {
secure-status = "okay";
};
diff --git a/fdts/stm32mp151.dtsi b/fdts/stm32mp151.dtsi
index 454e124..63cc917 100644
--- a/fdts/stm32mp151.dtsi
+++ b/fdts/stm32mp151.dtsi
@@ -24,26 +24,6 @@
};
};
- nvmem_layout: nvmem_layout@0 {
- compatible = "st,stm32-nvmem-layout";
-
- nvmem-cells = <&cfg0_otp>,
- <&part_number_otp>,
- <&monotonic_otp>,
- <&nand_otp>,
- <&uid_otp>,
- <&package_otp>,
- <&hw2_otp>;
-
- nvmem-cell-names = "cfg0_otp",
- "part_number_otp",
- "monotonic_otp",
- "nand_otp",
- "uid_otp",
- "package_otp",
- "hw2_otp";
- };
-
psci {
compatible = "arm,psci-1.0";
method = "smc";
diff --git a/fdts/stm32mp157c-ed1.dts b/fdts/stm32mp157c-ed1.dts
index 44c7016..659e8bf 100644
--- a/fdts/stm32mp157c-ed1.dts
+++ b/fdts/stm32mp157c-ed1.dts
@@ -195,26 +195,6 @@
status = "okay";
};
-&nvmem_layout {
- nvmem-cells = <&cfg0_otp>,
- <&part_number_otp>,
- <&monotonic_otp>,
- <&nand_otp>,
- <&uid_otp>,
- <&package_otp>,
- <&hw2_otp>,
- <&board_id>;
-
- nvmem-cell-names = "cfg0_otp",
- "part_number_otp",
- "monotonic_otp",
- "nand_otp",
- "uid_otp",
- "package_otp",
- "hw2_otp",
- "board_id";
-};
-
&pwr_regulators {
vdd-supply = <&vdd>;
vdd_3v3_usbfs-supply = <&vdd_usb>;
diff --git a/fdts/stm32mp15xx-dkx.dtsi b/fdts/stm32mp15xx-dkx.dtsi
index 2eb3a57..05eb46a 100644
--- a/fdts/stm32mp15xx-dkx.dtsi
+++ b/fdts/stm32mp15xx-dkx.dtsi
@@ -183,26 +183,6 @@
secure-status = "okay";
};
-&nvmem_layout {
- nvmem-cells = <&cfg0_otp>,
- <&part_number_otp>,
- <&monotonic_otp>,
- <&nand_otp>,
- <&uid_otp>,
- <&package_otp>,
- <&hw2_otp>,
- <&board_id>;
-
- nvmem-cell-names = "cfg0_otp",
- "part_number_otp",
- "monotonic_otp",
- "nand_otp",
- "uid_otp",
- "package_otp",
- "hw2_otp",
- "board_id";
-};
-
&pwr_regulators {
vdd-supply = <&vdd>;
vdd_3v3_usbfs-supply = <&vdd_usb>;
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index 29da33c..b4608ae 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -99,7 +99,6 @@
/*******************************************************************************
* Definitions for EL2 system registers for save/restore routine
******************************************************************************/
-
#define CNTPOFF_EL2 S3_4_C14_C0_6
#define HAFGRTR_EL2 S3_4_C3_C1_6
#define HDFGRTR_EL2 S3_4_C3_C1_4
@@ -155,39 +154,55 @@
#endif
/* ID_AA64PFR0_EL1 definitions */
-#define ID_AA64PFR0_EL0_SHIFT U(0)
-#define ID_AA64PFR0_EL1_SHIFT U(4)
-#define ID_AA64PFR0_EL2_SHIFT U(8)
-#define ID_AA64PFR0_EL3_SHIFT U(12)
-#define ID_AA64PFR0_AMU_SHIFT U(44)
-#define ID_AA64PFR0_AMU_MASK ULL(0xf)
-#define ID_AA64PFR0_AMU_NOT_SUPPORTED U(0x0)
-#define ID_AA64PFR0_AMU_V1 U(0x1)
-#define ID_AA64PFR0_AMU_V1P1 U(0x2)
-#define ID_AA64PFR0_ELX_MASK ULL(0xf)
-#define ID_AA64PFR0_GIC_SHIFT U(24)
-#define ID_AA64PFR0_GIC_WIDTH U(4)
-#define ID_AA64PFR0_GIC_MASK ULL(0xf)
-#define ID_AA64PFR0_SVE_SHIFT U(32)
-#define ID_AA64PFR0_SVE_MASK ULL(0xf)
-#define ID_AA64PFR0_SVE_LENGTH U(4)
-#define ID_AA64PFR0_SEL2_SHIFT U(36)
-#define ID_AA64PFR0_SEL2_MASK ULL(0xf)
-#define ID_AA64PFR0_MPAM_SHIFT U(40)
-#define ID_AA64PFR0_MPAM_MASK ULL(0xf)
-#define ID_AA64PFR0_DIT_SHIFT U(48)
-#define ID_AA64PFR0_DIT_MASK ULL(0xf)
-#define ID_AA64PFR0_DIT_LENGTH U(4)
-#define ID_AA64PFR0_DIT_SUPPORTED U(1)
-#define ID_AA64PFR0_CSV2_SHIFT U(56)
-#define ID_AA64PFR0_CSV2_MASK ULL(0xf)
-#define ID_AA64PFR0_CSV2_LENGTH U(4)
+#define ID_AA64PFR0_EL0_SHIFT U(0)
+#define ID_AA64PFR0_EL1_SHIFT U(4)
+#define ID_AA64PFR0_EL2_SHIFT U(8)
+#define ID_AA64PFR0_EL3_SHIFT U(12)
+
+#define ID_AA64PFR0_AMU_SHIFT U(44)
+#define ID_AA64PFR0_AMU_MASK ULL(0xf)
+#define ID_AA64PFR0_AMU_NOT_SUPPORTED U(0x0)
+#define ID_AA64PFR0_AMU_V1 ULL(0x1)
+#define ID_AA64PFR0_AMU_V1P1 U(0x2)
+
+#define ID_AA64PFR0_ELX_MASK ULL(0xf)
+
+#define ID_AA64PFR0_GIC_SHIFT U(24)
+#define ID_AA64PFR0_GIC_WIDTH U(4)
+#define ID_AA64PFR0_GIC_MASK ULL(0xf)
+
+#define ID_AA64PFR0_SVE_SHIFT U(32)
+#define ID_AA64PFR0_SVE_MASK ULL(0xf)
+#define ID_AA64PFR0_SVE_SUPPORTED ULL(0x1)
+#define ID_AA64PFR0_SVE_LENGTH U(4)
+
+#define ID_AA64PFR0_SEL2_SHIFT U(36)
+#define ID_AA64PFR0_SEL2_MASK ULL(0xf)
+
+#define ID_AA64PFR0_MPAM_SHIFT U(40)
+#define ID_AA64PFR0_MPAM_MASK ULL(0xf)
+
+#define ID_AA64PFR0_DIT_SHIFT U(48)
+#define ID_AA64PFR0_DIT_MASK ULL(0xf)
+#define ID_AA64PFR0_DIT_LENGTH U(4)
+#define ID_AA64PFR0_DIT_SUPPORTED U(1)
+
+#define ID_AA64PFR0_CSV2_SHIFT U(56)
+#define ID_AA64PFR0_CSV2_MASK ULL(0xf)
+#define ID_AA64PFR0_CSV2_LENGTH U(4)
+#define ID_AA64PFR0_CSV2_2_SUPPORTED ULL(0x2)
+
#define ID_AA64PFR0_FEAT_RME_SHIFT U(52)
#define ID_AA64PFR0_FEAT_RME_MASK ULL(0xf)
#define ID_AA64PFR0_FEAT_RME_LENGTH U(4)
#define ID_AA64PFR0_FEAT_RME_NOT_SUPPORTED U(0)
#define ID_AA64PFR0_FEAT_RME_V1 U(1)
+#define ID_AA64PFR0_RAS_SHIFT U(28)
+#define ID_AA64PFR0_RAS_MASK ULL(0xf)
+#define ID_AA64PFR0_RAS_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64PFR0_RAS_LENGTH U(4)
+
/* Exception level handling */
#define EL_IMPL_NONE ULL(0)
#define EL_IMPL_A64ONLY ULL(1)
@@ -204,8 +219,10 @@
#define ID_AA64DFR0_TRACEFILT_LENGTH U(4)
/* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
-#define ID_AA64DFR0_PMS_SHIFT U(32)
-#define ID_AA64DFR0_PMS_MASK ULL(0xf)
+#define ID_AA64DFR0_PMS_SHIFT U(32)
+#define ID_AA64DFR0_PMS_MASK ULL(0xf)
+#define ID_AA64DFR0_SPE_SUPPORTED ULL(0x1)
+#define ID_AA64DFR0_SPE_NOT_SUPPORTED ULL(0x0)
/* ID_AA64DFR0_EL1.TraceBuffer definitions */
#define ID_AA64DFR0_TRACEBUFFER_SHIFT U(44)
@@ -222,15 +239,22 @@
#define ID_AA64ISAR0_RNDR_MASK ULL(0xf)
/* ID_AA64ISAR1_EL1 definitions */
-#define ID_AA64ISAR1_EL1 S3_0_C0_C6_1
-#define ID_AA64ISAR1_GPI_SHIFT U(28)
-#define ID_AA64ISAR1_GPI_MASK ULL(0xf)
-#define ID_AA64ISAR1_GPA_SHIFT U(24)
-#define ID_AA64ISAR1_GPA_MASK ULL(0xf)
-#define ID_AA64ISAR1_API_SHIFT U(8)
-#define ID_AA64ISAR1_API_MASK ULL(0xf)
-#define ID_AA64ISAR1_APA_SHIFT U(4)
-#define ID_AA64ISAR1_APA_MASK ULL(0xf)
+#define ID_AA64ISAR1_EL1 S3_0_C0_C6_1
+
+#define ID_AA64ISAR1_GPI_SHIFT U(28)
+#define ID_AA64ISAR1_GPI_MASK ULL(0xf)
+#define ID_AA64ISAR1_GPA_SHIFT U(24)
+#define ID_AA64ISAR1_GPA_MASK ULL(0xf)
+
+#define ID_AA64ISAR1_API_SHIFT U(8)
+#define ID_AA64ISAR1_API_MASK ULL(0xf)
+#define ID_AA64ISAR1_APA_SHIFT U(4)
+#define ID_AA64ISAR1_APA_MASK ULL(0xf)
+
+#define ID_AA64ISAR1_SB_SHIFT U(36)
+#define ID_AA64ISAR1_SB_MASK ULL(0xf)
+#define ID_AA64ISAR1_SB_SUPPORTED ULL(0x1)
+#define ID_AA64ISAR1_SB_NOT_SUPPORTED ULL(0x0)
/* ID_AA64MMFR0_EL1 definitions */
#define ID_AA64MMFR0_EL1_PARANGE_SHIFT U(0)
@@ -292,17 +316,23 @@
#define ID_AA64MMFR1_EL1_HCX_NOT_SUPPORTED ULL(0x0)
/* ID_AA64MMFR2_EL1 definitions */
-#define ID_AA64MMFR2_EL1 S3_0_C0_C7_2
+#define ID_AA64MMFR2_EL1 S3_0_C0_C7_2
-#define ID_AA64MMFR2_EL1_ST_SHIFT U(28)
-#define ID_AA64MMFR2_EL1_ST_MASK ULL(0xf)
+#define ID_AA64MMFR2_EL1_ST_SHIFT U(28)
+#define ID_AA64MMFR2_EL1_ST_MASK ULL(0xf)
-#define ID_AA64MMFR2_EL1_CCIDX_SHIFT U(20)
-#define ID_AA64MMFR2_EL1_CCIDX_MASK ULL(0xf)
-#define ID_AA64MMFR2_EL1_CCIDX_LENGTH U(4)
+#define ID_AA64MMFR2_EL1_CCIDX_SHIFT U(20)
+#define ID_AA64MMFR2_EL1_CCIDX_MASK ULL(0xf)
+#define ID_AA64MMFR2_EL1_CCIDX_LENGTH U(4)
+
+#define ID_AA64MMFR2_EL1_CNP_SHIFT U(0)
+#define ID_AA64MMFR2_EL1_CNP_MASK ULL(0xf)
-#define ID_AA64MMFR2_EL1_CNP_SHIFT U(0)
-#define ID_AA64MMFR2_EL1_CNP_MASK ULL(0xf)
+#define ID_AA64MMFR2_EL1_NV_SHIFT U(24)
+#define ID_AA64MMFR2_EL1_NV_MASK ULL(0xf)
+#define ID_AA64MMFR2_EL1_NV_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR2_EL1_NV_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR2_EL1_NV2_SUPPORTED ULL(0x2)
/* ID_AA64PFR1_EL1 definitions */
#define ID_AA64PFR1_EL1_SSBS_SHIFT U(4)
diff --git a/include/arch/aarch64/arch_features.h b/include/arch/aarch64/arch_features.h
index a260f03..29710e7 100644
--- a/include/arch/aarch64/arch_features.h
+++ b/include/arch/aarch64/arch_features.h
@@ -140,4 +140,88 @@
ID_AA64PFR0_FEAT_RME_SHIFT) & ID_AA64PFR0_FEAT_RME_MASK;
}
+/*********************************************************************************
+ * Function to identify the presence of FEAT_SB (Speculation Barrier Instruction)
+ ********************************************************************************/
+static inline bool is_armv8_0_feat_sb_present(void)
+{
+ return (((read_id_aa64isar1_el1() >> ID_AA64ISAR1_SB_SHIFT) &
+ ID_AA64ISAR1_SB_MASK) == ID_AA64ISAR1_SB_SUPPORTED);
+}
+
+/*********************************************************************************
+ * Function to identify the presence of FEAT_CSV2_2 (Cache Speculation Variant 2)
+ ********************************************************************************/
+static inline bool is_armv8_0_feat_csv2_2_present(void)
+{
+ return (((read_id_aa64pfr0_el1() >> ID_AA64PFR0_CSV2_SHIFT) &
+ ID_AA64PFR0_CSV2_MASK) == ID_AA64PFR0_CSV2_2_SUPPORTED);
+}
+
+/**********************************************************************************
+ * Function to identify the presence of FEAT_SPE (Statistical Profiling Extension)
+ *********************************************************************************/
+static inline bool is_armv8_2_feat_spe_present(void)
+{
+ return (((read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT) &
+ ID_AA64DFR0_PMS_MASK) != ID_AA64DFR0_SPE_NOT_SUPPORTED);
+}
+
+/*******************************************************************************
+ * Function to identify the presence of FEAT_SVE (Scalable Vector Extension)
+ ******************************************************************************/
+static inline bool is_armv8_2_feat_sve_present(void)
+{
+ return (((read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT) &
+ ID_AA64PFR0_SVE_MASK) == ID_AA64PFR0_SVE_SUPPORTED);
+}
+
+/*******************************************************************************
+ * Function to identify the presence of FEAT_RAS (Reliability, Availability,
+ * and Serviceability Extension)
+ ******************************************************************************/
+static inline bool is_armv8_2_feat_ras_present(void)
+{
+ return (((read_id_aa64pfr0_el1() >> ID_AA64PFR0_RAS_SHIFT) &
+ ID_AA64PFR0_RAS_MASK) != ID_AA64PFR0_RAS_NOT_SUPPORTED);
+}
+
+/**************************************************************************
+ * Function to identify the presence of FEAT_DIT (Data Independent Timing)
+ *************************************************************************/
+static inline bool is_armv8_4_feat_dit_present(void)
+{
+ return (((read_id_aa64pfr0_el1() >> ID_AA64PFR0_DIT_SHIFT) &
+ ID_AA64PFR0_DIT_MASK) == ID_AA64PFR0_DIT_SUPPORTED);
+}
+
+/*************************************************************************
+ * Function to identify the presence of FEAT_TRF (Self-hosted Trace Extensions)
+ ************************************************************************/
+static inline bool is_arm8_4_feat_trf_present(void)
+{
+ return (((read_id_aa64dfr0_el1() >> ID_AA64DFR0_TRACEFILT_SHIFT) &
+ ID_AA64DFR0_TRACEFILT_MASK) == ID_AA64DFR0_TRACEFILT_SUPPORTED);
+}
+
+/*******************************************************************************
+ * Function to identify the presence of FEAT_AMUv1 (Activity Monitors
+ * Extension v1)
+ ******************************************************************************/
+static inline bool is_armv8_4_feat_amuv1_present(void)
+{
+ return (((read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
+ ID_AA64PFR0_AMU_MASK) >= ID_AA64PFR0_AMU_V1);
+}
+
+/********************************************************************************
+ * Function to return the level of FEAT_NV support (Nested Virtualization);
+ * a value of 2 or above indicates FEAT_NV2 (Enhanced Nested Virtualization).
+ *******************************************************************************/
+static inline unsigned int get_armv8_4_feat_nv_support(void)
+{
+ return (((read_id_aa64mmfr2_el1() >> ID_AA64MMFR2_EL1_NV_SHIFT) &
+ ID_AA64MMFR2_EL1_NV_MASK));
+}
+
#endif /* ARCH_FEATURES_H */
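A minimal usage sketch for the new ID-register helpers above, assuming only the existing TF-A debug macros; the caller is hypothetical and only illustrates the intended pattern:

#include <arch_features.h>
#include <common/debug.h>

/* Hypothetical caller: gate optional behaviour on what the PE implements. */
static void example_report_optional_features(void)
{
	/* CSV2_2 presence is a simple boolean check of ID_AA64PFR0_EL1. */
	if (is_armv8_0_feat_csv2_2_present()) {
		INFO("FEAT_CSV2_2 implemented by this PE\n");
	}

	/* FEAT_NV support is a level (0 = none, 1 = NV, 2 = NV2), not a bool. */
	if (get_armv8_4_feat_nv_support() >= ID_AA64MMFR2_EL1_NV2_SUPPORTED) {
		INFO("FEAT_NV2 implemented by this PE\n");
	}
}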
diff --git a/include/common/bl_common.ld.h b/include/common/bl_common.ld.h
index 5147e37..9888a3c 100644
--- a/include/common/bl_common.ld.h
+++ b/include/common/bl_common.ld.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -70,7 +70,9 @@
*/
#define BASE_XLAT_TABLE \
. = ALIGN(16); \
- *(base_xlat_table)
+ __BASE_XLAT_TABLE_START__ = .; \
+ *(base_xlat_table) \
+ __BASE_XLAT_TABLE_END__ = .;
#if PLAT_RO_XLAT_TABLES
#define BASE_XLAT_TABLE_RO BASE_XLAT_TABLE
@@ -210,7 +212,9 @@
*/
#define XLAT_TABLE_SECTION \
xlat_table (NOLOAD) : { \
+ __XLAT_TABLE_START__ = .; \
*(xlat_table) \
+ __XLAT_TABLE_END__ = .; \
}
#endif /* BL_COMMON_LD_H */
diff --git a/include/common/feat_detect.h b/include/common/feat_detect.h
new file mode 100644
index 0000000..0f0f105
--- /dev/null
+++ b/include/common/feat_detect.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FEAT_DETECT_H
+#define FEAT_DETECT_H
+
+#include <arch_features.h>
+#include <common/debug.h>
+
+/* Function Prototypes */
+void detect_arch_features(void);
+
+/* Macro Definitions */
+#define FEAT_STATE_1 1
+#define FEAT_STATE_2 2
+#define feat_detect_panic(a, b) ((a) ? (void)0 : feature_panic(b))
+
+/*******************************************************************************
+ * Function: feature_panic
+ * Customised panic handler: logs the name of the feature that is not
+ * supported by the PE and then panics.
+ ******************************************************************************/
+static inline void feature_panic(char *feat_name)
+{
+ ERROR("FEAT_%s not supported by the PE\n", feat_name);
+ panic();
+}
+
+#endif /* FEAT_DETECT_H */
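A minimal sketch of how feat_detect_panic() is meant to pair the arch_features.h helpers with feature_panic(); the wrapper below is illustrative only and stands in for the real detect_arch_features() implementation, which is not shown in this hunk:

#include <common/feat_detect.h>

/* Hypothetical check: abort the boot if a build-mandated feature is absent. */
static void example_check_feat_sb(void)
{
#if ENABLE_FEAT_SB
	/* Logs "FEAT_SB not supported by the PE" and panics if the check fails. */
	feat_detect_panic(is_armv8_0_feat_sb_present(), "SB");
#endif
}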
diff --git a/include/drivers/arm/gic600ae_fmu.h b/include/drivers/arm/gic600ae_fmu.h
index 691ffc7..88b87b9 100644
--- a/include/drivers/arm/gic600ae_fmu.h
+++ b/include/drivers/arm/gic600ae_fmu.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -37,6 +37,7 @@
/* SMEN constants */
#define FMU_SMEN_BLK_SHIFT U(8)
#define FMU_SMEN_SMID_SHIFT U(24)
+#define FMU_SMEN_EN_BIT BIT(0)
/* Error record IDs */
#define FMU_BLK_GICD U(0)
@@ -86,10 +87,10 @@
/* Safety Mechanism limit */
#define FMU_SMID_GICD_MAX U(33)
+#define FMU_SMID_PPI_MAX U(12)
+#define FMU_SMID_ITS_MAX U(14)
#define FMU_SMID_SPICOL_MAX U(5)
#define FMU_SMID_WAKERQ_MAX U(2)
-#define FMU_SMID_ITS_MAX U(14)
-#define FMU_SMID_PPI_MAX U(12)
/* MBIST Safety Mechanism ID */
#define GICD_MBIST_REQ_ERROR U(23)
@@ -100,12 +101,17 @@
#define ITS_FMU_CLKGATE_ERROR U(14)
/* ERRSTATUS bits */
-#define FMU_ERRSTATUS_V_BIT BIT(30)
-#define FMU_ERRSTATUS_UE_BIT BIT(29)
-#define FMU_ERRSTATUS_OV_BIT BIT(27)
-#define FMU_ERRSTATUS_CE_BITS (BIT(25) | BIT(24))
-#define FMU_ERRSTATUS_CLEAR (FMU_ERRSTATUS_V_BIT | FMU_ERRSTATUS_UE_BIT | \
- FMU_ERRSTATUS_OV_BIT | FMU_ERRSTATUS_CE_BITS)
+#define FMU_ERRSTATUS_BLKID_SHIFT U(32)
+#define FMU_ERRSTATUS_BLKID_MASK U(0xFF)
+#define FMU_ERRSTATUS_V_BIT BIT(30)
+#define FMU_ERRSTATUS_UE_BIT BIT(29)
+#define FMU_ERRSTATUS_OV_BIT BIT(27)
+#define FMU_ERRSTATUS_CE_BITS (BIT(25) | BIT(24))
+#define FMU_ERRSTATUS_CLEAR (FMU_ERRSTATUS_V_BIT | FMU_ERRSTATUS_UE_BIT | \
+ FMU_ERRSTATUS_OV_BIT | FMU_ERRSTATUS_CE_BITS)
+#define FMU_ERRSTATUS_IERR_MASK U(0xFF)
+#define FMU_ERRSTATUS_IERR_SHIFT U(8)
+#define FMU_ERRSTATUS_SERR_MASK U(0xFF)
/* PINGCTLR constants */
#define FMU_PINGCTLR_INTDIFF_SHIFT U(16)
@@ -137,11 +143,14 @@
void gic_fmu_write_smen(uintptr_t base, uint32_t val);
void gic_fmu_write_sminjerr(uintptr_t base, uint32_t val);
void gic_fmu_write_pingmask(uintptr_t base, uint64_t val);
+void gic_fmu_disable_all_sm_blkid(uintptr_t base, unsigned int blkid);
void gic600_fmu_init(uint64_t base, uint64_t blk_present_mask, bool errctlr_ce_en, bool errctlr_ue_en);
void gic600_fmu_enable_ping(uint64_t base, uint64_t blk_present_mask,
unsigned int timeout_val, unsigned int interval_diff);
void gic600_fmu_print_sm_info(uint64_t base, unsigned int blk, unsigned int smid);
+int gic600_fmu_probe(uint64_t base, int *probe_data);
+int gic600_fmu_ras_handler(uint64_t base, int probe_data);
#endif /* __ASSEMBLER__ */
diff --git a/include/lib/cpus/aarch64/cortex_x2.h b/include/lib/cpus/aarch64/cortex_x2.h
index 62530e2..92140b1 100644
--- a/include/lib/cpus/aarch64/cortex_x2.h
+++ b/include/lib/cpus/aarch64/cortex_x2.h
@@ -34,6 +34,12 @@
#define CORTEX_X2_CPUPWRCTLR_EL1_CORE_PWRDN_BIT U(1)
/*******************************************************************************
+ * CPU Auxiliary Control Register definitions
+ ******************************************************************************/
+#define CORTEX_X2_CPUACTLR_EL1 S3_0_C15_C1_0
+#define CORTEX_X2_CPUACTLR_EL1_BIT_22 (ULL(1) << 22)
+
+/*******************************************************************************
* CPU Auxiliary Control Register 5 definitions
******************************************************************************/
#define CORTEX_X2_CPUACTLR5_EL1 S3_0_C15_C8_0
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
index 512d196..3a09383 100644
--- a/include/lib/el3_runtime/aarch64/context.h
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -217,23 +217,20 @@
// Starting with Armv8.4
#define CTX_CONTEXTIDR_EL2 U(0x198)
-#define CTX_SDER32_EL2 U(0x1a0)
-#define CTX_TTBR1_EL2 U(0x1a8)
-#define CTX_VDISR_EL2 U(0x1b0)
+#define CTX_TTBR1_EL2 U(0x1a0)
+#define CTX_VDISR_EL2 U(0x1a8)
+#define CTX_VSESR_EL2 U(0x1b0)
#define CTX_VNCR_EL2 U(0x1b8)
-#define CTX_VSESR_EL2 U(0x1c0)
-#define CTX_VSTCR_EL2 U(0x1c8)
-#define CTX_VSTTBR_EL2 U(0x1d0)
-#define CTX_TRFCR_EL2 U(0x1d8)
+#define CTX_TRFCR_EL2 U(0x1c0)
// Starting with Armv8.5
-#define CTX_SCXTNUM_EL2 U(0x1e0)
+#define CTX_SCXTNUM_EL2 U(0x1c8)
// Register for FEAT_HCX
-#define CTX_HCRX_EL2 U(0x1e8)
+#define CTX_HCRX_EL2 U(0x1d0)
/* Align to the next 16 byte boundary */
-#define CTX_EL2_SYSREGS_END U(0x1f0)
+#define CTX_EL2_SYSREGS_END U(0x1e0)
#endif /* CTX_INCLUDE_EL2_REGS */
diff --git a/include/lib/el3_runtime/context_mgmt.h b/include/lib/el3_runtime/context_mgmt.h
index 2090687..1a76d8e 100644
--- a/include/lib/el3_runtime/context_mgmt.h
+++ b/include/lib/el3_runtime/context_mgmt.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -34,6 +34,7 @@
const struct entry_point_info *ep);
void cm_setup_context(cpu_context_t *ctx, const struct entry_point_info *ep);
void cm_prepare_el3_exit(uint32_t security_state);
+void cm_prepare_el3_exit_ns(void);
#ifdef __aarch64__
#if CTX_INCLUDE_EL2_REGS
diff --git a/include/services/ffa_svc.h b/include/services/ffa_svc.h
index 9a7c489..d3fb012 100644
--- a/include/services/ffa_svc.h
+++ b/include/services/ffa_svc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,6 +7,8 @@
#ifndef FFA_SVC_H
#define FFA_SVC_H
+#include <stdbool.h>
+
#include <lib/smccc.h>
#include <lib/utils_def.h>
#include <tools_share/uuid.h>
@@ -107,6 +109,7 @@
#define FFA_VERSION FFA_FID(SMC_32, FFA_FNUM_VERSION)
#define FFA_FEATURES FFA_FID(SMC_32, FFA_FNUM_FEATURES)
#define FFA_RX_RELEASE FFA_FID(SMC_32, FFA_FNUM_RX_RELEASE)
+#define FFA_RX_ACQUIRE FFA_FID(SMC_32, FFA_FNUM_RX_ACQUIRE)
#define FFA_RXTX_MAP_SMC32 FFA_FID(SMC_32, FFA_FNUM_RXTX_MAP)
#define FFA_RXTX_UNMAP FFA_FID(SMC_32, FFA_FNUM_RXTX_UNMAP)
#define FFA_PARTITION_INFO_GET FFA_FID(SMC_32, FFA_FNUM_PARTITION_INFO_GET)
@@ -176,6 +179,15 @@
#define FFA_ENDPOINT_ID_MAX U(1 << 16)
/*
+ * Reserve endpoint id for the SPMD.
+ */
+#define SPMD_DIRECT_MSG_ENDPOINT_ID U(FFA_ENDPOINT_ID_MAX - 1)
+
+/* Mask and shift to check valid secure FF-A Endpoint ID. */
+#define SPMC_SECURE_ID_MASK U(1)
+#define SPMC_SECURE_ID_SHIFT U(15)
+
+/*
* Mask for source and destination endpoint id in
* a direct message request/response.
*/
@@ -209,4 +221,24 @@
FFA_DIRECT_MSG_ENDPOINT_ID_MASK;
}
+/******************************************************************************
+ * FF-A helper functions to determine partition ID world.
+ *****************************************************************************/
+
+/*
+ * Determine if provided ID is in the secure world.
+ */
+static inline bool ffa_is_secure_world_id(uint16_t id)
+{
+ return ((id >> SPMC_SECURE_ID_SHIFT) & SPMC_SECURE_ID_MASK) == 1;
+}
+
+/*
+ * Determine if provided ID is in the normal world.
+ */
+static inline bool ffa_is_normal_world_id(uint16_t id)
+{
+ return !ffa_is_secure_world_id(id);
+}
+
#endif /* FFA_SVC_H */
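A minimal sketch of the new world-classification helpers; it assumes the ffa_endpoint_source()/ffa_endpoint_destination() accessors already present in this header, and the function itself is illustrative only:

#include <stdbool.h>
#include <stdint.h>

#include <services/ffa_svc.h>

/*
 * Hypothetical check: does a direct message cross from the secure to the
 * normal world? Bit 15 of an FF-A endpoint ID encodes the owning world.
 */
static bool example_is_secure_to_normal(uint32_t direct_msg_w1)
{
	uint16_t src = ffa_endpoint_source(direct_msg_w1);
	uint16_t dst = ffa_endpoint_destination(direct_msg_w1);

	return ffa_is_secure_world_id(src) && ffa_is_normal_world_id(dst);
}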
diff --git a/include/services/spmc_svc.h b/include/services/spmc_svc.h
new file mode 100644
index 0000000..8ee61e9
--- /dev/null
+++ b/include/services/spmc_svc.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPMC_SVC_H
+#define SPMC_SVC_H
+
+#ifndef __ASSEMBLER__
+#include <stdint.h>
+
+#include <lib/utils_def.h>
+#include <services/ffa_svc.h>
+#include <services/spm_core_manifest.h>
+
+int spmc_setup(void);
+void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs);
+void *spmc_get_config_addr(void);
+
+void spmc_set_config_addr(uintptr_t soc_fw_config);
+
+uint64_t spmc_smc_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags);
+
+static inline bool is_spmc_at_el3(void)
+{
+ return SPMC_AT_EL3 == 1;
+}
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* SPMC_SVC_H */
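A rough sketch of how is_spmc_at_el3() could be used to route FF-A SMCs either to the EL3 SPMC or to the SPMD; this shim is hypothetical and stands in for the actual runtime-service dispatch, which is not part of this hunk:

#include <services/spmc_svc.h>
#include <services/spmd_svc.h>

/* Hypothetical router: pick the handler based on where the SPMC is resident. */
static uint64_t example_route_ffa_smc(uint32_t smc_fid, bool secure_origin,
				      uint64_t x1, uint64_t x2, uint64_t x3,
				      uint64_t x4, void *cookie, void *handle,
				      uint64_t flags)
{
	if (is_spmc_at_el3()) {
		return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3,
					x4, cookie, handle, flags);
	}

	return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
				flags);
}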
diff --git a/include/services/spmd_svc.h b/include/services/spmd_svc.h
index 1e7e6aa..29dfdad 100644
--- a/include/services/spmd_svc.h
+++ b/include/services/spmd_svc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -12,6 +12,14 @@
#include <stdint.h>
int spmd_setup(void);
+uint64_t spmd_ffa_smc_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags);
uint64_t spmd_smc_handler(uint32_t smc_fid,
uint64_t x1,
uint64_t x2,
@@ -20,6 +28,13 @@
void *cookie,
void *handle,
uint64_t flags);
+uint64_t spmd_smc_switch_state(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *handle);
#endif /* __ASSEMBLER__ */
#endif /* SPMD_SVC_H */
diff --git a/lib/cpus/aarch64/cortex_x2.S b/lib/cpus/aarch64/cortex_x2.S
index 9586a5b..90a906b 100644
--- a/lib/cpus/aarch64/cortex_x2.S
+++ b/lib/cpus/aarch64/cortex_x2.S
@@ -237,6 +237,36 @@
ret
endfunc check_errata_cve_2022_23960
+ /* ---------------------------------------------------------
+ * Errata Workaround for Cortex-X2 Errata 2147715.
+	 * This applies only to revision r2p0 and is fixed in r2p1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x17
+ * ---------------------------------------------------------
+ */
+func errata_x2_2147715_wa
+ /* Compare x0 against revision r2p0 */
+ mov x17, x30
+ bl check_errata_2147715
+ cbz x0, 1f
+
+ /* Apply the workaround by setting bit 22 in CPUACTLR_EL1. */
+ mrs x1, CORTEX_X2_CPUACTLR_EL1
+ orr x1, x1, CORTEX_X2_CPUACTLR_EL1_BIT_22
+ msr CORTEX_X2_CPUACTLR_EL1, x1
+
+1:
+ ret x17
+endfunc errata_x2_2147715_wa
+
+func check_errata_2147715
+ /* Applies to r2p0 */
+ mov x1, #0x20
+ mov x2, #0x20
+ b cpu_rev_var_range
+endfunc check_errata_2147715
+
/* ----------------------------------------------------
* HW will do the cache maintenance while powering down
* ----------------------------------------------------
@@ -268,10 +298,11 @@
* checking functions of each errata.
*/
report_errata ERRATA_X2_2002765, cortex_x2, 2002765
- report_errata ERRATA_X2_2058056, cortex_x2, 2058056
- report_errata ERRATA_X2_2083908, cortex_x2, 2083908
report_errata ERRATA_X2_2017096, cortex_x2, 2017096
+ report_errata ERRATA_X2_2058056, cortex_x2, 2058056
report_errata ERRATA_X2_2081180, cortex_x2, 2081180
+ report_errata ERRATA_X2_2083908, cortex_x2, 2083908
+ report_errata ERRATA_X2_2147715, cortex_x2, 2147715
report_errata ERRATA_X2_2216384, cortex_x2, 2216384
report_errata WORKAROUND_CVE_2022_23960, cortex_x2, cve_2022_23960
@@ -321,6 +352,11 @@
bl errata_x2_2216384_wa
#endif
+#if ERRATA_X2_2147715
+ mov x0, x18
+ bl errata_x2_2147715_wa
+#endif
+
#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
/*
* The Cortex-X2 generic vectors are overridden to apply errata
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 8840f8e..462ca9d 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -547,6 +547,10 @@
# r2p1.
ERRATA_X2_2216384 ?=0
+# Flag to apply erratum 2147715 workaround during reset. This erratum applies
+# only to revision r2p0 of the Cortex-X2 cpu and is fixed in r2p1.
+ERRATA_X2_2147715 ?=0
+
# Flag to apply erratum 1922240 workaround during reset. This erratum applies
# to revision r0p0 of the Cortex-A510 cpu and is fixed in r0p1.
ERRATA_A510_1922240 ?=0
@@ -1046,6 +1050,10 @@
$(eval $(call assert_boolean,ERRATA_X2_2216384))
$(eval $(call add_define,ERRATA_X2_2216384))
+# Process ERRATA_X2_2147715 flag
+$(eval $(call assert_boolean,ERRATA_X2_2147715))
+$(eval $(call add_define,ERRATA_X2_2147715))
+
# Process ERRATA_A510_1922240 flag
$(eval $(call assert_boolean,ERRATA_A510_1922240))
$(eval $(call add_define,ERRATA_A510_1922240))
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
index 3ef378c..af8edf5 100644
--- a/lib/el3_runtime/aarch32/context_mgmt.c
+++ b/lib/el3_runtime/aarch32/context_mgmt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -332,3 +332,12 @@
enable_extensions_nonsecure(el2_unused);
}
}
+
+/*******************************************************************************
+ * This function is used to exit to Non-secure world. It simply calls the
+ * cm_prepare_el3_exit function for AArch32.
+ ******************************************************************************/
+void cm_prepare_el3_exit_ns(void)
+{
+ cm_prepare_el3_exit(NON_SECURE);
+}
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index c1c0612..69acc2f 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -13,14 +13,14 @@
#if CTX_INCLUDE_EL2_REGS
.global el2_sysregs_context_save
.global el2_sysregs_context_restore
-#endif
+#endif /* CTX_INCLUDE_EL2_REGS */
.global el1_sysregs_context_save
.global el1_sysregs_context_restore
#if CTX_INCLUDE_FPREGS
.global fpregs_context_save
.global fpregs_context_restore
-#endif
+#endif /* CTX_INCLUDE_FPREGS */
.global prepare_el3_entry
.global restore_gp_pmcr_pauth_regs
.global save_and_update_ptw_el1_sys_regs
@@ -62,7 +62,7 @@
#if CTX_INCLUDE_AARCH32_REGS
mrs x16, dbgvcr32_el2
str x16, [x0, #CTX_DBGVCR32_EL2]
-#endif
+#endif /* CTX_INCLUDE_AARCH32_REGS */
mrs x9, elr_el2
mrs x10, esr_el2
@@ -91,7 +91,8 @@
#if ENABLE_SPE_FOR_LOWER_ELS
mrs x13, PMSCR_EL2
str x13, [x0, #CTX_PMSCR_EL2]
-#endif
+#endif /* ENABLE_SPE_FOR_LOWER_ELS */
+
mrs x14, sctlr_el2
str x14, [x0, #CTX_SCTLR_EL2]
@@ -118,7 +119,7 @@
#if CTX_INCLUDE_MTE_REGS
mrs x9, TFSR_EL2
str x9, [x0, #CTX_TFSR_EL2]
-#endif
+#endif /* CTX_INCLUDE_MTE_REGS */
#if ENABLE_MPAM_FOR_LOWER_ELS
mrs x10, MPAM2_EL2
@@ -143,7 +144,7 @@
mrs x11, MPAMVPM7_EL2
mrs x12, MPAMVPMV_EL2
stp x11, x12, [x0, #CTX_MPAMVPM7_EL2]
-#endif
+#endif /* ENABLE_MPAM_FOR_LOWER_ELS */
#if ENABLE_FEAT_FGT
mrs x13, HDFGRTR_EL2
@@ -152,7 +153,7 @@
stp x13, x14, [x0, #CTX_HDFGRTR_EL2]
#else
str x13, [x0, #CTX_HDFGRTR_EL2]
-#endif
+#endif /* ENABLE_FEAT_AMUv1 */
mrs x15, HDFGWTR_EL2
mrs x16, HFGITR_EL2
stp x15, x16, [x0, #CTX_HDFGWTR_EL2]
@@ -160,48 +161,61 @@
mrs x9, HFGRTR_EL2
mrs x10, HFGWTR_EL2
stp x9, x10, [x0, #CTX_HFGRTR_EL2]
-#endif
+#endif /* ENABLE_FEAT_FGT */
#if ENABLE_FEAT_ECV
mrs x11, CNTPOFF_EL2
str x11, [x0, #CTX_CNTPOFF_EL2]
-#endif
+#endif /* ENABLE_FEAT_ECV */
-#if ARM_ARCH_AT_LEAST(8, 4)
- mrs x12, contextidr_el2
- str x12, [x0, #CTX_CONTEXTIDR_EL2]
+#if ENABLE_FEAT_VHE
+ /*
+ * CONTEXTIDR_EL2 register is saved only when FEAT_VHE or
+ * FEAT_Debugv8p2 (currently not in TF-A) is supported.
+ */
+ mrs x9, contextidr_el2
+ mrs x10, ttbr1_el2
+ stp x9, x10, [x0, #CTX_CONTEXTIDR_EL2]
+#endif /* ENABLE_FEAT_VHE */
-#if CTX_INCLUDE_AARCH32_REGS
- mrs x13, sder32_el2
- str x13, [x0, #CTX_SDER32_EL2]
-#endif
- mrs x14, ttbr1_el2
- mrs x15, vdisr_el2
- stp x14, x15, [x0, #CTX_TTBR1_EL2]
+#if RAS_EXTENSION
+ /*
+ * VDISR_EL2 and VSESR_EL2 registers are saved only when
+ * FEAT_RAS is supported.
+ */
+ mrs x11, vdisr_el2
+ mrs x12, vsesr_el2
+ stp x11, x12, [x0, #CTX_VDISR_EL2]
+#endif /* RAS_EXTENSION */
#if CTX_INCLUDE_NEVE_REGS
+ /*
+ * VNCR_EL2 register is saved only when FEAT_NV2 is supported.
+ */
mrs x16, vncr_el2
str x16, [x0, #CTX_VNCR_EL2]
-#endif
+#endif /* CTX_INCLUDE_NEVE_REGS */
- mrs x9, vsesr_el2
- mrs x10, vstcr_el2
- stp x9, x10, [x0, #CTX_VSESR_EL2]
-
- mrs x11, vsttbr_el2
+#if ENABLE_TRF_FOR_NS
+ /*
+ * TRFCR_EL2 register is saved only when FEAT_TRF is supported.
+ */
mrs x12, TRFCR_EL2
- stp x11, x12, [x0, #CTX_VSTTBR_EL2]
-#endif
+ str x12, [x0, #CTX_TRFCR_EL2]
+#endif /* ENABLE_TRF_FOR_NS */
-#if ARM_ARCH_AT_LEAST(8, 5)
+#if ENABLE_FEAT_CSV2_2
+ /*
+ * SCXTNUM_EL2 register is saved only when FEAT_CSV2_2 is supported.
+ */
mrs x13, scxtnum_el2
str x13, [x0, #CTX_SCXTNUM_EL2]
-#endif
+#endif /* ENABLE_FEAT_CSV2_2 */
#if ENABLE_FEAT_HCX
mrs x14, hcrx_el2
str x14, [x0, #CTX_HCRX_EL2]
-#endif
+#endif /* ENABLE_FEAT_HCX */
ret
endfunc el2_sysregs_context_save
@@ -241,7 +255,7 @@
#if CTX_INCLUDE_AARCH32_REGS
ldr x16, [x0, #CTX_DBGVCR32_EL2]
msr dbgvcr32_el2, x16
-#endif
+#endif /* CTX_INCLUDE_AARCH32_REGS */
ldp x9, x10, [x0, #CTX_ELR_EL2]
msr elr_el2, x9
@@ -270,7 +284,8 @@
#if ENABLE_SPE_FOR_LOWER_ELS
ldr x13, [x0, #CTX_PMSCR_EL2]
msr PMSCR_EL2, x13
-#endif
+#endif /* ENABLE_SPE_FOR_LOWER_ELS */
+
ldr x14, [x0, #CTX_SCTLR_EL2]
msr sctlr_el2, x14
@@ -297,7 +312,7 @@
#if CTX_INCLUDE_MTE_REGS
ldr x9, [x0, #CTX_TFSR_EL2]
msr TFSR_EL2, x9
-#endif
+#endif /* CTX_INCLUDE_MTE_REGS */
#if ENABLE_MPAM_FOR_LOWER_ELS
ldr x10, [x0, #CTX_MPAM2_EL2]
@@ -322,7 +337,7 @@
ldp x11, x12, [x0, #CTX_MPAMVPM7_EL2]
msr MPAMVPM7_EL2, x11
msr MPAMVPMV_EL2, x12
-#endif
+#endif /* ENABLE_MPAM_FOR_LOWER_ELS */
#if ENABLE_FEAT_FGT
#if ENABLE_FEAT_AMUv1
@@ -330,7 +345,7 @@
msr HAFGRTR_EL2, x14
#else
ldr x13, [x0, #CTX_HDFGRTR_EL2]
-#endif
+#endif /* ENABLE_FEAT_AMUv1 */
msr HDFGRTR_EL2, x13
ldp x15, x16, [x0, #CTX_HDFGWTR_EL2]
@@ -340,48 +355,61 @@
ldp x9, x10, [x0, #CTX_HFGRTR_EL2]
msr HFGRTR_EL2, x9
msr HFGWTR_EL2, x10
-#endif
+#endif /* ENABLE_FEAT_FGT */
#if ENABLE_FEAT_ECV
ldr x11, [x0, #CTX_CNTPOFF_EL2]
msr CNTPOFF_EL2, x11
-#endif
+#endif /* ENABLE_FEAT_ECV */
-#if ARM_ARCH_AT_LEAST(8, 4)
- ldr x12, [x0, #CTX_CONTEXTIDR_EL2]
- msr contextidr_el2, x12
+#if ENABLE_FEAT_VHE
+ /*
+ * CONTEXTIDR_EL2 register is restored only when FEAT_VHE or
+ * FEAT_Debugv8p2 (currently not in TF-A) is supported.
+ */
+ ldp x9, x10, [x0, #CTX_CONTEXTIDR_EL2]
+ msr contextidr_el2, x9
+ msr ttbr1_el2, x10
+#endif /* ENABLE_FEAT_VHE */
-#if CTX_INCLUDE_AARCH32_REGS
- ldr x13, [x0, #CTX_SDER32_EL2]
- msr sder32_el2, x13
-#endif
- ldp x14, x15, [x0, #CTX_TTBR1_EL2]
- msr ttbr1_el2, x14
- msr vdisr_el2, x15
+#if RAS_EXTENSION
+ /*
+ * VDISR_EL2 and VSESR_EL2 registers are restored only when FEAT_RAS
+ * is supported.
+ */
+ ldp x11, x12, [x0, #CTX_VDISR_EL2]
+ msr vdisr_el2, x11
+ msr vsesr_el2, x12
+#endif /* RAS_EXTENSION */
#if CTX_INCLUDE_NEVE_REGS
+ /*
+ * VNCR_EL2 register is restored only when FEAT_NV2 is supported.
+ */
ldr x16, [x0, #CTX_VNCR_EL2]
msr vncr_el2, x16
-#endif
+#endif /* CTX_INCLUDE_NEVE_REGS */
- ldp x9, x10, [x0, #CTX_VSESR_EL2]
- msr vsesr_el2, x9
- msr vstcr_el2, x10
-
- ldp x11, x12, [x0, #CTX_VSTTBR_EL2]
- msr vsttbr_el2, x11
+#if ENABLE_TRF_FOR_NS
+ /*
+ * TRFCR_EL2 register is restored only when FEAT_TRF is supported.
+ */
+ ldr x12, [x0, #CTX_TRFCR_EL2]
msr TRFCR_EL2, x12
-#endif
+#endif /* ENABLE_TRF_FOR_NS */
-#if ARM_ARCH_AT_LEAST(8, 5)
+#if ENABLE_FEAT_CSV2_2
+ /*
+ * SCXTNUM_EL2 register is restored only when FEAT_CSV2_2 is supported.
+ */
ldr x13, [x0, #CTX_SCXTNUM_EL2]
msr scxtnum_el2, x13
-#endif
+#endif /* ENABLE_FEAT_CSV2_2 */
#if ENABLE_FEAT_HCX
ldr x14, [x0, #CTX_HCRX_EL2]
msr hcrx_el2, x14
-#endif
+#endif /* ENABLE_FEAT_HCX */
ret
endfunc el2_sysregs_context_restore
@@ -405,7 +433,7 @@
mrs x15, sctlr_el1
mrs x16, tcr_el1
stp x15, x16, [x0, #CTX_SCTLR_EL1]
-#endif
+#endif /* ERRATA_SPECULATIVE_AT */
mrs x17, cpacr_el1
mrs x9, csselr_el1
@@ -456,7 +484,7 @@
mrs x15, dacr32_el2
mrs x16, ifsr32_el2
stp x15, x16, [x0, #CTX_DACR32_EL2]
-#endif
+#endif /* CTX_INCLUDE_AARCH32_REGS */
/* Save NS timer registers if the build has instructed so */
#if NS_TIMER_SWITCH
@@ -470,7 +498,7 @@
mrs x14, cntkctl_el1
str x14, [x0, #CTX_CNTKCTL_EL1]
-#endif
+#endif /* NS_TIMER_SWITCH */
/* Save MTE system registers if the build has instructed so */
#if CTX_INCLUDE_MTE_REGS
@@ -481,7 +509,7 @@
mrs x9, RGSR_EL1
mrs x10, GCR_EL1
stp x9, x10, [x0, #CTX_RGSR_EL1]
-#endif
+#endif /* CTX_INCLUDE_MTE_REGS */
ret
endfunc el1_sysregs_context_save
@@ -504,7 +532,7 @@
ldp x15, x16, [x0, #CTX_SCTLR_EL1]
msr sctlr_el1, x15
msr tcr_el1, x16
-#endif
+#endif /* ERRATA_SPECULATIVE_AT */
ldp x17, x9, [x0, #CTX_CPACR_EL1]
msr cpacr_el1, x17
@@ -555,7 +583,8 @@
ldp x15, x16, [x0, #CTX_DACR32_EL2]
msr dacr32_el2, x15
msr ifsr32_el2, x16
-#endif
+#endif /* CTX_INCLUDE_AARCH32_REGS */
+
/* Restore NS timer registers if the build has instructed so */
#if NS_TIMER_SWITCH
ldp x10, x11, [x0, #CTX_CNTP_CTL_EL0]
@@ -568,7 +597,8 @@
ldr x14, [x0, #CTX_CNTKCTL_EL1]
msr cntkctl_el1, x14
-#endif
+#endif /* NS_TIMER_SWITCH */
+
/* Restore MTE system registers if the build has instructed so */
#if CTX_INCLUDE_MTE_REGS
ldp x11, x12, [x0, #CTX_TFSRE0_EL1]
@@ -578,7 +608,7 @@
ldp x13, x14, [x0, #CTX_RGSR_EL1]
msr RGSR_EL1, x13
msr GCR_EL1, x14
-#endif
+#endif /* CTX_INCLUDE_MTE_REGS */
	/* No explicit ISB required here as ERET covers it */
ret
@@ -626,7 +656,7 @@
#if CTX_INCLUDE_AARCH32_REGS
mrs x11, fpexc32_el2
str x11, [x0, #CTX_FP_FPEXC32_EL2]
-#endif
+#endif /* CTX_INCLUDE_AARCH32_REGS */
ret
endfunc fpregs_context_save
@@ -671,7 +701,8 @@
#if CTX_INCLUDE_AARCH32_REGS
ldr x11, [x0, #CTX_FP_FPEXC32_EL2]
msr fpexc32_el2, x11
-#endif
+#endif /* CTX_INCLUDE_AARCH32_REGS */
+
/*
	 * No explicit ISB required here as ERET to
* switch to secure EL1 or non-secure world
@@ -688,13 +719,13 @@
* in ARM DDI 0487F.c page J1-7635 to a default value.
*/
.macro set_unset_pstate_bits
- /*
- * If Data Independent Timing (DIT) functionality is implemented,
- * always enable DIT in EL3
- */
+ /*
+ * If Data Independent Timing (DIT) functionality is implemented,
+ * always enable DIT in EL3
+ */
#if ENABLE_FEAT_DIT
- mov x8, #DIT_BIT
- msr DIT, x8
+ mov x8, #DIT_BIT
+ msr DIT, x8
#endif /* ENABLE_FEAT_DIT */
.endm /* set_unset_pstate_bits */
@@ -933,7 +964,7 @@
mrs x17, spsel
cmp x17, #MODE_SP_EL0
ASM_ASSERT(eq)
-#endif
+#endif /* ENABLE_ASSERTIONS */
/* ----------------------------------------------------------
* Save the current SP_EL0 i.e. the EL3 runtime stack which
@@ -971,7 +1002,7 @@
isb
msr S3_6_C1_C2_0, x20 /* zcr_el3 */
sve_not_enabled:
-#endif
+#endif /* IMAGE_BL31 */
#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
/* ----------------------------------------------------------
@@ -982,7 +1013,8 @@
cbz x17, 1f
blr x17
1:
-#endif
+#endif /* IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
restore_ptw_el1_sys_regs
/* ----------------------------------------------------------
@@ -1005,10 +1037,12 @@
esb
#else
dsb sy
-#endif
+#endif /* IMAGE_BL31 && RAS_EXTENSION */
+
#ifdef IMAGE_BL31
str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
-#endif
+#endif /* IMAGE_BL31 */
+
exception_return
endfunc el3_exit
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index c69dc95..459ca2c 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -16,6 +16,7 @@
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <context.h>
+#include <drivers/arm/gicv3.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
@@ -31,54 +32,158 @@
static void manage_extensions_secure(cpu_context_t *ctx);
-/*******************************************************************************
- * Context management library initialisation routine. This library is used by
- * runtime services to share pointers to 'cpu_context' structures for the secure
- * and non-secure states. Management of the structures and their associated
- * memory is not done by the context management library e.g. the PSCI service
- * manages the cpu context used for entry from and exit to the non-secure state.
- * The Secure payload dispatcher service manages the context(s) corresponding to
- * the secure state. It also uses this library to get access to the non-secure
- * state cpu context pointers.
- * Lastly, this library provides the api to make SP_EL3 point to the cpu context
- * which will used for programming an entry into a lower EL. The same context
- * will used to save state upon exception entry from that EL.
- ******************************************************************************/
-void __init cm_init(void)
+/******************************************************************************
+ * This function performs initializations that are specific to SECURE state
+ * and updates the cpu context specified by 'ctx'.
+ *****************************************************************************/
+static void setup_secure_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
+ u_register_t scr_el3;
+ el3_state_t *state;
+
+ state = get_el3state_ctx(ctx);
+ scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
+
+#if defined(IMAGE_BL31) && !defined(SPD_spmd)
/*
- * The context management library has only global data to intialize, but
- * that will be done when the BSS is zeroed out
+ * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
+ * indicated by the interrupt routing model for BL31.
*/
+ scr_el3 |= get_scr_el3_from_routing_model(SECURE);
+#endif
+
+#if !CTX_INCLUDE_MTE_REGS || ENABLE_ASSERTIONS
+ /* Get Memory Tagging Extension support level */
+ unsigned int mte = get_armv8_5_mte_support();
+#endif
+ /*
+ * Allow access to Allocation Tags when CTX_INCLUDE_MTE_REGS
+ * is set, or when MTE is only implemented at EL0.
+ */
+#if CTX_INCLUDE_MTE_REGS
+ assert((mte == MTE_IMPLEMENTED_ELX) || (mte == MTE_IMPLEMENTED_ASY));
+ scr_el3 |= SCR_ATA_BIT;
+#else
+ if (mte == MTE_IMPLEMENTED_EL0) {
+ scr_el3 |= SCR_ATA_BIT;
+ }
+#endif /* CTX_INCLUDE_MTE_REGS */
+
+ /* Enable S-EL2 if the next EL is EL2 and S-EL2 is present */
+ if ((GET_EL(ep->spsr) == MODE_EL2) && is_armv8_4_sel2_present()) {
+ if (GET_RW(ep->spsr) != MODE_RW_64) {
+			ERROR("S-EL2 cannot be used in AArch32.\n");
+ panic();
+ }
+
+ scr_el3 |= SCR_EEL2_BIT;
+ }
+
+ write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
+
+ manage_extensions_secure(ctx);
}
+#if ENABLE_RME
+/******************************************************************************
+ * This function performs initializations that are specific to REALM state
+ * and updates the cpu context specified by 'ctx'.
+ *****************************************************************************/
+static void setup_realm_context(cpu_context_t *ctx, const struct entry_point_info *ep)
+{
+ u_register_t scr_el3;
+ el3_state_t *state;
+
+ state = get_el3state_ctx(ctx);
+ scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
+
+ scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT | SCR_EnSCXT_BIT;
+
+ write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
+}
+#endif /* ENABLE_RME */
+
+/******************************************************************************
+ * This function performs initializations that are specific to NON-SECURE state
+ * and updates the cpu context specified by 'ctx'.
+ *****************************************************************************/
+static void setup_ns_context(cpu_context_t *ctx, const struct entry_point_info *ep)
+{
+ u_register_t scr_el3;
+ el3_state_t *state;
+
+ state = get_el3state_ctx(ctx);
+ scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
+
+ /* SCR_NS: Set the NS bit */
+ scr_el3 |= SCR_NS_BIT;
+
+#if !CTX_INCLUDE_PAUTH_REGS
+ /*
+ * If the pointer authentication registers aren't saved during world
+ * switches the value of the registers can be leaked from the Secure to
+ * the Non-secure world. To prevent this, rather than enabling pointer
+ * authentication everywhere, we only enable it in the Non-secure world.
+ *
+ * If the Secure world wants to use pointer authentication,
+ * CTX_INCLUDE_PAUTH_REGS must be set to 1.
+ */
+ scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
+#endif /* !CTX_INCLUDE_PAUTH_REGS */
+
+ /* Allow access to Allocation Tags when MTE is implemented. */
+ scr_el3 |= SCR_ATA_BIT;
+
+#ifdef IMAGE_BL31
+ /*
+ * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
+ * indicated by the interrupt routing model for BL31.
+ */
+ scr_el3 |= get_scr_el3_from_routing_model(NON_SECURE);
+#endif
+ write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
+
+ /* Initialize EL2 context registers */
+#if CTX_INCLUDE_EL2_REGS
+
+ /*
+	 * Initialize the SCTLR_EL2 context register using the endianness value
+ * taken from the entrypoint attribute.
+ */
+ u_register_t sctlr_el2 = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL;
+ sctlr_el2 |= SCTLR_EL2_RES1;
+ write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_SCTLR_EL2,
+ sctlr_el2);
+
+ /*
+ * The GICv3 driver initializes the ICC_SRE_EL2 register during
+ * platform setup. Use the same setting for the corresponding
+ * context register to make sure the correct bits are set when
+ * restoring NS context.
+ */
+ u_register_t icc_sre_el2 = read_icc_sre_el2();
+ icc_sre_el2 |= (ICC_SRE_DIB_BIT | ICC_SRE_DFB_BIT);
+ icc_sre_el2 |= (ICC_SRE_EN_BIT | ICC_SRE_SRE_BIT);
+ write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_ICC_SRE_EL2,
+ icc_sre_el2);
+#endif /* CTX_INCLUDE_EL2_REGS */
+}
+
/*******************************************************************************
- * The following function initializes the cpu_context 'ctx' for
- * first use, and sets the initial entrypoint state as specified by the
- * entry_point_info structure.
- *
- * The security state to initialize is determined by the SECURE attribute
- * of the entry_point_info.
+ * The following function performs initialization of the cpu_context 'ctx'
+ * for first use that is common to all security states, and sets the
+ * initial entrypoint state as specified by the entry_point_info structure.
*
* The EE and ST attributes are used to configure the endianness and secure
* timer availability for the new execution context.
- *
- * To prepare the register state for entry call cm_prepare_el3_exit() and
- * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
- * cm_el1_sysregs_context_restore().
******************************************************************************/
-void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
+static void setup_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
- unsigned int security_state;
u_register_t scr_el3;
el3_state_t *state;
gp_regs_t *gp_regs;
u_register_t sctlr_elx, actlr_elx;
- assert(ctx != NULL);
-
- security_state = GET_SECURITY_STATE(ep->h.attr);
-
/* Clear any residual register values from the context */
zeromem(ctx, sizeof(*ctx));
@@ -93,26 +198,7 @@
*/
scr_el3 = read_scr();
scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT |
- SCR_ST_BIT | SCR_HCE_BIT);
-
-#if ENABLE_RME
- /* When RME support is enabled, clear the NSE bit as well. */
- scr_el3 &= ~SCR_NSE_BIT;
-#endif /* ENABLE_RME */
-
- /*
- * SCR_NS: Set the security state of the next EL.
- */
- if (security_state == NON_SECURE) {
- scr_el3 |= SCR_NS_BIT;
- }
-
-#if ENABLE_RME
- /* Check for realm state if RME support enabled. */
- if (security_state == REALM) {
- scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT | SCR_EnSCXT_BIT;
- }
-#endif /* ENABLE_RME */
+ SCR_ST_BIT | SCR_HCE_BIT | SCR_NSE_BIT);
/*
* SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for next
@@ -121,6 +207,7 @@
if (GET_RW(ep->spsr) == MODE_RW_64) {
scr_el3 |= SCR_RW_BIT;
}
+
/*
* SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical
* Secure timer registers to EL3, from AArch64 state only, if specified
@@ -149,8 +236,8 @@
#if !HANDLE_EA_EL3_FIRST
/*
* SCR_EL3.EA: Do not route External Abort and SError Interrupt External
- * to EL3 when executing at a lower EL. When executing at EL3, External
- * Aborts are taken to EL3.
+ * to EL3 when executing at a lower EL. When executing at EL3, External
+ * Aborts are taken to EL3.
*/
scr_el3 &= ~SCR_EA_BIT;
#endif
@@ -159,69 +246,12 @@
/* Enable fault injection from lower ELs */
scr_el3 |= SCR_FIEN_BIT;
#endif
-
-#if !CTX_INCLUDE_PAUTH_REGS
- /*
- * If the pointer authentication registers aren't saved during world
- * switches the value of the registers can be leaked from the Secure to
- * the Non-secure world. To prevent this, rather than enabling pointer
- * authentication everywhere, we only enable it in the Non-secure world.
- *
- * If the Secure world wants to use pointer authentication,
- * CTX_INCLUDE_PAUTH_REGS must be set to 1.
- */
- if (security_state == NON_SECURE) {
- scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
- }
-#endif /* !CTX_INCLUDE_PAUTH_REGS */
-
-#if !CTX_INCLUDE_MTE_REGS || ENABLE_ASSERTIONS
- /* Get Memory Tagging Extension support level */
- unsigned int mte = get_armv8_5_mte_support();
-#endif
- /*
- * Enable MTE support. Support is enabled unilaterally for the normal
- * world, and only for the secure world when CTX_INCLUDE_MTE_REGS is
- * set.
- */
-#if CTX_INCLUDE_MTE_REGS
- assert((mte == MTE_IMPLEMENTED_ELX) || (mte == MTE_IMPLEMENTED_ASY));
- scr_el3 |= SCR_ATA_BIT;
-#else
- /*
- * When MTE is only implemented at EL0, it can be enabled
- * across both worlds as no MTE registers are used.
- */
- if ((mte == MTE_IMPLEMENTED_EL0) ||
- /*
- * When MTE is implemented at all ELs, it can be only enabled
- * in Non-Secure world without register saving.
- */
- (((mte == MTE_IMPLEMENTED_ELX) || (mte == MTE_IMPLEMENTED_ASY)) &&
- (security_state == NON_SECURE))) {
- scr_el3 |= SCR_ATA_BIT;
- }
-#endif /* CTX_INCLUDE_MTE_REGS */
-#ifdef IMAGE_BL31
/*
- * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
- * indicated by the interrupt routing model for BL31.
- *
- * TODO: The interrupt routing model code is not updated for REALM
- * state. Use the default values of IRQ = FIQ = 0 for REALM security
- * state for now.
+	 * CPTR_EL3 was initialized out of reset; copy that value to the
+ * context register.
*/
- if (security_state != REALM) {
- scr_el3 |= get_scr_el3_from_routing_model(security_state);
- }
-#endif
-
- /* Save the initialized value of CPTR_EL3 register */
write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, read_cptr_el3());
- if (security_state == SECURE) {
- manage_extensions_secure(ctx);
- }
/*
* SCR_EL3.HCE: Enable HVC instructions if next execution state is
@@ -249,16 +279,6 @@
}
}
- /* Enable S-EL2 if the next EL is EL2 and security state is secure */
- if ((security_state == SECURE) && (GET_EL(ep->spsr) == MODE_EL2)) {
- if (GET_RW(ep->spsr) != MODE_RW_64) {
- ERROR("S-EL2 can not be used in AArch32.");
- panic();
- }
-
- scr_el3 |= SCR_EEL2_BIT;
- }
-
/*
* FEAT_AMUv1p1 virtual offset registers are only accessible from EL3
* and EL2, when clear, this bit traps accesses from EL2 so we set it
@@ -362,6 +382,66 @@
}
/*******************************************************************************
+ * Context management library initialization routine. This library is used by
+ * runtime services to share pointers to 'cpu_context' structures for secure,
+ * non-secure and realm states. Management of the structures and their associated
+ * memory is not done by the context management library e.g. the PSCI service
+ * manages the cpu context used for entry from and exit to the non-secure state.
+ * The Secure payload dispatcher service manages the context(s) corresponding to
+ * the secure state. It also uses this library to get access to the non-secure
+ * state cpu context pointers.
+ * Lastly, this library provides the API to make SP_EL3 point to the cpu context
+ * which will be used for programming an entry into a lower EL. The same context
+ * will be used to save state upon exception entry from that EL.
+ ******************************************************************************/
+void __init cm_init(void)
+{
+ /*
+	 * The context management library has only global data to initialize, but
+ * that will be done when the BSS is zeroed out.
+ */
+}
+
+/*******************************************************************************
+ * This is the high-level function used to initialize the cpu_context 'ctx' for
+ * first use. It performs initializations that are common to all security states
+ * and initializations specific to the security state specified in 'ep'.
+ ******************************************************************************/
+void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
+{
+ unsigned int security_state;
+
+ assert(ctx != NULL);
+
+ /*
+ * Perform initializations that are common
+ * to all security states
+ */
+ setup_context_common(ctx, ep);
+
+ security_state = GET_SECURITY_STATE(ep->h.attr);
+
+ /* Perform security state specific initializations */
+ switch (security_state) {
+ case SECURE:
+ setup_secure_context(ctx, ep);
+ break;
+#if ENABLE_RME
+ case REALM:
+ setup_realm_context(ctx, ep);
+ break;
+#endif
+ case NON_SECURE:
+ setup_ns_context(ctx, ep);
+ break;
+ default:
+ ERROR("Invalid security state\n");
+ panic();
+ break;
+ }
+}
+
+/*******************************************************************************
* Enable architecture extensions on first entry to Non-secure world.
* When EL2 is implemented but unused `el2_unused` is non-zero, otherwise
* it is zero.
@@ -738,6 +818,40 @@
#endif /* CTX_INCLUDE_EL2_REGS */
/*******************************************************************************
+ * This function is used to exit to Non-secure world. If CTX_INCLUDE_EL2_REGS
+ * is enabled, it restores EL1 and EL2 sysreg contexts instead of directly
+ * updating EL1 and EL2 registers. Otherwise, it calls the generic
+ * cm_prepare_el3_exit function.
+ ******************************************************************************/
+void cm_prepare_el3_exit_ns(void)
+{
+#if CTX_INCLUDE_EL2_REGS
+ cpu_context_t *ctx = cm_get_context(NON_SECURE);
+ assert(ctx != NULL);
+
+ /*
+ * Currently some extensions are configured using
+ * direct register updates. Therefore, do this here
+ * instead of when setting up context.
+ */
+ manage_extensions_nonsecure(0, ctx);
+
+ /*
+ * Set the NS bit to be able to access the ICC_SRE_EL2
+ * register when restoring context.
+ */
+ write_scr_el3(read_scr_el3() | SCR_NS_BIT);
+
+ /* Restore EL2 and EL1 sysreg contexts */
+ cm_el2_sysregs_context_restore(NON_SECURE);
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+#else
+ cm_prepare_el3_exit(NON_SECURE);
+#endif /* CTX_INCLUDE_EL2_REGS */
+}
+
+/*******************************************************************************
* The next four functions are used by runtime services to save and restore
* EL1 context on the 'cpu_context' structure for the specified security
* state.
diff --git a/lib/extensions/mpam/mpam.c b/lib/extensions/mpam/mpam.c
index 65601dd..884d480 100644
--- a/lib/extensions/mpam/mpam.c
+++ b/lib/extensions/mpam/mpam.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -27,7 +27,6 @@
/*
* If EL2 is implemented but unused, disable trapping to EL2 when lower
* ELs access their own MPAM registers.
- * If EL2 is implemented and used, enable trapping to EL2.
*/
if (el2_unused) {
write_mpam2_el2(0ULL);
@@ -35,12 +34,5 @@
if ((read_mpamidr_el1() & MPAMIDR_HAS_HCR_BIT) != 0U) {
write_mpamhcr_el2(0ULL);
}
- } else {
- write_mpam2_el2(MPAM2_EL2_TRAPMPAM0EL1 |
- MPAM2_EL2_TRAPMPAM1EL1);
-
- if ((read_mpamidr_el1() & MPAMIDR_HAS_HCR_BIT) != 0U) {
- write_mpamhcr_el2(MPAMHCR_EL2_TRAP_MPAMIDR_EL1);
- }
}
}
diff --git a/lib/locks/bakery/bakery_lock_normal.c b/lib/locks/bakery/bakery_lock_normal.c
index 7d35dea..faea6c5 100644
--- a/lib/locks/bakery/bakery_lock_normal.c
+++ b/lib/locks/bakery/bakery_lock_normal.c
@@ -83,7 +83,7 @@
}
/* Helper function to check if the lock is acquired */
-static inline bool is_lock_acquired(const bakery_info_t *my_bakery_info,
+static inline __unused bool is_lock_acquired(const bakery_info_t *my_bakery_info,
bool is_cached)
{
/*
diff --git a/lib/psci/psci_on.c b/lib/psci/psci_on.c
index dd48e10..c70b377 100644
--- a/lib/psci/psci_on.c
+++ b/lib/psci/psci_on.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -229,5 +229,5 @@
* information that we had stashed away during the cpu_on
* call to set this cpu on its way.
*/
- cm_prepare_el3_exit(NON_SECURE);
+ cm_prepare_el3_exit_ns();
}
diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c
index da9f328..ffe3a91 100644
--- a/lib/psci/psci_suspend.c
+++ b/lib/psci/psci_suspend.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -331,5 +331,5 @@
* information that we had stashed away during the suspend
* call to set this cpu on its way.
*/
- cm_prepare_el3_exit(NON_SECURE);
+ cm_prepare_el3_exit_ns();
}
diff --git a/lib/xlat_tables_v2/ro_xlat_tables.mk b/lib/xlat_tables_v2/ro_xlat_tables.mk
index 7991e1a..fb8a426 100644
--- a/lib/xlat_tables_v2/ro_xlat_tables.mk
+++ b/lib/xlat_tables_v2/ro_xlat_tables.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2020, ARM Limited. All rights reserved.
+# Copyright (c) 2020-2022, ARM Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -34,4 +34,8 @@
attributes, which is not possible once the translation tables \
have been made read-only.")
endif
+ ifeq (${SPMC_AT_EL3},1)
+ $(error "EL3 SPMC requires functionality from the dynamic translation \
+ library and is incompatible with ALLOW_RO_XLAT_TABLES.")
+ endif
endif
diff --git a/make_helpers/arch_features.mk b/make_helpers/arch_features.mk
new file mode 100644
index 0000000..01e3e09
--- /dev/null
+++ b/make_helpers/arch_features.mk
@@ -0,0 +1,36 @@
+#
+# Copyright (c) 2022, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# This file lists all the checks related to the architectural feature
+# enablement flags, based on the architecture version. Each check relies on
+# $(sort) placing the reference version first only when
+# ARM_ARCH_MAJOR.ARM_ARCH_MINOR is greater than or equal to it.
+
+# Enable the features which are mandatory from ARCH version 8.1 and upwards.
+ifeq "8.1" "$(word 1, $(sort 8.1 $(ARM_ARCH_MAJOR).$(ARM_ARCH_MINOR)))"
+ENABLE_FEAT_PAN = 1
+ENABLE_FEAT_VHE = 1
+endif
+
+# Enable the features which are mandatory from ARCH version 8.4 and upwards.
+ifeq "8.4" "$(word 1, $(sort 8.4 $(ARM_ARCH_MAJOR).$(ARM_ARCH_MINOR)))"
+ENABLE_FEAT_DIT = 1
+ENABLE_FEAT_SEL2 = 1
+endif
+
+# Enable the features which are mandatory from ARCH version 8.5 and upwards.
+ifeq "8.5" "$(word 1, $(sort 8.5 $(ARM_ARCH_MAJOR).$(ARM_ARCH_MINOR)))"
+ENABLE_FEAT_SB = 1
+endif
+
+# Enable the features which are mandatory from ARCH version 8.6 and upwards.
+ifeq "8.6" "$(word 1, $(sort 8.6 $(ARM_ARCH_MAJOR).$(ARM_ARCH_MINOR)))"
+ENABLE_FEAT_FGT = 1
+ENABLE_FEAT_ECV = 1
+endif
+
+# Enable the features which are mandatory from ARCH version 8.7 and upwards.
+ifeq "8.7" "$(word 1, $(sort 8.7 $(ARM_ARCH_MAJOR).$(ARM_ARCH_MINOR)))"
+ENABLE_FEAT_HCX = 1
+endif
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index 910ffdf..7b66569 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -133,12 +133,18 @@
# Use BRANCH_PROTECTION to enable PAUTH.
ENABLE_PAUTH := 0
-# Flag to enable access to the HCRX_EL2 register by setting SCR_EL3.HXEn.
-ENABLE_FEAT_HCX := 0
-
# Flag to enable access to the HAFGRTR_EL2 register
ENABLE_FEAT_AMUv1 := 0
+# Flag to enable AMUv1p1 extension.
+ENABLE_FEAT_AMUv1p1 := 0
+
+# Flag to enable CSV2_2 extension.
+ENABLE_FEAT_CSV2_2 := 0
+
+# Flag to enable access to the HCRX_EL2 register by setting SCR_EL3.HXEn.
+ENABLE_FEAT_HCX := 0
+
# Flag to enable access to the HDFGRTR_EL2 register
ENABLE_FEAT_FGT := 0
@@ -148,6 +154,21 @@
# Flag to enable use of the DIT feature.
ENABLE_FEAT_DIT := 0
+# Flag to enable access to Privileged Access Never bit of PSTATE.
+ENABLE_FEAT_PAN := 0
+
+# Flag to enable access to the Random Number Generator registers
+ENABLE_FEAT_RNG := 0
+
+# Flag to enable Speculation Barrier Instruction
+ENABLE_FEAT_SB := 0
+
+# Flag to enable the Secure EL2 (S-EL2) feature.
+ENABLE_FEAT_SEL2 := 0
+
+# Flag to enable Virtualization Host Extensions
+ENABLE_FEAT_VHE := 0
+
# By default BL31 encryption disabled
ENCRYPT_BL31 := 0
@@ -166,6 +187,9 @@
# Fault injection support
FAULT_INJECTION_SUPPORT := 0
+# Flag to enable the architectural feature detection mechanism
+FEATURE_DETECTION := 0
+
# Byte alignment that each component in FIP is aligned to
FIP_ALIGN := 0
@@ -264,6 +288,9 @@
# Enable the Management Mode (MM)-based Secure Partition Manager implementation
SPM_MM := 0
+# Use the FF-A SPMC implementation in EL3.
+SPMC_AT_EL3 := 0
+
# Use SPM at S-EL2 as a default config for SPMD
SPMD_SPM_AT_SEL2 := 1
diff --git a/plat/arm/board/corstone1000/common/corstone1000_bl2_mem_params_desc.c b/plat/arm/board/corstone1000/common/corstone1000_bl2_mem_params_desc.c
index 3ee396c..fe521a9 100644
--- a/plat/arm/board/corstone1000/common/corstone1000_bl2_mem_params_desc.c
+++ b/plat/arm/board/corstone1000/common/corstone1000_bl2_mem_params_desc.c
@@ -57,8 +57,8 @@
{
.image_id = TOS_FW_CONFIG_ID,
.image_info.image_base = CORSTONE1000_TOS_FW_CONFIG_BASE,
- .image_info.image_max_size = CORSTONE1000_TOS_FW_CONFIG_LIMIT - \
- CORSTONE1000_TOS_FW_CONFIG_BASE,
+ .image_info.image_max_size = (CORSTONE1000_TOS_FW_CONFIG_LIMIT -
+ CORSTONE1000_TOS_FW_CONFIG_BASE),
SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY,
VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
diff --git a/plat/arm/board/corstone1000/common/corstone1000_plat.c b/plat/arm/board/corstone1000/common/corstone1000_plat.c
index a96baae..0235f8b 100644
--- a/plat/arm/board/corstone1000/common/corstone1000_plat.c
+++ b/plat/arm/board/corstone1000/common/corstone1000_plat.c
@@ -34,12 +34,13 @@
{
const struct plat_io_policy *policy;
/*
- * metadata for firmware update is written at 0x0000 offset of the flash.
- * PLAT_ARM_BOOT_BANK_FLAG contains the boot bank that TF-M is booted.
- * As per firmware update spec, at a given point of time, only one bank is active.
- * This means, TF-A should boot from the same bank as TF-M.
- */
+	 * Metadata for firmware update is written at offset 0x0000 of the flash.
+	 * PLAT_ARM_BOOT_BANK_FLAG contains the boot bank from which TF-M booted.
+	 * As per the firmware update spec, only one bank is active at any given
+	 * time. This means TF-A should boot from the same bank as TF-M.
+ */
volatile uint32_t *boot_bank_flag = (uint32_t *)(PLAT_ARM_BOOT_BANK_FLAG);
+
if (*boot_bank_flag > 1) {
VERBOSE("Boot_bank is set higher than possible values");
}
diff --git a/plat/arm/board/corstone1000/common/corstone1000_pm.c b/plat/arm/board/corstone1000/common/corstone1000_pm.c
index 98dea79..4b0a791 100644
--- a/plat/arm/board/corstone1000/common/corstone1000_pm.c
+++ b/plat/arm/board/corstone1000/common/corstone1000_pm.c
@@ -21,8 +21,8 @@
*(watchdog_val_reg) = SECURE_WATCHDOG_COUNTDOWN_VAL;
*watchdog_ctrl_reg = SECURE_WATCHDOG_MASK_ENABLE;
while (1) {
- wfi();
- }
+ wfi();
+ }
}
plat_psci_ops_t plat_arm_psci_pm_ops = {
diff --git a/plat/arm/board/corstone1000/common/corstone1000_trusted_boot.c b/plat/arm/board/corstone1000/common/corstone1000_trusted_boot.c
index cec7332..7e8fbb2 100644
--- a/plat/arm/board/corstone1000/common/corstone1000_trusted_boot.c
+++ b/plat/arm/board/corstone1000/common/corstone1000_trusted_boot.c
@@ -38,8 +38,8 @@
*/
int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr)
{
- *nv_ctr = CORSTONE1000_FW_NVCTR_VAL;
- return 0;
+ *nv_ctr = CORSTONE1000_FW_NVCTR_VAL;
+ return 0;
}
/*
@@ -49,5 +49,5 @@
*/
int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr)
{
- return 0;
+ return 0;
}
diff --git a/plat/arm/board/corstone1000/common/include/platform_def.h b/plat/arm/board/corstone1000/common/include/platform_def.h
index 2523d72..584d485 100644
--- a/plat/arm/board/corstone1000/common/include/platform_def.h
+++ b/plat/arm/board/corstone1000/common/include/platform_def.h
@@ -16,11 +16,11 @@
#include <plat/common/common_def.h>
#include <plat/arm/soc/common/soc_css_def.h>
-#define ARM_ROTPK_HEADER_LEN 19
-#define ARM_ROTPK_HASH_LEN 32
+#define ARM_ROTPK_HEADER_LEN 19
+#define ARM_ROTPK_HASH_LEN 32
/* Special value used to verify platform parameters from BL2 to BL31 */
-#define ARM_BL31_PLAT_PARAM_VAL ULL(0x0f1e2d3c4b5a6978)
+#define ARM_BL31_PLAT_PARAM_VAL ULL(0x0f1e2d3c4b5a6978)
/* PL011 UART related constants */
#ifdef V2M_IOFPGA_UART0_CLK_IN_HZ
@@ -31,368 +31,324 @@
#undef V2M_IOFPGA_UART1_CLK_IN_HZ
#endif
-#define V2M_IOFPGA_UART0_CLK_IN_HZ 50000000
-#define V2M_IOFPGA_UART1_CLK_IN_HZ 50000000
+#define V2M_IOFPGA_UART0_CLK_IN_HZ 50000000
+#define V2M_IOFPGA_UART1_CLK_IN_HZ 50000000
/* Core/Cluster/Thread counts for corstone1000 */
-#define CORSTONE1000_CLUSTER_COUNT U(1)
-#define CORSTONE1000_MAX_CPUS_PER_CLUSTER U(4)
-#define CORSTONE1000_MAX_PE_PER_CPU U(1)
-#define CORSTONE1000_PRIMARY_CPU U(0)
+#define CORSTONE1000_CLUSTER_COUNT U(1)
+#define CORSTONE1000_MAX_CPUS_PER_CLUSTER U(4)
+#define CORSTONE1000_MAX_PE_PER_CPU U(1)
+#define CORSTONE1000_PRIMARY_CPU U(0)
-#define PLAT_ARM_CLUSTER_COUNT CORSTONE1000_CLUSTER_COUNT
+#define PLAT_ARM_CLUSTER_COUNT CORSTONE1000_CLUSTER_COUNT
-#define PLATFORM_CORE_COUNT (PLAT_ARM_CLUSTER_COUNT * \
- CORSTONE1000_MAX_CPUS_PER_CLUSTER * \
- CORSTONE1000_MAX_PE_PER_CPU)
+#define PLATFORM_CORE_COUNT (PLAT_ARM_CLUSTER_COUNT * \
+ CORSTONE1000_MAX_CPUS_PER_CLUSTER * \
+ CORSTONE1000_MAX_PE_PER_CPU)
/* UART related constants */
-#define PLAT_ARM_BOOT_UART_BASE 0x1a510000
-#define PLAT_ARM_BOOT_UART_CLK_IN_HZ V2M_IOFPGA_UART0_CLK_IN_HZ
-#define PLAT_ARM_RUN_UART_BASE 0x1a520000
-#define PLAT_ARM_RUN_UART_CLK_IN_HZ V2M_IOFPGA_UART1_CLK_IN_HZ
-#define ARM_CONSOLE_BAUDRATE 115200
-#define PLAT_ARM_CRASH_UART_BASE PLAT_ARM_RUN_UART_BASE
-#define PLAT_ARM_CRASH_UART_CLK_IN_HZ PLAT_ARM_RUN_UART_CLK_IN_HZ
+#define PLAT_ARM_BOOT_UART_BASE 0x1a510000
+#define PLAT_ARM_BOOT_UART_CLK_IN_HZ V2M_IOFPGA_UART0_CLK_IN_HZ
+#define PLAT_ARM_RUN_UART_BASE 0x1a520000
+#define PLAT_ARM_RUN_UART_CLK_IN_HZ V2M_IOFPGA_UART1_CLK_IN_HZ
+#define ARM_CONSOLE_BAUDRATE 115200
+#define PLAT_ARM_CRASH_UART_BASE PLAT_ARM_RUN_UART_BASE
+#define PLAT_ARM_CRASH_UART_CLK_IN_HZ PLAT_ARM_RUN_UART_CLK_IN_HZ
/* Memory related constants */
/* SRAM (CVM) memory layout
*
* <ARM_TRUSTED_SRAM_BASE>
- *
- * partition size: sizeof(meminfo_t) = 16 bytes
- *
- * content: memory info area used by the next BL
+ * partition size: sizeof(meminfo_t) = 16 bytes
+ * content: memory info area used by the next BL
*
* <ARM_FW_CONFIG_BASE>
- *
- * partition size: 4080 bytes
+ * partition size: 4080 bytes
*
* <ARM_BL2_MEM_DESC_BASE>
- *
- * partition size: 4 KB
- *
- * content:
- *
- * Area where BL2 copies the images descriptors
+ * partition size: 4 KB
+ * content: Area where BL2 copies the images descriptors
*
* <ARM_BL_RAM_BASE> = <BL32_BASE>
- *
- * partition size: 688 KB
- *
- * content:
- *
- * BL32 (optee-os)
+ * partition size: 688 KB
+ * content: BL32 (optee-os)
*
* <CORSTONE1000_TOS_FW_CONFIG_BASE> = 0x20ae000
- *
- * partition size: 8 KB
- *
- * content:
- *
- * BL32 config (TOS_FW_CONFIG)
+ * partition size: 8 KB
+ * content: BL32 config (TOS_FW_CONFIG)
*
* <BL31_BASE>
- *
- * partition size: 140 KB
- *
- * content:
- *
- * BL31
+ * partition size: 140 KB
+ * content: BL31
*
* <BL2_SIGNATURE_BASE>
- *
- * partition size: 4 KB
- *
- * content:
- *
- * MCUBOOT data needed to verify TF-A BL2
+ * partition size: 4 KB
+ * content: MCUBOOT data needed to verify TF-A BL2
*
* <BL2_BASE>
- *
- * partition size: 176 KB
- *
- * content:
- *
- * BL2
+ * partition size: 176 KB
+ * content: BL2
*
* <ARM_NS_SHARED_RAM_BASE> = <ARM_TRUSTED_SRAM_BASE> + 1 MB
- *
- * partition size: 512 KB
- *
- * content:
- *
- * BL33 (u-boot)
+ * partition size: 512 KB
+ * content: BL33 (u-boot)
*/
/* DDR memory */
-#define ARM_DRAM1_BASE UL(0x80000000)
-#define ARM_DRAM1_SIZE (SZ_2G) /* 2GB*/
-#define ARM_DRAM1_END (ARM_DRAM1_BASE + \
- ARM_DRAM1_SIZE - 1)
+#define ARM_DRAM1_BASE UL(0x80000000)
+#define ARM_DRAM1_SIZE (SZ_2G) /* 2GB*/
+#define ARM_DRAM1_END (ARM_DRAM1_BASE + ARM_DRAM1_SIZE - 1)
/* DRAM1 and DRAM2 are the same for corstone1000 */
-#define ARM_DRAM2_BASE ARM_DRAM1_BASE
-#define ARM_DRAM2_SIZE ARM_DRAM1_SIZE
-#define ARM_DRAM2_END ARM_DRAM1_END
+#define ARM_DRAM2_BASE ARM_DRAM1_BASE
+#define ARM_DRAM2_SIZE ARM_DRAM1_SIZE
+#define ARM_DRAM2_END ARM_DRAM1_END
-#define ARM_NS_DRAM1_BASE ARM_DRAM1_BASE
-#define ARM_NS_DRAM1_SIZE ARM_DRAM1_SIZE
-#define ARM_NS_DRAM1_END (ARM_NS_DRAM1_BASE +\
- ARM_NS_DRAM1_SIZE - 1)
+#define ARM_NS_DRAM1_BASE ARM_DRAM1_BASE
+#define ARM_NS_DRAM1_SIZE ARM_DRAM1_SIZE
+#define ARM_NS_DRAM1_END (ARM_NS_DRAM1_BASE + ARM_NS_DRAM1_SIZE - 1)
/* The first 8 KB of Trusted SRAM are used as shared memory */
-#define ARM_TRUSTED_SRAM_BASE UL(0x02000000)
-#define ARM_SHARED_RAM_SIZE (SZ_8K) /* 8 KB */
-#define ARM_SHARED_RAM_BASE ARM_TRUSTED_SRAM_BASE
+#define ARM_TRUSTED_SRAM_BASE UL(0x02000000)
+#define ARM_SHARED_RAM_SIZE (SZ_8K) /* 8 KB */
+#define ARM_SHARED_RAM_BASE ARM_TRUSTED_SRAM_BASE
/* The remaining Trusted SRAM is used to load the BL images */
-#define TOTAL_SRAM_SIZE (SZ_4M) /* 4 MB */
+#define TOTAL_SRAM_SIZE (SZ_4M) /* 4 MB */
-/* Last 512KB of CVM is allocated for shared RAM
- * as an example openAMP */
-#define ARM_NS_SHARED_RAM_SIZE (512 * SZ_1K)
+/* Last 512KB of CVM is allocated for shared RAM as an example openAMP */
+#define ARM_NS_SHARED_RAM_SIZE (512 * SZ_1K)
-#define PLAT_ARM_TRUSTED_SRAM_SIZE (TOTAL_SRAM_SIZE - \
- ARM_NS_SHARED_RAM_SIZE - \
- ARM_SHARED_RAM_SIZE)
+#define PLAT_ARM_TRUSTED_SRAM_SIZE (TOTAL_SRAM_SIZE - \
+ ARM_NS_SHARED_RAM_SIZE - \
+ ARM_SHARED_RAM_SIZE)
-#define PLAT_ARM_MAX_BL2_SIZE (180 * SZ_1K) /* 180 KB */
+#define PLAT_ARM_MAX_BL2_SIZE (180 * SZ_1K) /* 180 KB */
-#define PLAT_ARM_MAX_BL31_SIZE (140 * SZ_1K) /* 140 KB */
+#define PLAT_ARM_MAX_BL31_SIZE (140 * SZ_1K) /* 140 KB */
-#define ARM_BL_RAM_BASE (ARM_SHARED_RAM_BASE + \
- ARM_SHARED_RAM_SIZE)
-#define ARM_BL_RAM_SIZE (PLAT_ARM_TRUSTED_SRAM_SIZE - \
- ARM_SHARED_RAM_SIZE)
+#define ARM_BL_RAM_BASE (ARM_SHARED_RAM_BASE + ARM_SHARED_RAM_SIZE)
+#define ARM_BL_RAM_SIZE (PLAT_ARM_TRUSTED_SRAM_SIZE - \
+ ARM_SHARED_RAM_SIZE)
-#define BL2_SIGNATURE_SIZE (SZ_4K) /* 4 KB */
+#define BL2_SIGNATURE_SIZE (SZ_4K) /* 4 KB */
-#define BL2_SIGNATURE_BASE (BL2_LIMIT - \
- PLAT_ARM_MAX_BL2_SIZE)
-#define BL2_BASE (BL2_LIMIT - \
- PLAT_ARM_MAX_BL2_SIZE + \
- BL2_SIGNATURE_SIZE)
-#define BL2_LIMIT (ARM_BL_RAM_BASE + \
- ARM_BL_RAM_SIZE)
+#define BL2_SIGNATURE_BASE (BL2_LIMIT - PLAT_ARM_MAX_BL2_SIZE)
+#define BL2_BASE (BL2_LIMIT - \
+ PLAT_ARM_MAX_BL2_SIZE + \
+ BL2_SIGNATURE_SIZE)
+#define BL2_LIMIT (ARM_BL_RAM_BASE + ARM_BL_RAM_SIZE)
-#define BL31_BASE (BL2_SIGNATURE_BASE - \
- PLAT_ARM_MAX_BL31_SIZE)
-#define BL31_LIMIT BL2_SIGNATURE_BASE
+#define BL31_BASE (BL2_SIGNATURE_BASE - PLAT_ARM_MAX_BL31_SIZE)
+#define BL31_LIMIT BL2_SIGNATURE_BASE
-#define CORSTONE1000_TOS_FW_CONFIG_BASE (BL31_BASE - \
- CORSTONE1000_TOS_FW_CONFIG_SIZE)
-#define CORSTONE1000_TOS_FW_CONFIG_SIZE (SZ_8K) /* 8 KB */
-#define CORSTONE1000_TOS_FW_CONFIG_LIMIT BL31_BASE
+#define CORSTONE1000_TOS_FW_CONFIG_BASE (BL31_BASE - \
+ CORSTONE1000_TOS_FW_CONFIG_SIZE)
+#define CORSTONE1000_TOS_FW_CONFIG_SIZE (SZ_8K) /* 8 KB */
+#define CORSTONE1000_TOS_FW_CONFIG_LIMIT BL31_BASE
-#define BL32_BASE ARM_BL_RAM_BASE
-#define PLAT_ARM_MAX_BL32_SIZE (CORSTONE1000_TOS_FW_CONFIG_BASE - \
- BL32_BASE)
+#define BL32_BASE ARM_BL_RAM_BASE
+#define PLAT_ARM_MAX_BL32_SIZE (CORSTONE1000_TOS_FW_CONFIG_BASE - BL32_BASE)
-#define BL32_LIMIT (BL32_BASE + \
- PLAT_ARM_MAX_BL32_SIZE)
+#define BL32_LIMIT (BL32_BASE + PLAT_ARM_MAX_BL32_SIZE)
/* SPD_spmd settings */
-#define PLAT_ARM_SPMC_BASE BL32_BASE
-#define PLAT_ARM_SPMC_SIZE PLAT_ARM_MAX_BL32_SIZE
+#define PLAT_ARM_SPMC_BASE BL32_BASE
+#define PLAT_ARM_SPMC_SIZE PLAT_ARM_MAX_BL32_SIZE
/* NS memory */
/* The last 512KB of the SRAM is allocated as shared memory */
-#define ARM_NS_SHARED_RAM_BASE (ARM_TRUSTED_SRAM_BASE + TOTAL_SRAM_SIZE - \
- (PLAT_ARM_MAX_BL31_SIZE + \
- PLAT_ARM_MAX_BL32_SIZE))
+#define ARM_NS_SHARED_RAM_BASE (ARM_TRUSTED_SRAM_BASE + TOTAL_SRAM_SIZE - \
+ (PLAT_ARM_MAX_BL31_SIZE + \
+ PLAT_ARM_MAX_BL32_SIZE))
-#define BL33_BASE ARM_DRAM1_BASE
-#define PLAT_ARM_MAX_BL33_SIZE (12 * SZ_1M) /* 12 MB*/
-#define BL33_LIMIT (ARM_DRAM1_BASE + PLAT_ARM_MAX_BL33_SIZE)
+#define BL33_BASE ARM_DRAM1_BASE
+#define PLAT_ARM_MAX_BL33_SIZE (12 * SZ_1M) /* 12 MB*/
+#define BL33_LIMIT (ARM_DRAM1_BASE + PLAT_ARM_MAX_BL33_SIZE)
/* end of the definition of SRAM memory layout */
/* NOR Flash */
-#define PLAT_ARM_BOOT_BANK_FLAG UL(0x08002000)
-#define PLAT_ARM_FIP_BASE_BANK0 UL(0x081EF000)
-#define PLAT_ARM_FIP_BASE_BANK1 UL(0x0916F000)
-#define PLAT_ARM_FIP_MAX_SIZE UL(0x1ff000) /* 1.996 MB */
+#define PLAT_ARM_BOOT_BANK_FLAG UL(0x08002000)
+#define PLAT_ARM_FIP_BASE_BANK0 UL(0x081EF000)
+#define PLAT_ARM_FIP_BASE_BANK1 UL(0x0916F000)
+#define PLAT_ARM_FIP_MAX_SIZE UL(0x1ff000) /* 1.996 MB */
-#define PLAT_ARM_NVM_BASE V2M_FLASH0_BASE
-#define PLAT_ARM_NVM_SIZE (SZ_32M) /* 32 MB */
+#define PLAT_ARM_NVM_BASE V2M_FLASH0_BASE
+#define PLAT_ARM_NVM_SIZE (SZ_32M) /* 32 MB */
-#define PLAT_ARM_FLASH_IMAGE_BASE PLAT_ARM_FIP_BASE_BANK0
-#define PLAT_ARM_FLASH_IMAGE_MAX_SIZE PLAT_ARM_FIP_MAX_SIZE
+#define PLAT_ARM_FLASH_IMAGE_BASE PLAT_ARM_FIP_BASE_BANK0
+#define PLAT_ARM_FLASH_IMAGE_MAX_SIZE PLAT_ARM_FIP_MAX_SIZE
/*
* Some data must be aligned on the biggest cache line size in the platform.
* This is known only to the platform as it might have a combination of
* integrated and external caches.
*/
-#define CACHE_WRITEBACK_GRANULE (U(1) << ARM_CACHE_WRITEBACK_SHIFT)
-#define ARM_CACHE_WRITEBACK_SHIFT 6
+#define CACHE_WRITEBACK_GRANULE (U(1) << ARM_CACHE_WRITEBACK_SHIFT)
+#define ARM_CACHE_WRITEBACK_SHIFT 6
/*
* Define FW_CONFIG area base and limit. Leave enough space for BL2 meminfo.
* FW_CONFIG is intended to host the device tree. Currently, This area is not
* used because corstone1000 platform doesn't use a device tree at TF-A level.
*/
-#define ARM_FW_CONFIG_BASE (ARM_SHARED_RAM_BASE \
- + sizeof(meminfo_t))
-#define ARM_FW_CONFIG_LIMIT (ARM_SHARED_RAM_BASE \
- + (ARM_SHARED_RAM_SIZE >> 1))
+#define ARM_FW_CONFIG_BASE (ARM_SHARED_RAM_BASE + sizeof(meminfo_t))
+#define ARM_FW_CONFIG_LIMIT (ARM_SHARED_RAM_BASE + \
+ (ARM_SHARED_RAM_SIZE >> 1))
/*
* Boot parameters passed from BL2 to BL31/BL32 are stored here
*/
-#define ARM_BL2_MEM_DESC_BASE ARM_FW_CONFIG_LIMIT
-#define ARM_BL2_MEM_DESC_LIMIT ARM_BL_RAM_BASE
+#define ARM_BL2_MEM_DESC_BASE ARM_FW_CONFIG_LIMIT
+#define ARM_BL2_MEM_DESC_LIMIT ARM_BL_RAM_BASE
/*
* The max number of regions like RO(code), coherent and data required by
* different BL stages which need to be mapped in the MMU.
*/
-#define ARM_BL_REGIONS 3
-#define PLAT_ARM_MMAP_ENTRIES 8
-#define MAX_XLAT_TABLES 5
-#define MAX_MMAP_REGIONS (PLAT_ARM_MMAP_ENTRIES + \
- ARM_BL_REGIONS)
-#define MAX_IO_DEVICES 2
-#define MAX_IO_HANDLES 3
-#define MAX_IO_BLOCK_DEVICES 1
+#define ARM_BL_REGIONS 3
+#define PLAT_ARM_MMAP_ENTRIES 8
+#define MAX_XLAT_TABLES 5
+#define MAX_MMAP_REGIONS (PLAT_ARM_MMAP_ENTRIES + ARM_BL_REGIONS)
+#define MAX_IO_DEVICES 2
+#define MAX_IO_HANDLES 3
+#define MAX_IO_BLOCK_DEVICES 1
/* GIC related constants */
-#define PLAT_ARM_GICD_BASE 0x1C010000
-#define PLAT_ARM_GICC_BASE 0x1C02F000
+#define PLAT_ARM_GICD_BASE 0x1C010000
+#define PLAT_ARM_GICC_BASE 0x1C02F000
/* MHUv2 Secure Channel receiver and sender */
-#define PLAT_SDK700_MHU0_SEND 0x1B800000
-#define PLAT_SDK700_MHU0_RECV 0x1B810000
+#define PLAT_SDK700_MHU0_SEND 0x1B800000
+#define PLAT_SDK700_MHU0_RECV 0x1B810000
/* Timer/watchdog related constants */
-#define ARM_SYS_CNTCTL_BASE UL(0x1a200000)
-#define ARM_SYS_CNTREAD_BASE UL(0x1a210000)
-#define ARM_SYS_TIMCTL_BASE UL(0x1a220000)
+#define ARM_SYS_CNTCTL_BASE UL(0x1a200000)
+#define ARM_SYS_CNTREAD_BASE UL(0x1a210000)
+#define ARM_SYS_TIMCTL_BASE UL(0x1a220000)
-#define SECURE_WATCHDOG_ADDR_CTRL_REG 0x1A320000
-#define SECURE_WATCHDOG_ADDR_VAL_REG 0x1A320008
-#define SECURE_WATCHDOG_MASK_ENABLE 0x01
-#define SECURE_WATCHDOG_COUNTDOWN_VAL 0x1000
+#define SECURE_WATCHDOG_ADDR_CTRL_REG 0x1A320000
+#define SECURE_WATCHDOG_ADDR_VAL_REG 0x1A320008
+#define SECURE_WATCHDOG_MASK_ENABLE 0x01
+#define SECURE_WATCHDOG_COUNTDOWN_VAL 0x1000
-#define SYS_COUNTER_FREQ_IN_TICKS UL(50000000) /* 50MHz */
+#define SYS_COUNTER_FREQ_IN_TICKS UL(50000000) /* 50MHz */
-#define CORSTONE1000_IRQ_TZ_WDOG 32
-#define CORSTONE1000_IRQ_SEC_SYS_TIMER 34
+#define CORSTONE1000_IRQ_TZ_WDOG 32
+#define CORSTONE1000_IRQ_SEC_SYS_TIMER 34
-#define PLAT_MAX_PWR_LVL 2
+#define PLAT_MAX_PWR_LVL 2
/*
* Macros mapping the MPIDR Affinity levels to ARM Platform Power levels. The
* power levels have a 1:1 mapping with the MPIDR affinity levels.
*/
-#define ARM_PWR_LVL0 MPIDR_AFFLVL0
-#define ARM_PWR_LVL1 MPIDR_AFFLVL1
-#define ARM_PWR_LVL2 MPIDR_AFFLVL2
+#define ARM_PWR_LVL0 MPIDR_AFFLVL0
+#define ARM_PWR_LVL1 MPIDR_AFFLVL1
+#define ARM_PWR_LVL2 MPIDR_AFFLVL2
/*
* Macros for local power states in ARM platforms encoded by State-ID field
* within the power-state parameter.
*/
/* Local power state for power domains in Run state. */
-#define ARM_LOCAL_STATE_RUN U(0)
+#define ARM_LOCAL_STATE_RUN U(0)
/* Local power state for retention. Valid only for CPU power domains */
-#define ARM_LOCAL_STATE_RET U(1)
+#define ARM_LOCAL_STATE_RET U(1)
/* Local power state for OFF/power-down. Valid for CPU and cluster
* power domains
*/
-#define ARM_LOCAL_STATE_OFF U(2)
+#define ARM_LOCAL_STATE_OFF U(2)
-#define PLAT_ARM_TRUSTED_MAILBOX_BASE ARM_TRUSTED_SRAM_BASE
-#define PLAT_ARM_NSTIMER_FRAME_ID U(1)
+#define PLAT_ARM_TRUSTED_MAILBOX_BASE ARM_TRUSTED_SRAM_BASE
+#define PLAT_ARM_NSTIMER_FRAME_ID U(1)
-#define PLAT_ARM_NS_IMAGE_BASE (ARM_NS_SHARED_RAM_BASE)
+#define PLAT_ARM_NS_IMAGE_BASE (ARM_NS_SHARED_RAM_BASE)
-#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 32)
-#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 32)
+#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 32)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 32)
/*
* This macro defines the deepest retention state possible. A higher state
* ID will represent an invalid or a power down state.
*/
-#define PLAT_MAX_RET_STATE 1
+#define PLAT_MAX_RET_STATE 1
/*
* This macro defines the deepest power down states possible. Any state ID
* higher than this is invalid.
*/
-#define PLAT_MAX_OFF_STATE 2
+#define PLAT_MAX_OFF_STATE 2
-#define PLATFORM_STACK_SIZE UL(0x440)
+#define PLATFORM_STACK_SIZE UL(0x440)
-#define CORSTONE1000_EXTERNAL_FLASH MAP_REGION_FLAT( \
- PLAT_ARM_NVM_BASE, \
- PLAT_ARM_NVM_SIZE, \
- MT_DEVICE | MT_RO | MT_SECURE)
+#define CORSTONE1000_EXTERNAL_FLASH MAP_REGION_FLAT( \
+ PLAT_ARM_NVM_BASE, \
+ PLAT_ARM_NVM_SIZE, \
+ MT_DEVICE | MT_RO | MT_SECURE)
-#define ARM_MAP_SHARED_RAM MAP_REGION_FLAT( \
- ARM_SHARED_RAM_BASE, \
- ARM_SHARED_RAM_SIZE, \
- MT_MEMORY | MT_RW | MT_SECURE)
+#define ARM_MAP_SHARED_RAM MAP_REGION_FLAT( \
+ ARM_SHARED_RAM_BASE, \
+ ARM_SHARED_RAM_SIZE, \
+ MT_MEMORY | MT_RW | MT_SECURE)
-#define ARM_MAP_NS_SHARED_RAM MAP_REGION_FLAT( \
- ARM_NS_SHARED_RAM_BASE, \
- ARM_NS_SHARED_RAM_SIZE, \
- MT_MEMORY | MT_RW | MT_NS)
+#define ARM_MAP_NS_SHARED_RAM MAP_REGION_FLAT( \
+ ARM_NS_SHARED_RAM_BASE, \
+ ARM_NS_SHARED_RAM_SIZE, \
+ MT_MEMORY | MT_RW | MT_NS)
-#define ARM_MAP_NS_DRAM1 MAP_REGION_FLAT( \
- ARM_NS_DRAM1_BASE, \
- ARM_NS_DRAM1_SIZE, \
- MT_MEMORY | MT_RW | MT_NS)
+#define ARM_MAP_NS_DRAM1 MAP_REGION_FLAT( \
+ ARM_NS_DRAM1_BASE, \
+ ARM_NS_DRAM1_SIZE, \
+ MT_MEMORY | MT_RW | MT_NS)
-#define ARM_MAP_BL_RO MAP_REGION_FLAT( \
- BL_CODE_BASE, \
- BL_CODE_END \
- - BL_CODE_BASE, \
- MT_CODE | MT_SECURE), \
- MAP_REGION_FLAT( \
- BL_RO_DATA_BASE, \
- BL_RO_DATA_END \
- - BL_RO_DATA_BASE, \
- MT_RO_DATA | MT_SECURE)
+#define ARM_MAP_BL_RO MAP_REGION_FLAT( \
+ BL_CODE_BASE, \
+ (BL_CODE_END - BL_CODE_BASE), \
+ MT_CODE | MT_SECURE), \
+ MAP_REGION_FLAT( \
+ BL_RO_DATA_BASE, \
+ (BL_RO_DATA_END - BL_RO_DATA_BASE), \
+ MT_RO_DATA | MT_SECURE)
#if USE_COHERENT_MEM
-#define ARM_MAP_BL_COHERENT_RAM MAP_REGION_FLAT( \
- BL_COHERENT_RAM_BASE, \
- BL_COHERENT_RAM_END \
- - BL_COHERENT_RAM_BASE, \
- MT_DEVICE | MT_RW | MT_SECURE)
+#define ARM_MAP_BL_COHERENT_RAM MAP_REGION_FLAT( \
+ BL_COHERENT_RAM_BASE, \
+ (BL_COHERENT_RAM_END \
+ - BL_COHERENT_RAM_BASE), \
+ MT_DEVICE | MT_RW | MT_SECURE)
#endif
/*
* Map the region for the optional device tree configuration with read and
* write permissions
*/
-#define ARM_MAP_BL_CONFIG_REGION MAP_REGION_FLAT( \
- ARM_FW_CONFIG_BASE, \
- (ARM_FW_CONFIG_LIMIT- \
- ARM_FW_CONFIG_BASE), \
- MT_MEMORY | MT_RW | MT_SECURE)
+#define ARM_MAP_BL_CONFIG_REGION MAP_REGION_FLAT( \
+ ARM_FW_CONFIG_BASE, \
+ (ARM_FW_CONFIG_LIMIT \
+ - ARM_FW_CONFIG_BASE), \
+ MT_MEMORY | MT_RW | MT_SECURE)
-#define CORSTONE1000_DEVICE_BASE (0x1A000000)
-#define CORSTONE1000_DEVICE_SIZE (0x26000000)
-#define CORSTONE1000_MAP_DEVICE MAP_REGION_FLAT( \
- CORSTONE1000_DEVICE_BASE, \
- CORSTONE1000_DEVICE_SIZE, \
- MT_DEVICE | MT_RW | MT_SECURE)
+#define CORSTONE1000_DEVICE_BASE (0x1A000000)
+#define CORSTONE1000_DEVICE_SIZE (0x26000000)
+#define CORSTONE1000_MAP_DEVICE MAP_REGION_FLAT( \
+ CORSTONE1000_DEVICE_BASE, \
+ CORSTONE1000_DEVICE_SIZE, \
+ MT_DEVICE | MT_RW | MT_SECURE)
-#define ARM_IRQ_SEC_PHY_TIMER 29
+#define ARM_IRQ_SEC_PHY_TIMER 29
-#define ARM_IRQ_SEC_SGI_0 8
-#define ARM_IRQ_SEC_SGI_1 9
-#define ARM_IRQ_SEC_SGI_2 10
-#define ARM_IRQ_SEC_SGI_3 11
-#define ARM_IRQ_SEC_SGI_4 12
-#define ARM_IRQ_SEC_SGI_5 13
-#define ARM_IRQ_SEC_SGI_6 14
-#define ARM_IRQ_SEC_SGI_7 15
+#define ARM_IRQ_SEC_SGI_0 8
+#define ARM_IRQ_SEC_SGI_1 9
+#define ARM_IRQ_SEC_SGI_2 10
+#define ARM_IRQ_SEC_SGI_3 11
+#define ARM_IRQ_SEC_SGI_4 12
+#define ARM_IRQ_SEC_SGI_5 13
+#define ARM_IRQ_SEC_SGI_6 14
+#define ARM_IRQ_SEC_SGI_7 15
/*
* Define a list of Group 1 Secure and Group 0 interrupt properties as per GICv3
@@ -424,12 +380,14 @@
* terminology. On a GICv2 system or mode, the lists will be merged and treated
* as Group 0 interrupts.
*/
-#define PLAT_ARM_G1S_IRQ_PROPS(grp) \
- ARM_G1S_IRQ_PROPS(grp), \
- INTR_PROP_DESC(CORSTONE1000_IRQ_TZ_WDOG, GIC_HIGHEST_SEC_PRIORITY, \
- (grp), GIC_INTR_CFG_LEVEL), \
- INTR_PROP_DESC(CORSTONE1000_IRQ_SEC_SYS_TIMER, \
- GIC_HIGHEST_SEC_PRIORITY, (grp), GIC_INTR_CFG_LEVEL)
+#define PLAT_ARM_G1S_IRQ_PROPS(grp) \
+ ARM_G1S_IRQ_PROPS(grp), \
+ INTR_PROP_DESC(CORSTONE1000_IRQ_TZ_WDOG, \
+ GIC_HIGHEST_SEC_PRIORITY, \
+ (grp), GIC_INTR_CFG_LEVEL), \
+ INTR_PROP_DESC(CORSTONE1000_IRQ_SEC_SYS_TIMER, \
+ GIC_HIGHEST_SEC_PRIORITY, \
+ (grp), GIC_INTR_CFG_LEVEL)
#define PLAT_ARM_G0_IRQ_PROPS(grp) ARM_G0_IRQ_PROPS(grp)
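
For reference, these interrupt property lists are normally consumed by handing them to the GIC driver as an interrupt_prop_t array. The sketch below follows the common Arm platform GICv2 pattern (the comment above notes both lists are treated as Group 0 on GICv2); the array name and grouping argument are illustrative assumptions, not part of this patch:

    /* Illustrative consumer of the IRQ property macros above (GICv2 case). */
    #include <drivers/arm/gic_common.h>
    #include <drivers/arm/gicv2.h>
    #include <platform_def.h>

    /* On a GICv2 system, both lists end up configured as Group 0 interrupts. */
    static const interrupt_prop_t corstone1000_interrupt_props[] = {
            PLAT_ARM_G1S_IRQ_PROPS(GICV2_INTR_GROUP0),
            PLAT_ARM_G0_IRQ_PROPS(GICV2_INTR_GROUP0)
    };
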
diff --git a/plat/arm/board/corstone700/common/corstone700_plat.c b/plat/arm/board/corstone700/common/corstone700_plat.c
index 629f076..dd7531d 100644
--- a/plat/arm/board/corstone700/common/corstone700_plat.c
+++ b/plat/arm/board/corstone700/common/corstone700_plat.c
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2019-2020, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <common/bl_common.h>
-#include <mhu.h>
+#include <corstone700_mhu.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>
#include <platform_def.h>
diff --git a/plat/arm/board/corstone700/common/drivers/mhu/mhu.c b/plat/arm/board/corstone700/common/drivers/mhu/corstone700_mhu.c
similarity index 96%
rename from plat/arm/board/corstone700/common/drivers/mhu/mhu.c
rename to plat/arm/board/corstone700/common/drivers/mhu/corstone700_mhu.c
index 2231d11..832cfb7 100644
--- a/plat/arm/board/corstone700/common/drivers/mhu/mhu.c
+++ b/plat/arm/board/corstone700/common/drivers/mhu/corstone700_mhu.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020, ARM Limited. All rights reserved.
+ * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -12,7 +12,7 @@
#include <lib/bakery_lock.h>
#include <lib/mmio.h>
-#include "mhu.h"
+#include "corstone700_mhu.h"
#include <plat_arm.h>
#include <platform_def.h>
diff --git a/plat/arm/board/corstone700/common/drivers/mhu/mhu.h b/plat/arm/board/corstone700/common/drivers/mhu/corstone700_mhu.h
similarity index 86%
rename from plat/arm/board/corstone700/common/drivers/mhu/mhu.h
rename to plat/arm/board/corstone700/common/drivers/mhu/corstone700_mhu.h
index 3808746..7f14ca5 100644
--- a/plat/arm/board/corstone700/common/drivers/mhu/mhu.h
+++ b/plat/arm/board/corstone700/common/drivers/mhu/corstone700_mhu.h
@@ -1,11 +1,11 @@
/*
- * Copyright (c) 2019-2020, ARM Limited. All rights reserved.
+ * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef MHU_H
-#define MHU_H
+#ifndef CORSTONE700_MHU_H
+#define CORSTONE700_MHU_H
#define MHU_POLL_INTR_STAT_TIMEOUT 50000 /*timeout value in us*/
@@ -34,4 +34,4 @@
void mhu_secure_message_end(uintptr_t address, unsigned int slot_id);
void mhu_secure_init(void);
-#endif /* MHU_H */
+#endif /* CORSTONE700_MHU_H */
diff --git a/plat/arm/board/corstone700/platform.mk b/plat/arm/board/corstone700/platform.mk
index 9a8d38c..75833f6 100644
--- a/plat/arm/board/corstone700/platform.mk
+++ b/plat/arm/board/corstone700/platform.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2019-2020, Arm Limited and Contributors. All rights reserved.
+# Copyright (c) 2019-2022, Arm Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -17,7 +17,7 @@
lib/xlat_tables/aarch32/xlat_tables.c \
lib/xlat_tables/xlat_tables_common.c \
${CORSTONE700_CPU_LIBS} \
- plat/arm/board/corstone700/common/drivers/mhu/mhu.c
+ plat/arm/board/corstone700/common/drivers/mhu/corstone700_mhu.c
PLAT_INCLUDES := -Iplat/arm/board/corstone700/common/include \
-Iinclude/plat/arm/common \
diff --git a/plat/arm/board/juno/include/platform_def.h b/plat/arm/board/juno/include/platform_def.h
index d61ba5d..3265b0b 100644
--- a/plat/arm/board/juno/include/platform_def.h
+++ b/plat/arm/board/juno/include/platform_def.h
@@ -9,7 +9,7 @@
#include <drivers/arm/tzc400.h>
#if TRUSTED_BOARD_BOOT
-#include <drivers/auth/mbedtls/mbedtls_config.h>
+#include MBEDTLS_CONFIG_FILE
#endif
#include <plat/arm/board/common/board_css_def.h>
#include <plat/arm/board/common/v2m_def.h>
diff --git a/plat/arm/common/aarch64/execution_state_switch.c b/plat/arm/common/aarch64/execution_state_switch.c
index bed929a..2353e6a 100644
--- a/plat/arm/common/aarch64/execution_state_switch.c
+++ b/plat/arm/common/aarch64/execution_state_switch.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -162,7 +162,7 @@
* calling EL.
*/
cm_init_my_context(&ep);
- cm_prepare_el3_exit(NON_SECURE);
+ cm_prepare_el3_exit_ns();
/*
* State switch success. The caller of SMC wouldn't see the SMC
diff --git a/plat/arm/common/arm_dyn_cfg.c b/plat/arm/common/arm_dyn_cfg.c
index 7abd1cd..83e3f9a 100644
--- a/plat/arm/common/arm_dyn_cfg.c
+++ b/plat/arm/common/arm_dyn_cfg.c
@@ -14,7 +14,7 @@
#include <common/desc_image_load.h>
#include <common/tbbr/tbbr_img_def.h>
#if CRYPTO_SUPPORT
-#include <drivers/auth/mbedtls/mbedtls_config.h>
+#include MBEDTLS_CONFIG_FILE
#endif /* CRYPTO_SUPPORT */
#include <lib/fconf/fconf.h>
#include <lib/fconf/fconf_dyn_cfg_getter.h>
diff --git a/plat/st/common/bl2_io_storage.c b/plat/st/common/bl2_io_storage.c
index e129dfd..5cc3390 100644
--- a/plat/st/common/bl2_io_storage.c
+++ b/plat/st/common/bl2_io_storage.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -485,22 +485,46 @@
#if (STM32MP_SDMMC || STM32MP_EMMC) && PSA_FWU_SUPPORT
/*
- * Eventually, this function will return the
- * boot index to be passed on to the Update
- * Agent after performing certain checks like
- * a watchdog timeout, or Auth failure while
- * trying to load from a certain bank.
- * For now, since we do not have that logic
- * implemented, just pass the active_index
- * read from the metadata.
+ * In each boot in non-trial mode, we set the BKP register to
+ * FWU_MAX_TRIAL_REBOOT, and return the active_index from the metadata.
+ *
+ * As long as the update agent has not updated the "accepted" field in the
+ * metadata (i.e. we are in trial mode), we select the new active_index.
+ * To avoid an infinite boot loop in trial mode, we decrement a BKP register.
+ * We select the previous_active_index if this counter is 0, i.e. if:
+ *  - an unexpected TAMPER event was raised (which resets the BKP registers
+ *    to 0),
+ *  - a power-off occurred before the update agent could update the
+ *    "accepted" field, or
+ *  - we have already booted FWU_MAX_TRIAL_REBOOT times in trial mode.
*/
+#define INVALID_BOOT_IDX 0xFFFFFFFF
+
uint32_t plat_fwu_get_boot_idx(void)
{
- const struct fwu_metadata *metadata;
+ /*
+ * Select boot index and update boot counter only once per boot
+ * even if this function is called several times.
+ */
+ static uint32_t boot_idx = INVALID_BOOT_IDX;
+ const struct fwu_metadata *data;
- metadata = fwu_get_metadata();
+ data = fwu_get_metadata();
+
+ if (boot_idx == INVALID_BOOT_IDX) {
+ boot_idx = data->active_index;
+ if (fwu_is_trial_run_state()) {
+ if (stm32_get_and_dec_fwu_trial_boot_cnt() == 0U) {
+ WARN("Trial FWU fails %u times\n",
+ FWU_MAX_TRIAL_REBOOT);
+ boot_idx = data->previous_active_index;
+ }
+ } else {
+ stm32_set_max_fwu_trial_boot_cnt();
+ }
+ }
- return metadata->active_index;
+ return boot_idx;
}
static void *stm32_get_image_spec(const uuid_t *img_type_uuid)
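
The comment block above describes the trial-boot fallback policy in prose. The following is a minimal, host-compilable model of that selection logic (plain C, with the backup-register counter replaced by a static variable; the function and variable names are illustrative, not the platform API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FWU_MAX_TRIAL_REBOOT 3U

    /* Stands in for the TAMP backup-register trial counter. */
    static uint32_t trial_cnt;

    static uint32_t select_boot_idx(bool trial_run, uint32_t active, uint32_t previous)
    {
            if (!trial_run) {
                    /* Accepted bank: re-arm the counter, boot the active bank. */
                    trial_cnt = FWU_MAX_TRIAL_REBOOT;
                    return active;
            }
            if (trial_cnt == 0U) {
                    /* Counter exhausted (or reset by TAMPER/power-off): fall back. */
                    return previous;
            }
            trial_cnt--;
            return active;
    }

    int main(void)
    {
            trial_cnt = FWU_MAX_TRIAL_REBOOT;
            /* Four trial boots without acceptance: the fourth one falls back. */
            for (int i = 0; i < 4; i++) {
                    printf("trial boot %d -> bank %u\n", i,
                           (unsigned int)select_boot_idx(true, 1U, 0U));
            }
            return 0;
    }
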
diff --git a/plat/st/common/include/stm32mp_common.h b/plat/st/common/include/stm32mp_common.h
index d8d1c13..0010cd8 100644
--- a/plat/st/common/include/stm32mp_common.h
+++ b/plat/st/common/include/stm32mp_common.h
@@ -129,6 +129,8 @@
#if !STM32MP_USE_STM32IMAGE && PSA_FWU_SUPPORT
void stm32mp1_fwu_set_boot_idx(void);
+uint32_t stm32_get_and_dec_fwu_trial_boot_cnt(void);
+void stm32_set_max_fwu_trial_boot_cnt(void);
#endif /* !STM32MP_USE_STM32IMAGE && PSA_FWU_SUPPORT */
#endif /* STM32MP_COMMON_H */
diff --git a/plat/st/common/stm32mp_dt.c b/plat/st/common/stm32mp_dt.c
index ea71571..c9efeb5 100644
--- a/plat/st/common/stm32mp_dt.c
+++ b/plat/st/common/stm32mp_dt.c
@@ -11,8 +11,6 @@
#include <common/fdt_wrappers.h>
#include <drivers/st/regulator.h>
#include <drivers/st/stm32_gpio.h>
-#include <drivers/st/stm32mp1_ddr.h>
-#include <drivers/st/stm32mp1_ram.h>
#include <libfdt.h>
#include <platform_def.h>
@@ -328,48 +326,32 @@
int dt_find_otp_name(const char *name, uint32_t *otp, uint32_t *otp_len)
{
int node;
- int index, len;
+ int len;
const fdt32_t *cuint;
if ((name == NULL) || (otp == NULL)) {
return -FDT_ERR_BADVALUE;
}
- node = fdt_node_offset_by_compatible(fdt, -1, DT_NVMEM_LAYOUT_COMPAT);
+ node = fdt_node_offset_by_compatible(fdt, -1, DT_BSEC_COMPAT);
if (node < 0) {
return node;
}
- index = fdt_stringlist_search(fdt, node, "nvmem-cell-names", name);
- if (index < 0) {
- return index;
- }
-
- cuint = fdt_getprop(fdt, node, "nvmem-cells", &len);
- if (cuint == NULL) {
- return -FDT_ERR_NOTFOUND;
- }
-
- if ((index * (int)sizeof(uint32_t)) > len) {
- return -FDT_ERR_BADVALUE;
- }
-
- cuint += index;
-
- node = fdt_node_offset_by_phandle(fdt, fdt32_to_cpu(*cuint));
+ node = fdt_subnode_offset(fdt, node, name);
if (node < 0) {
- ERROR("Malformed nvmem_layout node: ignored\n");
+ ERROR("nvmem node %s not found\n", name);
return node;
}
cuint = fdt_getprop(fdt, node, "reg", &len);
if ((cuint == NULL) || (len != (2 * (int)sizeof(uint32_t)))) {
- ERROR("Malformed nvmem_layout node: ignored\n");
+ ERROR("Malformed nvmem node %s: ignored\n", name);
return -FDT_ERR_BADVALUE;
}
if (fdt32_to_cpu(*cuint) % sizeof(uint32_t)) {
- ERROR("Misaligned nvmem_layout element: ignored\n");
+ ERROR("Misaligned nvmem %s element: ignored\n", name);
return -FDT_ERR_BADVALUE;
}
diff --git a/plat/st/stm32mp1/platform.mk b/plat/st/stm32mp1/platform.mk
index 9e732d6..9e67989 100644
--- a/plat/st/stm32mp1/platform.mk
+++ b/plat/st/stm32mp1/platform.mk
@@ -146,7 +146,7 @@
endif
$(eval DTC_V = $(shell $(DTC) -v | awk '{print $$NF}'))
-$(eval DTC_VERSION = $(shell printf "%d" $(shell echo ${DTC_V} | cut -d- -f1 | sed "s/\./0/g")))
+$(eval DTC_VERSION = $(shell printf "%d" $(shell echo ${DTC_V} | cut -d- -f1 | sed "s/\./0/g" | grep -o "[0-9]*")))
DTC_CPPFLAGS += ${INCLUDES}
DTC_FLAGS += -Wno-unit_address_vs_reg
ifeq ($(shell test $(DTC_VERSION) -ge 10601; echo $$?),0)
diff --git a/plat/st/stm32mp1/stm32mp1_def.h b/plat/st/stm32mp1/stm32mp1_def.h
index d869978..7e0745a 100644
--- a/plat/st/stm32mp1/stm32mp1_def.h
+++ b/plat/st/stm32mp1/stm32mp1_def.h
@@ -511,6 +511,9 @@
/* UID OTP */
#define UID_WORD_NB U(3)
+/* FWU configuration (max supported value is 15) */
+#define FWU_MAX_TRIAL_REBOOT U(3)
+
/*******************************************************************************
* STM32MP1 TAMP
******************************************************************************/
@@ -621,7 +624,6 @@
#define DT_DDR_COMPAT "st,stm32mp1-ddr"
#endif
#define DT_IWDG_COMPAT "st,stm32mp1-iwdg"
-#define DT_NVMEM_LAYOUT_COMPAT "st,stm32-nvmem-layout"
#define DT_PWR_COMPAT "st,stm32mp1,pwr-reg"
#if STM32MP13
#define DT_RCC_CLK_COMPAT "st,stm32mp13-rcc"
diff --git a/plat/st/stm32mp1/stm32mp1_private.c b/plat/st/stm32mp1/stm32mp1_private.c
index a9b9f4c..1617afd 100644
--- a/plat/st/stm32mp1/stm32mp1_private.c
+++ b/plat/st/stm32mp1/stm32mp1_private.c
@@ -46,7 +46,16 @@
#define TAMP_BOOT_MODE_ITF_MASK U(0x0000FF00)
#define TAMP_BOOT_MODE_ITF_SHIFT 8
-#define TAMP_BOOT_COUNTER_REG_ID U(21)
+/*
+ * Backup register used to store FWU update information.
+ * It should be writable only by the secure world, but also readable by the
+ * non-secure world (so it should be in Zone 2).
+ */
+#define TAMP_BOOT_FWU_INFO_REG_ID U(10)
+#define TAMP_BOOT_FWU_INFO_IDX_MSK U(0xF)
+#define TAMP_BOOT_FWU_INFO_IDX_OFF U(0)
+#define TAMP_BOOT_FWU_INFO_CNT_MSK U(0xF0)
+#define TAMP_BOOT_FWU_INFO_CNT_OFF U(4)
#if defined(IMAGE_BL2)
#define MAP_SEC_SYSRAM MAP_REGION_FLAT(STM32MP_SYSRAM_BASE, \
@@ -732,9 +741,42 @@
#if !STM32MP_USE_STM32IMAGE && PSA_FWU_SUPPORT
void stm32mp1_fwu_set_boot_idx(void)
{
+ clk_enable(RTCAPB);
+ mmio_clrsetbits_32(tamp_bkpr(TAMP_BOOT_FWU_INFO_REG_ID),
+ TAMP_BOOT_FWU_INFO_IDX_MSK,
+ (plat_fwu_get_boot_idx() << TAMP_BOOT_FWU_INFO_IDX_OFF) &
+ TAMP_BOOT_FWU_INFO_IDX_MSK);
+ clk_disable(RTCAPB);
+}
+
+uint32_t stm32_get_and_dec_fwu_trial_boot_cnt(void)
+{
+ uintptr_t bkpr_fwu_cnt = tamp_bkpr(TAMP_BOOT_FWU_INFO_REG_ID);
+ uint32_t try_cnt;
+
+ clk_enable(RTCAPB);
+ try_cnt = (mmio_read_32(bkpr_fwu_cnt) & TAMP_BOOT_FWU_INFO_CNT_MSK) >>
+ TAMP_BOOT_FWU_INFO_CNT_OFF;
+
+ assert(try_cnt <= FWU_MAX_TRIAL_REBOOT);
+
+ if (try_cnt != 0U) {
+ mmio_clrsetbits_32(bkpr_fwu_cnt, TAMP_BOOT_FWU_INFO_CNT_MSK,
+ (try_cnt - 1U) << TAMP_BOOT_FWU_INFO_CNT_OFF);
+ }
+ clk_disable(RTCAPB);
+
+ return try_cnt;
+}
+
+void stm32_set_max_fwu_trial_boot_cnt(void)
+{
+ uintptr_t bkpr_fwu_cnt = tamp_bkpr(TAMP_BOOT_FWU_INFO_REG_ID);
+
clk_enable(RTCAPB);
- mmio_write_32(tamp_bkpr(TAMP_BOOT_COUNTER_REG_ID),
- plat_fwu_get_boot_idx());
+ mmio_clrsetbits_32(bkpr_fwu_cnt, TAMP_BOOT_FWU_INFO_CNT_MSK,
+ (FWU_MAX_TRIAL_REBOOT << TAMP_BOOT_FWU_INFO_CNT_OFF) &
+ TAMP_BOOT_FWU_INFO_CNT_MSK);
clk_disable(RTCAPB);
}
#endif /* !STM32MP_USE_STM32IMAGE && PSA_FWU_SUPPORT */
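
Since the new code packs both the boot index and the trial counter into a single backup register, here is a small host-side illustration of the mask/shift scheme (the register access is mocked with a plain variable; the field values mirror the TAMP_BOOT_FWU_INFO_* definitions above):

    #include <stdint.h>
    #include <stdio.h>

    #define FWU_IDX_MSK 0x0FU   /* boot index lives in bits [3:0] */
    #define FWU_IDX_OFF 0U
    #define FWU_CNT_MSK 0xF0U   /* trial counter lives in bits [7:4] */
    #define FWU_CNT_OFF 4U

    /* Stands in for tamp_bkpr(TAMP_BOOT_FWU_INFO_REG_ID). */
    static uint32_t bkp_reg;

    static void clrsetbits(uint32_t msk, uint32_t val)
    {
            bkp_reg = (bkp_reg & ~msk) | (val & msk);
    }

    int main(void)
    {
            clrsetbits(FWU_IDX_MSK, 1U << FWU_IDX_OFF); /* boot index 1 */
            clrsetbits(FWU_CNT_MSK, 3U << FWU_CNT_OFF); /* FWU_MAX_TRIAL_REBOOT */
            printf("reg=0x%02x idx=%u cnt=%u\n", (unsigned int)bkp_reg,
                   (unsigned int)((bkp_reg & FWU_IDX_MSK) >> FWU_IDX_OFF),
                   (unsigned int)((bkp_reg & FWU_CNT_MSK) >> FWU_CNT_OFF));
            return 0;
    }
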
diff --git a/services/std_svc/rmmd/rmmd_main.c b/services/std_svc/rmmd/rmmd_main.c
index cf5ff7b..746419e 100644
--- a/services/std_svc/rmmd/rmmd_main.c
+++ b/services/std_svc/rmmd/rmmd_main.c
@@ -60,10 +60,6 @@
cm_set_context(&(rmm_ctx->cpu_ctx), REALM);
- /* Save the current el1/el2 context before loading realm context. */
- cm_el1_sysregs_context_save(NON_SECURE);
- cm_el2_sysregs_context_save(NON_SECURE);
-
/* Restore the realm context assigned above */
cm_el1_sysregs_context_restore(REALM);
cm_el2_sysregs_context_restore(REALM);
@@ -72,14 +68,15 @@
/* Enter RMM */
rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx);
- /* Save realm context */
+ /*
+ * Save realm context. EL1 and EL2 Non-secure
+ * contexts will be restored before exiting to
+ * Non-secure world, therefore there is no need
+ * to clear EL1 and EL2 context registers.
+ */
cm_el1_sysregs_context_save(REALM);
cm_el2_sysregs_context_save(REALM);
- /* Restore the el1/el2 context again. */
- cm_el1_sysregs_context_restore(NON_SECURE);
- cm_el2_sysregs_context_restore(NON_SECURE);
-
return rc;
}
diff --git a/services/std_svc/spm_mm/aarch64/spm_mm_helpers.S b/services/std_svc/spm/common/aarch64/spm_helpers.S
similarity index 96%
rename from services/std_svc/spm_mm/aarch64/spm_mm_helpers.S
rename to services/std_svc/spm/common/aarch64/spm_helpers.S
index 2c3aaf7..95e69fb 100644
--- a/services/std_svc/spm_mm/aarch64/spm_mm_helpers.S
+++ b/services/std_svc/spm/common/aarch64/spm_helpers.S
@@ -1,11 +1,11 @@
/*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <asm_macros.S>
-#include "../spm_mm_private.h"
+#include "spm_common.h"
.global spm_secure_partition_enter
.global spm_secure_partition_exit
diff --git a/services/std_svc/spm/common/include/spm_common.h b/services/std_svc/spm/common/include/spm_common.h
new file mode 100644
index 0000000..68805fc
--- /dev/null
+++ b/services/std_svc/spm/common/include/spm_common.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPM_COMMON_H
+#define SPM_COMMON_H
+
+#include <context.h>
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define SP_C_RT_CTX_X19 0x0
+#define SP_C_RT_CTX_X20 0x8
+#define SP_C_RT_CTX_X21 0x10
+#define SP_C_RT_CTX_X22 0x18
+#define SP_C_RT_CTX_X23 0x20
+#define SP_C_RT_CTX_X24 0x28
+#define SP_C_RT_CTX_X25 0x30
+#define SP_C_RT_CTX_X26 0x38
+#define SP_C_RT_CTX_X27 0x40
+#define SP_C_RT_CTX_X28 0x48
+#define SP_C_RT_CTX_X29 0x50
+#define SP_C_RT_CTX_X30 0x58
+
+#define SP_C_RT_CTX_SIZE 0x60
+#define SP_C_RT_CTX_ENTRIES (SP_C_RT_CTX_SIZE >> DWORD_SHIFT)
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+/* Assembly helpers */
+uint64_t spm_secure_partition_enter(uint64_t *c_rt_ctx);
+void __dead2 spm_secure_partition_exit(uint64_t c_rt_ctx, uint64_t ret);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* SPM_COMMON_H */
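
A quick host-side sanity check of the frame constants declared above (a sketch only; DWORD_SHIFT is assumed to be 3, i.e. 8-byte entries, as in the TF-A utility headers):

    #include <assert.h>
    #include <stdio.h>

    #define DWORD_SHIFT             3       /* assumption: 8-byte (doubleword) entries */
    #define SP_C_RT_CTX_X19         0x0
    #define SP_C_RT_CTX_X30         0x58
    #define SP_C_RT_CTX_SIZE        0x60
    #define SP_C_RT_CTX_ENTRIES     (SP_C_RT_CTX_SIZE >> DWORD_SHIFT)

    int main(void)
    {
            /* x19..x30: 12 callee-saved registers, 8 bytes apart. */
            assert(SP_C_RT_CTX_ENTRIES == 12);
            assert(SP_C_RT_CTX_X30 ==
                   SP_C_RT_CTX_X19 + 8 * (SP_C_RT_CTX_ENTRIES - 1));
            printf("C runtime frame: %d entries, %d bytes\n",
                   SP_C_RT_CTX_ENTRIES, SP_C_RT_CTX_SIZE);
            return 0;
    }
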
diff --git a/services/std_svc/spm/common/spm.mk b/services/std_svc/spm/common/spm.mk
new file mode 100644
index 0000000..9aa96be
--- /dev/null
+++ b/services/std_svc/spm/common/spm.mk
@@ -0,0 +1,17 @@
+#
+# Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${ARCH},aarch64)
+ $(error "Error: SPM is only supported on aarch64.")
+endif
+
+INCLUDES += -Iservices/std_svc/spm/common/include
+
+SPM_SOURCES := $(addprefix services/std_svc/spm/common/,\
+ ${ARCH}/spm_helpers.S)
+
+# Let the top-level Makefile know that we intend to include a BL32 image
+NEED_BL32 := yes
diff --git a/services/std_svc/spm/el3_spmc/spmc.h b/services/std_svc/spm/el3_spmc/spmc.h
new file mode 100644
index 0000000..df0aa61
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPMC_H
+#define SPMC_H
+
+#include <stdint.h>
+
+#include <lib/psci/psci.h>
+#include <lib/spinlock.h>
+#include "spm_common.h"
+
+/*
+ * Ranges of FF-A IDs for Normal world and Secure world components. The
+ * convention matches that used by other SPMCs i.e. Hafnium and OP-TEE.
+ */
+#define FFA_NWD_ID_BASE 0x0
+#define FFA_NWD_ID_LIMIT 0x7FFF
+#define FFA_SWD_ID_BASE 0x8000
+#define FFA_SWD_ID_LIMIT SPMD_DIRECT_MSG_ENDPOINT_ID - 1
+#define FFA_SWD_ID_MASK 0x8000
+
+/* ID 0 is reserved for the normal world entity, (Hypervisor or OS Kernel). */
+#define FFA_NWD_ID U(0)
+/* First ID is reserved for the SPMC */
+#define FFA_SPMC_ID U(FFA_SWD_ID_BASE)
+/* SP IDs are allocated after the SPMC ID */
+#define FFA_SP_ID_BASE (FFA_SPMC_ID + 1)
+/* Align with Hafnium implementation */
+#define INV_SP_ID 0x7FFF
+
+/* FF-A warm boot types. */
+#define FFA_WB_TYPE_S2RAM 0
+#define FFA_WB_TYPE_NOTS2RAM 1
+
+/*
+ * Runtime states of an execution context as per the FF-A v1.1 specification.
+ */
+enum sp_runtime_states {
+ RT_STATE_WAITING,
+ RT_STATE_RUNNING,
+ RT_STATE_PREEMPTED,
+ RT_STATE_BLOCKED
+};
+
+/*
+ * Runtime model of an execution context as per the FF-A v1.1 specification. Its
+ * value is valid only if the execution context is not in the waiting state.
+ */
+enum sp_runtime_model {
+ RT_MODEL_DIR_REQ,
+ RT_MODEL_RUN,
+ RT_MODEL_INIT,
+ RT_MODEL_INTR
+};
+
+enum sp_runtime_el {
+ EL1 = 0,
+ S_EL0,
+ S_EL1
+};
+
+enum sp_execution_state {
+ SP_STATE_AARCH64 = 0,
+ SP_STATE_AARCH32
+};
+
+/*
+ * Execution context members for an SP. This is a bit like struct
+ * vcpu in a hypervisor.
+ */
+struct sp_exec_ctx {
+ /*
+ * Store the stack address to restore C runtime context from after
+ * returning from a synchronous entry into the SP.
+ */
+ uint64_t c_rt_ctx;
+
+ /* Space to maintain the architectural state of an SP. */
+ cpu_context_t cpu_ctx;
+
+ /* Track the current runtime state of the SP. */
+ enum sp_runtime_states rt_state;
+
+ /* Track the current runtime model of the SP. */
+ enum sp_runtime_model rt_model;
+};
+
+/*
+ * Structure to describe the cumulative properties of an SP.
+ */
+struct secure_partition_desc {
+ /*
+ * Execution contexts allocated to this endpoint. Ideally,
+ * we need as many contexts as there are physical cpus only
+ * for a S-EL1 SP which is MP-pinned.
+ */
+ struct sp_exec_ctx ec[PLATFORM_CORE_COUNT];
+
+ /* ID of the Secure Partition. */
+ uint16_t sp_id;
+
+ /* Runtime EL. */
+ enum sp_runtime_el runtime_el;
+
+ /* Partition UUID. */
+ uint32_t uuid[4];
+
+ /* Partition Properties. */
+ uint32_t properties;
+
+ /* Supported FF-A Version. */
+ uint32_t ffa_version;
+
+ /* Execution State. */
+ enum sp_execution_state execution_state;
+
+ /* Secondary entrypoint. Only valid for a S-EL1 SP. */
+ uintptr_t secondary_ep;
+};
+
+/*
+ * This define identifies the only SP that will be initialised and participate
+ * in FF-A communication. The implementation leaves the door open for more SPs
+ * to be managed in future but for now it is reasonable to assume that either a
+ * single S-EL0 or a single S-EL1 SP will be supported. This define will be used
+ * to identify which SP descriptor to initialise and manage during SP runtime.
+ */
+#define ACTIVE_SP_DESC_INDEX 0
+
+/*
+ * Structure to describe the cumulative properties of the Hypervisor and
+ * NS-Endpoints.
+ */
+struct ns_endpoint_desc {
+ /*
+ * ID of the NS-Endpoint or Hypervisor.
+ */
+ uint16_t ns_ep_id;
+
+ /*
+ * Supported FF-A Version.
+ */
+ uint32_t ffa_version;
+};
+
+/* Setup Function for different SP types. */
+void spmc_sp_common_setup(struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info);
+void spmc_el1_sp_setup(struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info);
+void spmc_sp_common_ep_commit(struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info);
+
+/*
+ * Helper function to perform a synchronous entry into a SP.
+ */
+uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec);
+
+/*
+ * Helper function to obtain the descriptor of the current SP on a physical cpu.
+ */
+struct secure_partition_desc *spmc_get_current_sp_ctx(void);
+
+/*
+ * Helper function to obtain the execution context of an SP on a
+ * physical cpu.
+ */
+struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp);
+
+/*
+ * Helper function to obtain the index of the execution context of an SP on a
+ * physical cpu.
+ */
+unsigned int get_ec_index(struct secure_partition_desc *sp);
+
+uint64_t spmc_ffa_error_return(void *handle, int error_code);
+
+/*
+ * Ensure a partition ID does not clash and follows the secure world convention.
+ */
+bool is_ffa_secure_id_valid(uint16_t partition_id);
+
+#endif /* SPMC_H */
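
The ID range convention above relies on bit 15 to distinguish secure-world components. A tiny standalone check of that convention (the helper name is an illustrative stand-in; it mirrors the ffa_is_secure_world_id() check used later in this patch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FFA_SWD_ID_MASK 0x8000U

    /* Illustrative stand-in for ffa_is_secure_world_id(). */
    static bool id_is_secure_world(uint16_t id)
    {
            return (id & FFA_SWD_ID_MASK) != 0U;
    }

    int main(void)
    {
            printf("0x0000 -> %d (NWd hypervisor/OS)\n", id_is_secure_world(0x0000U));
            printf("0x8000 -> %d (SPMC)\n", id_is_secure_world(0x8000U));
            printf("0x8001 -> %d (first SP)\n", id_is_secure_world(0x8001U));
            return 0;
    }
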
diff --git a/services/std_svc/spm/el3_spmc/spmc.mk b/services/std_svc/spm/el3_spmc/spmc.mk
new file mode 100644
index 0000000..2b154dd
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc.mk
@@ -0,0 +1,17 @@
+#
+# Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${ARCH},aarch64)
+ $(error "Error: SPMC is only supported on aarch64.")
+endif
+
+SPMC_SOURCES := $(addprefix services/std_svc/spm/el3_spmc/, \
+ spmc_main.c \
+ spmc_setup.c)
+
+
+# Let the top-level Makefile know that we intend to include a BL32 image
+NEED_BL32 := yes
diff --git a/services/std_svc/spm/el3_spmc/spmc_main.c b/services/std_svc/spm/el3_spmc/spmc_main.c
new file mode 100644
index 0000000..3fd8c78
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc_main.c
@@ -0,0 +1,788 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <arch_helpers.h>
+#include <bl31/bl31.h>
+#include <bl31/ehf.h>
+#include <common/debug.h>
+#include <common/fdt_wrappers.h>
+#include <common/runtime_svc.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/smccc.h>
+#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <libfdt.h>
+#include <plat/common/platform.h>
+#include <services/ffa_svc.h>
+#include <services/spmc_svc.h>
+#include <services/spmd_svc.h>
+#include "spmc.h"
+
+#include <platform_def.h>
+
+/*
+ * Allocate a secure partition descriptor to describe each SP in the system that
+ * does not reside at EL3.
+ */
+static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];
+
+/*
+ * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
+ * the system that interacts with a SP. It is used to track the Hypervisor
+ * buffer pair, version and ID for now. It could be extended to track VM
+ * properties when the SPMC supports indirect messaging.
+ */
+static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
+
+/*
+ * Helper function to obtain the descriptor of the last SP to whom control was
+ * handed to on this physical cpu. Currently, we assume there is only one SP.
+ * TODO: Expand to track multiple partitions when required.
+ */
+struct secure_partition_desc *spmc_get_current_sp_ctx(void)
+{
+ return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
+}
+
+/*
+ * Helper function to obtain the execution context of an SP on the
+ * current physical cpu.
+ */
+struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
+{
+ return &(sp->ec[get_ec_index(sp)]);
+}
+
+/* Helper function to get pointer to SP context from its ID. */
+struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
+{
+ /* Check for SWd Partitions. */
+ for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
+ if (sp_desc[i].sp_id == id) {
+ return &(sp_desc[i]);
+ }
+ }
+ return NULL;
+}
+
+/******************************************************************************
+ * This function returns to the place where spmc_sp_synchronous_entry() was
+ * called originally.
+ ******************************************************************************/
+__dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
+{
+ /*
+ * The SPM must have initiated the original request through a
+ * synchronous entry into the secure partition. Jump back to the
+ * original C runtime context with the value of rc in x0;
+ */
+ spm_secure_partition_exit(ec->c_rt_ctx, rc);
+
+ panic();
+}
+
+/*******************************************************************************
+ * Return FFA_ERROR with specified error code.
+ ******************************************************************************/
+uint64_t spmc_ffa_error_return(void *handle, int error_code)
+{
+ SMC_RET8(handle, FFA_ERROR,
+ FFA_TARGET_INFO_MBZ, error_code,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ);
+}
+
+/******************************************************************************
+ * Helper function to validate a secure partition ID to ensure it does not
+ * conflict with any other FF-A component and follows the convention to
+ * indicate it resides within the secure world.
+ ******************************************************************************/
+bool is_ffa_secure_id_valid(uint16_t partition_id)
+{
+ /* Ensure the ID is not the invalid partition ID. */
+ if (partition_id == INV_SP_ID) {
+ return false;
+ }
+
+ /* Ensure the ID is not the SPMD ID. */
+ if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
+ return false;
+ }
+
+ /*
+ * Ensure the ID follows the convention to indicate it resides
+ * in the secure world.
+ */
+ if (!ffa_is_secure_world_id(partition_id)) {
+ return false;
+ }
+
+ /* Ensure we don't conflict with the SPMC partition ID. */
+ if (partition_id == FFA_SPMC_ID) {
+ return false;
+ }
+
+ /* Ensure we do not already have an SP context with this ID. */
+ if (spmc_get_sp_ctx(partition_id)) {
+ return false;
+ }
+
+ return true;
+}
+
+/*******************************************************************************
+ * This function either forwards the request to the other world or returns
+ * with an ERET depending on the source of the call.
+ ******************************************************************************/
+static uint64_t spmc_smc_return(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *handle,
+ void *cookie,
+ uint64_t flags,
+ uint16_t dst_id)
+{
+ /* If the destination is in the normal world always go via the SPMD. */
+ if (ffa_is_normal_world_id(dst_id)) {
+ return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
+ cookie, handle, flags);
+ }
+ /*
+ * If the caller is secure and we want to return to the secure world,
+ * ERET directly.
+ */
+ else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
+ SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
+ }
+ /* If we originated in the normal world then switch contexts. */
+ else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
+ return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
+ x3, x4, handle);
+ } else {
+ /* Unknown State. */
+ panic();
+ }
+
+ /* Shouldn't be Reached. */
+ return 0;
+}
+
+/*******************************************************************************
+ * FF-A ABI Handlers.
+ ******************************************************************************/
+
+/*******************************************************************************
+ * Helper function to validate arg2 as part of a direct message.
+ ******************************************************************************/
+static inline bool direct_msg_validate_arg2(uint64_t x2)
+{
+ /*
+ * We currently only support partition messages, therefore ensure x2 is
+ * not set.
+ */
+ if (x2 != (uint64_t) 0) {
+ VERBOSE("Arg2 MBZ for partition messages (0x%lx).\n", x2);
+ return false;
+ }
+ return true;
+}
+
+/*******************************************************************************
+ * Handle direct request messages and route to the appropriate destination.
+ ******************************************************************************/
+static uint64_t direct_req_smc_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ uint16_t dst_id = ffa_endpoint_destination(x1);
+ struct secure_partition_desc *sp;
+ unsigned int idx;
+
+ /* Check if arg2 has been populated correctly based on message type. */
+ if (!direct_msg_validate_arg2(x2)) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /*
+ * If called by the secure world it is an invalid call since a
+ * SP cannot call into the Normal world and there is no other SP to call
+ * into. If there are other SPs in future then the partition runtime
+ * model would need to be validated as well.
+ */
+ if (secure_origin) {
+ VERBOSE("Direct request not supported to the Normal World.\n");
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Check if the SP ID is valid. */
+ sp = spmc_get_sp_ctx(dst_id);
+ if (sp == NULL) {
+ VERBOSE("Direct request to unknown partition ID (0x%x).\n",
+ dst_id);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Check that the target execution context is in a waiting state before
+ * forwarding the direct request to it.
+ */
+ idx = get_ec_index(sp);
+ if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
+ VERBOSE("SP context on core%u is not waiting (%u).\n",
+ idx, sp->ec[idx].rt_model);
+ return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
+ }
+
+ /*
+ * Everything checks out so forward the request to the SP after updating
+ * its state and runtime model.
+ */
+ sp->ec[idx].rt_state = RT_STATE_RUNNING;
+ sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
+ return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
+ handle, cookie, flags, dst_id);
+}
+
+/*******************************************************************************
+ * Handle direct response messages and route to the appropriate destination.
+ ******************************************************************************/
+static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ uint16_t dst_id = ffa_endpoint_destination(x1);
+ struct secure_partition_desc *sp;
+ unsigned int idx;
+
+ /* Check if arg2 has been populated correctly based on message type. */
+ if (!direct_msg_validate_arg2(x2)) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Check that the response did not originate from the Normal world. */
+ if (!secure_origin) {
+ VERBOSE("Direct Response not supported from Normal World.\n");
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Check that the response is either targeted to the Normal world or the
+ * SPMC e.g. a PM response.
+ */
+ if ((dst_id != FFA_SPMC_ID) && ffa_is_secure_world_id(dst_id)) {
+ VERBOSE("Direct response to invalid partition ID (0x%x).\n",
+ dst_id);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Obtain the SP descriptor and update its runtime state. */
+ sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
+ if (sp == NULL) {
+ VERBOSE("Direct response to unknown partition ID (0x%x).\n",
+ dst_id);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Sanity check state is being tracked correctly in the SPMC. */
+ idx = get_ec_index(sp);
+ assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
+
+ /* Ensure SP execution context was in the right runtime model. */
+ if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
+ VERBOSE("SP context on core%u not handling direct req (%u).\n",
+ idx, sp->ec[idx].rt_model);
+ return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+ }
+
+ /* Update the state of the SP execution context. */
+ sp->ec[idx].rt_state = RT_STATE_WAITING;
+
+ /*
+ * If the receiver is not the SPMC then forward the response to the
+ * Normal world.
+ */
+ if (dst_id == FFA_SPMC_ID) {
+ spmc_sp_synchronous_exit(&sp->ec[idx], x4);
+ /* Should not get here. */
+ panic();
+ }
+
+ return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
+ handle, cookie, flags, dst_id);
+}
+
+/*******************************************************************************
+ * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
+ * cycles.
+ ******************************************************************************/
+static uint64_t msg_wait_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ struct secure_partition_desc *sp;
+ unsigned int idx;
+
+ /*
+	 * Check that the call did not originate from the Normal world, as
+	 * only the secure world can invoke this ABI.
+ */
+ if (!secure_origin) {
+ VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
+ return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ /* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
+ sp = spmc_get_current_sp_ctx();
+ if (sp == NULL) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Get the execution context of the SP that invoked FFA_MSG_WAIT.
+ */
+ idx = get_ec_index(sp);
+
+ /* Ensure SP execution context was in the right runtime model. */
+ if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
+ return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+ }
+
+ /* Sanity check the state is being tracked correctly in the SPMC. */
+ assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
+
+ /*
+ * Perform a synchronous exit if the partition was initialising. The
+ * state is updated after the exit.
+ */
+ if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
+ spmc_sp_synchronous_exit(&sp->ec[idx], x4);
+ /* Should not get here */
+ panic();
+ }
+
+ /* Update the state of the SP execution context. */
+ sp->ec[idx].rt_state = RT_STATE_WAITING;
+
+ /* Resume normal world if a secure interrupt was handled. */
+ if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
+ /* FFA_MSG_WAIT can only be called from the secure world. */
+ unsigned int secure_state_in = SECURE;
+ unsigned int secure_state_out = NON_SECURE;
+
+ cm_el1_sysregs_context_save(secure_state_in);
+ cm_el1_sysregs_context_restore(secure_state_out);
+ cm_set_next_eret_context(secure_state_out);
+ SMC_RET0(cm_get_context(secure_state_out));
+ }
+
+ /* Forward the response to the Normal world. */
+ return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
+ handle, cookie, flags, FFA_NWD_ID);
+}
+
+static uint64_t ffa_error_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ struct secure_partition_desc *sp;
+ unsigned int idx;
+
+ /* Check that the response did not originate from the Normal world. */
+ if (!secure_origin) {
+ return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ /* Get the descriptor of the SP that invoked FFA_ERROR. */
+ sp = spmc_get_current_sp_ctx();
+ if (sp == NULL) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Get the execution context of the SP that invoked FFA_ERROR. */
+ idx = get_ec_index(sp);
+
+ /*
+ * We only expect FFA_ERROR to be received during SP initialisation
+ * otherwise this is an invalid call.
+ */
+ if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
+ ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
+ spmc_sp_synchronous_exit(&sp->ec[idx], x2);
+ /* Should not get here. */
+ panic();
+ }
+
+ return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+}
+
+/*******************************************************************************
+ * This function parses the Secure Partition Manifest. From the manifest, it
+ * fetches the details needed to prepare the Secure Partition image context
+ * and, if present, the Secure Partition boot arguments.
+ ******************************************************************************/
+static int sp_manifest_parse(void *sp_manifest, int offset,
+ struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info)
+{
+ int32_t ret, node;
+ uint32_t config_32;
+
+ /*
+ * Look for the mandatory fields that are expected to be present in
+ * the SP manifests.
+ */
+ node = fdt_path_offset(sp_manifest, "/");
+ if (node < 0) {
+ ERROR("Did not find root node.\n");
+ return node;
+ }
+
+ ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
+ if (ret != 0) {
+ ERROR("Missing SP Exception Level information.\n");
+ return ret;
+ }
+
+ sp->runtime_el = config_32;
+
+ ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
+ if (ret != 0) {
+ ERROR("Missing Secure Partition FF-A Version.\n");
+ return ret;
+ }
+
+ sp->ffa_version = config_32;
+
+ ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
+ if (ret != 0) {
+ ERROR("Missing Secure Partition Execution State.\n");
+ return ret;
+ }
+
+ sp->execution_state = config_32;
+
+ /*
+ * Look for the optional fields that are expected to be present in
+ * an SP manifest.
+ */
+ ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
+ if (ret != 0) {
+ WARN("Missing Secure Partition ID.\n");
+ } else {
+ if (!is_ffa_secure_id_valid(config_32)) {
+ ERROR("Invalid Secure Partition ID (0x%x).\n",
+ config_32);
+ return -EINVAL;
+ }
+ sp->sp_id = config_32;
+ }
+
+ return 0;
+}
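+
+/*
+ * For illustration, the properties consumed above correspond to an SP
+ * manifest root node along these lines (values are placeholders; the exact
+ * encodings are defined by the FF-A manifest binding):
+ *
+ *	compatible = "arm,ffa-manifest-1.0";
+ *	ffa-version = <...>;		(mandatory)
+ *	exception-level = <...>;	(mandatory, must resolve to S_EL1)
+ *	execution-state = <...>;	(mandatory)
+ *	id = <...>;			(optional, must be a valid secure FF-A ID)
+ */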
+
+/*******************************************************************************
+ * This function gets the Secure Partition Manifest base and maps the manifest
+ * region.
+ * Currently only one Secure Partition manifest is considered; it is used to
+ * prepare the context for the single Secure Partition.
+ ******************************************************************************/
+static int find_and_prepare_sp_context(void)
+{
+ void *sp_manifest;
+ uintptr_t manifest_base;
+ uintptr_t manifest_base_align;
+ entry_point_info_t *next_image_ep_info;
+ int32_t ret;
+ struct secure_partition_desc *sp;
+
+ next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+ if (next_image_ep_info == NULL) {
+ WARN("No Secure Partition image provided by BL2.\n");
+ return -ENOENT;
+ }
+
+ sp_manifest = (void *)next_image_ep_info->args.arg0;
+ if (sp_manifest == NULL) {
+ WARN("Secure Partition manifest absent.\n");
+ return -ENOENT;
+ }
+
+ manifest_base = (uintptr_t)sp_manifest;
+ manifest_base_align = page_align(manifest_base, DOWN);
+
+ /*
+ * Map the secure partition manifest region in the EL3 translation
+ * regime.
+	 * Map an area of (2 * PAGE_SIZE) for now: once the manifest base is
+	 * aligned down to a page boundary, a single page starting at that
+	 * aligned base may not fully contain the manifest.
+ */
+ ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
+ manifest_base_align,
+ PAGE_SIZE * 2,
+ MT_RO_DATA);
+ if (ret != 0) {
+ ERROR("Error while mapping SP manifest (%d).\n", ret);
+ return ret;
+ }
+
+ ret = fdt_node_offset_by_compatible(sp_manifest, -1,
+ "arm,ffa-manifest-1.0");
+ if (ret < 0) {
+		ERROR("Error while reading the SP manifest.\n");
+ return -EINVAL;
+ }
+
+ /*
+	 * Store the size of the manifest so that it can later be passed to
+	 * the SP as boot information.
+ */
+ next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
+ INFO("Manifest size = %lu bytes.\n", next_image_ep_info->args.arg1);
+
+ /*
+ * Select an SP descriptor for initialising the partition's execution
+ * context on the primary CPU.
+ */
+ sp = spmc_get_current_sp_ctx();
+
+ /* Initialize entry point information for the SP */
+ SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
+ SECURE | EP_ST_ENABLE);
+
+ /* Parse the SP manifest. */
+ ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info);
+ if (ret != 0) {
+ ERROR("Error in Secure Partition manifest parsing.\n");
+ return ret;
+ }
+
+	/* Check that the runtime EL specified in the manifest is supported. */
+ if (sp->runtime_el != S_EL1) {
+ ERROR("Unexpected runtime EL: %d\n", sp->runtime_el);
+ return -EINVAL;
+ }
+
+ /* Perform any common initialisation. */
+ spmc_sp_common_setup(sp, next_image_ep_info);
+
+ /* Perform any initialisation specific to S-EL1 SPs. */
+ spmc_el1_sp_setup(sp, next_image_ep_info);
+
+ /* Initialize the SP context with the required ep info. */
+ spmc_sp_common_ep_commit(sp, next_image_ep_info);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * This function takes an SP context pointer and performs a synchronous entry
+ * into it.
+ ******************************************************************************/
+uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
+{
+ uint64_t rc;
+
+ assert(ec != NULL);
+
+ /* Assign the context of the SP to this CPU */
+ cm_set_context(&(ec->cpu_ctx), SECURE);
+
+ /* Restore the context assigned above */
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+
+ /* Invalidate TLBs at EL1. */
+ tlbivmalle1();
+ dsbish();
+
+ /* Enter Secure Partition */
+ rc = spm_secure_partition_enter(&ec->c_rt_ctx);
+
+ /* Save secure state */
+ cm_el1_sysregs_context_save(SECURE);
+
+ return rc;
+}
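+
+/*
+ * The exit counterpart (used by the FFA_ERROR handler above) is expected to
+ * be the mirror image of this entry: it jumps back through the saved C
+ * runtime context so that 'rc' becomes the return value of
+ * spmc_sp_synchronous_entry(). A plausible sketch, assuming it builds on the
+ * shared spm_secure_partition_exit() helper:
+ *
+ *	void __dead2 spmc_sp_synchronous_exit(struct sp_exec_ctx *ec,
+ *					      uint64_t rc)
+ *	{
+ *		assert(ec != NULL);
+ *		spm_secure_partition_exit(ec->c_rt_ctx, rc);
+ *		panic();
+ *	}
+ */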
+
+/*******************************************************************************
+ * SPMC Helper Functions.
+ ******************************************************************************/
+static int32_t sp_init(void)
+{
+ uint64_t rc;
+ struct secure_partition_desc *sp;
+ struct sp_exec_ctx *ec;
+
+ sp = spmc_get_current_sp_ctx();
+ ec = spmc_get_sp_ec(sp);
+ ec->rt_model = RT_MODEL_INIT;
+ ec->rt_state = RT_STATE_RUNNING;
+
+ INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
+
+ rc = spmc_sp_synchronous_entry(ec);
+ if (rc != 0) {
+ /* Indicate SP init was not successful. */
+ ERROR("SP (0x%x) failed to initialize (%lu).\n",
+ sp->sp_id, rc);
+ return 0;
+ }
+
+ ec->rt_state = RT_STATE_WAITING;
+ INFO("Secure Partition initialized.\n");
+
+ return 1;
+}
+
+static void initialize_sp_descs(void)
+{
+ struct secure_partition_desc *sp;
+
+ for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
+ sp = &sp_desc[i];
+ sp->sp_id = INV_SP_ID;
+ sp->secondary_ep = 0;
+ }
+}
+
+static void initialize_ns_ep_descs(void)
+{
+ struct ns_endpoint_desc *ns_ep;
+
+ for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
+ ns_ep = &ns_ep_desc[i];
+ /*
+		 * This clashes with the Hypervisor ID but is not a
+		 * problem in practice.
+ */
+ ns_ep->ns_ep_id = 0;
+ ns_ep->ffa_version = 0;
+ }
+}
+
+/*******************************************************************************
+ * Initialize SPMC attributes for the SPMD.
+ ******************************************************************************/
+void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
+{
+ spmc_attrs->major_version = FFA_VERSION_MAJOR;
+ spmc_attrs->minor_version = FFA_VERSION_MINOR;
+ spmc_attrs->exec_state = MODE_RW_64;
+ spmc_attrs->spmc_id = FFA_SPMC_ID;
+}
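+
+/*
+ * Note: when the SPMC resides at EL3 the SPMD does not parse an SPMC
+ * manifest. Instead, spmd_setup() calls spmc_populate_attrs() so that the
+ * SPMD's view of the SPMC version, execution state and ID stays consistent
+ * with this SPMC.
+ */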
+
+/*******************************************************************************
+ * Initialize contexts of all Secure Partitions.
+ ******************************************************************************/
+int32_t spmc_setup(void)
+{
+ int32_t ret;
+
+ /* Initialize endpoint descriptors */
+	initialize_sp_descs();
+	initialize_ns_ep_descs();
+
+ /* Perform physical SP setup. */
+
+ /* Disable MMU at EL1 (initialized by BL2) */
+ disable_mmu_icache_el1();
+
+ /* Initialize context of the SP */
+ INFO("Secure Partition context setup start.\n");
+
+ ret = find_and_prepare_sp_context();
+ if (ret != 0) {
+		ERROR("Failed to find and prepare the SP context.\n");
+ return ret;
+ }
+
+ /* Register init function for deferred init. */
+ bl31_register_bl32_init(&sp_init);
+
+ INFO("Secure Partition setup done.\n");
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Secure Partition Manager SMC handler.
+ ******************************************************************************/
+uint64_t spmc_smc_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ switch (smc_fid) {
+
+ case FFA_MSG_SEND_DIRECT_REQ_SMC32:
+ case FFA_MSG_SEND_DIRECT_REQ_SMC64:
+ return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
+ x3, x4, cookie, handle, flags);
+
+ case FFA_MSG_SEND_DIRECT_RESP_SMC32:
+ case FFA_MSG_SEND_DIRECT_RESP_SMC64:
+ return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
+ x3, x4, cookie, handle, flags);
+
+ case FFA_MSG_WAIT:
+ return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
+ cookie, handle, flags);
+
+ case FFA_ERROR:
+ return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
+ cookie, handle, flags);
+
+ default:
+ WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
+ break;
+ }
+ return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+}
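+
+/*
+ * Dispatch note: with SPMC_AT_EL3 enabled, spmc_smc_handler() is reached
+ * directly from spmd_ffa_smc_handler() for secure-origin FF-A calls, and via
+ * spmd_smc_forward() for FF-A calls that originate in the Normal world and
+ * are not consumed by the SPMD itself.
+ */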
diff --git a/services/std_svc/spm/el3_spmc/spmc_setup.c b/services/std_svc/spm/el3_spmc/spmc_setup.c
new file mode 100644
index 0000000..7b23c9e
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc_setup.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <context.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/common_def.h>
+#include <plat/common/platform.h>
+#include <services/ffa_svc.h>
+#include "spm_common.h"
+#include "spmc.h"
+
+#include <platform_def.h>
+
+/*
+ * We assume that the index of the execution context in use is the linear
+ * index of the current physical CPU.
+ */
+unsigned int get_ec_index(struct secure_partition_desc *sp)
+{
+ return plat_my_core_pos();
+}
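+
+/*
+ * Typical usage, as in the SMC handlers: the returned index selects the
+ * per-CPU execution context inside the partition descriptor, e.g.
+ *
+ *	struct sp_exec_ctx *ec = &sp->ec[get_ec_index(sp)];
+ */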
+
+/* S-EL1 partition specific initialisation. */
+void spmc_el1_sp_setup(struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info)
+{
+ /* Sanity check input arguments. */
+ assert(sp != NULL);
+ assert(ep_info != NULL);
+
+ /* Initialise the SPSR for S-EL1 SPs. */
+ ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS);
+
+ /*
+ * Check whether setup is being performed for the primary or a secondary
+ * execution context. In the latter case, indicate to the SP that this
+ * is a warm boot.
+ * TODO: This check would need to be reworked if the same entry point is
+ * used for both primary and secondary initialisation.
+ */
+ if (sp->secondary_ep != 0U) {
+ /*
+ * Sanity check that the secondary entry point is still what was
+ * originally set.
+ */
+ assert(sp->secondary_ep == ep_info->pc);
+ ep_info->args.arg0 = FFA_WB_TYPE_S2RAM;
+ }
+}
+
+/* Common initialisation for all SPs. */
+void spmc_sp_common_setup(struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info)
+{
+ uint16_t sp_id;
+
+ /* Assign FF-A Partition ID if not already assigned. */
+ if (sp->sp_id == INV_SP_ID) {
+ sp_id = FFA_SP_ID_BASE + ACTIVE_SP_DESC_INDEX;
+ /*
+ * Ensure we don't clash with previously assigned partition
+ * IDs.
+ */
+ while (!is_ffa_secure_id_valid(sp_id)) {
+ sp_id++;
+
+ if (sp_id == FFA_SWD_ID_LIMIT) {
+ ERROR("Unable to determine valid SP ID.\n");
+ panic();
+ }
+ }
+ sp->sp_id = sp_id;
+ }
+
+ /*
+ * We currently only support S-EL1 partitions so ensure this is the
+ * case.
+ */
+ assert(sp->runtime_el == S_EL1);
+
+ /*
+ * Clear the general purpose registers. These should be populated as
+ * required.
+ */
+ zeromem(&ep_info->args, sizeof(ep_info->args));
+}
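+
+/*
+ * For reference: secure FF-A endpoint IDs are conventionally distinguished
+ * from Normal world IDs by bit 15 of the 16-bit ID, so the loop above walks
+ * upwards until is_ffa_secure_id_valid() accepts a candidate. A hypothetical
+ * sketch of that convention (ignoring any reserved IDs):
+ *
+ *	bool id_is_secure = ((sp_id >> 15) & 1U) != 0U;
+ */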
+
+/*
+ * Initialise the SP context now that we have populated the common and
+ * EL-specific entry point information.
+ */
+void spmc_sp_common_ep_commit(struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info)
+{
+ cpu_context_t *cpu_ctx;
+
+ cpu_ctx = &(spmc_get_sp_ec(sp)->cpu_ctx);
+ print_entry_point_info(ep_info);
+ cm_setup_context(cpu_ctx, ep_info);
+}
diff --git a/services/std_svc/spm_mm/aarch64/spm_mm_shim_exceptions.S b/services/std_svc/spm/spm_mm/aarch64/spm_mm_shim_exceptions.S
similarity index 97%
rename from services/std_svc/spm_mm/aarch64/spm_mm_shim_exceptions.S
rename to services/std_svc/spm/spm_mm/aarch64/spm_mm_shim_exceptions.S
index be4084c..836f75c 100644
--- a/services/std_svc/spm_mm/aarch64/spm_mm_shim_exceptions.S
+++ b/services/std_svc/spm/spm_mm/aarch64/spm_mm_shim_exceptions.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
diff --git a/services/std_svc/spm_mm/spm_mm.mk b/services/std_svc/spm/spm_mm/spm_mm.mk
similarity index 78%
rename from services/std_svc/spm_mm/spm_mm.mk
rename to services/std_svc/spm/spm_mm/spm_mm.mk
index a87bdd8..78ef0c9 100644
--- a/services/std_svc/spm_mm/spm_mm.mk
+++ b/services/std_svc/spm/spm_mm/spm_mm.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -17,11 +17,10 @@
$(error "Error: SPM_MM is not compatible with ENABLE_SME_FOR_NS")
endif
-SPM_SOURCES := $(addprefix services/std_svc/spm_mm/, \
- ${ARCH}/spm_mm_helpers.S \
+SPM_MM_SOURCES := $(addprefix services/std_svc/spm/spm_mm/, \
${ARCH}/spm_mm_shim_exceptions.S \
- spm_mm_main.c \
- spm_mm_setup.c \
+ spm_mm_main.c \
+ spm_mm_setup.c \
spm_mm_xlat.c)
diff --git a/services/std_svc/spm_mm/spm_mm_main.c b/services/std_svc/spm/spm_mm/spm_mm_main.c
similarity index 98%
rename from services/std_svc/spm_mm/spm_mm_main.c
rename to services/std_svc/spm/spm_mm/spm_mm_main.c
index 14c0038..e71e65b 100644
--- a/services/std_svc/spm_mm/spm_mm_main.c
+++ b/services/std_svc/spm/spm_mm/spm_mm_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -22,6 +22,7 @@
#include <services/spm_mm_svc.h>
#include <smccc_helpers.h>
+#include "spm_common.h"
#include "spm_mm_private.h"
/*******************************************************************************
diff --git a/services/std_svc/spm_mm/spm_mm_private.h b/services/std_svc/spm/spm_mm/spm_mm_private.h
similarity index 88%
rename from services/std_svc/spm_mm/spm_mm_private.h
rename to services/std_svc/spm/spm_mm/spm_mm_private.h
index 45b4789..0eff1c0 100644
--- a/services/std_svc/spm_mm/spm_mm_private.h
+++ b/services/std_svc/spm/spm_mm/spm_mm_private.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,6 +8,7 @@
#define SPM_MM_PRIVATE_H
#include <context.h>
+#include "spm_common.h"
/*******************************************************************************
* Constants that allow assembler code to preserve callee-saved registers of the
@@ -51,9 +52,6 @@
spinlock_t state_lock;
} sp_context_t;
-/* Assembly helpers */
-uint64_t spm_secure_partition_enter(uint64_t *c_rt_ctx);
-void __dead2 spm_secure_partition_exit(uint64_t c_rt_ctx, uint64_t ret);
void spm_sp_setup(sp_context_t *sp_ctx);
diff --git a/services/std_svc/spm_mm/spm_mm_setup.c b/services/std_svc/spm/spm_mm/spm_mm_setup.c
similarity index 98%
rename from services/std_svc/spm_mm/spm_mm_setup.c
rename to services/std_svc/spm/spm_mm/spm_mm_setup.c
index 9d681c2..04dc212 100644
--- a/services/std_svc/spm_mm/spm_mm_setup.c
+++ b/services/std_svc/spm/spm_mm/spm_mm_setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -19,6 +19,7 @@
#include <plat/common/platform.h>
#include <services/spm_mm_partition.h>
+#include "spm_common.h"
#include "spm_mm_private.h"
#include "spm_mm_shim_private.h"
diff --git a/services/std_svc/spm_mm/spm_mm_shim_private.h b/services/std_svc/spm/spm_mm/spm_mm_shim_private.h
similarity index 90%
rename from services/std_svc/spm_mm/spm_mm_shim_private.h
rename to services/std_svc/spm/spm_mm/spm_mm_shim_private.h
index 0c8d894..f69c748 100644
--- a/services/std_svc/spm_mm/spm_mm_shim_private.h
+++ b/services/std_svc/spm/spm_mm/spm_mm_shim_private.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
diff --git a/services/std_svc/spm_mm/spm_mm_xlat.c b/services/std_svc/spm/spm_mm/spm_mm_xlat.c
similarity index 98%
rename from services/std_svc/spm_mm/spm_mm_xlat.c
rename to services/std_svc/spm/spm_mm/spm_mm_xlat.c
index eae597c..6261016 100644
--- a/services/std_svc/spm_mm/spm_mm_xlat.c
+++ b/services/std_svc/spm/spm_mm/spm_mm_xlat.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
diff --git a/services/std_svc/spmd/spmd_main.c b/services/std_svc/spmd/spmd_main.c
index 27a8382..5b131cd 100644
--- a/services/std_svc/spmd/spmd_main.c
+++ b/services/std_svc/spmd/spmd_main.c
@@ -24,6 +24,7 @@
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/ffa_svc.h>
+#include <services/spmc_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"
@@ -34,7 +35,8 @@
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];
/*******************************************************************************
- * SPM Core attribute information read from its manifest.
+ * SPM Core attribute information is read from its manifest if the SPMC is not
+ * at EL3. Otherwise, it is populated directly by the SPMC.
******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;
@@ -88,7 +90,9 @@
uint64_t x2,
uint64_t x3,
uint64_t x4,
- void *handle);
+ void *cookie,
+ void *handle,
+ uint64_t flags);
/******************************************************************************
* Builds an SPMD to SPMC direct message request.
@@ -385,8 +389,23 @@
******************************************************************************/
int spmd_setup(void)
{
- void *spmc_manifest;
int rc;
+ void *spmc_manifest;
+
+ /*
+	 * If the SPMC is at EL3, then just initialise it directly. The
+	 * additional steps needed when it runs at a lower EL do not apply.
+ */
+ if (is_spmc_at_el3()) {
+ /* Allow the SPMC to populate its attributes directly. */
+ spmc_populate_attrs(&spmc_attrs);
+
+ rc = spmc_setup();
+ if (rc != 0) {
+ ERROR("SPMC initialisation failed 0x%x.\n", rc);
+ }
+ return rc;
+ }
spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
if (spmc_ep_info == NULL) {
@@ -417,15 +436,15 @@
}
/*******************************************************************************
- * Forward SMC to the other security state
+ * Forward FF-A SMCs to the other security state.
******************************************************************************/
-static uint64_t spmd_smc_forward(uint32_t smc_fid,
- bool secure_origin,
- uint64_t x1,
- uint64_t x2,
- uint64_t x3,
- uint64_t x4,
- void *handle)
+uint64_t spmd_smc_switch_state(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *handle)
{
unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;
@@ -458,6 +477,28 @@
}
/*******************************************************************************
+ * Forward SMCs to the other security state.
+ ******************************************************************************/
+static uint64_t spmd_smc_forward(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ if (is_spmc_at_el3() && !secure_origin) {
+ return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4,
+ cookie, handle, flags);
+ }
+ return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4,
+ handle);
+}
+
+/*******************************************************************************
* Return FFA_ERROR with specified error code
******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
@@ -484,6 +525,10 @@
*****************************************************************************/
static bool spmd_is_spmc_message(unsigned int ep)
{
+ if (is_spmc_at_el3()) {
+ return false;
+ }
+
return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
&& (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
}
@@ -502,6 +547,35 @@
}
/*******************************************************************************
+ * This function routes FF-A SMCs either to the main SPMD handler or, when an
+ * EL3 SPMC is enabled, directly to the SPMC, based on the caller's security
+ * state.
+ ******************************************************************************/
+uint64_t spmd_ffa_smc_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ if (is_spmc_at_el3()) {
+ /*
+		 * If we have an SPMC at EL3, allow it to handle the SMC first.
+		 * The SPMC will call back into the SPMD handler if required.
+ */
+ if (is_caller_secure(flags)) {
+ return spmc_smc_handler(smc_fid,
+ is_caller_secure(flags),
+ x1, x2, x3, x4, cookie,
+ handle, flags);
+ }
+ }
+ return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
+ handle, flags);
+}
+
+/*******************************************************************************
* This function handles all SMCs in the range reserved for FFA. Each call is
* either forwarded to the other security state or handled by the SPM dispatcher
******************************************************************************/
@@ -542,7 +616,8 @@
}
return spmd_smc_forward(smc_fid, secure_origin,
- x1, x2, x3, x4, handle);
+ x1, x2, x3, x4, cookie,
+ handle, flags);
break; /* not reached */
case FFA_VERSION:
@@ -553,9 +628,11 @@
* If caller is non secure and SPMC was initialized,
* return SPMC's version.
* Sanity check to "input_version".
+			 * If the EL3 SPMC is enabled, ignore the SPMC state as
+			 * it is not used.
*/
if ((input_version & FFA_VERSION_BIT31_MASK) ||
- (ctx->state == SPMC_STATE_RESET)) {
+ (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
ret = FFA_ERROR_NOT_SUPPORTED;
} else if (!secure_origin) {
gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
@@ -610,7 +687,8 @@
*/
return spmd_smc_forward(ret, true, FFA_PARAM_MBZ,
FFA_PARAM_MBZ, FFA_PARAM_MBZ,
- FFA_PARAM_MBZ, gpregs);
+ FFA_PARAM_MBZ, cookie, gpregs,
+ flags);
} else {
ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
FFA_VERSION_MINOR);
@@ -630,7 +708,8 @@
/* Forward SMC from Normal world to the SPM Core */
if (!secure_origin) {
return spmd_smc_forward(smc_fid, secure_origin,
- x1, x2, x3, x4, handle);
+ x1, x2, x3, x4, cookie,
+ handle, flags);
}
/*
@@ -726,7 +805,8 @@
} else {
/* Forward direct message to the other world */
return spmd_smc_forward(smc_fid, secure_origin,
- x1, x2, x3, x4, handle);
+ x1, x2, x3, x4, cookie,
+ handle, flags);
}
break; /* Not reached */
@@ -736,7 +816,8 @@
} else {
/* Forward direct message to the other world */
return spmd_smc_forward(smc_fid, secure_origin,
- x1, x2, x3, x4, handle);
+ x1, x2, x3, x4, cookie,
+ handle, flags);
}
break; /* Not reached */
@@ -755,6 +836,7 @@
case FFA_NOTIFICATION_INFO_GET:
case FFA_NOTIFICATION_INFO_GET_SMC64:
case FFA_MSG_SEND2:
+ case FFA_RX_ACQUIRE:
#endif
case FFA_MSG_RUN:
/*
@@ -791,7 +873,8 @@
*/
return spmd_smc_forward(smc_fid, secure_origin,
- x1, x2, x3, x4, handle);
+ x1, x2, x3, x4, cookie,
+ handle, flags);
break; /* not reached */
case FFA_MSG_WAIT:
@@ -814,7 +897,8 @@
}
return spmd_smc_forward(smc_fid, secure_origin,
- x1, x2, x3, x4, handle);
+ x1, x2, x3, x4, cookie,
+ handle, flags);
break; /* not reached */
case FFA_NORMAL_WORLD_RESUME:
diff --git a/services/std_svc/spmd/spmd_private.h b/services/std_svc/spmd/spmd_private.h
index 4cd6a74..4c298c9 100644
--- a/services/std_svc/spmd/spmd_private.h
+++ b/services/std_svc/spmd/spmd_private.h
@@ -58,12 +58,6 @@
*/
#define FFA_NS_ENDPOINT_ID U(0)
-/* Mask and shift to check valid secure FF-A Endpoint ID. */
-#define SPMC_SECURE_ID_MASK U(1)
-#define SPMC_SECURE_ID_SHIFT U(15)
-
-#define SPMD_DIRECT_MSG_ENDPOINT_ID U(FFA_ENDPOINT_ID_MAX - 1)
-
/* Define SPMD target function IDs for framework messages to the SPMC */
#define SPMD_FWK_MSG_BIT BIT(31)
#define SPMD_FWK_MSG_PSCI U(0)
diff --git a/services/std_svc/std_svc_setup.c b/services/std_svc/std_svc_setup.c
index bfe26ca..b1e3db9 100644
--- a/services/std_svc/std_svc_setup.c
+++ b/services/std_svc/std_svc_setup.c
@@ -17,6 +17,7 @@
#include <services/rmmd_svc.h>
#include <services/sdei.h>
#include <services/spm_mm_svc.h>
+#include <services/spmc_svc.h>
#include <services/spmd_svc.h>
#include <services/std_svc.h>
#include <services/trng_svc.h>
@@ -147,8 +148,8 @@
* dispatcher and return its return value
*/
if (is_ffa_fid(smc_fid)) {
- return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
- handle, flags);
+ return spmd_ffa_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
+ handle, flags);
}
#endif
diff --git a/tools/memory/print_memory_map.py b/tools/memory/print_memory_map.py
index 8a84018..ef53f7e 100755
--- a/tools/memory/print_memory_map.py
+++ b/tools/memory/print_memory_map.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+# Copyright (c) 2019-2022, Arm Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -17,12 +17,24 @@
blx_symbols = ['__BL1_RAM_START__', '__BL1_RAM_END__',
'__BL2_END__',
'__BL31_END__',
+ '__RO_START__', '__RO_END_UNALIGNED__', '__RO_END__',
'__TEXT_START__', '__TEXT_END__',
+ '__TEXT_RESIDENT_START__', '__TEXT_RESIDENT_END__',
'__RODATA_START__', '__RODATA_END__',
'__DATA_START__', '__DATA_END__',
'__STACKS_START__', '__STACKS_END__',
- '__BSS_END',
+ '__BSS_START__', '__BSS_END__',
'__COHERENT_RAM_START__', '__COHERENT_RAM_END__',
+ '__CPU_OPS_START__', '__CPU_OPS_END__',
+ '__FCONF_POPULATOR_START__', '__FCONF_POPULATOR_END__',
+ '__GOT_START__', '__GOT_END__',
+ '__PARSER_LIB_DESCS_START__', '__PARSER_LIB_DESCS_END__',
+ '__PMF_TIMESTAMP_START__', '__PMF_TIMESTAMP_END__',
+ '__PMF_SVC_DESCS_START__', '__PMF_SVC_DESCS_END__',
+ '__RELA_START__', '__RELA_END__',
+ '__RT_SVC_DESCS_START__', '__RT_SVC_DESCS_END__',
+ '__BASE_XLAT_TABLE_START__', '__BASE_XLAT_TABLE_END__',
+ '__XLAT_TABLE_START__', '__XLAT_TABLE_END__',
]
# Regex to extract address from map file
@@ -40,6 +52,10 @@
else:
build_dir = 'build/fvp/debug'
+max_len = max(len(word) for word in blx_symbols) + 2
+if (max_len % 2) != 0:
+ max_len += 1
+
# Extract all the required symbols from the map files
for image in bl_images:
file_path = os.path.join(build_dir, image, '{}.map'.format(image))
@@ -47,6 +63,7 @@
with open (file_path, 'rt') as mapfile:
for line in mapfile:
for symbol in blx_symbols:
+ skip_symbol = 0
# Regex to find symbol definition
line_pattern = re.compile(r"\b0x\w*\s*" + symbol + "\s= .")
match = line_pattern.search(line)
@@ -54,7 +71,13 @@
# Extract address from line
match = address_pattern.search(line)
if match:
- address_list.append([match.group(0), symbol, image])
+ if '_END__' in symbol:
+ sym_start = symbol.replace('_END__', '_START__')
+ if [match.group(0), sym_start, image] in address_list:
+ address_list.remove([match.group(0), sym_start, image])
+ skip_symbol = 1
+ if skip_symbol == 0:
+ address_list.append([match.group(0), symbol, image])
# Sort by address
address_list.sort(key=operator.itemgetter(0))
@@ -64,16 +87,16 @@
address_list = reversed(address_list)
# Generate memory view
-print('{:-^93}'.format('Memory Map from: ' + build_dir))
+print(('{:-^%d}' % (max_len * 3 + 20 + 7)).format('Memory Map from: ' + build_dir))
for address in address_list:
if "bl1" in address[2]:
- print(address[0], '+{:-^22}+ |{:^22}| |{:^22}|'.format(address[1], '', ''))
+ print(address[0], ('+{:-^%d}+ |{:^%d}| |{:^%d}|' % (max_len, max_len, max_len)).format(address[1], '', ''))
elif "bl2" in address[2]:
- print(address[0], '|{:^22}| +{:-^22}+ |{:^22}|'.format('', address[1], ''))
+ print(address[0], ('|{:^%d}| +{:-^%d}+ |{:^%d}|' % (max_len, max_len, max_len)).format('', address[1], ''))
elif "bl31" in address[2]:
- print(address[0], '|{:^22}| |{:^22}| +{:-^22}+'.format('', '', address[1]))
+ print(address[0], ('|{:^%d}| |{:^%d}| +{:-^%d}+' % (max_len, max_len, max_len)).format('', '', address[1]))
else:
- print(address[0], '|{:^22}| |{:^22}| +{:-^22}+'.format('', '', address[1]))
+ print(address[0], ('|{:^%d}| |{:^%d}| +{:-^%d}+' % (max_len, max_len, max_len)).format('', '', address[1]))
-print('{:^20}{:_^22} {:_^22} {:_^22}'.format('', '', '', ''))
-print('{:^20}{:^22} {:^22} {:^22}'.format('address', 'bl1', 'bl2', 'bl31'))
+print(('{:^20}{:_^%d} {:_^%d} {:_^%d}' % (max_len, max_len, max_len)).format('', '', '', ''))
+print(('{:^20}{:^%d} {:^%d} {:^%d}' % (max_len, max_len, max_len)).format('address', 'bl1', 'bl2', 'bl31'))