Merge changes I9c2bf78a,Iaff5f1fa,I44686a36 into integration
* changes:
fix(imx8m): map BL32 memory only if SPD_opteed or SPD_trusty is enabled
feat(imx8mn): add workaround for errata ERR050362
feat(imx8m): enable snvs privileged registers access
diff --git a/Makefile b/Makefile
index 6678179..464544f 100644
--- a/Makefile
+++ b/Makefile
@@ -1037,6 +1037,10 @@
# Determine if FEAT_SB is supported
ENABLE_FEAT_SB = $(if $(findstring sb,${arch-features}),1,0)
+ifeq ($(PSA_CRYPTO),1)
+ $(info PSA_CRYPTO is an experimental feature)
+endif
+
################################################################################
# Process platform overrideable behaviour
################################################################################
@@ -1218,6 +1222,7 @@
ERRATA_NON_ARM_INTERCONNECT \
CONDITIONAL_CMO \
RAS_FFH_SUPPORT \
+ PSA_CRYPTO \
)))
# Numeric_Flags
@@ -1408,6 +1413,7 @@
IMPDEF_SYSREG_TRAP \
SVE_VECTOR_LEN \
ENABLE_SPMD_LP \
+ PSA_CRYPTO \
)))
ifeq (${SANITIZE_UB},trap)
diff --git a/changelog.yaml b/changelog.yaml
index 3f7979e..2d6986b 100644
--- a/changelog.yaml
+++ b/changelog.yaml
@@ -809,6 +809,9 @@
- title: mbedTLS
scope: mbedtls
+ - title: mbedTLS-PSA
+ scope: mbedtls-psa
+
- title: Console
scope: console
diff --git a/docs/design/cpu-specific-build-macros.rst b/docs/design/cpu-specific-build-macros.rst
index bf04558..ad05a50 100644
--- a/docs/design/cpu-specific-build-macros.rst
+++ b/docs/design/cpu-specific-build-macros.rst
@@ -726,6 +726,10 @@
For Cortex-X3, the following errata build flags are defined :
+- ``ERRATA_X3_2070301``: This applies errata 2070301 workaround to the Cortex-X3
+ CPU. This needs to be enabled only for revisions r0p0, r1p0, r1p1 and r1p2 of
+ the CPU and is still open.
+
- ``ERRATA_X3_2313909``: This applies errata 2313909 workaround to
Cortex-X3 CPU. This needs to be enabled only for revisions r0p0 and r1p0
of the CPU, it is fixed in r1p1.
diff --git a/docs/getting_started/build-options.rst b/docs/getting_started/build-options.rst
index 7c84ef1..34d83f2 100644
--- a/docs/getting_started/build-options.rst
+++ b/docs/getting_started/build-options.rst
@@ -1185,6 +1185,12 @@
errata mitigation for platforms with a non-arm interconnect using the errata
ABI. By default its disabled (``0``).
+- ``PSA_CRYPTO``: Boolean option for enabling MbedTLS PSA crypto APIs support.
+ The platform will use PSA compliant Crypto APIs during authentication and
+ image measurement process by enabling this option. It uses APIs defined as
+ per the `PSA Crypto API specification`_. This feature is only supported if
+ using MbedTLS 3.x version. By default it is disabled (``0``).
+
GICv3 driver options
--------------------
@@ -1306,3 +1312,4 @@
.. _GCC: https://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html
.. _Clang: https://clang.llvm.org/docs/DiagnosticsReference.html
.. _Firmware Handoff specification: https://github.com/FirmwareHandoff/firmware_handoff/releases/tag/v0.9
+.. _PSA Crypto API specification: https://armmbed.github.io/mbed-crypto/html/
diff --git a/drivers/auth/mbedtls/mbedtls_common.mk b/drivers/auth/mbedtls/mbedtls_common.mk
index 79c4512..376b6b7 100644
--- a/drivers/auth/mbedtls/mbedtls_common.mk
+++ b/drivers/auth/mbedtls/mbedtls_common.mk
@@ -23,7 +23,11 @@
ifeq (${MBEDTLS_MAJOR}, 2)
MBEDTLS_CONFIG_FILE ?= "<drivers/auth/mbedtls/mbedtls_config-2.h>"
else ifeq (${MBEDTLS_MAJOR}, 3)
- MBEDTLS_CONFIG_FILE ?= "<drivers/auth/mbedtls/mbedtls_config-3.h>"
+ ifeq (${PSA_CRYPTO},1)
+ MBEDTLS_CONFIG_FILE ?= "<drivers/auth/mbedtls/psa_mbedtls_config.h>"
+ else
+ MBEDTLS_CONFIG_FILE ?= "<drivers/auth/mbedtls/mbedtls_config-3.h>"
+ endif
endif
$(eval $(call add_define,MBEDTLS_CONFIG_FILE))
@@ -77,6 +81,18 @@
LIBMBEDTLS_CFLAGS += -Wno-error=redundant-decls
endif
+ifeq (${PSA_CRYPTO},1)
+LIBMBEDTLS_SRCS += $(addprefix ${MBEDTLS_DIR}/library/, \
+ psa_crypto.c \
+ psa_crypto_client.c \
+ psa_crypto_driver_wrappers.c \
+ psa_crypto_hash.c \
+ psa_crypto_rsa.c \
+ psa_crypto_ecp.c \
+ psa_crypto_slot_management.c \
+ )
+endif
+
# The platform may define the variable 'TF_MBEDTLS_KEY_ALG' to select the key
# algorithm to use. If the variable is not defined, select it based on
# algorithm used for key generation `KEY_ALG`. If `KEY_ALG` is not defined,
diff --git a/drivers/auth/mbedtls/mbedtls_crypto.mk b/drivers/auth/mbedtls/mbedtls_crypto.mk
index 2a9fbbf..bd36730 100644
--- a/drivers/auth/mbedtls/mbedtls_crypto.mk
+++ b/drivers/auth/mbedtls/mbedtls_crypto.mk
@@ -1,11 +1,16 @@
#
-# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2015-2023, Arm Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
include drivers/auth/mbedtls/mbedtls_common.mk
-MBEDTLS_SOURCES += drivers/auth/mbedtls/mbedtls_crypto.c
-
-
+ifeq (${PSA_CRYPTO},1)
+ # Some of the PSA functions are declared in multiple header files
+ # that triggers this warning.
+ TF_CFLAGS += -Wno-error=redundant-decls
+ MBEDTLS_SOURCES += drivers/auth/mbedtls/mbedtls_psa_crypto.c
+else
+ MBEDTLS_SOURCES += drivers/auth/mbedtls/mbedtls_crypto.c
+endif
diff --git a/drivers/auth/mbedtls/mbedtls_psa_crypto.c b/drivers/auth/mbedtls/mbedtls_psa_crypto.c
new file mode 100644
index 0000000..2fa8e63
--- /dev/null
+++ b/drivers/auth/mbedtls/mbedtls_psa_crypto.c
@@ -0,0 +1,499 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <string.h>
+
+/* mbed TLS headers */
+#include <mbedtls/gcm.h>
+#include <mbedtls/md.h>
+#include <mbedtls/memory_buffer_alloc.h>
+#include <mbedtls/oid.h>
+#include <mbedtls/platform.h>
+#include <mbedtls/version.h>
+#include <mbedtls/x509.h>
+#include <psa/crypto.h>
+#include <psa/crypto_platform.h>
+#include <psa/crypto_types.h>
+#include <psa/crypto_values.h>
+
+#include <common/debug.h>
+#include <drivers/auth/crypto_mod.h>
+#include <drivers/auth/mbedtls/mbedtls_common.h>
+#include <plat/common/platform.h>
+
+#define LIB_NAME "mbed TLS PSA"
+
+#if CRYPTO_SUPPORT == CRYPTO_HASH_CALC_ONLY || \
+CRYPTO_SUPPORT == CRYPTO_AUTH_VERIFY_AND_HASH_CALC
+/*
+ * CRYPTO_MD_MAX_SIZE is sized for the strongest algorithm currently
+ * supported, so ensure the mbed TLS MD maximum size does not exceed it.
+ */
+CASSERT(CRYPTO_MD_MAX_SIZE >= MBEDTLS_MD_MAX_SIZE,
+ assert_mbedtls_md_size_overflow);
+
+#endif /*
+ * CRYPTO_SUPPORT == CRYPTO_HASH_CALC_ONLY || \
+ * CRYPTO_SUPPORT == CRYPTO_AUTH_VERIFY_AND_HASH_CALC
+ */
+
+static inline psa_algorithm_t mbedtls_md_psa_alg_from_type(
+ mbedtls_md_type_t md_type)
+{
+ assert((md_type == MBEDTLS_MD_SHA256) ||
+ (md_type == MBEDTLS_MD_SHA384) ||
+ (md_type == MBEDTLS_MD_SHA512));
+
+ return PSA_ALG_CATEGORY_HASH | (psa_algorithm_t) (md_type + 0x5);
+}
+
+/*
+ * AlgorithmIdentifier ::= SEQUENCE {
+ * algorithm OBJECT IDENTIFIER,
+ * parameters ANY DEFINED BY algorithm OPTIONAL
+ * }
+ *
+ * SubjectPublicKeyInfo ::= SEQUENCE {
+ * algorithm AlgorithmIdentifier,
+ * subjectPublicKey BIT STRING
+ * }
+ *
+ * DigestInfo ::= SEQUENCE {
+ * digestAlgorithm AlgorithmIdentifier,
+ * digest OCTET STRING
+ * }
+ */
+
+/*
+ * We pretend using an external RNG (through MBEDTLS_PSA_CRYPTO_EXTERNAL_RNG
+ * mbedTLS config option) so we need to provide an implementation of
+ * mbedtls_psa_external_get_random(). Provide a fake one, since we do not
+ * actually have any external RNG and TF-A itself doesn't engage in
+ * cryptographic operations that demand randomness.
+ */
+psa_status_t mbedtls_psa_external_get_random(
+ mbedtls_psa_external_random_context_t *context,
+ uint8_t *output, size_t output_size,
+ size_t *output_length)
+{
+ return PSA_ERROR_INSUFFICIENT_ENTROPY;
+}
+
+/*
+ * Initialize the library and export the descriptor
+ */
+static void init(void)
+{
+ /* Initialize mbed TLS */
+ mbedtls_init();
+
+ /* Initialise PSA mbedTLS */
+ psa_status_t status = psa_crypto_init();
+
+ if (status != PSA_SUCCESS) {
+ ERROR("Failed to initialize %s crypto (%d).\n", LIB_NAME, status);
+ panic();
+ }
+
+ INFO("PSA crypto initialized successfully!\n");
+}
+
+#if CRYPTO_SUPPORT == CRYPTO_AUTH_VERIFY_ONLY || \
+CRYPTO_SUPPORT == CRYPTO_AUTH_VERIFY_AND_HASH_CALC
+
+static void construct_psa_key_alg_and_type(mbedtls_pk_type_t pk_alg,
+ mbedtls_md_type_t md_alg,
+ psa_algorithm_t *psa_alg,
+ psa_key_type_t *psa_key_type)
+{
+ psa_algorithm_t psa_md_alg = mbedtls_md_psa_alg_from_type(md_alg);
+
+ switch (pk_alg) {
+ case MBEDTLS_PK_RSASSA_PSS:
+ *psa_alg = PSA_ALG_RSA_PSS(psa_md_alg);
+ *psa_key_type = PSA_KEY_TYPE_RSA_PUBLIC_KEY;
+ break;
+ default:
+ *psa_alg = PSA_ALG_NONE;
+ *psa_key_type = PSA_KEY_TYPE_NONE;
+ break;
+ }
+}
+
+/*
+ * Verify a signature.
+ *
+ * Parameters are passed using the DER encoding format following the ASN.1
+ * structures detailed above.
+ */
+static int verify_signature(void *data_ptr, unsigned int data_len,
+ void *sig_ptr, unsigned int sig_len,
+ void *sig_alg, unsigned int sig_alg_len,
+ void *pk_ptr, unsigned int pk_len)
+{
+ mbedtls_asn1_buf sig_oid, sig_params;
+ mbedtls_asn1_buf signature;
+ mbedtls_md_type_t md_alg;
+ mbedtls_pk_type_t pk_alg;
+ int rc;
+ void *sig_opts = NULL;
+ unsigned char *p, *end;
+
+ /* construct PSA key algo and type */
+ psa_status_t status = PSA_SUCCESS;
+ psa_key_attributes_t psa_key_attr = PSA_KEY_ATTRIBUTES_INIT;
+ psa_key_id_t psa_key_id = PSA_KEY_ID_NULL;
+ psa_key_type_t psa_key_type;
+ psa_algorithm_t psa_alg;
+
+ /* Get pointers to signature OID and parameters */
+ p = (unsigned char *)sig_alg;
+ end = (unsigned char *)(p + sig_alg_len);
+ rc = mbedtls_asn1_get_alg(&p, end, &sig_oid, &sig_params);
+ if (rc != 0) {
+ return CRYPTO_ERR_SIGNATURE;
+ }
+
+ /* Get the actual signature algorithm (MD + PK) */
+ rc = mbedtls_x509_get_sig_alg(&sig_oid, &sig_params, &md_alg, &pk_alg, &sig_opts);
+ if (rc != 0) {
+ return CRYPTO_ERR_SIGNATURE;
+ }
+
+ /* Get the signature (bitstring) */
+ p = (unsigned char *)sig_ptr;
+ end = (unsigned char *)(p + sig_len);
+ signature.tag = *p;
+ rc = mbedtls_asn1_get_bitstring_null(&p, end, &signature.len);
+ if ((rc != 0) || ((size_t)(end - p) != signature.len)) {
+ rc = CRYPTO_ERR_SIGNATURE;
+ goto end2;
+ }
+ signature.p = p;
+
+ /* Convert this pk_alg and md_alg to PSA key type and key algorithm */
+ construct_psa_key_alg_and_type(pk_alg, md_alg,
+ &psa_alg, &psa_key_type);
+
+
+ if ((psa_alg == PSA_ALG_NONE) || (psa_key_type == PSA_KEY_TYPE_NONE)) {
+ rc = CRYPTO_ERR_SIGNATURE;
+ goto end2;
+ }
+
+ /* filled-in key_attributes */
+ psa_set_key_algorithm(&psa_key_attr, psa_alg);
+ psa_set_key_type(&psa_key_attr, psa_key_type);
+ psa_set_key_usage_flags(&psa_key_attr, PSA_KEY_USAGE_VERIFY_MESSAGE);
+
+ /* Get the key_id using import API */
+ status = psa_import_key(&psa_key_attr,
+ pk_ptr,
+ (size_t)pk_len,
+ &psa_key_id);
+
+ if (status != PSA_SUCCESS) {
+ rc = CRYPTO_ERR_SIGNATURE;
+ goto end2;
+ }
+
+ /*
+ * Hash calculation and Signature verification of the given data payload
+ * is wrapped under the psa_verify_message function.
+ */
+ status = psa_verify_message(psa_key_id, psa_alg,
+ data_ptr, data_len,
+ signature.p, signature.len);
+
+ if (status != PSA_SUCCESS) {
+ rc = CRYPTO_ERR_SIGNATURE;
+ goto end1;
+ }
+
+ /* Signature verification success */
+ rc = CRYPTO_SUCCESS;
+
+end1:
+ /*
+ * Destroy the key if it is created successfully
+ */
+ psa_destroy_key(psa_key_id);
+end2:
+ mbedtls_free(sig_opts);
+ return rc;
+}
+
+/*
+ * Match a hash
+ *
+ * Digest info is passed in DER format following the ASN.1 structure detailed
+ * above.
+ */
+static int verify_hash(void *data_ptr, unsigned int data_len,
+ void *digest_info_ptr, unsigned int digest_info_len)
+{
+ mbedtls_asn1_buf hash_oid, params;
+ mbedtls_md_type_t md_alg;
+ unsigned char *p, *end, *hash;
+ size_t len;
+ int rc;
+ psa_status_t status;
+ psa_algorithm_t psa_md_alg;
+
+ /*
+ * Digest info should be an MBEDTLS_ASN1_SEQUENCE, but padding after
+ * it is allowed. This is necessary to support multiple hash
+ * algorithms.
+ */
+ p = (unsigned char *)digest_info_ptr;
+ end = p + digest_info_len;
+ rc = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED |
+ MBEDTLS_ASN1_SEQUENCE);
+ if (rc != 0) {
+ return CRYPTO_ERR_HASH;
+ }
+
+ end = p + len;
+
+ /* Get the hash algorithm */
+ rc = mbedtls_asn1_get_alg(&p, end, &hash_oid, ¶ms);
+ if (rc != 0) {
+ return CRYPTO_ERR_HASH;
+ }
+
+ /* Hash should be octet string type and consume all bytes */
+ rc = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_OCTET_STRING);
+ if ((rc != 0) || ((size_t)(end - p) != len)) {
+ return CRYPTO_ERR_HASH;
+ }
+ hash = p;
+
+ rc = mbedtls_oid_get_md_alg(&hash_oid, &md_alg);
+ if (rc != 0) {
+ return CRYPTO_ERR_HASH;
+ }
+
+ /* convert the md_alg to psa_algo */
+ psa_md_alg = mbedtls_md_psa_alg_from_type(md_alg);
+
+ /* Length of hash must match the algorithm's size */
+ if (len != PSA_HASH_LENGTH(psa_md_alg)) {
+ return CRYPTO_ERR_HASH;
+ }
+
+ /*
+ * Calculate Hash and compare it against the retrieved hash from
+ * the certificate (one shot API).
+ */
+ status = psa_hash_compare(psa_md_alg,
+ data_ptr, (size_t)data_len,
+ (const uint8_t *)hash, len);
+
+ if (status != PSA_SUCCESS) {
+ return CRYPTO_ERR_HASH;
+ }
+
+ return CRYPTO_SUCCESS;
+}
+#endif /*
+ * CRYPTO_SUPPORT == CRYPTO_AUTH_VERIFY_ONLY || \
+ * CRYPTO_SUPPORT == CRYPTO_AUTH_VERIFY_AND_HASH_CALC
+ */
+
+#if CRYPTO_SUPPORT == CRYPTO_HASH_CALC_ONLY || \
+CRYPTO_SUPPORT == CRYPTO_AUTH_VERIFY_AND_HASH_CALC
+/*
+ * Map a generic crypto message digest algorithm to the corresponding macro used
+ * by Mbed TLS.
+ */
+static inline mbedtls_md_type_t md_type(enum crypto_md_algo algo)
+{
+ switch (algo) {
+ case CRYPTO_MD_SHA512:
+ return MBEDTLS_MD_SHA512;
+ case CRYPTO_MD_SHA384:
+ return MBEDTLS_MD_SHA384;
+ case CRYPTO_MD_SHA256:
+ return MBEDTLS_MD_SHA256;
+ default:
+ /* Invalid hash algorithm. */
+ return MBEDTLS_MD_NONE;
+ }
+}
+
+/*
+ * Calculate a hash
+ *
+ * output points to the computed hash
+ */
+static int calc_hash(enum crypto_md_algo md_algo, void *data_ptr,
+ unsigned int data_len,
+ unsigned char output[CRYPTO_MD_MAX_SIZE])
+{
+ size_t hash_length;
+ psa_status_t status;
+ psa_algorithm_t psa_md_alg;
+
+ /* convert the md_alg to psa_algo */
+ psa_md_alg = mbedtls_md_psa_alg_from_type(md_type(md_algo));
+
+ /*
+ * Calculate the hash of the data, it is safe to pass the
+ * 'output' hash buffer pointer considering its size is always
+ * bigger than or equal to MBEDTLS_MD_MAX_SIZE.
+ */
+ status = psa_hash_compute(psa_md_alg, data_ptr, (size_t)data_len,
+ (uint8_t *)output, CRYPTO_MD_MAX_SIZE,
+ &hash_length);
+ if (status != PSA_SUCCESS) {
+ return CRYPTO_ERR_HASH;
+ }
+
+ return CRYPTO_SUCCESS;
+}
+#endif /*
+ * CRYPTO_SUPPORT == CRYPTO_HASH_CALC_ONLY || \
+ * CRYPTO_SUPPORT == CRYPTO_AUTH_VERIFY_AND_HASH_CALC
+ */
+
+#if TF_MBEDTLS_USE_AES_GCM
+/*
+ * Stack based buffer allocation for decryption operation. It could
+ * be configured to balance stack usage vs execution speed.
+ */
+#define DEC_OP_BUF_SIZE 128
+
+static int aes_gcm_decrypt(void *data_ptr, size_t len, const void *key,
+ unsigned int key_len, const void *iv,
+ unsigned int iv_len, const void *tag,
+ unsigned int tag_len)
+{
+ mbedtls_gcm_context ctx;
+ mbedtls_cipher_id_t cipher = MBEDTLS_CIPHER_ID_AES;
+ unsigned char buf[DEC_OP_BUF_SIZE];
+ unsigned char tag_buf[CRYPTO_MAX_TAG_SIZE];
+ unsigned char *pt = data_ptr;
+ size_t dec_len;
+ int diff, i, rc;
+ size_t output_length __unused;
+
+ mbedtls_gcm_init(&ctx);
+
+ rc = mbedtls_gcm_setkey(&ctx, cipher, key, key_len * 8);
+ if (rc != 0) {
+ rc = CRYPTO_ERR_DECRYPTION;
+ goto exit_gcm;
+ }
+
+#if (MBEDTLS_VERSION_MAJOR < 3)
+ rc = mbedtls_gcm_starts(&ctx, MBEDTLS_GCM_DECRYPT, iv, iv_len, NULL, 0);
+#else
+ rc = mbedtls_gcm_starts(&ctx, MBEDTLS_GCM_DECRYPT, iv, iv_len);
+#endif
+ if (rc != 0) {
+ rc = CRYPTO_ERR_DECRYPTION;
+ goto exit_gcm;
+ }
+
+ while (len > 0) {
+ dec_len = MIN(sizeof(buf), len);
+
+#if (MBEDTLS_VERSION_MAJOR < 3)
+ rc = mbedtls_gcm_update(&ctx, dec_len, pt, buf);
+#else
+ rc = mbedtls_gcm_update(&ctx, pt, dec_len, buf, sizeof(buf), &output_length);
+#endif
+
+ if (rc != 0) {
+ rc = CRYPTO_ERR_DECRYPTION;
+ goto exit_gcm;
+ }
+
+ memcpy(pt, buf, dec_len);
+ pt += dec_len;
+ len -= dec_len;
+ }
+
+#if (MBEDTLS_VERSION_MAJOR < 3)
+ rc = mbedtls_gcm_finish(&ctx, tag_buf, sizeof(tag_buf));
+#else
+ rc = mbedtls_gcm_finish(&ctx, NULL, 0, &output_length, tag_buf, sizeof(tag_buf));
+#endif
+
+ if (rc != 0) {
+ rc = CRYPTO_ERR_DECRYPTION;
+ goto exit_gcm;
+ }
+
+ /* Check tag in "constant-time" */
+ for (diff = 0, i = 0; i < tag_len; i++)
+ diff |= ((const unsigned char *)tag)[i] ^ tag_buf[i];
+
+ if (diff != 0) {
+ rc = CRYPTO_ERR_DECRYPTION;
+ goto exit_gcm;
+ }
+
+ /* GCM decryption success */
+ rc = CRYPTO_SUCCESS;
+
+exit_gcm:
+ mbedtls_gcm_free(&ctx);
+ return rc;
+}
+
+/*
+ * Authenticated decryption of an image
+ */
+static int auth_decrypt(enum crypto_dec_algo dec_algo, void *data_ptr,
+ size_t len, const void *key, unsigned int key_len,
+ unsigned int key_flags, const void *iv,
+ unsigned int iv_len, const void *tag,
+ unsigned int tag_len)
+{
+ int rc;
+
+ assert((key_flags & ENC_KEY_IS_IDENTIFIER) == 0);
+
+ switch (dec_algo) {
+ case CRYPTO_GCM_DECRYPT:
+ rc = aes_gcm_decrypt(data_ptr, len, key, key_len, iv, iv_len,
+ tag, tag_len);
+ if (rc != 0)
+ return rc;
+ break;
+ default:
+ return CRYPTO_ERR_DECRYPTION;
+ }
+
+ return CRYPTO_SUCCESS;
+}
+#endif /* TF_MBEDTLS_USE_AES_GCM */
+
+/*
+ * Register crypto library descriptor
+ */
+#if CRYPTO_SUPPORT == CRYPTO_AUTH_VERIFY_AND_HASH_CALC
+#if TF_MBEDTLS_USE_AES_GCM
+REGISTER_CRYPTO_LIB(LIB_NAME, init, verify_signature, verify_hash, calc_hash,
+ auth_decrypt, NULL);
+#else
+REGISTER_CRYPTO_LIB(LIB_NAME, init, verify_signature, verify_hash, calc_hash,
+ NULL, NULL);
+#endif
+#elif CRYPTO_SUPPORT == CRYPTO_AUTH_VERIFY_ONLY
+#if TF_MBEDTLS_USE_AES_GCM
+REGISTER_CRYPTO_LIB(LIB_NAME, init, verify_signature, verify_hash, NULL,
+ auth_decrypt, NULL);
+#else
+REGISTER_CRYPTO_LIB(LIB_NAME, init, verify_signature, verify_hash, NULL,
+ NULL, NULL);
+#endif
+#elif CRYPTO_SUPPORT == CRYPTO_HASH_CALC_ONLY
+REGISTER_CRYPTO_LIB(LIB_NAME, init, NULL, NULL, calc_hash, NULL, NULL);
+#endif /* CRYPTO_SUPPORT == CRYPTO_AUTH_VERIFY_AND_HASH_CALC */
diff --git a/include/arch/aarch64/el3_common_macros.S b/include/arch/aarch64/el3_common_macros.S
index fa9310e..536d807 100644
--- a/include/arch/aarch64/el3_common_macros.S
+++ b/include/arch/aarch64/el3_common_macros.S
@@ -59,43 +59,14 @@
* zero here but are updated ahead of transitioning to a lower EL in the
* function cm_init_context_common().
*
- * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at
- * EL2, EL1 and EL0 are not trapped to EL3.
- *
- * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at
- * EL2, EL1 and EL0 are not trapped to EL3.
- *
* SCR_EL3.SIF: Set to one to disable instruction fetches from
* Non-secure memory.
*
- * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from
- * both Security states and both Execution states.
- *
* SCR_EL3.EA: Set to one to route External Aborts and SError Interrupts
* to EL3 when executing at any EL.
- *
- * SCR_EL3.{API,APK}: For Armv8.3 pointer authentication feature,
- * disable traps to EL3 when accessing key registers or using pointer
- * authentication instructions from lower ELs.
* ---------------------------------------------------------------------
*/
- mov_imm x0, ((SCR_RESET_VAL | SCR_EA_BIT | SCR_SIF_BIT) \
- & ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT))
-#if CTX_INCLUDE_PAUTH_REGS
- /*
- * If the pointer authentication registers are saved during world
- * switches, enable pointer authentication everywhere, as it is safe to
- * do so.
- */
- orr x0, x0, #(SCR_API_BIT | SCR_APK_BIT)
-#endif
-#if ENABLE_RME
- /*
- * TODO: Settting the EEL2 bit to allow EL3 access to secure only registers
- * in context management. This will need to be refactored.
- */
- orr x0, x0, #SCR_EEL2_BIT
-#endif
+ mov_imm x0, (SCR_RESET_VAL | SCR_EA_BIT | SCR_SIF_BIT)
msr scr_el3, x0
/* ---------------------------------------------------------------------
@@ -132,25 +103,9 @@
/* ---------------------------------------------------------------------
* Initialise CPTR_EL3, setting all fields rather than relying on hw.
* All fields are architecturally UNKNOWN on reset.
- *
- * CPTR_EL3.TCPAC: Set to zero so that any accesses to CPACR_EL1,
- * CPTR_EL2, CPACR, or HCPTR do not trap to EL3.
- *
- * CPTR_EL3.TFP: Set to zero so that accesses to the V- or Z- registers
- * by Advanced SIMD, floating-point or SVE instructions (if implemented)
- * do not trap to EL3.
- *
- * CPTR_EL3.TAM: Set to one so that Activity Monitor access is
- * trapped to EL3 by default.
- *
- * CPTR_EL3.EZ: Set to zero so that all SVE functionality is trapped
- * to EL3 by default.
- *
- * CPTR_EL3.ESM: Set to zero so that all SME functionality is trapped
- * to EL3 by default.
+ * ---------------------------------------------------------------------
*/
-
- mov_imm x0, (CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TFP_BIT))
+ mov_imm x0, CPTR_EL3_RESET_VAL
msr cptr_el3, x0
/*
diff --git a/include/drivers/auth/mbedtls/psa_mbedtls_config.h b/include/drivers/auth/mbedtls/psa_mbedtls_config.h
new file mode 100644
index 0000000..ad825f0
--- /dev/null
+++ b/include/drivers/auth/mbedtls/psa_mbedtls_config.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2023, Arm Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PSA_MBEDTLS_CONFIG_H
+#define PSA_MBEDTLS_CONFIG_H
+
+#include "mbedtls_config-3.h"
+
+#define MBEDTLS_PSA_CRYPTO_C
+
+/*
+ * Using PSA crypto API requires an RNG right now. If we don't define the macro
+ * below then we get build errors.
+ *
+ * This is a functionality gap in mbedTLS. The technical limitation is that
+ * psa_crypto_init() is all-or-nothing, and fixing that would require separate
+ * initialization of the keystore, the RNG, etc.
+ *
+ * By defining MBEDTLS_PSA_CRYPTO_EXTERNAL_RNG, we pretend using an external
+ * RNG. As a result, the PSA crypto init code does nothing when it comes to
+ * initializing the RNG, as we are supposed to take care of that ourselves.
+ */
+#define MBEDTLS_PSA_CRYPTO_EXTERNAL_RNG
+
+#endif /* PSA_MBEDTLS_CONFIG_H */
diff --git a/include/lib/cpus/aarch64/cortex_x3.h b/include/lib/cpus/aarch64/cortex_x3.h
index e648734..04548ea 100644
--- a/include/lib/cpus/aarch64/cortex_x3.h
+++ b/include/lib/cpus/aarch64/cortex_x3.h
@@ -38,4 +38,13 @@
#define CORTEX_X3_CPUACTLR5_EL1_BIT_55 (ULL(1) << 55)
#define CORTEX_X3_CPUACTLR5_EL1_BIT_56 (ULL(1) << 56)
+/*******************************************************************************
+ * CPU Extended Control register 2 specific definitions.
+ ******************************************************************************/
+#define CORTEX_X3_CPUECTLR2_EL1 S3_0_C15_C1_5
+
+#define CORTEX_X3_CPUECTLR2_EL1_PF_MODE_LSB U(11)
+#define CORTEX_X3_CPUECTLR2_EL1_PF_MODE_WIDTH U(4)
+#define CORTEX_X3_CPUECTLR2_EL1_PF_MODE_CNSRV ULL(0x9)
+
#endif /* CORTEX_X3_H */
diff --git a/lib/cpus/aarch64/cortex_x3.S b/lib/cpus/aarch64/cortex_x3.S
index 98d148e..0cb3b97 100644
--- a/lib/cpus/aarch64/cortex_x3.S
+++ b/lib/cpus/aarch64/cortex_x3.S
@@ -26,6 +26,13 @@
wa_cve_2022_23960_bhb_vector_table CORTEX_X3_BHB_LOOP_COUNT, cortex_x3
#endif /* WORKAROUND_CVE_2022_23960 */
+workaround_reset_start cortex_x3, ERRATUM(2070301), ERRATA_X3_2070301
+ sysreg_bitfield_insert CORTEX_X3_CPUECTLR2_EL1, CORTEX_X3_CPUECTLR2_EL1_PF_MODE_CNSRV, \
+ CORTEX_X3_CPUECTLR2_EL1_PF_MODE_LSB, CORTEX_X3_CPUECTLR2_EL1_PF_MODE_WIDTH
+workaround_reset_end cortex_x3, ERRATUM(2070301)
+
+check_erratum_ls cortex_x3, ERRATUM(2070301), CPU_REV(1, 2)
+
workaround_runtime_start cortex_x3, ERRATUM(2313909), ERRATA_X3_2313909
sysreg_bit_set CORTEX_X3_CPUACTLR2_EL1, CORTEX_X3_CPUACTLR2_EL1_BIT_36
workaround_runtime_end cortex_x3, ERRATUM(2313909), NO_ISB
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 77cc41e..e12795f 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -734,6 +734,11 @@
# still open.
CPU_FLAG_LIST += ERRATA_X2_2768515
+# Flag to apply erratum 2070301 workaround on reset. This erratum applies
+# to revisions r0p0, r1p0, r1p1 and r1p2 of the Cortex-X3 cpu and is
+# still open.
+CPU_FLAG_LIST += ERRATA_X3_2070301
+
# Flag to apply erratum 2313909 workaround on powerdown. This erratum applies
# to revisions r0p0 and r1p0 of the Cortex-X3 cpu, it is fixed in r1p1.
CPU_FLAG_LIST += ERRATA_X3_2313909
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index f47e779..290a93a 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -557,7 +557,6 @@
msr spsel, #MODE_SP_ELX
str x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
-#if IMAGE_BL31
/* ----------------------------------------------------------
* Restore CPTR_EL3.
* ZCR is only restored if SVE is supported and enabled.
@@ -567,6 +566,7 @@
ldp x19, x20, [sp, #CTX_EL3STATE_OFFSET + CTX_CPTR_EL3]
msr cptr_el3, x19
+#if IMAGE_BL31
ands x19, x19, #CPTR_EZ_BIT
beq sve_not_enabled
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index b16c113..3a7e50f 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -135,16 +135,6 @@
}
#endif /* CTX_INCLUDE_MTE_REGS */
- /* Enable S-EL2 if the next EL is EL2 and S-EL2 is present */
- if ((GET_EL(ep->spsr) == MODE_EL2) && is_feat_sel2_supported()) {
- if (GET_RW(ep->spsr) != MODE_RW_64) {
- ERROR("S-EL2 can not be used in AArch32\n.");
- panic();
- }
-
- scr_el3 |= SCR_EEL2_BIT;
- }
-
write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
/*
@@ -197,21 +187,33 @@
/* SCR_NS: Set the NS bit */
scr_el3 |= SCR_NS_BIT;
+ /* Allow access to Allocation Tags when MTE is implemented. */
+ scr_el3 |= SCR_ATA_BIT;
+
#if !CTX_INCLUDE_PAUTH_REGS
/*
- * If the pointer authentication registers aren't saved during world
- * switches the value of the registers can be leaked from the Secure to
- * the Non-secure world. To prevent this, rather than enabling pointer
- * authentication everywhere, we only enable it in the Non-secure world.
+	 * The Pointer Authentication feature, if present, is always enabled
+	 * by default for Non-secure lower exception levels. We do not have an
+ * flag to set it.
+ * CTX_INCLUDE_PAUTH_REGS flag, is explicitly used to enable for lower
+ * exception levels of secure and realm worlds.
*
- * If the Secure world wants to use pointer authentication,
- * CTX_INCLUDE_PAUTH_REGS must be set to 1.
+ * To prevent the leakage between the worlds during world switch,
+ * we enable it only for the non-secure world.
+ *
+ * If the Secure/realm world wants to use pointer authentication,
+ * CTX_INCLUDE_PAUTH_REGS must be explicitly set to 1, in which case
+ * it will be enabled globally for all the contexts.
+ *
+ * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs
+ * other than EL3
+ *
+ * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs other
+ * than EL3
*/
scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
-#endif /* !CTX_INCLUDE_PAUTH_REGS */
- /* Allow access to Allocation Tags when MTE is implemented. */
- scr_el3 |= SCR_ATA_BIT;
+#endif /* CTX_INCLUDE_PAUTH_REGS */
#if HANDLE_EA_EL3_FIRST_NS
/* SCR_EL3.EA: Route External Abort and SError Interrupt to EL3. */
@@ -258,15 +260,6 @@
write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_SCTLR_EL2,
sctlr_el2);
- /*
- * Program the ICC_SRE_EL2 to make sure the correct bits are set
- * when restoring NS context.
- */
- u_register_t icc_sre_el2 = ICC_SRE_DIB_BIT | ICC_SRE_DFB_BIT |
- ICC_SRE_EN_BIT | ICC_SRE_SRE_BIT;
- write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_ICC_SRE_EL2,
- icc_sre_el2);
-
if (is_feat_hcx_supported()) {
/*
* Initialize register HCRX_EL2 with its init value.
@@ -308,25 +301,53 @@
******************************************************************************/
static void setup_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
+ u_register_t cptr_el3;
u_register_t scr_el3;
el3_state_t *state;
gp_regs_t *gp_regs;
+ state = get_el3state_ctx(ctx);
+
/* Clear any residual register values from the context */
zeromem(ctx, sizeof(*ctx));
/*
+ * The lower-EL context is zeroed so that no stale values leak to a world.
+ * It is assumed that an all-zero lower-EL context is good enough for it
+ * to boot correctly. However, there are very few registers where this
+ * is not true and some values need to be recreated.
+ */
+#if CTX_INCLUDE_EL2_REGS
+ el2_sysregs_t *el2_ctx = get_el2_sysregs_ctx(ctx);
+
+ /*
+ * These bits are set in the gicv3 driver. Losing them (especially the
+	 * SRE bit) is problematic for all worlds. Hence, recreate them.
+ */
+ u_register_t icc_sre_el2 = ICC_SRE_DIB_BIT | ICC_SRE_DFB_BIT |
+ ICC_SRE_EN_BIT | ICC_SRE_SRE_BIT;
+ write_ctx_reg(el2_ctx, CTX_ICC_SRE_EL2, icc_sre_el2);
+#endif /* CTX_INCLUDE_EL2_REGS */
+
+ /* Start with a clean SCR_EL3 copy as all relevant values are set */
+ scr_el3 = SCR_RESET_VAL;
+
+ /*
- * SCR_EL3 was initialised during reset sequence in macro
- * el3_arch_init_common. This code modifies the SCR_EL3 fields that
- * affect the next EL.
+ * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at
+ * EL2, EL1 and EL0 are not trapped to EL3.
*
- * The following fields are initially set to zero and then updated to
- * the required value depending on the state of the SPSR_EL3 and the
- * Security state and entrypoint attributes of the next EL.
+ * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at
+ * EL2, EL1 and EL0 are not trapped to EL3.
+ *
+ * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from
+ * both Security states and both Execution states.
+ *
+ * SCR_EL3.SIF: Set to one to disable secure instruction execution from
+ * Non-secure memory.
*/
- scr_el3 = read_scr();
- scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_EA_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT |
- SCR_ST_BIT | SCR_HCE_BIT | SCR_NSE_BIT);
+ scr_el3 &= ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT);
+
+ scr_el3 |= SCR_SIF_BIT;
/*
* SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for next
@@ -368,6 +389,19 @@
scr_el3 |= SCR_FIEN_BIT;
#endif
+#if CTX_INCLUDE_PAUTH_REGS
+ /*
+ * Enable Pointer Authentication globally for all the worlds.
+ *
+ * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs
+ * other than EL3
+ *
+ * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs other
+ * than EL3
+ */
+ scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
+#endif /* CTX_INCLUDE_PAUTH_REGS */
+
/*
* SCR_EL3.TCR2EN: Enable access to TCR2_ELx for AArch64 if present.
*/
@@ -391,10 +425,19 @@
}
/*
- * CPTR_EL3 was initialized out of reset, copy that value to the
- * context register.
+ * Initialise CPTR_EL3, setting all fields rather than relying on hw.
+ * All fields are architecturally UNKNOWN on reset.
+ *
+ * CPTR_EL3.TFP: Set to zero so that accesses to the V- or Z- registers
+ * by Advanced SIMD, floating-point or SVE instructions (if
+ * implemented) do not trap to EL3.
+ *
+ * CPTR_EL3.TCPAC: Set to zero so that accesses to CPACR_EL1,
+	 * CPTR_EL2, CPACR, or HCPTR do not trap to EL3.
*/
- write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, read_cptr_el3());
+ cptr_el3 = CPTR_EL3_RESET_VAL & ~(TFP_BIT | TCPAC_BIT);
+
+ write_ctx_reg(state, CTX_CPTR_EL3, cptr_el3);
/*
* SCR_EL3.HCE: Enable HVC instructions if next execution state is
@@ -431,12 +474,18 @@
/* Enable WFE delay */
scr_el3 |= SCR_TWEDEn_BIT;
}
+
+#if IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2
+ /* Enable S-EL2 if FEAT_SEL2 is implemented for all the contexts. */
+ if (is_feat_sel2_supported()) {
+ scr_el3 |= SCR_EEL2_BIT;
+ }
+#endif /* (IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2) */
/*
* Populate EL3 state so that we've the right context
* before doing ERET
*/
- state = get_el3state_ctx(ctx);
write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);
@@ -1032,7 +1081,20 @@
write_ctx_reg(ctx, CTX_HCR_EL2, read_hcr_el2());
write_ctx_reg(ctx, CTX_HPFAR_EL2, read_hpfar_el2());
write_ctx_reg(ctx, CTX_HSTR_EL2, read_hstr_el2());
+
+ /*
+ * Set the NS bit to be able to access the ICC_SRE_EL2 register
+ * TODO: remove with root context
+ */
+ u_register_t scr_el3 = read_scr_el3();
+
+ write_scr_el3(scr_el3 | SCR_NS_BIT);
+ isb();
write_ctx_reg(ctx, CTX_ICC_SRE_EL2, read_icc_sre_el2());
+
+ write_scr_el3(scr_el3);
+ isb();
+
write_ctx_reg(ctx, CTX_ICH_HCR_EL2, read_ich_hcr_el2());
write_ctx_reg(ctx, CTX_ICH_VMCR_EL2, read_ich_vmcr_el2());
write_ctx_reg(ctx, CTX_MAIR_EL2, read_mair_el2());
@@ -1069,7 +1131,20 @@
write_hcr_el2(read_ctx_reg(ctx, CTX_HCR_EL2));
write_hpfar_el2(read_ctx_reg(ctx, CTX_HPFAR_EL2));
write_hstr_el2(read_ctx_reg(ctx, CTX_HSTR_EL2));
+
+ /*
+ * Set the NS bit to be able to access the ICC_SRE_EL2 register
+ * TODO: remove with root context
+ */
+ u_register_t scr_el3 = read_scr_el3();
+
+ write_scr_el3(scr_el3 | SCR_NS_BIT);
+ isb();
write_icc_sre_el2(read_ctx_reg(ctx, CTX_ICC_SRE_EL2));
+
+ write_scr_el3(scr_el3);
+ isb();
+
write_ich_hcr_el2(read_ctx_reg(ctx, CTX_ICH_HCR_EL2));
write_ich_vmcr_el2(read_ctx_reg(ctx, CTX_ICH_VMCR_EL2));
write_mair_el2(read_ctx_reg(ctx, CTX_MAIR_EL2));
@@ -1092,87 +1167,71 @@
******************************************************************************/
void cm_el2_sysregs_context_save(uint32_t security_state)
{
- u_register_t scr_el3 = read_scr();
-
- /*
- * Always save the non-secure and realm EL2 context, only save the
- * S-EL2 context if S-EL2 is enabled.
- */
- if ((security_state != SECURE) ||
- ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
- cpu_context_t *ctx;
- el2_sysregs_t *el2_sysregs_ctx;
+ cpu_context_t *ctx;
+ el2_sysregs_t *el2_sysregs_ctx;
- ctx = cm_get_context(security_state);
- assert(ctx != NULL);
+ ctx = cm_get_context(security_state);
+ assert(ctx != NULL);
- el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);
+ el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);
- el2_sysregs_context_save_common(el2_sysregs_ctx);
+ el2_sysregs_context_save_common(el2_sysregs_ctx);
#if CTX_INCLUDE_MTE_REGS
- write_ctx_reg(el2_sysregs_ctx, CTX_TFSR_EL2, read_tfsr_el2());
+ write_ctx_reg(el2_sysregs_ctx, CTX_TFSR_EL2, read_tfsr_el2());
#endif
- if (is_feat_mpam_supported()) {
- el2_sysregs_context_save_mpam(el2_sysregs_ctx);
- }
+ if (is_feat_mpam_supported()) {
+ el2_sysregs_context_save_mpam(el2_sysregs_ctx);
+ }
- if (is_feat_fgt_supported()) {
- el2_sysregs_context_save_fgt(el2_sysregs_ctx);
- }
+ if (is_feat_fgt_supported()) {
+ el2_sysregs_context_save_fgt(el2_sysregs_ctx);
+ }
- if (is_feat_ecv_v2_supported()) {
- write_ctx_reg(el2_sysregs_ctx, CTX_CNTPOFF_EL2,
- read_cntpoff_el2());
- }
+ if (is_feat_ecv_v2_supported()) {
+ write_ctx_reg(el2_sysregs_ctx, CTX_CNTPOFF_EL2, read_cntpoff_el2());
+ }
- if (is_feat_vhe_supported()) {
- write_ctx_reg(el2_sysregs_ctx, CTX_CONTEXTIDR_EL2,
- read_contextidr_el2());
- write_ctx_reg(el2_sysregs_ctx, CTX_TTBR1_EL2,
- read_ttbr1_el2());
- }
+ if (is_feat_vhe_supported()) {
+ write_ctx_reg(el2_sysregs_ctx, CTX_CONTEXTIDR_EL2, read_contextidr_el2());
+ write_ctx_reg(el2_sysregs_ctx, CTX_TTBR1_EL2, read_ttbr1_el2());
+ }
- if (is_feat_ras_supported()) {
- write_ctx_reg(el2_sysregs_ctx, CTX_VDISR_EL2,
- read_vdisr_el2());
- write_ctx_reg(el2_sysregs_ctx, CTX_VSESR_EL2,
- read_vsesr_el2());
- }
+ if (is_feat_ras_supported()) {
+ write_ctx_reg(el2_sysregs_ctx, CTX_VDISR_EL2, read_vdisr_el2());
+ write_ctx_reg(el2_sysregs_ctx, CTX_VSESR_EL2, read_vsesr_el2());
+ }
- if (is_feat_nv2_supported()) {
- write_ctx_reg(el2_sysregs_ctx, CTX_VNCR_EL2,
- read_vncr_el2());
- }
+ if (is_feat_nv2_supported()) {
+ write_ctx_reg(el2_sysregs_ctx, CTX_VNCR_EL2, read_vncr_el2());
+ }
- if (is_feat_trf_supported()) {
- write_ctx_reg(el2_sysregs_ctx, CTX_TRFCR_EL2, read_trfcr_el2());
- }
+ if (is_feat_trf_supported()) {
+ write_ctx_reg(el2_sysregs_ctx, CTX_TRFCR_EL2, read_trfcr_el2());
+ }
- if (is_feat_csv2_2_supported()) {
- write_ctx_reg(el2_sysregs_ctx, CTX_SCXTNUM_EL2,
- read_scxtnum_el2());
- }
+ if (is_feat_csv2_2_supported()) {
+ write_ctx_reg(el2_sysregs_ctx, CTX_SCXTNUM_EL2, read_scxtnum_el2());
+ }
- if (is_feat_hcx_supported()) {
- write_ctx_reg(el2_sysregs_ctx, CTX_HCRX_EL2, read_hcrx_el2());
- }
- if (is_feat_tcr2_supported()) {
- write_ctx_reg(el2_sysregs_ctx, CTX_TCR2_EL2, read_tcr2_el2());
- }
- if (is_feat_sxpie_supported()) {
- write_ctx_reg(el2_sysregs_ctx, CTX_PIRE0_EL2, read_pire0_el2());
- write_ctx_reg(el2_sysregs_ctx, CTX_PIR_EL2, read_pir_el2());
- }
- if (is_feat_s2pie_supported()) {
- write_ctx_reg(el2_sysregs_ctx, CTX_S2PIR_EL2, read_s2pir_el2());
- }
- if (is_feat_sxpoe_supported()) {
- write_ctx_reg(el2_sysregs_ctx, CTX_POR_EL2, read_por_el2());
- }
- if (is_feat_gcs_supported()) {
- write_ctx_reg(el2_sysregs_ctx, CTX_GCSPR_EL2, read_gcspr_el2());
- write_ctx_reg(el2_sysregs_ctx, CTX_GCSCR_EL2, read_gcscr_el2());
- }
+ if (is_feat_hcx_supported()) {
+ write_ctx_reg(el2_sysregs_ctx, CTX_HCRX_EL2, read_hcrx_el2());
+ }
+ if (is_feat_tcr2_supported()) {
+ write_ctx_reg(el2_sysregs_ctx, CTX_TCR2_EL2, read_tcr2_el2());
+ }
+ if (is_feat_sxpie_supported()) {
+ write_ctx_reg(el2_sysregs_ctx, CTX_PIRE0_EL2, read_pire0_el2());
+ write_ctx_reg(el2_sysregs_ctx, CTX_PIR_EL2, read_pir_el2());
+ }
+ if (is_feat_s2pie_supported()) {
+ write_ctx_reg(el2_sysregs_ctx, CTX_S2PIR_EL2, read_s2pir_el2());
+ }
+ if (is_feat_sxpoe_supported()) {
+ write_ctx_reg(el2_sysregs_ctx, CTX_POR_EL2, read_por_el2());
+ }
+ if (is_feat_gcs_supported()) {
+ write_ctx_reg(el2_sysregs_ctx, CTX_GCSPR_EL2, read_gcspr_el2());
+ write_ctx_reg(el2_sysregs_ctx, CTX_GCSCR_EL2, read_gcscr_el2());
}
}
@@ -1181,81 +1240,70 @@
******************************************************************************/
void cm_el2_sysregs_context_restore(uint32_t security_state)
{
- u_register_t scr_el3 = read_scr();
-
- /*
- * Always restore the non-secure and realm EL2 context, only restore the
- * S-EL2 context if S-EL2 is enabled.
- */
- if ((security_state != SECURE) ||
- ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
- cpu_context_t *ctx;
- el2_sysregs_t *el2_sysregs_ctx;
+ cpu_context_t *ctx;
+ el2_sysregs_t *el2_sysregs_ctx;
- ctx = cm_get_context(security_state);
- assert(ctx != NULL);
+ ctx = cm_get_context(security_state);
+ assert(ctx != NULL);
- el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);
+ el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);
- el2_sysregs_context_restore_common(el2_sysregs_ctx);
+ el2_sysregs_context_restore_common(el2_sysregs_ctx);
#if CTX_INCLUDE_MTE_REGS
- write_tfsr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TFSR_EL2));
+ write_tfsr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TFSR_EL2));
#endif
- if (is_feat_mpam_supported()) {
- el2_sysregs_context_restore_mpam(el2_sysregs_ctx);
- }
+ if (is_feat_mpam_supported()) {
+ el2_sysregs_context_restore_mpam(el2_sysregs_ctx);
+ }
- if (is_feat_fgt_supported()) {
- el2_sysregs_context_restore_fgt(el2_sysregs_ctx);
- }
+ if (is_feat_fgt_supported()) {
+ el2_sysregs_context_restore_fgt(el2_sysregs_ctx);
+ }
- if (is_feat_ecv_v2_supported()) {
- write_cntpoff_el2(read_ctx_reg(el2_sysregs_ctx,
- CTX_CNTPOFF_EL2));
- }
+ if (is_feat_ecv_v2_supported()) {
+ write_cntpoff_el2(read_ctx_reg(el2_sysregs_ctx, CTX_CNTPOFF_EL2));
+ }
- if (is_feat_vhe_supported()) {
- write_contextidr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_CONTEXTIDR_EL2));
- write_ttbr1_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TTBR1_EL2));
- }
+ if (is_feat_vhe_supported()) {
+ write_contextidr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_CONTEXTIDR_EL2));
+ write_ttbr1_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TTBR1_EL2));
+ }
- if (is_feat_ras_supported()) {
- write_vdisr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VDISR_EL2));
- write_vsesr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VSESR_EL2));
- }
+ if (is_feat_ras_supported()) {
+ write_vdisr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VDISR_EL2));
+ write_vsesr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VSESR_EL2));
+ }
- if (is_feat_nv2_supported()) {
- write_vncr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VNCR_EL2));
- }
- if (is_feat_trf_supported()) {
- write_trfcr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TRFCR_EL2));
- }
+ if (is_feat_nv2_supported()) {
+ write_vncr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VNCR_EL2));
+ }
+ if (is_feat_trf_supported()) {
+ write_trfcr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TRFCR_EL2));
+ }
- if (is_feat_csv2_2_supported()) {
- write_scxtnum_el2(read_ctx_reg(el2_sysregs_ctx,
- CTX_SCXTNUM_EL2));
- }
+ if (is_feat_csv2_2_supported()) {
+ write_scxtnum_el2(read_ctx_reg(el2_sysregs_ctx, CTX_SCXTNUM_EL2));
+ }
- if (is_feat_hcx_supported()) {
- write_hcrx_el2(read_ctx_reg(el2_sysregs_ctx, CTX_HCRX_EL2));
- }
- if (is_feat_tcr2_supported()) {
- write_tcr2_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TCR2_EL2));
- }
- if (is_feat_sxpie_supported()) {
- write_pire0_el2(read_ctx_reg(el2_sysregs_ctx, CTX_PIRE0_EL2));
- write_pir_el2(read_ctx_reg(el2_sysregs_ctx, CTX_PIR_EL2));
- }
- if (is_feat_s2pie_supported()) {
- write_s2pir_el2(read_ctx_reg(el2_sysregs_ctx, CTX_S2PIR_EL2));
- }
- if (is_feat_sxpoe_supported()) {
- write_por_el2(read_ctx_reg(el2_sysregs_ctx, CTX_POR_EL2));
- }
- if (is_feat_gcs_supported()) {
- write_gcscr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_GCSCR_EL2));
- write_gcspr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_GCSPR_EL2));
- }
+ if (is_feat_hcx_supported()) {
+ write_hcrx_el2(read_ctx_reg(el2_sysregs_ctx, CTX_HCRX_EL2));
+ }
+ if (is_feat_tcr2_supported()) {
+ write_tcr2_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TCR2_EL2));
+ }
+ if (is_feat_sxpie_supported()) {
+ write_pire0_el2(read_ctx_reg(el2_sysregs_ctx, CTX_PIRE0_EL2));
+ write_pir_el2(read_ctx_reg(el2_sysregs_ctx, CTX_PIR_EL2));
+ }
+ if (is_feat_s2pie_supported()) {
+ write_s2pir_el2(read_ctx_reg(el2_sysregs_ctx, CTX_S2PIR_EL2));
+ }
+ if (is_feat_sxpoe_supported()) {
+ write_por_el2(read_ctx_reg(el2_sysregs_ctx, CTX_POR_EL2));
+ }
+ if (is_feat_gcs_supported()) {
+ write_gcscr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_GCSCR_EL2));
+ write_gcspr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_GCSPR_EL2));
}
}
#endif /* CTX_INCLUDE_EL2_REGS */
@@ -1279,18 +1327,6 @@
(el_implemented(2U) != EL_IMPL_NONE));
#endif /* ENABLE_ASSERTIONS */
- /*
- * Set the NS bit to be able to access the ICC_SRE_EL2
- * register when restoring context.
- */
- write_scr_el3(read_scr_el3() | SCR_NS_BIT);
-
- /*
- * Ensure the NS bit change is committed before the EL2/EL1
- * state restoration.
- */
- isb();
-
/* Restore EL2 and EL1 sysreg contexts */
cm_el2_sysregs_context_restore(NON_SECURE);
cm_el1_sysregs_context_restore(NON_SECURE);
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index 8c5ef0b..53bdb55 100644
--- a/lib/extensions/amu/aarch64/amu.c
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -69,16 +69,6 @@
((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}
-static inline __unused void ctx_write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
-{
- uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);
-
- value &= ~TAM_BIT;
- value |= (tam << TAM_SHIFT) & TAM_BIT;
-
- write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
-}
-
static inline __unused void ctx_write_scr_el3_amvoffen(cpu_context_t *ctx, uint64_t amvoffen)
{
uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
@@ -194,7 +184,10 @@
* Set CPTR_EL3.TAM to zero so that any accesses to the Activity Monitor
* registers do not trap to EL3.
*/
- ctx_write_cptr_el3_tam(ctx, 0U);
+ u_register_t cptr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);
+
+ cptr_el3 &= ~TAM_BIT;
+ write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, cptr_el3);
/* Initialize FEAT_AMUv1p1 features if present. */
if (is_feat_amuv1p1_supported()) {
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index b7e6f99..321ae9f 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -359,3 +359,6 @@
# By default, disable SPMD Logical partitions
ENABLE_SPMD_LP := 0
+
+# By default, disable PSA crypto (use MbedTLS legacy crypto API).
+PSA_CRYPTO := 0
diff --git a/plat/xilinx/common/plat_fdt.c b/plat/xilinx/common/plat_fdt.c
index 911f664..de5d1a1 100644
--- a/plat/xilinx/common/plat_fdt.c
+++ b/plat/xilinx/common/plat_fdt.c
@@ -15,61 +15,84 @@
void prepare_dtb(void)
{
+#if defined(XILINX_OF_BOARD_DTB_ADDR)
void *dtb;
- int ret;
-#if !defined(XILINX_OF_BOARD_DTB_ADDR)
- return;
-#else
+ int map_ret = 0;
+ int ret = 0;
+
dtb = (void *)XILINX_OF_BOARD_DTB_ADDR;
-#endif
- if (IS_TFA_IN_OCM(BL31_BASE))
- return;
+
+ if (!IS_TFA_IN_OCM(BL31_BASE)) {
#if defined(PLAT_XLAT_TABLES_DYNAMIC)
- ret = mmap_add_dynamic_region((unsigned long long)dtb,
- (uintptr_t)dtb,
- XILINX_OF_BOARD_DTB_MAX_SIZE,
- MT_MEMORY | MT_RW | MT_NS);
- if (ret != 0) {
- WARN("Failed to add dynamic region for dtb: error %d\n", ret);
- return;
- }
+ map_ret = mmap_add_dynamic_region((unsigned long long)dtb,
+ (uintptr_t)dtb,
+ XILINX_OF_BOARD_DTB_MAX_SIZE,
+ MT_MEMORY | MT_RW | MT_NS);
+ if (map_ret != 0) {
+ WARN("Failed to add dynamic region for dtb: error %d\n",
+ map_ret);
+ }
#endif
- /* Return if no device tree is detected */
- if (fdt_check_header(dtb) != 0) {
- NOTICE("Can't read DT at %p\n", dtb);
- return;
- }
+ if (!map_ret) {
+ /* Return if no device tree is detected */
+ if (fdt_check_header(dtb) != 0) {
+ NOTICE("Can't read DT at %p\n", dtb);
+ } else {
+ ret = fdt_open_into(dtb, dtb, XILINX_OF_BOARD_DTB_MAX_SIZE);
- ret = fdt_open_into(dtb, dtb, XILINX_OF_BOARD_DTB_MAX_SIZE);
- if (ret < 0) {
- ERROR("Invalid Device Tree at %p: error %d\n", dtb, ret);
- return;
- }
+ if (ret < 0) {
+ ERROR("Invalid Device Tree at %p: error %d\n",
+ dtb, ret);
+ } else {
- /* Reserve memory used by Trusted Firmware. */
- if (fdt_add_reserved_memory(dtb, "tf-a", BL31_BASE, BL31_LIMIT - BL31_BASE)) {
- WARN("Failed to add reserved memory nodes for BL31 to DT.\n");
- return;
- }
+ if (dt_add_psci_node(dtb)) {
+ WARN("Failed to add PSCI Device Tree node\n");
+ }
- ret = fdt_pack(dtb);
- if (ret < 0) {
- ERROR("Failed to pack Device Tree at %p: error %d\n", dtb, ret);
- return;
- }
+ if (dt_add_psci_cpu_enable_methods(dtb)) {
+ WARN("Failed to add PSCI cpu enable methods in DT\n");
+ }
- flush_dcache_range((uintptr_t)dtb, fdt_blob_size(dtb));
+ /* Reserve memory used by Trusted Firmware. */
+ ret = fdt_add_reserved_memory(dtb,
+ "tf-a",
+ BL31_BASE,
+ BL31_LIMIT
+ -
+ BL31_BASE);
+ if (ret < 0) {
+ WARN("Failed to add reserved memory nodes for BL31 to DT.\n");
+ }
+
+ ret = fdt_pack(dtb);
+ if (ret < 0) {
+ WARN("Failed to pack dtb at %p: error %d\n",
+ dtb, ret);
+ }
+ flush_dcache_range((uintptr_t)dtb,
+ fdt_blob_size(dtb));
+
+ INFO("Changed device tree to advertise PSCI and reserved memories.\n");
+
+ }
+ }
+
+ }
+
#if defined(PLAT_XLAT_TABLES_DYNAMIC)
- ret = mmap_remove_dynamic_region((uintptr_t)dtb,
+ if (!map_ret) {
+ ret = mmap_remove_dynamic_region((uintptr_t)dtb,
XILINX_OF_BOARD_DTB_MAX_SIZE);
- if (ret != 0) {
- WARN("Failed to remove dynamic region for dtb: error %d\n", ret);
- return;
- }
+ if (ret != 0) {
+			WARN("Failed to remove dynamic region for dtb: error %d\n",
+ ret);
+ }
+ }
#endif
+ }
- INFO("Changed device tree to advertise PSCI and reserved memories.\n");
+#endif
}
diff --git a/plat/xilinx/zynqmp/bl31_zynqmp_setup.c b/plat/xilinx/zynqmp/bl31_zynqmp_setup.c
index 7bdaf18..32bb982 100644
--- a/plat/xilinx/zynqmp/bl31_zynqmp_setup.c
+++ b/plat/xilinx/zynqmp/bl31_zynqmp_setup.c
@@ -21,6 +21,7 @@
#include <plat/common/platform.h>
#include <custom_svc.h>
+#include <plat_fdt.h>
#include <plat_private.h>
#include <plat_startup.h>
#include <zynqmp_def.h>
@@ -183,55 +184,9 @@
}
#endif
-#if (defined(XILINX_OF_BOARD_DTB_ADDR) && !IS_TFA_IN_OCM(BL31_BASE))
-static void prepare_dtb(void)
-{
- void *dtb = (void *)XILINX_OF_BOARD_DTB_ADDR;
- int ret;
-
- /* Return if no device tree is detected */
- if (fdt_check_header(dtb) != 0) {
- NOTICE("Can't read DT at %p\n", dtb);
- return;
- }
-
- ret = fdt_open_into(dtb, dtb, XILINX_OF_BOARD_DTB_MAX_SIZE);
- if (ret < 0) {
- ERROR("Invalid Device Tree at %p: error %d\n", dtb, ret);
- return;
- }
-
- if (dt_add_psci_node(dtb)) {
- ERROR("Failed to add PSCI Device Tree node\n");
- return;
- }
-
- if (dt_add_psci_cpu_enable_methods(dtb)) {
- ERROR("Failed to add PSCI cpu enable methods in Device Tree\n");
- return;
- }
-
- /* Reserve memory used by Trusted Firmware. */
- if (fdt_add_reserved_memory(dtb, "tf-a", BL31_BASE,
- (size_t) (BL31_LIMIT - BL31_BASE))) {
- WARN("Failed to add reserved memory nodes for BL31 to DT.\n");
- }
-
- ret = fdt_pack(dtb);
- if (ret < 0) {
- ERROR("Failed to pack Device Tree at %p: error %d\n", dtb, ret);
- }
-
- clean_dcache_range((uintptr_t)dtb, fdt_blob_size(dtb));
- INFO("Changed device tree to advertise PSCI and reserved memories.\n");
-}
-#endif
-
void bl31_platform_setup(void)
{
-#if (defined(XILINX_OF_BOARD_DTB_ADDR) && !IS_TFA_IN_OCM(BL31_BASE))
prepare_dtb();
-#endif
/* Initialize the gic cpu and distributor interfaces */
plat_arm_gic_driver_init();
diff --git a/plat/xilinx/zynqmp/platform.mk b/plat/xilinx/zynqmp/platform.mk
index 9c79855..e20cb22 100644
--- a/plat/xilinx/zynqmp/platform.mk
+++ b/plat/xilinx/zynqmp/platform.mk
@@ -127,6 +127,7 @@
${LIBFDT_SRCS} \
plat/xilinx/common/ipi_mailbox_service/ipi_mailbox_svc.c \
plat/xilinx/common/plat_startup.c \
+ plat/xilinx/common/plat_fdt.c \
plat/xilinx/zynqmp/bl31_zynqmp_setup.c \
plat/xilinx/zynqmp/plat_psci.c \
plat/xilinx/zynqmp/plat_zynqmp.c \
diff --git a/services/std_svc/errata_abi/errata_abi_main.c b/services/std_svc/errata_abi/errata_abi_main.c
index ca66396..c0a089b 100644
--- a/services/std_svc/errata_abi/errata_abi_main.c
+++ b/services/std_svc/errata_abi/errata_abi_main.c
@@ -428,10 +428,11 @@
{
.cpu_partnumber = CORTEX_X3_MIDR,
.cpu_errata_list = {
- [0] = {2313909, 0x00, 0x10, ERRATA_X3_2313909},
- [1] = {2615812, 0x00, 0x11, ERRATA_X3_2615812},
- [2] = {2742421, 0x00, 0x11, ERRATA_X3_2742421},
- [3 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ [0] = {2070301, 0x00, 0x12, ERRATA_X3_2070301},
+ [1] = {2313909, 0x00, 0x10, ERRATA_X3_2313909},
+ [2] = {2615812, 0x00, 0x11, ERRATA_X3_2615812},
+ [3] = {2742421, 0x00, 0x11, ERRATA_X3_2742421},
+ [4 ... ERRATA_LIST_END] = UNDEF_ERRATA,
}
},
#endif /* CORTEX_X3_H_INC */