Merge "build(npm): update Node.js and all packages" into integration
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index ed48311..962c362 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -476,21 +476,33 @@
bl handle_sysreg_trap
/*
* returns:
- * -1: unhandled trap, panic
+ * -1: unhandled trap, UNDEF injection into lower EL
* 0: handled trap, return to the trapping instruction (repeating it)
* 1: handled trap, return to the next instruction
*/
tst w0, w0
- b.mi elx_panic /* negative return value: panic */
- b.eq 1f /* zero: do not change ELR_EL3 */
- /* advance the PC to continue after the instruction */
+ b.mi 2f /* negative: undefined exception injection */
+ b.eq 1f /* zero: do not change ELR_EL3 */
+ /* positive: advance the PC to continue after the instruction */
ldr x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
add x1, x1, #4
str x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
1:
b el3_exit
+2:
+ /*
+ * UNDEF injection into the lower EL: support is only provided for
+ * lower ELs in AArch64 mode; an AArch32 lower EL still goes through
+ * elx_panic as before.
+ */
+ mrs x0, spsr_el3
+ tst x0, #(SPSR_M_MASK << SPSR_M_SHIFT)
+ b.ne elx_panic
+ /* Pass context pointer as an argument to inject_undef64 */
+ mov x0, x19
+ bl inject_undef64
+ b el3_exit
smc_unknown:
/*
diff --git a/bl31/bl31_traps.c b/bl31/bl31_traps.c
index 2cfe14a..d14a91e 100644
--- a/bl31/bl31_traps.c
+++ b/bl31/bl31_traps.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022, ARM Limited. All rights reserved.
+ * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
* Copyright (c) 2023, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,8 +7,11 @@
* Dispatch synchronous system register traps from lower ELs.
*/
+#include <arch_features.h>
+#include <arch_helpers.h>
#include <bl31/sync_handle.h>
#include <context.h>
+#include <lib/el3_runtime/context_mgmt.h>
int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx)
{
@@ -28,3 +31,205 @@
return TRAP_RET_UNHANDLED;
}
+
+static bool is_tge_enabled(void)
+{
+ u_register_t hcr_el2 = read_hcr_el2();
+
+ return ((read_feat_vhe_id_field() != 0U) && ((hcr_el2 & HCR_TGE_BIT) != 0U));
+}
+
+/*
+ * This function ensures that UNDEF injection does not target the
+ * non-existent S-EL2. This can happen when a trap is taken from S-EL{1,0}
+ * while the non-secure world runs with the TGE bit set: EL3 does not
+ * save/restore EL2 registers when only one world has EL2 enabled, so
+ * reading hcr_el2.TGE returns the NS world's value.
+ */
+static bool is_secure_trap_without_sel2(u_register_t scr)
+{
+ return ((scr & (SCR_NS_BIT | SCR_EEL2_BIT)) == 0);
+}
+
+static unsigned int target_el(unsigned int from_el, u_register_t scr)
+{
+ if (from_el > MODE_EL1) {
+ return from_el;
+ } else if (is_tge_enabled() && !is_secure_trap_without_sel2(scr)) {
+ return MODE_EL2;
+ } else {
+ return MODE_EL1;
+ }
+}
+
+static u_register_t get_elr_el3(u_register_t spsr_el3, u_register_t vbar, unsigned int target_el)
+{
+ unsigned int outgoing_el = GET_EL(spsr_el3);
+ u_register_t elr_el3 = 0;
+
+ if (outgoing_el == target_el) {
+ /*
+ * Target EL is either EL1 or EL2; the LSB of SPSR.M gives the SPSel
+ * at the time of the exception:
+ * Thread mode (SP_EL0) : 0
+ * Handler mode (SP_ELx) : 1
+ */
+ if ((spsr_el3 & (MODE_SP_MASK << MODE_SP_SHIFT)) == MODE_SP_ELX) {
+ elr_el3 = vbar + CURRENT_EL_SPX;
+ } else {
+ elr_el3 = vbar + CURRENT_EL_SP0;
+ }
+ } else {
+ /* Vector address for a lower EL using AArch64 */
+ elr_el3 = vbar + LOWER_EL_AARCH64;
+ }
+
+ return elr_el3;
+}
+
+/*
+ * Explicitly create all bits of SPSR to get PSTATE at exception return.
+ *
+ * The code is based on "Aarch64.exceptions.takeexception" described in
+ * DDI0602 revision 2023-06.
+ * "https://developer.arm.com/documentation/ddi0602/2023-06/Shared-Pseudocode/
+ * aarch64-exceptions-takeexception"
+ *
+ * NOTE: This piece of code must be reviewed every release to ensure that
+ * we keep up with new architecture features that introduce new SPSR bits.
+ */
+static u_register_t create_spsr(u_register_t old_spsr, unsigned int target_el)
+{
+ u_register_t new_spsr = 0;
+ u_register_t sctlr;
+
+ /* Set M bits for target EL in AArch64 mode, also get sctlr */
+ if (target_el == MODE_EL2) {
+ sctlr = read_sctlr_el2();
+ new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL2H;
+ } else {
+ sctlr = read_sctlr_el1();
+ new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL1H;
+ }
+
+ /* Mask all exceptions, update DAIF bits */
+ new_spsr |= SPSR_DAIF_MASK << SPSR_DAIF_SHIFT;
+
+ /* If FEAT_BTI is present, clear BTYPE bits */
+ new_spsr |= old_spsr & (SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
+ if (is_armv8_5_bti_present()) {
+ new_spsr &= ~(SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
+ }
+
+ /* If SSBS is implemented, take the value from SCTLR.DSSBS */
+ new_spsr |= old_spsr & SPSR_SSBS_BIT_AARCH64;
+ if (is_feat_ssbs_present()) {
+ if ((sctlr & SCTLR_DSSBS_BIT) != 0U) {
+ new_spsr |= SPSR_SSBS_BIT_AARCH64;
+ } else {
+ new_spsr &= ~SPSR_SSBS_BIT_AARCH64;
+ }
+ }
+
+ /* If FEAT_NMI is implemented, ALLINT = !(SCTLR.SPINTMASK) */
+ new_spsr |= old_spsr & SPSR_ALLINT_BIT_AARCH64;
+ if (is_feat_nmi_present()) {
+ if ((sctlr & SCTLR_SPINTMASK_BIT) != 0U) {
+ new_spsr &= ~SPSR_ALLINT_BIT_AARCH64;
+ } else {
+ new_spsr |= SPSR_ALLINT_BIT_AARCH64;
+ }
+ }
+
+ /* Clear PSTATE.IL bit explicitly */
+ new_spsr &= ~SPSR_IL_BIT;
+
+ /* Clear PSTATE.SS bit explicitly */
+ new_spsr &= ~SPSR_SS_BIT;
+
+ /* Update PSTATE.PAN bit */
+ new_spsr |= old_spsr & SPSR_PAN_BIT;
+ if (is_feat_pan_present() &&
+ ((target_el == MODE_EL1) || ((target_el == MODE_EL2) && is_tge_enabled())) &&
+ ((sctlr & SCTLR_SPAN_BIT) == 0U)) {
+ new_spsr |= SPSR_PAN_BIT;
+ }
+
+ /* Clear UAO bit if FEAT_UAO is present */
+ new_spsr |= old_spsr & SPSR_UAO_BIT_AARCH64;
+ if (is_feat_uao_present()) {
+ new_spsr &= ~SPSR_UAO_BIT_AARCH64;
+ }
+
+ /* DIT bits are unchanged */
+ new_spsr |= old_spsr & SPSR_DIT_BIT;
+
+ /* If FEAT_MTE2 is implemented, mask tag faults by setting the TCO bit */
+ new_spsr |= old_spsr & SPSR_TCO_BIT_AARCH64;
+ if (read_feat_mte_id_field() >= MTE_IMPLEMENTED_ELX) {
+ new_spsr |= SPSR_TCO_BIT_AARCH64;
+ }
+
+ /* NZCV bits are unchanged */
+ new_spsr |= old_spsr & SPSR_NZCV;
+
+ /* If FEAT_EBEP is present, set the PM bit */
+ new_spsr |= old_spsr & SPSR_PM_BIT_AARCH64;
+ if (is_feat_ebep_present()) {
+ new_spsr |= SPSR_PM_BIT_AARCH64;
+ }
+
+ /* If FEAT_SEBEP is present, clear the PPEND bit */
+ new_spsr |= old_spsr & SPSR_PPEND_BIT;
+ if (is_feat_sebep_present()) {
+ new_spsr &= ~SPSR_PPEND_BIT;
+ }
+
+ /* If FEAT_GCS is present, update EXLOCK bit */
+ new_spsr |= old_spsr & SPSR_EXLOCK_BIT_AARCH64;
+ if (is_feat_gcs_present()) {
+ u_register_t gcscr;
+ if (target_el == MODE_EL2) {
+ gcscr = read_gcscr_el2();
+ } else {
+ gcscr = read_gcscr_el1();
+ }
+ new_spsr |= (gcscr & GCSCR_EXLOCK_EN_BIT) ? SPSR_EXLOCK_BIT_AARCH64 : 0;
+ }
+
+ return new_spsr;
+}
+
+/*
+ * Handler for injecting an Undefined exception into a lower EL when that
+ * EL accesses a system register the (older) EL3 firmware is unaware of.
+ *
+ * This is a safety net to avoid EL3 panics caused by system register
+ * accesses that trigger the exception syndrome EC=0x18.
+ */
+void inject_undef64(cpu_context_t *ctx)
+{
+ u_register_t esr = (EC_UNKNOWN << ESR_EC_SHIFT) | ESR_IL_BIT;
+ el3_state_t *state = get_el3state_ctx(ctx);
+ u_register_t elr_el3 = read_ctx_reg(state, CTX_ELR_EL3);
+ u_register_t old_spsr = read_ctx_reg(state, CTX_SPSR_EL3);
+ u_register_t scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
+ u_register_t new_spsr = 0;
+ unsigned int to_el = target_el(GET_EL(old_spsr), scr_el3);
+
+ if (to_el == MODE_EL2) {
+ write_elr_el2(elr_el3);
+ elr_el3 = get_elr_el3(old_spsr, read_vbar_el2(), to_el);
+ write_esr_el2(esr);
+ write_spsr_el2(old_spsr);
+ } else {
+ write_elr_el1(elr_el3);
+ elr_el3 = get_elr_el3(old_spsr, read_vbar_el1(), to_el);
+ write_esr_el1(esr);
+ write_spsr_el1(old_spsr);
+ }
+
+ new_spsr = create_spsr(old_spsr, to_el);
+
+ write_ctx_reg(state, CTX_SPSR_EL3, new_spsr);
+ write_ctx_reg(state, CTX_ELR_EL3, elr_el3);
+}
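
As an aside on the vector-entry selection above: get_elr_el3() picks the offset
within the target EL's vector table depending on whether the injected exception
stays at the same EL (SP0 vs SPx entry) or comes from a lower EL in AArch64.
Below is a minimal standalone sketch of just that selection, using local copies
of the constants added to arch.h in this series; vector_entry() itself is a
hypothetical illustration, not TF-A code.

    #include <stdint.h>
    #include <stdio.h>

    /* Local copies of the vector offsets added to arch.h. */
    #define CURRENT_EL_SP0    0x0ULL
    #define CURRENT_EL_SPX    0x200ULL
    #define LOWER_EL_AARCH64  0x400ULL

    /* sp_handler_mode mirrors SPSR.M[0]: 0 = thread (SP_EL0), 1 = handler (SP_ELx). */
    static uint64_t vector_entry(unsigned int from_el, unsigned int to_el,
                                 uint64_t vbar, unsigned int sp_handler_mode)
    {
        if (from_el == to_el) {
            /* Same EL: choose between the SP0 and SPx vector entries. */
            return vbar + (sp_handler_mode ? CURRENT_EL_SPX : CURRENT_EL_SP0);
        }
        /* Exception taken from a lower EL running AArch64. */
        return vbar + LOWER_EL_AARCH64;
    }

    int main(void)
    {
        uint64_t vbar = 0x80000000ULL;                  /* arbitrary VBAR value */
        printf("EL1h -> EL1: 0x%llx\n",
               (unsigned long long)vector_entry(1, 1, vbar, 1)); /* vbar + 0x200 */
        printf("EL0  -> EL1: 0x%llx\n",
               (unsigned long long)vector_entry(0, 1, vbar, 0)); /* vbar + 0x400 */
        return 0;
    }
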
diff --git a/drivers/partition/partition.c b/drivers/partition/partition.c
index c60820d..555fe7f 100644
--- a/drivers/partition/partition.c
+++ b/drivers/partition/partition.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -94,9 +94,8 @@
* If partition numbers could be found, check & update it.
*/
static int load_gpt_header(uintptr_t image_handle, size_t header_offset,
- unsigned long long *part_lba)
+ gpt_header_t *header)
{
- gpt_header_t header;
size_t bytes_read;
int result;
uint32_t header_crc, calc_crc;
@@ -107,7 +106,7 @@
header_offset);
return result;
}
- result = io_read(image_handle, (uintptr_t)&header,
+ result = io_read(image_handle, (uintptr_t)header,
sizeof(gpt_header_t), &bytes_read);
if ((result != 0) || (sizeof(gpt_header_t) != bytes_read)) {
VERBOSE("GPT header read error(%i) or read mismatch occurred,"
@@ -115,8 +114,8 @@
sizeof(gpt_header_t), bytes_read);
return result;
}
- if (memcmp(header.signature, GPT_SIGNATURE,
- sizeof(header.signature)) != 0) {
+ if (memcmp(header->signature, GPT_SIGNATURE,
+ sizeof(header->signature)) != 0) {
VERBOSE("GPT header signature failure\n");
return -EINVAL;
}
@@ -126,25 +125,24 @@
* computed by setting this field to 0, and computing the
* 32-bit CRC for HeaderSize bytes.
*/
- header_crc = header.header_crc;
- header.header_crc = 0U;
+ header_crc = header->header_crc;
+ header->header_crc = 0U;
- calc_crc = tf_crc32(0U, (uint8_t *)&header, sizeof(gpt_header_t));
+ calc_crc = tf_crc32(0U, (uint8_t *)header, sizeof(gpt_header_t));
if (header_crc != calc_crc) {
ERROR("Invalid GPT Header CRC: Expected 0x%x but got 0x%x.\n",
header_crc, calc_crc);
return -EINVAL;
}
- header.header_crc = header_crc;
+ header->header_crc = header_crc;
/* partition numbers can't exceed PLAT_PARTITION_MAX_ENTRIES */
- list.entry_count = header.list_num;
+ list.entry_count = header->list_num;
if (list.entry_count > PLAT_PARTITION_MAX_ENTRIES) {
list.entry_count = PLAT_PARTITION_MAX_ENTRIES;
}
- *part_lba = header.part_lba;
return 0;
}
@@ -231,12 +229,13 @@
* Retrieve each entry in the partition table, parse the data from each
* entry and store them in the list of partition table entries.
*/
-static int load_partition_gpt(uintptr_t image_handle,
- unsigned long long part_lba)
+static int load_partition_gpt(uintptr_t image_handle, gpt_header_t header)
{
- const signed long long gpt_entry_offset = LBA(part_lba);
+ const signed long long gpt_entry_offset = LBA(header.part_lba);
gpt_entry_t entry;
- int result, i;
+ int result;
+ unsigned int i;
+ uint32_t calc_crc = 0U;
result = io_seek(image_handle, IO_SEEK_SET, gpt_entry_offset);
if (result != 0) {
@@ -245,23 +244,36 @@
return result;
}
- for (i = 0; i < list.entry_count; i++) {
+ for (i = 0; i < (unsigned int)list.entry_count; i++) {
result = load_gpt_entry(image_handle, &entry);
if (result != 0) {
- VERBOSE("Failed to load gpt entry data(%i) error is (%i)\n",
+ VERBOSE("Failed to load gpt entry data(%u) error is (%i)\n",
i, result);
return result;
}
result = parse_gpt_entry(&entry, &list.list[i]);
if (result != 0) {
+ result = io_seek(image_handle, IO_SEEK_SET,
+ (gpt_entry_offset + (i * sizeof(gpt_entry_t))));
+ if (result != 0) {
+ VERBOSE("Failed to seek (%i)\n", result);
+ return result;
+ }
break;
}
+
+ /*
+ * Accumulate the CRC of the partition entry array, to be compared
+ * against the CRC value in the header
+ */
+ calc_crc = tf_crc32(calc_crc, (uint8_t *)&entry, sizeof(gpt_entry_t));
}
if (i == 0) {
VERBOSE("No Valid GPT Entries found\n");
return -EINVAL;
}
+
/*
* Only records the valid partition number that is loaded from
* partition table.
@@ -269,6 +281,29 @@
list.entry_count = i;
dump_entries(list.entry_count);
+ /*
+ * If there are fewer valid entries than the number of entries the
+ * header advertises, keep loading the remaining partition entries so
+ * that the full CRC can be computed and checked against the partition
+ * entry array CRC from the header.
+ */
+ for (; i < header.list_num; i++) {
+ result = load_gpt_entry(image_handle, &entry);
+ if (result != 0) {
+ VERBOSE("Failed to load gpt entry data(%u) error is (%i)\n",
+ i, result);
+ return result;
+ }
+
+ calc_crc = tf_crc32(calc_crc, (uint8_t *)&entry, sizeof(gpt_entry_t));
+ }
+
+ if (header.part_crc != calc_crc) {
+ ERROR("Invalid GPT Partition Array Entry CRC: Expected 0x%x"
+ " but got 0x%x.\n", header.part_crc, calc_crc);
+ return -EINVAL;
+ }
+
return 0;
}
@@ -279,7 +314,7 @@
static int load_backup_gpt(unsigned int image_id, unsigned int sector_nums)
{
int result;
- unsigned long long part_lba = 0;
+ gpt_header_t header;
size_t gpt_header_offset;
uintptr_t dev_handle, image_spec, image_handle;
io_block_spec_t *block_spec;
@@ -316,8 +351,8 @@
INFO("Trying to retrieve back-up GPT header\n");
/* Last block is backup-GPT header, after the end of GPT entries */
gpt_header_offset = LBA(part_num_entries);
- result = load_gpt_header(image_handle, gpt_header_offset, &part_lba);
- if ((result != 0) || (part_lba == 0)) {
+ result = load_gpt_header(image_handle, gpt_header_offset, &header);
+ if ((result != 0) || (header.part_lba == 0)) {
ERROR("Failed to retrieve Backup GPT header,"
"Partition maybe corrupted\n");
goto out;
@@ -327,7 +362,8 @@
* Note we mapped last 33 blocks(LBA-33), first block here starts with
* entries while last block was header.
*/
- result = load_partition_gpt(image_handle, 0);
+ header.part_lba = 0;
+ result = load_partition_gpt(image_handle, header);
out:
io_close(image_handle);
@@ -342,19 +378,19 @@
static int load_primary_gpt(uintptr_t image_handle, unsigned int first_lba)
{
int result;
- unsigned long long part_lba;
size_t gpt_header_offset;
+ gpt_header_t header;
/* Try to load Primary GPT header from LBA1 */
gpt_header_offset = LBA(first_lba);
- result = load_gpt_header(image_handle, gpt_header_offset, &part_lba);
- if ((result != 0) || (part_lba == 0)) {
+ result = load_gpt_header(image_handle, gpt_header_offset, &header);
+ if ((result != 0) || (header.part_lba == 0)) {
VERBOSE("Failed to retrieve Primary GPT header,"
"trying to retrieve back-up GPT header\n");
return result;
}
- return load_partition_gpt(image_handle, part_lba);
+ return load_partition_gpt(image_handle, header);
}
/*
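
For reference, the CRC scheme the reworked loader checks is the standard GPT
one: the header CRC is computed over the header with its header_crc field
zeroed, and the partition entry array CRC is accumulated entry by entry. A
hedged host-side sketch follows, with a reduced hypothetical header layout (the
real gpt_header_t has more fields, and the UEFI spec computes the header CRC
over HeaderSize bytes); crc32_acc() is a local stand-in with the same
accumulating interface as tf_crc32().

    #include <stddef.h>
    #include <stdint.h>

    /* Bitwise reflected CRC-32 (IEEE). crc32_acc(crc32_acc(0, a), b) equals
     * crc32_acc(0, a||b), which is the property the entry loop relies on. */
    static uint32_t crc32_acc(uint32_t crc, const uint8_t *buf, size_t size)
    {
        crc = ~crc;
        while (size-- != 0U) {
            crc ^= *buf++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ ((crc & 1U) ? 0xEDB88320U : 0U);
        }
        return ~crc;
    }

    /* Hypothetical reduced header: just the fields this sketch touches. */
    typedef struct {
        uint8_t  signature[8];
        uint32_t header_crc;
        uint32_t list_num;   /* number of partition entries */
        uint64_t part_lba;
        uint32_t part_crc;   /* CRC of the partition entry array */
    } gpt_header_sketch_t;

    static int check_header_crc(gpt_header_sketch_t *h)
    {
        uint32_t stored = h->header_crc;
        uint32_t calc;

        h->header_crc = 0U;  /* the CRC is defined with this field zeroed */
        calc = crc32_acc(0U, (const uint8_t *)h, sizeof(*h));
        h->header_crc = stored;
        return (calc == stored) ? 0 : -1;
    }

    int main(void)
    {
        gpt_header_sketch_t h = { .signature = "EFI PART", .list_num = 128 };

        h.header_crc = 0U;
        h.header_crc = crc32_acc(0U, (const uint8_t *)&h, sizeof(h));
        return check_header_crc(&h);   /* 0 on success */
    }

The entry-array check in load_partition_gpt() then folds each gpt_entry_t into
a running calc_crc in the same accumulating fashion and compares the final
value against header.part_crc.
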
diff --git a/include/arch/aarch32/arch.h b/include/arch/aarch32/arch.h
index a711753..7e759d81 100644
--- a/include/arch/aarch32/arch.h
+++ b/include/arch/aarch32/arch.h
@@ -163,6 +163,11 @@
#define ID_PFR1_SEC_MASK U(0xf)
#define ID_PFR1_ELx_ENABLED U(1)
+/* ID_PFR2 definitions */
+#define ID_PFR2_SSBS_SHIFT U(4)
+#define ID_PFR2_SSBS_MASK U(0xf)
+#define SSBS_UNAVAILABLE U(0)
+
/* SCTLR definitions */
#define SCTLR_RES1_DEF ((U(1) << 23) | (U(1) << 22) | (U(1) << 4) | \
(U(1) << 3))
@@ -552,6 +557,7 @@
#define ID_DFR1 p15, 0, c0, c3, 5
#define ID_PFR0 p15, 0, c0, c1, 0
#define ID_PFR1 p15, 0, c0, c1, 1
+#define ID_PFR2 p15, 0, c0, c3, 4
#define MAIR0 p15, 0, c10, c2, 0
#define MAIR1 p15, 0, c10, c2, 1
#define TTBCR p15, 0, c2, c0, 2
diff --git a/include/arch/aarch32/arch_features.h b/include/arch/aarch32/arch_features.h
index dd9b7ad..734a6b5 100644
--- a/include/arch/aarch32/arch_features.h
+++ b/include/arch/aarch32/arch_features.h
@@ -128,6 +128,17 @@
return read_feat_pan_id_field() != 0U;
}
+static inline bool is_feat_pan_present(void)
+{
+ return read_feat_pan_id_field() != 0U;
+}
+
+static inline bool is_feat_ssbs_present(void)
+{
+ return ((read_id_pfr2() >> ID_PFR2_SSBS_SHIFT) &
+ ID_PFR2_SSBS_MASK) != SSBS_UNAVAILABLE;
+}
+
/*
* TWED, ECV, CSV2, RAS are only used by the AArch64 EL2 context switch
* code. In fact, EL2 context switching is only needed for AArch64 (since
@@ -164,6 +175,10 @@
static inline bool is_feat_s2pie_supported(void) { return false; }
static inline bool is_feat_s1pie_supported(void) { return false; }
static inline bool is_feat_sxpie_supported(void) { return false; }
+static inline bool is_feat_uao_present(void) { return false; }
+static inline bool is_feat_nmi_present(void) { return false; }
+static inline bool is_feat_ebep_present(void) { return false; }
+static inline bool is_feat_sebep_present(void) { return false; }
static inline unsigned int read_feat_pmuv3_id_field(void)
{
diff --git a/include/arch/aarch32/arch_helpers.h b/include/arch/aarch32/arch_helpers.h
index 3a7c768..3244d3b 100644
--- a/include/arch/aarch32/arch_helpers.h
+++ b/include/arch/aarch32/arch_helpers.h
@@ -224,6 +224,7 @@
DEFINE_COPROCR_READ_FUNC(id_dfr1, ID_DFR1)
DEFINE_COPROCR_READ_FUNC(id_pfr0, ID_PFR0)
DEFINE_COPROCR_READ_FUNC(id_pfr1, ID_PFR1)
+DEFINE_COPROCR_READ_FUNC(id_pfr2, ID_PFR2)
DEFINE_COPROCR_READ_FUNC(isr, ISR)
DEFINE_COPROCR_READ_FUNC(clidr, CLIDR)
DEFINE_COPROCR_READ_FUNC_64(cntpct, CNTPCT_64)
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index b88d6c6..8a4c071 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -75,6 +75,19 @@
#define INVALID_MPID U(0xFFFFFFFF)
/*******************************************************************************
+ * Definitions for Exception vector offsets
+ ******************************************************************************/
+#define CURRENT_EL_SP0 0x0
+#define CURRENT_EL_SPX 0x200
+#define LOWER_EL_AARCH64 0x400
+#define LOWER_EL_AARCH32 0x600
+
+#define SYNC_EXCEPTION 0x0
+#define IRQ_EXCEPTION 0x80
+#define FIQ_EXCEPTION 0x100
+#define SERROR_EXCEPTION 0x180
+
+/*******************************************************************************
* Definitions for CPU system register interface to GICv3
******************************************************************************/
#define ICC_IGRPEN1_EL1 S3_0_C12_C12_7
@@ -231,6 +244,11 @@
#define ID_AA64DFR0_PMUVER_PMUV3P7 U(7)
#define ID_AA64DFR0_PMUVER_IMP_DEF U(0xf)
+/* ID_AA64DFR0_EL1.SEBEP definitions */
+#define ID_AA64DFR0_SEBEP_SHIFT U(24)
+#define ID_AA64DFR0_SEBEP_MASK ULL(0xf)
+#define SEBEP_IMPLEMENTED ULL(1)
+
/* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
#define ID_AA64DFR0_PMS_SHIFT U(32)
#define ID_AA64DFR0_PMS_MASK ULL(0xf)
@@ -253,6 +271,11 @@
#define ID_AA64DFR0_BRBE_MASK ULL(0xf)
#define ID_AA64DFR0_BRBE_SUPPORTED ULL(1)
+/* ID_AA64DFR1_EL1 definitions */
+#define ID_AA64DFR1_EBEP_SHIFT U(48)
+#define ID_AA64DFR1_EBEP_MASK ULL(0xf)
+#define EBEP_IMPLEMENTED ULL(1)
+
/* ID_AA64ISAR0_EL1 definitions */
#define ID_AA64ISAR0_RNDR_SHIFT U(60)
#define ID_AA64ISAR0_RNDR_MASK ULL(0xf)
@@ -358,6 +381,9 @@
#define ID_AA64MMFR2_EL1_CCIDX_MASK ULL(0xf)
#define ID_AA64MMFR2_EL1_CCIDX_LENGTH U(4)
+#define ID_AA64MMFR2_EL1_UAO_SHIFT U(4)
+#define ID_AA64MMFR2_EL1_UAO_MASK ULL(0xf)
+
#define ID_AA64MMFR2_EL1_CNP_SHIFT U(0)
#define ID_AA64MMFR2_EL1_CNP_MASK ULL(0xf)
@@ -386,25 +412,29 @@
#define ID_AA64MMFR3_EL1_TCRX_MASK ULL(0xf)
/* ID_AA64PFR1_EL1 definitions */
-#define ID_AA64PFR1_EL1_GCS_SHIFT U(44)
-#define ID_AA64PFR1_EL1_GCS_MASK ULL(0xf)
-
-#define ID_AA64PFR1_EL1_SSBS_SHIFT U(4)
-#define ID_AA64PFR1_EL1_SSBS_MASK ULL(0xf)
-
-#define SSBS_UNAVAILABLE ULL(0) /* No architectural SSBS support */
#define ID_AA64PFR1_EL1_BT_SHIFT U(0)
#define ID_AA64PFR1_EL1_BT_MASK ULL(0xf)
-
#define BTI_IMPLEMENTED ULL(1) /* The BTI mechanism is implemented */
+#define ID_AA64PFR1_EL1_SSBS_SHIFT U(4)
+#define ID_AA64PFR1_EL1_SSBS_MASK ULL(0xf)
+#define SSBS_UNAVAILABLE ULL(0) /* No architectural SSBS support */
+
#define ID_AA64PFR1_EL1_MTE_SHIFT U(8)
#define ID_AA64PFR1_EL1_MTE_MASK ULL(0xf)
#define ID_AA64PFR1_EL1_RNDR_TRAP_SHIFT U(28)
#define ID_AA64PFR1_EL1_RNDR_TRAP_MASK U(0xf)
+#define ID_AA64PFR1_EL1_NMI_SHIFT U(36)
+#define ID_AA64PFR1_EL1_NMI_MASK ULL(0xf)
+#define NMI_IMPLEMENTED ULL(1)
+
+#define ID_AA64PFR1_EL1_GCS_SHIFT U(44)
+#define ID_AA64PFR1_EL1_GCS_MASK ULL(0xf)
+#define GCS_IMPLEMENTED ULL(1)
+
#define ID_AA64PFR1_EL1_RNG_TRAP_SUPPORTED ULL(0x1)
#define ID_AA64PFR1_EL1_RNG_TRAP_NOT_SUPPORTED ULL(0x0)
@@ -503,6 +533,7 @@
#define SCTLR_TCF0_SHIFT U(38)
#define SCTLR_TCF0_MASK ULL(3)
#define SCTLR_ENTP2_BIT (ULL(1) << 60)
+#define SCTLR_SPINTMASK_BIT (ULL(1) << 62)
/* Tag Check Faults in EL0 have no effect on the PE */
#define SCTLR_TCF0_NO_EFFECT U(0)
@@ -730,6 +761,10 @@
#define DAIF_IRQ_BIT (U(1) << 1)
#define DAIF_ABT_BIT (U(1) << 2)
#define DAIF_DBG_BIT (U(1) << 3)
+#define SPSR_V_BIT (U(1) << 28)
+#define SPSR_C_BIT (U(1) << 29)
+#define SPSR_Z_BIT (U(1) << 30)
+#define SPSR_N_BIT (U(1) << 31)
#define SPSR_DAIF_SHIFT U(6)
#define SPSR_DAIF_MASK U(0xf)
@@ -750,25 +785,32 @@
#define SPSR_M_MASK U(0x1)
#define SPSR_M_AARCH64 U(0x0)
#define SPSR_M_AARCH32 U(0x1)
+#define SPSR_M_EL1H U(0x5)
#define SPSR_M_EL2H U(0x9)
#define SPSR_EL_SHIFT U(2)
#define SPSR_EL_WIDTH U(2)
-#define SPSR_SSBS_SHIFT_AARCH64 U(12)
+#define SPSR_BTYPE_SHIFT_AARCH64 U(10)
+#define SPSR_BTYPE_MASK_AARCH64 U(0x3)
+#define SPSR_SSBS_SHIFT_AARCH64 U(12)
#define SPSR_SSBS_BIT_AARCH64 (ULL(1) << SPSR_SSBS_SHIFT_AARCH64)
#define SPSR_SSBS_SHIFT_AARCH32 U(23)
#define SPSR_SSBS_BIT_AARCH32 (ULL(1) << SPSR_SSBS_SHIFT_AARCH32)
-
+#define SPSR_ALLINT_BIT_AARCH64 BIT_64(13)
+#define SPSR_IL_BIT BIT_64(20)
+#define SPSR_SS_BIT BIT_64(21)
#define SPSR_PAN_BIT BIT_64(22)
-
+#define SPSR_UAO_BIT_AARCH64 BIT_64(23)
#define SPSR_DIT_BIT BIT(24)
-
#define SPSR_TCO_BIT_AARCH64 BIT_64(25)
+#define SPSR_PM_BIT_AARCH64 BIT_64(32)
+#define SPSR_PPEND_BIT BIT(33)
+#define SPSR_EXLOCK_BIT_AARCH64 BIT_64(34)
+#define SPSR_NZCV (SPSR_V_BIT | SPSR_C_BIT | SPSR_Z_BIT | SPSR_N_BIT)
#define DISABLE_ALL_EXCEPTIONS \
(DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
-
#define DISABLE_INTERRUPTS (DAIF_FIQ_BIT | DAIF_IRQ_BIT)
/*
@@ -946,6 +988,7 @@
#define ESR_EC_LENGTH U(6)
#define ESR_ISS_SHIFT U(0)
#define ESR_ISS_LENGTH U(25)
+#define ESR_IL_BIT (U(1) << 25)
#define EC_UNKNOWN U(0x0)
#define EC_WFE_WFI U(0x1)
#define EC_AARCH32_CP15_MRC_MCR U(0x3)
@@ -1408,6 +1451,9 @@
******************************************************************************/
#define GCSCR_EL2 S3_4_C2_C5_0
#define GCSPR_EL2 S3_4_C2_C5_1
+#define GCSCR_EL1 S3_0_C2_C5_0
+
+#define GCSCR_EXLOCK_EN_BIT (UL(1) << 6)
/*******************************************************************************
* Definitions for DynamicIQ Shared Unit registers
diff --git a/include/arch/aarch64/arch_features.h b/include/arch/aarch64/arch_features.h
index 60fb522..de59d45 100644
--- a/include/arch/aarch64/arch_features.h
+++ b/include/arch/aarch64/arch_features.h
@@ -42,6 +42,11 @@
CREATE_FEATURE_FUNCS(feat_pan, id_aa64mmfr1_el1, ID_AA64MMFR1_EL1_PAN_SHIFT,
ENABLE_FEAT_PAN)
+static inline bool is_feat_pan_present(void)
+{
+ return read_feat_pan_id_field() != 0U;
+}
+
CREATE_FEATURE_FUNCS(feat_vhe, id_aa64mmfr1_el1, ID_AA64MMFR1_EL1_VHE_SHIFT,
ENABLE_FEAT_VHE)
@@ -51,6 +56,12 @@
ID_AA64MMFR2_EL1_CNP_MASK) != 0U;
}
+static inline bool is_feat_uao_present(void)
+{
+ return ((read_id_aa64mmfr2_el1() >> ID_AA64MMFR2_EL1_UAO_SHIFT) &
+ ID_AA64MMFR2_EL1_UAO_MASK) != 0U;
+}
+
static inline bool is_feat_pacqarma3_present(void)
{
uint64_t mask_id_aa64isar2 =
@@ -89,6 +100,42 @@
ID_AA64PFR1_EL1_BT_MASK) == BTI_IMPLEMENTED;
}
+static inline unsigned int get_armv8_5_mte_support(void)
+{
+ return ((read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_MTE_SHIFT) &
+ ID_AA64PFR1_EL1_MTE_MASK);
+}
+
+static inline bool is_feat_ssbs_present(void)
+{
+ return ((read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_SSBS_SHIFT) &
+ ID_AA64PFR1_EL1_SSBS_MASK) != SSBS_UNAVAILABLE;
+}
+
+static inline bool is_feat_nmi_present(void)
+{
+ return ((read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_NMI_SHIFT) &
+ ID_AA64PFR1_EL1_NMI_MASK) == NMI_IMPLEMENTED;
+}
+
+static inline bool is_feat_gcs_present(void)
+{
+ return ((read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_GCS_SHIFT) &
+ ID_AA64PFR1_EL1_GCS_MASK) == GCS_IMPLEMENTED;
+}
+
+static inline bool is_feat_ebep_present(void)
+{
+ return ((read_id_aa64dfr1_el1() >> ID_AA64DFR1_EBEP_SHIFT) &
+ ID_AA64DFR1_EBEP_MASK) == EBEP_IMPLEMENTED;
+}
+
+static inline bool is_feat_sebep_present(void)
+{
+ return ((read_id_aa64dfr0_el1() >> ID_AA64DFR0_SEBEP_SHIFT) &
+ ID_AA64DFR0_SEBEP_MASK) == SEBEP_IMPLEMENTED;
+}
+
CREATE_FEATURE_FUNCS(feat_mte, id_aa64pfr1_el1, ID_AA64PFR1_EL1_MTE_SHIFT,
ENABLE_FEAT_MTE)
CREATE_FEATURE_FUNCS_VER(feat_mte2, read_feat_mte_id_field, MTE_IMPLEMENTED_ELX,
diff --git a/include/arch/aarch64/arch_helpers.h b/include/arch/aarch64/arch_helpers.h
index 2d97018..6356cab 100644
--- a/include/arch/aarch64/arch_helpers.h
+++ b/include/arch/aarch64/arch_helpers.h
@@ -241,6 +241,7 @@
void flush_dcache_range(uintptr_t addr, size_t size);
void flush_dcache_to_popa_range(uintptr_t addr, size_t size);
+void flush_dcache_to_popa_range_mte2(uintptr_t addr, size_t size);
void clean_dcache_range(uintptr_t addr, size_t size);
void inv_dcache_range(uintptr_t addr, size_t size);
bool is_dcache_enabled(void);
@@ -271,6 +272,7 @@
DEFINE_IDREG_READ_FUNC(id_aa64pfr1_el1)
DEFINE_RENAME_IDREG_READ_FUNC(id_aa64pfr2_el1, ID_AA64PFR2_EL1)
DEFINE_IDREG_READ_FUNC(id_aa64dfr0_el1)
+DEFINE_IDREG_READ_FUNC(id_aa64dfr1_el1)
DEFINE_IDREG_READ_FUNC(id_afr0_el1)
DEFINE_SYSREG_READ_FUNC(CurrentEl)
DEFINE_SYSREG_READ_FUNC(ctr_el0)
@@ -645,6 +647,7 @@
/* FEAT_GCS Registers */
DEFINE_RENAME_SYSREG_RW_FUNCS(gcscr_el2, GCSCR_EL2)
DEFINE_RENAME_SYSREG_RW_FUNCS(gcspr_el2, GCSPR_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(gcscr_el1, GCSCR_EL1)
/* DynamIQ Shared Unit power management */
DEFINE_RENAME_SYSREG_RW_FUNCS(clusterpwrdn_el1, CLUSTERPWRDN_EL1)
diff --git a/include/bl31/sync_handle.h b/include/bl31/sync_handle.h
index 1ac4f98..ae61f31 100644
--- a/include/bl31/sync_handle.h
+++ b/include/bl31/sync_handle.h
@@ -55,6 +55,9 @@
*/
int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx);
+/* Handler for injecting UNDEF exception to lower EL */
+void inject_undef64(cpu_context_t *ctx);
+
/* Prototypes for system register emulation handlers provided by platforms. */
int plat_handle_impdef_trap(uint64_t esr_el3, cpu_context_t *ctx);
int plat_handle_rng_trap(uint64_t esr_el3, cpu_context_t *ctx);
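
To make the return convention concrete, here is a hedged sketch of what a
platform could do in plat_handle_impdef_trap() under the new scheme: emulate
reads of some IMPDEF register as RAZ and return TRAP_RET_UNHANDLED for
everything else, which now leads to UNDEF injection rather than a panic for
AArch64 lower ELs. The cpu_context_t stand-in, the ISS field helpers and the
RAZ policy are illustrative assumptions, not TF-A definitions.

    #include <stdint.h>

    /* Dispatcher return values (mirroring the convention in the diff). */
    #define TRAP_RET_UNHANDLED  (-1)
    #define TRAP_RET_REPEAT       0
    #define TRAP_RET_CONTINUE     1

    typedef struct { uint64_t gpregs[31]; } cpu_context_t;   /* stand-in type */

    /* EC=0x18 ISS layout (Arm ARM): Rt in bits [9:5], bit 0 set on reads. */
    #define ISS_RT(iss)       (((iss) >> 5) & 0x1fU)
    #define ISS_IS_READ(iss)  (((iss) & 1U) != 0U)

    static void ctx_set_gpreg(cpu_context_t *ctx, unsigned int rt, uint64_t v)
    {
        if (rt < 31U)   /* Rt == 31 encodes xzr; writes to it are dropped */
            ctx->gpregs[rt] = v;
    }

    int plat_handle_impdef_trap(uint64_t esr_el3, cpu_context_t *ctx)
    {
        uint64_t iss = esr_el3 & 0x1ffffffULL;

        if (ISS_IS_READ(iss)) {
            /* Emulate the IMPDEF register as RAZ and skip the MRS. */
            ctx_set_gpreg(ctx, (unsigned int)ISS_RT(iss), 0ULL);
            return TRAP_RET_CONTINUE;
        }
        /* Writes stay unhandled: the new path injects UNDEF instead. */
        return TRAP_RET_UNHANDLED;
    }

    int main(void)
    {
        cpu_context_t ctx = { { 0 } };
        uint64_t esr = (0x18ULL << 26) | (3ULL << 5) | 1ULL;  /* MRS x3, <reg> */

        return (plat_handle_impdef_trap(esr, &ctx) == TRAP_RET_CONTINUE) ? 0 : 1;
    }
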
diff --git a/lib/aarch64/cache_helpers.S b/lib/aarch64/cache_helpers.S
index 314ed6e..ff9a4e6 100644
--- a/lib/aarch64/cache_helpers.S
+++ b/lib/aarch64/cache_helpers.S
@@ -9,6 +9,7 @@
.globl flush_dcache_range
.globl flush_dcache_to_popa_range
+ .globl flush_dcache_to_popa_range_mte2
.globl clean_dcache_range
.globl inv_dcache_range
.globl dcsw_op_louis
@@ -17,6 +18,20 @@
.globl dcsw_op_level2
.globl dcsw_op_level3
+/* Opcodes for data cache maintenance by PA instructions. */
+
+/*
+ * sys #6, c7, c14, #1, x0
+ * DC CIPAPA, X0
+ */
+#define dc_cipapa_x0 0xd50e7e20
+
+/*
+ * sys #6, c7, c14, #5, x0
+ * DC CIGDPAPA, X0
+ */
+#define dc_cigdpapa_x0 0xd50e7ea0
+
/*
* This macro can be used for implementing various data cache operations `op`
*/
@@ -37,6 +52,24 @@
ret
.endm
+/* op: the hexadecimal instruction opcode for the cache operation */
+.macro do_dcache_maintenance_instr op
+ /* Exit early if size is zero */
+ cbz x1, exit_loop_\op
+ dcache_line_size x2, x3
+ sub x3, x2, #1
+ bic x0, x0, x3
+ add x1, x1, x0
+loop_\op:
+ .inst \op
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo loop_\op
+ dsb osh
+exit_loop_\op:
+ ret
+.endm
+
.macro check_plat_can_cmo
#if CONDITIONAL_CMO
mov x3, x30
@@ -49,10 +82,11 @@
mov x0, x2
#endif
.endm
- /* ------------------------------------------
- * Clean+Invalidate from base address till
- * size. 'x0' = addr, 'x1' = size
- * ------------------------------------------
+
+ /* -------------------------------------------
+ * DCache Clean+Invalidate by MVA from base
+ * address till size. 'x0' = addr, 'x1' = size
+ * -------------------------------------------
*/
func flush_dcache_range
check_plat_can_cmo
@@ -60,8 +94,8 @@
endfunc flush_dcache_range
/* ------------------------------------------
- * Clean from base address till size.
- * 'x0' = addr, 'x1' = size
+ * DCache Clean by MVA from base address till
+ * size. 'x0' = addr, 'x1' = size
* ------------------------------------------
*/
func clean_dcache_range
@@ -70,8 +104,8 @@
endfunc clean_dcache_range
/* ------------------------------------------
- * Invalidate from base address till
- * size. 'x0' = addr, 'x1' = size
+ * DCache Invalidate by MVA from base address
+ * till size. 'x0' = addr, 'x1' = size
* ------------------------------------------
*/
func inv_dcache_range
@@ -79,37 +113,36 @@
do_dcache_maintenance_by_mva ivac
endfunc inv_dcache_range
-
/*
- * On implementations with FEAT_MTE2,
- * Root firmware must issue DC_CIGDPAPA instead of DC_CIPAPA ,
- * in order to additionally clean and invalidate Allocation Tags
- * associated with the affected locations.
- *
* ------------------------------------------
- * Clean+Invalidate by PA to POPA
- * from base address till size.
+ * DCache Clean+Invalidate by PA to POPA from
+ * base address till size.
* 'x0' = addr, 'x1' = size
* ------------------------------------------
*/
func flush_dcache_to_popa_range
- /* Exit early if size is zero */
- cbz x1, exit_loop_dc_cipapa
check_plat_can_cmo
- dcache_line_size x2, x3
- sub x3, x2, #1
- bic x0, x0, x3
- add x1, x1, x0
-loop_dc_cipapa:
- sys #6, c7, c14, #1, x0 /* DC CIPAPA,<Xt> */
- add x0, x0, x2
- cmp x0, x1
- b.lo loop_dc_cipapa
- dsb osh
-exit_loop_dc_cipapa:
- ret
+ /* dc cipapa, x0 */
+ do_dcache_maintenance_instr dc_cipapa_x0
endfunc flush_dcache_to_popa_range
+ /*
+ * ------------------------------------------
+ * Clean+Invalidate by PA to POPA (MTE2)
+ * from base address till size.
+ * 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ * On implementations with FEAT_MTE2, Root firmware must issue
+ * DC_CIGDPAPA instead of DC_CIPAPA, in order to additionally
+ * clean and invalidate Allocation Tags associated with the
+ * affected locations.
+ */
+func flush_dcache_to_popa_range_mte2
+ check_plat_can_cmo
+ /* dc cigdpapa, x0 */
+ do_dcache_maintenance_instr dc_cigdpapa_x0
+endfunc flush_dcache_to_popa_range_mte2
+
/* ---------------------------------------------------------------
* Data cache operations by set/way to the level specified
*
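
The raw opcodes defined above can be cross-checked by reconstructing the A64
SYS encoding (SYS is the op0 == 1 alias of the system-instruction space). A
small host-side self-check, not firmware code:

    #include <assert.h>
    #include <stdint.h>

    /* Encode "sys #op1, Cn, Cm, #op2, Xt": base 0xd5080000 with op1, CRn,
     * CRm, op2 and Rt packed into bits [18:16], [15:12], [11:8], [7:5], [4:0]. */
    static uint32_t a64_sys(uint32_t op1, uint32_t crn, uint32_t crm,
                            uint32_t op2, uint32_t rt)
    {
        return 0xd5080000U | (op1 << 16) | (crn << 12) |
               (crm << 8) | (op2 << 5) | rt;
    }

    int main(void)
    {
        assert(a64_sys(6, 7, 14, 1, 0) == 0xd50e7e20U);  /* DC CIPAPA, X0   */
        assert(a64_sys(6, 7, 14, 5, 0) == 0xd50e7ea0U);  /* DC CIGDPAPA, X0 */
        return 0;
    }
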
diff --git a/lib/gpt_rme/gpt_rme.c b/lib/gpt_rme/gpt_rme.c
index f5353cb..36f7a51 100644
--- a/lib/gpt_rme/gpt_rme.c
+++ b/lib/gpt_rme/gpt_rme.c
@@ -11,6 +11,7 @@
#include <stdint.h>
#include <arch.h>
+#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include "gpt_rme_private.h"
@@ -1095,8 +1096,13 @@
* states, remove any data speculatively fetched into the target
* physical address space. Issue DC CIPAPA over address range
*/
- flush_dcache_to_popa_range(nse | base,
- GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ if (is_feat_mte2_supported()) {
+ flush_dcache_to_popa_range_mte2(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ } else {
+ flush_dcache_to_popa_range(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ }
write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
gpi_info.gpi_shift, gpi_info.idx, target_pas);
@@ -1107,8 +1113,13 @@
nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;
- flush_dcache_to_popa_range(nse | base,
- GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ if (is_feat_mte2_supported()) {
+ flush_dcache_to_popa_range_mte2(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ } else {
+ flush_dcache_to_popa_range(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ }
/* Unlock access to the L1 tables. */
spin_unlock(&gpt_lock);
@@ -1225,8 +1236,13 @@
}
/* Ensure that the scrubbed data has made it past the PoPA */
- flush_dcache_to_popa_range(nse | base,
- GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ if (is_feat_mte2_supported()) {
+ flush_dcache_to_popa_range_mte2(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ } else {
+ flush_dcache_to_popa_range(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ }
/*
* Remove any data loaded speculatively
@@ -1234,8 +1250,13 @@
*/
nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;
- flush_dcache_to_popa_range(nse | base,
- GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ if (is_feat_mte2_supported()) {
+ flush_dcache_to_popa_range_mte2(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ } else {
+ flush_dcache_to_popa_range(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ }
/* Clear existing GPI encoding and transition granule. */
write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
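
The same three-line MTE2 branch now appears at four call sites in gpt_rme.c.
One possible tidy-up (a sketch of an alternative, not part of this patch) is a
local wrapper that selects the right cache maintenance operation once, assuming
the declarations from arch_features.h and arch_helpers.h shown earlier in this
diff:

    #include <stddef.h>
    #include <stdint.h>

    #include <arch_features.h>   /* is_feat_mte2_supported() */
    #include <arch_helpers.h>    /* flush_dcache_to_popa_range*() */

    /* Clean+invalidate to the PoPA, using the MTE2 variant (which also
     * covers Allocation Tags) when FEAT_MTE2 is implemented. */
    static inline void flush_to_popa(uintptr_t pa, size_t size)
    {
        if (is_feat_mte2_supported()) {
            flush_dcache_to_popa_range_mte2(pa, size);
        } else {
            flush_dcache_to_popa_range(pa, size);
        }
    }

Each call site would then collapse to
flush_to_popa(nse | base, GPT_PGS_ACTUAL_SIZE(gpt_config.p)).
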
diff --git a/plat/arm/board/tc/fdts/tc_fw_config.dts b/plat/arm/board/tc/fdts/tc_fw_config.dts
index a84c7f8..982da5b 100644
--- a/plat/arm/board/tc/fdts/tc_fw_config.dts
+++ b/plat/arm/board/tc/fdts/tc_fw_config.dts
@@ -1,10 +1,11 @@
/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2024, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <common/tbbr/tbbr_img_def.h>
+#include <platform_def.h>
/dts-v1/;
@@ -25,8 +26,8 @@
};
hw-config {
- load-address = <0x0 0x83000000>;
- max-size = <0x8000>;
+ load-address = <0x0 PLAT_HW_CONFIG_DTB_BASE>;
+ max-size = <PLAT_HW_CONFIG_DTB_SIZE>;
id = <HW_CONFIG_ID>;
};
};
diff --git a/plat/arm/board/tc/include/platform_def.h b/plat/arm/board/tc/include/platform_def.h
index 130111e..a42e39d 100644
--- a/plat/arm/board/tc/include/platform_def.h
+++ b/plat/arm/board/tc/include/platform_def.h
@@ -28,6 +28,11 @@
* - Region to load secure partitions
*
*
+ * 0x8000_0000 ------------------ TC_NS_DRAM1_BASE
+ * | DTB |
+ * | (32K) |
+ * 0x8000_8000 ------------------
+ * | ... |
* 0xf8a0_0000 ------------------ TC_NS_FWU_BASE
* | FWU shmem |
* | (4MB) |
@@ -79,7 +84,7 @@
TC_TZC_DRAM1_SIZE, \
MT_MEMORY | MT_RW | MT_SECURE)
-#define PLAT_HW_CONFIG_DTB_BASE ULL(0x83000000)
+#define PLAT_HW_CONFIG_DTB_BASE TC_NS_DRAM1_BASE
#define PLAT_HW_CONFIG_DTB_SIZE ULL(0x8000)
#define PLAT_DTB_DRAM_NS MAP_REGION_FLAT( \
diff --git a/plat/arm/board/tc/platform.mk b/plat/arm/board/tc/platform.mk
index 5be1234..652a17e 100644
--- a/plat/arm/board/tc/platform.mk
+++ b/plat/arm/board/tc/platform.mk
@@ -13,6 +13,38 @@
# IOMMU: Enable the use of system or individual MMUs
TC_IOMMU_EN := 1
+# System setup
+CSS_USE_SCMI_SDS_DRIVER := 1
+HW_ASSISTED_COHERENCY := 1
+USE_COHERENT_MEM := 0
+GIC_ENABLE_V4_EXTN := 1
+GICV3_SUPPORT_GIC600 := 1
+override NEED_BL2U := no
+override ARM_PLAT_MT := 1
+
+# CPU setup
+ARM_ARCH_MINOR := 7
+BRANCH_PROTECTION := 1
+ENABLE_FEAT_MPAM := 1 # default is 2, optimise
+ENABLE_SVE_FOR_NS := 2 # to show we use it
+ENABLE_SVE_FOR_SWD := 1
+ENABLE_TRBE_FOR_NS := 1
+ENABLE_SYS_REG_TRACE_FOR_NS := 1
+ENABLE_FEAT_AMU := 1
+ENABLE_AMU_FCONF := 1
+ENABLE_AMU_AUXILIARY_COUNTERS := 1
+ENABLE_MPMM := 1
+ENABLE_MPMM_FCONF := 1
+
+CTX_INCLUDE_AARCH32_REGS := 0
+
+ifeq (${SPD},spmd)
+ SPMD_SPM_AT_SEL2 := 1
+ ENABLE_FEAT_MTE := 1
+ CTX_INCLUDE_PAUTH_REGS := 1
+endif
+
+
ifneq ($(shell expr $(TARGET_PLATFORM) \<= 1), 0)
$(warning Platform ${PLAT}$(TARGET_PLATFORM) is deprecated. \
Some of the features might not work as expected)
@@ -36,41 +68,6 @@
CSS_LOAD_SCP_IMAGES := 1
-CSS_USE_SCMI_SDS_DRIVER := 1
-
-ENABLE_FEAT_RAS := 1
-
-SDEI_SUPPORT := 0
-
-EL3_EXCEPTION_HANDLING := 0
-
-HANDLE_EA_EL3_FIRST_NS := 0
-
-# System coherency is managed in hardware
-HW_ASSISTED_COHERENCY := 1
-
-# When building for systems with hardware-assisted coherency, there's no need to
-# use USE_COHERENT_MEM. Require that USE_COHERENT_MEM must be set to 0 too.
-USE_COHERENT_MEM := 0
-
-GIC_ENABLE_V4_EXTN := 1
-
-# GIC-600 configuration
-GICV3_SUPPORT_GIC600 := 1
-
-# Enable SVE
-ENABLE_SVE_FOR_NS := 2
-ENABLE_SVE_FOR_SWD := 1
-
-# enable trace buffer control registers access to NS by default
-ENABLE_TRBE_FOR_NS := 1
-
-# enable trace system registers access to NS by default
-ENABLE_SYS_REG_TRACE_FOR_NS := 1
-
-# enable trace filter control registers access to NS by default
-ENABLE_TRF_FOR_NS := 1
-
# Include GICv3 driver files
include drivers/arm/gic/v3/gicv3.mk
@@ -78,10 +75,6 @@
plat/common/plat_gicv3.c \
plat/arm/common/arm_gicv3.c
-override NEED_BL2U := no
-
-override ARM_PLAT_MT := 1
-
TC_BASE = plat/arm/board/tc
PLAT_INCLUDES += -I${TC_BASE}/include/ \
@@ -174,19 +167,6 @@
# Add the HW_CONFIG to FIP and specify the same to certtool
$(eval $(call TOOL_ADD_PAYLOAD,${TC_HW_CONFIG},--hw-config,${TC_HW_CONFIG}))
-override CTX_INCLUDE_AARCH32_REGS := 0
-
-override CTX_INCLUDE_PAUTH_REGS := 1
-
-override ENABLE_SPE_FOR_NS := 0
-
-override ENABLE_FEAT_AMU := 1
-ENABLE_AMU_AUXILIARY_COUNTERS := 1
-ENABLE_AMU_FCONF := 1
-
-ENABLE_MPMM := 1
-ENABLE_MPMM_FCONF := 1
-
# Include Measured Boot makefile before any Crypto library makefile.
# Crypto library makefile may need default definitions of Measured Boot build
# flags present in Measured Boot makefile.