Merge changes from topic "mp/undef_injection" into integration
* changes:
feat(el3-runtime): introduce UNDEF injection to lower EL
feat(cpufeat): add a few helper functions
diff --git a/drivers/partition/partition.c b/drivers/partition/partition.c
index c60820d..555fe7f 100644
--- a/drivers/partition/partition.c
+++ b/drivers/partition/partition.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -94,9 +94,8 @@
* If partition numbers could be found, check & update it.
*/
static int load_gpt_header(uintptr_t image_handle, size_t header_offset,
- unsigned long long *part_lba)
+ gpt_header_t *header)
{
- gpt_header_t header;
size_t bytes_read;
int result;
uint32_t header_crc, calc_crc;
@@ -107,7 +106,7 @@
header_offset);
return result;
}
- result = io_read(image_handle, (uintptr_t)&header,
+ result = io_read(image_handle, (uintptr_t)header,
sizeof(gpt_header_t), &bytes_read);
if ((result != 0) || (sizeof(gpt_header_t) != bytes_read)) {
VERBOSE("GPT header read error(%i) or read mismatch occurred,"
@@ -115,8 +114,8 @@
sizeof(gpt_header_t), bytes_read);
return result;
}
- if (memcmp(header.signature, GPT_SIGNATURE,
- sizeof(header.signature)) != 0) {
+ if (memcmp(header->signature, GPT_SIGNATURE,
+ sizeof(header->signature)) != 0) {
VERBOSE("GPT header signature failure\n");
return -EINVAL;
}
@@ -126,25 +125,24 @@
* computed by setting this field to 0, and computing the
* 32-bit CRC for HeaderSize bytes.
*/
- header_crc = header.header_crc;
- header.header_crc = 0U;
+ header_crc = header->header_crc;
+ header->header_crc = 0U;
- calc_crc = tf_crc32(0U, (uint8_t *)&header, sizeof(gpt_header_t));
+ calc_crc = tf_crc32(0U, (uint8_t *)header, sizeof(gpt_header_t));
if (header_crc != calc_crc) {
ERROR("Invalid GPT Header CRC: Expected 0x%x but got 0x%x.\n",
header_crc, calc_crc);
return -EINVAL;
}
- header.header_crc = header_crc;
+ header->header_crc = header_crc;
/* partition numbers can't exceed PLAT_PARTITION_MAX_ENTRIES */
- list.entry_count = header.list_num;
+ list.entry_count = header->list_num;
if (list.entry_count > PLAT_PARTITION_MAX_ENTRIES) {
list.entry_count = PLAT_PARTITION_MAX_ENTRIES;
}
- *part_lba = header.part_lba;
return 0;
}
@@ -231,12 +229,13 @@
* Retrieve each entry in the partition table, parse the data from each
* entry and store them in the list of partition table entries.
*/
-static int load_partition_gpt(uintptr_t image_handle,
- unsigned long long part_lba)
+static int load_partition_gpt(uintptr_t image_handle, gpt_header_t header)
{
- const signed long long gpt_entry_offset = LBA(part_lba);
+ const signed long long gpt_entry_offset = LBA(header.part_lba);
gpt_entry_t entry;
- int result, i;
+ int result;
+ unsigned int i;
+ uint32_t calc_crc = 0U;
result = io_seek(image_handle, IO_SEEK_SET, gpt_entry_offset);
if (result != 0) {
@@ -245,23 +244,36 @@
return result;
}
- for (i = 0; i < list.entry_count; i++) {
+ for (i = 0; i < (unsigned int)list.entry_count; i++) {
result = load_gpt_entry(image_handle, &entry);
if (result != 0) {
- VERBOSE("Failed to load gpt entry data(%i) error is (%i)\n",
+ VERBOSE("Failed to load gpt entry data(%u) error is (%i)\n",
i, result);
return result;
}
result = parse_gpt_entry(&entry, &list.list[i]);
if (result != 0) {
+ result = io_seek(image_handle, IO_SEEK_SET,
+ (gpt_entry_offset + (i * sizeof(gpt_entry_t))));
+ if (result != 0) {
+ VERBOSE("Failed to seek (%i)\n", result);
+ return result;
+ }
break;
}
+
+ /*
+ * Calculate CRC of Partition entry array to compare with CRC
+ * value in header
+ */
+ calc_crc = tf_crc32(calc_crc, (uint8_t *)&entry, sizeof(gpt_entry_t));
}
if (i == 0) {
VERBOSE("No Valid GPT Entries found\n");
return -EINVAL;
}
+
/*
* Only records the valid partition number that is loaded from
* partition table.
@@ -269,6 +281,29 @@
list.entry_count = i;
dump_entries(list.entry_count);
+ /*
+ * If there are less valid entries than the possible number of entries
+ * from the header, continue to load the partition entry table to
+ * calculate the full CRC in order to check against the partition CRC
+ * from the header for validation.
+ */
+ for (; i < header.list_num; i++) {
+ result = load_gpt_entry(image_handle, &entry);
+ if (result != 0) {
+ VERBOSE("Failed to load gpt entry data(%u) error is (%i)\n",
+ i, result);
+ return result;
+ }
+
+ calc_crc = tf_crc32(calc_crc, (uint8_t *)&entry, sizeof(gpt_entry_t));
+ }
+
+ if (header.part_crc != calc_crc) {
+ ERROR("Invalid GPT Partition Array Entry CRC: Expected 0x%x"
+ " but got 0x%x.\n", header.part_crc, calc_crc);
+ return -EINVAL;
+ }
+
return 0;
}
@@ -279,7 +314,7 @@
static int load_backup_gpt(unsigned int image_id, unsigned int sector_nums)
{
int result;
- unsigned long long part_lba = 0;
+ gpt_header_t header;
size_t gpt_header_offset;
uintptr_t dev_handle, image_spec, image_handle;
io_block_spec_t *block_spec;
@@ -316,8 +351,8 @@
INFO("Trying to retrieve back-up GPT header\n");
/* Last block is backup-GPT header, after the end of GPT entries */
gpt_header_offset = LBA(part_num_entries);
- result = load_gpt_header(image_handle, gpt_header_offset, &part_lba);
- if ((result != 0) || (part_lba == 0)) {
+ result = load_gpt_header(image_handle, gpt_header_offset, &header);
+ if ((result != 0) || (header.part_lba == 0)) {
ERROR("Failed to retrieve Backup GPT header,"
"Partition maybe corrupted\n");
goto out;
@@ -327,7 +362,8 @@
* Note we mapped last 33 blocks(LBA-33), first block here starts with
* entries while last block was header.
*/
- result = load_partition_gpt(image_handle, 0);
+ header.part_lba = 0;
+ result = load_partition_gpt(image_handle, header);
out:
io_close(image_handle);
@@ -342,19 +378,19 @@
static int load_primary_gpt(uintptr_t image_handle, unsigned int first_lba)
{
int result;
- unsigned long long part_lba;
size_t gpt_header_offset;
+ gpt_header_t header;
/* Try to load Primary GPT header from LBA1 */
gpt_header_offset = LBA(first_lba);
- result = load_gpt_header(image_handle, gpt_header_offset, &part_lba);
- if ((result != 0) || (part_lba == 0)) {
+ result = load_gpt_header(image_handle, gpt_header_offset, &header);
+ if ((result != 0) || (header.part_lba == 0)) {
VERBOSE("Failed to retrieve Primary GPT header,"
"trying to retrieve back-up GPT header\n");
return result;
}
- return load_partition_gpt(image_handle, part_lba);
+ return load_partition_gpt(image_handle, header);
}
/*
diff --git a/include/arch/aarch64/arch_helpers.h b/include/arch/aarch64/arch_helpers.h
index f45290d..6356cab 100644
--- a/include/arch/aarch64/arch_helpers.h
+++ b/include/arch/aarch64/arch_helpers.h
@@ -241,6 +241,7 @@
void flush_dcache_range(uintptr_t addr, size_t size);
void flush_dcache_to_popa_range(uintptr_t addr, size_t size);
+void flush_dcache_to_popa_range_mte2(uintptr_t addr, size_t size);
void clean_dcache_range(uintptr_t addr, size_t size);
void inv_dcache_range(uintptr_t addr, size_t size);
bool is_dcache_enabled(void);
diff --git a/lib/aarch64/cache_helpers.S b/lib/aarch64/cache_helpers.S
index 314ed6e..ff9a4e6 100644
--- a/lib/aarch64/cache_helpers.S
+++ b/lib/aarch64/cache_helpers.S
@@ -9,6 +9,7 @@
.globl flush_dcache_range
.globl flush_dcache_to_popa_range
+ .globl flush_dcache_to_popa_range_mte2
.globl clean_dcache_range
.globl inv_dcache_range
.globl dcsw_op_louis
@@ -17,6 +18,20 @@
.globl dcsw_op_level2
.globl dcsw_op_level3
+/* Opcodes for data cache maintenance by PA instructions. */
+
+/*
+ * sys #6, c7, c14, #1, x0
+ * DC CIPAPA, X0
+ */
+#define dc_cipapa_x0 0xd50e7e20
+
+/*
+ * sys #6, c7, c14, #3, x0
+ * DC CIDGPAPA, X0
+ */
+#define dc_cigdpapa_x0 0xd50e7ea0
+
/*
* This macro can be used for implementing various data cache operations `op`
*/
@@ -37,6 +52,24 @@
ret
.endm
+/* op: the hexadecimal instruction opcode for the cache operation */
+.macro do_dcache_maintenance_instr op
+ /* Exit early if size is zero */
+ cbz x1, exit_loop_\op
+ dcache_line_size x2, x3
+ sub x3, x2, #1
+ bic x0, x0, x3
+ add x1, x1, x0
+loop_\op:
+ .inst \op
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo loop_\op
+ dsb osh
+exit_loop_\op:
+ ret
+.endm
+
.macro check_plat_can_cmo
#if CONDITIONAL_CMO
mov x3, x30
@@ -49,10 +82,11 @@
mov x0, x2
#endif
.endm
- /* ------------------------------------------
- * Clean+Invalidate from base address till
- * size. 'x0' = addr, 'x1' = size
- * ------------------------------------------
+
+ /* -------------------------------------------
+ * DCache Clean+Invalidate by MVA from base
+ * address till size. 'x0' = addr, 'x1' = size
+ * -------------------------------------------
*/
func flush_dcache_range
check_plat_can_cmo
@@ -60,8 +94,8 @@
endfunc flush_dcache_range
/* ------------------------------------------
- * Clean from base address till size.
- * 'x0' = addr, 'x1' = size
+ * DCache Clean by MVA from base address till
+ * size. 'x0' = addr, 'x1' = size
* ------------------------------------------
*/
func clean_dcache_range
@@ -70,8 +104,8 @@
endfunc clean_dcache_range
/* ------------------------------------------
- * Invalidate from base address till
- * size. 'x0' = addr, 'x1' = size
+ * DCache Invalidate by MVA from base address
+ * till size. 'x0' = addr, 'x1' = size
* ------------------------------------------
*/
func inv_dcache_range
@@ -79,37 +113,36 @@
do_dcache_maintenance_by_mva ivac
endfunc inv_dcache_range
-
/*
- * On implementations with FEAT_MTE2,
- * Root firmware must issue DC_CIGDPAPA instead of DC_CIPAPA ,
- * in order to additionally clean and invalidate Allocation Tags
- * associated with the affected locations.
- *
* ------------------------------------------
- * Clean+Invalidate by PA to POPA
- * from base address till size.
+ * DCache Clean+Invalidate by PA to POPA from
+ * base address till size.
* 'x0' = addr, 'x1' = size
* ------------------------------------------
*/
func flush_dcache_to_popa_range
- /* Exit early if size is zero */
- cbz x1, exit_loop_dc_cipapa
check_plat_can_cmo
- dcache_line_size x2, x3
- sub x3, x2, #1
- bic x0, x0, x3
- add x1, x1, x0
-loop_dc_cipapa:
- sys #6, c7, c14, #1, x0 /* DC CIPAPA,<Xt> */
- add x0, x0, x2
- cmp x0, x1
- b.lo loop_dc_cipapa
- dsb osh
-exit_loop_dc_cipapa:
- ret
+ /* dc cipapa, x0 */
+ do_dcache_maintenance_instr dc_cipapa_x0
endfunc flush_dcache_to_popa_range
+ /*
+ * ------------------------------------------
+ * Clean+Invalidate by PA to POPA (MTE2)
+ * from base address till size.
+ * 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ * On implementations with FEAT_MTE2, Root firmware must issue
+ * DC_CIGDPAPA instead of DC_CIPAPA, in order to additionally
+ * clean and invalidate Allocation Tags associated with the
+ * affected locations.
+ */
+func flush_dcache_to_popa_range_mte2
+ check_plat_can_cmo
+ /* dc cigdpapa, x0 */
+ do_dcache_maintenance_instr dc_cigdpapa_x0
+endfunc flush_dcache_to_popa_range_mte2
+
/* ---------------------------------------------------------------
* Data cache operations by set/way to the level specified
*
diff --git a/lib/gpt_rme/gpt_rme.c b/lib/gpt_rme/gpt_rme.c
index f5353cb..36f7a51 100644
--- a/lib/gpt_rme/gpt_rme.c
+++ b/lib/gpt_rme/gpt_rme.c
@@ -11,6 +11,7 @@
#include <stdint.h>
#include <arch.h>
+#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include "gpt_rme_private.h"
@@ -1095,8 +1096,13 @@
* states, remove any data speculatively fetched into the target
* physical address space. Issue DC CIPAPA over address range
*/
- flush_dcache_to_popa_range(nse | base,
- GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ if (is_feat_mte2_supported()) {
+ flush_dcache_to_popa_range_mte2(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ } else {
+ flush_dcache_to_popa_range(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ }
write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
gpi_info.gpi_shift, gpi_info.idx, target_pas);
@@ -1107,8 +1113,13 @@
nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;
- flush_dcache_to_popa_range(nse | base,
- GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ if (is_feat_mte2_supported()) {
+ flush_dcache_to_popa_range_mte2(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ } else {
+ flush_dcache_to_popa_range(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ }
/* Unlock access to the L1 tables. */
spin_unlock(&gpt_lock);
@@ -1225,8 +1236,13 @@
}
/* Ensure that the scrubbed data has made it past the PoPA */
- flush_dcache_to_popa_range(nse | base,
- GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ if (is_feat_mte2_supported()) {
+ flush_dcache_to_popa_range_mte2(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ } else {
+ flush_dcache_to_popa_range(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ }
/*
* Remove any data loaded speculatively
@@ -1234,8 +1250,13 @@
*/
nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;
- flush_dcache_to_popa_range(nse | base,
- GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ if (is_feat_mte2_supported()) {
+ flush_dcache_to_popa_range_mte2(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ } else {
+ flush_dcache_to_popa_range(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ }
/* Clear existing GPI encoding and transition granule. */
write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,