Merge pull request #1901 from AlexeiFedorov/af/restore_pauth_context_smc
Restore PAuth context in case of unknown SMC call
diff --git a/Makefile b/Makefile
index 8656da5..11d0d7a 100644
--- a/Makefile
+++ b/Makefile
@@ -478,6 +478,12 @@
ifeq ($(ENABLE_PAUTH),1)
ifeq ($(CTX_INCLUDE_PAUTH_REGS),0)
$(error ENABLE_PAUTH=1 requires CTX_INCLUDE_PAUTH_REGS=1)
+ else
+ $(info ENABLE_PAUTH and CTX_INCLUDE_PAUTH_REGS are experimental features)
+ endif
+else
+ ifeq ($(CTX_INCLUDE_PAUTH_REGS),1)
+ $(info CTX_INCLUDE_PAUTH_REGS is an experimental feature)
endif
endif
diff --git a/docs/firmware-design.rst b/docs/firmware-design.rst
index 52520ea..66b16fa 100644
--- a/docs/firmware-design.rst
+++ b/docs/firmware-design.rst
@@ -2565,11 +2565,16 @@
must be set to 1. This will add all pointer authentication system registers
to the context that is saved when doing a world switch.
- The Trusted Firmware itself has support for pointer authentication at runtime
+ TF-A itself has support for pointer authentication at runtime
that can be enabled by setting both options ``ENABLE_PAUTH`` and
``CTX_INCLUDE_PAUTH_REGS`` to 1. This enables pointer authentication in BL1,
BL2, BL31, and the TSP if it is used.
+ These options are experimental features.
+
+ Note that Pointer Authentication is enabled for the Non-secure world
+ irrespective of the value of these build flags if the CPU supports it.
+
If ``ARM_ARCH_MAJOR == 8`` and ``ARM_ARCH_MINOR >= 3`` the code footprint of
enabling PAuth is lower because the compiler will use the optimized
PAuth instructions rather than the backwards-compatible ones.
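
The documentation above notes that the Non-secure world gets Pointer Authentication whenever the CPU implements it, regardless of these build flags. For reference, a minimal C sketch of how a CPU can be probed for the extension is shown below; it reads ID_AA64ISAR1_EL1 and checks the APA/API/GPA/GPI fields. The helper names are illustrative, not TF-A's actual API.

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative sketch only: probe ID_AA64ISAR1_EL1 for Armv8.3-PAuth.
 * Field positions (APA [7:4], API [11:8], GPA [27:24], GPI [31:28]) follow
 * the Armv8-A architecture; the helper names are hypothetical.
 */
static inline uint64_t read_id_aa64isar1_el1(void)
{
	uint64_t val;

	__asm__ volatile("mrs %0, id_aa64isar1_el1" : "=r"(val));
	return val;
}

static bool cpu_supports_pauth(void)
{
	uint64_t isar1 = read_id_aa64isar1_el1();
	uint64_t apa = (isar1 >> 4) & 0xfU;
	uint64_t api = (isar1 >> 8) & 0xfU;
	uint64_t gpa = (isar1 >> 24) & 0xfU;
	uint64_t gpi = (isar1 >> 28) & 0xfU;

	/* Any non-zero field means some form of the feature is implemented. */
	return (apa | api | gpa | gpi) != 0U;
}
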
diff --git a/docs/user-guide.rst b/docs/user-guide.rst
index de4611c..c88b1f6 100644
--- a/docs/user-guide.rst
+++ b/docs/user-guide.rst
@@ -358,11 +358,12 @@
registers to be included when saving and restoring the CPU context. Default
is 0.
-- ``CTX_INCLUDE_PAUTH_REGS``: Boolean option that, when set to 1, will cause
- the ARMv8.3-PAuth registers to be included when saving and restoring the CPU
- context. Note that if the hardware supports this extension and this option is
- set to 0 the value of the registers will be leaked between Secure and
- Non-secure worlds if PAuth is used on both sides. The default is 0.
+- ``CTX_INCLUDE_PAUTH_REGS``: Boolean option that, when set to 1, enables
+ Pointer Authentication for the Secure world. This causes the ARMv8.3-PAuth
+ registers to be included when saving and restoring the CPU context as
+ part of a world switch. The default value is 0 and this is an experimental
+ feature. Note that Pointer Authentication is enabled for the Non-secure
+ world irrespective of the value of this flag if the CPU supports it.
- ``DEBUG``: Chooses between a debug and release build. It can take either 0
(release) or 1 (debug) as values. 0 is the default.
@@ -412,11 +413,11 @@
and use partitions in EL3 as required. This option defaults to ``0``.
- ``ENABLE_PAUTH``: Boolean option to enable ARMv8.3 Pointer Authentication
- (``ARMv8.3-PAuth``) support in the Trusted Firmware itself. Note that this
- option doesn't affect the saving of the registers introduced with this
- extension, they are always saved if they are detected regardless of the value
- of this option. If enabled, it is needed to use a compiler that supports the
- option ``-msign-return-address``. It defaults to 0.
+ support in TF-A BL images themselves. If enabled, a compiler that supports
+ the option ``-msign-return-address`` must be used. This flag defaults to 0
+ and is an experimental feature.
+ Note that Pointer Authentication is enabled for the Non-secure world
+ irrespective of the value of this flag if the CPU supports it.
- ``ENABLE_PIE``: Boolean option to enable Position Independent Executable(PIE)
support within generic code in TF-A. This option is currently only supported
@@ -1049,7 +1050,7 @@
./tools/fiptool/fiptool
-Invoking the tool with ``--help`` will print a help message with all available
+Invoking the tool with ``help`` will print a help message with all available
options.
Example 1: create a new Firmware package ``fip.bin`` that contains BL2 and BL31:
diff --git a/include/lib/cpus/aarch64/neoverse_e1.h b/include/lib/cpus/aarch64/neoverse_e1.h
index 7084604..96b4661 100644
--- a/include/lib/cpus/aarch64/neoverse_e1.h
+++ b/include/lib/cpus/aarch64/neoverse_e1.h
@@ -9,7 +9,7 @@
#include <lib/utils_def.h>
-#define NEOVERSE_E1_MIDR U(0x410FD060)
+#define NEOVERSE_E1_MIDR U(0x410FD4A0)
/*******************************************************************************
* CPU Extended Control register specific definitions.
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 4371cb2..e6ab19b 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -356,7 +356,7 @@
msr APIAKeyLo_EL1, x9
msr APIAKeyHi_EL1, x10
- ldp x9, x10, [x11, #CTX_PACIAKEY_LO]
+ ldp x9, x10, [x11, #CTX_PACIBKEY_LO]
msr APIBKeyLo_EL1, x9
msr APIBKeyHi_EL1, x10
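
The hunk above corrects a copy/paste slip: both halves of the APIB key were being reloaded from the APIA context offsets, so restoring a context clobbered key B with key A's value. A minimal C sketch of the intended pairing is shown below; the structure layout and function names are illustrative stand-ins for the real context offsets (CTX_PACIAKEY_LO, CTX_PACIBKEY_LO) and the msr instructions in context.S, and it assumes an Armv8.3-aware toolchain.

#include <stdint.h>

/*
 * Illustrative context layout: only the two instruction keys handled in the
 * hunk above are shown. The real offsets are defined by the EL3 runtime
 * context code, not here.
 */
typedef struct pauth_ctx {
	uint64_t apiakey[2];	/* instruction key A: lo, hi */
	uint64_t apibkey[2];	/* instruction key B: lo, hi */
} pauth_ctx_t;

/* Requires an assembler that knows the Armv8.3 key registers. */
static inline void write_apiakeylo_el1(uint64_t v)
{
	__asm__ volatile("msr APIAKeyLo_EL1, %0" : : "r"(v));
}

static inline void write_apiakeyhi_el1(uint64_t v)
{
	__asm__ volatile("msr APIAKeyHi_EL1, %0" : : "r"(v));
}

static inline void write_apibkeylo_el1(uint64_t v)
{
	__asm__ volatile("msr APIBKeyLo_EL1, %0" : : "r"(v));
}

static inline void write_apibkeyhi_el1(uint64_t v)
{
	__asm__ volatile("msr APIBKeyHi_EL1, %0" : : "r"(v));
}

static void pauth_restore_keys(const pauth_ctx_t *ctx)
{
	/* Key A is reloaded from the A slots... */
	write_apiakeylo_el1(ctx->apiakey[0]);
	write_apiakeyhi_el1(ctx->apiakey[1]);
	/* ...and key B from the B slots, which is what the fix ensures. */
	write_apibkeylo_el1(ctx->apibkey[0]);
	write_apibkeyhi_el1(ctx->apibkey[1]);
}
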
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
index 7957b61..0e6a6fa 100644
--- a/lib/xlat_tables_v2/xlat_tables_core.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -325,9 +325,8 @@
return action;
}
-
/*
- * Function that writes to the translation tables and unmaps the
+ * Recursive function that writes to the translation tables and unmaps the
* specified region.
*/
static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
@@ -338,137 +337,70 @@
{
assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
- /*
- * data structure to track DESC_TABLE entry before iterate into subtable
- * of next translation level. it will be used to restore previous level
- * after finish subtable iteration.
- */
- struct desc_table_unmap {
- uint64_t *table_base;
- uintptr_t table_idx_va;
- unsigned int idx;
- } desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
- {NULL, 0U, XLAT_TABLE_ENTRIES}, };
+ uint64_t *subtable;
+ uint64_t desc;
- unsigned int this_level = level;
- uint64_t *this_base = table_base;
- unsigned int max_entries = table_entries;
- size_t level_size = XLAT_BLOCK_SIZE(this_level);
- unsigned int table_idx;
uintptr_t table_idx_va;
+ uintptr_t table_idx_end_va; /* End VA of this entry */
uintptr_t region_end_va = mm->base_va + mm->size - 1U;
+ unsigned int table_idx;
+
table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
- while (this_base != NULL) {
-
- uint64_t desc;
- uint64_t desc_type;
- uintptr_t table_idx_end_va; /* End VA of this entry */
- action_t action;
-
- /* finish current xlat level iteration. */
- if (table_idx >= max_entries) {
- if (this_level > ctx->base_level) {
- xlat_table_dec_regions_count(ctx, this_base);
- }
-
- if (this_level > level) {
- uint64_t *subtable;
-
- /* back from subtable iteration, restore
- * previous DESC_TABLE entry.
- */
- this_level--;
- this_base = desc_tables[this_level].table_base;
- table_idx = desc_tables[this_level].idx;
- table_idx_va =
- desc_tables[this_level].table_idx_va;
- level_size = XLAT_BLOCK_SIZE(this_level);
-
- if (this_level == level) {
- max_entries = table_entries;
- } else {
- max_entries = XLAT_TABLE_ENTRIES;
- }
-
- desc = this_base[table_idx];
- subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
- /*
- * If the subtable is now empty, remove its reference.
- */
- if (xlat_table_is_empty(ctx, subtable)) {
- this_base[table_idx] = INVALID_DESC;
- xlat_arch_tlbi_va(table_idx_va,
- ctx->xlat_regime);
- }
- table_idx++;
- table_idx_va += level_size;
-
- } else {
- /* reached end of top level, exit.*/
- this_base = NULL;
- break;
- }
-
- }
-
- /* If reached the end of the region, stop iterating entries in
- * current xlat level.
- */
- if (region_end_va <= table_idx_va) {
- table_idx = max_entries;
- continue;
- }
-
+ while (table_idx < table_entries) {
- table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(this_level) - 1U;
+ table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1U;
- desc = this_base[table_idx];
- desc_type = desc & DESC_MASK;
+ desc = table_base[table_idx];
+ uint64_t desc_type = desc & DESC_MASK;
- action = xlat_tables_unmap_region_action(mm, table_idx_va,
- table_idx_end_va,
- this_level,
- desc_type);
+ action_t action = xlat_tables_unmap_region_action(mm,
+ table_idx_va, table_idx_end_va, level,
+ desc_type);
if (action == ACTION_WRITE_BLOCK_ENTRY) {
- this_base[table_idx] = INVALID_DESC;
+
+ table_base[table_idx] = INVALID_DESC;
xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);
- table_idx++;
- table_idx_va += level_size;
} else if (action == ACTION_RECURSE_INTO_TABLE) {
- uint64_t *subtable;
- uintptr_t base_va;
-
subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
- desc_tables[this_level].table_base = this_base;
- desc_tables[this_level].table_idx_va = table_idx_va;
- base_va = table_idx_va;
- desc_tables[this_level].idx = table_idx;
-
- this_base = subtable;
- this_level++;
-
- max_entries = XLAT_TABLE_ENTRIES;
- level_size = XLAT_BLOCK_SIZE(this_level);
+ /* Recurse to write into subtable */
+ xlat_tables_unmap_region(ctx, mm, table_idx_va,
+ subtable, XLAT_TABLE_ENTRIES,
+ level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
+ /*
+ * If the subtable is now empty, remove its reference.
+ */
+ if (xlat_table_is_empty(ctx, subtable)) {
+ table_base[table_idx] = INVALID_DESC;
+ xlat_arch_tlbi_va(table_idx_va,
+ ctx->xlat_regime);
+ }
- table_idx_va = xlat_tables_find_start_va(mm,
- base_va, this_level);
- table_idx = xlat_tables_va_to_index(base_va,
- table_idx_va, this_level);
} else {
assert(action == ACTION_NONE);
-
- table_idx++;
- table_idx_va += level_size;
}
+
+ table_idx++;
+ table_idx_va += XLAT_BLOCK_SIZE(level);
+
+ /* If reached the end of the region, exit */
+ if (region_end_va <= table_idx_va)
+ break;
}
+
+ if (level > ctx->base_level)
+ xlat_table_dec_regions_count(ctx, table_base);
}
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
@@ -605,169 +537,105 @@
}
/*
- * Function that writes to the translation tables and maps the
+ * Recursive function that writes to the translation tables and maps the
* specified region. On success, it returns the VA of the last byte that was
* successfully mapped. On error, it returns the VA of the next entry that
* should have been mapped.
*/
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
- const uintptr_t table_base_va,
+ uintptr_t table_base_va,
uint64_t *const table_base,
unsigned int table_entries,
unsigned int level)
{
-
assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
- /*
- * data structure to track DESC_TABLE entry before iterate into subtable
- * of next translation level. it will be used to restore previous level
- * after finish subtable iteration.
- */
- struct desc_table_map {
- uint64_t *table_base;
- uintptr_t table_idx_va;
- unsigned int idx;
- } desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
- {NULL, 0U, XLAT_TABLE_ENTRIES}, };
-
- unsigned int this_level = level;
- uint64_t *this_base = table_base;
- unsigned int max_entries = table_entries;
- size_t level_size = XLAT_BLOCK_SIZE(this_level);
uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
uintptr_t table_idx_va;
+ unsigned long long table_idx_pa;
+
+ uint64_t *subtable;
+ uint64_t desc;
+
unsigned int table_idx;
table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
- while (this_base != NULL) {
-
- uint64_t desc;
- uint64_t desc_type;
- unsigned long long table_idx_pa;
- action_t action;
-
- /* finish current xlat level iteration. */
- if (table_idx >= max_entries) {
- if (this_level <= level) {
- this_base = NULL;
- break;
- } else {
-
- /* back from subtable iteration, restore
- * previous DESC_TABLE entry.
- */
- this_level--;
- level_size = XLAT_BLOCK_SIZE(this_level);
- this_base = desc_tables[this_level].table_base;
- table_idx = desc_tables[this_level].idx;
- if (this_level == level) {
- max_entries = table_entries;
- } else {
- max_entries = XLAT_TABLE_ENTRIES;
- }
-#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
- uintptr_t subtable;
- desc = this_base[table_idx];
- subtable = (uintptr_t)(desc & TABLE_ADDR_MASK);
- xlat_clean_dcache_range(subtable,
- XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#if PLAT_XLAT_TABLES_DYNAMIC
+ if (level > ctx->base_level)
+ xlat_table_inc_regions_count(ctx, table_base);
#endif
- table_idx++;
- table_idx_va =
- desc_tables[this_level].table_idx_va +
- level_size;
- }
- }
+ while (table_idx < table_entries) {
- desc = this_base[table_idx];
- desc_type = desc & DESC_MASK;
+ desc = table_base[table_idx];
table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;
- /* If reached the end of the region, simply exit since we
- * already write all BLOCK entries and create all required
- * subtables.
- */
- if (mm_end_va <= table_idx_va) {
- this_base = NULL;
- break;
- }
-
- action = xlat_tables_map_region_action(mm, desc_type,
- table_idx_pa, table_idx_va, this_level);
+ action_t action = xlat_tables_map_region_action(mm,
+ (uint32_t)(desc & DESC_MASK), table_idx_pa,
+ table_idx_va, level);
if (action == ACTION_WRITE_BLOCK_ENTRY) {
- this_base[table_idx] = xlat_desc(ctx, mm->attr,
- table_idx_pa, this_level);
- table_idx++;
- table_idx_va += level_size;
- } else if (action == ACTION_CREATE_NEW_TABLE) {
- uintptr_t base_va;
+ table_base[table_idx] =
+ xlat_desc(ctx, (uint32_t)mm->attr, table_idx_pa,
+ level);
- uint64_t *subtable = xlat_table_get_empty(ctx);
+ } else if (action == ACTION_CREATE_NEW_TABLE) {
+ uintptr_t end_va;
+
+ subtable = xlat_table_get_empty(ctx);
if (subtable == NULL) {
- /* Not enough free tables to map this region. */
+ /* Not enough free tables to map this region */
return table_idx_va;
}
/* Point to new subtable from this one. */
- this_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
-
- desc_tables[this_level].table_base = this_base;
- desc_tables[this_level].table_idx_va = table_idx_va;
- desc_tables[this_level].idx = table_idx;
- base_va = table_idx_va;
+ table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
- this_level++;
- this_base = subtable;
- level_size = XLAT_BLOCK_SIZE(this_level);
- table_idx_va = xlat_tables_find_start_va(mm, base_va,
- this_level);
- table_idx = xlat_tables_va_to_index(base_va,
- table_idx_va, this_level);
- max_entries = XLAT_TABLE_ENTRIES;
-
-#if PLAT_XLAT_TABLES_DYNAMIC
- if (this_level > ctx->base_level) {
- xlat_table_inc_regions_count(ctx, subtable);
- }
+ /* Recurse to write into subtable */
+ end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+ subtable, XLAT_TABLE_ENTRIES,
+ level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
#endif
+ if (end_va !=
+ (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
+ return end_va;
} else if (action == ACTION_RECURSE_INTO_TABLE) {
-
- uintptr_t base_va;
- uint64_t *subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+ uintptr_t end_va;
- desc_tables[this_level].table_base = this_base;
- desc_tables[this_level].table_idx_va = table_idx_va;
- desc_tables[this_level].idx = table_idx;
- base_va = table_idx_va;
-
- this_level++;
- level_size = XLAT_BLOCK_SIZE(this_level);
- table_idx_va = xlat_tables_find_start_va(mm, base_va,
- this_level);
- table_idx = xlat_tables_va_to_index(base_va,
- table_idx_va, this_level);
- this_base = subtable;
- max_entries = XLAT_TABLE_ENTRIES;
-
-#if PLAT_XLAT_TABLES_DYNAMIC
- if (this_level > ctx->base_level) {
- xlat_table_inc_regions_count(ctx, subtable);
- }
+ subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+ /* Recurse to write into subtable */
+ end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+ subtable, XLAT_TABLE_ENTRIES,
+ level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
#endif
+ if (end_va !=
+ (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
+ return end_va;
+
} else {
+
assert(action == ACTION_NONE);
- table_idx++;
- table_idx_va += level_size;
+
}
+
+ table_idx++;
+ table_idx_va += XLAT_BLOCK_SIZE(level);
+
+ /* If reached the end of the region, exit */
+ if (mm_end_va <= table_idx_va)
+ break;
}
return table_idx_va - 1U;
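
Both xlat_tables_map_region() and xlat_tables_unmap_region() are restored here to the same recursive shape: iterate the entries of one table and call themselves for every table descriptor they encounter. A stripped-down sketch of that walk is shown below; the descriptor constants are made up for illustration and do not match the project's real definitions.

#include <stdint.h>

/* Simplified, hypothetical descriptor constants for illustration only. */
#define ENTRIES_PER_TABLE	512U
#define DESC_TYPE_MASK		0x3ULL
#define TABLE_DESC_TYPE		0x3ULL
#define TABLE_ADDR_BITS		0x0000fffffffff000ULL
#define LAST_LEVEL		3U

/*
 * Visit every leaf descriptor reachable from 'table', recursing through
 * next-level tables in the same way the restored map/unmap functions do.
 */
static void walk_table(const uint64_t *table, unsigned int level,
		       void (*visit)(uint64_t desc, unsigned int level))
{
	for (unsigned int i = 0U; i < ENTRIES_PER_TABLE; i++) {
		uint64_t desc = table[i];

		if (((desc & DESC_TYPE_MASK) == TABLE_DESC_TYPE) &&
		    (level < LAST_LEVEL)) {
			/* Table descriptor: recurse into the next level. */
			const uint64_t *next = (const uint64_t *)
				(uintptr_t)(desc & TABLE_ADDR_BITS);
			walk_table(next, level + 1U, visit);
		} else {
			/* Block, page or invalid entry: report it. */
			visit(desc, level);
		}
	}
}
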
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
index 7d0449a..f5848a2 100644
--- a/lib/xlat_tables_v2/xlat_tables_utils.c
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -109,7 +109,7 @@
"%s(%d invalid descriptors omitted)\n";
/*
- * Function that reads the translation tables passed as an argument
+ * Recursive function that reads the translation tables passed as an argument
* and prints their status.
*/
static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
@@ -118,23 +118,10 @@
{
assert(level <= XLAT_TABLE_LEVEL_MAX);
- /*
- * data structure to track DESC_TABLE entry before iterate into subtable
- * of next translation level. it will be restored after return from
- * subtable iteration.
- */
- struct desc_table {
- const uint64_t *table_base;
- uintptr_t table_idx_va;
- unsigned int idx;
- } desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
- {NULL, 0U, XLAT_TABLE_ENTRIES}, };
- unsigned int this_level = level;
- const uint64_t *this_base = table_base;
- unsigned int max_entries = table_entries;
- size_t level_size = XLAT_BLOCK_SIZE(this_level);
- unsigned int table_idx = 0U;
+ uint64_t desc;
uintptr_t table_idx_va = table_base_va;
+ unsigned int table_idx = 0U;
+ size_t level_size = XLAT_BLOCK_SIZE(level);
/*
* Keep track of how many invalid descriptors are counted in a row.
@@ -144,110 +131,67 @@
*/
int invalid_row_count = 0;
- while (this_base != NULL) {
- /* finish current xlat level */
- if (table_idx >= max_entries) {
- if (invalid_row_count > 1) {
- printf(invalid_descriptors_ommited,
- level_spacers[this_level],
- invalid_row_count - 1);
- }
- invalid_row_count = 0;
+ while (table_idx < table_entries) {
- /* no parent level to iterate. */
- if (this_level <= level) {
- this_base = NULL;
- table_idx = max_entries + 1;
- } else {
- /* retore previous DESC_TABLE entry and start
- * to iterate.
- */
- this_level--;
- level_size = XLAT_BLOCK_SIZE(this_level);
- this_base = desc_tables[this_level].table_base;
- table_idx = desc_tables[this_level].idx;
- table_idx_va =
- desc_tables[this_level].table_idx_va;
- if (this_level == level) {
- max_entries = table_entries;
- } else {
- max_entries = XLAT_TABLE_ENTRIES;
- }
+ desc = table_base[table_idx];
- assert(this_base != NULL);
+ if ((desc & DESC_MASK) == INVALID_DESC) {
+
+ if (invalid_row_count == 0) {
+ printf("%sVA:0x%lx size:0x%zx\n",
+ level_spacers[level],
+ table_idx_va, level_size);
}
+ invalid_row_count++;
+
} else {
- uint64_t desc = this_base[table_idx];
- if ((desc & DESC_MASK) == INVALID_DESC) {
- if (invalid_row_count == 0) {
- printf("%sVA:0x%lx size:0x%zx\n",
- level_spacers[this_level],
- table_idx_va, level_size);
- }
- invalid_row_count++;
- table_idx++;
- table_idx_va += level_size;
- } else {
- if (invalid_row_count > 1) {
- printf(invalid_descriptors_ommited,
- level_spacers[this_level],
- invalid_row_count - 1);
- }
- invalid_row_count = 0;
+ if (invalid_row_count > 1) {
+ printf(invalid_descriptors_ommited,
+ level_spacers[level],
+ invalid_row_count - 1);
+ }
+ invalid_row_count = 0;
+
+ /*
+ * Check if this is a table or a block. Tables are only
+ * allowed in levels other than 3, but DESC_PAGE has the
+ * same value as DESC_TABLE, so we need to check.
+ */
+ if (((desc & DESC_MASK) == TABLE_DESC) &&
+ (level < XLAT_TABLE_LEVEL_MAX)) {
/*
- * Check if this is a table or a block. Tables
- * are only allowed in levels other than 3, but
- * DESC_PAGE has the same value as DESC_TABLE,
- * so we need to check.
+ * Do not print any PA for a table descriptor,
+ * as it doesn't directly map physical memory
+ * but instead points to the next translation
+ * table in the translation table walk.
*/
-
- if (((desc & DESC_MASK) == TABLE_DESC) &&
- (this_level < XLAT_TABLE_LEVEL_MAX)) {
- uintptr_t addr_inner;
+ printf("%sVA:0x%lx size:0x%zx\n",
+ level_spacers[level],
+ table_idx_va, level_size);
- /*
- * Do not print any PA for a table
- * descriptor, as it doesn't directly
- * map physical memory but instead
- * points to the next translation
- * table in the translation table walk.
- */
- printf("%sVA:0x%lx size:0x%zx\n",
- level_spacers[this_level],
- table_idx_va, level_size);
+ uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
- addr_inner = desc & TABLE_ADDR_MASK;
- /* save current xlat level */
- desc_tables[this_level].table_base =
- this_base;
- desc_tables[this_level].idx =
- table_idx + 1;
- desc_tables[this_level].table_idx_va =
- table_idx_va + level_size;
-
- /* start iterating next level entries */
- this_base = (uint64_t *)addr_inner;
- max_entries = XLAT_TABLE_ENTRIES;
- this_level++;
- level_size =
- XLAT_BLOCK_SIZE(this_level);
- table_idx = 0U;
- } else {
- printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
- level_spacers[this_level],
- table_idx_va,
- (uint64_t)(desc & TABLE_ADDR_MASK),
- level_size);
- xlat_desc_print(ctx, desc);
- printf("\n");
-
- table_idx++;
- table_idx_va += level_size;
-
- }
+ xlat_tables_print_internal(ctx, table_idx_va,
+ (uint64_t *)addr_inner,
+ XLAT_TABLE_ENTRIES, level + 1U);
+ } else {
+ printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
+ level_spacers[level], table_idx_va,
+ (uint64_t)(desc & TABLE_ADDR_MASK),
+ level_size);
+ xlat_desc_print(ctx, desc);
+ printf("\n");
}
}
+
+ table_idx++;
+ table_idx_va += level_size;
+ }
+
+ if (invalid_row_count > 1) {
+ printf(invalid_descriptors_ommited,
+ level_spacers[level], invalid_row_count - 1);
}
}
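
The restored print routine collapses runs of invalid descriptors: the first invalid entry in a run is printed, the rest are only counted, and a single "(N invalid descriptors omitted)" summary is emitted when the run ends, including a final flush after the loop. A self-contained sketch of that run-length idea, using made-up entry values rather than real descriptors:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Print an array of entries, collapsing runs of invalid (zero) values. */
static void print_entries(const uint64_t *entries, size_t count)
{
	int invalid_row_count = 0;

	for (size_t i = 0U; i < count; i++) {
		if (entries[i] == 0ULL) {	/* treat zero as invalid */
			if (invalid_row_count == 0)
				printf("entry %zu: invalid\n", i);
			invalid_row_count++;
		} else {
			if (invalid_row_count > 1)
				printf("(%d invalid descriptors omitted)\n",
				       invalid_row_count - 1);
			invalid_row_count = 0;
			printf("entry %zu: 0x%llx\n", i,
			       (unsigned long long)entries[i]);
		}
	}

	/* Flush a trailing run, as the function above does after its loop. */
	if (invalid_row_count > 1)
		printf("(%d invalid descriptors omitted)\n",
		       invalid_row_count - 1);
}
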
diff --git a/plat/arm/common/aarch64/arm_pauth.c b/plat/arm/common/aarch64/arm_pauth.c
index c847119..a685c31 100644
--- a/plat/arm/common/aarch64/arm_pauth.c
+++ b/plat/arm/common/aarch64/arm_pauth.c
@@ -9,11 +9,9 @@
/*
* Instruction pointer authentication key A. The low 64-bit are at [0], and the
- * high bits at [1]. They are run-time constants so they are placed in the
- * rodata section. They are written before MMU is turned on and the permissions
- * are effective.
+ * high bits at [1].
*/
-uint64_t plat_apiakey[2] __section("rodata.apiakey");
+uint64_t plat_apiakey[2];
/*
* This is only a toy implementation to generate a seemingly random 128-bit key