Merge pull request #922 from davidcunado-arm/dc/smc_yielding_spds
Migrate secure payload dispatchers to new SMC terminology
diff --git a/include/lib/xlat_tables/xlat_tables_defs.h b/include/lib/xlat_tables/xlat_tables_defs.h
index 3097d9a..c54b729 100644
--- a/include/lib/xlat_tables/xlat_tables_defs.h
+++ b/include/lib/xlat_tables/xlat_tables_defs.h
@@ -32,7 +32,10 @@
#define SECOND_LEVEL_DESC_N TWO_MB_SHIFT
#define THIRD_LEVEL_DESC_N FOUR_KB_SHIFT
+/* XN: Translation regimes that support one VA range (EL2 and EL3). */
#define XN (ULL(1) << 2)
+/* UXN, PXN: Translation regimes that support two VA ranges (EL1&0). */
+#define UXN (ULL(1) << 2)
#define PXN (ULL(1) << 1)
#define CONT_HINT (ULL(1) << 0)
#define UPPER_ATTRS(x) (((x) & ULL(0x7)) << 52)
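
Note (illustration, not part of the patch): the three attributes above sit in the upper-attributes field of a block/page descriptor, so once shifted by UPPER_ATTRS() both XN and UXN land in descriptor bit 54 while PXN lands in bit 53. The standalone sketch below redefines the macros locally (so it compiles on its own) and simply checks those bit positions:

    #include <assert.h>
    #include <stdint.h>

    #define ULL(x)          (x##ULL)
    #define XN              (ULL(1) << 2)   /* EL2/EL3 regimes: one VA range */
    #define UXN             (ULL(1) << 2)   /* EL1&0 regime: unprivileged XN */
    #define PXN             (ULL(1) << 1)   /* EL1&0 regime: privileged XN   */
    #define UPPER_ATTRS(x)  (((x) & ULL(0x7)) << 52)

    int main(void)
    {
        /* XN and UXN share descriptor bit 54; PXN is descriptor bit 53. */
        assert(UPPER_ATTRS(XN)  == (UINT64_C(1) << 54));
        assert(UPPER_ATTRS(UXN) == (UINT64_C(1) << 54));
        assert(UPPER_ATTRS(PXN) == (UINT64_C(1) << 53));
        return 0;
    }
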
diff --git a/lib/xlat_tables/aarch32/xlat_tables.c b/lib/xlat_tables/aarch32/xlat_tables.c
index 03caf36..3c9051c 100644
--- a/lib/xlat_tables/aarch32/xlat_tables.c
+++ b/lib/xlat_tables/aarch32/xlat_tables.c
@@ -69,6 +69,20 @@
}
#endif /* ENABLE_ASSERTIONS */
+int xlat_arch_current_el(void)
+{
+	/*
+	 * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
+	 * SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
+	 */
+	return 3;
+}
+
+uint64_t xlat_arch_get_xn_desc(int el __unused)
+{
+	return UPPER_ATTRS(XN);
+}
+
void init_xlat_tables(void)
{
unsigned long long max_pa;
diff --git a/lib/xlat_tables/aarch64/xlat_tables.c b/lib/xlat_tables/aarch64/xlat_tables.c
index 2126180..309cb9b 100644
--- a/lib/xlat_tables/aarch64/xlat_tables.c
+++ b/lib/xlat_tables/aarch64/xlat_tables.c
@@ -122,6 +122,25 @@
}
#endif /* ENABLE_ASSERTIONS */
+int xlat_arch_current_el(void)
+{
+	int el = GET_EL(read_CurrentEl());
+
+	assert(el > 0);
+
+	return el;
+}
+
+uint64_t xlat_arch_get_xn_desc(int el)
+{
+	if (el == 3) {
+		return UPPER_ATTRS(XN);
+	} else {
+		assert(el == 1);
+		return UPPER_ATTRS(PXN);
+	}
+}
+
void init_xlat_tables(void)
{
unsigned long long max_pa;
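
Note (illustration, not part of the patch): GET_EL(read_CurrentEl()) boils down to reading the AArch64 CurrentEL system register, whose bits [3:2] hold the current Exception Level. The register is only accessible at EL1 or higher, which is why the new function asserts el > 0. A minimal standalone sketch of that extraction, using inline asm instead of the TF-A accessors (hypothetical helper name, AArch64 only):

    #include <stdint.h>

    /* Hypothetical stand-in for GET_EL(read_CurrentEl()). */
    static inline int current_el(void)
    {
        uint64_t reg;

        /* CurrentEL encodes the Exception Level in bits [3:2]. */
        __asm__ volatile("mrs %0, CurrentEL" : "=r"(reg));
        return (int)((reg >> 2) & 0x3);
    }
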
diff --git a/lib/xlat_tables/xlat_tables_common.c b/lib/xlat_tables/xlat_tables_common.c
index f322a9a..17e7e6e 100644
--- a/lib/xlat_tables/xlat_tables_common.c
+++ b/lib/xlat_tables/xlat_tables_common.c
@@ -40,6 +40,8 @@
static unsigned long long xlat_max_pa;
static uintptr_t xlat_max_va;
+static uint64_t execute_never_mask;
+
/*
* Array of all memory regions stored in order of ascending base address.
* The list is terminated by the first entry with size == 0.
@@ -213,7 +215,8 @@
* fetch, which could be an issue if this memory region
* corresponds to a read-sensitive peripheral.
*/
- desc |= UPPER_ATTRS(XN);
+ desc |= execute_never_mask;
+
} else { /* Normal memory */
/*
* Always map read-write normal memory as execute-never.
@@ -221,7 +224,7 @@
* R/W memory is reserved for data storage, which must not be
* executable.)
* Note that setting the XN bit here is for consistency only.
- * The enable_mmu_elx() function sets the SCTLR_EL3.WXN bit,
+ * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
* which makes any writable memory region to be treated as
* execute-never, regardless of the value of the XN bit in the
* translation table.
@@ -229,8 +232,9 @@
* For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
* attribute to figure out the value of the XN bit.
*/
- if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER))
- desc |= UPPER_ATTRS(XN);
+ if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
+ desc |= execute_never_mask;
+ }
if (mem_type == MT_MEMORY) {
desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
@@ -377,7 +381,7 @@
int level, uintptr_t *max_va,
unsigned long long *max_pa)
{
-
+ execute_never_mask = xlat_arch_get_xn_desc(xlat_arch_current_el());
init_xlation_table_inner(mmap, base_va, table, level);
*max_va = xlat_max_va;
*max_pa = xlat_max_pa;
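
Note (illustration, not part of the patch): the comments in xlat_desc() describe when the execute-never mask gets ORed into a descriptor: always for device memory, and for normal memory whenever the region is writable or explicitly marked execute-never. The sketch below captures just that decision; the flag names are hypothetical mirrors of the library's MT_* attributes, not the real ones:

    #include <stdbool.h>

    /* Hypothetical mirrors of the library's mapping attributes. */
    #define ATTR_DEVICE         (1U << 0)
    #define ATTR_RW             (1U << 1)
    #define ATTR_EXECUTE_NEVER  (1U << 2)

    /* Should this region's descriptor carry the execute-never mask? */
    static bool needs_xn(unsigned int attr)
    {
        if (attr & ATTR_DEVICE)
            return true;    /* never allow instruction fetch from device memory */

        /* Writable normal memory, or an explicit XN request, is never executable. */
        return (attr & ATTR_RW) || (attr & ATTR_EXECUTE_NEVER);
    }
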
diff --git a/lib/xlat_tables/xlat_tables_private.h b/lib/xlat_tables/xlat_tables_private.h
index d568dc0..9207852 100644
--- a/lib/xlat_tables/xlat_tables_private.h
+++ b/lib/xlat_tables/xlat_tables_private.h
@@ -65,6 +65,17 @@
#endif /* AARCH32 */
void print_mmap(void);
+
+/* Returns the current Exception Level. The returned EL must be 1 or higher. */
+int xlat_arch_current_el(void);
+
+/*
+ * Returns the bit mask that has to be ORed to the rest of a translation table
+ * descriptor so that execution of code is prohibited at the given Exception
+ * Level.
+ */
+uint64_t xlat_arch_get_xn_desc(int el);
+
void init_xlation_table(uintptr_t base_va, uint64_t *table,
int level, uintptr_t *max_va,
unsigned long long *max_pa);
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index 968a69a..afc65e7 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -67,6 +67,20 @@
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+int xlat_arch_current_el(void)
+{
+	/*
+	 * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
+	 * SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
+	 */
+	return 3;
+}
+
+uint64_t xlat_arch_get_xn_desc(int el __unused)
+{
+	return UPPER_ATTRS(XN);
+}
+
void init_xlat_tables_arch(unsigned long long max_pa)
{
assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <=
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 9ce7840..cc41fc3 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -127,6 +127,25 @@
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+int xlat_arch_current_el(void)
+{
+	int el = GET_EL(read_CurrentEl());
+
+	assert(el > 0);
+
+	return el;
+}
+
+uint64_t xlat_arch_get_xn_desc(int el)
+{
+	if (el == 3) {
+		return UPPER_ATTRS(XN);
+	} else {
+		assert(el == 1);
+		return UPPER_ATTRS(PXN);
+	}
+}
+
void init_xlat_tables_arch(unsigned long long max_pa)
{
assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <=
diff --git a/lib/xlat_tables_v2/xlat_tables_common.c b/lib/xlat_tables_v2/xlat_tables_common.c
index a8d021c..a6f3b7c 100644
--- a/lib/xlat_tables_v2/xlat_tables_common.c
+++ b/lib/xlat_tables_v2/xlat_tables_common.c
@@ -113,6 +113,8 @@
assert(!is_mmu_enabled());
assert(!tf_xlat_ctx.initialized);
print_mmap(tf_xlat_ctx.mmap);
+	tf_xlat_ctx.execute_never_mask =
+			xlat_arch_get_xn_desc(xlat_arch_current_el());
init_xlation_table(&tf_xlat_ctx);
xlat_tables_print(&tf_xlat_ctx);
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c
index fd64813..2d556e6 100644
--- a/lib/xlat_tables_v2/xlat_tables_internal.c
+++ b/lib/xlat_tables_v2/xlat_tables_internal.c
@@ -92,7 +92,7 @@
/* Returns a block/page table descriptor for the given level and attributes. */
static uint64_t xlat_desc(mmap_attr_t attr, unsigned long long addr_pa,
- int level)
+ int level, uint64_t execute_never_mask)
{
uint64_t desc;
int mem_type;
@@ -134,7 +134,8 @@
* fetch, which could be an issue if this memory region
* corresponds to a read-sensitive peripheral.
*/
- desc |= UPPER_ATTRS(XN);
+ desc |= execute_never_mask;
+
} else { /* Normal memory */
/*
* Always map read-write normal memory as execute-never.
@@ -142,7 +143,7 @@
* R/W memory is reserved for data storage, which must not be
* executable.)
* Note that setting the XN bit here is for consistency only.
- * The enable_mmu_elx() function sets the SCTLR_EL3.WXN bit,
+ * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
* which makes any writable memory region to be treated as
* execute-never, regardless of the value of the XN bit in the
* translation table.
@@ -150,8 +151,9 @@
* For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
* attribute to figure out the value of the XN bit.
*/
- if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER))
- desc |= UPPER_ATTRS(XN);
+ if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
+ desc |= execute_never_mask;
+ }
if (mem_type == MT_MEMORY) {
desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
@@ -511,7 +513,8 @@
if (action == ACTION_WRITE_BLOCK_ENTRY) {
table_base[table_idx] =
- xlat_desc(mm->attr, table_idx_pa, level);
+ xlat_desc(mm->attr, table_idx_pa, level,
+ ctx->execute_never_mask);
} else if (action == ACTION_CREATE_NEW_TABLE) {
@@ -916,7 +919,7 @@
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
/* Print the attributes of the specified block descriptor. */
-static void xlat_desc_print(uint64_t desc)
+static void xlat_desc_print(uint64_t desc, uint64_t execute_never_mask)
{
int mem_type_index = ATTR_INDEX_GET(desc);
@@ -931,7 +934,7 @@
tf_printf(LOWER_ATTRS(AP_RO) & desc ? "-RO" : "-RW");
tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
- tf_printf(UPPER_ATTRS(XN) & desc ? "-XN" : "-EXEC");
+ tf_printf(execute_never_mask & desc ? "-XN" : "-EXEC");
}
static const char * const level_spacers[] = {
@@ -950,7 +953,7 @@
*/
static void xlat_tables_print_internal(const uintptr_t table_base_va,
uint64_t *const table_base, const int table_entries,
- const int level)
+ const int level, const uint64_t execute_never_mask)
{
assert(level <= XLAT_TABLE_LEVEL_MAX);
@@ -1011,14 +1014,15 @@
xlat_tables_print_internal(table_idx_va,
(uint64_t *)addr_inner,
- XLAT_TABLE_ENTRIES, level+1);
+ XLAT_TABLE_ENTRIES, level+1,
+ execute_never_mask);
} else {
tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
level_spacers[level],
(void *)table_idx_va,
(unsigned long long)(desc & TABLE_ADDR_MASK),
level_size);
- xlat_desc_print(desc);
+ xlat_desc_print(desc, execute_never_mask);
tf_printf("\n");
}
}
@@ -1039,7 +1043,7 @@
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
xlat_tables_print_internal(0, ctx->base_table, ctx->base_table_entries,
- ctx->base_level);
+ ctx->base_level, ctx->execute_never_mask);
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
}
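
Note (illustration, not part of the patch): xlat_desc_print() now receives the mask because the bit to test differs per regime. With the aarch64 hook added above, an EL1 context uses UPPER_ATTRS(PXN) (descriptor bit 53), so keeping the old hard-coded UPPER_ATTRS(XN) test (bit 54) would have misreported EL1 mappings as "-EXEC". A minimal sketch of the decode:

    #include <stdint.h>

    /* Same descriptor prints as "-XN" or "-EXEC" depending on the regime's mask. */
    static const char *xn_str(uint64_t desc, uint64_t execute_never_mask)
    {
        return (desc & execute_never_mask) ? "-XN" : "-EXEC";
    }
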
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index 1cfbce0..07bf39f 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -84,6 +84,13 @@
/* Set to 1 when the translation tables are initialized. */
int initialized;
+	/*
+	 * Bit mask that has to be ORed to the rest of a translation table
+	 * descriptor in order to prohibit execution of code at the exception
+	 * level of this translation context.
+	 */
+	uint64_t execute_never_mask;
+
} xlat_ctx_t;
#if PLAT_XLAT_TABLES_DYNAMIC
@@ -153,6 +160,16 @@
/*
* Architecture-specific initialization code.
*/
+
+/* Returns the current Exception Level. The returned EL must be 1 or higher. */
+int xlat_arch_current_el(void);
+
+/*
+ * Returns the bit mask that has to be ORed to the rest of a translation table
+ * descriptor so that execution of code is prohibited at the given Exception
+ * Level.
+ */
+uint64_t xlat_arch_get_xn_desc(int el);
/* Execute architecture-specific translation table initialization code. */
void init_xlat_tables_arch(unsigned long long max_pa);
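
Note (illustration, not part of the patch): unlike the v1 library, which keeps a single file-scope execute_never_mask, v2 stores the mask in the translation context, so several contexts targeting different Exception Levels stay independent. A trimmed-down, hypothetical sketch of that shape (mock hook and struct, not the real xlat_ctx_t):

    #include <stdint.h>

    #define UPPER_ATTRS(x)  (((uint64_t)(x) & 0x7ULL) << 52)

    /* Mock of the per-arch hook declared above: EL3 -> XN (bit 54), else PXN (bit 53). */
    static uint64_t xlat_arch_get_xn_desc(int el)
    {
        return (el == 3) ? UPPER_ATTRS(1ULL << 2) : UPPER_ATTRS(1ULL << 1);
    }

    /* Hypothetical, trimmed-down view of a v2-style context. */
    struct my_xlat_ctx {
        int initialized;
        uint64_t execute_never_mask;    /* chosen once from the context's EL */
    };

    static void my_ctx_init(struct my_xlat_ctx *ctx, int el)
    {
        /* Mirrors tf_xlat_ctx.execute_never_mask = xlat_arch_get_xn_desc(...). */
        ctx->execute_never_mask = xlat_arch_get_xn_desc(el);
        ctx->initialized = 1;
    }
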