blob: 3a9c0588d3f2b7fca971d273a2b9b3af7614e3b4 [file] [log] [blame]
/*
 * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
6
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00007#include <assert.h>
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00008#include <errno.h>
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +01009#include <stdbool.h>
Antonio Nino Diaz4b32e622018-08-16 16:52:57 +010010#include <stdint.h>
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +000011#include <string.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000012
13#include <platform_def.h>
14
Alexei Fedorov90f2e882019-05-24 12:17:09 +010015#include <arch_features.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000016#include <arch_helpers.h>
17#include <common/debug.h>
18#include <lib/utils_def.h>
19#include <lib/xlat_tables/xlat_tables_defs.h>
20#include <lib/xlat_tables/xlat_tables_v2.h>
Sandrine Bailleux090c8492017-05-19 09:59:37 +010021
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +000022#include "xlat_tables_private.h"
23
/*
 * Clean the data cache over [addr, addr + size), but only when the data
 * cache is currently enabled; cleaning a disabled cache is pointless.
 * Marked unused because some build configurations compile it out.
 */
static inline __attribute__((unused)) void xlat_clean_dcache_range(uintptr_t addr, size_t size)
{
	if (is_dcache_enabled()) {
		clean_dcache_range(addr, size);
	}
}
30
Antonio Nino Diazac998032017-02-27 17:23:54 +000031#if PLAT_XLAT_TABLES_DYNAMIC
32
33/*
34 * The following functions assume that they will be called using subtables only.
35 * The base table can't be unmapped, so it is not needed to do any special
36 * handling for it.
37 */
38
39/*
40 * Returns the index of the array corresponding to the specified translation
41 * table.
42 */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010043static int xlat_table_get_index(const xlat_ctx_t *ctx, const uint64_t *table)
Antonio Nino Diazac998032017-02-27 17:23:54 +000044{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010045 for (int i = 0; i < ctx->tables_num; i++)
Antonio Nino Diazac998032017-02-27 17:23:54 +000046 if (ctx->tables[i] == table)
47 return i;
48
49 /*
50 * Maybe we were asked to get the index of the base level table, which
51 * should never happen.
52 */
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +010053 assert(false);
Antonio Nino Diazac998032017-02-27 17:23:54 +000054
55 return -1;
56}
57
58/* Returns a pointer to an empty translation table. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010059static uint64_t *xlat_table_get_empty(const xlat_ctx_t *ctx)
Antonio Nino Diazac998032017-02-27 17:23:54 +000060{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010061 for (int i = 0; i < ctx->tables_num; i++)
Antonio Nino Diazac998032017-02-27 17:23:54 +000062 if (ctx->tables_mapped_regions[i] == 0)
63 return ctx->tables[i];
64
65 return NULL;
66}
67
68/* Increments region count for a given table. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010069static void xlat_table_inc_regions_count(const xlat_ctx_t *ctx,
70 const uint64_t *table)
Antonio Nino Diazac998032017-02-27 17:23:54 +000071{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010072 int idx = xlat_table_get_index(ctx, table);
73
74 ctx->tables_mapped_regions[idx]++;
Antonio Nino Diazac998032017-02-27 17:23:54 +000075}
76
77/* Decrements region count for a given table. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010078static void xlat_table_dec_regions_count(const xlat_ctx_t *ctx,
79 const uint64_t *table)
Antonio Nino Diazac998032017-02-27 17:23:54 +000080{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010081 int idx = xlat_table_get_index(ctx, table);
82
83 ctx->tables_mapped_regions[idx]--;
Antonio Nino Diazac998032017-02-27 17:23:54 +000084}
85
Antonio Nino Diazf1b84f62018-07-03 11:58:49 +010086/* Returns 0 if the specified table isn't empty, otherwise 1. */
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +010087static bool xlat_table_is_empty(const xlat_ctx_t *ctx, const uint64_t *table)
Antonio Nino Diazac998032017-02-27 17:23:54 +000088{
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +010089 return ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)] == 0;
Antonio Nino Diazac998032017-02-27 17:23:54 +000090}
91
92#else /* PLAT_XLAT_TABLES_DYNAMIC */
93
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +000094/* Returns a pointer to the first empty translation table. */
95static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
96{
97 assert(ctx->next_table < ctx->tables_num);
98
99 return ctx->tables[ctx->next_table++];
100}
101
Antonio Nino Diazac998032017-02-27 17:23:54 +0000102#endif /* PLAT_XLAT_TABLES_DYNAMIC */
103
/*
 * Returns a block/page table descriptor for the given level and attributes.
 *
 * ctx:     translation context; its xlat_regime selects the access-permission
 *          and execute-never encodings used.
 * attr:    MT_* attributes of the region (memory type, RW, user, XN,
 *          physical address space, shareability).
 * addr_pa: physical address the descriptor maps; must be aligned to the
 *          block size of 'level'.
 * level:   lookup level the descriptor will be installed at.
 */
uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
		   unsigned long long addr_pa, unsigned int level)
{
	uint64_t desc;
	uint32_t mem_type;
	uint32_t shareability_type;

	/* Make sure that the granularity is fine enough to map this address. */
	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0U);

	desc = addr_pa;
	/*
	 * There are different translation table descriptors for level 3 and the
	 * rest.
	 */
	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
	/*
	 * Always set the access flag, as this library assumes access flag
	 * faults aren't managed.
	 */
	desc |= LOWER_ATTRS(ACCESS_FLAG);

	/* Determine the physical address space this region belongs to. */
	desc |= xlat_arch_get_pas(attr);

	/*
	 * Deduce other fields of the descriptor based on the MT_RW memory
	 * region attributes.
	 */
	desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);

	/*
	 * Do not allow unprivileged access when the mapping is for a privileged
	 * EL. For translation regimes that do not have mappings for access for
	 * lower exception levels, set AP[2] to AP_NO_ACCESS_UNPRIVILEGED.
	 */
	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		if ((attr & MT_USER) != 0U) {
			/* EL0 mapping requested, so we give User access */
			desc |= LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED);
		} else {
			/* EL1 mapping requested, no User access granted */
			desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
		}
	} else {
		/* Single-VA-range regimes must set the RES1 AP bit. */
		assert((ctx->xlat_regime == EL2_REGIME) ||
		       (ctx->xlat_regime == EL3_REGIME));
		desc |= LOWER_ATTRS(AP_ONE_VA_RANGE_RES1);
	}

	/*
	 * Deduce shareability domain and executability of the memory region
	 * from the memory type of the attributes (MT_TYPE).
	 *
	 * Data accesses to device memory and non-cacheable normal memory are
	 * coherent for all observers in the system, and correspondingly are
	 * always treated as being Outer Shareable. Therefore, for these 2 types
	 * of memory, it is not strictly needed to set the shareability field
	 * in the translation tables.
	 */
	mem_type = MT_TYPE(attr);
	if (mem_type == MT_DEVICE) {
		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
		/*
		 * Always map device memory as execute-never.
		 * This is to avoid the possibility of a speculative instruction
		 * fetch, which could be an issue if this memory region
		 * corresponds to a read-sensitive peripheral.
		 */
		desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);

	} else { /* Normal memory */
		/*
		 * Always map read-write normal memory as execute-never.
		 * This library assumes that it is used by software that does
		 * not self-modify its code, therefore R/W memory is reserved
		 * for data storage, which must not be executable.
		 *
		 * Note that setting the XN bit here is for consistency only.
		 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
		 * which makes any writable memory region to be treated as
		 * execute-never, regardless of the value of the XN bit in the
		 * translation table.
		 *
		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
		 * attribute to figure out the value of the XN bit. The actual
		 * XN bit(s) to set in the descriptor depends on the context's
		 * translation regime and the policy applied in
		 * xlat_arch_regime_get_xn_desc().
		 */
		if (((attr & MT_RW) != 0U) || ((attr & MT_EXECUTE_NEVER) != 0U)) {
			desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
		}

		shareability_type = MT_SHAREABILITY(attr);
		if (mem_type == MT_MEMORY) {
			/* Inner/outer write-back write-allocate, per request. */
			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX);
			if (shareability_type == MT_SHAREABILITY_NSH) {
				desc |= LOWER_ATTRS(NSH);
			} else if (shareability_type == MT_SHAREABILITY_OSH) {
				desc |= LOWER_ATTRS(OSH);
			} else {
				/* Default shareability is Inner Shareable. */
				desc |= LOWER_ATTRS(ISH);
			}

			/* Check if Branch Target Identification is enabled */
#if ENABLE_BTI
			/* Set GP bit for block and page code entries
			 * if BTI mechanism is implemented.
			 */
			if (is_armv8_5_bti_present() &&
			    ((attr & (MT_TYPE_MASK | MT_RW |
			    MT_EXECUTE_NEVER)) == MT_CODE)) {
				desc |= GP;
			}
#endif
		} else {
			/* Only remaining normal type is non-cacheable. */
			assert(mem_type == MT_NON_CACHEABLE);
			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
		}
	}

	return desc;
}
231
/*
 * Enumeration of actions that can be made when mapping table entries depending
 * on the previous value in that entry and information about the region being
 * mapped.
 */
typedef enum {

	/* Do nothing */
	ACTION_NONE,

	/* Write a block (or page, if in level 3) entry. */
	ACTION_WRITE_BLOCK_ENTRY,

	/*
	 * Create a new table and write a table entry pointing to it. Recurse
	 * into it for further processing.
	 */
	ACTION_CREATE_NEW_TABLE,

	/*
	 * There is a table descriptor in this entry, read it and recurse into
	 * that table for further processing.
	 */
	ACTION_RECURSE_INTO_TABLE,

} action_t;
258
David Pu5619c802019-02-22 02:23:57 -0800259/*
260 * Function that returns the first VA of the table affected by the specified
261 * mmap region.
262 */
263static uintptr_t xlat_tables_find_start_va(mmap_region_t *mm,
264 const uintptr_t table_base_va,
265 const unsigned int level)
266{
267 uintptr_t table_idx_va;
268
269 if (mm->base_va > table_base_va) {
270 /* Find the first index of the table affected by the region. */
271 table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
272 } else {
273 /* Start from the beginning of the table. */
274 table_idx_va = table_base_va;
275 }
276
277 return table_idx_va;
278}
279
/*
 * Convert a VA into the index of the entry covering it in a table whose
 * first entry maps table_base_va, at the given lookup level.
 */
static inline unsigned int xlat_tables_va_to_index(const uintptr_t table_base_va,
						   const uintptr_t va,
						   const unsigned int level)
{
	uintptr_t offset = va - table_base_va;

	return (unsigned int)(offset >> XLAT_ADDR_SHIFT(level));
}
289
Antonio Nino Diazac998032017-02-27 17:23:54 +0000290#if PLAT_XLAT_TABLES_DYNAMIC
291
/*
 * From the given arguments, it decides which action to take when unmapping the
 * specified region.
 *
 * table_idx_va/table_idx_end_va delimit the VA span of the table entry being
 * examined; desc_type is the descriptor type currently stored there.
 */
static action_t xlat_tables_unmap_region_action(const mmap_region_t *mm,
		const uintptr_t table_idx_va, const uintptr_t table_idx_end_va,
		const unsigned int level, const uint64_t desc_type)
{
	action_t action;
	uintptr_t region_end_va = mm->base_va + mm->size - 1U;

	if ((mm->base_va <= table_idx_va) &&
	    (region_end_va >= table_idx_end_va)) {
		/* Region covers all block */

		if (level == 3U) {
			/*
			 * Last level, only page descriptors allowed,
			 * erase it.
			 */
			assert(desc_type == PAGE_DESC);

			action = ACTION_WRITE_BLOCK_ENTRY;
		} else {
			/*
			 * Other levels can have table descriptors. If
			 * so, recurse into it and erase descriptors
			 * inside it as needed. If there is a block
			 * descriptor, just erase it. If an invalid
			 * descriptor is found, this table isn't
			 * actually mapped, which shouldn't happen.
			 */
			if (desc_type == TABLE_DESC) {
				action = ACTION_RECURSE_INTO_TABLE;
			} else {
				assert(desc_type == BLOCK_DESC);
				action = ACTION_WRITE_BLOCK_ENTRY;
			}
		}

	} else if ((mm->base_va <= table_idx_end_va) ||
		   (region_end_va >= table_idx_va)) {
		/*
		 * Region partially covers block.
		 *
		 * It can't happen in level 3.
		 *
		 * There must be a table descriptor here, if not there
		 * was a problem when mapping the region.
		 *
		 * NOTE(review): this condition uses '||', so it also holds for
		 * entries the region does not touch at all; presumably the
		 * caller only walks entries within the region's range, making
		 * the final ACTION_NONE branch unreachable in practice —
		 * confirm before relying on it.
		 */
		assert(level < 3U);
		assert(desc_type == TABLE_DESC);

		action = ACTION_RECURSE_INTO_TABLE;
	} else {
		/* The region doesn't cover the block at all */
		action = ACTION_NONE;
	}

	return action;
}
/*
 * Recursive function that writes to the translation tables and unmaps the
 * specified region.
 *
 * Walks the entries of 'table_base' (which maps VAs starting at
 * table_base_va at lookup level 'level') that intersect 'mm', invalidating
 * block/page entries and recursing into subtables. Empty subtables have
 * their table descriptor removed. Decrements this table's region count on
 * the way out (except for the base table).
 */
static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
				     const uintptr_t table_base_va,
				     uint64_t *const table_base,
				     const unsigned int table_entries,
				     const unsigned int level)
{
	assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));

	uint64_t *subtable;
	uint64_t desc;

	uintptr_t table_idx_va;
	uintptr_t table_idx_end_va; /* End VA of this entry */

	uintptr_t region_end_va = mm->base_va + mm->size - 1U;

	unsigned int table_idx;

	table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
	table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);

	while (table_idx < table_entries) {

		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1U;

		desc = table_base[table_idx];
		uint64_t desc_type = desc & DESC_MASK;

		action_t action = xlat_tables_unmap_region_action(mm,
				table_idx_va, table_idx_end_va, level,
				desc_type);

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			/* Invalidate the entry, then its stale TLB entry. */
			table_base[table_idx] = INVALID_DESC;
			xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);

			/* Recurse to write into subtable */
			xlat_tables_unmap_region(ctx, mm, table_idx_va,
						 subtable, XLAT_TABLE_ENTRIES,
						 level + 1U);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
			/* Make the updated subtable visible to the MMU walker. */
			xlat_clean_dcache_range((uintptr_t)subtable,
				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
#endif
			/*
			 * If the subtable is now empty, remove its reference.
			 */
			if (xlat_table_is_empty(ctx, subtable)) {
				table_base[table_idx] = INVALID_DESC;
				xlat_arch_tlbi_va(table_idx_va,
						  ctx->xlat_regime);
			}

		} else {
			assert(action == ACTION_NONE);
		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (region_end_va <= table_idx_va)
			break;
	}

	if (level > ctx->base_level)
		xlat_table_dec_regions_count(ctx, table_base);
}
430
431#endif /* PLAT_XLAT_TABLES_DYNAMIC */
432
/*
 * From the given arguments, it decides which action to take when mapping the
 * specified region.
 *
 * desc_type is the descriptor type currently stored in the entry covering
 * [table_entry_base_va, table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1];
 * dest_pa is the PA the region would map at that VA.
 */
static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
		unsigned int desc_type, unsigned long long dest_pa,
		uintptr_t table_entry_base_va, unsigned int level)
{
	uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
	uintptr_t table_entry_end_va =
			table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1U;

	/*
	 * The descriptor types allowed depend on the current table level.
	 */

	if ((mm->base_va <= table_entry_base_va) &&
	    (mm_end_va >= table_entry_end_va)) {

		/*
		 * Table entry is covered by region
		 * --------------------------------
		 *
		 * This means that this table entry can describe the whole
		 * translation with this granularity in principle.
		 */

		if (level == 3U) {
			/*
			 * Last level, only page descriptors are allowed.
			 */
			if (desc_type == PAGE_DESC) {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				return ACTION_NONE;
			} else {
				assert(desc_type == INVALID_DESC);
				return ACTION_WRITE_BLOCK_ENTRY;
			}

		} else {

			/*
			 * Other levels. Table descriptors are allowed. Block
			 * descriptors too, but they have some limitations.
			 */

			if (desc_type == TABLE_DESC) {
				/* There's already a table, recurse into it. */
				return ACTION_RECURSE_INTO_TABLE;

			} else if (desc_type == INVALID_DESC) {
				/*
				 * There's nothing mapped here, create a new
				 * entry.
				 *
				 * Check if the destination granularity allows
				 * us to use a block descriptor or we need a
				 * finer table for it.
				 *
				 * Also, check if the current level allows block
				 * descriptors. If not, create a table instead.
				 */
				if (((dest_pa & XLAT_BLOCK_MASK(level)) != 0U)
				    || (level < MIN_LVL_BLOCK_DESC) ||
				    (mm->granularity < XLAT_BLOCK_SIZE(level)))
					return ACTION_CREATE_NEW_TABLE;
				else
					return ACTION_WRITE_BLOCK_ENTRY;

			} else {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				assert(desc_type == BLOCK_DESC);

				return ACTION_NONE;
			}
		}

	} else if ((mm->base_va <= table_entry_end_va) ||
		   (mm_end_va >= table_entry_base_va)) {

		/*
		 * Region partially covers table entry
		 * -----------------------------------
		 *
		 * This means that this table entry can't describe the whole
		 * translation, a finer table is needed.

		 * There cannot be partial block overlaps in level 3. If that
		 * happens, some of the preliminary checks when adding the
		 * mmap region failed to detect that PA and VA must at least be
		 * aligned to PAGE_SIZE.
		 */
		assert(level < 3U);

		if (desc_type == INVALID_DESC) {
			/*
			 * The block is not fully covered by the region. Create
			 * a new table, recurse into it and try to map the
			 * region with finer granularity.
			 */
			return ACTION_CREATE_NEW_TABLE;

		} else {
			assert(desc_type == TABLE_DESC);
			/*
			 * The block is not fully covered by the region, but
			 * there is already a table here. Recurse into it and
			 * try to map with finer granularity.
			 *
			 * PAGE_DESC for level 3 has the same value as
			 * TABLE_DESC, but this code can't run on a level 3
			 * table because there can't be overlaps in level 3.
			 */
			return ACTION_RECURSE_INTO_TABLE;
		}
	} else {

		/*
		 * This table entry is outside of the region specified in the
		 * arguments, don't write anything to it.
		 */
		return ACTION_NONE;
	}
}
563
/*
 * Recursive function that writes to the translation tables and maps the
 * specified region. On success, it returns the VA of the last byte that was
 * successfully mapped. On error, it returns the VA of the next entry that
 * should have been mapped.
 *
 * 'table_base' maps VAs starting at table_base_va at lookup level 'level'
 * and holds 'table_entries' entries. The only error case is running out of
 * free tables when a new subtable is needed.
 */
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
					uintptr_t table_base_va,
					uint64_t *const table_base,
					unsigned int table_entries,
					unsigned int level)
{
	assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));

	uintptr_t mm_end_va = mm->base_va + mm->size - 1U;

	uintptr_t table_idx_va;
	unsigned long long table_idx_pa;

	uint64_t *subtable;
	uint64_t desc;

	unsigned int table_idx;

	table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
	table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);

#if PLAT_XLAT_TABLES_DYNAMIC
	/* Account for the region in this table so it can be unmapped later. */
	if (level > ctx->base_level)
		xlat_table_inc_regions_count(ctx, table_base);
#endif

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		/* PA that corresponds to this entry's VA within the region. */
		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;

		action_t action = xlat_tables_map_region_action(mm,
			(uint32_t)(desc & DESC_MASK), table_idx_pa,
			table_idx_va, level);

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] =
				xlat_desc(ctx, (uint32_t)mm->attr, table_idx_pa,
					  level);

		} else if (action == ACTION_CREATE_NEW_TABLE) {
			uintptr_t end_va;

			subtable = xlat_table_get_empty(ctx);
			if (subtable == NULL) {
				/* Not enough free tables to map this region */
				return table_idx_va;
			}

			/* Point to new subtable from this one. */
			table_base[table_idx] =
				TABLE_DESC | (uintptr_t)subtable;

			/* Recurse to write into subtable */
			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					       subtable, XLAT_TABLE_ENTRIES,
					       level + 1U);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
			/* Make the updated subtable visible to the MMU walker. */
			xlat_clean_dcache_range((uintptr_t)subtable,
				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
#endif
			/* Propagate failure from the recursive call. */
			if (end_va !=
				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
				return end_va;

		} else if (action == ACTION_RECURSE_INTO_TABLE) {
			uintptr_t end_va;

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
			/* Recurse to write into subtable */
			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					       subtable, XLAT_TABLE_ENTRIES,
					       level + 1U);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
			/* Make the updated subtable visible to the MMU walker. */
			xlat_clean_dcache_range((uintptr_t)subtable,
				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
#endif
			/* Propagate failure from the recursive call. */
			if (end_va !=
				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
				return end_va;

		} else {

			assert(action == ACTION_NONE);

		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (mm_end_va <= table_idx_va)
			break;
	}

	return table_idx_va - 1U;
}
669
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000670/*
671 * Function that verifies that a region can be mapped.
672 * Returns:
673 * 0: Success, the mapping is allowed.
674 * EINVAL: Invalid values were used as arguments.
675 * ERANGE: The memory limits were surpassed.
676 * ENOMEM: There is not enough memory in the mmap array.
677 * EPERM: Region overlaps another one in an invalid way.
678 */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100679static int mmap_add_region_check(const xlat_ctx_t *ctx, const mmap_region_t *mm)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000680{
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100681 unsigned long long base_pa = mm->base_pa;
682 uintptr_t base_va = mm->base_va;
683 size_t size = mm->size;
Sandrine Bailleux8f23fa82017-09-28 21:58:12 +0100684 size_t granularity = mm->granularity;
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100685
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100686 unsigned long long end_pa = base_pa + size - 1U;
687 uintptr_t end_va = base_va + size - 1U;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000688
689 if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
690 !IS_PAGE_ALIGNED(size))
691 return -EINVAL;
692
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100693 if ((granularity != XLAT_BLOCK_SIZE(1U)) &&
694 (granularity != XLAT_BLOCK_SIZE(2U)) &&
695 (granularity != XLAT_BLOCK_SIZE(3U))) {
Sandrine Bailleux8f23fa82017-09-28 21:58:12 +0100696 return -EINVAL;
697 }
698
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000699 /* Check for overflows */
700 if ((base_pa > end_pa) || (base_va > end_va))
701 return -ERANGE;
702
Masahiro Yamadab03d4cd2019-12-26 17:58:52 +0900703 if (end_va > ctx->va_max_address)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000704 return -ERANGE;
705
Masahiro Yamadab03d4cd2019-12-26 17:58:52 +0900706 if (end_pa > ctx->pa_max_address)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000707 return -ERANGE;
708
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100709 /* Check that there is space in the ctx->mmap array */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100710 if (ctx->mmap[ctx->mmap_num - 1].size != 0U)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000711 return -ENOMEM;
712
713 /* Check for PAs and VAs overlaps with all other regions */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100714 for (const mmap_region_t *mm_cursor = ctx->mmap;
715 mm_cursor->size != 0U; ++mm_cursor) {
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000716
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100717 uintptr_t mm_cursor_end_va = mm_cursor->base_va
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100718 + mm_cursor->size - 1U;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000719
720 /*
721 * Check if one of the regions is completely inside the other
722 * one.
723 */
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +0100724 bool fully_overlapped_va =
725 ((base_va >= mm_cursor->base_va) &&
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100726 (end_va <= mm_cursor_end_va)) ||
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +0100727 ((mm_cursor->base_va >= base_va) &&
728 (mm_cursor_end_va <= end_va));
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000729
730 /*
731 * Full VA overlaps are only allowed if both regions are
732 * identity mapped (zero offset) or have the same VA to PA
733 * offset. Also, make sure that it's not the exact same area.
Antonio Nino Diazac998032017-02-27 17:23:54 +0000734 * This can only be done with static regions.
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000735 */
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +0100736 if (fully_overlapped_va) {
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000737
Antonio Nino Diazac998032017-02-27 17:23:54 +0000738#if PLAT_XLAT_TABLES_DYNAMIC
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100739 if (((mm->attr & MT_DYNAMIC) != 0U) ||
740 ((mm_cursor->attr & MT_DYNAMIC) != 0U))
Antonio Nino Diazac998032017-02-27 17:23:54 +0000741 return -EPERM;
742#endif /* PLAT_XLAT_TABLES_DYNAMIC */
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100743 if ((mm_cursor->base_va - mm_cursor->base_pa) !=
744 (base_va - base_pa))
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000745 return -EPERM;
746
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100747 if ((base_va == mm_cursor->base_va) &&
748 (size == mm_cursor->size))
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000749 return -EPERM;
750
751 } else {
752 /*
753 * If the regions do not have fully overlapping VAs,
754 * then they must have fully separated VAs and PAs.
755 * Partial overlaps are not allowed
756 */
757
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100758 unsigned long long mm_cursor_end_pa =
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100759 mm_cursor->base_pa + mm_cursor->size - 1U;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000760
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +0100761 bool separated_pa = (end_pa < mm_cursor->base_pa) ||
762 (base_pa > mm_cursor_end_pa);
763 bool separated_va = (end_va < mm_cursor->base_va) ||
764 (base_va > mm_cursor_end_va);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000765
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +0100766 if (!separated_va || !separated_pa)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000767 return -EPERM;
768 }
769 }
770
771 return 0;
772}
773
Sandrine Bailleux66342932017-07-18 13:26:36 +0100774void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000775{
John Tsichritzisfdd92482018-05-25 09:12:48 +0100776 mmap_region_t *mm_cursor = ctx->mmap, *mm_destination;
Varun Wadekarccbd2e32018-04-03 10:44:41 -0700777 const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100778 const mmap_region_t *mm_last;
779 unsigned long long end_pa = mm->base_pa + mm->size - 1U;
780 uintptr_t end_va = mm->base_va + mm->size - 1U;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000781 int ret;
782
783 /* Ignore empty regions */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100784 if (mm->size == 0U)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000785 return;
786
Antonio Nino Diazac998032017-02-27 17:23:54 +0000787 /* Static regions must be added before initializing the xlat tables. */
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +0100788 assert(!ctx->initialized);
Antonio Nino Diazac998032017-02-27 17:23:54 +0000789
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100790 ret = mmap_add_region_check(ctx, mm);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000791 if (ret != 0) {
792 ERROR("mmap_add_region_check() failed. error %d\n", ret);
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +0100793 assert(false);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000794 return;
795 }
796
797 /*
798 * Find correct place in mmap to insert new region.
799 *
800 * 1 - Lower region VA end first.
801 * 2 - Smaller region size first.
802 *
803 * VA 0 0xFF
804 *
805 * 1st |------|
806 * 2nd |------------|
807 * 3rd |------|
808 * 4th |---|
809 * 5th |---|
810 * 6th |----------|
811 * 7th |-------------------------------------|
812 *
813 * This is required for overlapping regions only. It simplifies adding
814 * regions with the loop in xlat_tables_init_internal because the outer
815 * ones won't overwrite block or page descriptors of regions added
816 * previously.
Antonio Nino Diazac998032017-02-27 17:23:54 +0000817 *
818 * Overlapping is only allowed for static regions.
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000819 */
820
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100821 while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
822 && (mm_cursor->size != 0U)) {
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000823 ++mm_cursor;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100824 }
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000825
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100826 while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
827 (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000828 ++mm_cursor;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100829 }
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000830
Varun Wadekarccbd2e32018-04-03 10:44:41 -0700831 /*
832 * Find the last entry marker in the mmap
833 */
834 mm_last = ctx->mmap;
835 while ((mm_last->size != 0U) && (mm_last < mm_end)) {
836 ++mm_last;
837 }
838
839 /*
840 * Check if we have enough space in the memory mapping table.
841 * This shouldn't happen as we have checked in mmap_add_region_check
842 * that there is free space.
843 */
844 assert(mm_last->size == 0U);
Jeenu Viswambharan58e81482018-04-27 15:06:57 +0100845
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000846 /* Make room for new region by moving other regions up by one place */
John Tsichritzisfdd92482018-05-25 09:12:48 +0100847 mm_destination = mm_cursor + 1;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100848 (void)memmove(mm_destination, mm_cursor,
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000849 (uintptr_t)mm_last - (uintptr_t)mm_cursor);
850
851 /*
852 * Check we haven't lost the empty sentinel from the end of the array.
853 * This shouldn't happen as we have checked in mmap_add_region_check
854 * that there is free space.
855 */
Varun Wadekarccbd2e32018-04-03 10:44:41 -0700856 assert(mm_end->size == 0U);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000857
Douglas Raillardf68d2ed2017-09-12 10:31:49 +0100858 *mm_cursor = *mm;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000859
860 if (end_pa > ctx->max_pa)
861 ctx->max_pa = end_pa;
862 if (end_va > ctx->max_va)
863 ctx->max_va = end_va;
864}
865
Antonio Nino Diazc0033282018-11-20 16:03:11 +0000866/*
867 * Determine the table level closest to the initial lookup level that
868 * can describe this translation. Then, align base VA to the next block
869 * at the determined level.
870 */
871static void mmap_alloc_va_align_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
872{
873 /*
874 * By or'ing the size and base PA the alignment will be the one
875 * corresponding to the smallest boundary of the two of them.
876 *
877 * There are three different cases. For example (for 4 KiB page size):
878 *
879 * +--------------+------------------++--------------+
880 * | PA alignment | Size multiple of || VA alignment |
881 * +--------------+------------------++--------------+
882 * | 2 MiB | 2 MiB || 2 MiB | (1)
883 * | 2 MiB | 4 KiB || 4 KiB | (2)
884 * | 4 KiB | 2 MiB || 4 KiB | (3)
885 * +--------------+------------------++--------------+
886 *
887 * - In (1), it is possible to take advantage of the alignment of the PA
888 * and the size of the region to use a level 2 translation table
889 * instead of a level 3 one.
890 *
891 * - In (2), the size is smaller than a block entry of level 2, so it is
892 * needed to use a level 3 table to describe the region or the library
893 * will map more memory than the desired one.
894 *
895 * - In (3), even though the region has the size of one level 2 block
896 * entry, it isn't possible to describe the translation with a level 2
897 * block entry because of the alignment of the base PA.
898 *
899 * Only bits 47:21 of a level 2 block descriptor are used by the MMU,
900 * bits 20:0 of the resulting address are 0 in this case. Because of
901 * this, the PA generated as result of this translation is aligned to
902 * 2 MiB. The PA that was requested to be mapped is aligned to 4 KiB,
903 * though, which means that the resulting translation is incorrect.
904 * The only way to prevent this is by using a finer granularity.
905 */
906 unsigned long long align_check;
907
908 align_check = mm->base_pa | (unsigned long long)mm->size;
909
910 /*
911 * Assume it is always aligned to level 3. There's no need to check that
912 * level because its block size is PAGE_SIZE. The checks to verify that
913 * the addresses and size are aligned to PAGE_SIZE are inside
914 * mmap_add_region.
915 */
916 for (unsigned int level = ctx->base_level; level <= 2U; ++level) {
917
918 if ((align_check & XLAT_BLOCK_MASK(level)) != 0U)
919 continue;
920
921 mm->base_va = round_up(mm->base_va, XLAT_BLOCK_SIZE(level));
922 return;
923 }
924}
925
926void mmap_add_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
927{
928 mm->base_va = ctx->max_va + 1UL;
929
930 assert(mm->size > 0U);
931
932 mmap_alloc_va_align_ctx(ctx, mm);
933
934 /* Detect overflows. More checks are done in mmap_add_region_check(). */
935 assert(mm->base_va > ctx->max_va);
936
937 mmap_add_region_ctx(ctx, mm);
938}
939
Sandrine Bailleux66342932017-07-18 13:26:36 +0100940void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
941{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100942 const mmap_region_t *mm_cursor = mm;
943
Antonio Nino Diaz2cb864c2018-10-08 16:11:11 +0100944 while (mm_cursor->granularity != 0U) {
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100945 mmap_add_region_ctx(ctx, mm_cursor);
946 mm_cursor++;
Sandrine Bailleux66342932017-07-18 13:26:36 +0100947 }
948}
949
Antonio Nino Diazac998032017-02-27 17:23:54 +0000950#if PLAT_XLAT_TABLES_DYNAMIC
951
/*
 * Insert a dynamic region into the context's mmap array and, if the
 * translation tables of this context are already initialized, map it in the
 * tables immediately.
 *
 * Returns 0 on success (or for an empty region), the error code from
 * mmap_add_region_check() if the region is invalid, or -ENOMEM if there were
 * not enough translation tables to map it (any partial mapping is undone
 * before returning).
 */
int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap;
	const mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
	unsigned long long end_pa = mm->base_pa + mm->size - 1U;
	uintptr_t end_va = mm->base_va + mm->size - 1U;
	int ret;

	/* Nothing to do */
	if (mm->size == 0U)
		return 0;

	/* Now this region is a dynamic one */
	mm->attr |= MT_DYNAMIC;

	ret = mmap_add_region_check(ctx, mm);
	if (ret != 0)
		return ret;

	/*
	 * Find the adequate entry in the mmap array in the same way done for
	 * static regions in mmap_add_region_ctx(): sorted by increasing end
	 * VA first, then by increasing size for equal end VAs.
	 */

	while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
	       && (mm_cursor->size != 0U)) {
		++mm_cursor;
	}

	while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
	       (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
		++mm_cursor;
	}

	/* Make room for new region by moving other regions up by one place */
	(void)memmove(mm_cursor + 1U, mm_cursor,
		      (uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0U);

	*mm_cursor = *mm;

	/*
	 * Update the translation tables if the xlat tables are initialized. If
	 * not, this region will be mapped when they are initialized.
	 */
	if (ctx->initialized) {
		/*
		 * On success end_va is updated to the last VA actually
		 * mapped; a lower value means the tables ran out part-way.
		 */
		end_va = xlat_tables_map_region(ctx, mm_cursor,
				0U, ctx->base_table, ctx->base_table_entries,
				ctx->base_level);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
		xlat_clean_dcache_range((uintptr_t)ctx->base_table,
				   ctx->base_table_entries * sizeof(uint64_t));
#endif
		/* Failed to map, remove mmap entry, unmap and return error. */
		if (end_va != (mm_cursor->base_va + mm_cursor->size - 1U)) {
			/* Compact the array again, dropping the new entry. */
			(void)memmove(mm_cursor, mm_cursor + 1U,
				(uintptr_t)mm_last - (uintptr_t)mm_cursor);

			/*
			 * Check if the mapping function actually managed to map
			 * anything. If not, just return now.
			 */
			if (mm->base_va >= end_va)
				return -ENOMEM;

			/*
			 * Something went wrong after mapping some table
			 * entries, undo every change done up to this point.
			 */
			mmap_region_t unmap_mm = {
					.base_pa = 0U,
					.base_va = mm->base_va,
					.size = end_va - mm->base_va,
					.attr = 0U
			};
			xlat_tables_unmap_region(ctx, &unmap_mm, 0U,
				ctx->base_table, ctx->base_table_entries,
				ctx->base_level);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
			xlat_clean_dcache_range((uintptr_t)ctx->base_table,
				ctx->base_table_entries * sizeof(uint64_t));
#endif
			return -ENOMEM;
		}

		/*
		 * Make sure that all entries are written to the memory. There
		 * is no need to invalidate entries when mapping dynamic regions
		 * because new table/block/page descriptors only replace old
		 * invalid descriptors, that aren't TLB cached.
		 */
		dsbishst();
	}

	/* Track the highest PA/VA mapped through this context. */
	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;

	return 0;
}
1059
Antonio Nino Diazc0033282018-11-20 16:03:11 +00001060int mmap_add_dynamic_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
1061{
1062 mm->base_va = ctx->max_va + 1UL;
1063
1064 if (mm->size == 0U)
1065 return 0;
1066
1067 mmap_alloc_va_align_ctx(ctx, mm);
1068
1069 /* Detect overflows. More checks are done in mmap_add_region_check(). */
1070 if (mm->base_va < ctx->max_va) {
1071 return -ENOMEM;
1072 }
1073
1074 return mmap_add_dynamic_region_ctx(ctx, mm);
1075}
1076
/*
 * Removes the region with given base Virtual Address and size from the given
 * context. Both base_va and size must match the stored region exactly.
 *
 * Returns:
 *        0: Success.
 *   EINVAL: Invalid values were used as arguments (region not found).
 *    EPERM: Tried to remove a static region.
 */
int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
				   size_t size)
{
	mmap_region_t *mm = ctx->mmap;
	const mmap_region_t *mm_last = mm + ctx->mmap_num;
	int update_max_va_needed = 0;
	int update_max_pa_needed = 0;

	/* Check sanity of mmap array. */
	assert(mm[ctx->mmap_num].size == 0U);

	/* Linear search for an exact (base_va, size) match. */
	while (mm->size != 0U) {
		if ((mm->base_va == base_va) && (mm->size == size))
			break;
		++mm;
	}

	/* Check that the region was found */
	if (mm->size == 0U)
		return -EINVAL;

	/* If the region is static it can't be removed */
	if ((mm->attr & MT_DYNAMIC) == 0U)
		return -EPERM;

	/*
	 * Check if this region is using the top VAs or PAs. If so, the
	 * cached maxima must be recomputed after removal.
	 */
	if ((mm->base_va + mm->size - 1U) == ctx->max_va)
		update_max_va_needed = 1;
	if ((mm->base_pa + mm->size - 1U) == ctx->max_pa)
		update_max_pa_needed = 1;

	/* Update the translation tables if needed */
	if (ctx->initialized) {
		xlat_tables_unmap_region(ctx, mm, 0U, ctx->base_table,
					 ctx->base_table_entries,
					 ctx->base_level);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
		xlat_clean_dcache_range((uintptr_t)ctx->base_table,
			ctx->base_table_entries * sizeof(uint64_t));
#endif
		xlat_arch_tlbi_va_sync();
	}

	/*
	 * Remove this region by moving the rest down by one place. This also
	 * shifts the terminating sentinel entry down.
	 */
	(void)memmove(mm, mm + 1U, (uintptr_t)mm_last - (uintptr_t)mm);

	/* Check if we need to update the max VAs and PAs */
	if (update_max_va_needed == 1) {
		ctx->max_va = 0U;
		mm = ctx->mmap;
		while (mm->size != 0U) {
			if ((mm->base_va + mm->size - 1U) > ctx->max_va)
				ctx->max_va = mm->base_va + mm->size - 1U;
			++mm;
		}
	}

	if (update_max_pa_needed == 1) {
		ctx->max_pa = 0U;
		mm = ctx->mmap;
		while (mm->size != 0U) {
			if ((mm->base_pa + mm->size - 1U) > ctx->max_pa)
				ctx->max_pa = mm->base_pa + mm->size - 1U;
			++mm;
		}
	}

	return 0;
}
1155
Antonio Nino Diaz675d1552018-10-30 11:36:47 +00001156void xlat_setup_dynamic_ctx(xlat_ctx_t *ctx, unsigned long long pa_max,
1157 uintptr_t va_max, struct mmap_region *mmap,
1158 unsigned int mmap_num, uint64_t **tables,
1159 unsigned int tables_num, uint64_t *base_table,
1160 int xlat_regime, int *mapped_regions)
1161{
1162 ctx->xlat_regime = xlat_regime;
1163
1164 ctx->pa_max_address = pa_max;
1165 ctx->va_max_address = va_max;
1166
1167 ctx->mmap = mmap;
1168 ctx->mmap_num = mmap_num;
1169 memset(ctx->mmap, 0, sizeof(struct mmap_region) * mmap_num);
1170
1171 ctx->tables = (void *) tables;
1172 ctx->tables_num = tables_num;
1173
1174 uintptr_t va_space_size = va_max + 1;
1175 ctx->base_level = GET_XLAT_TABLE_LEVEL_BASE(va_space_size);
1176 ctx->base_table = base_table;
1177 ctx->base_table_entries = GET_NUM_BASE_LEVEL_ENTRIES(va_space_size);
1178
1179 ctx->tables_mapped_regions = mapped_regions;
1180
1181 ctx->max_pa = 0;
1182 ctx->max_va = 0;
1183 ctx->initialized = 0;
1184}
1185
Antonio Nino Diazac998032017-02-27 17:23:54 +00001186#endif /* PLAT_XLAT_TABLES_DYNAMIC */
1187
Daniel Boulby5a03a252018-08-30 16:48:56 +01001188void __init init_xlat_tables_ctx(xlat_ctx_t *ctx)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001189{
Antonio Nino Diazdcf9d922017-10-04 16:52:15 +01001190 assert(ctx != NULL);
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +01001191 assert(!ctx->initialized);
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001192 assert((ctx->xlat_regime == EL3_REGIME) ||
Antonio Nino Diaz128de8d2018-08-07 19:59:49 +01001193 (ctx->xlat_regime == EL2_REGIME) ||
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001194 (ctx->xlat_regime == EL1_EL0_REGIME));
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +01001195 assert(!is_mmu_enabled_ctx(ctx));
Sandrine Bailleux66342932017-07-18 13:26:36 +01001196
Antonio Nino Diazdcf9d922017-10-04 16:52:15 +01001197 mmap_region_t *mm = ctx->mmap;
Sandrine Bailleux66342932017-07-18 13:26:36 +01001198
Sathees Balya74155972019-01-25 11:36:01 +00001199 assert(ctx->va_max_address >=
1200 (xlat_get_min_virt_addr_space_size() - 1U));
1201 assert(ctx->va_max_address <= (MAX_VIRT_ADDR_SPACE_SIZE - 1U));
1202 assert(IS_POWER_OF_TWO(ctx->va_max_address + 1U));
1203
Antonio Nino Diazf1b84f62018-07-03 11:58:49 +01001204 xlat_mmap_print(mm);
Sandrine Bailleux66342932017-07-18 13:26:36 +01001205
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001206 /* All tables must be zeroed before mapping any region. */
1207
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001208 for (unsigned int i = 0U; i < ctx->base_table_entries; i++)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001209 ctx->base_table[i] = INVALID_DESC;
1210
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001211 for (int j = 0; j < ctx->tables_num; j++) {
Antonio Nino Diazac998032017-02-27 17:23:54 +00001212#if PLAT_XLAT_TABLES_DYNAMIC
1213 ctx->tables_mapped_regions[j] = 0;
1214#endif
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001215 for (unsigned int i = 0U; i < XLAT_TABLE_ENTRIES; i++)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001216 ctx->tables[j][i] = INVALID_DESC;
1217 }
1218
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001219 while (mm->size != 0U) {
1220 uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0U,
1221 ctx->base_table, ctx->base_table_entries,
1222 ctx->base_level);
Antonio Nino Diaz37a5efa2018-08-07 12:47:12 +01001223#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
1224 xlat_clean_dcache_range((uintptr_t)ctx->base_table,
1225 ctx->base_table_entries * sizeof(uint64_t));
1226#endif
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001227 if (end_va != (mm->base_va + mm->size - 1U)) {
Antonio Nino Diazac998032017-02-27 17:23:54 +00001228 ERROR("Not enough memory to map region:\n"
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001229 " VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x\n",
1230 mm->base_va, mm->base_pa, mm->size, mm->attr);
Antonio Nino Diazac998032017-02-27 17:23:54 +00001231 panic();
1232 }
1233
1234 mm++;
1235 }
1236
Sandrine Bailleux46c53a22017-07-11 15:11:10 +01001237 assert(ctx->pa_max_address <= xlat_arch_get_max_supported_pa());
Sandrine Bailleux66342932017-07-18 13:26:36 +01001238 assert(ctx->max_va <= ctx->va_max_address);
1239 assert(ctx->max_pa <= ctx->pa_max_address);
1240
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +01001241 ctx->initialized = true;
Sandrine Bailleuxc5b63772017-05-31 13:31:48 +01001242
1243 xlat_tables_print(ctx);
Sandrine Bailleux66342932017-07-18 13:26:36 +01001244}