blob: 80cd0a21f6db12ecdfc0a908683c73240e1bee67 [file] [log] [blame]
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001/*
Antonio Nino Diaz3f518922018-01-05 11:30:36 +00002 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00003 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00005 */
6
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00007#include <arch_helpers.h>
8#include <assert.h>
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00009#include <debug.h>
10#include <errno.h>
11#include <platform_def.h>
12#include <string.h>
13#include <types.h>
Antonio Nino Diazf1b84f62018-07-03 11:58:49 +010014#include <utils_def.h>
Sandrine Bailleux090c8492017-05-19 09:59:37 +010015#include <xlat_tables_defs.h>
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +000016#include <xlat_tables_v2.h>
Sandrine Bailleux090c8492017-05-19 09:59:37 +010017
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +000018#include "xlat_tables_private.h"
19
Antonio Nino Diazac998032017-02-27 17:23:54 +000020#if PLAT_XLAT_TABLES_DYNAMIC
21
22/*
23 * The following functions assume that they will be called using subtables only.
24 * The base table can't be unmapped, so it is not needed to do any special
25 * handling for it.
26 */
27
28/*
29 * Returns the index of the array corresponding to the specified translation
30 * table.
31 */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010032static int xlat_table_get_index(const xlat_ctx_t *ctx, const uint64_t *table)
Antonio Nino Diazac998032017-02-27 17:23:54 +000033{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010034 for (int i = 0; i < ctx->tables_num; i++)
Antonio Nino Diazac998032017-02-27 17:23:54 +000035 if (ctx->tables[i] == table)
36 return i;
37
38 /*
39 * Maybe we were asked to get the index of the base level table, which
40 * should never happen.
41 */
42 assert(0);
43
44 return -1;
45}
46
47/* Returns a pointer to an empty translation table. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010048static uint64_t *xlat_table_get_empty(const xlat_ctx_t *ctx)
Antonio Nino Diazac998032017-02-27 17:23:54 +000049{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010050 for (int i = 0; i < ctx->tables_num; i++)
Antonio Nino Diazac998032017-02-27 17:23:54 +000051 if (ctx->tables_mapped_regions[i] == 0)
52 return ctx->tables[i];
53
54 return NULL;
55}
56
57/* Increments region count for a given table. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010058static void xlat_table_inc_regions_count(const xlat_ctx_t *ctx,
59 const uint64_t *table)
Antonio Nino Diazac998032017-02-27 17:23:54 +000060{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010061 int idx = xlat_table_get_index(ctx, table);
62
63 ctx->tables_mapped_regions[idx]++;
Antonio Nino Diazac998032017-02-27 17:23:54 +000064}
65
66/* Decrements region count for a given table. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010067static void xlat_table_dec_regions_count(const xlat_ctx_t *ctx,
68 const uint64_t *table)
Antonio Nino Diazac998032017-02-27 17:23:54 +000069{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010070 int idx = xlat_table_get_index(ctx, table);
71
72 ctx->tables_mapped_regions[idx]--;
Antonio Nino Diazac998032017-02-27 17:23:54 +000073}
74
Antonio Nino Diazf1b84f62018-07-03 11:58:49 +010075/* Returns 0 if the specified table isn't empty, otherwise 1. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010076static int xlat_table_is_empty(const xlat_ctx_t *ctx, const uint64_t *table)
Antonio Nino Diazac998032017-02-27 17:23:54 +000077{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010078 return (ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)] == 0)
79 ? 1 : 0;
Antonio Nino Diazac998032017-02-27 17:23:54 +000080}
81
82#else /* PLAT_XLAT_TABLES_DYNAMIC */
83
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +000084/* Returns a pointer to the first empty translation table. */
85static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
86{
87 assert(ctx->next_table < ctx->tables_num);
88
89 return ctx->tables[ctx->next_table++];
90}
91
Antonio Nino Diazac998032017-02-27 17:23:54 +000092#endif /* PLAT_XLAT_TABLES_DYNAMIC */
93
/*
 * Returns a block/page table descriptor for the given level and attributes.
 *
 * ctx     - translation context; only its xlat_regime field is read here, to
 *           pick the AP/XN encodings for the EL1&0 or EL3 regime.
 * attr    - MT_xxx memory attributes of the region (type, RW, NS, USER, XN).
 * addr_pa - physical address the descriptor maps; must be aligned to the
 *           block size of 'level'.
 * level   - lookup level the descriptor will be installed at.
 */
uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
		   unsigned long long addr_pa, unsigned int level)
{
	uint64_t desc;
	uint32_t mem_type;

	/* Make sure that the granularity is fine enough to map this address. */
	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0U);

	desc = addr_pa;
	/*
	 * There are different translation table descriptors for level 3 and the
	 * rest.
	 */
	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
	/*
	 * Always set the access flag, as this library assumes access flag
	 * faults aren't managed.
	 */
	desc |= LOWER_ATTRS(ACCESS_FLAG);
	/*
	 * Deduce other fields of the descriptor based on the MT_NS and MT_RW
	 * memory region attributes.
	 */
	desc |= ((attr & MT_NS) != 0U) ? LOWER_ATTRS(NS) : 0U;
	desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);

	/*
	 * Do not allow unprivileged access when the mapping is for a privileged
	 * EL. For translation regimes that do not have mappings for access for
	 * lower exception levels, set AP[2] to AP_NO_ACCESS_UNPRIVILEGED.
	 */
	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		if ((attr & MT_USER) != 0U) {
			/* EL0 mapping requested, so we give User access */
			desc |= LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED);
		} else {
			/* EL1 mapping requested, no User access granted */
			desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
		}
	} else {
		/* Only two regimes are supported by this library. */
		assert(ctx->xlat_regime == EL3_REGIME);
		desc |= LOWER_ATTRS(AP_ONE_VA_RANGE_RES1);
	}

	/*
	 * Deduce shareability domain and executability of the memory region
	 * from the memory type of the attributes (MT_TYPE).
	 *
	 * Data accesses to device memory and non-cacheable normal memory are
	 * coherent for all observers in the system, and correspondingly are
	 * always treated as being Outer Shareable. Therefore, for these 2 types
	 * of memory, it is not strictly needed to set the shareability field
	 * in the translation tables.
	 */
	mem_type = MT_TYPE(attr);
	if (mem_type == MT_DEVICE) {
		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
		/*
		 * Always map device memory as execute-never.
		 * This is to avoid the possibility of a speculative instruction
		 * fetch, which could be an issue if this memory region
		 * corresponds to a read-sensitive peripheral.
		 */
		desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);

	} else { /* Normal memory */
		/*
		 * Always map read-write normal memory as execute-never.
		 * This library assumes that it is used by software that does
		 * not self-modify its code, therefore R/W memory is reserved
		 * for data storage, which must not be executable.
		 *
		 * Note that setting the XN bit here is for consistency only.
		 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
		 * which makes any writable memory region to be treated as
		 * execute-never, regardless of the value of the XN bit in the
		 * translation table.
		 *
		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
		 * attribute to figure out the value of the XN bit. The actual
		 * XN bit(s) to set in the descriptor depends on the context's
		 * translation regime and the policy applied in
		 * xlat_arch_regime_get_xn_desc().
		 */
		if (((attr & MT_RW) != 0U) || ((attr & MT_EXECUTE_NEVER) != 0U)) {
			desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
		}

		if (mem_type == MT_MEMORY) {
			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
		} else {
			/* Only remaining normal type is non-cacheable. */
			assert(mem_type == MT_NON_CACHEABLE);
			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
		}
	}

	return desc;
}
196
/*
 * Enumeration of actions that can be made when mapping table entries depending
 * on the previous value in that entry and information about the region being
 * mapped.
 */
typedef enum {

	/* Do nothing */
	ACTION_NONE,

	/* Write a block (or page, if in level 3) entry. */
	ACTION_WRITE_BLOCK_ENTRY,

	/*
	 * Create a new table and write a table entry pointing to it. Recurse
	 * into it for further processing.
	 */
	ACTION_CREATE_NEW_TABLE,

	/*
	 * There is a table descriptor in this entry, read it and recurse into
	 * that table for further processing.
	 */
	ACTION_RECURSE_INTO_TABLE,

} action_t;
223
Antonio Nino Diazac998032017-02-27 17:23:54 +0000224#if PLAT_XLAT_TABLES_DYNAMIC
225
/*
 * Recursive function that writes to the translation tables and unmaps the
 * specified region.
 *
 * ctx           - translation context owning the tables.
 * mm            - region to unmap; must describe a previously mapped region.
 * table_base_va - first VA covered by table_base.
 * table_base    - translation table whose entries are erased at this level.
 * table_entries - number of entries in table_base.
 * level         - lookup level of table_base (subtables only; the base table
 *                 is never unmapped, see the comment at the top of this
 *                 section).
 */
static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
				     const uintptr_t table_base_va,
				     uint64_t *const table_base,
				     const unsigned int table_entries,
				     const unsigned int level)
{
	assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));

	uint64_t *subtable;
	uint64_t desc;

	uintptr_t table_idx_va;
	uintptr_t table_idx_end_va; /* End VA of this entry */

	/* Last VA (inclusive) of the region being unmapped. */
	uintptr_t region_end_va = mm->base_va + mm->size - 1U;

	unsigned int table_idx;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

		table_idx = (unsigned int)((table_idx_va - table_base_va) >>
			    XLAT_ADDR_SHIFT(level));

		assert(table_idx < table_entries);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
		table_idx = 0;
	}

	while (table_idx < table_entries) {

		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1U;

		desc = table_base[table_idx];
		uint64_t desc_type = desc & DESC_MASK;

		action_t action;

		if ((mm->base_va <= table_idx_va) &&
		    (region_end_va >= table_idx_end_va)) {
			/* Region covers all block */

			if (level == 3U) {
				/*
				 * Last level, only page descriptors allowed,
				 * erase it.
				 */
				assert(desc_type == PAGE_DESC);

				action = ACTION_WRITE_BLOCK_ENTRY;
			} else {
				/*
				 * Other levels can have table descriptors. If
				 * so, recurse into it and erase descriptors
				 * inside it as needed. If there is a block
				 * descriptor, just erase it. If an invalid
				 * descriptor is found, this table isn't
				 * actually mapped, which shouldn't happen.
				 */
				if (desc_type == TABLE_DESC) {
					action = ACTION_RECURSE_INTO_TABLE;
				} else {
					assert(desc_type == BLOCK_DESC);
					action = ACTION_WRITE_BLOCK_ENTRY;
				}
			}

		} else if ((mm->base_va <= table_idx_end_va) ||
			   (region_end_va >= table_idx_va)) {
			/*
			 * Region partially covers block.
			 *
			 * It can't happen in level 3.
			 *
			 * There must be a table descriptor here, if not there
			 * was a problem when mapping the region.
			 *
			 * NOTE(review): because the loop starts at the first
			 * affected index and exits once region_end_va is
			 * passed, both sides of this OR appear to always hold
			 * inside the loop, making the ACTION_NONE branch below
			 * effectively unreachable — confirm before changing.
			 */
			assert(level < 3U);
			assert(desc_type == TABLE_DESC);

			action = ACTION_RECURSE_INTO_TABLE;
		} else {
			/* The region doesn't cover the block at all */
			action = ACTION_NONE;
		}

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			/* Erase the entry, then invalidate any cached copy. */
			table_base[table_idx] = INVALID_DESC;
			xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);

			/* Recurse to write into subtable */
			xlat_tables_unmap_region(ctx, mm, table_idx_va,
						 subtable, XLAT_TABLE_ENTRIES,
						 level + 1U);

			/*
			 * If the subtable is now empty, remove its reference.
			 */
			if (xlat_table_is_empty(ctx, subtable) != 0) {
				table_base[table_idx] = INVALID_DESC;
				xlat_arch_tlbi_va(table_idx_va,
						  ctx->xlat_regime);
			}

		} else {
			assert(action == ACTION_NONE);
		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (region_end_va <= table_idx_va)
			break;
	}

	/* This table no longer holds part of the region just unmapped. */
	if (level > ctx->base_level)
		xlat_table_dec_regions_count(ctx, table_base);
}
357
358#endif /* PLAT_XLAT_TABLES_DYNAMIC */
359
/*
 * From the given arguments, it decides which action to take when mapping the
 * specified region.
 *
 * mm                  - region being mapped.
 * desc_type           - type bits of the descriptor currently in the entry.
 * dest_pa             - PA this entry would map to if written.
 * table_entry_base_va - first VA covered by this table entry.
 * level               - current lookup level.
 */
static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
		unsigned int desc_type, unsigned long long dest_pa,
		uintptr_t table_entry_base_va, unsigned int level)
{
	/* Both end addresses are inclusive. */
	uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
	uintptr_t table_entry_end_va =
			table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1U;

	/*
	 * The descriptor types allowed depend on the current table level.
	 */

	if ((mm->base_va <= table_entry_base_va) &&
	    (mm_end_va >= table_entry_end_va)) {

		/*
		 * Table entry is covered by region
		 * --------------------------------
		 *
		 * This means that this table entry can describe the whole
		 * translation with this granularity in principle.
		 */

		if (level == 3U) {
			/*
			 * Last level, only page descriptors are allowed.
			 */
			if (desc_type == PAGE_DESC) {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				return ACTION_NONE;
			} else {
				assert(desc_type == INVALID_DESC);
				return ACTION_WRITE_BLOCK_ENTRY;
			}

		} else {

			/*
			 * Other levels. Table descriptors are allowed. Block
			 * descriptors too, but they have some limitations.
			 */

			if (desc_type == TABLE_DESC) {
				/* There's already a table, recurse into it. */
				return ACTION_RECURSE_INTO_TABLE;

			} else if (desc_type == INVALID_DESC) {
				/*
				 * There's nothing mapped here, create a new
				 * entry.
				 *
				 * Check if the destination granularity allows
				 * us to use a block descriptor or we need a
				 * finer table for it.
				 *
				 * Also, check if the current level allows block
				 * descriptors. If not, create a table instead.
				 */
				if (((dest_pa & XLAT_BLOCK_MASK(level)) != 0U)
				    || (level < MIN_LVL_BLOCK_DESC) ||
				    (mm->granularity < XLAT_BLOCK_SIZE(level)))
					return ACTION_CREATE_NEW_TABLE;
				else
					return ACTION_WRITE_BLOCK_ENTRY;

			} else {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				assert(desc_type == BLOCK_DESC);

				return ACTION_NONE;
			}
		}

	} else if ((mm->base_va <= table_entry_end_va) ||
		   (mm_end_va >= table_entry_base_va)) {

		/*
		 * Region partially covers table entry
		 * -----------------------------------
		 *
		 * This means that this table entry can't describe the whole
		 * translation, a finer table is needed.

		 * There cannot be partial block overlaps in level 3. If that
		 * happens, some of the preliminary checks when adding the
		 * mmap region failed to detect that PA and VA must at least be
		 * aligned to PAGE_SIZE.
		 */
		assert(level < 3U);

		if (desc_type == INVALID_DESC) {
			/*
			 * The block is not fully covered by the region. Create
			 * a new table, recurse into it and try to map the
			 * region with finer granularity.
			 */
			return ACTION_CREATE_NEW_TABLE;

		} else {
			assert(desc_type == TABLE_DESC);
			/*
			 * The block is not fully covered by the region, but
			 * there is already a table here. Recurse into it and
			 * try to map with finer granularity.
			 *
			 * PAGE_DESC for level 3 has the same value as
			 * TABLE_DESC, but this code can't run on a level 3
			 * table because there can't be overlaps in level 3.
			 */
			return ACTION_RECURSE_INTO_TABLE;
		}
	} else {

		/*
		 * This table entry is outside of the region specified in the
		 * arguments, don't write anything to it.
		 */
		return ACTION_NONE;
	}
}
490
/*
 * Recursive function that writes to the translation tables and maps the
 * specified region. On success, it returns the VA of the last byte that was
 * successfully mapped. On error, it returns the VA of the next entry that
 * should have been mapped.
 *
 * ctx           - translation context owning the tables.
 * mm            - region to map.
 * table_base_va - first VA covered by table_base.
 * table_base    - translation table to write entries into at this level.
 * table_entries - number of entries in table_base.
 * level         - lookup level of table_base.
 */
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
					uintptr_t table_base_va,
					uint64_t *const table_base,
					unsigned int table_entries,
					unsigned int level)
{
	assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));

	/* Last VA (inclusive) of the region being mapped. */
	uintptr_t mm_end_va = mm->base_va + mm->size - 1U;

	uintptr_t table_idx_va;
	unsigned long long table_idx_pa;

	uint64_t *subtable;
	uint64_t desc;

	unsigned int table_idx;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

		table_idx = (unsigned int)((table_idx_va - table_base_va) >>
			    XLAT_ADDR_SHIFT(level));

		assert(table_idx < table_entries);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
		table_idx = 0U;
	}

#if PLAT_XLAT_TABLES_DYNAMIC
	/* This table now holds (part of) one more region. */
	if (level > ctx->base_level)
		xlat_table_inc_regions_count(ctx, table_base);
#endif

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		/* PA that corresponds to the VA of this entry. */
		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;

		action_t action = xlat_tables_map_region_action(mm,
			(uint32_t)(desc & DESC_MASK), table_idx_pa,
			table_idx_va, level);

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] =
				xlat_desc(ctx, (uint32_t)mm->attr, table_idx_pa,
					  level);

		} else if (action == ACTION_CREATE_NEW_TABLE) {
			uintptr_t end_va;

			subtable = xlat_table_get_empty(ctx);
			if (subtable == NULL) {
				/* Not enough free tables to map this region */
				return table_idx_va;
			}

			/* Point to new subtable from this one. */
			table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;

			/* Recurse to write into subtable */
			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					       subtable, XLAT_TABLE_ENTRIES,
					       level + 1U);
			/*
			 * Propagate failure: a recursion that did not reach
			 * the end of this entry's VA range ran out of tables.
			 */
			if (end_va !=
				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
				return end_va;

		} else if (action == ACTION_RECURSE_INTO_TABLE) {
			uintptr_t end_va;

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
			/* Recurse to write into subtable */
			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					       subtable, XLAT_TABLE_ENTRIES,
					       level + 1U);
			/* Propagate failure, as above. */
			if (end_va !=
				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
				return end_va;

		} else {

			assert(action == ACTION_NONE);

		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (mm_end_va <= table_idx_va)
			break;
	}

	/* Last VA (inclusive) mapped at this level. */
	return table_idx_va - 1U;
}
598
/*
 * Function that verifies that a region can be mapped.
 * Returns:
 *        0: Success, the mapping is allowed.
 *   EINVAL: Invalid values were used as arguments.
 *   ERANGE: The memory limits were surpassed.
 *   ENOMEM: There is not enough memory in the mmap array.
 *    EPERM: Region overlaps another one in an invalid way.
 *
 * (Error values are returned negated, e.g. -EINVAL.)
 */
static int mmap_add_region_check(const xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	unsigned long long base_pa = mm->base_pa;
	uintptr_t base_va = mm->base_va;
	size_t size = mm->size;
	size_t granularity = mm->granularity;

	/* End addresses are inclusive. */
	unsigned long long end_pa = base_pa + size - 1U;
	uintptr_t end_va = base_va + size - 1U;

	if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
	    !IS_PAGE_ALIGNED(size))
		return -EINVAL;

	/* Granularity must match the block size of some lookup level. */
	if ((granularity != XLAT_BLOCK_SIZE(1U)) &&
	    (granularity != XLAT_BLOCK_SIZE(2U)) &&
	    (granularity != XLAT_BLOCK_SIZE(3U))) {
		return -EINVAL;
	}

	/* Check for overflows */
	if ((base_pa > end_pa) || (base_va > end_va))
		return -ERANGE;

	if ((base_va + (uintptr_t)size - (uintptr_t)1) > ctx->va_max_address)
		return -ERANGE;

	if ((base_pa + (unsigned long long)size - 1ULL) > ctx->pa_max_address)
		return -ERANGE;

	/* Check that there is space in the ctx->mmap array */
	if (ctx->mmap[ctx->mmap_num - 1].size != 0U)
		return -ENOMEM;

	/* Check for PAs and VAs overlaps with all other regions */
	for (const mmap_region_t *mm_cursor = ctx->mmap;
	     mm_cursor->size != 0U; ++mm_cursor) {

		uintptr_t mm_cursor_end_va = mm_cursor->base_va
			+ mm_cursor->size - 1U;

		/*
		 * Check if one of the regions is completely inside the other
		 * one.
		 */
		int fully_overlapped_va =
			(((base_va >= mm_cursor->base_va) &&
			  (end_va <= mm_cursor_end_va)) ||
			 ((mm_cursor->base_va >= base_va) &&
			  (mm_cursor_end_va <= end_va)))
			? 1 : 0;

		/*
		 * Full VA overlaps are only allowed if both regions are
		 * identity mapped (zero offset) or have the same VA to PA
		 * offset. Also, make sure that it's not the exact same area.
		 * This can only be done with static regions.
		 */
		if (fully_overlapped_va != 0) {

#if PLAT_XLAT_TABLES_DYNAMIC
			/* Dynamic regions may never overlap anything. */
			if (((mm->attr & MT_DYNAMIC) != 0U) ||
			    ((mm_cursor->attr & MT_DYNAMIC) != 0U))
				return -EPERM;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
			if ((mm_cursor->base_va - mm_cursor->base_pa) !=
			    (base_va - base_pa))
				return -EPERM;

			if ((base_va == mm_cursor->base_va) &&
			    (size == mm_cursor->size))
				return -EPERM;

		} else {
			/*
			 * If the regions do not have fully overlapping VAs,
			 * then they must have fully separated VAs and PAs.
			 * Partial overlaps are not allowed
			 */

			unsigned long long mm_cursor_end_pa =
				mm_cursor->base_pa + mm_cursor->size - 1U;

			int separated_pa = ((end_pa < mm_cursor->base_pa) ||
				(base_pa > mm_cursor_end_pa)) ? 1 : 0;
			int separated_va = ((end_va < mm_cursor->base_va) ||
				(base_va > mm_cursor_end_va)) ? 1 : 0;

			if ((separated_va == 0) || (separated_pa == 0))
				return -EPERM;
		}
	}

	return 0;
}
703
/*
 * Add a static memory region to the context's mmap array.
 *
 * The array is kept sorted by ascending region end VA and, among regions with
 * the same end VA, by ascending size. This ordering is what lets overlapping
 * regions coexist: when the tables are later filled in order, outer regions
 * never overwrite block/page descriptors belonging to regions inserted before
 * them. Overlapping is only allowed for static regions.
 *
 * Must be called before the translation tables of this context are
 * initialized (asserted below). On a failed sanity check the error is
 * printed and the region is skipped; debug builds additionally assert.
 */
void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap, *mm_destination;
	const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num;
	const mmap_region_t *mm_last;
	/* Inclusive end addresses of the new region. */
	unsigned long long end_pa = mm->base_pa + mm->size - 1U;
	uintptr_t end_va = mm->base_va + mm->size - 1U;
	int ret;

	/* Ignore empty regions */
	if (mm->size == 0U)
		return;

	/* Static regions must be added before initializing the xlat tables. */
	assert(ctx->initialized == 0);

	ret = mmap_add_region_check(ctx, mm);
	if (ret != 0) {
		ERROR("mmap_add_region_check() failed. error %d\n", ret);
		assert(0);
		return;
	}

	/*
	 * Find correct place in mmap to insert new region.
	 *
	 * 1 - Lower region VA end first.
	 * 2 - Smaller region size first.
	 *
	 * VA  0                                   0xFF
	 *
	 * 1st |------|
	 * 2nd |------------|
	 * 3rd                 |------|
	 * 4th                            |---|
	 * 5th                                   |---|
	 * 6th                            |----------|
	 * 7th |-------------------------------------|
	 *
	 * This is required for overlapping regions only. It simplifies adding
	 * regions with the loop in xlat_tables_init_internal because the outer
	 * ones won't overwrite block or page descriptors of regions added
	 * previously.
	 *
	 * Overlapping is only allowed for static regions.
	 */

	/* Skip all entries whose end VA is strictly below the new one. */
	while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
	       && (mm_cursor->size != 0U)) {
		++mm_cursor;
	}

	/* Among entries with the same end VA, keep smaller sizes first. */
	while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
	       (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
		++mm_cursor;
	}

	/*
	 * Find the last entry marker in the mmap
	 */
	mm_last = ctx->mmap;
	while ((mm_last->size != 0U) && (mm_last < mm_end)) {
		++mm_last;
	}

	/*
	 * Check if we have enough space in the memory mapping table.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0U);

	/* Make room for new region by moving other regions up by one place */
	mm_destination = mm_cursor + 1;
	(void)memmove(mm_destination, mm_cursor,
		(uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_end->size == 0U);

	/* Insert the new region into the slot opened by the memmove. */
	*mm_cursor = *mm;

	/* Track the highest mapped PA/VA seen so far in this context. */
	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;
}
795
Sandrine Bailleux66342932017-07-18 13:26:36 +0100796void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
797{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100798 const mmap_region_t *mm_cursor = mm;
799
800 while (mm_cursor->size != 0U) {
801 mmap_add_region_ctx(ctx, mm_cursor);
802 mm_cursor++;
Sandrine Bailleux66342932017-07-18 13:26:36 +0100803 }
804}
805
Antonio Nino Diazac998032017-02-27 17:23:54 +0000806#if PLAT_XLAT_TABLES_DYNAMIC
807
/*
 * Add a dynamic region to a context.
 *
 * Unlike static regions, dynamic regions may be added after the translation
 * tables of the context have been initialized; in that case the tables are
 * updated immediately, and any partially created mapping is rolled back on
 * failure. The region is inserted into the mmap array with the same ordering
 * rules used for static regions in mmap_add_region_ctx().
 *
 * Returns 0 on success, or a negative error code otherwise (e.g. -ENOMEM if
 * there are not enough translation tables to map the whole region).
 * Note: mm->attr is modified (MT_DYNAMIC is set).
 */
int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap;
	/* Address of the terminating sentinel slot of the mmap array. */
	const mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
	/* Inclusive end addresses of the new region. */
	unsigned long long end_pa = mm->base_pa + mm->size - 1U;
	uintptr_t end_va = mm->base_va + mm->size - 1U;
	int ret;

	/* Nothing to do */
	if (mm->size == 0U)
		return 0;

	/* Now this region is a dynamic one */
	mm->attr |= MT_DYNAMIC;

	ret = mmap_add_region_check(ctx, mm);
	if (ret != 0)
		return ret;

	/*
	 * Find the adequate entry in the mmap array in the same way done for
	 * static regions in mmap_add_region_ctx().
	 */

	/* Skip all entries whose end VA is strictly below the new one. */
	while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
	       && (mm_cursor->size != 0U)) {
		++mm_cursor;
	}

	/* Among entries with the same end VA, keep smaller sizes first. */
	while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
	       (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
		++mm_cursor;
	}

	/* Make room for new region by moving other regions up by one place */
	(void)memmove(mm_cursor + 1U, mm_cursor,
		      (uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0U);

	/* Insert the new region into the slot opened by the memmove. */
	*mm_cursor = *mm;

	/*
	 * Update the translation tables if the xlat tables are initialized. If
	 * not, this region will be mapped when they are initialized.
	 */
	if (ctx->initialized != 0) {
		/* Returns the last VA it managed to map (inclusive). */
		end_va = xlat_tables_map_region(ctx, mm_cursor,
				0U, ctx->base_table, ctx->base_table_entries,
				ctx->base_level);

		/* Failed to map, remove mmap entry, unmap and return error. */
		if (end_va != (mm_cursor->base_va + mm_cursor->size - 1U)) {
			(void)memmove(mm_cursor, mm_cursor + 1U,
				      (uintptr_t)mm_last - (uintptr_t)mm_cursor);

			/*
			 * Check if the mapping function actually managed to map
			 * anything. If not, just return now.
			 */
			if (mm->base_va >= end_va)
				return -ENOMEM;

			/*
			 * Something went wrong after mapping some table
			 * entries, undo every change done up to this point.
			 */
			mmap_region_t unmap_mm = {
				.base_pa = 0U,
				.base_va = mm->base_va,
				.size = end_va - mm->base_va,
				.attr = 0U
			};
			xlat_tables_unmap_region(ctx, &unmap_mm, 0U,
				ctx->base_table, ctx->base_table_entries,
				ctx->base_level);

			return -ENOMEM;
		}

		/*
		 * Make sure that all entries are written to the memory. There
		 * is no need to invalidate entries when mapping dynamic regions
		 * because new table/block/page descriptors only replace old
		 * invalid descriptors, that aren't TLB cached.
		 */
		dsbishst();
	}

	/* Track the highest mapped PA/VA seen so far in this context. */
	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;

	return 0;
}
909
910/*
911 * Removes the region with given base Virtual Address and size from the given
912 * context.
913 *
914 * Returns:
915 * 0: Success.
916 * EINVAL: Invalid values were used as arguments (region not found).
917 * EPERM: Tried to remove a static region.
918 */
919int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
920 size_t size)
921{
922 mmap_region_t *mm = ctx->mmap;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100923 const mmap_region_t *mm_last = mm + ctx->mmap_num;
Antonio Nino Diazac998032017-02-27 17:23:54 +0000924 int update_max_va_needed = 0;
925 int update_max_pa_needed = 0;
926
927 /* Check sanity of mmap array. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100928 assert(mm[ctx->mmap_num].size == 0U);
Antonio Nino Diazac998032017-02-27 17:23:54 +0000929
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100930 while (mm->size != 0U) {
Antonio Nino Diazac998032017-02-27 17:23:54 +0000931 if ((mm->base_va == base_va) && (mm->size == size))
932 break;
933 ++mm;
934 }
935
936 /* Check that the region was found */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100937 if (mm->size == 0U)
Antonio Nino Diazac998032017-02-27 17:23:54 +0000938 return -EINVAL;
939
940 /* If the region is static it can't be removed */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100941 if ((mm->attr & MT_DYNAMIC) == 0U)
Antonio Nino Diazac998032017-02-27 17:23:54 +0000942 return -EPERM;
943
944 /* Check if this region is using the top VAs or PAs. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100945 if ((mm->base_va + mm->size - 1U) == ctx->max_va)
Antonio Nino Diazac998032017-02-27 17:23:54 +0000946 update_max_va_needed = 1;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100947 if ((mm->base_pa + mm->size - 1U) == ctx->max_pa)
Antonio Nino Diazac998032017-02-27 17:23:54 +0000948 update_max_pa_needed = 1;
949
950 /* Update the translation tables if needed */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100951 if (ctx->initialized != 0) {
952 xlat_tables_unmap_region(ctx, mm, 0U, ctx->base_table,
Antonio Nino Diazac998032017-02-27 17:23:54 +0000953 ctx->base_table_entries,
954 ctx->base_level);
955 xlat_arch_tlbi_va_sync();
956 }
957
958 /* Remove this region by moving the rest down by one place. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100959 (void)memmove(mm, mm + 1U, (uintptr_t)mm_last - (uintptr_t)mm);
Antonio Nino Diazac998032017-02-27 17:23:54 +0000960
961 /* Check if we need to update the max VAs and PAs */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100962 if (update_max_va_needed == 1) {
963 ctx->max_va = 0U;
Antonio Nino Diazac998032017-02-27 17:23:54 +0000964 mm = ctx->mmap;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100965 while (mm->size != 0U) {
966 if ((mm->base_va + mm->size - 1U) > ctx->max_va)
967 ctx->max_va = mm->base_va + mm->size - 1U;
Antonio Nino Diazac998032017-02-27 17:23:54 +0000968 ++mm;
969 }
970 }
971
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100972 if (update_max_pa_needed == 1) {
973 ctx->max_pa = 0U;
Antonio Nino Diazac998032017-02-27 17:23:54 +0000974 mm = ctx->mmap;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100975 while (mm->size != 0U) {
976 if ((mm->base_pa + mm->size - 1U) > ctx->max_pa)
977 ctx->max_pa = mm->base_pa + mm->size - 1U;
Antonio Nino Diazac998032017-02-27 17:23:54 +0000978 ++mm;
979 }
980 }
981
982 return 0;
983}
984
985#endif /* PLAT_XLAT_TABLES_DYNAMIC */
986
Sandrine Bailleux66342932017-07-18 13:26:36 +0100987void init_xlat_tables_ctx(xlat_ctx_t *ctx)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000988{
Antonio Nino Diazdcf9d922017-10-04 16:52:15 +0100989 assert(ctx != NULL);
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100990 assert(ctx->initialized == 0);
991 assert((ctx->xlat_regime == EL3_REGIME) ||
992 (ctx->xlat_regime == EL1_EL0_REGIME));
993 assert(is_mmu_enabled_ctx(ctx) == 0);
Sandrine Bailleux66342932017-07-18 13:26:36 +0100994
Antonio Nino Diazdcf9d922017-10-04 16:52:15 +0100995 mmap_region_t *mm = ctx->mmap;
Sandrine Bailleux66342932017-07-18 13:26:36 +0100996
Antonio Nino Diazf1b84f62018-07-03 11:58:49 +0100997 xlat_mmap_print(mm);
Sandrine Bailleux66342932017-07-18 13:26:36 +0100998
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000999 /* All tables must be zeroed before mapping any region. */
1000
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001001 for (unsigned int i = 0U; i < ctx->base_table_entries; i++)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001002 ctx->base_table[i] = INVALID_DESC;
1003
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001004 for (int j = 0; j < ctx->tables_num; j++) {
Antonio Nino Diazac998032017-02-27 17:23:54 +00001005#if PLAT_XLAT_TABLES_DYNAMIC
1006 ctx->tables_mapped_regions[j] = 0;
1007#endif
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001008 for (unsigned int i = 0U; i < XLAT_TABLE_ENTRIES; i++)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001009 ctx->tables[j][i] = INVALID_DESC;
1010 }
1011
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001012 while (mm->size != 0U) {
1013 uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0U,
1014 ctx->base_table, ctx->base_table_entries,
1015 ctx->base_level);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001016
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001017 if (end_va != (mm->base_va + mm->size - 1U)) {
Antonio Nino Diazac998032017-02-27 17:23:54 +00001018 ERROR("Not enough memory to map region:\n"
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001019 " VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x\n",
1020 mm->base_va, mm->base_pa, mm->size, mm->attr);
Antonio Nino Diazac998032017-02-27 17:23:54 +00001021 panic();
1022 }
1023
1024 mm++;
1025 }
1026
Sandrine Bailleux46c53a22017-07-11 15:11:10 +01001027 assert(ctx->pa_max_address <= xlat_arch_get_max_supported_pa());
Sandrine Bailleux66342932017-07-18 13:26:36 +01001028 assert(ctx->max_va <= ctx->va_max_address);
1029 assert(ctx->max_pa <= ctx->pa_max_address);
1030
Sandrine Bailleuxc5b63772017-05-31 13:31:48 +01001031 ctx->initialized = 1;
1032
1033 xlat_tables_print(ctx);
Sandrine Bailleux66342932017-07-18 13:26:36 +01001034}