blob: 54e58341ac034cd9b8ddd1c49888ba802ce40ac3 [file] [log] [blame]
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001/*
Antonio Nino Diaz3f518922018-01-05 11:30:36 +00002 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00003 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00005 */
6
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00007#include <arch_helpers.h>
8#include <assert.h>
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00009#include <debug.h>
10#include <errno.h>
11#include <platform_def.h>
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +010012#include <stdbool.h>
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +000013#include <string.h>
14#include <types.h>
Antonio Nino Diazf1b84f62018-07-03 11:58:49 +010015#include <utils_def.h>
Sandrine Bailleux090c8492017-05-19 09:59:37 +010016#include <xlat_tables_defs.h>
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +000017#include <xlat_tables_v2.h>
Sandrine Bailleux090c8492017-05-19 09:59:37 +010018
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +000019#include "xlat_tables_private.h"
20
Antonio Nino Diazac998032017-02-27 17:23:54 +000021#if PLAT_XLAT_TABLES_DYNAMIC
22
23/*
24 * The following functions assume that they will be called using subtables only.
25 * The base table can't be unmapped, so it is not needed to do any special
26 * handling for it.
27 */
28
29/*
30 * Returns the index of the array corresponding to the specified translation
31 * table.
32 */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010033static int xlat_table_get_index(const xlat_ctx_t *ctx, const uint64_t *table)
Antonio Nino Diazac998032017-02-27 17:23:54 +000034{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010035 for (int i = 0; i < ctx->tables_num; i++)
Antonio Nino Diazac998032017-02-27 17:23:54 +000036 if (ctx->tables[i] == table)
37 return i;
38
39 /*
40 * Maybe we were asked to get the index of the base level table, which
41 * should never happen.
42 */
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +010043 assert(false);
Antonio Nino Diazac998032017-02-27 17:23:54 +000044
45 return -1;
46}
47
48/* Returns a pointer to an empty translation table. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010049static uint64_t *xlat_table_get_empty(const xlat_ctx_t *ctx)
Antonio Nino Diazac998032017-02-27 17:23:54 +000050{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010051 for (int i = 0; i < ctx->tables_num; i++)
Antonio Nino Diazac998032017-02-27 17:23:54 +000052 if (ctx->tables_mapped_regions[i] == 0)
53 return ctx->tables[i];
54
55 return NULL;
56}
57
58/* Increments region count for a given table. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010059static void xlat_table_inc_regions_count(const xlat_ctx_t *ctx,
60 const uint64_t *table)
Antonio Nino Diazac998032017-02-27 17:23:54 +000061{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010062 int idx = xlat_table_get_index(ctx, table);
63
64 ctx->tables_mapped_regions[idx]++;
Antonio Nino Diazac998032017-02-27 17:23:54 +000065}
66
67/* Decrements region count for a given table. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010068static void xlat_table_dec_regions_count(const xlat_ctx_t *ctx,
69 const uint64_t *table)
Antonio Nino Diazac998032017-02-27 17:23:54 +000070{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010071 int idx = xlat_table_get_index(ctx, table);
72
73 ctx->tables_mapped_regions[idx]--;
Antonio Nino Diazac998032017-02-27 17:23:54 +000074}
75
Antonio Nino Diazf1b84f62018-07-03 11:58:49 +010076/* Returns 0 if the specified table isn't empty, otherwise 1. */
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +010077static bool xlat_table_is_empty(const xlat_ctx_t *ctx, const uint64_t *table)
Antonio Nino Diazac998032017-02-27 17:23:54 +000078{
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +010079 return ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)] == 0;
Antonio Nino Diazac998032017-02-27 17:23:54 +000080}
81
82#else /* PLAT_XLAT_TABLES_DYNAMIC */
83
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +000084/* Returns a pointer to the first empty translation table. */
85static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
86{
87 assert(ctx->next_table < ctx->tables_num);
88
89 return ctx->tables[ctx->next_table++];
90}
91
Antonio Nino Diazac998032017-02-27 17:23:54 +000092#endif /* PLAT_XLAT_TABLES_DYNAMIC */
93
/*
 * Returns a block/page table descriptor for the given level and attributes.
 *
 * ctx     - Translation context. Its translation regime (EL1&0 or EL3)
 *           selects the access-permission and execute-never encodings used.
 * attr    - MT_xxx attributes of the region (type, RW/RO, NS, USER, XN).
 * addr_pa - Physical address the descriptor will point to. Must be aligned
 *           to the block size of 'level' (asserted below).
 * level   - Translation table level the descriptor is built for.
 */
uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
		   unsigned long long addr_pa, unsigned int level)
{
	uint64_t desc;
	uint32_t mem_type;

	/* Make sure that the granularity is fine enough to map this address. */
	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0U);

	desc = addr_pa;
	/*
	 * There are different translation table descriptors for level 3 and the
	 * rest.
	 */
	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
	/*
	 * Always set the access flag, as this library assumes access flag
	 * faults aren't managed.
	 */
	desc |= LOWER_ATTRS(ACCESS_FLAG);
	/*
	 * Deduce other fields of the descriptor based on the MT_NS and MT_RW
	 * memory region attributes.
	 */
	desc |= ((attr & MT_NS) != 0U) ? LOWER_ATTRS(NS) : 0U;
	desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);

	/*
	 * Do not allow unprivileged access when the mapping is for a privileged
	 * EL. For translation regimes that do not have mappings for access for
	 * lower exception levels, set AP[2] to AP_NO_ACCESS_UNPRIVILEGED.
	 */
	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		if ((attr & MT_USER) != 0U) {
			/* EL0 mapping requested, so we give User access */
			desc |= LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED);
		} else {
			/* EL1 mapping requested, no User access granted */
			desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
		}
	} else {
		/* Only the EL1&0 and EL3 regimes are supported here. */
		assert(ctx->xlat_regime == EL3_REGIME);
		desc |= LOWER_ATTRS(AP_ONE_VA_RANGE_RES1);
	}

	/*
	 * Deduce shareability domain and executability of the memory region
	 * from the memory type of the attributes (MT_TYPE).
	 *
	 * Data accesses to device memory and non-cacheable normal memory are
	 * coherent for all observers in the system, and correspondingly are
	 * always treated as being Outer Shareable. Therefore, for these 2 types
	 * of memory, it is not strictly needed to set the shareability field
	 * in the translation tables.
	 */
	mem_type = MT_TYPE(attr);
	if (mem_type == MT_DEVICE) {
		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
		/*
		 * Always map device memory as execute-never.
		 * This is to avoid the possibility of a speculative instruction
		 * fetch, which could be an issue if this memory region
		 * corresponds to a read-sensitive peripheral.
		 */
		desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);

	} else { /* Normal memory */
		/*
		 * Always map read-write normal memory as execute-never.
		 * This library assumes that it is used by software that does
		 * not self-modify its code, therefore R/W memory is reserved
		 * for data storage, which must not be executable.
		 *
		 * Note that setting the XN bit here is for consistency only.
		 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
		 * which makes any writable memory region to be treated as
		 * execute-never, regardless of the value of the XN bit in the
		 * translation table.
		 *
		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
		 * attribute to figure out the value of the XN bit. The actual
		 * XN bit(s) to set in the descriptor depends on the context's
		 * translation regime and the policy applied in
		 * xlat_arch_regime_get_xn_desc().
		 */
		if (((attr & MT_RW) != 0U) || ((attr & MT_EXECUTE_NEVER) != 0U)) {
			desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
		}

		/* Normal cacheable memory is mapped Inner Shareable. */
		if (mem_type == MT_MEMORY) {
			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
		} else {
			assert(mem_type == MT_NON_CACHEABLE);
			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
		}
	}

	return desc;
}
196
/*
 * Enumeration of actions that can be made when mapping table entries depending
 * on the previous value in that entry and information about the region being
 * mapped.
 */
typedef enum {

	/* Do nothing */
	ACTION_NONE,

	/*
	 * Write a block (or page, if in level 3) entry. The unmap path also
	 * uses this action to overwrite an entry with INVALID_DESC.
	 */
	ACTION_WRITE_BLOCK_ENTRY,

	/*
	 * Create a new table and write a table entry pointing to it. Recurse
	 * into it for further processing.
	 */
	ACTION_CREATE_NEW_TABLE,

	/*
	 * There is a table descriptor in this entry, read it and recurse into
	 * that table for further processing.
	 */
	ACTION_RECURSE_INTO_TABLE,

} action_t;
223
Antonio Nino Diazac998032017-02-27 17:23:54 +0000224#if PLAT_XLAT_TABLES_DYNAMIC
225
/*
 * Recursive function that writes to the translation tables and unmaps the
 * specified region.
 *
 * table_base_va - First VA translated by this table.
 * table_base    - Pointer to the table to modify at this level.
 * table_entries - Number of entries in this table.
 * level         - Current translation table level.
 */
static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
				     const uintptr_t table_base_va,
				     uint64_t *const table_base,
				     const unsigned int table_entries,
				     const unsigned int level)
{
	assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));

	uint64_t *subtable;
	uint64_t desc;

	uintptr_t table_idx_va;
	uintptr_t table_idx_end_va; /* End VA of this entry */

	uintptr_t region_end_va = mm->base_va + mm->size - 1U;

	unsigned int table_idx;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

		table_idx = (unsigned int)((table_idx_va - table_base_va) >>
			    XLAT_ADDR_SHIFT(level));

		assert(table_idx < table_entries);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
		table_idx = 0;
	}

	while (table_idx < table_entries) {

		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1U;

		desc = table_base[table_idx];
		uint64_t desc_type = desc & DESC_MASK;

		action_t action;

		if ((mm->base_va <= table_idx_va) &&
		    (region_end_va >= table_idx_end_va)) {
			/* Region covers all block */

			if (level == 3U) {
				/*
				 * Last level, only page descriptors allowed,
				 * erase it.
				 */
				assert(desc_type == PAGE_DESC);

				action = ACTION_WRITE_BLOCK_ENTRY;
			} else {
				/*
				 * Other levels can have table descriptors. If
				 * so, recurse into it and erase descriptors
				 * inside it as needed. If there is a block
				 * descriptor, just erase it. If an invalid
				 * descriptor is found, this table isn't
				 * actually mapped, which shouldn't happen.
				 */
				if (desc_type == TABLE_DESC) {
					action = ACTION_RECURSE_INTO_TABLE;
				} else {
					assert(desc_type == BLOCK_DESC);
					action = ACTION_WRITE_BLOCK_ENTRY;
				}
			}

		} else if ((mm->base_va <= table_idx_end_va) ||
			   (region_end_va >= table_idx_va)) {
			/*
			 * Region partially covers block.
			 *
			 * It can't happen in level 3.
			 *
			 * There must be a table descriptor here, if not there
			 * was a problem when mapping the region.
			 *
			 * NOTE(review): a strict intersection test would use
			 * '&&' above. This looks safe only because the loop
			 * starts at the first affected entry and breaks once
			 * past the region end, so non-overlapping entries
			 * presumably never reach this branch — confirm before
			 * reusing this walk elsewhere.
			 */
			assert(level < 3U);
			assert(desc_type == TABLE_DESC);

			action = ACTION_RECURSE_INTO_TABLE;
		} else {
			/* The region doesn't cover the block at all */
			action = ACTION_NONE;
		}

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			/* Erase the entry and invalidate any cached copy. */
			table_base[table_idx] = INVALID_DESC;
			xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);

			/* Recurse to write into subtable */
			xlat_tables_unmap_region(ctx, mm, table_idx_va,
						 subtable, XLAT_TABLE_ENTRIES,
						 level + 1U);

			/*
			 * If the subtable is now empty, remove its reference.
			 */
			if (xlat_table_is_empty(ctx, subtable)) {
				table_base[table_idx] = INVALID_DESC;
				xlat_arch_tlbi_va(table_idx_va,
						  ctx->xlat_regime);
			}

		} else {
			assert(action == ACTION_NONE);
		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (region_end_va <= table_idx_va)
			break;
	}

	/* The base table has no region count; only subtables track one. */
	if (level > ctx->base_level)
		xlat_table_dec_regions_count(ctx, table_base);
}
357
358#endif /* PLAT_XLAT_TABLES_DYNAMIC */
359
/*
 * From the given arguments, it decides which action to take when mapping the
 * specified region.
 *
 * desc_type           - Type of the descriptor currently in this entry.
 * dest_pa             - PA that would be mapped by this entry.
 * table_entry_base_va - First VA translated by this entry.
 * level               - Current translation table level.
 */
static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
		unsigned int desc_type, unsigned long long dest_pa,
		uintptr_t table_entry_base_va, unsigned int level)
{
	uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
	uintptr_t table_entry_end_va =
			table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1U;

	/*
	 * The descriptor types allowed depend on the current table level.
	 */

	if ((mm->base_va <= table_entry_base_va) &&
	    (mm_end_va >= table_entry_end_va)) {

		/*
		 * Table entry is covered by region
		 * --------------------------------
		 *
		 * This means that this table entry can describe the whole
		 * translation with this granularity in principle.
		 */

		if (level == 3U) {
			/*
			 * Last level, only page descriptors are allowed.
			 */
			if (desc_type == PAGE_DESC) {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				return ACTION_NONE;
			} else {
				assert(desc_type == INVALID_DESC);
				return ACTION_WRITE_BLOCK_ENTRY;
			}

		} else {

			/*
			 * Other levels. Table descriptors are allowed. Block
			 * descriptors too, but they have some limitations.
			 */

			if (desc_type == TABLE_DESC) {
				/* There's already a table, recurse into it. */
				return ACTION_RECURSE_INTO_TABLE;

			} else if (desc_type == INVALID_DESC) {
				/*
				 * There's nothing mapped here, create a new
				 * entry.
				 *
				 * Check if the destination granularity allows
				 * us to use a block descriptor or we need a
				 * finer table for it.
				 *
				 * Also, check if the current level allows block
				 * descriptors. If not, create a table instead.
				 */
				if (((dest_pa & XLAT_BLOCK_MASK(level)) != 0U)
				    || (level < MIN_LVL_BLOCK_DESC) ||
				    (mm->granularity < XLAT_BLOCK_SIZE(level)))
					return ACTION_CREATE_NEW_TABLE;
				else
					return ACTION_WRITE_BLOCK_ENTRY;

			} else {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				assert(desc_type == BLOCK_DESC);

				return ACTION_NONE;
			}
		}

	} else if ((mm->base_va <= table_entry_end_va) ||
		   (mm_end_va >= table_entry_base_va)) {

		/*
		 * Region partially covers table entry
		 * -----------------------------------
		 *
		 * This means that this table entry can't describe the whole
		 * translation, a finer table is needed.

		 * There cannot be partial block overlaps in level 3. If that
		 * happens, some of the preliminary checks when adding the
		 * mmap region failed to detect that PA and VA must at least be
		 * aligned to PAGE_SIZE.
		 *
		 * NOTE(review): a strict intersection test would use '&&'
		 * above; '||' appears to rely on the caller only visiting
		 * entries that overlap the region — confirm against
		 * xlat_tables_map_region() before reusing this helper.
		 */
		assert(level < 3U);

		if (desc_type == INVALID_DESC) {
			/*
			 * The block is not fully covered by the region. Create
			 * a new table, recurse into it and try to map the
			 * region with finer granularity.
			 */
			return ACTION_CREATE_NEW_TABLE;

		} else {
			assert(desc_type == TABLE_DESC);
			/*
			 * The block is not fully covered by the region, but
			 * there is already a table here. Recurse into it and
			 * try to map with finer granularity.
			 *
			 * PAGE_DESC for level 3 has the same value as
			 * TABLE_DESC, but this code can't run on a level 3
			 * table because there can't be overlaps in level 3.
			 */
			return ACTION_RECURSE_INTO_TABLE;
		}
	} else {

		/*
		 * This table entry is outside of the region specified in the
		 * arguments, don't write anything to it.
		 */
		return ACTION_NONE;
	}
}
490
/*
 * Recursive function that writes to the translation tables and maps the
 * specified region. On success, it returns the VA of the last byte that was
 * successfully mapped. On error, it returns the VA of the next entry that
 * should have been mapped.
 *
 * table_base_va - First VA translated by this table.
 * table_base    - Pointer to the table to fill at this level.
 * table_entries - Number of entries in this table.
 * level         - Current translation table level.
 */
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
					uintptr_t table_base_va,
					uint64_t *const table_base,
					unsigned int table_entries,
					unsigned int level)
{
	assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));

	uintptr_t mm_end_va = mm->base_va + mm->size - 1U;

	uintptr_t table_idx_va;
	unsigned long long table_idx_pa;

	uint64_t *subtable;
	uint64_t desc;

	unsigned int table_idx;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

		table_idx = (unsigned int)((table_idx_va - table_base_va) >>
			    XLAT_ADDR_SHIFT(level));

		assert(table_idx < table_entries);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
		table_idx = 0U;
	}

#if PLAT_XLAT_TABLES_DYNAMIC
	/* Record that one more region maps through this (sub)table. */
	if (level > ctx->base_level)
		xlat_table_inc_regions_count(ctx, table_base);
#endif

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		/* PA that corresponds to the VA of this entry. */
		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;

		action_t action = xlat_tables_map_region_action(mm,
			(uint32_t)(desc & DESC_MASK), table_idx_pa,
			table_idx_va, level);

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] =
				xlat_desc(ctx, (uint32_t)mm->attr, table_idx_pa,
					  level);

		} else if (action == ACTION_CREATE_NEW_TABLE) {
			uintptr_t end_va;

			subtable = xlat_table_get_empty(ctx);
			if (subtable == NULL) {
				/* Not enough free tables to map this region */
				return table_idx_va;
			}

			/* Point to new subtable from this one. */
			table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;

			/* Recurse to write into subtable */
			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					       subtable, XLAT_TABLE_ENTRIES,
					       level + 1U);
			/* Propagate failure: recursion must fill the block. */
			if (end_va !=
				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
				return end_va;

		} else if (action == ACTION_RECURSE_INTO_TABLE) {
			uintptr_t end_va;

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
			/* Recurse to write into subtable */
			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					       subtable, XLAT_TABLE_ENTRIES,
					       level + 1U);
			/* Propagate failure: recursion must fill the block. */
			if (end_va !=
				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
				return end_va;

		} else {

			assert(action == ACTION_NONE);

		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (mm_end_va <= table_idx_va)
			break;
	}

	/* Last VA mapped: the loop advanced one byte past the region end. */
	return table_idx_va - 1U;
}
598
/*
 * Function that verifies that a region can be mapped.
 * Returns:
 *        0: Success, the mapping is allowed.
 *   EINVAL: Invalid values were used as arguments.
 *   ERANGE: The memory limits were surpassed.
 *   ENOMEM: There is not enough memory in the mmap array.
 *    EPERM: Region overlaps another one in an invalid way.
 *
 * NOTE(review): the end-address computations below underflow for
 * mm->size == 0; presumably callers filter out empty regions before
 * calling this — confirm against the call sites.
 */
static int mmap_add_region_check(const xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	unsigned long long base_pa = mm->base_pa;
	uintptr_t base_va = mm->base_va;
	size_t size = mm->size;
	size_t granularity = mm->granularity;

	unsigned long long end_pa = base_pa + size - 1U;
	uintptr_t end_va = base_va + size - 1U;

	/* Addresses and size must be page-aligned. */
	if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
			!IS_PAGE_ALIGNED(size))
		return -EINVAL;

	/* Only the block sizes of levels 1 to 3 are valid granularities. */
	if ((granularity != XLAT_BLOCK_SIZE(1U)) &&
	    (granularity != XLAT_BLOCK_SIZE(2U)) &&
	    (granularity != XLAT_BLOCK_SIZE(3U))) {
		return -EINVAL;
	}

	/* Check for overflows */
	if ((base_pa > end_pa) || (base_va > end_va))
		return -ERANGE;

	if ((base_va + (uintptr_t)size - (uintptr_t)1) > ctx->va_max_address)
		return -ERANGE;

	if ((base_pa + (unsigned long long)size - 1ULL) > ctx->pa_max_address)
		return -ERANGE;

	/* Check that there is space in the ctx->mmap array */
	if (ctx->mmap[ctx->mmap_num - 1].size != 0U)
		return -ENOMEM;

	/* Check for PAs and VAs overlaps with all other regions */
	for (const mmap_region_t *mm_cursor = ctx->mmap;
	     mm_cursor->size != 0U; ++mm_cursor) {

		uintptr_t mm_cursor_end_va = mm_cursor->base_va
			+ mm_cursor->size - 1U;

		/*
		 * Check if one of the regions is completely inside the other
		 * one.
		 */
		bool fully_overlapped_va =
			((base_va >= mm_cursor->base_va) &&
					(end_va <= mm_cursor_end_va)) ||
			((mm_cursor->base_va >= base_va) &&
					(mm_cursor_end_va <= end_va));

		/*
		 * Full VA overlaps are only allowed if both regions are
		 * identity mapped (zero offset) or have the same VA to PA
		 * offset. Also, make sure that it's not the exact same area.
		 * This can only be done with static regions.
		 */
		if (fully_overlapped_va) {

#if PLAT_XLAT_TABLES_DYNAMIC
			/* Dynamic regions may never overlap anything. */
			if (((mm->attr & MT_DYNAMIC) != 0U) ||
			    ((mm_cursor->attr & MT_DYNAMIC) != 0U))
				return -EPERM;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
			/* Both regions must share the same VA-to-PA offset. */
			if ((mm_cursor->base_va - mm_cursor->base_pa) !=
			    (base_va - base_pa))
				return -EPERM;

			/* Reject an exact duplicate of an existing region. */
			if ((base_va == mm_cursor->base_va) &&
			    (size == mm_cursor->size))
				return -EPERM;

		} else {
			/*
			 * If the regions do not have fully overlapping VAs,
			 * then they must have fully separated VAs and PAs.
			 * Partial overlaps are not allowed
			 */

			unsigned long long mm_cursor_end_pa =
				     mm_cursor->base_pa + mm_cursor->size - 1U;

			bool separated_pa = (end_pa < mm_cursor->base_pa) ||
				(base_pa > mm_cursor_end_pa);
			bool separated_va = (end_va < mm_cursor->base_va) ||
				(base_va > mm_cursor_end_va);

			if (!separated_va || !separated_pa)
				return -EPERM;
		}
	}

	return 0;
}
702
/*
 * Add a static memory region to the mmap array of the given context.
 *
 * The array is kept terminated by a zero-size sentinel entry and sorted so
 * that regions with a lower end VA come first, with ties broken by smaller
 * size first (see the diagram below). Empty regions are ignored. On a failed
 * validity check this function asserts in debug builds and silently returns
 * in release builds; it cannot report an error to the caller.
 */
void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap, *mm_destination;
	const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num;
	const mmap_region_t *mm_last;
	unsigned long long end_pa = mm->base_pa + mm->size - 1U;
	uintptr_t end_va = mm->base_va + mm->size - 1U;
	int ret;

	/* Ignore empty regions */
	if (mm->size == 0U)
		return;

	/* Static regions must be added before initializing the xlat tables. */
	assert(!ctx->initialized);

	ret = mmap_add_region_check(ctx, mm);
	if (ret != 0) {
		ERROR("mmap_add_region_check() failed. error %d\n", ret);
		assert(false);
		return;
	}

	/*
	 * Find correct place in mmap to insert new region.
	 *
	 * 1 - Lower region VA end first.
	 * 2 - Smaller region size first.
	 *
	 * VA  0                                   0xFF
	 *
	 * 1st |------|
	 * 2nd |------------|
	 * 3rd                 |------|
	 * 4th                            |---|
	 * 5th                                   |---|
	 * 6th                            |----------|
	 * 7th |-------------------------------------|
	 *
	 * This is required for overlapping regions only. It simplifies adding
	 * regions with the loop in xlat_tables_init_internal because the outer
	 * ones won't overwrite block or page descriptors of regions added
	 * previously.
	 *
	 * Overlapping is only allowed for static regions.
	 */

	/* Skip regions whose end VA is strictly below the new region's. */
	while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
	       && (mm_cursor->size != 0U)) {
		++mm_cursor;
	}

	/* Among regions with the same end VA, keep smaller sizes first. */
	while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
	       (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
		++mm_cursor;
	}

	/*
	 * Find the last entry marker in the mmap
	 */
	mm_last = ctx->mmap;
	while ((mm_last->size != 0U) && (mm_last < mm_end)) {
		++mm_last;
	}

	/*
	 * Check if we have enough space in the memory mapping table.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0U);

	/* Make room for new region by moving other regions up by one place */
	mm_destination = mm_cursor + 1;
	(void)memmove(mm_destination, mm_cursor,
		(uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_end->size == 0U);

	*mm_cursor = *mm;

	/* Track the highest mapped PA/VA so far in the context. */
	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;
}
794
Sandrine Bailleux66342932017-07-18 13:26:36 +0100795void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
796{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100797 const mmap_region_t *mm_cursor = mm;
798
799 while (mm_cursor->size != 0U) {
800 mmap_add_region_ctx(ctx, mm_cursor);
801 mm_cursor++;
Sandrine Bailleux66342932017-07-18 13:26:36 +0100802 }
803}
804
Antonio Nino Diazac998032017-02-27 17:23:54 +0000805#if PLAT_XLAT_TABLES_DYNAMIC
806
/*
 * Add a dynamic memory region to the mmap array of the given context and,
 * if the translation tables of this context are already initialized, map
 * it immediately.
 *
 * Returns:
 *  0        on success (also for empty regions, which are ignored);
 *  the error code from mmap_add_region_check() if the region is invalid;
 *  -ENOMEM  if there were not enough translation tables to map the region
 *           (any partially created mapping is undone before returning).
 */
int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap;
	const mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
	unsigned long long end_pa = mm->base_pa + mm->size - 1U;
	uintptr_t end_va = mm->base_va + mm->size - 1U;
	int ret;

	/* Nothing to do */
	if (mm->size == 0U)
		return 0;

	/* Now this region is a dynamic one */
	mm->attr |= MT_DYNAMIC;

	ret = mmap_add_region_check(ctx, mm);
	if (ret != 0)
		return ret;

	/*
	 * Find the adequate entry in the mmap array in the same way done for
	 * static regions in mmap_add_region_ctx().
	 */

	while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
	       && (mm_cursor->size != 0U)) {
		++mm_cursor;
	}

	while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
	       (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
		++mm_cursor;
	}

	/* Make room for new region by moving other regions up by one place */
	(void)memmove(mm_cursor + 1U, mm_cursor,
		      (uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0U);

	*mm_cursor = *mm;

	/*
	 * Update the translation tables if the xlat tables are initialized. If
	 * not, this region will be mapped when they are initialized.
	 */
	if (ctx->initialized) {
		end_va = xlat_tables_map_region(ctx, mm_cursor,
				0U, ctx->base_table, ctx->base_table_entries,
				ctx->base_level);

		/* Failed to map, remove mmap entry, unmap and return error. */
		if (end_va != (mm_cursor->base_va + mm_cursor->size - 1U)) {
			/* Drop the entry we just inserted from the array. */
			(void)memmove(mm_cursor, mm_cursor + 1U,
				      (uintptr_t)mm_last - (uintptr_t)mm_cursor);

			/*
			 * Check if the mapping function actually managed to map
			 * anything. If not, just return now.
			 */
			if (mm->base_va >= end_va)
				return -ENOMEM;

			/*
			 * Something went wrong after mapping some table
			 * entries, undo every change done up to this point.
			 * Only base_va and size matter for unmapping, hence
			 * the zeroed base_pa and attr.
			 */
			mmap_region_t unmap_mm = {
				.base_pa = 0U,
				.base_va = mm->base_va,
				.size = end_va - mm->base_va,
				.attr = 0U
			};
			xlat_tables_unmap_region(ctx, &unmap_mm, 0U,
				ctx->base_table, ctx->base_table_entries,
				ctx->base_level);

			return -ENOMEM;
		}

		/*
		 * Make sure that all entries are written to the memory. There
		 * is no need to invalidate entries when mapping dynamic regions
		 * because new table/block/page descriptors only replace old
		 * invalid descriptors, that aren't TLB cached.
		 */
		dsbishst();
	}

	/* Track the highest mapped PA/VA so far in the context. */
	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;

	return 0;
}
908
/*
 * Removes the region with given base Virtual Address and size from the given
 * context. Both base_va and size must match an existing mmap entry exactly.
 * If the translation tables are initialized, the region is unmapped and the
 * TLBs are invalidated before the entry is removed from the mmap array.
 *
 * Returns:
 *        0: Success.
 *  -EINVAL: Invalid values were used as arguments (region not found).
 *   -EPERM: Tried to remove a static region.
 */
int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
				   size_t size)
{
	mmap_region_t *mm = ctx->mmap;
	const mmap_region_t *mm_last = mm + ctx->mmap_num;
	int update_max_va_needed = 0;
	int update_max_pa_needed = 0;

	/* Check sanity of mmap array: it must end with the zero sentinel. */
	assert(mm[ctx->mmap_num].size == 0U);

	/* Look for an entry that matches base_va and size exactly. */
	while (mm->size != 0U) {
		if ((mm->base_va == base_va) && (mm->size == size))
			break;
		++mm;
	}

	/* Check that the region was found */
	if (mm->size == 0U)
		return -EINVAL;

	/* If the region is static it can't be removed */
	if ((mm->attr & MT_DYNAMIC) == 0U)
		return -EPERM;

	/* Check if this region is using the top VAs or PAs. */
	if ((mm->base_va + mm->size - 1U) == ctx->max_va)
		update_max_va_needed = 1;
	if ((mm->base_pa + mm->size - 1U) == ctx->max_pa)
		update_max_pa_needed = 1;

	/*
	 * Update the translation tables if needed. This must happen before
	 * the entry is removed from the array below.
	 */
	if (ctx->initialized) {
		xlat_tables_unmap_region(ctx, mm, 0U, ctx->base_table,
					 ctx->base_table_entries,
					 ctx->base_level);
		xlat_arch_tlbi_va_sync();
	}

	/* Remove this region by moving the rest down by one place. */
	(void)memmove(mm, mm + 1U, (uintptr_t)mm_last - (uintptr_t)mm);

	/*
	 * Check if we need to update the max VAs and PAs: rescan the whole
	 * array since the removed region held the current maximum.
	 */
	if (update_max_va_needed == 1) {
		ctx->max_va = 0U;
		mm = ctx->mmap;
		while (mm->size != 0U) {
			if ((mm->base_va + mm->size - 1U) > ctx->max_va)
				ctx->max_va = mm->base_va + mm->size - 1U;
			++mm;
		}
	}

	if (update_max_pa_needed == 1) {
		ctx->max_pa = 0U;
		mm = ctx->mmap;
		while (mm->size != 0U) {
			if ((mm->base_pa + mm->size - 1U) > ctx->max_pa)
				ctx->max_pa = mm->base_pa + mm->size - 1U;
			++mm;
		}
	}

	return 0;
}
983
984#endif /* PLAT_XLAT_TABLES_DYNAMIC */
985
/*
 * Initialize the translation tables of a context: invalidate every entry of
 * the base table and all sub-tables, then walk the mmap array and create the
 * table entries for each region. Panics if the regions do not fit in the
 * available translation tables.
 *
 * Must be called exactly once per context, with the MMU for this context's
 * regime still disabled (both enforced by assertions).
 */
void init_xlat_tables_ctx(xlat_ctx_t *ctx)
{
	assert(ctx != NULL);
	assert(!ctx->initialized);
	assert((ctx->xlat_regime == EL3_REGIME) ||
	       (ctx->xlat_regime == EL1_EL0_REGIME));
	assert(!is_mmu_enabled_ctx(ctx));

	mmap_region_t *mm = ctx->mmap;

	xlat_mmap_print(mm);

	/* All tables must be zeroed before mapping any region. */

	for (unsigned int i = 0U; i < ctx->base_table_entries; i++)
		ctx->base_table[i] = INVALID_DESC;

	for (int j = 0; j < ctx->tables_num; j++) {
#if PLAT_XLAT_TABLES_DYNAMIC
		ctx->tables_mapped_regions[j] = 0;
#endif
		for (unsigned int i = 0U; i < XLAT_TABLE_ENTRIES; i++)
			ctx->tables[j][i] = INVALID_DESC;
	}

	/* Map every region in the (sentinel-terminated) mmap array. */
	while (mm->size != 0U) {
		uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0U,
				ctx->base_table, ctx->base_table_entries,
				ctx->base_level);

		/* A partial mapping means we ran out of translation tables. */
		if (end_va != (mm->base_va + mm->size - 1U)) {
			ERROR("Not enough memory to map region:\n"
			      " VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x\n",
			      mm->base_va, mm->base_pa, mm->size, mm->attr);
			panic();
		}

		mm++;
	}

	assert(ctx->pa_max_address <= xlat_arch_get_max_supported_pa());
	assert(ctx->max_va <= ctx->va_max_address);
	assert(ctx->max_pa <= ctx->pa_max_address);

	ctx->initialized = true;

	xlat_tables_print(ctx);
}