blob: feca964b182286e565203b14fb890beb26909c1d [file] [log] [blame]
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001/*
2 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
3 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00005 */
6
7#include <arch.h>
8#include <arch_helpers.h>
9#include <assert.h>
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +000010#include <common_def.h>
11#include <debug.h>
12#include <errno.h>
13#include <platform_def.h>
14#include <string.h>
15#include <types.h>
16#include <utils.h>
Sandrine Bailleux090c8492017-05-19 09:59:37 +010017#include <xlat_tables_arch.h>
18#include <xlat_tables_defs.h>
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +000019#include <xlat_tables_v2.h>
Sandrine Bailleux090c8492017-05-19 09:59:37 +010020
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +000021#include "xlat_tables_private.h"
22
Sandrine Bailleux66342932017-07-18 13:26:36 +010023/*
24 * Each platform can define the size of its physical and virtual address spaces.
25 * If the platform hasn't defined one or both of them, default to
26 * ADDR_SPACE_SIZE. The latter is deprecated, though.
27 */
28#if ERROR_DEPRECATED
29# ifdef ADDR_SPACE_SIZE
30# error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
31# endif
32#elif defined(ADDR_SPACE_SIZE)
33# ifndef PLAT_PHY_ADDR_SPACE_SIZE
34# define PLAT_PHY_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
35# endif
36# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
37# define PLAT_VIRT_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
38# endif
39#endif
40
41/*
42 * Allocate and initialise the default translation context for the BL image
43 * currently executing.
44 */
45REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
46 PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
47
Antonio Nino Diazac998032017-02-27 17:23:54 +000048#if PLAT_XLAT_TABLES_DYNAMIC
49
50/*
51 * The following functions assume that they will be called using subtables only.
52 * The base table can't be unmapped, so it is not needed to do any special
53 * handling for it.
54 */
55
56/*
57 * Returns the index of the array corresponding to the specified translation
58 * table.
59 */
60static int xlat_table_get_index(xlat_ctx_t *ctx, const uint64_t *table)
61{
Varun Wadekar66231d12017-06-07 09:57:42 -070062 for (unsigned int i = 0; i < ctx->tables_num; i++)
Antonio Nino Diazac998032017-02-27 17:23:54 +000063 if (ctx->tables[i] == table)
64 return i;
65
66 /*
67 * Maybe we were asked to get the index of the base level table, which
68 * should never happen.
69 */
70 assert(0);
71
72 return -1;
73}
74
75/* Returns a pointer to an empty translation table. */
76static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
77{
Varun Wadekar66231d12017-06-07 09:57:42 -070078 for (unsigned int i = 0; i < ctx->tables_num; i++)
Antonio Nino Diazac998032017-02-27 17:23:54 +000079 if (ctx->tables_mapped_regions[i] == 0)
80 return ctx->tables[i];
81
82 return NULL;
83}
84
/*
 * Increments region count for a given table.
 * The table must belong to 'ctx'; xlat_table_get_index() asserts otherwise.
 */
static void xlat_table_inc_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
{
	ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]++;
}
90
/*
 * Decrements region count for a given table.
 * When the count drops to zero the table is considered empty and may be
 * reclaimed by xlat_table_get_empty().
 */
static void xlat_table_dec_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
{
	ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]--;
}
96
/* Returns 0 if the specified table isn't empty, otherwise 1. */
static int xlat_table_is_empty(xlat_ctx_t *ctx, const uint64_t *table)
{
	return !ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)];
}
102
103#else /* PLAT_XLAT_TABLES_DYNAMIC */
104
/*
 * Returns a pointer to the first empty translation table.
 * Static-only variant: tables are handed out in order and never returned,
 * since static regions cannot be unmapped.
 */
static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
{
	/* Running out of tables here is a configuration error. */
	assert(ctx->next_table < ctx->tables_num);

	return ctx->tables[ctx->next_table++];
}
112
Antonio Nino Diazac998032017-02-27 17:23:54 +0000113#endif /* PLAT_XLAT_TABLES_DYNAMIC */
114
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000115/* Returns a block/page table descriptor for the given level and attributes. */
Sandrine Bailleux04980a32017-04-19 14:02:23 +0100116static uint64_t xlat_desc(mmap_attr_t attr, unsigned long long addr_pa,
Antonio Nino Diazefabaa92017-04-27 13:30:22 +0100117 int level, uint64_t execute_never_mask)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000118{
119 uint64_t desc;
120 int mem_type;
121
122 /* Make sure that the granularity is fine enough to map this address. */
123 assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);
124
125 desc = addr_pa;
126 /*
127 * There are different translation table descriptors for level 3 and the
128 * rest.
129 */
130 desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
131 /*
132 * Always set the access flag, as TF doesn't manage access flag faults.
133 * Deduce other fields of the descriptor based on the MT_NS and MT_RW
134 * memory region attributes.
135 */
136 desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
137 desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
138 desc |= LOWER_ATTRS(ACCESS_FLAG);
139
140 /*
141 * Deduce shareability domain and executability of the memory region
142 * from the memory type of the attributes (MT_TYPE).
143 *
144 * Data accesses to device memory and non-cacheable normal memory are
145 * coherent for all observers in the system, and correspondingly are
146 * always treated as being Outer Shareable. Therefore, for these 2 types
147 * of memory, it is not strictly needed to set the shareability field
148 * in the translation tables.
149 */
150 mem_type = MT_TYPE(attr);
151 if (mem_type == MT_DEVICE) {
152 desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
153 /*
154 * Always map device memory as execute-never.
155 * This is to avoid the possibility of a speculative instruction
156 * fetch, which could be an issue if this memory region
157 * corresponds to a read-sensitive peripheral.
158 */
Antonio Nino Diazefabaa92017-04-27 13:30:22 +0100159 desc |= execute_never_mask;
160
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000161 } else { /* Normal memory */
162 /*
163 * Always map read-write normal memory as execute-never.
164 * (Trusted Firmware doesn't self-modify its code, therefore
165 * R/W memory is reserved for data storage, which must not be
166 * executable.)
167 * Note that setting the XN bit here is for consistency only.
Antonio Nino Diazefabaa92017-04-27 13:30:22 +0100168 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000169 * which makes any writable memory region to be treated as
170 * execute-never, regardless of the value of the XN bit in the
171 * translation table.
172 *
173 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
174 * attribute to figure out the value of the XN bit.
175 */
Antonio Nino Diazefabaa92017-04-27 13:30:22 +0100176 if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
177 desc |= execute_never_mask;
178 }
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000179
180 if (mem_type == MT_MEMORY) {
181 desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
182 } else {
183 assert(mem_type == MT_NON_CACHEABLE);
184 desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
185 }
186 }
187
188 return desc;
189}
190
/*
 * Enumeration of actions that can be made when mapping table entries depending
 * on the previous value in that entry and information about the region being
 * mapped. Used by both the map and unmap table walkers.
 */
typedef enum {

	/* Do nothing: the entry is outside the region, or already mapped. */
	ACTION_NONE,

	/* Write a block (or page, if in level 3) entry. */
	ACTION_WRITE_BLOCK_ENTRY,

	/*
	 * Create a new table and write a table entry pointing to it. Recurse
	 * into it for further processing.
	 */
	ACTION_CREATE_NEW_TABLE,

	/*
	 * There is a table descriptor in this entry, read it and recurse into
	 * that table for further processing.
	 */
	ACTION_RECURSE_INTO_TABLE,

} action_t;
217
Antonio Nino Diazac998032017-02-27 17:23:54 +0000218#if PLAT_XLAT_TABLES_DYNAMIC
219
/*
 * Recursive function that writes to the translation tables and unmaps the
 * specified region. 'table_base' must be a subtable (never the base level
 * table, which cannot be unmapped — see the note at the top of this #if
 * block). Erased entries are written as INVALID_DESC and their VAs are
 * TLB-invalidated; empty subtables have their parent reference removed.
 */
static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
				     const uintptr_t table_base_va,
				     uint64_t *const table_base,
				     const int table_entries,
				     const unsigned int level)
{
	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);

	uint64_t *subtable;
	uint64_t desc;

	uintptr_t table_idx_va;
	uintptr_t table_idx_end_va; /* End VA of this entry */

	/* Last VA (inclusive) covered by the region being unmapped. */
	uintptr_t region_end_va = mm->base_va + mm->size - 1;

	int table_idx;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

		table_idx = (table_idx_va - table_base_va) >>
			    XLAT_ADDR_SHIFT(level);

		assert(table_idx < table_entries);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
		table_idx = 0;
	}

	while (table_idx < table_entries) {

		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1;

		desc = table_base[table_idx];
		uint64_t desc_type = desc & DESC_MASK;

		action_t action = ACTION_NONE;

		if ((mm->base_va <= table_idx_va) &&
		    (region_end_va >= table_idx_end_va)) {

			/* Region covers all block */

			if (level == 3) {
				/*
				 * Last level, only page descriptors allowed,
				 * erase it.
				 */
				assert(desc_type == PAGE_DESC);

				action = ACTION_WRITE_BLOCK_ENTRY;
			} else {
				/*
				 * Other levels can have table descriptors. If
				 * so, recurse into it and erase descriptors
				 * inside it as needed. If there is a block
				 * descriptor, just erase it. If an invalid
				 * descriptor is found, this table isn't
				 * actually mapped, which shouldn't happen.
				 */
				if (desc_type == TABLE_DESC) {
					action = ACTION_RECURSE_INTO_TABLE;
				} else {
					assert(desc_type == BLOCK_DESC);
					action = ACTION_WRITE_BLOCK_ENTRY;
				}
			}

		} else if ((mm->base_va <= table_idx_end_va) ||
			   (region_end_va >= table_idx_va)) {

			/*
			 * Region partially covers block.
			 *
			 * It can't happen in level 3.
			 *
			 * There must be a table descriptor here, if not there
			 * was a problem when mapping the region.
			 *
			 * NOTE(review): this '||' is a tautology (one of the
			 * two disjuncts always holds for non-empty intervals),
			 * so this branch is taken for every entry not fully
			 * covered. The intended interval-overlap test is '&&';
			 * it is harmless here only because the loop bounds
			 * restrict iteration to overlapping entries — confirm
			 * against upstream before changing.
			 */

			assert(level < 3);

			assert(desc_type == TABLE_DESC);

			action = ACTION_RECURSE_INTO_TABLE;
		}

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			/* Erase the entry and invalidate any cached copy. */
			table_base[table_idx] = INVALID_DESC;
			xlat_arch_tlbi_va(table_idx_va);

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);

			/* Recurse to write into subtable */
			xlat_tables_unmap_region(ctx, mm, table_idx_va,
						 subtable, XLAT_TABLE_ENTRIES,
						 level + 1);

			/*
			 * If the subtable is now empty, remove its reference.
			 */
			if (xlat_table_is_empty(ctx, subtable)) {
				table_base[table_idx] = INVALID_DESC;
				xlat_arch_tlbi_va(table_idx_va);
			}

		} else {
			assert(action == ACTION_NONE);
		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (region_end_va <= table_idx_va)
			break;
	}

	/* This table no longer maps the region; update its usage count. */
	if (level > ctx->base_level)
		xlat_table_dec_regions_count(ctx, table_base);
}
351
352#endif /* PLAT_XLAT_TABLES_DYNAMIC */
353
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000354/*
355 * From the given arguments, it decides which action to take when mapping the
356 * specified region.
357 */
358static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
359 const int desc_type, const unsigned long long dest_pa,
Sandrine Bailleux12e86442017-07-19 10:11:13 +0100360 const uintptr_t table_entry_base_va, const unsigned int level)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000361{
362 uintptr_t mm_end_va = mm->base_va + mm->size - 1;
363 uintptr_t table_entry_end_va =
364 table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1;
365
366 /*
367 * The descriptor types allowed depend on the current table level.
368 */
369
370 if ((mm->base_va <= table_entry_base_va) &&
371 (mm_end_va >= table_entry_end_va)) {
372
373 /*
374 * Table entry is covered by region
375 * --------------------------------
376 *
377 * This means that this table entry can describe the whole
378 * translation with this granularity in principle.
379 */
380
381 if (level == 3) {
382 /*
383 * Last level, only page descriptors are allowed.
384 */
385 if (desc_type == PAGE_DESC) {
386 /*
387 * There's another region mapped here, don't
388 * overwrite.
389 */
390 return ACTION_NONE;
391 } else {
392 assert(desc_type == INVALID_DESC);
393 return ACTION_WRITE_BLOCK_ENTRY;
394 }
395
396 } else {
397
398 /*
399 * Other levels. Table descriptors are allowed. Block
400 * descriptors too, but they have some limitations.
401 */
402
403 if (desc_type == TABLE_DESC) {
404 /* There's already a table, recurse into it. */
405 return ACTION_RECURSE_INTO_TABLE;
406
407 } else if (desc_type == INVALID_DESC) {
408 /*
409 * There's nothing mapped here, create a new
410 * entry.
411 *
412 * Check if the destination granularity allows
413 * us to use a block descriptor or we need a
414 * finer table for it.
415 *
416 * Also, check if the current level allows block
417 * descriptors. If not, create a table instead.
418 */
419 if ((dest_pa & XLAT_BLOCK_MASK(level)) ||
Sandrine Bailleux8f23fa82017-09-28 21:58:12 +0100420 (level < MIN_LVL_BLOCK_DESC) ||
421 (mm->granularity < XLAT_BLOCK_SIZE(level)))
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000422 return ACTION_CREATE_NEW_TABLE;
423 else
424 return ACTION_WRITE_BLOCK_ENTRY;
425
426 } else {
427 /*
428 * There's another region mapped here, don't
429 * overwrite.
430 */
431 assert(desc_type == BLOCK_DESC);
432
433 return ACTION_NONE;
434 }
435 }
436
437 } else if ((mm->base_va <= table_entry_end_va) ||
438 (mm_end_va >= table_entry_base_va)) {
439
440 /*
441 * Region partially covers table entry
442 * -----------------------------------
443 *
444 * This means that this table entry can't describe the whole
445 * translation, a finer table is needed.
446
447 * There cannot be partial block overlaps in level 3. If that
448 * happens, some of the preliminary checks when adding the
449 * mmap region failed to detect that PA and VA must at least be
450 * aligned to PAGE_SIZE.
451 */
452 assert(level < 3);
453
454 if (desc_type == INVALID_DESC) {
455 /*
456 * The block is not fully covered by the region. Create
457 * a new table, recurse into it and try to map the
458 * region with finer granularity.
459 */
460 return ACTION_CREATE_NEW_TABLE;
461
462 } else {
463 assert(desc_type == TABLE_DESC);
464 /*
465 * The block is not fully covered by the region, but
466 * there is already a table here. Recurse into it and
467 * try to map with finer granularity.
468 *
469 * PAGE_DESC for level 3 has the same value as
470 * TABLE_DESC, but this code can't run on a level 3
471 * table because there can't be overlaps in level 3.
472 */
473 return ACTION_RECURSE_INTO_TABLE;
474 }
475 }
476
477 /*
478 * This table entry is outside of the region specified in the arguments,
479 * don't write anything to it.
480 */
481 return ACTION_NONE;
482}
483
/*
 * Recursive function that writes to the translation tables and maps the
 * specified region. On success, it returns the VA of the last byte that was
 * successfully mapped. On error, it returns the VA of the next entry that
 * should have been mapped.
 */
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
					const uintptr_t table_base_va,
					uint64_t *const table_base,
					const int table_entries,
					const unsigned int level)
{
	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);

	/* Last VA (inclusive) covered by the region being mapped. */
	uintptr_t mm_end_va = mm->base_va + mm->size - 1;

	uintptr_t table_idx_va;
	unsigned long long table_idx_pa;

	uint64_t *subtable;
	uint64_t desc;

	int table_idx;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

		table_idx = (table_idx_va - table_base_va) >>
			    XLAT_ADDR_SHIFT(level);

		assert(table_idx < table_entries);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
		table_idx = 0;
	}

#if PLAT_XLAT_TABLES_DYNAMIC
	/* Record that this table maps (part of) one more region. */
	if (level > ctx->base_level)
		xlat_table_inc_regions_count(ctx, table_base);
#endif

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		/* PA that corresponds to the VA of this entry. */
		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;

		action_t action = xlat_tables_map_region_action(mm,
			desc & DESC_MASK, table_idx_pa, table_idx_va, level);

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] =
				xlat_desc(mm->attr, table_idx_pa, level,
					  ctx->execute_never_mask);

		} else if (action == ACTION_CREATE_NEW_TABLE) {

			subtable = xlat_table_get_empty(ctx);
			if (subtable == NULL) {
				/* Not enough free tables to map this region */
				return table_idx_va;
			}

			/* Point to new subtable from this one. */
			table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;

			/* Recurse to write into subtable */
			uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					       subtable, XLAT_TABLE_ENTRIES,
					       level + 1);
			/* Propagate a partial-mapping failure to the caller. */
			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
				return end_va;

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
			/* Recurse to write into subtable */
			uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					       subtable, XLAT_TABLE_ENTRIES,
					       level + 1);
			/* Propagate a partial-mapping failure to the caller. */
			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
				return end_va;

		} else {

			assert(action == ACTION_NONE);

		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (mm_end_va <= table_idx_va)
			break;
	}

	/* table_idx_va is the start of the first unmapped entry. */
	return table_idx_va - 1;
}
586
587void print_mmap(mmap_region_t *const mmap)
588{
589#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
590 tf_printf("mmap:\n");
591 mmap_region_t *mm = mmap;
592
593 while (mm->size) {
Sandrine Bailleux8f23fa82017-09-28 21:58:12 +0100594 tf_printf(" VA:%p PA:0x%llx size:0x%zx attr:0x%x",
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000595 (void *)mm->base_va, mm->base_pa,
596 mm->size, mm->attr);
Sandrine Bailleux8f23fa82017-09-28 21:58:12 +0100597 tf_printf(" granularity:0x%zx\n", mm->granularity);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000598 ++mm;
599 };
600 tf_printf("\n");
601#endif
602}
603
/*
 * Function that verifies that a region can be mapped.
 * Returns:
 *        0: Success, the mapping is allowed.
 *   EINVAL: Invalid values were used as arguments.
 *   ERANGE: The memory limits were surpassed.
 *   ENOMEM: There is not enough memory in the mmap array.
 *    EPERM: Region overlaps another one in an invalid way.
 */
static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	unsigned long long base_pa = mm->base_pa;
	uintptr_t base_va = mm->base_va;
	size_t size = mm->size;
	size_t granularity = mm->granularity;

	/* Inclusive end addresses of the candidate region. */
	unsigned long long end_pa = base_pa + size - 1;
	uintptr_t end_va = base_va + size - 1;

	/* Addresses and size must be page aligned. */
	if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
			!IS_PAGE_ALIGNED(size))
		return -EINVAL;

	/* Granularity must match one of the supported block sizes. */
	if ((granularity != XLAT_BLOCK_SIZE(1)) &&
		(granularity != XLAT_BLOCK_SIZE(2)) &&
		(granularity != XLAT_BLOCK_SIZE(3))) {
		return -EINVAL;
	}

	/* Check for overflows */
	if ((base_pa > end_pa) || (base_va > end_va))
		return -ERANGE;

	/* The region must fit inside this context's address spaces. */
	if ((base_va + (uintptr_t)size - (uintptr_t)1) > ctx->va_max_address)
		return -ERANGE;

	if ((base_pa + (unsigned long long)size - 1ULL) > ctx->pa_max_address)
		return -ERANGE;

	/* Check that there is space in the ctx->mmap array */
	if (ctx->mmap[ctx->mmap_num - 1].size != 0)
		return -ENOMEM;

	/* Check for PAs and VAs overlaps with all other regions */
	for (mmap_region_t *mm_cursor = ctx->mmap;
	     mm_cursor->size; ++mm_cursor) {

		uintptr_t mm_cursor_end_va = mm_cursor->base_va
			+ mm_cursor->size - 1;

		/*
		 * Check if one of the regions is completely inside the other
		 * one.
		 */
		int fully_overlapped_va =
			((base_va >= mm_cursor->base_va) &&
					(end_va <= mm_cursor_end_va)) ||

			((mm_cursor->base_va >= base_va) &&
					(mm_cursor_end_va <= end_va));

		/*
		 * Full VA overlaps are only allowed if both regions are
		 * identity mapped (zero offset) or have the same VA to PA
		 * offset. Also, make sure that it's not the exact same area.
		 * This can only be done with static regions.
		 */
		if (fully_overlapped_va) {

#if PLAT_XLAT_TABLES_DYNAMIC
			/* Dynamic regions may never overlap anything. */
			if ((mm->attr & MT_DYNAMIC) ||
						(mm_cursor->attr & MT_DYNAMIC))
				return -EPERM;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
			/* Both regions must share the same VA-to-PA offset. */
			if ((mm_cursor->base_va - mm_cursor->base_pa) !=
							(base_va - base_pa))
				return -EPERM;

			/* Reject an exact duplicate of an existing region. */
			if ((base_va == mm_cursor->base_va) &&
						(size == mm_cursor->size))
				return -EPERM;

		} else {
			/*
			 * If the regions do not have fully overlapping VAs,
			 * then they must have fully separated VAs and PAs.
			 * Partial overlaps are not allowed
			 */

			unsigned long long mm_cursor_end_pa =
				     mm_cursor->base_pa + mm_cursor->size - 1;

			int separated_pa =
				(end_pa < mm_cursor->base_pa) ||
				(base_pa > mm_cursor_end_pa);
			int separated_va =
				(end_va < mm_cursor->base_va) ||
				(base_va > mm_cursor_end_va);

			if (!(separated_va && separated_pa))
				return -EPERM;
		}
	}

	return 0;
}
710
/*
 * Add a static region to the given translation context, keeping the mmap
 * array sorted. Must be called before the context's tables are initialized;
 * an invalid region triggers an assertion (and is ignored in release builds).
 */
void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap;
	mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
	unsigned long long end_pa = mm->base_pa + mm->size - 1;
	uintptr_t end_va = mm->base_va + mm->size - 1;
	int ret;

	/* Ignore empty regions */
	if (!mm->size)
		return;

	/* Static regions must be added before initializing the xlat tables. */
	assert(!ctx->initialized);

	ret = mmap_add_region_check(ctx, mm);
	if (ret != 0) {
		ERROR("mmap_add_region_check() failed. error %d\n", ret);
		assert(0);
		return;
	}

	/*
	 * Find correct place in mmap to insert new region.
	 *
	 * 1 - Lower region VA end first.
	 * 2 - Smaller region size first.
	 *
	 * VA  0                                   0xFF
	 *
	 * 1st |------|
	 * 2nd |------------|
	 * 3rd                 |------|
	 * 4th                            |---|
	 * 5th                                   |---|
	 * 6th                            |----------|
	 * 7th |-------------------------------------|
	 *
	 * This is required for overlapping regions only. It simplifies adding
	 * regions with the loop in xlat_tables_init_internal because the outer
	 * ones won't overwrite block or page descriptors of regions added
	 * previously.
	 *
	 * Overlapping is only allowed for static regions.
	 */

	while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va
	       && mm_cursor->size)
		++mm_cursor;

	while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
	       && (mm_cursor->size < mm->size))
		++mm_cursor;

	/* Make room for new region by moving other regions up by one place */
	memmove(mm_cursor + 1, mm_cursor,
		(uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0);

	*mm_cursor = *mm;

	/* Track the highest mapped addresses seen by this context. */
	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;
}
783
/*
 * Add a static region, built with MAP_REGION(), to the translation context
 * of the BL image currently executing.
 */
void mmap_add_region(unsigned long long base_pa,
		     uintptr_t base_va,
		     size_t size,
		     mmap_attr_t attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
}
792
793
794void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
795{
796 while (mm->size) {
797 mmap_add_region_ctx(ctx, mm);
798 mm++;
799 }
800}
801
/*
 * Add a zero-terminated array of static regions to the translation context
 * of the BL image currently executing.
 */
void mmap_add(const mmap_region_t *mm)
{
	mmap_add_ctx(&tf_xlat_ctx, mm);
}
806
Antonio Nino Diazac998032017-02-27 17:23:54 +0000807#if PLAT_XLAT_TABLES_DYNAMIC
808
809int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
810{
811 mmap_region_t *mm_cursor = ctx->mmap;
812 mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
813 unsigned long long end_pa = mm->base_pa + mm->size - 1;
814 uintptr_t end_va = mm->base_va + mm->size - 1;
815 int ret;
816
817 /* Nothing to do */
818 if (!mm->size)
819 return 0;
820
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100821 /* Now this region is a dynamic one */
822 mm->attr |= MT_DYNAMIC;
823
824 ret = mmap_add_region_check(ctx, mm);
Antonio Nino Diazac998032017-02-27 17:23:54 +0000825 if (ret != 0)
826 return ret;
827
828 /*
829 * Find the adequate entry in the mmap array in the same way done for
830 * static regions in mmap_add_region_ctx().
831 */
832
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100833 while ((mm_cursor->base_va + mm_cursor->size - 1)
834 < end_va && mm_cursor->size)
Antonio Nino Diazac998032017-02-27 17:23:54 +0000835 ++mm_cursor;
836
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100837 while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
838 && (mm_cursor->size < mm->size))
Antonio Nino Diazac998032017-02-27 17:23:54 +0000839 ++mm_cursor;
840
841 /* Make room for new region by moving other regions up by one place */
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100842 memmove(mm_cursor + 1, mm_cursor,
843 (uintptr_t)mm_last - (uintptr_t)mm_cursor);
Antonio Nino Diazac998032017-02-27 17:23:54 +0000844
845 /*
846 * Check we haven't lost the empty sentinal from the end of the array.
847 * This shouldn't happen as we have checked in mmap_add_region_check
848 * that there is free space.
849 */
850 assert(mm_last->size == 0);
851
Douglas Raillardf68d2ed2017-09-12 10:31:49 +0100852 *mm_cursor = *mm;
Antonio Nino Diazac998032017-02-27 17:23:54 +0000853
854 /*
855 * Update the translation tables if the xlat tables are initialized. If
856 * not, this region will be mapped when they are initialized.
857 */
858 if (ctx->initialized) {
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100859 uintptr_t end_va = xlat_tables_map_region(ctx, mm_cursor,
860 0, ctx->base_table, ctx->base_table_entries,
861 ctx->base_level);
Antonio Nino Diazac998032017-02-27 17:23:54 +0000862
863 /* Failed to map, remove mmap entry, unmap and return error. */
864 if (end_va != mm_cursor->base_va + mm_cursor->size - 1) {
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100865 memmove(mm_cursor, mm_cursor + 1,
866 (uintptr_t)mm_last - (uintptr_t)mm_cursor);
Antonio Nino Diazac998032017-02-27 17:23:54 +0000867
868 /*
869 * Check if the mapping function actually managed to map
870 * anything. If not, just return now.
871 */
872 if (mm_cursor->base_va >= end_va)
873 return -ENOMEM;
874
875 /*
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100876 * Something went wrong after mapping some table
877 * entries, undo every change done up to this point.
Antonio Nino Diazac998032017-02-27 17:23:54 +0000878 */
879 mmap_region_t unmap_mm = {
880 .base_pa = 0,
881 .base_va = mm->base_va,
882 .size = end_va - mm->base_va,
883 .attr = 0
884 };
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100885 xlat_tables_unmap_region(ctx,
886 &unmap_mm, 0, ctx->base_table,
887 ctx->base_table_entries, ctx->base_level);
Antonio Nino Diazac998032017-02-27 17:23:54 +0000888
889 return -ENOMEM;
890 }
891
892 /*
893 * Make sure that all entries are written to the memory. There
894 * is no need to invalidate entries when mapping dynamic regions
895 * because new table/block/page descriptors only replace old
896 * invalid descriptors, that aren't TLB cached.
897 */
898 dsbishst();
899 }
900
901 if (end_pa > ctx->max_pa)
902 ctx->max_pa = end_pa;
903 if (end_va > ctx->max_va)
904 ctx->max_va = end_va;
905
906 return 0;
907}
908
/*
 * Add a dynamic region, built with MAP_REGION(), to the translation context
 * of the BL image currently executing. Returns 0 on success or a negative
 * errno value from mmap_add_dynamic_region_ctx() on failure.
 */
int mmap_add_dynamic_region(unsigned long long base_pa,
			    uintptr_t base_va, size_t size, mmap_attr_t attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
}
915
Antonio Nino Diazac998032017-02-27 17:23:54 +0000916/*
917 * Removes the region with given base Virtual Address and size from the given
918 * context.
919 *
920 * Returns:
921 * 0: Success.
922 * EINVAL: Invalid values were used as arguments (region not found).
923 * EPERM: Tried to remove a static region.
924 */
925int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
926 size_t size)
927{
928 mmap_region_t *mm = ctx->mmap;
929 mmap_region_t *mm_last = mm + ctx->mmap_num;
930 int update_max_va_needed = 0;
931 int update_max_pa_needed = 0;
932
933 /* Check sanity of mmap array. */
934 assert(mm[ctx->mmap_num].size == 0);
935
936 while (mm->size) {
937 if ((mm->base_va == base_va) && (mm->size == size))
938 break;
939 ++mm;
940 }
941
942 /* Check that the region was found */
943 if (mm->size == 0)
944 return -EINVAL;
945
946 /* If the region is static it can't be removed */
947 if (!(mm->attr & MT_DYNAMIC))
948 return -EPERM;
949
950 /* Check if this region is using the top VAs or PAs. */
951 if ((mm->base_va + mm->size - 1) == ctx->max_va)
952 update_max_va_needed = 1;
953 if ((mm->base_pa + mm->size - 1) == ctx->max_pa)
954 update_max_pa_needed = 1;
955
956 /* Update the translation tables if needed */
957 if (ctx->initialized) {
958 xlat_tables_unmap_region(ctx, mm, 0, ctx->base_table,
959 ctx->base_table_entries,
960 ctx->base_level);
961 xlat_arch_tlbi_va_sync();
962 }
963
964 /* Remove this region by moving the rest down by one place. */
965 memmove(mm, mm + 1, (uintptr_t)mm_last - (uintptr_t)mm);
966
967 /* Check if we need to update the max VAs and PAs */
968 if (update_max_va_needed) {
969 ctx->max_va = 0;
970 mm = ctx->mmap;
971 while (mm->size) {
972 if ((mm->base_va + mm->size - 1) > ctx->max_va)
973 ctx->max_va = mm->base_va + mm->size - 1;
974 ++mm;
975 }
976 }
977
978 if (update_max_pa_needed) {
979 ctx->max_pa = 0;
980 mm = ctx->mmap;
981 while (mm->size) {
982 if ((mm->base_pa + mm->size - 1) > ctx->max_pa)
983 ctx->max_pa = mm->base_pa + mm->size - 1;
984 ++mm;
985 }
986 }
987
988 return 0;
989}
990
Sandrine Bailleux66342932017-07-18 13:26:36 +0100991int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
992{
993 return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
994 base_va, size);
995}
996
Antonio Nino Diazac998032017-02-27 17:23:54 +0000997#endif /* PLAT_XLAT_TABLES_DYNAMIC */
998
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000999#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
1000
1001/* Print the attributes of the specified block descriptor. */
Antonio Nino Diazefabaa92017-04-27 13:30:22 +01001002static void xlat_desc_print(uint64_t desc, uint64_t execute_never_mask)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001003{
1004 int mem_type_index = ATTR_INDEX_GET(desc);
1005
1006 if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
1007 tf_printf("MEM");
1008 } else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
1009 tf_printf("NC");
1010 } else {
1011 assert(mem_type_index == ATTR_DEVICE_INDEX);
1012 tf_printf("DEV");
1013 }
1014
1015 tf_printf(LOWER_ATTRS(AP_RO) & desc ? "-RO" : "-RW");
1016 tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
Antonio Nino Diazefabaa92017-04-27 13:30:22 +01001017 tf_printf(execute_never_mask & desc ? "-XN" : "-EXEC");
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001018}
1019
/* Per-lookup-level indentation prefixes used when dumping the tables. */
static const char * const level_spacers[] = {
	"[LV0] ",
	"  [LV1] ",
	"    [LV2] ",
	"      [LV3] "
};
1026
Antonio Nino Diaz755e54f2017-02-13 11:35:49 +00001027static const char *invalid_descriptors_ommited =
1028 "%s(%d invalid descriptors omitted)\n";
1029
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001030/*
1031 * Recursive function that reads the translation tables passed as an argument
1032 * and prints their status.
1033 */
1034static void xlat_tables_print_internal(const uintptr_t table_base_va,
1035 uint64_t *const table_base, const int table_entries,
Varun Wadekar8bf7f232017-06-16 14:15:34 -07001036 const unsigned int level, const uint64_t execute_never_mask)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001037{
1038 assert(level <= XLAT_TABLE_LEVEL_MAX);
1039
1040 uint64_t desc;
1041 uintptr_t table_idx_va = table_base_va;
1042 int table_idx = 0;
1043
1044 size_t level_size = XLAT_BLOCK_SIZE(level);
1045
Antonio Nino Diaz755e54f2017-02-13 11:35:49 +00001046 /*
1047 * Keep track of how many invalid descriptors are counted in a row.
1048 * Whenever multiple invalid descriptors are found, only the first one
1049 * is printed, and a line is added to inform about how many descriptors
1050 * have been omitted.
1051 */
1052 int invalid_row_count = 0;
1053
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001054 while (table_idx < table_entries) {
1055
1056 desc = table_base[table_idx];
1057
1058 if ((desc & DESC_MASK) == INVALID_DESC) {
1059
Antonio Nino Diaz755e54f2017-02-13 11:35:49 +00001060 if (invalid_row_count == 0) {
1061 tf_printf("%sVA:%p size:0x%zx\n",
1062 level_spacers[level],
1063 (void *)table_idx_va, level_size);
1064 }
1065 invalid_row_count++;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001066
1067 } else {
1068
Antonio Nino Diaz755e54f2017-02-13 11:35:49 +00001069 if (invalid_row_count > 1) {
1070 tf_printf(invalid_descriptors_ommited,
1071 level_spacers[level],
1072 invalid_row_count - 1);
1073 }
1074 invalid_row_count = 0;
1075
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001076 /*
1077 * Check if this is a table or a block. Tables are only
1078 * allowed in levels other than 3, but DESC_PAGE has the
1079 * same value as DESC_TABLE, so we need to check.
1080 */
1081 if (((desc & DESC_MASK) == TABLE_DESC) &&
1082 (level < XLAT_TABLE_LEVEL_MAX)) {
1083 /*
1084 * Do not print any PA for a table descriptor,
1085 * as it doesn't directly map physical memory
1086 * but instead points to the next translation
1087 * table in the translation table walk.
1088 */
1089 tf_printf("%sVA:%p size:0x%zx\n",
1090 level_spacers[level],
1091 (void *)table_idx_va, level_size);
1092
1093 uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
1094
1095 xlat_tables_print_internal(table_idx_va,
1096 (uint64_t *)addr_inner,
Antonio Nino Diazefabaa92017-04-27 13:30:22 +01001097 XLAT_TABLE_ENTRIES, level+1,
1098 execute_never_mask);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001099 } else {
1100 tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
1101 level_spacers[level],
1102 (void *)table_idx_va,
1103 (unsigned long long)(desc & TABLE_ADDR_MASK),
1104 level_size);
Antonio Nino Diazefabaa92017-04-27 13:30:22 +01001105 xlat_desc_print(desc, execute_never_mask);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001106 tf_printf("\n");
1107 }
1108 }
1109
1110 table_idx++;
1111 table_idx_va += level_size;
1112 }
Antonio Nino Diaz755e54f2017-02-13 11:35:49 +00001113
1114 if (invalid_row_count > 1) {
1115 tf_printf(invalid_descriptors_ommited,
1116 level_spacers[level], invalid_row_count - 1);
1117 }
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001118}
1119
1120#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
1121
/*
 * Dump the state of the given translation context: address limits, initial
 * lookup level, sub-table usage, and the translation tables themselves.
 * Compiles to an empty function unless the build is verbose.
 */
void xlat_tables_print(xlat_ctx_t *ctx)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	VERBOSE("Translation tables state:\n");
	VERBOSE("  Max allowed PA:  0x%llx\n", ctx->pa_max_address);
	VERBOSE("  Max allowed VA:  %p\n", (void *) ctx->va_max_address);
	VERBOSE("  Max mapped PA:   0x%llx\n", ctx->max_pa);
	VERBOSE("  Max mapped VA:   %p\n", (void *) ctx->max_va);

	VERBOSE("  Initial lookup level: %i\n", ctx->base_level);
	VERBOSE("  Entries @initial lookup level: %i\n",
		ctx->base_table_entries);

	int used_page_tables;
#if PLAT_XLAT_TABLES_DYNAMIC
	/*
	 * With dynamic regions, tables can be freed and reused, so count the
	 * ones that currently map at least one region.
	 */
	used_page_tables = 0;
	for (unsigned int i = 0; i < ctx->tables_num; ++i) {
		if (ctx->tables_mapped_regions[i] != 0)
			++used_page_tables;
	}
#else
	/* Without dynamic regions, tables are allocated linearly. */
	used_page_tables = ctx->next_table;
#endif
	VERBOSE("  Used %i sub-tables out of %i (spare: %i)\n",
		used_page_tables, ctx->tables_num,
		ctx->tables_num - used_page_tables);

	xlat_tables_print_internal(0, ctx->base_table, ctx->base_table_entries,
				   ctx->base_level, ctx->execute_never_mask);
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
}
1153
Sandrine Bailleux66342932017-07-18 13:26:36 +01001154void init_xlat_tables_ctx(xlat_ctx_t *ctx)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001155{
1156 mmap_region_t *mm = ctx->mmap;
1157
Sandrine Bailleux66342932017-07-18 13:26:36 +01001158 assert(!is_mmu_enabled());
1159 assert(!ctx->initialized);
1160
1161 print_mmap(mm);
1162
1163 ctx->execute_never_mask =
1164 xlat_arch_get_xn_desc(xlat_arch_current_el());
1165
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001166 /* All tables must be zeroed before mapping any region. */
1167
Varun Wadekar66231d12017-06-07 09:57:42 -07001168 for (unsigned int i = 0; i < ctx->base_table_entries; i++)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001169 ctx->base_table[i] = INVALID_DESC;
1170
Varun Wadekar66231d12017-06-07 09:57:42 -07001171 for (unsigned int j = 0; j < ctx->tables_num; j++) {
Antonio Nino Diazac998032017-02-27 17:23:54 +00001172#if PLAT_XLAT_TABLES_DYNAMIC
1173 ctx->tables_mapped_regions[j] = 0;
1174#endif
Varun Wadekar66231d12017-06-07 09:57:42 -07001175 for (unsigned int i = 0; i < XLAT_TABLE_ENTRIES; i++)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001176 ctx->tables[j][i] = INVALID_DESC;
1177 }
1178
Antonio Nino Diazac998032017-02-27 17:23:54 +00001179 while (mm->size) {
1180 uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0, ctx->base_table,
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001181 ctx->base_table_entries, ctx->base_level);
1182
Antonio Nino Diazac998032017-02-27 17:23:54 +00001183 if (end_va != mm->base_va + mm->size - 1) {
1184 ERROR("Not enough memory to map region:\n"
1185 " VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
1186 (void *)mm->base_va, mm->base_pa, mm->size, mm->attr);
1187 panic();
1188 }
1189
1190 mm++;
1191 }
1192
Sandrine Bailleux46c53a22017-07-11 15:11:10 +01001193 assert(ctx->pa_max_address <= xlat_arch_get_max_supported_pa());
Sandrine Bailleux66342932017-07-18 13:26:36 +01001194 assert(ctx->max_va <= ctx->va_max_address);
1195 assert(ctx->max_pa <= ctx->pa_max_address);
1196
Sandrine Bailleuxc5b63772017-05-31 13:31:48 +01001197 ctx->initialized = 1;
1198
1199 xlat_tables_print(ctx);
Sandrine Bailleux66342932017-07-18 13:26:36 +01001200}
1201
/* Initialise the translation tables of the default context. */
void init_xlat_tables(void)
{
	init_xlat_tables_ctx(&tf_xlat_ctx);
}
Sandrine Bailleux66342932017-07-18 13:26:36 +01001206
Sandrine Bailleuxc5b63772017-05-31 13:31:48 +01001207/*
1208 * If dynamic allocation of new regions is disabled then by the time we call the
1209 * function enabling the MMU, we'll have registered all the memory regions to
1210 * map for the system's lifetime. Therefore, at this point we know the maximum
1211 * physical address that will ever be mapped.
1212 *
1213 * If dynamic allocation is enabled then we can't make any such assumption
1214 * because the maximum physical address could get pushed while adding a new
1215 * region. Therefore, in this case we have to assume that the whole address
1216 * space size might be mapped.
1217 */
1218#ifdef PLAT_XLAT_TABLES_DYNAMIC
Sandrine Bailleux46c53a22017-07-11 15:11:10 +01001219#define MAX_PHYS_ADDR tf_xlat_ctx.pa_max_address
Sandrine Bailleuxc5b63772017-05-31 13:31:48 +01001220#else
1221#define MAX_PHYS_ADDR tf_xlat_ctx.max_pa
1222#endif
1223
Sandrine Bailleux66342932017-07-18 13:26:36 +01001224#ifdef AARCH32
1225
/*
 * Enable the MMU using the default context's tables (AArch32 builds).
 * 'flags' is forwarded unchanged to the architectural enable function.
 */
void enable_mmu_secure(unsigned int flags)
{
	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
			tf_xlat_ctx.va_max_address);
}
1231
1232#else
1233
/*
 * Enable the MMU at EL1 using the default context's tables (AArch64 builds).
 * 'flags' is forwarded unchanged to the architectural enable function.
 */
void enable_mmu_el1(unsigned int flags)
{
	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
			tf_xlat_ctx.va_max_address);
}
1239
/*
 * Enable the MMU at EL3 using the default context's tables (AArch64 builds).
 * 'flags' is forwarded unchanged to the architectural enable function.
 */
void enable_mmu_el3(unsigned int flags)
{
	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
			tf_xlat_ctx.va_max_address);
}
1245
1246#endif /* AARCH32 */