/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <common_def.h>
#include <debug.h>
#include <errno.h>
#include <platform_def.h>
#include <string.h>
#include <types.h>
#include <utils.h>
#include <xlat_tables_arch.h>
#include <xlat_tables_defs.h>
#include <xlat_tables_v2.h>

#include "xlat_tables_private.h"

/*
 * Each platform can define the size of its physical and virtual address
 * spaces. If the platform hasn't defined one or both of them, default to
 * ADDR_SPACE_SIZE. The latter is deprecated, though.
 */
#if ERROR_DEPRECATED
# ifdef ADDR_SPACE_SIZE
#  error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
# endif
#elif defined(ADDR_SPACE_SIZE)
# ifndef PLAT_PHY_ADDR_SPACE_SIZE
#  define PLAT_PHY_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
# endif
# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
#  define PLAT_VIRT_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
# endif
#endif
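
/*
 * Illustrative platform_def.h snippet showing how a platform might size its
 * address spaces. The values below are assumptions made for the sake of the
 * example, not taken from any real platform port:
 *
 *   #define PLAT_PHY_ADDR_SPACE_SIZE	(1ull << 32)
 *   #define PLAT_VIRT_ADDR_SPACE_SIZE	(1ull << 32)
 *
 * If neither macro is defined, the deprecated ADDR_SPACE_SIZE fallback above
 * is used instead.
 */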

/*
 * Allocate and initialise the default translation context for the BL image
 * currently executing.
 */
REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
		PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);

#if PLAT_XLAT_TABLES_DYNAMIC

/*
 * The following functions assume that they will be called using subtables only.
 * The base table can't be unmapped, so there is no need to do any special
 * handling for it.
 */

/*
 * Returns the index of the array corresponding to the specified translation
 * table.
 */
static int xlat_table_get_index(xlat_ctx_t *ctx, const uint64_t *table)
{
	for (unsigned int i = 0; i < ctx->tables_num; i++)
		if (ctx->tables[i] == table)
			return i;

	/*
	 * Maybe we were asked to get the index of the base level table, which
	 * should never happen.
	 */
	assert(0);

	return -1;
}

/* Returns a pointer to an empty translation table. */
static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
{
	for (unsigned int i = 0; i < ctx->tables_num; i++)
		if (ctx->tables_mapped_regions[i] == 0)
			return ctx->tables[i];

	return NULL;
}

/* Increments region count for a given table. */
static void xlat_table_inc_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
{
	ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]++;
}

/* Decrements region count for a given table. */
static void xlat_table_dec_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
{
	ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]--;
}

/* Returns 0 if the specified table isn't empty, otherwise 1. */
static int xlat_table_is_empty(xlat_ctx_t *ctx, const uint64_t *table)
{
	return !ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)];
}

#else /* PLAT_XLAT_TABLES_DYNAMIC */

/* Returns a pointer to the first empty translation table. */
static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
{
	assert(ctx->next_table < ctx->tables_num);

	return ctx->tables[ctx->next_table++];
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

/* Returns a block/page table descriptor for the given level and attributes. */
static uint64_t xlat_desc(mmap_attr_t attr, unsigned long long addr_pa,
			  int level, uint64_t execute_never_mask)
{
	uint64_t desc;
	int mem_type;

	/* Make sure that the granularity is fine enough to map this address. */
	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);

	desc = addr_pa;
	/*
	 * There are different translation table descriptors for level 3 and the
	 * rest.
	 */
	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
	/*
	 * Always set the access flag, as TF doesn't manage access flag faults.
	 * Deduce other fields of the descriptor based on the MT_NS and MT_RW
	 * memory region attributes.
	 */
	desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
	desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
	desc |= LOWER_ATTRS(ACCESS_FLAG);

	/*
	 * Deduce shareability domain and executability of the memory region
	 * from the memory type of the attributes (MT_TYPE).
	 *
	 * Data accesses to device memory and non-cacheable normal memory are
	 * coherent for all observers in the system, and correspondingly are
	 * always treated as being Outer Shareable. Therefore, for these 2 types
	 * of memory, it is not strictly needed to set the shareability field
	 * in the translation tables.
	 */
	mem_type = MT_TYPE(attr);
	if (mem_type == MT_DEVICE) {
		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
		/*
		 * Always map device memory as execute-never.
		 * This is to avoid the possibility of a speculative instruction
		 * fetch, which could be an issue if this memory region
		 * corresponds to a read-sensitive peripheral.
		 */
		desc |= execute_never_mask;

	} else { /* Normal memory */
		/*
		 * Always map read-write normal memory as execute-never.
		 * (Trusted Firmware doesn't self-modify its code, therefore
		 * R/W memory is reserved for data storage, which must not be
		 * executable.)
		 * Note that setting the XN bit here is for consistency only.
		 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
		 * which makes any writable memory region be treated as
		 * execute-never, regardless of the value of the XN bit in the
		 * translation table.
		 *
		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
		 * attribute to figure out the value of the XN bit.
		 */
		if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
			desc |= execute_never_mask;
		}

		if (mem_type == MT_MEMORY) {
			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
		} else {
			assert(mem_type == MT_NON_CACHEABLE);
			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
		}
	}

	return desc;
}
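
/*
 * Worked example (illustrative only): mapping a normal, read-write,
 * non-secure region with MT_MEMORY | MT_RW | MT_NS at a non-terminal level
 * yields a BLOCK_DESC with AP_RW, NS and the access flag set, write-back
 * cacheable inner-shareable attributes, and the execute-never mask set
 * because the region is writable. A read-only MT_EXECUTE region would keep
 * the XN bit clear instead.
 */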

/*
 * Enumeration of actions that can be taken when mapping table entries,
 * depending on the previous value in that entry and information about the
 * region being mapped.
 */
typedef enum {

	/* Do nothing */
	ACTION_NONE,

	/* Write a block (or page, if in level 3) entry. */
	ACTION_WRITE_BLOCK_ENTRY,

	/*
	 * Create a new table and write a table entry pointing to it. Recurse
	 * into it for further processing.
	 */
	ACTION_CREATE_NEW_TABLE,

	/*
	 * There is a table descriptor in this entry, read it and recurse into
	 * that table for further processing.
	 */
	ACTION_RECURSE_INTO_TABLE,

} action_t;

#if PLAT_XLAT_TABLES_DYNAMIC

/*
 * Recursive function that writes to the translation tables and unmaps the
 * specified region.
 */
static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
				     const uintptr_t table_base_va,
				     uint64_t *const table_base,
				     const int table_entries,
				     const unsigned int level)
{
	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);

	uint64_t *subtable;
	uint64_t desc;

	uintptr_t table_idx_va;
	uintptr_t table_idx_end_va; /* End VA of this entry */

	uintptr_t region_end_va = mm->base_va + mm->size - 1;

	int table_idx;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

		table_idx = (table_idx_va - table_base_va) >>
			    XLAT_ADDR_SHIFT(level);

		assert(table_idx < table_entries);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
		table_idx = 0;
	}

	while (table_idx < table_entries) {

		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1;

		desc = table_base[table_idx];
		uint64_t desc_type = desc & DESC_MASK;

		action_t action = ACTION_NONE;

		if ((mm->base_va <= table_idx_va) &&
		    (region_end_va >= table_idx_end_va)) {

			/* The region covers the whole block. */

			if (level == 3) {
				/*
				 * Last level, only page descriptors allowed,
				 * erase it.
				 */
				assert(desc_type == PAGE_DESC);

				action = ACTION_WRITE_BLOCK_ENTRY;
			} else {
				/*
				 * Other levels can have table descriptors. If
				 * so, recurse into it and erase descriptors
				 * inside it as needed. If there is a block
				 * descriptor, just erase it. If an invalid
				 * descriptor is found, this table isn't
				 * actually mapped, which shouldn't happen.
				 */
				if (desc_type == TABLE_DESC) {
					action = ACTION_RECURSE_INTO_TABLE;
				} else {
					assert(desc_type == BLOCK_DESC);
					action = ACTION_WRITE_BLOCK_ENTRY;
				}
			}

		} else if ((mm->base_va <= table_idx_end_va) ||
			   (region_end_va >= table_idx_va)) {

			/*
			 * The region partially covers the block.
			 *
			 * This can't happen in level 3.
			 *
			 * There must be a table descriptor here; if not, there
			 * was a problem when mapping the region.
			 */

			assert(level < 3);

			assert(desc_type == TABLE_DESC);

			action = ACTION_RECURSE_INTO_TABLE;
		}

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] = INVALID_DESC;
			xlat_arch_tlbi_va(table_idx_va);

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);

			/* Recurse to write into subtable */
			xlat_tables_unmap_region(ctx, mm, table_idx_va,
						 subtable, XLAT_TABLE_ENTRIES,
						 level + 1);

			/*
			 * If the subtable is now empty, remove its reference.
			 */
			if (xlat_table_is_empty(ctx, subtable)) {
				table_base[table_idx] = INVALID_DESC;
				xlat_arch_tlbi_va(table_idx_va);
			}

		} else {
			assert(action == ACTION_NONE);
		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (region_end_va <= table_idx_va)
			break;
	}

	if (level > ctx->base_level)
		xlat_table_dec_regions_count(ctx, table_base);
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

/*
 * From the given arguments, decide which action to take when mapping the
 * specified region.
 */
static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
		const int desc_type, const unsigned long long dest_pa,
		const uintptr_t table_entry_base_va, const unsigned int level)
{
	uintptr_t mm_end_va = mm->base_va + mm->size - 1;
	uintptr_t table_entry_end_va =
			table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1;

	/*
	 * The descriptor types allowed depend on the current table level.
	 */

	if ((mm->base_va <= table_entry_base_va) &&
	    (mm_end_va >= table_entry_end_va)) {

		/*
		 * Table entry is covered by region
		 * --------------------------------
		 *
		 * This means that this table entry can describe the whole
		 * translation with this granularity in principle.
		 */

		if (level == 3) {
			/*
			 * Last level, only page descriptors are allowed.
			 */
			if (desc_type == PAGE_DESC) {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				return ACTION_NONE;
			} else {
				assert(desc_type == INVALID_DESC);
				return ACTION_WRITE_BLOCK_ENTRY;
			}

		} else {

			/*
			 * Other levels. Table descriptors are allowed. Block
			 * descriptors too, but they have some limitations.
			 */

			if (desc_type == TABLE_DESC) {
				/* There's already a table, recurse into it. */
				return ACTION_RECURSE_INTO_TABLE;

			} else if (desc_type == INVALID_DESC) {
				/*
				 * There's nothing mapped here, create a new
				 * entry.
				 *
				 * Check if the destination granularity allows
				 * us to use a block descriptor or we need a
				 * finer table for it.
				 *
				 * Also, check if the current level allows block
				 * descriptors. If not, create a table instead.
				 */
				if ((dest_pa & XLAT_BLOCK_MASK(level)) ||
				    (level < MIN_LVL_BLOCK_DESC))
					return ACTION_CREATE_NEW_TABLE;
				else
					return ACTION_WRITE_BLOCK_ENTRY;

			} else {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				assert(desc_type == BLOCK_DESC);

				return ACTION_NONE;
			}
		}

	} else if ((mm->base_va <= table_entry_end_va) ||
		   (mm_end_va >= table_entry_base_va)) {

		/*
		 * Region partially covers table entry
		 * -----------------------------------
		 *
		 * This means that this table entry can't describe the whole
		 * translation, a finer table is needed.
		 *
		 * There cannot be partial block overlaps in level 3. If that
		 * happens, some of the preliminary checks when adding the
		 * mmap region failed to detect that PA and VA must at least be
		 * aligned to PAGE_SIZE.
		 */
		assert(level < 3);

		if (desc_type == INVALID_DESC) {
			/*
			 * The block is not fully covered by the region. Create
			 * a new table, recurse into it and try to map the
			 * region with finer granularity.
			 */
			return ACTION_CREATE_NEW_TABLE;

		} else {
			assert(desc_type == TABLE_DESC);
			/*
			 * The block is not fully covered by the region, but
			 * there is already a table here. Recurse into it and
			 * try to map with finer granularity.
			 *
			 * PAGE_DESC for level 3 has the same value as
			 * TABLE_DESC, but this code can't run on a level 3
			 * table because there can't be overlaps in level 3.
			 */
			return ACTION_RECURSE_INTO_TABLE;
		}
	}

	/*
	 * This table entry is outside of the region specified in the arguments,
	 * don't write anything to it.
	 */
	return ACTION_NONE;
}
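
/*
 * Illustrative walk-through (assuming a 4 KiB granule, so level 2 blocks are
 * 2 MiB): when a region fully covers a currently invalid level 2 entry and
 * the destination PA is 2 MiB aligned, the function above returns
 * ACTION_WRITE_BLOCK_ENTRY. If the PA is misaligned, or the region only
 * partially covers the entry, it returns ACTION_CREATE_NEW_TABLE or
 * ACTION_RECURSE_INTO_TABLE so that the mapping is retried at a finer level.
 */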

/*
 * Recursive function that writes to the translation tables and maps the
 * specified region. On success, it returns the VA of the last byte that was
 * successfully mapped. On error, it returns the VA of the next entry that
 * should have been mapped.
 */
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
					const uintptr_t table_base_va,
					uint64_t *const table_base,
					const int table_entries,
					const unsigned int level)
{
	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);

	uintptr_t mm_end_va = mm->base_va + mm->size - 1;

	uintptr_t table_idx_va;
	unsigned long long table_idx_pa;

	uint64_t *subtable;
	uint64_t desc;

	int table_idx;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

		table_idx = (table_idx_va - table_base_va) >>
			    XLAT_ADDR_SHIFT(level);

		assert(table_idx < table_entries);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
		table_idx = 0;
	}

#if PLAT_XLAT_TABLES_DYNAMIC
	if (level > ctx->base_level)
		xlat_table_inc_regions_count(ctx, table_base);
#endif

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;

		action_t action = xlat_tables_map_region_action(mm,
			desc & DESC_MASK, table_idx_pa, table_idx_va, level);

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] =
				xlat_desc(mm->attr, table_idx_pa, level,
					  ctx->execute_never_mask);

		} else if (action == ACTION_CREATE_NEW_TABLE) {

			subtable = xlat_table_get_empty(ctx);
			if (subtable == NULL) {
				/* Not enough free tables to map this region */
				return table_idx_va;
			}

			/* Point to new subtable from this one. */
			table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;

			/* Recurse to write into subtable */
			uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					subtable, XLAT_TABLE_ENTRIES,
					level + 1);
			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
				return end_va;

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
			/* Recurse to write into subtable */
			uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					subtable, XLAT_TABLE_ENTRIES,
					level + 1);
			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
				return end_va;

		} else {

			assert(action == ACTION_NONE);

		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (mm_end_va <= table_idx_va)
			break;
	}

	return table_idx_va - 1;
}

void print_mmap(mmap_region_t *const mmap)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	tf_printf("mmap:\n");
	mmap_region_t *mm = mmap;

	while (mm->size) {
		tf_printf(" VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
				(void *)mm->base_va, mm->base_pa,
				mm->size, mm->attr);
		++mm;
	}
	tf_printf("\n");
#endif
}

/*
 * Function that verifies that a region can be mapped.
 * Returns:
 *        0: Success, the mapping is allowed.
 *   EINVAL: Invalid values were used as arguments.
 *   ERANGE: The memory limits were surpassed.
 *   ENOMEM: There is not enough memory in the mmap array.
 *    EPERM: Region overlaps another one in an invalid way.
 */
static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	unsigned long long base_pa = mm->base_pa;
	uintptr_t base_va = mm->base_va;
	size_t size = mm->size;
	mmap_attr_t attr = mm->attr;

	unsigned long long end_pa = base_pa + size - 1;
	uintptr_t end_va = base_va + size - 1;

	if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
			!IS_PAGE_ALIGNED(size))
		return -EINVAL;

	/* Check for overflows */
	if ((base_pa > end_pa) || (base_va > end_va))
		return -ERANGE;

	if ((base_va + (uintptr_t)size - (uintptr_t)1) > ctx->va_max_address)
		return -ERANGE;

	if ((base_pa + (unsigned long long)size - 1ULL) > ctx->pa_max_address)
		return -ERANGE;

	/* Check that there is space in the ctx->mmap array */
	if (ctx->mmap[ctx->mmap_num - 1].size != 0)
		return -ENOMEM;

	/* Check for PAs and VAs overlaps with all other regions */
	for (mmap_region_t *mm_cursor = ctx->mmap;
	     mm_cursor->size; ++mm_cursor) {

		uintptr_t mm_cursor_end_va = mm_cursor->base_va
						+ mm_cursor->size - 1;

		/*
		 * Check if one of the regions is completely inside the other
		 * one.
		 */
		int fully_overlapped_va =
			((base_va >= mm_cursor->base_va) &&
					(end_va <= mm_cursor_end_va)) ||

			((mm_cursor->base_va >= base_va) &&
					(mm_cursor_end_va <= end_va));

		/*
		 * Full VA overlaps are only allowed if both regions are
		 * identity mapped (zero offset) or have the same VA to PA
		 * offset. Also, make sure that it's not the exact same area.
		 * This can only be done with static regions.
		 */
		if (fully_overlapped_va) {

#if PLAT_XLAT_TABLES_DYNAMIC
			if ((attr & MT_DYNAMIC) ||
					(mm_cursor->attr & MT_DYNAMIC))
				return -EPERM;
#else
			(void)attr;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
			if ((mm_cursor->base_va - mm_cursor->base_pa) !=
					(base_va - base_pa))
				return -EPERM;

			if ((base_va == mm_cursor->base_va) &&
					(size == mm_cursor->size))
				return -EPERM;

		} else {
			/*
			 * If the regions do not have fully overlapping VAs,
			 * then they must have fully separated VAs and PAs.
			 * Partial overlaps are not allowed.
			 */

			unsigned long long mm_cursor_end_pa =
				mm_cursor->base_pa + mm_cursor->size - 1;

			int separated_pa =
				(end_pa < mm_cursor->base_pa) ||
				(base_pa > mm_cursor_end_pa);
			int separated_va =
				(end_va < mm_cursor->base_va) ||
				(base_va > mm_cursor_end_va);

			if (!(separated_va && separated_pa))
				return -EPERM;
		}
	}

	return 0;
}
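
/*
 * Example of the overlap policy above (hypothetical values): two static
 * regions may share the same VA range only if they also share the same VA to
 * PA offset, e.g. two identity-mapped regions at 0x80000000. A region that
 * only partially overlaps an existing one, or that reuses its VA range with a
 * different PA offset, is rejected with -EPERM.
 */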

void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap;
	mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
	unsigned long long end_pa = mm->base_pa + mm->size - 1;
	uintptr_t end_va = mm->base_va + mm->size - 1;
	int ret;

	/* Ignore empty regions */
	if (!mm->size)
		return;

	/* Static regions must be added before initializing the xlat tables. */
	assert(!ctx->initialized);

	ret = mmap_add_region_check(ctx, mm);
	if (ret != 0) {
		ERROR("mmap_add_region_check() failed. error %d\n", ret);
		assert(0);
		return;
	}

	/*
	 * Find correct place in mmap to insert new region.
	 *
	 * 1 - Lower region VA end first.
	 * 2 - Smaller region size first.
	 *
	 * VA  0                                   0xFF
	 *
	 * 1st |------|
	 * 2nd |------------|
	 * 3rd                 |------|
	 * 4th                            |---|
	 * 5th                                   |---|
	 * 6th                            |----------|
	 * 7th |-------------------------------------|
	 *
	 * This is required for overlapping regions only. It simplifies adding
	 * regions with the loop in init_xlat_tables_ctx because the outer
	 * ones won't overwrite block or page descriptors of regions added
	 * previously.
	 *
	 * Overlapping is only allowed for static regions.
	 */

	while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va
	       && mm_cursor->size)
		++mm_cursor;

	while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
	       && (mm_cursor->size < mm->size))
		++mm_cursor;

	/* Make room for new region by moving other regions up by one place */
	memmove(mm_cursor + 1, mm_cursor,
		(uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0);

	*mm_cursor = *mm;

	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;
}

void mmap_add_region(unsigned long long base_pa,
		     uintptr_t base_va,
		     size_t size,
		     mmap_attr_t attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
}
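
/*
 * Typical usage (illustrative only; the address, size and attributes below
 * are assumptions, not taken from a real platform port):
 *
 *   mmap_add_region(0x80000000, 0x80000000, 0x40000,
 *                   MT_MEMORY | MT_RW | MT_NS);
 *
 * Static regions like this must be added before init_xlat_tables() is called.
 */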

void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	while (mm->size) {
		mmap_add_region_ctx(ctx, mm);
		mm++;
	}
}

void mmap_add(const mmap_region_t *mm)
{
	mmap_add_ctx(&tf_xlat_ctx, mm);
}

#if PLAT_XLAT_TABLES_DYNAMIC

int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap;
	mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
	unsigned long long end_pa = mm->base_pa + mm->size - 1;
	uintptr_t end_va = mm->base_va + mm->size - 1;
	int ret;

	/* Nothing to do */
	if (!mm->size)
		return 0;

	/* Now this region is a dynamic one */
	mm->attr |= MT_DYNAMIC;

	ret = mmap_add_region_check(ctx, mm);
	if (ret != 0)
		return ret;

	/*
	 * Find the adequate entry in the mmap array in the same way done for
	 * static regions in mmap_add_region_ctx().
	 */

	while ((mm_cursor->base_va + mm_cursor->size - 1)
	       < end_va && mm_cursor->size)
		++mm_cursor;

	while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
	       && (mm_cursor->size < mm->size))
		++mm_cursor;

	/* Make room for new region by moving other regions up by one place */
	memmove(mm_cursor + 1, mm_cursor,
		(uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0);

	*mm_cursor = *mm;

	/*
	 * Update the translation tables if the xlat tables are initialized. If
	 * not, this region will be mapped when they are initialized.
	 */
	if (ctx->initialized) {
		uintptr_t end_va = xlat_tables_map_region(ctx, mm_cursor,
				0, ctx->base_table, ctx->base_table_entries,
				ctx->base_level);

		/* Failed to map, remove mmap entry, unmap and return error. */
		if (end_va != mm_cursor->base_va + mm_cursor->size - 1) {
			memmove(mm_cursor, mm_cursor + 1,
				(uintptr_t)mm_last - (uintptr_t)mm_cursor);

			/*
			 * Check if the mapping function actually managed to map
			 * anything. If not, just return now.
			 */
			if (mm_cursor->base_va >= end_va)
				return -ENOMEM;

			/*
			 * Something went wrong after mapping some table
			 * entries, undo every change done up to this point.
			 */
			mmap_region_t unmap_mm = {
				.base_pa = 0,
				.base_va = mm->base_va,
				.size = end_va - mm->base_va,
				.attr = 0
			};
			xlat_tables_unmap_region(ctx,
				&unmap_mm, 0, ctx->base_table,
				ctx->base_table_entries, ctx->base_level);

			return -ENOMEM;
		}

		/*
		 * Make sure that all entries are written to the memory. There
		 * is no need to invalidate entries when mapping dynamic regions
		 * because new table/block/page descriptors only replace old
		 * invalid descriptors, which aren't TLB cached.
		 */
		dsbishst();
	}

	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;

	return 0;
}

int mmap_add_dynamic_region(unsigned long long base_pa,
			    uintptr_t base_va, size_t size, mmap_attr_t attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
}

/*
 * Removes the region with given base Virtual Address and size from the given
 * context.
 *
 * Returns:
 *        0: Success.
 *   EINVAL: Invalid values were used as arguments (region not found).
 *    EPERM: Tried to remove a static region.
 */
int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
				   size_t size)
{
	mmap_region_t *mm = ctx->mmap;
	mmap_region_t *mm_last = mm + ctx->mmap_num;
	int update_max_va_needed = 0;
	int update_max_pa_needed = 0;

	/* Check sanity of mmap array. */
	assert(mm[ctx->mmap_num].size == 0);

	while (mm->size) {
		if ((mm->base_va == base_va) && (mm->size == size))
			break;
		++mm;
	}

	/* Check that the region was found */
	if (mm->size == 0)
		return -EINVAL;

	/* If the region is static it can't be removed */
	if (!(mm->attr & MT_DYNAMIC))
		return -EPERM;

	/* Check if this region is using the top VAs or PAs. */
	if ((mm->base_va + mm->size - 1) == ctx->max_va)
		update_max_va_needed = 1;
	if ((mm->base_pa + mm->size - 1) == ctx->max_pa)
		update_max_pa_needed = 1;

	/* Update the translation tables if needed */
	if (ctx->initialized) {
		xlat_tables_unmap_region(ctx, mm, 0, ctx->base_table,
					 ctx->base_table_entries,
					 ctx->base_level);
		xlat_arch_tlbi_va_sync();
	}

	/* Remove this region by moving the rest down by one place. */
	memmove(mm, mm + 1, (uintptr_t)mm_last - (uintptr_t)mm);

	/* Check if we need to update the max VAs and PAs */
	if (update_max_va_needed) {
		ctx->max_va = 0;
		mm = ctx->mmap;
		while (mm->size) {
			if ((mm->base_va + mm->size - 1) > ctx->max_va)
				ctx->max_va = mm->base_va + mm->size - 1;
			++mm;
		}
	}

	if (update_max_pa_needed) {
		ctx->max_pa = 0;
		mm = ctx->mmap;
		while (mm->size) {
			if ((mm->base_pa + mm->size - 1) > ctx->max_pa)
				ctx->max_pa = mm->base_pa + mm->size - 1;
			++mm;
		}
	}

	return 0;
}

int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
{
	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
					base_va, size);
}
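
/*
 * Sketch of a typical dynamic mapping sequence (illustrative only; the
 * address, size and attributes are assumptions): a driver can map a device
 * page at runtime and drop it again once it is done with it:
 *
 *   int ret = mmap_add_dynamic_region(0x1C090000, 0x1C090000, PAGE_SIZE,
 *                                     MT_DEVICE | MT_RW | MT_NS);
 *   if (ret == 0) {
 *           ...access the device...
 *           mmap_remove_dynamic_region(0x1C090000, PAGE_SIZE);
 *   }
 *
 * Both calls operate on the active tf_xlat_ctx and, unlike the static
 * mmap_add_region() API, can be issued after the translation tables have been
 * initialised.
 */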

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

#if LOG_LEVEL >= LOG_LEVEL_VERBOSE

/* Print the attributes of the specified block descriptor. */
static void xlat_desc_print(uint64_t desc, uint64_t execute_never_mask)
{
	int mem_type_index = ATTR_INDEX_GET(desc);

	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
		tf_printf("MEM");
	} else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
		tf_printf("NC");
	} else {
		assert(mem_type_index == ATTR_DEVICE_INDEX);
		tf_printf("DEV");
	}

	tf_printf(LOWER_ATTRS(AP_RO) & desc ? "-RO" : "-RW");
	tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
	tf_printf(execute_never_mask & desc ? "-XN" : "-EXEC");
}

static const char * const level_spacers[] = {
	"[LV0] ",
	"  [LV1] ",
	"    [LV2] ",
	"      [LV3] "
};

static const char *invalid_descriptors_omitted =
		"%s(%d invalid descriptors omitted)\n";

/*
 * Recursive function that reads the translation tables passed as an argument
 * and prints their status.
 */
static void xlat_tables_print_internal(const uintptr_t table_base_va,
		uint64_t *const table_base, const int table_entries,
		const unsigned int level, const uint64_t execute_never_mask)
{
	assert(level <= XLAT_TABLE_LEVEL_MAX);

	uint64_t desc;
	uintptr_t table_idx_va = table_base_va;
	int table_idx = 0;

	size_t level_size = XLAT_BLOCK_SIZE(level);

	/*
	 * Keep track of how many invalid descriptors are counted in a row.
	 * Whenever multiple invalid descriptors are found, only the first one
	 * is printed, and a line is added to inform about how many descriptors
	 * have been omitted.
	 */
	int invalid_row_count = 0;

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		if ((desc & DESC_MASK) == INVALID_DESC) {

			if (invalid_row_count == 0) {
				tf_printf("%sVA:%p size:0x%zx\n",
					  level_spacers[level],
					  (void *)table_idx_va, level_size);
			}
			invalid_row_count++;

		} else {

			if (invalid_row_count > 1) {
				tf_printf(invalid_descriptors_omitted,
					  level_spacers[level],
					  invalid_row_count - 1);
			}
			invalid_row_count = 0;

			/*
			 * Check if this is a table or a block. Tables are only
			 * allowed in levels other than 3, but PAGE_DESC has the
			 * same value as TABLE_DESC, so we need to check.
			 */
			if (((desc & DESC_MASK) == TABLE_DESC) &&
					(level < XLAT_TABLE_LEVEL_MAX)) {
				/*
				 * Do not print any PA for a table descriptor,
				 * as it doesn't directly map physical memory
				 * but instead points to the next translation
				 * table in the translation table walk.
				 */
				tf_printf("%sVA:%p size:0x%zx\n",
					  level_spacers[level],
					  (void *)table_idx_va, level_size);

				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;

				xlat_tables_print_internal(table_idx_va,
					(uint64_t *)addr_inner,
					XLAT_TABLE_ENTRIES, level + 1,
					execute_never_mask);
			} else {
				tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
					  level_spacers[level],
					  (void *)table_idx_va,
					  (unsigned long long)(desc & TABLE_ADDR_MASK),
					  level_size);
				xlat_desc_print(desc, execute_never_mask);
				tf_printf("\n");
			}
		}

		table_idx++;
		table_idx_va += level_size;
	}

	if (invalid_row_count > 1) {
		tf_printf(invalid_descriptors_omitted,
			  level_spacers[level], invalid_row_count - 1);
	}
}

#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */

void xlat_tables_print(xlat_ctx_t *ctx)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	VERBOSE("Translation tables state:\n");
	VERBOSE(" Max allowed PA: 0x%llx\n", ctx->pa_max_address);
	VERBOSE(" Max allowed VA: %p\n", (void *) ctx->va_max_address);
	VERBOSE(" Max mapped PA: 0x%llx\n", ctx->max_pa);
	VERBOSE(" Max mapped VA: %p\n", (void *) ctx->max_va);

	VERBOSE(" Initial lookup level: %i\n", ctx->base_level);
	VERBOSE(" Entries @initial lookup level: %i\n",
		ctx->base_table_entries);

	int used_page_tables;
#if PLAT_XLAT_TABLES_DYNAMIC
	used_page_tables = 0;
	for (unsigned int i = 0; i < ctx->tables_num; ++i) {
		if (ctx->tables_mapped_regions[i] != 0)
			++used_page_tables;
	}
#else
	used_page_tables = ctx->next_table;
#endif
	VERBOSE(" Used %i sub-tables out of %i (spare: %i)\n",
		used_page_tables, ctx->tables_num,
		ctx->tables_num - used_page_tables);

	xlat_tables_print_internal(0, ctx->base_table, ctx->base_table_entries,
				   ctx->base_level, ctx->execute_never_mask);
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
}

void init_xlat_tables_ctx(xlat_ctx_t *ctx)
{
	mmap_region_t *mm = ctx->mmap;

	assert(!is_mmu_enabled());
	assert(!ctx->initialized);

	print_mmap(mm);

	ctx->execute_never_mask =
			xlat_arch_get_xn_desc(xlat_arch_current_el());

	/* All tables must be zeroed before mapping any region. */

	for (unsigned int i = 0; i < ctx->base_table_entries; i++)
		ctx->base_table[i] = INVALID_DESC;

	for (unsigned int j = 0; j < ctx->tables_num; j++) {
#if PLAT_XLAT_TABLES_DYNAMIC
		ctx->tables_mapped_regions[j] = 0;
#endif
		for (unsigned int i = 0; i < XLAT_TABLE_ENTRIES; i++)
			ctx->tables[j][i] = INVALID_DESC;
	}

	while (mm->size) {
		uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0, ctx->base_table,
				ctx->base_table_entries, ctx->base_level);

		if (end_va != mm->base_va + mm->size - 1) {
			ERROR("Not enough memory to map region:\n"
			      " VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
			      (void *)mm->base_va, mm->base_pa, mm->size, mm->attr);
			panic();
		}

		mm++;
	}

	assert(ctx->pa_max_address <= xlat_arch_get_max_supported_pa());
	assert(ctx->max_va <= ctx->va_max_address);
	assert(ctx->max_pa <= ctx->pa_max_address);

	ctx->initialized = 1;

	xlat_tables_print(ctx);
}

void init_xlat_tables(void)
{
	init_xlat_tables_ctx(&tf_xlat_ctx);
}
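
/*
 * Illustrative BL image boot flow using this library (a sketch; the region
 * list and the choice of exception level are assumptions specific to each
 * platform):
 *
 *   mmap_add(plat_regions);     // register all static regions first
 *   init_xlat_tables();         // populate tf_xlat_ctx and its tables
 *   enable_mmu_el3(0);          // or enable_mmu_el1()/enable_mmu_secure()
 */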

/*
 * If dynamic allocation of new regions is disabled, then by the time we call
 * the function enabling the MMU, we'll have registered all the memory regions
 * to map for the system's lifetime. Therefore, at this point we know the
 * maximum physical address that will ever be mapped.
 *
 * If dynamic allocation is enabled, then we can't make any such assumption
 * because the maximum physical address could get pushed while adding a new
 * region. Therefore, in this case we have to assume that the whole address
 * space size might be mapped.
 */
#if PLAT_XLAT_TABLES_DYNAMIC
#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
#else
#define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
#endif

#ifdef AARCH32

void enable_mmu_secure(unsigned int flags)
{
	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
			tf_xlat_ctx.va_max_address);
}

#else

void enable_mmu_el1(unsigned int flags)
{
	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
			tf_xlat_ctx.va_max_address);
}

void enable_mmu_el3(unsigned int flags)
{
	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
			tf_xlat_ctx.va_max_address);
}

#endif /* AARCH32 */