blob: fd648137120be8babd59d15440c01e26a42dad2d [file] [log] [blame]
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001/*
2 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
3 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00005 */
6
7#include <arch.h>
8#include <arch_helpers.h>
9#include <assert.h>
10#include <cassert.h>
11#include <common_def.h>
12#include <debug.h>
13#include <errno.h>
14#include <platform_def.h>
15#include <string.h>
16#include <types.h>
17#include <utils.h>
18#include <xlat_tables_v2.h>
19#ifdef AARCH32
20# include "aarch32/xlat_tables_arch.h"
21#else
22# include "aarch64/xlat_tables_arch.h"
23#endif
24#include "xlat_tables_private.h"
25
Antonio Nino Diazac998032017-02-27 17:23:54 +000026#if PLAT_XLAT_TABLES_DYNAMIC
27
28/*
29 * The following functions assume that they will be called using subtables only.
30 * The base table can't be unmapped, so it is not needed to do any special
31 * handling for it.
32 */
33
34/*
35 * Returns the index of the array corresponding to the specified translation
36 * table.
37 */
38static int xlat_table_get_index(xlat_ctx_t *ctx, const uint64_t *table)
39{
40 for (int i = 0; i < ctx->tables_num; i++)
41 if (ctx->tables[i] == table)
42 return i;
43
44 /*
45 * Maybe we were asked to get the index of the base level table, which
46 * should never happen.
47 */
48 assert(0);
49
50 return -1;
51}
52
53/* Returns a pointer to an empty translation table. */
54static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
55{
56 for (int i = 0; i < ctx->tables_num; i++)
57 if (ctx->tables_mapped_regions[i] == 0)
58 return ctx->tables[i];
59
60 return NULL;
61}
62
63/* Increments region count for a given table. */
64static void xlat_table_inc_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
65{
66 ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]++;
67}
68
69/* Decrements region count for a given table. */
70static void xlat_table_dec_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
71{
72 ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]--;
73}
74
75/* Returns 0 if the speficied table isn't empty, otherwise 1. */
76static int xlat_table_is_empty(xlat_ctx_t *ctx, const uint64_t *table)
77{
78 return !ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)];
79}
80
81#else /* PLAT_XLAT_TABLES_DYNAMIC */
82
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +000083/* Returns a pointer to the first empty translation table. */
84static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
85{
86 assert(ctx->next_table < ctx->tables_num);
87
88 return ctx->tables[ctx->next_table++];
89}
90
Antonio Nino Diazac998032017-02-27 17:23:54 +000091#endif /* PLAT_XLAT_TABLES_DYNAMIC */
92
/*
 * Returns a block/page table descriptor for the given level and attributes.
 *
 * attr:    Memory attributes (MT_* flags) of the region being mapped.
 * addr_pa: Physical address the descriptor will map. Must be aligned to the
 *          block size of the given lookup level.
 * level:   Translation lookup level the descriptor is meant for.
 */
static uint64_t xlat_desc(mmap_attr_t attr, unsigned long long addr_pa,
			  int level)
{
	uint64_t desc;
	int mem_type;

	/* Make sure that the granularity is fine enough to map this address. */
	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);

	desc = addr_pa;
	/*
	 * There are different translation table descriptors for level 3 and the
	 * rest.
	 */
	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
	/*
	 * Always set the access flag, as TF doesn't manage access flag faults.
	 * Deduce other fields of the descriptor based on the MT_NS and MT_RW
	 * memory region attributes.
	 */
	desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
	desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
	desc |= LOWER_ATTRS(ACCESS_FLAG);

	/*
	 * Deduce shareability domain and executability of the memory region
	 * from the memory type of the attributes (MT_TYPE).
	 *
	 * Data accesses to device memory and non-cacheable normal memory are
	 * coherent for all observers in the system, and correspondingly are
	 * always treated as being Outer Shareable. Therefore, for these 2 types
	 * of memory, it is not strictly needed to set the shareability field
	 * in the translation tables.
	 */
	mem_type = MT_TYPE(attr);
	if (mem_type == MT_DEVICE) {
		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
		/*
		 * Always map device memory as execute-never.
		 * This is to avoid the possibility of a speculative instruction
		 * fetch, which could be an issue if this memory region
		 * corresponds to a read-sensitive peripheral.
		 */
		desc |= UPPER_ATTRS(XN);
	} else { /* Normal memory */
		/*
		 * Always map read-write normal memory as execute-never.
		 * (Trusted Firmware doesn't self-modify its code, therefore
		 * R/W memory is reserved for data storage, which must not be
		 * executable.)
		 * Note that setting the XN bit here is for consistency only.
		 * The enable_mmu_elx() function sets the SCTLR_EL3.WXN bit,
		 * which makes any writable memory region to be treated as
		 * execute-never, regardless of the value of the XN bit in the
		 * translation table.
		 *
		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
		 * attribute to figure out the value of the XN bit.
		 */
		if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER))
			desc |= UPPER_ATTRS(XN);

		if (mem_type == MT_MEMORY) {
			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
		} else {
			assert(mem_type == MT_NON_CACHEABLE);
			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
		}
	}

	return desc;
}
166
/*
 * Enumeration of actions that can be made when mapping table entries depending
 * on the previous value in that entry and information about the region being
 * mapped.
 *
 * Shared by the mapping walker (xlat_tables_map_region) and, when dynamic
 * regions are enabled, the unmapping walker (xlat_tables_unmap_region).
 */
typedef enum {

	/* Do nothing */
	ACTION_NONE,

	/* Write a block (or page, if in level 3) entry. */
	ACTION_WRITE_BLOCK_ENTRY,

	/*
	 * Create a new table and write a table entry pointing to it. Recurse
	 * into it for further processing.
	 */
	ACTION_CREATE_NEW_TABLE,

	/*
	 * There is a table descriptor in this entry, read it and recurse into
	 * that table for further processing.
	 */
	ACTION_RECURSE_INTO_TABLE,

} action_t;
193
Antonio Nino Diazac998032017-02-27 17:23:54 +0000194#if PLAT_XLAT_TABLES_DYNAMIC
195
/*
 * Recursive function that writes to the translation tables and unmaps the
 * specified region.
 *
 * ctx:           Translation context the tables belong to.
 * mm:            Region to unmap; base_va/size define the VA range.
 * table_base_va: VA mapped by the first entry of `table_base`.
 * table_base:    Translation table whose entries are to be erased.
 * table_entries: Number of entries in `table_base`.
 * level:         Translation lookup level of `table_base`.
 */
static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
				     const uintptr_t table_base_va,
				     uint64_t *const table_base,
				     const int table_entries,
				     const int level)
{
	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);

	uint64_t *subtable;
	uint64_t desc;

	uintptr_t table_idx_va;
	uintptr_t table_idx_end_va; /* End VA of this entry */

	uintptr_t region_end_va = mm->base_va + mm->size - 1;

	int table_idx;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

		table_idx = (table_idx_va - table_base_va) >>
			    XLAT_ADDR_SHIFT(level);

		assert(table_idx < table_entries);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
		table_idx = 0;
	}

	while (table_idx < table_entries) {

		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1;

		desc = table_base[table_idx];
		uint64_t desc_type = desc & DESC_MASK;

		action_t action = ACTION_NONE;

		if ((mm->base_va <= table_idx_va) &&
		    (region_end_va >= table_idx_end_va)) {

			/* Region covers all block */

			if (level == 3) {
				/*
				 * Last level, only page descriptors allowed,
				 * erase it.
				 */
				assert(desc_type == PAGE_DESC);

				action = ACTION_WRITE_BLOCK_ENTRY;
			} else {
				/*
				 * Other levels can have table descriptors. If
				 * so, recurse into it and erase descriptors
				 * inside it as needed. If there is a block
				 * descriptor, just erase it. If an invalid
				 * descriptor is found, this table isn't
				 * actually mapped, which shouldn't happen.
				 */
				if (desc_type == TABLE_DESC) {
					action = ACTION_RECURSE_INTO_TABLE;
				} else {
					assert(desc_type == BLOCK_DESC);
					action = ACTION_WRITE_BLOCK_ENTRY;
				}
			}

		} else if ((mm->base_va <= table_idx_end_va) ||
			   (region_end_va >= table_idx_va)) {

			/*
			 * Region partially covers block.
			 *
			 * It can't happen in level 3.
			 *
			 * There must be a table descriptor here, if not there
			 * was a problem when mapping the region.
			 *
			 * NOTE(review): given the loop start index and the
			 * early exit below, entries reaching this branch
			 * presumably always overlap the region partially —
			 * confirm against the map path's equivalent check.
			 */

			assert(level < 3);

			assert(desc_type == TABLE_DESC);

			action = ACTION_RECURSE_INTO_TABLE;
		}

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			/* Erase the entry and invalidate its cached walk. */
			table_base[table_idx] = INVALID_DESC;
			xlat_arch_tlbi_va(table_idx_va);

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);

			/* Recurse to write into subtable */
			xlat_tables_unmap_region(ctx, mm, table_idx_va,
						 subtable, XLAT_TABLE_ENTRIES,
						 level + 1);

			/*
			 * If the subtable is now empty, remove its reference.
			 */
			if (xlat_table_is_empty(ctx, subtable)) {
				table_base[table_idx] = INVALID_DESC;
				xlat_arch_tlbi_va(table_idx_va);
			}

		} else {
			assert(action == ACTION_NONE);
		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (region_end_va <= table_idx_va)
			break;
	}

	/* This table maps one region fewer now; the base table is not counted. */
	if (level > ctx->base_level)
		xlat_table_dec_regions_count(ctx, table_base);
}
327
328#endif /* PLAT_XLAT_TABLES_DYNAMIC */
329
/*
 * From the given arguments, it decides which action to take when mapping the
 * specified region.
 *
 * mm:                  Region being mapped.
 * desc_type:           Type (DESC_MASK bits) of the descriptor currently held
 *                      in the table entry.
 * dest_pa:             PA that this table entry would map.
 * table_entry_base_va: First VA covered by this table entry.
 * level:               Translation lookup level of the entry.
 */
static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
		const int desc_type, const unsigned long long dest_pa,
		const uintptr_t table_entry_base_va, const int level)
{
	uintptr_t mm_end_va = mm->base_va + mm->size - 1;
	uintptr_t table_entry_end_va =
			table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1;

	/*
	 * The descriptor types allowed depend on the current table level.
	 */

	if ((mm->base_va <= table_entry_base_va) &&
	    (mm_end_va >= table_entry_end_va)) {

		/*
		 * Table entry is covered by region
		 * --------------------------------
		 *
		 * This means that this table entry can describe the whole
		 * translation with this granularity in principle.
		 */

		if (level == 3) {
			/*
			 * Last level, only page descriptors are allowed.
			 */
			if (desc_type == PAGE_DESC) {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				return ACTION_NONE;
			} else {
				assert(desc_type == INVALID_DESC);
				return ACTION_WRITE_BLOCK_ENTRY;
			}

		} else {

			/*
			 * Other levels. Table descriptors are allowed. Block
			 * descriptors too, but they have some limitations.
			 */

			if (desc_type == TABLE_DESC) {
				/* There's already a table, recurse into it. */
				return ACTION_RECURSE_INTO_TABLE;

			} else if (desc_type == INVALID_DESC) {
				/*
				 * There's nothing mapped here, create a new
				 * entry.
				 *
				 * Check if the destination granularity allows
				 * us to use a block descriptor or we need a
				 * finer table for it.
				 *
				 * Also, check if the current level allows block
				 * descriptors. If not, create a table instead.
				 */
				if ((dest_pa & XLAT_BLOCK_MASK(level)) ||
				    (level < MIN_LVL_BLOCK_DESC))
					return ACTION_CREATE_NEW_TABLE;
				else
					return ACTION_WRITE_BLOCK_ENTRY;

			} else {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				assert(desc_type == BLOCK_DESC);

				return ACTION_NONE;
			}
		}

	} else if ((mm->base_va <= table_entry_end_va) ||
		   (mm_end_va >= table_entry_base_va)) {

		/*
		 * Region partially covers table entry
		 * -----------------------------------
		 *
		 * This means that this table entry can't describe the whole
		 * translation, a finer table is needed.
		 *
		 * There cannot be partial block overlaps in level 3. If that
		 * happens, some of the preliminary checks when adding the
		 * mmap region failed to detect that PA and VA must at least be
		 * aligned to PAGE_SIZE.
		 */
		assert(level < 3);

		if (desc_type == INVALID_DESC) {
			/*
			 * The block is not fully covered by the region. Create
			 * a new table, recurse into it and try to map the
			 * region with finer granularity.
			 */
			return ACTION_CREATE_NEW_TABLE;

		} else {
			assert(desc_type == TABLE_DESC);
			/*
			 * The block is not fully covered by the region, but
			 * there is already a table here. Recurse into it and
			 * try to map with finer granularity.
			 *
			 * PAGE_DESC for level 3 has the same value as
			 * TABLE_DESC, but this code can't run on a level 3
			 * table because there can't be overlaps in level 3.
			 */
			return ACTION_RECURSE_INTO_TABLE;
		}
	}

	/*
	 * This table entry is outside of the region specified in the arguments,
	 * don't write anything to it.
	 */
	return ACTION_NONE;
}
458
/*
 * Recursive function that writes to the translation tables and maps the
 * specified region. On success, it returns the VA of the last byte that was
 * successfully mapped. On error, it returns the VA of the next entry that
 * should have been mapped.
 *
 * ctx:           Translation context the tables belong to.
 * mm:            Region to map.
 * table_base_va: VA mapped by the first entry of `table_base`.
 * table_base:    Translation table to write the new entries to.
 * table_entries: Number of entries in `table_base`.
 * level:         Translation lookup level of `table_base`.
 */
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
					const uintptr_t table_base_va,
					uint64_t *const table_base,
					const int table_entries,
					const int level)
{
	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);

	uintptr_t mm_end_va = mm->base_va + mm->size - 1;

	uintptr_t table_idx_va;
	unsigned long long table_idx_pa;

	uint64_t *subtable;
	uint64_t desc;

	int table_idx;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

		table_idx = (table_idx_va - table_base_va) >>
			    XLAT_ADDR_SHIFT(level);

		assert(table_idx < table_entries);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
		table_idx = 0;
	}

#if PLAT_XLAT_TABLES_DYNAMIC
	/* Account for the region that is about to use this table. */
	if (level > ctx->base_level)
		xlat_table_inc_regions_count(ctx, table_base);
#endif

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;

		action_t action = xlat_tables_map_region_action(mm,
			desc & DESC_MASK, table_idx_pa, table_idx_va, level);

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] =
				xlat_desc(mm->attr, table_idx_pa, level);

		} else if (action == ACTION_CREATE_NEW_TABLE) {

			subtable = xlat_table_get_empty(ctx);
			if (subtable == NULL) {
				/* Not enough free tables to map this region */
				return table_idx_va;
			}

			/* Point to new subtable from this one. */
			table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;

			/* Recurse to write into subtable */
			uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					       subtable, XLAT_TABLE_ENTRIES,
					       level + 1);
			/* Propagate a partial mapping failure to the caller. */
			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
				return end_va;

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
			/* Recurse to write into subtable */
			uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					       subtable, XLAT_TABLE_ENTRIES,
					       level + 1);
			/* Propagate a partial mapping failure to the caller. */
			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
				return end_va;

		} else {

			assert(action == ACTION_NONE);

		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (mm_end_va <= table_idx_va)
			break;
	}

	/* Last byte mapped by this walk. */
	return table_idx_va - 1;
}
560
561void print_mmap(mmap_region_t *const mmap)
562{
563#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
564 tf_printf("mmap:\n");
565 mmap_region_t *mm = mmap;
566
567 while (mm->size) {
568 tf_printf(" VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
569 (void *)mm->base_va, mm->base_pa,
570 mm->size, mm->attr);
571 ++mm;
572 };
573 tf_printf("\n");
574#endif
575}
576
/*
 * Function that verifies that a region can be mapped.
 * Returns:
 *        0: Success, the mapping is allowed.
 *   EINVAL: Invalid values were used as arguments.
 *   ERANGE: The memory limits were surpassed.
 *   ENOMEM: There is not enough memory in the mmap array.
 *    EPERM: Region overlaps another one in an invalid way.
 */
static int mmap_add_region_check(xlat_ctx_t *ctx, unsigned long long base_pa,
				 uintptr_t base_va, size_t size,
				 mmap_attr_t attr)
{
	mmap_region_t *mm = ctx->mmap;
	unsigned long long end_pa = base_pa + size - 1;
	uintptr_t end_va = base_va + size - 1;

	/* Both addresses and the size must be page-granular. */
	if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
			!IS_PAGE_ALIGNED(size))
		return -EINVAL;

	/* Check for overflows */
	if ((base_pa > end_pa) || (base_va > end_va))
		return -ERANGE;

	if ((base_va + (uintptr_t)size - (uintptr_t)1) > ctx->va_max_address)
		return -ERANGE;

	if ((base_pa + (unsigned long long)size - 1ULL) > ctx->pa_max_address)
		return -ERANGE;

	/* Check that there is space in the mmap array */
	if (ctx->mmap[ctx->mmap_num - 1].size != 0)
		return -ENOMEM;

	/* Check for PAs and VAs overlaps with all other regions */
	for (mm = ctx->mmap; mm->size; ++mm) {

		uintptr_t mm_end_va = mm->base_va + mm->size - 1;

		/*
		 * Check if one of the regions is completely inside the other
		 * one.
		 */
		int fully_overlapped_va =
			((base_va >= mm->base_va) && (end_va <= mm_end_va)) ||
			((mm->base_va >= base_va) && (mm_end_va <= end_va));

		/*
		 * Full VA overlaps are only allowed if both regions are
		 * identity mapped (zero offset) or have the same VA to PA
		 * offset. Also, make sure that it's not the exact same area.
		 * This can only be done with static regions.
		 */
		if (fully_overlapped_va) {

#if PLAT_XLAT_TABLES_DYNAMIC
			/* Dynamic regions must never overlap anything. */
			if ((attr & MT_DYNAMIC) || (mm->attr & MT_DYNAMIC))
				return -EPERM;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
			if ((mm->base_va - mm->base_pa) != (base_va - base_pa))
				return -EPERM;

			if ((base_va == mm->base_va) && (size == mm->size))
				return -EPERM;

		} else {
			/*
			 * If the regions do not have fully overlapping VAs,
			 * then they must have fully separated VAs and PAs.
			 * Partial overlaps are not allowed
			 */

			unsigned long long mm_end_pa =
						     mm->base_pa + mm->size - 1;

			int separated_pa =
				(end_pa < mm->base_pa) || (base_pa > mm_end_pa);
			int separated_va =
				(end_va < mm->base_va) || (base_va > mm_end_va);

			if (!(separated_va && separated_pa))
				return -EPERM;
		}
	}

	return 0;
}
665
/*
 * Add a static region to the context's mmap array, keeping the array sorted.
 * Empty regions are ignored; any request rejected by mmap_add_region_check()
 * triggers an ERROR log and an assertion.
 */
void mmap_add_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap;
	mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
	unsigned long long end_pa = mm->base_pa + mm->size - 1;
	uintptr_t end_va = mm->base_va + mm->size - 1;
	int ret;

	/* Ignore empty regions */
	if (!mm->size)
		return;

	/* Static regions must be added before initializing the xlat tables. */
	assert(!ctx->initialized);

	ret = mmap_add_region_check(ctx, mm->base_pa, mm->base_va, mm->size,
				    mm->attr);
	if (ret != 0) {
		ERROR("mmap_add_region_check() failed. error %d\n", ret);
		assert(0);
		return;
	}

	/*
	 * Find correct place in mmap to insert new region.
	 *
	 * 1 - Lower region VA end first.
	 * 2 - Smaller region size first.
	 *
	 * VA  0                                   0xFF
	 *
	 * 1st |------|
	 * 2nd |------------|
	 * 3rd                 |------|
	 * 4th                            |---|
	 * 5th                                   |---|
	 * 6th                            |----------|
	 * 7th |-------------------------------------|
	 *
	 * This is required for overlapping regions only. It simplifies adding
	 * regions with the loop in xlat_tables_init_internal because the outer
	 * ones won't overwrite block or page descriptors of regions added
	 * previously.
	 *
	 * Overlapping is only allowed for static regions.
	 */

	while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va
	       && mm_cursor->size)
		++mm_cursor;

	while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
	       && (mm_cursor->size < mm->size))
		++mm_cursor;

	/* Make room for new region by moving other regions up by one place */
	memmove(mm_cursor + 1, mm_cursor,
		(uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0);

	mm_cursor->base_pa = mm->base_pa;
	mm_cursor->base_va = mm->base_va;
	mm_cursor->size = mm->size;
	mm_cursor->attr = mm->attr;

	/* Widen the context's high-water marks if needed. */
	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;
}
742
Antonio Nino Diazac998032017-02-27 17:23:54 +0000743#if PLAT_XLAT_TABLES_DYNAMIC
744
745int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
746{
747 mmap_region_t *mm_cursor = ctx->mmap;
748 mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
749 unsigned long long end_pa = mm->base_pa + mm->size - 1;
750 uintptr_t end_va = mm->base_va + mm->size - 1;
751 int ret;
752
753 /* Nothing to do */
754 if (!mm->size)
755 return 0;
756
757 ret = mmap_add_region_check(ctx, mm->base_pa, mm->base_va, mm->size, mm->attr | MT_DYNAMIC);
758 if (ret != 0)
759 return ret;
760
761 /*
762 * Find the adequate entry in the mmap array in the same way done for
763 * static regions in mmap_add_region_ctx().
764 */
765
766 while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va && mm_cursor->size)
767 ++mm_cursor;
768
769 while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va) && (mm_cursor->size < mm->size))
770 ++mm_cursor;
771
772 /* Make room for new region by moving other regions up by one place */
773 memmove(mm_cursor + 1, mm_cursor, (uintptr_t)mm_last - (uintptr_t)mm_cursor);
774
775 /*
776 * Check we haven't lost the empty sentinal from the end of the array.
777 * This shouldn't happen as we have checked in mmap_add_region_check
778 * that there is free space.
779 */
780 assert(mm_last->size == 0);
781
782 mm_cursor->base_pa = mm->base_pa;
783 mm_cursor->base_va = mm->base_va;
784 mm_cursor->size = mm->size;
785 mm_cursor->attr = mm->attr | MT_DYNAMIC;
786
787 /*
788 * Update the translation tables if the xlat tables are initialized. If
789 * not, this region will be mapped when they are initialized.
790 */
791 if (ctx->initialized) {
792 uintptr_t end_va = xlat_tables_map_region(ctx, mm_cursor, 0, ctx->base_table,
793 ctx->base_table_entries, ctx->base_level);
794
795 /* Failed to map, remove mmap entry, unmap and return error. */
796 if (end_va != mm_cursor->base_va + mm_cursor->size - 1) {
797 memmove(mm_cursor, mm_cursor + 1, (uintptr_t)mm_last - (uintptr_t)mm_cursor);
798
799 /*
800 * Check if the mapping function actually managed to map
801 * anything. If not, just return now.
802 */
803 if (mm_cursor->base_va >= end_va)
804 return -ENOMEM;
805
806 /*
807 * Something went wrong after mapping some table entries,
808 * undo every change done up to this point.
809 */
810 mmap_region_t unmap_mm = {
811 .base_pa = 0,
812 .base_va = mm->base_va,
813 .size = end_va - mm->base_va,
814 .attr = 0
815 };
816 xlat_tables_unmap_region(ctx, &unmap_mm, 0, ctx->base_table,
817 ctx->base_table_entries, ctx->base_level);
818
819 return -ENOMEM;
820 }
821
822 /*
823 * Make sure that all entries are written to the memory. There
824 * is no need to invalidate entries when mapping dynamic regions
825 * because new table/block/page descriptors only replace old
826 * invalid descriptors, that aren't TLB cached.
827 */
828 dsbishst();
829 }
830
831 if (end_pa > ctx->max_pa)
832 ctx->max_pa = end_pa;
833 if (end_va > ctx->max_va)
834 ctx->max_va = end_va;
835
836 return 0;
837}
838
/*
 * Removes the region with given base Virtual Address and size from the given
 * context.
 *
 * Returns:
 *        0: Success.
 *   EINVAL: Invalid values were used as arguments (region not found).
 *    EPERM: Tried to remove a static region.
 */
int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
				   size_t size)
{
	mmap_region_t *mm = ctx->mmap;
	mmap_region_t *mm_last = mm + ctx->mmap_num;
	int update_max_va_needed = 0;
	int update_max_pa_needed = 0;

	/* Check sanity of mmap array. */
	assert(mm[ctx->mmap_num].size == 0);

	/* Look for an entry with exactly the requested base VA and size. */
	while (mm->size) {
		if ((mm->base_va == base_va) && (mm->size == size))
			break;
		++mm;
	}

	/* Check that the region was found */
	if (mm->size == 0)
		return -EINVAL;

	/* If the region is static it can't be removed */
	if (!(mm->attr & MT_DYNAMIC))
		return -EPERM;

	/* Check if this region is using the top VAs or PAs. */
	if ((mm->base_va + mm->size - 1) == ctx->max_va)
		update_max_va_needed = 1;
	if ((mm->base_pa + mm->size - 1) == ctx->max_pa)
		update_max_pa_needed = 1;

	/* Update the translation tables if needed */
	if (ctx->initialized) {
		xlat_tables_unmap_region(ctx, mm, 0, ctx->base_table,
					 ctx->base_table_entries,
					 ctx->base_level);
		xlat_arch_tlbi_va_sync();
	}

	/* Remove this region by moving the rest down by one place. */
	memmove(mm, mm + 1, (uintptr_t)mm_last - (uintptr_t)mm);

	/* Check if we need to update the max VAs and PAs */
	if (update_max_va_needed) {
		/* Rescan the array to recompute the highest mapped VA. */
		ctx->max_va = 0;
		mm = ctx->mmap;
		while (mm->size) {
			if ((mm->base_va + mm->size - 1) > ctx->max_va)
				ctx->max_va = mm->base_va + mm->size - 1;
			++mm;
		}
	}

	if (update_max_pa_needed) {
		/* Rescan the array to recompute the highest mapped PA. */
		ctx->max_pa = 0;
		mm = ctx->mmap;
		while (mm->size) {
			if ((mm->base_pa + mm->size - 1) > ctx->max_pa)
				ctx->max_pa = mm->base_pa + mm->size - 1;
			++mm;
		}
	}

	return 0;
}
913
914#endif /* PLAT_XLAT_TABLES_DYNAMIC */
915
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000916#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
917
918/* Print the attributes of the specified block descriptor. */
919static void xlat_desc_print(uint64_t desc)
920{
921 int mem_type_index = ATTR_INDEX_GET(desc);
922
923 if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
924 tf_printf("MEM");
925 } else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
926 tf_printf("NC");
927 } else {
928 assert(mem_type_index == ATTR_DEVICE_INDEX);
929 tf_printf("DEV");
930 }
931
932 tf_printf(LOWER_ATTRS(AP_RO) & desc ? "-RO" : "-RW");
933 tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
934 tf_printf(UPPER_ATTRS(XN) & desc ? "-XN" : "-EXEC");
935}
936
937static const char * const level_spacers[] = {
Antonio Nino Diaz755e54f2017-02-13 11:35:49 +0000938 "[LV0] ",
939 " [LV1] ",
940 " [LV2] ",
941 " [LV3] "
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000942};
943
Antonio Nino Diaz755e54f2017-02-13 11:35:49 +0000944static const char *invalid_descriptors_ommited =
945 "%s(%d invalid descriptors omitted)\n";
946
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000947/*
948 * Recursive function that reads the translation tables passed as an argument
949 * and prints their status.
950 */
951static void xlat_tables_print_internal(const uintptr_t table_base_va,
952 uint64_t *const table_base, const int table_entries,
953 const int level)
954{
955 assert(level <= XLAT_TABLE_LEVEL_MAX);
956
957 uint64_t desc;
958 uintptr_t table_idx_va = table_base_va;
959 int table_idx = 0;
960
961 size_t level_size = XLAT_BLOCK_SIZE(level);
962
Antonio Nino Diaz755e54f2017-02-13 11:35:49 +0000963 /*
964 * Keep track of how many invalid descriptors are counted in a row.
965 * Whenever multiple invalid descriptors are found, only the first one
966 * is printed, and a line is added to inform about how many descriptors
967 * have been omitted.
968 */
969 int invalid_row_count = 0;
970
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000971 while (table_idx < table_entries) {
972
973 desc = table_base[table_idx];
974
975 if ((desc & DESC_MASK) == INVALID_DESC) {
976
Antonio Nino Diaz755e54f2017-02-13 11:35:49 +0000977 if (invalid_row_count == 0) {
978 tf_printf("%sVA:%p size:0x%zx\n",
979 level_spacers[level],
980 (void *)table_idx_va, level_size);
981 }
982 invalid_row_count++;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000983
984 } else {
985
Antonio Nino Diaz755e54f2017-02-13 11:35:49 +0000986 if (invalid_row_count > 1) {
987 tf_printf(invalid_descriptors_ommited,
988 level_spacers[level],
989 invalid_row_count - 1);
990 }
991 invalid_row_count = 0;
992
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000993 /*
994 * Check if this is a table or a block. Tables are only
995 * allowed in levels other than 3, but DESC_PAGE has the
996 * same value as DESC_TABLE, so we need to check.
997 */
998 if (((desc & DESC_MASK) == TABLE_DESC) &&
999 (level < XLAT_TABLE_LEVEL_MAX)) {
1000 /*
1001 * Do not print any PA for a table descriptor,
1002 * as it doesn't directly map physical memory
1003 * but instead points to the next translation
1004 * table in the translation table walk.
1005 */
1006 tf_printf("%sVA:%p size:0x%zx\n",
1007 level_spacers[level],
1008 (void *)table_idx_va, level_size);
1009
1010 uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
1011
1012 xlat_tables_print_internal(table_idx_va,
1013 (uint64_t *)addr_inner,
1014 XLAT_TABLE_ENTRIES, level+1);
1015 } else {
1016 tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
1017 level_spacers[level],
1018 (void *)table_idx_va,
1019 (unsigned long long)(desc & TABLE_ADDR_MASK),
1020 level_size);
1021 xlat_desc_print(desc);
1022 tf_printf("\n");
1023 }
1024 }
1025
1026 table_idx++;
1027 table_idx_va += level_size;
1028 }
Antonio Nino Diaz755e54f2017-02-13 11:35:49 +00001029
1030 if (invalid_row_count > 1) {
1031 tf_printf(invalid_descriptors_ommited,
1032 level_spacers[level], invalid_row_count - 1);
1033 }
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001034}
1035
1036#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
1037
/*
 * Print the whole translation tree of the given context. Only compiled in
 * when LOG_LEVEL is VERBOSE or higher; otherwise this is a no-op.
 */
void xlat_tables_print(xlat_ctx_t *ctx)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	xlat_tables_print_internal(0, ctx->base_table, ctx->base_table_entries,
				   ctx->base_level);
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
}
1045
/*
 * Initialize the translation tables of the given context: invalidate every
 * entry of every table, map all regions currently held in the mmap array and
 * mark the context as initialized. Panics if the available tables can't hold
 * all the regions.
 */
void init_xlation_table(xlat_ctx_t *ctx)
{
	mmap_region_t *mm = ctx->mmap;

	/* All tables must be zeroed before mapping any region. */

	for (int i = 0; i < ctx->base_table_entries; i++)
		ctx->base_table[i] = INVALID_DESC;

	for (int j = 0; j < ctx->tables_num; j++) {
#if PLAT_XLAT_TABLES_DYNAMIC
		/* No region is mapped through any table yet. */
		ctx->tables_mapped_regions[j] = 0;
#endif
		for (int i = 0; i < XLAT_TABLE_ENTRIES; i++)
			ctx->tables[j][i] = INVALID_DESC;
	}

	/* Map every region in the (sorted, sentinel-terminated) mmap array. */
	while (mm->size) {
		uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0, ctx->base_table,
				ctx->base_table_entries, ctx->base_level);

		if (end_va != mm->base_va + mm->size - 1) {
			ERROR("Not enough memory to map region:\n"
			      " VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
			      (void *)mm->base_va, mm->base_pa, mm->size, mm->attr);
			panic();
		}

		mm++;
	}

	/* From now on, only dynamic regions may be added or removed. */
	ctx->initialized = 1;
}