blob: 2d556e65df8d926a067f9c05a703f00786ff305a [file] [log] [blame]
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001/*
2 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
3 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00005 */
6
7#include <arch.h>
8#include <arch_helpers.h>
9#include <assert.h>
10#include <cassert.h>
11#include <common_def.h>
12#include <debug.h>
13#include <errno.h>
14#include <platform_def.h>
15#include <string.h>
16#include <types.h>
17#include <utils.h>
18#include <xlat_tables_v2.h>
19#ifdef AARCH32
20# include "aarch32/xlat_tables_arch.h"
21#else
22# include "aarch64/xlat_tables_arch.h"
23#endif
24#include "xlat_tables_private.h"
25
Antonio Nino Diazac998032017-02-27 17:23:54 +000026#if PLAT_XLAT_TABLES_DYNAMIC
27
28/*
29 * The following functions assume that they will be called using subtables only.
30 * The base table can't be unmapped, so it is not needed to do any special
31 * handling for it.
32 */
33
34/*
35 * Returns the index of the array corresponding to the specified translation
36 * table.
37 */
38static int xlat_table_get_index(xlat_ctx_t *ctx, const uint64_t *table)
39{
40 for (int i = 0; i < ctx->tables_num; i++)
41 if (ctx->tables[i] == table)
42 return i;
43
44 /*
45 * Maybe we were asked to get the index of the base level table, which
46 * should never happen.
47 */
48 assert(0);
49
50 return -1;
51}
52
53/* Returns a pointer to an empty translation table. */
54static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
55{
56 for (int i = 0; i < ctx->tables_num; i++)
57 if (ctx->tables_mapped_regions[i] == 0)
58 return ctx->tables[i];
59
60 return NULL;
61}
62
/*
 * Increments region count for a given table. The count tracks how many
 * regions are (partially) mapped through that table so it can be freed
 * again once the count drops back to zero.
 */
static void xlat_table_inc_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
{
	ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]++;
}
68
/* Decrements region count for a given table (counterpart of the above). */
static void xlat_table_dec_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
{
	ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]--;
}
74
/* Returns 0 if the specified table isn't empty, otherwise 1. */
static int xlat_table_is_empty(xlat_ctx_t *ctx, const uint64_t *table)
{
	return !ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)];
}
80
81#else /* PLAT_XLAT_TABLES_DYNAMIC */
82
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +000083/* Returns a pointer to the first empty translation table. */
84static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
85{
86 assert(ctx->next_table < ctx->tables_num);
87
88 return ctx->tables[ctx->next_table++];
89}
90
Antonio Nino Diazac998032017-02-27 17:23:54 +000091#endif /* PLAT_XLAT_TABLES_DYNAMIC */
92
/*
 * Returns a block/page table descriptor for the given level and attributes.
 *
 * attr: memory attributes (MT_*) of the region being mapped.
 * addr_pa: physical address the descriptor will point to; must be aligned
 *  to the block size of `level` (asserted below).
 * level: translation table level the descriptor is destined for.
 * execute_never_mask: architecture-dependent mask of XN bits to OR into the
 *  descriptor when the mapping must not be executable.
 */
static uint64_t xlat_desc(mmap_attr_t attr, unsigned long long addr_pa,
			  int level, uint64_t execute_never_mask)
{
	uint64_t desc;
	int mem_type;

	/* Make sure that the granularity is fine enough to map this address. */
	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);

	desc = addr_pa;
	/*
	 * There are different translation table descriptors for level 3 and the
	 * rest.
	 */
	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
	/*
	 * Always set the access flag, as TF doesn't manage access flag faults.
	 * Deduce other fields of the descriptor based on the MT_NS and MT_RW
	 * memory region attributes.
	 */
	desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
	desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
	desc |= LOWER_ATTRS(ACCESS_FLAG);

	/*
	 * Deduce shareability domain and executability of the memory region
	 * from the memory type of the attributes (MT_TYPE).
	 *
	 * Data accesses to device memory and non-cacheable normal memory are
	 * coherent for all observers in the system, and correspondingly are
	 * always treated as being Outer Shareable. Therefore, for these 2 types
	 * of memory, it is not strictly needed to set the shareability field
	 * in the translation tables.
	 */
	mem_type = MT_TYPE(attr);
	if (mem_type == MT_DEVICE) {
		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
		/*
		 * Always map device memory as execute-never.
		 * This is to avoid the possibility of a speculative instruction
		 * fetch, which could be an issue if this memory region
		 * corresponds to a read-sensitive peripheral.
		 */
		desc |= execute_never_mask;

	} else { /* Normal memory */
		/*
		 * Always map read-write normal memory as execute-never.
		 * (Trusted Firmware doesn't self-modify its code, therefore
		 * R/W memory is reserved for data storage, which must not be
		 * executable.)
		 * Note that setting the XN bit here is for consistency only.
		 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
		 * which makes any writable memory region to be treated as
		 * execute-never, regardless of the value of the XN bit in the
		 * translation table.
		 *
		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
		 * attribute to figure out the value of the XN bit.
		 */
		if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
			desc |= execute_never_mask;
		}

		if (mem_type == MT_MEMORY) {
			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
		} else {
			assert(mem_type == MT_NON_CACHEABLE);
			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
		}
	}

	return desc;
}
168
/*
 * Enumeration of actions that can be made when mapping table entries depending
 * on the previous value in that entry and information about the region being
 * mapped. Shared by the map and unmap table walkers below.
 */
typedef enum {

	/* Do nothing */
	ACTION_NONE,

	/* Write a block (or page, if in level 3) entry. */
	ACTION_WRITE_BLOCK_ENTRY,

	/*
	 * Create a new table and write a table entry pointing to it. Recurse
	 * into it for further processing.
	 */
	ACTION_CREATE_NEW_TABLE,

	/*
	 * There is a table descriptor in this entry, read it and recurse into
	 * that table for further processing.
	 */
	ACTION_RECURSE_INTO_TABLE,

} action_t;
195
Antonio Nino Diazac998032017-02-27 17:23:54 +0000196#if PLAT_XLAT_TABLES_DYNAMIC
197
/*
 * Recursive function that writes to the translation tables and unmaps the
 * specified region.
 *
 * Walks the entries of `table_base` (which maps VAs starting at
 * `table_base_va` at translation level `level`) and invalidates every
 * descriptor that falls inside `mm`, recursing into subtables where the
 * region only partially covers an entry. Subtables left empty are detached
 * from their parent so they can be reused.
 */
static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
				     const uintptr_t table_base_va,
				     uint64_t *const table_base,
				     const int table_entries,
				     const int level)
{
	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);

	uint64_t *subtable;
	uint64_t desc;

	uintptr_t table_idx_va;
	uintptr_t table_idx_end_va; /* End VA of this entry */

	uintptr_t region_end_va = mm->base_va + mm->size - 1;

	int table_idx;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

		table_idx = (table_idx_va - table_base_va) >>
			    XLAT_ADDR_SHIFT(level);

		assert(table_idx < table_entries);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
		table_idx = 0;
	}

	while (table_idx < table_entries) {

		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1;

		desc = table_base[table_idx];
		uint64_t desc_type = desc & DESC_MASK;

		action_t action = ACTION_NONE;

		if ((mm->base_va <= table_idx_va) &&
		    (region_end_va >= table_idx_end_va)) {

			/* Region covers all block */

			if (level == 3) {
				/*
				 * Last level, only page descriptors allowed,
				 * erase it.
				 */
				assert(desc_type == PAGE_DESC);

				action = ACTION_WRITE_BLOCK_ENTRY;
			} else {
				/*
				 * Other levels can have table descriptors. If
				 * so, recurse into it and erase descriptors
				 * inside it as needed. If there is a block
				 * descriptor, just erase it. If an invalid
				 * descriptor is found, this table isn't
				 * actually mapped, which shouldn't happen.
				 */
				if (desc_type == TABLE_DESC) {
					action = ACTION_RECURSE_INTO_TABLE;
				} else {
					assert(desc_type == BLOCK_DESC);
					action = ACTION_WRITE_BLOCK_ENTRY;
				}
			}

		} else if ((mm->base_va <= table_idx_end_va) ||
			   (region_end_va >= table_idx_va)) {

			/*
			 * Region partially covers block.
			 *
			 * It can't happen in level 3.
			 *
			 * There must be a table descriptor here, if not there
			 * was a problem when mapping the region.
			 *
			 * NOTE(review): the `||` makes this branch match any
			 * entry not fully covered; it appears safe only
			 * because the walk starts at the first affected entry
			 * and breaks once past the region end — confirm
			 * before changing.
			 */

			assert(level < 3);

			assert(desc_type == TABLE_DESC);

			action = ACTION_RECURSE_INTO_TABLE;
		}

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			/* Erase the descriptor and invalidate its TLB entry. */
			table_base[table_idx] = INVALID_DESC;
			xlat_arch_tlbi_va(table_idx_va);

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);

			/* Recurse to write into subtable */
			xlat_tables_unmap_region(ctx, mm, table_idx_va,
						 subtable, XLAT_TABLE_ENTRIES,
						 level + 1);

			/*
			 * If the subtable is now empty, remove its reference.
			 */
			if (xlat_table_is_empty(ctx, subtable)) {
				table_base[table_idx] = INVALID_DESC;
				xlat_arch_tlbi_va(table_idx_va);
			}

		} else {
			assert(action == ACTION_NONE);
		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (region_end_va <= table_idx_va)
			break;
	}

	/*
	 * One region fewer is mapped through this table now. The base table's
	 * count is not tracked (it can never be freed).
	 */
	if (level > ctx->base_level)
		xlat_table_dec_regions_count(ctx, table_base);
}
329
330#endif /* PLAT_XLAT_TABLES_DYNAMIC */
331
/*
 * From the given arguments, it decides which action to take when mapping the
 * specified region.
 *
 * mm: region being mapped.
 * desc_type: type bits (DESC_MASK) of the descriptor currently stored in the
 *  table entry that starts at `table_entry_base_va`.
 * dest_pa: PA that would be mapped at `table_entry_base_va`.
 * level: translation table level of the entry under consideration.
 */
static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
		const int desc_type, const unsigned long long dest_pa,
		const uintptr_t table_entry_base_va, const int level)
{
	uintptr_t mm_end_va = mm->base_va + mm->size - 1;
	uintptr_t table_entry_end_va =
			table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1;

	/*
	 * The descriptor types allowed depend on the current table level.
	 */

	if ((mm->base_va <= table_entry_base_va) &&
	    (mm_end_va >= table_entry_end_va)) {

		/*
		 * Table entry is covered by region
		 * --------------------------------
		 *
		 * This means that this table entry can describe the whole
		 * translation with this granularity in principle.
		 */

		if (level == 3) {
			/*
			 * Last level, only page descriptors are allowed.
			 */
			if (desc_type == PAGE_DESC) {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				return ACTION_NONE;
			} else {
				assert(desc_type == INVALID_DESC);
				return ACTION_WRITE_BLOCK_ENTRY;
			}

		} else {

			/*
			 * Other levels. Table descriptors are allowed. Block
			 * descriptors too, but they have some limitations.
			 */

			if (desc_type == TABLE_DESC) {
				/* There's already a table, recurse into it. */
				return ACTION_RECURSE_INTO_TABLE;

			} else if (desc_type == INVALID_DESC) {
				/*
				 * There's nothing mapped here, create a new
				 * entry.
				 *
				 * Check if the destination granularity allows
				 * us to use a block descriptor or we need a
				 * finer table for it.
				 *
				 * Also, check if the current level allows block
				 * descriptors. If not, create a table instead.
				 */
				if ((dest_pa & XLAT_BLOCK_MASK(level)) ||
				    (level < MIN_LVL_BLOCK_DESC))
					return ACTION_CREATE_NEW_TABLE;
				else
					return ACTION_WRITE_BLOCK_ENTRY;

			} else {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				assert(desc_type == BLOCK_DESC);

				return ACTION_NONE;
			}
		}

	} else if ((mm->base_va <= table_entry_end_va) ||
		   (mm_end_va >= table_entry_base_va)) {

		/*
		 * Region partially covers table entry
		 * -----------------------------------
		 *
		 * This means that this table entry can't describe the whole
		 * translation, a finer table is needed.

		 * There cannot be partial block overlaps in level 3. If that
		 * happens, some of the preliminary checks when adding the
		 * mmap region failed to detect that PA and VA must at least be
		 * aligned to PAGE_SIZE.
		 */
		assert(level < 3);

		if (desc_type == INVALID_DESC) {
			/*
			 * The block is not fully covered by the region. Create
			 * a new table, recurse into it and try to map the
			 * region with finer granularity.
			 */
			return ACTION_CREATE_NEW_TABLE;

		} else {
			assert(desc_type == TABLE_DESC);
			/*
			 * The block is not fully covered by the region, but
			 * there is already a table here. Recurse into it and
			 * try to map with finer granularity.
			 *
			 * PAGE_DESC for level 3 has the same value as
			 * TABLE_DESC, but this code can't run on a level 3
			 * table because there can't be overlaps in level 3.
			 */
			return ACTION_RECURSE_INTO_TABLE;
		}
	}

	/*
	 * This table entry is outside of the region specified in the arguments,
	 * don't write anything to it.
	 */
	return ACTION_NONE;
}
460
/*
 * Recursive function that writes to the translation tables and maps the
 * specified region. On success, it returns the VA of the last byte that was
 * successfully mapped. On error, it returns the VA of the next entry that
 * should have been mapped.
 */
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
					const uintptr_t table_base_va,
					uint64_t *const table_base,
					const int table_entries,
					const int level)
{
	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);

	uintptr_t mm_end_va = mm->base_va + mm->size - 1;

	uintptr_t table_idx_va;
	unsigned long long table_idx_pa;

	uint64_t *subtable;
	uint64_t desc;

	int table_idx;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

		table_idx = (table_idx_va - table_base_va) >>
			    XLAT_ADDR_SHIFT(level);

		assert(table_idx < table_entries);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
		table_idx = 0;
	}

#if PLAT_XLAT_TABLES_DYNAMIC
	/* Account for one more region mapped (partially) by this table. */
	if (level > ctx->base_level)
		xlat_table_inc_regions_count(ctx, table_base);
#endif

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		/* PA that corresponds to the current entry's VA. */
		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;

		action_t action = xlat_tables_map_region_action(mm,
			desc & DESC_MASK, table_idx_pa, table_idx_va, level);

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] =
				xlat_desc(mm->attr, table_idx_pa, level,
					  ctx->execute_never_mask);

		} else if (action == ACTION_CREATE_NEW_TABLE) {

			subtable = xlat_table_get_empty(ctx);
			if (subtable == NULL) {
				/* Not enough free tables to map this region */
				return table_idx_va;
			}

			/* Point to new subtable from this one. */
			table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;

			/* Recurse to write into subtable */
			uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					subtable, XLAT_TABLE_ENTRIES,
					level + 1);
			/* Propagate failure from the recursive call. */
			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
				return end_va;

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
			/* Recurse to write into subtable */
			uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					subtable, XLAT_TABLE_ENTRIES,
					level + 1);
			/* Propagate failure from the recursive call. */
			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
				return end_va;

		} else {

			assert(action == ACTION_NONE);

		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (mm_end_va <= table_idx_va)
			break;
	}

	return table_idx_va - 1;
}
563
564void print_mmap(mmap_region_t *const mmap)
565{
566#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
567 tf_printf("mmap:\n");
568 mmap_region_t *mm = mmap;
569
570 while (mm->size) {
571 tf_printf(" VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
572 (void *)mm->base_va, mm->base_pa,
573 mm->size, mm->attr);
574 ++mm;
575 };
576 tf_printf("\n");
577#endif
578}
579
/*
 * Function that verifies that a region can be mapped.
 * Returns:
 *        0: Success, the mapping is allowed.
 *   EINVAL: Invalid values were used as arguments.
 *   ERANGE: The memory limits were surpassed.
 *   ENOMEM: There is not enough memory in the mmap array.
 *    EPERM: Region overlaps another one in an invalid way.
 */
static int mmap_add_region_check(xlat_ctx_t *ctx, unsigned long long base_pa,
				 uintptr_t base_va, size_t size,
				 mmap_attr_t attr)
{
	mmap_region_t *mm = ctx->mmap;
	unsigned long long end_pa = base_pa + size - 1;
	uintptr_t end_va = base_va + size - 1;

	/* Addresses and size must be multiples of the page size. */
	if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
			!IS_PAGE_ALIGNED(size))
		return -EINVAL;

	/* Check for overflows */
	if ((base_pa > end_pa) || (base_va > end_va))
		return -ERANGE;

	/* The region must fit inside the context's VA and PA spaces. */
	if ((base_va + (uintptr_t)size - (uintptr_t)1) > ctx->va_max_address)
		return -ERANGE;

	if ((base_pa + (unsigned long long)size - 1ULL) > ctx->pa_max_address)
		return -ERANGE;

	/* Check that there is space in the mmap array */
	if (ctx->mmap[ctx->mmap_num - 1].size != 0)
		return -ENOMEM;

	/* Check for PAs and VAs overlaps with all other regions */
	for (mm = ctx->mmap; mm->size; ++mm) {

		uintptr_t mm_end_va = mm->base_va + mm->size - 1;

		/*
		 * Check if one of the regions is completely inside the other
		 * one.
		 */
		int fully_overlapped_va =
			((base_va >= mm->base_va) && (end_va <= mm_end_va)) ||
			((mm->base_va >= base_va) && (mm_end_va <= end_va));

		/*
		 * Full VA overlaps are only allowed if both regions are
		 * identity mapped (zero offset) or have the same VA to PA
		 * offset. Also, make sure that it's not the exact same area.
		 * This can only be done with static regions.
		 */
		if (fully_overlapped_va) {

#if PLAT_XLAT_TABLES_DYNAMIC
			/* Dynamic regions may never overlap anything. */
			if ((attr & MT_DYNAMIC) || (mm->attr & MT_DYNAMIC))
				return -EPERM;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
			if ((mm->base_va - mm->base_pa) != (base_va - base_pa))
				return -EPERM;

			if ((base_va == mm->base_va) && (size == mm->size))
				return -EPERM;

		} else {
			/*
			 * If the regions do not have fully overlapping VAs,
			 * then they must have fully separated VAs and PAs.
			 * Partial overlaps are not allowed
			 */

			unsigned long long mm_end_pa =
						mm->base_pa + mm->size - 1;

			int separated_pa =
				(end_pa < mm->base_pa) || (base_pa > mm_end_pa);
			int separated_va =
				(end_va < mm->base_va) || (base_va > mm_end_va);

			if (!(separated_va && separated_pa))
				return -EPERM;
		}
	}

	return 0;
}
668
/*
 * Adds the static region `mm` to the context's mmap array, keeping the array
 * sorted by end VA (then size), and updates the context's maximum mapped
 * PA/VA. Invalid regions trigger an assert; empty regions are ignored.
 */
void mmap_add_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap;
	mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
	unsigned long long end_pa = mm->base_pa + mm->size - 1;
	uintptr_t end_va = mm->base_va + mm->size - 1;
	int ret;

	/* Ignore empty regions */
	if (!mm->size)
		return;

	/* Static regions must be added before initializing the xlat tables. */
	assert(!ctx->initialized);

	ret = mmap_add_region_check(ctx, mm->base_pa, mm->base_va, mm->size,
				    mm->attr);
	if (ret != 0) {
		ERROR("mmap_add_region_check() failed. error %d\n", ret);
		assert(0);
		return;
	}

	/*
	 * Find correct place in mmap to insert new region.
	 *
	 * 1 - Lower region VA end first.
	 * 2 - Smaller region size first.
	 *
	 * VA  0                                   0xFF
	 *
	 * 1st |------|
	 * 2nd |------------|
	 * 3rd                 |------|
	 * 4th                            |---|
	 * 5th                                   |---|
	 * 6th                            |----------|
	 * 7th |-------------------------------------|
	 *
	 * This is required for overlapping regions only. It simplifies adding
	 * regions with the loop in xlat_tables_init_internal because the outer
	 * ones won't overwrite block or page descriptors of regions added
	 * previously.
	 *
	 * Overlapping is only allowed for static regions.
	 */

	while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va
	       && mm_cursor->size)
		++mm_cursor;

	while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
	       && (mm_cursor->size < mm->size))
		++mm_cursor;

	/* Make room for new region by moving other regions up by one place */
	memmove(mm_cursor + 1, mm_cursor,
		(uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0);

	mm_cursor->base_pa = mm->base_pa;
	mm_cursor->base_va = mm->base_va;
	mm_cursor->size = mm->size;
	mm_cursor->attr = mm->attr;

	/* Track the highest PA and VA mapped so far. */
	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;
}
745
Antonio Nino Diazac998032017-02-27 17:23:54 +0000746#if PLAT_XLAT_TABLES_DYNAMIC
747
748int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
749{
750 mmap_region_t *mm_cursor = ctx->mmap;
751 mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
752 unsigned long long end_pa = mm->base_pa + mm->size - 1;
753 uintptr_t end_va = mm->base_va + mm->size - 1;
754 int ret;
755
756 /* Nothing to do */
757 if (!mm->size)
758 return 0;
759
760 ret = mmap_add_region_check(ctx, mm->base_pa, mm->base_va, mm->size, mm->attr | MT_DYNAMIC);
761 if (ret != 0)
762 return ret;
763
764 /*
765 * Find the adequate entry in the mmap array in the same way done for
766 * static regions in mmap_add_region_ctx().
767 */
768
769 while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va && mm_cursor->size)
770 ++mm_cursor;
771
772 while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va) && (mm_cursor->size < mm->size))
773 ++mm_cursor;
774
775 /* Make room for new region by moving other regions up by one place */
776 memmove(mm_cursor + 1, mm_cursor, (uintptr_t)mm_last - (uintptr_t)mm_cursor);
777
778 /*
779 * Check we haven't lost the empty sentinal from the end of the array.
780 * This shouldn't happen as we have checked in mmap_add_region_check
781 * that there is free space.
782 */
783 assert(mm_last->size == 0);
784
785 mm_cursor->base_pa = mm->base_pa;
786 mm_cursor->base_va = mm->base_va;
787 mm_cursor->size = mm->size;
788 mm_cursor->attr = mm->attr | MT_DYNAMIC;
789
790 /*
791 * Update the translation tables if the xlat tables are initialized. If
792 * not, this region will be mapped when they are initialized.
793 */
794 if (ctx->initialized) {
795 uintptr_t end_va = xlat_tables_map_region(ctx, mm_cursor, 0, ctx->base_table,
796 ctx->base_table_entries, ctx->base_level);
797
798 /* Failed to map, remove mmap entry, unmap and return error. */
799 if (end_va != mm_cursor->base_va + mm_cursor->size - 1) {
800 memmove(mm_cursor, mm_cursor + 1, (uintptr_t)mm_last - (uintptr_t)mm_cursor);
801
802 /*
803 * Check if the mapping function actually managed to map
804 * anything. If not, just return now.
805 */
806 if (mm_cursor->base_va >= end_va)
807 return -ENOMEM;
808
809 /*
810 * Something went wrong after mapping some table entries,
811 * undo every change done up to this point.
812 */
813 mmap_region_t unmap_mm = {
814 .base_pa = 0,
815 .base_va = mm->base_va,
816 .size = end_va - mm->base_va,
817 .attr = 0
818 };
819 xlat_tables_unmap_region(ctx, &unmap_mm, 0, ctx->base_table,
820 ctx->base_table_entries, ctx->base_level);
821
822 return -ENOMEM;
823 }
824
825 /*
826 * Make sure that all entries are written to the memory. There
827 * is no need to invalidate entries when mapping dynamic regions
828 * because new table/block/page descriptors only replace old
829 * invalid descriptors, that aren't TLB cached.
830 */
831 dsbishst();
832 }
833
834 if (end_pa > ctx->max_pa)
835 ctx->max_pa = end_pa;
836 if (end_va > ctx->max_va)
837 ctx->max_va = end_va;
838
839 return 0;
840}
841
/*
 * Removes the region with given base Virtual Address and size from the given
 * context.
 *
 * Returns:
 *        0: Success.
 *   EINVAL: Invalid values were used as arguments (region not found).
 *    EPERM: Tried to remove a static region.
 */
int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
				   size_t size)
{
	mmap_region_t *mm = ctx->mmap;
	mmap_region_t *mm_last = mm + ctx->mmap_num;
	int update_max_va_needed = 0;
	int update_max_pa_needed = 0;

	/* Check sanity of mmap array. */
	assert(mm[ctx->mmap_num].size == 0);

	/* Look for an exact (base_va, size) match. */
	while (mm->size) {
		if ((mm->base_va == base_va) && (mm->size == size))
			break;
		++mm;
	}

	/* Check that the region was found */
	if (mm->size == 0)
		return -EINVAL;

	/* If the region is static it can't be removed */
	if (!(mm->attr & MT_DYNAMIC))
		return -EPERM;

	/* Check if this region is using the top VAs or PAs. */
	if ((mm->base_va + mm->size - 1) == ctx->max_va)
		update_max_va_needed = 1;
	if ((mm->base_pa + mm->size - 1) == ctx->max_pa)
		update_max_pa_needed = 1;

	/* Update the translation tables if needed */
	if (ctx->initialized) {
		xlat_tables_unmap_region(ctx, mm, 0, ctx->base_table,
					 ctx->base_table_entries,
					 ctx->base_level);
		xlat_arch_tlbi_va_sync();
	}

	/* Remove this region by moving the rest down by one place. */
	memmove(mm, mm + 1, (uintptr_t)mm_last - (uintptr_t)mm);

	/* Check if we need to update the max VAs and PAs */
	if (update_max_va_needed) {
		ctx->max_va = 0;
		mm = ctx->mmap;
		while (mm->size) {
			if ((mm->base_va + mm->size - 1) > ctx->max_va)
				ctx->max_va = mm->base_va + mm->size - 1;
			++mm;
		}
	}

	if (update_max_pa_needed) {
		ctx->max_pa = 0;
		mm = ctx->mmap;
		while (mm->size) {
			if ((mm->base_pa + mm->size - 1) > ctx->max_pa)
				ctx->max_pa = mm->base_pa + mm->size - 1;
			++mm;
		}
	}

	return 0;
}
916
917#endif /* PLAT_XLAT_TABLES_DYNAMIC */
918
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000919#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
920
921/* Print the attributes of the specified block descriptor. */
Antonio Nino Diazefabaa92017-04-27 13:30:22 +0100922static void xlat_desc_print(uint64_t desc, uint64_t execute_never_mask)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000923{
924 int mem_type_index = ATTR_INDEX_GET(desc);
925
926 if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
927 tf_printf("MEM");
928 } else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
929 tf_printf("NC");
930 } else {
931 assert(mem_type_index == ATTR_DEVICE_INDEX);
932 tf_printf("DEV");
933 }
934
935 tf_printf(LOWER_ATTRS(AP_RO) & desc ? "-RO" : "-RW");
936 tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
Antonio Nino Diazefabaa92017-04-27 13:30:22 +0100937 tf_printf(execute_never_mask & desc ? "-XN" : "-EXEC");
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000938}
939
/* Per-level indentation prefixes used when dumping the translation tables. */
static const char * const level_spacers[] = {
	"[LV0] ",
	"  [LV1] ",
	"    [LV2] ",
	"      [LV3] "
};

/* NOTE(review): misspelled identifier ("ommited") kept — it is referenced
 * by xlat_tables_print_internal() below. */
static const char *invalid_descriptors_ommited =
	"%s(%d invalid descriptors omitted)\n";
949
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000950/*
951 * Recursive function that reads the translation tables passed as an argument
952 * and prints their status.
953 */
954static void xlat_tables_print_internal(const uintptr_t table_base_va,
955 uint64_t *const table_base, const int table_entries,
Antonio Nino Diazefabaa92017-04-27 13:30:22 +0100956 const int level, const uint64_t execute_never_mask)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000957{
958 assert(level <= XLAT_TABLE_LEVEL_MAX);
959
960 uint64_t desc;
961 uintptr_t table_idx_va = table_base_va;
962 int table_idx = 0;
963
964 size_t level_size = XLAT_BLOCK_SIZE(level);
965
Antonio Nino Diaz755e54f2017-02-13 11:35:49 +0000966 /*
967 * Keep track of how many invalid descriptors are counted in a row.
968 * Whenever multiple invalid descriptors are found, only the first one
969 * is printed, and a line is added to inform about how many descriptors
970 * have been omitted.
971 */
972 int invalid_row_count = 0;
973
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000974 while (table_idx < table_entries) {
975
976 desc = table_base[table_idx];
977
978 if ((desc & DESC_MASK) == INVALID_DESC) {
979
Antonio Nino Diaz755e54f2017-02-13 11:35:49 +0000980 if (invalid_row_count == 0) {
981 tf_printf("%sVA:%p size:0x%zx\n",
982 level_spacers[level],
983 (void *)table_idx_va, level_size);
984 }
985 invalid_row_count++;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000986
987 } else {
988
Antonio Nino Diaz755e54f2017-02-13 11:35:49 +0000989 if (invalid_row_count > 1) {
990 tf_printf(invalid_descriptors_ommited,
991 level_spacers[level],
992 invalid_row_count - 1);
993 }
994 invalid_row_count = 0;
995
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000996 /*
997 * Check if this is a table or a block. Tables are only
998 * allowed in levels other than 3, but DESC_PAGE has the
999 * same value as DESC_TABLE, so we need to check.
1000 */
1001 if (((desc & DESC_MASK) == TABLE_DESC) &&
1002 (level < XLAT_TABLE_LEVEL_MAX)) {
1003 /*
1004 * Do not print any PA for a table descriptor,
1005 * as it doesn't directly map physical memory
1006 * but instead points to the next translation
1007 * table in the translation table walk.
1008 */
1009 tf_printf("%sVA:%p size:0x%zx\n",
1010 level_spacers[level],
1011 (void *)table_idx_va, level_size);
1012
1013 uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
1014
1015 xlat_tables_print_internal(table_idx_va,
1016 (uint64_t *)addr_inner,
Antonio Nino Diazefabaa92017-04-27 13:30:22 +01001017 XLAT_TABLE_ENTRIES, level+1,
1018 execute_never_mask);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001019 } else {
1020 tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
1021 level_spacers[level],
1022 (void *)table_idx_va,
1023 (unsigned long long)(desc & TABLE_ADDR_MASK),
1024 level_size);
Antonio Nino Diazefabaa92017-04-27 13:30:22 +01001025 xlat_desc_print(desc, execute_never_mask);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001026 tf_printf("\n");
1027 }
1028 }
1029
1030 table_idx++;
1031 table_idx_va += level_size;
1032 }
Antonio Nino Diaz755e54f2017-02-13 11:35:49 +00001033
1034 if (invalid_row_count > 1) {
1035 tf_printf(invalid_descriptors_ommited,
1036 level_spacers[level], invalid_row_count - 1);
1037 }
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001038}
1039
1040#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
1041
/* Dumps the whole translation table tree of `ctx` (verbose builds only). */
void xlat_tables_print(xlat_ctx_t *ctx)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	xlat_tables_print_internal(0, ctx->base_table, ctx->base_table_entries,
				   ctx->base_level, ctx->execute_never_mask);
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
}
1049
/*
 * Initializes the translation tables of the given context: zeroes every
 * table, maps every region in the context's mmap array, and finally marks
 * the context as initialized. Panics if a region cannot be fully mapped.
 */
void init_xlation_table(xlat_ctx_t *ctx)
{
	mmap_region_t *mm = ctx->mmap;

	/* All tables must be zeroed before mapping any region. */

	for (int i = 0; i < ctx->base_table_entries; i++)
		ctx->base_table[i] = INVALID_DESC;

	for (int j = 0; j < ctx->tables_num; j++) {
#if PLAT_XLAT_TABLES_DYNAMIC
		/* No regions are mapped through any table yet. */
		ctx->tables_mapped_regions[j] = 0;
#endif
		for (int i = 0; i < XLAT_TABLE_ENTRIES; i++)
			ctx->tables[j][i] = INVALID_DESC;
	}

	/* Map each region in order; the array sort guarantees that outer
	 * (larger) overlapping regions don't clobber inner ones. */
	while (mm->size) {
		uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0, ctx->base_table,
				ctx->base_table_entries, ctx->base_level);

		if (end_va != mm->base_va + mm->size - 1) {
			ERROR("Not enough memory to map region:\n"
			      " VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
			      (void *)mm->base_va, mm->base_pa, mm->size, mm->attr);
			panic();
		}

		mm++;
	}

	ctx->initialized = 1;
}