/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
#include <common_def.h>
#include <debug.h>
#include <errno.h>
#include <platform_def.h>
#include <string.h>
#include <types.h>
#include <utils.h>
#include <xlat_tables_v2.h>
#ifdef AARCH32
# include "aarch32/xlat_tables_arch.h"
#else
# include "aarch64/xlat_tables_arch.h"
#endif
#include "xlat_tables_private.h"

/* Returns a pointer to the first empty translation table. */
static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
{
	assert(ctx->next_table < ctx->tables_num);

	return ctx->tables[ctx->next_table++];
}

/* Returns a block/page table descriptor for the given level and attributes. */
static uint64_t xlat_desc(unsigned int attr, unsigned long long addr_pa,
			  int level)
{
	uint64_t desc;
	int mem_type;

	/* Make sure that the granularity is fine enough to map this address. */
	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);

	desc = addr_pa;
	/*
	 * There are different translation table descriptors for level 3 and the
	 * rest.
	 */
	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
	/*
	 * Always set the access flag, as TF doesn't manage access flag faults.
	 * Deduce other fields of the descriptor based on the MT_NS and MT_RW
	 * memory region attributes.
	 */
	desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
	desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
	desc |= LOWER_ATTRS(ACCESS_FLAG);

	/*
	 * Deduce shareability domain and executability of the memory region
	 * from the memory type of the attributes (MT_TYPE).
	 *
	 * Data accesses to device memory and non-cacheable normal memory are
	 * coherent for all observers in the system, and correspondingly are
	 * always treated as being Outer Shareable. Therefore, for these 2 types
	 * of memory, it is not strictly needed to set the shareability field
	 * in the translation tables.
	 */
	mem_type = MT_TYPE(attr);
	if (mem_type == MT_DEVICE) {
		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
		/*
		 * Always map device memory as execute-never.
		 * This is to avoid the possibility of a speculative instruction
		 * fetch, which could be an issue if this memory region
		 * corresponds to a read-sensitive peripheral.
		 */
		desc |= UPPER_ATTRS(XN);
	} else { /* Normal memory */
		/*
		 * Always map read-write normal memory as execute-never.
		 * (Trusted Firmware doesn't self-modify its code, therefore
		 * R/W memory is reserved for data storage, which must not be
		 * executable.)
		 * Note that setting the XN bit here is for consistency only.
		 * The enable_mmu_elx() function sets the SCTLR_EL3.WXN bit,
		 * which causes any writable memory region to be treated as
		 * execute-never, regardless of the value of the XN bit in the
		 * translation table.
		 *
		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
		 * attribute to figure out the value of the XN bit.
		 */
		if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER))
			desc |= UPPER_ATTRS(XN);

		if (mem_type == MT_MEMORY) {
			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
		} else {
			assert(mem_type == MT_NON_CACHEABLE);
			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
		}
	}

	return desc;
}
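
/*
 * Worked example: for a secure (no MT_NS), read-only device region mapped
 * at level 3, the assignments above compose a descriptor equivalent to:
 *
 *   addr_pa | PAGE_DESC | LOWER_ATTRS(AP_RO) | LOWER_ATTRS(ACCESS_FLAG) |
 *   LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH) | UPPER_ATTRS(XN)
 *
 * i.e. a page descriptor for outer-shareable, execute-never device memory
 * with the access flag set.
 */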

/*
 * Enumeration of actions that can be taken when mapping table entries,
 * depending on the previous value in that entry and information about the
 * region being mapped.
 */
typedef enum {

	/* Do nothing */
	ACTION_NONE,

	/* Write a block (or page, if in level 3) entry. */
	ACTION_WRITE_BLOCK_ENTRY,

	/*
	 * Create a new table and write a table entry pointing to it. Recurse
	 * into it for further processing.
	 */
	ACTION_CREATE_NEW_TABLE,

	/*
	 * There is a table descriptor in this entry, read it and recurse into
	 * that table for further processing.
	 */
	ACTION_RECURSE_INTO_TABLE,

} action_t;

/*
 * Decides which action to take when mapping the specified region, based on
 * the given arguments.
 */
static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
		const int desc_type, const unsigned long long dest_pa,
		const uintptr_t table_entry_base_va, const int level)
{
	uintptr_t mm_end_va = mm->base_va + mm->size - 1;
	uintptr_t table_entry_end_va =
			table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1;

	/*
	 * The descriptor types allowed depend on the current table level.
	 */

	if ((mm->base_va <= table_entry_base_va) &&
	    (mm_end_va >= table_entry_end_va)) {

		/*
		 * Table entry is covered by region
		 * --------------------------------
		 *
		 * This means that this table entry can describe the whole
		 * translation with this granularity in principle.
		 */

		if (level == 3) {
			/*
			 * Last level, only page descriptors are allowed.
			 */
			if (desc_type == PAGE_DESC) {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				return ACTION_NONE;
			} else {
				assert(desc_type == INVALID_DESC);
				return ACTION_WRITE_BLOCK_ENTRY;
			}

		} else {

			/*
			 * Other levels. Table descriptors are allowed. Block
			 * descriptors too, but they have some limitations.
			 */

			if (desc_type == TABLE_DESC) {
				/* There's already a table, recurse into it. */
				return ACTION_RECURSE_INTO_TABLE;

			} else if (desc_type == INVALID_DESC) {
				/*
				 * There's nothing mapped here, create a new
				 * entry.
				 *
				 * Check if the destination granularity allows
				 * us to use a block descriptor, or if a finer
				 * table is needed for it.
				 *
				 * Also, check if the current level allows block
				 * descriptors. If not, create a table instead.
				 */
				if ((dest_pa & XLAT_BLOCK_MASK(level)) ||
				    (level < MIN_LVL_BLOCK_DESC))
					return ACTION_CREATE_NEW_TABLE;
				else
					return ACTION_WRITE_BLOCK_ENTRY;

			} else {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				assert(desc_type == BLOCK_DESC);

				return ACTION_NONE;
			}
		}

	} else if ((mm->base_va <= table_entry_end_va) &&
		   (mm_end_va >= table_entry_base_va)) {

		/*
		 * Region partially covers table entry
		 * -----------------------------------
		 *
		 * This means that this table entry can't describe the whole
		 * translation, a finer table is needed.
		 *
		 * There cannot be partial block overlaps in level 3. If that
		 * happens, some of the preliminary checks when adding the
		 * mmap region failed to detect that PA and VA must at least be
		 * aligned to PAGE_SIZE.
		 */
		assert(level < 3);

		if (desc_type == INVALID_DESC) {
			/*
			 * The block is not fully covered by the region. Create
			 * a new table, recurse into it and try to map the
			 * region with finer granularity.
			 */
			return ACTION_CREATE_NEW_TABLE;

		} else {
			assert(desc_type == TABLE_DESC);
			/*
			 * The block is not fully covered by the region, but
			 * there is already a table here. Recurse into it and
			 * try to map with finer granularity.
			 *
			 * PAGE_DESC for level 3 has the same value as
			 * TABLE_DESC, but this code can't run on a level 3
			 * table because there can't be overlaps in level 3.
			 */
			return ACTION_RECURSE_INTO_TABLE;
		}
	}

	/*
	 * This table entry is outside of the region specified in the arguments,
	 * don't write anything to it.
	 */
	return ACTION_NONE;
}
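
/*
 * Worked example (assuming a 4KB granule, where XLAT_BLOCK_SIZE(2) is 2MB):
 * mapping a region with base_va 0x40000000 and size 0x200000 fully covers
 * the level 2 entry for 0x40000000. If that entry is INVALID_DESC, the
 * destination PA is 2MB-aligned and MIN_LVL_BLOCK_DESC <= 2, the function
 * returns ACTION_WRITE_BLOCK_ENTRY, so the whole region is mapped with a
 * single block descriptor and no level 3 table is needed.
 */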

/*
 * Recursive function that writes to the translation tables and maps the
 * specified region.
 */
static void xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
		const uintptr_t table_base_va,
		uint64_t *const table_base,
		const int table_entries,
		const int level)
{
	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);

	uintptr_t mm_end_va = mm->base_va + mm->size - 1;

	uintptr_t table_idx_va;
	unsigned long long table_idx_pa;

	uint64_t *subtable;
	uint64_t desc;

	int table_idx;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

		table_idx = (table_idx_va - table_base_va) >>
			    XLAT_ADDR_SHIFT(level);

		assert(table_idx < table_entries);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
		table_idx = 0;
	}

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;

		action_t action = xlat_tables_map_region_action(mm,
			desc & DESC_MASK, table_idx_pa, table_idx_va, level);

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] =
				xlat_desc(mm->attr, table_idx_pa, level);

		} else if (action == ACTION_CREATE_NEW_TABLE) {

			subtable = xlat_table_get_empty(ctx);
			assert(subtable != NULL);
			/* Recurse to write into subtable */
			xlat_tables_map_region(ctx, mm, table_idx_va, subtable,
					       XLAT_TABLE_ENTRIES, level + 1);
			/* Point to new subtable from this one. */
			table_base[table_idx] =
				TABLE_DESC | (unsigned long)subtable;

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
			/* Recurse to write into subtable */
			xlat_tables_map_region(ctx, mm, table_idx_va, subtable,
					       XLAT_TABLE_ENTRIES, level + 1);

		} else {

			assert(action == ACTION_NONE);

		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (mm_end_va <= table_idx_va)
			break;
	}
}
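
/*
 * Worked example of the recursion (4KB granule assumed, starting at
 * level 1): mapping base_va 0x40001000 with size 0x3000 touches a single
 * level 1 entry. The region only partially covers that entry, so a new
 * level 2 table is allocated and the function recurses; the same happens
 * at level 2, and only at level 3 are the three 4KB pages written as page
 * descriptors.
 */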

void print_mmap(mmap_region_t *const mmap)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	tf_printf("mmap:\n");
	mmap_region_t *mm = mmap;

	while (mm->size) {
		tf_printf(" VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
			  (void *)mm->base_va, mm->base_pa,
			  mm->size, mm->attr);
		++mm;
	}
	tf_printf("\n");
#endif
}

/*
 * Function that verifies that a region can be mapped.
 * Returns:
 *        0: Success, the mapping is allowed.
 *   EINVAL: Invalid values were used as arguments.
 *   ERANGE: The memory limits were surpassed.
 *   ENOMEM: There is not enough memory in the mmap array.
 *    EPERM: Region overlaps another one in an invalid way.
 */
static int mmap_add_region_check(xlat_ctx_t *ctx, unsigned long long base_pa,
				 uintptr_t base_va, size_t size,
				 unsigned int attr)
{
	mmap_region_t *mm = ctx->mmap;
	unsigned long long end_pa = base_pa + size - 1;
	uintptr_t end_va = base_va + size - 1;

	if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
	    !IS_PAGE_ALIGNED(size))
		return -EINVAL;

	/* Check for overflows */
	if ((base_pa > end_pa) || (base_va > end_va))
		return -ERANGE;

	if ((base_va + (uintptr_t)size - (uintptr_t)1) > ctx->va_max_address)
		return -ERANGE;

	if ((base_pa + (unsigned long long)size - 1ULL) > ctx->pa_max_address)
		return -ERANGE;

	/* Check that there is space in the mmap array */
	if (ctx->mmap[ctx->mmap_num - 1].size != 0)
		return -ENOMEM;

	/* Check for PAs and VAs overlaps with all other regions */
	for (mm = ctx->mmap; mm->size; ++mm) {

		uintptr_t mm_end_va = mm->base_va + mm->size - 1;

		/*
		 * Check if one of the regions is completely inside the other
		 * one.
		 */
		int fully_overlapped_va =
			((base_va >= mm->base_va) && (end_va <= mm_end_va)) ||
			((mm->base_va >= base_va) && (mm_end_va <= end_va));

		/*
		 * Full VA overlaps are only allowed if both regions are
		 * identity mapped (zero offset) or have the same VA to PA
		 * offset. Also, make sure that it's not the exact same area.
		 * This can only be done with locked regions.
		 */
		if (fully_overlapped_va) {

			if ((mm->base_va - mm->base_pa) != (base_va - base_pa))
				return -EPERM;

			if ((base_va == mm->base_va) && (size == mm->size))
				return -EPERM;

		} else {
			/*
			 * If the regions do not have fully overlapping VAs,
			 * then they must have fully separated VAs and PAs.
			 * Partial overlaps are not allowed.
			 */

			unsigned long long mm_end_pa =
					mm->base_pa + mm->size - 1;

			int separated_pa =
				(end_pa < mm->base_pa) || (base_pa > mm_end_pa);
			int separated_va =
				(end_va < mm->base_va) || (base_va > mm_end_va);

			if (!(separated_va && separated_pa))
				return -EPERM;
		}
	}

	return 0;
}
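
/*
 * Example of the overlap rules above: an identity-mapped region at
 * PA/VA 0x1000000 (size 0x10000) and a second identity-mapped region at
 * 0x1004000 (size 0x1000) fully overlap in VA with the same VA to PA
 * offset, so the check passes. If the second region instead only
 * partially overlapped the first, or mapped the same VA range to a
 * different PA, mmap_add_region_check() would return -EPERM.
 */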

void mmap_add_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap;
	mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
	unsigned long long end_pa = mm->base_pa + mm->size - 1;
	uintptr_t end_va = mm->base_va + mm->size - 1;
	int ret;

	/* Ignore empty regions */
	if (!mm->size)
		return;

	ret = mmap_add_region_check(ctx, mm->base_pa, mm->base_va, mm->size,
				    mm->attr);
	if (ret != 0) {
		ERROR("mmap_add_region_check() failed. error %d\n", ret);
		assert(0);
		return;
	}

	/*
	 * Find correct place in mmap to insert new region.
	 *
	 * 1 - Lower region VA end first.
	 * 2 - Smaller region size first.
	 *
	 * VA  0                                  0xFF
	 *
	 * 1st |------|
	 * 2nd |------------|
	 * 3rd                 |------|
	 * 4th                            |---|
	 * 5th                                   |---|
	 * 6th                            |----------|
	 * 7th |-------------------------------------|
	 *
	 * This is required for overlapping regions only. It simplifies adding
	 * regions with the loop in xlat_tables_init_internal because the outer
	 * ones won't overwrite block or page descriptors of regions added
	 * previously.
	 */

	while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va
	       && mm_cursor->size)
		++mm_cursor;

	while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
	       && (mm_cursor->size < mm->size))
		++mm_cursor;

	/* Make room for new region by moving other regions up by one place */
	memmove(mm_cursor + 1, mm_cursor,
		(uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0);

	mm_cursor->base_pa = mm->base_pa;
	mm_cursor->base_va = mm->base_va;
	mm_cursor->size = mm->size;
	mm_cursor->attr = mm->attr;

	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;
}

#if LOG_LEVEL >= LOG_LEVEL_VERBOSE

/* Print the attributes of the specified block descriptor. */
static void xlat_desc_print(uint64_t desc)
{
	int mem_type_index = ATTR_INDEX_GET(desc);

	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
		tf_printf("MEM");
	} else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
		tf_printf("NC");
	} else {
		assert(mem_type_index == ATTR_DEVICE_INDEX);
		tf_printf("DEV");
	}

	tf_printf(LOWER_ATTRS(AP_RO) & desc ? "-RO" : "-RW");
	tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
	tf_printf(UPPER_ATTRS(XN) & desc ? "-XN" : "-EXEC");
}

static const char * const level_spacers[] = {
	"",
	"  ",
	"    ",
	"      "
};

/*
 * Recursive function that reads the translation tables passed as an argument
 * and prints their status.
 */
static void xlat_tables_print_internal(const uintptr_t table_base_va,
		uint64_t *const table_base, const int table_entries,
		const int level)
{
	assert(level <= XLAT_TABLE_LEVEL_MAX);

	uint64_t desc;
	uintptr_t table_idx_va = table_base_va;
	int table_idx = 0;

	size_t level_size = XLAT_BLOCK_SIZE(level);

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		if ((desc & DESC_MASK) == INVALID_DESC) {

			tf_printf("%sVA:%p size:0x%zx\n",
				  level_spacers[level],
				  (void *)table_idx_va, level_size);

		} else {

			/*
			 * Check if this is a table or a block. Tables are only
			 * allowed in levels other than 3, but PAGE_DESC has
			 * the same value as TABLE_DESC, so we need to check.
			 */
			if (((desc & DESC_MASK) == TABLE_DESC) &&
			    (level < XLAT_TABLE_LEVEL_MAX)) {
				/*
				 * Do not print any PA for a table descriptor,
				 * as it doesn't directly map physical memory
				 * but instead points to the next translation
				 * table in the translation table walk.
				 */
				tf_printf("%sVA:%p size:0x%zx\n",
					  level_spacers[level],
					  (void *)table_idx_va, level_size);

				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;

				xlat_tables_print_internal(table_idx_va,
					(uint64_t *)addr_inner,
					XLAT_TABLE_ENTRIES, level + 1);
			} else {
				tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
					  level_spacers[level],
					  (void *)table_idx_va,
					  (unsigned long long)(desc & TABLE_ADDR_MASK),
					  level_size);
				xlat_desc_print(desc);
				tf_printf("\n");
			}
		}

		table_idx++;
		table_idx_va += level_size;
	}
}
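
/*
 * Illustrative output (assuming a 4KB granule): a 2MB read-write block of
 * secure normal memory identity-mapped at level 2 would be printed as
 *
 *   VA:0x40000000 PA:0x40000000 size:0x200000 MEM-RW-S-XN
 *
 * following the format strings and attribute suffixes above.
 */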

#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */

void xlat_tables_print(xlat_ctx_t *ctx)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	xlat_tables_print_internal(0, ctx->base_table, ctx->base_table_entries,
				   ctx->base_level);
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
}

void init_xlation_table(xlat_ctx_t *ctx)
{
	mmap_region_t *mm = ctx->mmap;

	/* All tables must be zeroed before mapping any region. */

	for (int i = 0; i < ctx->base_table_entries; i++)
		ctx->base_table[i] = INVALID_DESC;

	for (int j = 0; j < ctx->tables_num; j++) {
		for (int i = 0; i < XLAT_TABLE_ENTRIES; i++)
			ctx->tables[j][i] = INVALID_DESC;
	}

	while (mm->size)
		xlat_tables_map_region(ctx, mm++, 0, ctx->base_table,
				       ctx->base_table_entries, ctx->base_level);

	ctx->initialized = 1;
}
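
/*
 * Minimal usage sketch (illustrative only; plat_setup_xlat_example and the
 * region values are hypothetical, not part of this library's API): populate
 * the mmap array of a context, then build its translation tables.
 */
#if 0
static void plat_setup_xlat_example(xlat_ctx_t *plat_xlat_ctx)
{
	/* Identity-map 2MB of non-secure, read-write normal memory. */
	mmap_region_t dram = {
		.base_pa = 0x80000000ULL,
		.base_va = 0x80000000U,
		.size = 0x200000U,
		.attr = MT_MEMORY | MT_RW | MT_NS
	};

	mmap_add_region_ctx(plat_xlat_ctx, &dram);

	/* Write all descriptors; the MMU can be enabled afterwards. */
	init_xlation_table(plat_xlat_ctx);
}
#endif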