// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * (C) Copyright 2016
 * Alexander Graf <agraf@suse.de>
 */

#include <cpu_func.h>
#include <hang.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)

/*
 * With 4k page granule, a virtual address is split into 4 lookup parts
 * spanning 9 bits each:
 *
 *    _______________________________________________
 *   |       |       |       |       |       |       |
 *   |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *   |_______|_______|_______|_______|_______|_______|
 *     63-48   47-39   38-30   29-21   20-12   11-00
 *
 *             mask        page size
 *
 * Lv0: FF8000000000       --
 * Lv1:   7FC0000000       1G
 * Lv2:     3FE00000       2M
 * Lv3:       1FF000       4K
 * off:          FFF
 */
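
/*
 * A worked example (illustrative value, not taken from any board): VA
 * 0x40086123 decodes to Lv0 index 0, Lv1 index 1, Lv2 index 0, Lv3 index
 * 0x86 and offset 0x123, i.e. the 4K page at 0x40086000 inside the 1G
 * region starting at 0x40000000.
 */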

static int get_effective_el(void)
{
	int el = current_el();

	if (el == 2) {
		u64 hcr_el2;

		/*
		 * If we are using the EL2&0 translation regime, the TCR_EL2
		 * looks like the EL1 version, even though we are in EL2.
		 */
		__asm__ ("mrs %0, HCR_EL2\n" : "=r" (hcr_el2));
		if (hcr_el2 & BIT(HCR_EL2_E2H_BIT))
			return 1;
	}

	return el;
}

u64 get_tcr(u64 *pips, u64 *pva_bits)
{
	int el = get_effective_el();
	u64 max_addr = 0;
	u64 ips, va_bits;
	u64 tcr;
	int i;

	/* Find the largest address we need to support */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);

	/* Calculate the maximum physical (and thus virtual) address */
	if (max_addr > (1ULL << 44)) {
		ips = 5;
		va_bits = 48;
	} else if (max_addr > (1ULL << 42)) {
		ips = 4;
		va_bits = 44;
	} else if (max_addr > (1ULL << 40)) {
		ips = 3;
		va_bits = 42;
	} else if (max_addr > (1ULL << 36)) {
		ips = 2;
		va_bits = 40;
	} else if (max_addr > (1ULL << 32)) {
		ips = 1;
		va_bits = 36;
	} else {
		ips = 0;
		va_bits = 32;
	}

	if (el == 1) {
		tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
	} else if (el == 2) {
		tcr = TCR_EL2_RSVD | (ips << 16);
	} else {
		tcr = TCR_EL3_RSVD | (ips << 16);
	}

	/* PTWs cacheable, inner/outer WBWA and inner shareable */
	tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
	tcr |= TCR_T0SZ(va_bits);

	if (pips)
		*pips = ips;
	if (pva_bits)
		*pva_bits = va_bits;

	return tcr;
}
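
/*
 * Example (illustrative): a memory map topping out at 0x1_8000_0000 (6GiB)
 * selects ips = 1 and va_bits = 36, so T0SZ = 64 - 36 = 28 and the MMU
 * resolves a 36-bit VA space with a 4K granule.
 */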

#define MAX_PTE_ENTRIES 512

static int pte_type(u64 *pte)
{
	return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
	/* Page offset is 12 bits wide, every level translates 9 bits */
	return (12 + 9 * (3 - level));
}
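
/*
 * E.g. level2shift(0) = 39, level2shift(1) = 30, level2shift(2) = 21 and
 * level2shift(3) = 12, matching the 1G/2M/4K per-level sizes in the
 * diagram above.
 */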

static u64 *find_pte(u64 addr, int level)
{
	int start_level = 0;
	u64 *pte;
	u64 idx;
	u64 va_bits;
	int i;

	debug("addr=%llx level=%d\n", addr, level);

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	if (level < start_level)
		return NULL;

	/* Walk through all page table levels to find our PTE */
	pte = (u64*)gd->arch.tlb_addr;
	for (i = start_level; i < 4; i++) {
		idx = (addr >> level2shift(i)) & 0x1FF;
		pte += idx;
		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

		/* Found it */
		if (i == level)
			return pte;
		/* PTE is no table (either invalid or block), can't traverse */
		if (pte_type(pte) != PTE_TYPE_TABLE)
			return NULL;
		/* Off to the next level */
		pte = (u64*)(*pte & 0x0000fffffffff000ULL);
	}

	/* Should never reach here */
	return NULL;
}
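
/*
 * Usage sketch (hypothetical address): find_pte(0x40200000, 2) returns a
 * pointer to the level-2 (2M) entry covering that address, or NULL if the
 * walk hits a block or invalid entry before reaching level 2.
 */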

#ifdef CONFIG_CMO_BY_VA_ONLY
static void __cmo_on_leaves(void (*cmo_fn)(unsigned long, unsigned long),
			    u64 pte, int level, u64 base)
{
	u64 *ptep;
	int i;

	ptep = (u64 *)(pte & GENMASK_ULL(47, PAGE_SHIFT));
	for (i = 0; i < PAGE_SIZE / sizeof(u64); i++) {
		u64 end, va = base + i * BIT(level2shift(level));
		u64 type, attrs;

		pte = ptep[i];
		type = pte & PTE_TYPE_MASK;
		attrs = pte & PMD_ATTRINDX_MASK;
		debug("PTE %llx at level %d VA %llx\n", pte, level, va);

		/* Not valid? next! */
		if (!(type & PTE_TYPE_VALID))
			continue;

		/* Not a leaf? Recurse on the next level */
		if (!(type == PTE_TYPE_BLOCK ||
		      (level == 3 && type == PTE_TYPE_PAGE))) {
			__cmo_on_leaves(cmo_fn, pte, level + 1, va);
			continue;
		}

		/*
		 * From this point, this must be a leaf.
		 *
		 * Start excluding non-memory mappings
		 */
		if (attrs != PTE_BLOCK_MEMTYPE(MT_NORMAL) &&
		    attrs != PTE_BLOCK_MEMTYPE(MT_NORMAL_NC))
			continue;

		end = va + BIT(level2shift(level)) - 1;

		/* No intersection with RAM? */
		if (end < gd->ram_base ||
		    va >= (gd->ram_base + gd->ram_size))
			continue;

		/*
		 * OK, we have a partial RAM mapping. However, this
		 * can cover *more* than the RAM. Yes, u-boot is
		 * *that* braindead. Compute the intersection we care
		 * about, and not a byte more.
		 */
		va = max(va, (u64)gd->ram_base);
		end = min(end, gd->ram_base + gd->ram_size);

		debug("Flush PTE %llx at level %d: %llx-%llx\n",
		      pte, level, va, end);
		cmo_fn(va, end);
	}
}

static void apply_cmo_to_mappings(void (*cmo_fn)(unsigned long, unsigned long))
{
	u64 va_bits;
	int sl = 0;

	if (!gd->arch.tlb_addr)
		return;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		sl = 1;

	__cmo_on_leaves(cmo_fn, gd->arch.tlb_addr, sl, 0);
}
#else
static inline void apply_cmo_to_mappings(void *dummy) {}
#endif

/* Returns and creates a new full table (512 entries) */
static u64 *create_table(void)
{
	u64 *new_table = (u64*)gd->arch.tlb_fillptr;
	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

	/* Allocate MAX_PTE_ENTRIES pte entries */
	gd->arch.tlb_fillptr += pt_len;

	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
		      "Please increase the size in get_page_table_size()",
		      gd->arch.tlb_fillptr - gd->arch.tlb_addr,
		      gd->arch.tlb_size);

	/* Mark all entries as invalid */
	memset(new_table, 0, pt_len);

	return new_table;
}

static void set_pte_table(u64 *pte, u64 *table)
{
	/* Point *pte to the new table */
	debug("Setting %p to addr=%p\n", pte, table);
	*pte = PTE_TYPE_TABLE | (ulong)table;
}

/* Splits a block PTE into table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
	u64 old_pte = *pte;
	u64 *new_table;
	u64 i = 0;
	/* level describes the parent level, we need the child ones */
	int levelshift = level2shift(level + 1);

	if (pte_type(pte) != PTE_TYPE_BLOCK)
		panic("PTE %p (%llx) is not a block. Some driver code wants to "
		      "modify dcache settings for a range not covered in "
		      "mem_map.", pte, old_pte);

	new_table = create_table();
	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		new_table[i] = old_pte | (i << levelshift);

		/* Level 3 block PTEs have the table type */
		if ((level + 1) == 3)
			new_table[i] |= PTE_TYPE_TABLE;

		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
	}

	/* Set the new table into effect */
	set_pte_table(pte, new_table);
}
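
/*
 * For example (illustrative), splitting a 1G block PTE at level 1 creates
 * a fresh table of 512 level-2 entries, each inheriting the old block's
 * attributes and mapping the old output address plus i * 2M.
 */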

static void map_range(u64 virt, u64 phys, u64 size, int level,
		      u64 *table, u64 attrs)
{
	u64 map_size = BIT_ULL(level2shift(level));
	int i, idx;

	idx = (virt >> level2shift(level)) & (MAX_PTE_ENTRIES - 1);
	for (i = idx; size; i++) {
		u64 next_size, *next_table;

		if (level >= 1 &&
		    size >= map_size && !(virt & (map_size - 1))) {
			if (level == 3)
				table[i] = phys | attrs | PTE_TYPE_PAGE;
			else
				table[i] = phys | attrs;

			virt += map_size;
			phys += map_size;
			size -= map_size;

			continue;
		}

		/* Going one level down */
		if (pte_type(&table[i]) == PTE_TYPE_FAULT)
			set_pte_table(&table[i], create_table());
		else if (pte_type(&table[i]) != PTE_TYPE_TABLE)
			split_block(&table[i], level);

		next_table = (u64 *)(table[i] & GENMASK_ULL(47, PAGE_SHIFT));
		next_size = min(map_size - (virt & (map_size - 1)), size);

		map_range(virt, phys, next_size, level + 1, next_table, attrs);

		virt += next_size;
		phys += next_size;
		size -= next_size;
	}
}

static void add_map(struct mm_region *map)
{
	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	u64 va_bits;
	int level = 0;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		level = 1;

	map_range(map->virt, map->phys, map->size, level,
		  (u64 *)gd->arch.tlb_addr, attrs);
}
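
/*
 * Boards describe their layout through mem_map[]; a minimal sketch with
 * hypothetical addresses (the real array lives in board/SoC code):
 *
 *	static struct mm_region example_mem_map[] = {
 *		{
 *			.virt = 0x0UL,
 *			.phys = 0x0UL,
 *			.size = 0x80000000UL,
 *			.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
 *				 PTE_BLOCK_INNER_SHARE,
 *		},
 *		{ 0 }	(an all-zero entry terminates the list)
 *	};
 */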

static void count_range(u64 virt, u64 size, int level, int *cntp)
{
	u64 map_size = BIT_ULL(level2shift(level));
	int i, idx;

	idx = (virt >> level2shift(level)) & (MAX_PTE_ENTRIES - 1);
	for (i = idx; size; i++) {
		u64 next_size;

		if (level >= 1 &&
		    size >= map_size && !(virt & (map_size - 1))) {
			virt += map_size;
			size -= map_size;

			continue;
		}

		/* Going one level down */
		(*cntp)++;
		next_size = min(map_size - (virt & (map_size - 1)), size);

		count_range(virt, next_size, level + 1, cntp);

		virt += next_size;
		size -= next_size;
	}
}

static int count_ranges(void)
{
	int i, count = 0, level = 0;
	u64 va_bits;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		level = 1;

	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		count_range(mem_map[i].virt, mem_map[i].size, level, &count);

	return count;
}

#define ALL_ATTRS (3 << 8 | PMD_ATTRINDX_MASK)
#define PTE_IS_TABLE(pte, level) (pte_type(&(pte)) == PTE_TYPE_TABLE && (level) < 3)

enum walker_state {
	WALKER_STATE_START = 0,
	WALKER_STATE_TABLE,
	WALKER_STATE_REGION, /* block or page, depending on level */
};

/**
 * __pagetable_walk() - Walk through the pagetable and call cb() for each memory region
 *
 * This is a software implementation of the ARMv8-A MMU translation table walk, as per
 * section D5.4 of the ARMv8-A Architecture Reference Manual. It recursively walks the
 * 4 or 3 levels of the page table and calls the callback function for each discrete
 * region of memory (that being the discovery of a new table, a collection of blocks
 * with the same attributes, or of pages with the same attributes).
 *
 * U-Boot picks the smallest number of virtual address (VA) bits that it can based on the
 * memory map configured by the board. If this is less than 39 then the MMU will only use
 * 3 levels of translation instead of 4 - skipping level 0.
 *
 * Each level has 512 entries of 64 bits each. Each entry includes attribute bits and
 * an address. When the attribute bits indicate a table, the address is the physical
 * address of the table, so we can recursively call __pagetable_walk() on it (after calling
 * @cb). If instead they indicate a block or page, we record the start address and attributes
 * and continue walking until we find a region with different attributes, or the end of the
 * table; in either case we call @cb with the start and end address of the region.
 *
 * This approach can be used to fully emulate the MMU's translation table walk, as per
 * Figure D5-25 of the ARMv8-A Architecture Reference Manual.
 *
 * @addr: The address of the table to walk
 * @tcr: The TCR register value
 * @level: The current level of the table
 * @cb: The callback function to call for each region
 * @priv: Private data to pass to the callback function
 */
static void __pagetable_walk(u64 addr, u64 tcr, int level, pte_walker_cb_t cb, void *priv)
{
	u64 *table = (u64 *)addr;
	u64 attrs, last_attrs = 0, last_addr = 0, entry_start = 0;
	int i;
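	/* T0SZ lives in TCR[5:0], so the VA width below is 64 - T0SZ */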
	u64 va_bits = 64 - (tcr & (BIT(6) - 1));
	static enum walker_state state[4] = { 0 };
	static bool exit;

	if (!level) {
		exit = false;
		if (va_bits < 39)
			level = 1;
	}

	state[level] = WALKER_STATE_START;

	/* Walk through the table entries */
	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		u64 pte = table[i];
		u64 _addr = pte & GENMASK_ULL(va_bits, PAGE_SHIFT);

		if (exit)
			return;

		if (pte_type(&pte) == PTE_TYPE_FAULT)
			continue;

		attrs = pte & ALL_ATTRS;
		/* If we're currently inside a block or set of pages */
		if (state[level] > WALKER_STATE_START && state[level] != WALKER_STATE_TABLE) {
			/*
			 * Continue walking if this entry has the same attributes as the last and
			 * is one page/block away -- it's a contiguous region.
			 */
			if (attrs == last_attrs && _addr == last_addr + (1 << level2shift(level))) {
				last_attrs = attrs;
				last_addr = _addr;
				continue;
			} else {
				/* We either hit a table or a new region */
				exit = cb(entry_start, last_addr + (1 << level2shift(level)),
					  va_bits, level, priv);
				if (exit)
					return;
				state[level] = WALKER_STATE_START;
			}
		}
		last_attrs = attrs;
		last_addr = _addr;

		if (PTE_IS_TABLE(pte, level)) {
			/* After the end of the table might be corrupted data */
			if (!_addr || (pte & 0xfff) > 0x3ff)
				return;
			state[level] = WALKER_STATE_TABLE;
			/* Signify the start of a table */
			exit = cb(pte, 0, va_bits, level, priv);
			if (exit)
				return;

			/* Go down a level */
			__pagetable_walk(_addr, tcr, level + 1, cb, priv);
			state[level] = WALKER_STATE_START;
		} else if (pte_type(&pte) == PTE_TYPE_BLOCK || pte_type(&pte) == PTE_TYPE_PAGE) {
			/* We found a block or page, start walking */
			entry_start = pte;
			state[level] = WALKER_STATE_REGION;
		}
	}

	if (state[level] > WALKER_STATE_START)
		exit = cb(entry_start, last_addr + (1 << level2shift(level)), va_bits, level, priv);
}

static void pretty_print_pte_type(u64 pte)
{
	switch (pte_type(&pte)) {
	case PTE_TYPE_FAULT:
		printf(" %-5s", "Fault");
		break;
	case PTE_TYPE_BLOCK:
		printf(" %-5s", "Block");
		break;
	case PTE_TYPE_PAGE:
		printf(" %-5s", "Pages");
		break;
	default:
		printf(" %-5s", "Unk");
	}
}

static void pretty_print_table_attrs(u64 pte)
{
	int ap = (pte & PTE_TABLE_AP) >> 61;

	printf(" | %2s %10s",
	       (ap & 2) ? "RO" : "",
	       (ap & 1) ? "!EL0" : "");
	printf(" | %3s %2s %2s",
	       (pte & PTE_TABLE_PXN) ? "PXN" : "",
	       (pte & PTE_TABLE_XN) ? "XN" : "",
	       (pte & PTE_TABLE_NS) ? "NS" : "");
}

static void pretty_print_block_attrs(u64 pte)
{
	u64 attrs = pte & PMD_ATTRINDX_MASK;

	switch (attrs) {
	case PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE):
		printf(" | %-13s", "Device-nGnRnE");
		break;
	case PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRE):
		printf(" | %-13s", "Device-nGnRE");
		break;
	case PTE_BLOCK_MEMTYPE(MT_DEVICE_GRE):
		printf(" | %-13s", "Device-GRE");
		break;
	case PTE_BLOCK_MEMTYPE(MT_NORMAL_NC):
		printf(" | %-13s", "Normal-NC");
		break;
	case PTE_BLOCK_MEMTYPE(MT_NORMAL):
		printf(" | %-13s", "Normal");
		break;
	default:
		printf(" | %-13s", "Unknown");
	}
}

static void pretty_print_block_memtype(u64 pte)
{
	u64 share = pte & (3 << 8);

	switch (share) {
	case PTE_BLOCK_NON_SHARE:
		printf(" | %-16s", "Non-shareable");
		break;
	case PTE_BLOCK_OUTER_SHARE:
		printf(" | %-16s", "Outer-shareable");
		break;
	case PTE_BLOCK_INNER_SHARE:
		printf(" | %-16s", "Inner-shareable");
		break;
	default:
		printf(" | %-16s", "Unknown");
	}
}

static void print_pte(u64 pte, int level)
{
	if (PTE_IS_TABLE(pte, level)) {
		printf(" %-5s", "Table");
		pretty_print_table_attrs(pte);
	} else {
		pretty_print_pte_type(pte);
		pretty_print_block_attrs(pte);
		pretty_print_block_memtype(pte);
	}
	printf("\n");
}

/**
 * pagetable_print_entry() - Callback function to print a single pagetable region
 *
 * This is the default callback used by @dump_pagetable(). It does some basic pretty
 * printing (see example in the U-Boot arm64 documentation). It can be replaced by
 * a custom callback function if more detailed information is needed.
 *
 * @start_attrs: The start address and attributes of the region (or table address)
 * @end: The end address of the region (or 0 if it's a table)
 * @va_bits: The number of bits used for the virtual address
 * @level: The level of the region
 * @priv: Private data for the callback (unused)
 */
static bool pagetable_print_entry(u64 start_attrs, u64 end, int va_bits, int level, void *priv)
{
	u64 _addr = start_attrs & GENMASK_ULL(va_bits, PAGE_SHIFT);
	int indent = va_bits < 39 ? level - 1 : level;

	printf("%*s", indent * 2, "");
	if (PTE_IS_TABLE(start_attrs, level))
		printf("[%#011llx]%14s", _addr, "");
	else
		printf("[%#011llx - %#011llx]", _addr, end);

	printf("%*s | ", (3 - level) * 2, "");
	print_pte(start_attrs, level);

	return false;
}

void walk_pagetable(u64 ttbr, u64 tcr, pte_walker_cb_t cb, void *priv)
{
	__pagetable_walk(ttbr, tcr, 0, cb, priv);
}

void dump_pagetable(u64 ttbr, u64 tcr)
{
	u64 va_bits = 64 - (tcr & (BIT(6) - 1));

	printf("Walking pagetable at %p, va_bits: %lld. Using %d levels\n", (void *)ttbr,
	       va_bits, va_bits < 39 ? 3 : 4);
	walk_pagetable(ttbr, tcr, pagetable_print_entry, NULL);
}
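
/*
 * Typical invocation (sketch): dump the live U-Boot tables with
 *
 *	dump_pagetable(gd->arch.tlb_addr, get_tcr(NULL, NULL));
 *
 * which prints one line per table or contiguous block/page region.
 */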

/* Returns the estimated required size of all page tables */
__weak u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size;

	/* Account for all page tables we would need to cover our memory map */
	size = one_pt * count_ranges();

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
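
/*
 * Illustrative arithmetic: one table is 512 * 8 = 4KiB, so if count_ranges()
 * returns 4 the estimate is 4 * 4KiB * 2 + 4 * 4KiB = 48KiB (four tables,
 * doubled for the emergency copy, plus four spares for later splits).
 */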

void setup_pgtables(void)
{
	int i;

	if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
		panic("Page table pointer not setup.");

	/*
	 * Allocate the first level we're on with invalid entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
	 * Lv0 page table, otherwise it's the entry Lv1 page table.
	 */
	create_table();

	/* Now add all MMU table entries one after another to the table */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		add_map(&mem_map[i]);
}

static void setup_all_pgtables(void)
{
	u64 tlb_addr = gd->arch.tlb_addr;
	u64 tlb_size = gd->arch.tlb_size;

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
			     (uintptr_t)gd->arch.tlb_addr;
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	setup_pgtables();
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	gd->arch.tlb_addr = tlb_addr;
	gd->arch.tlb_size = tlb_size;
}

/* to activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
	int el;

	/* Set up page tables only once */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(NULL, NULL),
			  MEMORY_ATTRIBUTES);

	/* enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
#ifndef CONFIG_CMO_BY_VA_ONLY
	__asm_invalidate_dcache_all();
	__asm_invalidate_l3_dcache();
#else
	apply_cmo_to_mappings(invalidate_dcache_range);
#endif
}

/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using stack.
 * __asm_flush_l3_dcache returns the timeout status.
 */
inline void flush_dcache_all(void)
{
#ifndef CONFIG_CMO_BY_VA_ONLY
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_dcache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("flushing dcache successfully.\n");
#else
	apply_cmo_to_mappings(flush_dcache_range);
#endif
}

#ifndef CONFIG_SYS_DISABLE_DCACHE_OPS
/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_invalidate_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}
#else
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}
#endif /* CONFIG_SYS_DISABLE_DCACHE_OPS */

void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	/* Set up page tables only once (it is done also by mmu_setup()) */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	set_sctlr(get_sctlr() | CR_C);
}

void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* if cache isn't enabled no need to disable */
	if (!(sctlr & CR_C))
		return;

	if (IS_ENABLED(CONFIG_CMO_BY_VA_ONLY)) {
		/*
		 * When invalidating by VA, do it *before* turning the MMU
		 * off, so that at least our stack is coherent.
		 */
		flush_dcache_all();
	}

	set_sctlr(sctlr & ~(CR_C|CR_M));

	if (!IS_ENABLED(CONFIG_CMO_BY_VA_ONLY))
		flush_dcache_all();

	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");

	return NULL;
}

static bool is_aligned(u64 addr, u64 size, u64 align)
{
	return !(addr & (align - 1)) && !(size & (align - 1));
}

/* Use flag to indicate if attrs has more than d-cache attributes */
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 *pte = find_pte(start, level);

	/* Can we just modify the current level block PTE? */
	if (is_aligned(start, size, levelsize)) {
		if (flag) {
			*pte &= ~PMD_ATTRMASK;
			*pte |= attrs & PMD_ATTRMASK;
		} else {
			*pte &= ~PMD_ATTRINDX_MASK;
			*pte |= attrs & PMD_ATTRINDX_MASK;
		}
		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

		return levelsize;
	}

	/* Unaligned or doesn't fit, maybe split block into table */
	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

	/* Maybe we need to split the block into a table */
	if (pte_type(pte) == PTE_TYPE_BLOCK)
		split_block(pte, level);

	/* And then double-check it became a table or already is one */
	if (pte_type(pte) != PTE_TYPE_TABLE)
		panic("PTE %p (%llx) for addr=%llx should be a table",
		      pte, *pte, start);

	/* Roll on to the next page table level */
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 attrs = PMD_ATTRINDX(option >> 2);
	u64 real_start = start;
	u64 real_size = size;

	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

	if (!gd->arch.tlb_emerg)
		panic("Emergency page table not setup.");

	/*
	 * We cannot modify page tables that we're currently running on,
	 * so we first need to switch to the "emergency" page tables where
	 * we can safely modify our primary page tables and then switch back.
	 */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	while (size > 0) {
		int level;
		u64 r;

		for (level = 1; level < 4; level++) {
			/* Set d-cache attributes only */
			r = set_one_region(start, size, attrs, false, level);
			if (r) {
				/* PTE successfully replaced */
				size -= r;
				start += r;
				break;
			}
		}
	}

	/* We're done modifying page tables, switch back to our primary ones */
	__asm_switch_ttbr(gd->arch.tlb_addr);

	/*
	 * Make sure there's nothing stale in dcache for a region that might
	 * have caches off now
	 */
	flush_dcache_range(real_start, real_start + real_size);
}
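
/*
 * Example (hypothetical address): a driver that needs an uncached 2MiB
 * buffer, already covered by mem_map, could call
 *
 *	mmu_set_region_dcache_behaviour(0x80000000, SZ_2M, DCACHE_OFF);
 *
 * and the block covering it would be split down as required.
 */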

/*
 * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
 * The process is break-before-make: the target region is marked invalid
 * while it is being changed.
 */
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
{
	int level;
	u64 r, size, start;

	start = addr;
	size = siz;
	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to "invalid".
	 */
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to fault */
			r = set_one_region(start, size, PTE_TYPE_FAULT, true,
					   level);
			if (r) {
				/* PTE successfully invalidated */
				size -= r;
				start += r;
				break;
			}
		}
	}

	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	start = addr;
	size = siz;
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to new attributes */
			r = set_one_region(start, size, attrs, true, level);
			if (r) {
				/* PTE successfully updated */
				size -= r;
				start += r;
				break;
			}
		}
	}
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();
}
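
/*
 * Example (hypothetical values): marking a 2MiB region normal memory but
 * non-executable might look like
 *
 *	mmu_change_region_attr(addr, SZ_2M,
 *			       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
 *			       PTE_BLOCK_INNER_SHARE | PTE_TYPE_BLOCK |
 *			       PTE_BLOCK_AF | PTE_BLOCK_PXN | PTE_BLOCK_UXN);
 */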

#else /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

/*
 * For SPL builds, we may want to not have dcache enabled. Any real U-Boot
 * running however really wants to have dcache and the MMU active. Check that
 * everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_SPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF)

void icache_enable(void)
{
	invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

int mmu_status(void)
{
	return (get_sctlr() & CR_M) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
	__asm_invalidate_l3_icache();
}

#else /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

int mmu_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

/*
 * Enable dCache & iCache; whether the caches are actually enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}