// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * (C) Copyright 2016
 * Alexander Graf <agraf@suse.de>
 */

#include <cpu_func.h>
#include <hang.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <linux/errno.h>

DECLARE_GLOBAL_DATA_PTR;

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)

/*
 * With a 4k page granule, a virtual address is split into 4 lookup parts
 * spanning 9 bits each:
 *
 *    _______________________________________________
 *   |       |       |       |       |       |       |
 *   |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *   |_______|_______|_______|_______|_______|_______|
 *     63-48   47-39   38-30   29-21   20-12   11-00
 *
 *             mask        page size
 *
 *    Lv0: FF8000000000       --
 *    Lv1:   7FC0000000       1G
 *    Lv2:     3FE00000       2M
 *    Lv3:       1FF000       4K
 *    off:          FFF
 */
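
/*
 * For example, VA 0x40200000 decomposes into Lv0 index 0, Lv1 index 1
 * (bit 30), Lv2 index 1 (bit 21), Lv3 index 0 and offset 0: the second
 * 1G entry, then the second 2M entry below it.
 */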

static int get_effective_el(void)
{
	int el = current_el();

	if (el == 2) {
		u64 hcr_el2;

		/*
		 * If we are using the EL2&0 translation regime, the TCR_EL2
		 * looks like the EL1 version, even though we are in EL2.
		 */
		__asm__ ("mrs %0, HCR_EL2\n" : "=r" (hcr_el2));
		if (hcr_el2 & BIT(HCR_EL2_E2H_BIT))
			return 1;
	}

	return el;
}

u64 get_tcr(u64 *pips, u64 *pva_bits)
{
	int el = get_effective_el();
	u64 max_addr = 0;
	u64 ips, va_bits;
	u64 tcr;
	int i;

	/* Find the largest address we need to support */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);

	/* Calculate the maximum physical (and thus virtual) address */
	if (max_addr > (1ULL << 44)) {
		ips = 5;
		va_bits = 48;
	} else if (max_addr > (1ULL << 42)) {
		ips = 4;
		va_bits = 44;
	} else if (max_addr > (1ULL << 40)) {
		ips = 3;
		va_bits = 42;
	} else if (max_addr > (1ULL << 36)) {
		ips = 2;
		va_bits = 40;
	} else if (max_addr > (1ULL << 32)) {
		ips = 1;
		va_bits = 36;
	} else {
		ips = 0;
		va_bits = 32;
	}
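
	/*
	 * The ips values chosen above follow the TCR.IPS encoding:
	 * 0 = 32 bits (4G), 1 = 36 bits (64G), 2 = 40 bits (1T),
	 * 3 = 42 bits (4T), 4 = 44 bits (16T), 5 = 48 bits (256T).
	 */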

	if (el == 1) {
		tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
	} else if (el == 2) {
		tcr = TCR_EL2_RSVD | (ips << 16);
	} else {
		tcr = TCR_EL3_RSVD | (ips << 16);
	}

	/* PTWs cacheable, inner/outer WBWA and inner shareable */
	tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
	tcr |= TCR_T0SZ(va_bits);

	if (pips)
		*pips = ips;
	if (pva_bits)
		*pva_bits = va_bits;

	return tcr;
}

#define MAX_PTE_ENTRIES 512

static int pte_type(u64 *pte)
{
	return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
	/* Page offset is 12 bits wide, every level translates 9 bits */
	return (12 + 9 * (3 - level));
}
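
/*
 * E.g. level 0 -> shift 39 (512G per entry), level 1 -> 30 (1G),
 * level 2 -> 21 (2M), level 3 -> 12 (4K).
 */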

static u64 *find_pte(u64 addr, int level)
{
	int start_level = 0;
	u64 *pte;
	u64 idx;
	u64 va_bits;
	int i;

	debug("addr=%llx level=%d\n", addr, level);

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	if (level < start_level)
		return NULL;

	/* Walk through all page table levels to find our PTE */
	pte = (u64 *)gd->arch.tlb_addr;
	for (i = start_level; i < 4; i++) {
		idx = (addr >> level2shift(i)) & 0x1FF;
		pte += idx;
		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

		/* Found it */
		if (i == level)
			return pte;
		/* PTE is not a table (either invalid or block), can't traverse */
		if (pte_type(pte) != PTE_TYPE_TABLE)
			return NULL;
		/* Off to the next level */
		pte = (u64 *)(*pte & 0x0000fffffffff000ULL);
	}

	/* Should never reach here */
	return NULL;
}
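
/*
 * E.g. find_pte(0x40200000, 2) walks Lv0 and Lv1 and returns the level 2
 * descriptor covering that 2M region, or NULL if the walk hits a block
 * or invalid entry first.
 */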

#ifdef CONFIG_CMO_BY_VA_ONLY
static void __cmo_on_leaves(void (*cmo_fn)(unsigned long, unsigned long),
			    u64 pte, int level, u64 base)
{
	u64 *ptep;
	int i;

	ptep = (u64 *)(pte & GENMASK_ULL(47, PAGE_SHIFT));
	for (i = 0; i < PAGE_SIZE / sizeof(u64); i++) {
		u64 end, va = base + i * BIT(level2shift(level));
		u64 type, attrs;

		pte = ptep[i];
		type = pte & PTE_TYPE_MASK;
		attrs = pte & PMD_ATTRINDX_MASK;
		debug("PTE %llx at level %d VA %llx\n", pte, level, va);

		/* Not valid? next! */
		if (!(type & PTE_TYPE_VALID))
			continue;

		/* Not a leaf? Recurse on the next level */
		if (!(type == PTE_TYPE_BLOCK ||
		      (level == 3 && type == PTE_TYPE_PAGE))) {
			__cmo_on_leaves(cmo_fn, pte, level + 1, va);
			continue;
		}

		/*
		 * From this point, this must be a leaf.
		 *
		 * Start excluding non-memory mappings
		 */
		if (attrs != PTE_BLOCK_MEMTYPE(MT_NORMAL) &&
		    attrs != PTE_BLOCK_MEMTYPE(MT_NORMAL_NC))
			continue;

		end = va + BIT(level2shift(level)) - 1;

		/* No intersection with RAM? */
		if (end < gd->ram_base ||
		    va >= (gd->ram_base + gd->ram_size))
			continue;

		/*
		 * OK, we have a partial RAM mapping. However, this
		 * can cover *more* than the RAM. Yes, u-boot is
		 * *that* braindead. Compute the intersection we care
		 * about, and not a byte more.
		 */
		va = max(va, (u64)gd->ram_base);
		end = min(end, gd->ram_base + gd->ram_size);

		debug("Flush PTE %llx at level %d: %llx-%llx\n",
		      pte, level, va, end);
		cmo_fn(va, end);
	}
}

static void apply_cmo_to_mappings(void (*cmo_fn)(unsigned long, unsigned long))
{
	u64 va_bits;
	int sl = 0;

	if (!gd->arch.tlb_addr)
		return;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		sl = 1;

	__cmo_on_leaves(cmo_fn, gd->arch.tlb_addr, sl, 0);
}
#else
static inline void apply_cmo_to_mappings(void *dummy) {}
#endif

/* Creates and returns a new full table (512 entries) */
static u64 *create_table(void)
{
	u64 *new_table = (u64 *)gd->arch.tlb_fillptr;
	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

	/* Allocate MAX_PTE_ENTRIES pte entries */
	gd->arch.tlb_fillptr += pt_len;

	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
		      "Please increase the size in get_page_table_size()",
		      gd->arch.tlb_fillptr - gd->arch.tlb_addr,
		      gd->arch.tlb_size);

	/* Mark all entries as invalid */
	memset(new_table, 0, pt_len);

	return new_table;
}

static void set_pte_table(u64 *pte, u64 *table)
{
	/* Point *pte to the new table */
	debug("Setting %p to addr=%p\n", pte, table);
	*pte = PTE_TYPE_TABLE | (ulong)table;
}

/* Splits a block PTE into a table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
	u64 old_pte = *pte;
	u64 *new_table;
	u64 i = 0;
	/* level describes the parent level, we need the child ones */
	int levelshift = level2shift(level + 1);

	if (pte_type(pte) != PTE_TYPE_BLOCK)
		panic("PTE %p (%llx) is not a block. Some driver code wants to "
		      "modify dcache settings for a range not covered in "
		      "mem_map.", pte, old_pte);

	new_table = create_table();
	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		new_table[i] = old_pte | (i << levelshift);

		/* Level 3 block PTEs have the table type */
		if ((level + 1) == 3)
			new_table[i] |= PTE_TYPE_TABLE;

		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
	}

	/* Set the new table into effect */
	set_pte_table(pte, new_table);
}
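
/*
 * E.g. splitting a 1G block at level 1 yields a level 2 table whose 512
 * entries each map 2M with the attributes of the old block.
 */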

static void map_range(u64 virt, u64 phys, u64 size, int level,
		      u64 *table, u64 attrs)
{
	u64 map_size = BIT_ULL(level2shift(level));
	int i, idx;

	idx = (virt >> level2shift(level)) & (MAX_PTE_ENTRIES - 1);
	for (i = idx; size; i++) {
		u64 next_size, *next_table;

		if (level >= 1 &&
		    size >= map_size && !(virt & (map_size - 1))) {
			if (level == 3)
				table[i] = phys | attrs | PTE_TYPE_PAGE;
			else
				table[i] = phys | attrs;

			virt += map_size;
			phys += map_size;
			size -= map_size;

			continue;
		}

		/* Going one level down */
		if (pte_type(&table[i]) == PTE_TYPE_FAULT)
			set_pte_table(&table[i], create_table());
		else if (pte_type(&table[i]) != PTE_TYPE_TABLE)
			split_block(&table[i], level);

		next_table = (u64 *)(table[i] & GENMASK_ULL(47, PAGE_SHIFT));
		next_size = min(map_size - (virt & (map_size - 1)), size);

		map_range(virt, phys, next_size, level + 1, next_table, attrs);

		virt += next_size;
		phys += next_size;
		size -= next_size;
	}
}
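
/*
 * E.g. mapping 1G + 2M of RAM at a 1G-aligned address emits one level 1
 * block for the first 1G and recurses into a level 2 table for the
 * remaining 2M.
 */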

void mmu_map_region(phys_addr_t addr, u64 size, bool emergency)
{
	u64 va_bits;
	int level = 0;
	u64 attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_INNER_SHARE;

	attrs |= PTE_TYPE_BLOCK | PTE_BLOCK_AF;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		level = 1;

	if (emergency)
		map_range(addr, addr, size, level,
			  (u64 *)gd->arch.tlb_emerg, attrs);

	/* Switch pagetables while we update the primary one */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	map_range(addr, addr, size, level,
		  (u64 *)gd->arch.tlb_addr, attrs);

	__asm_switch_ttbr(gd->arch.tlb_addr);
}
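
/*
 * Example (address is illustrative):
 *
 *	mmu_map_region(0x80000000, SZ_2M, false);
 *
 * maps 2M of normal memory 1:1 in the primary page tables; passing
 * true also records the mapping in the emergency tables.
 */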

static void add_map(struct mm_region *map)
{
	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	u64 va_bits;
	int level = 0;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		level = 1;

	map_range(map->virt, map->phys, map->size, level,
		  (u64 *)gd->arch.tlb_addr, attrs);
}

static void count_range(u64 virt, u64 size, int level, int *cntp)
{
	u64 map_size = BIT_ULL(level2shift(level));
	int i, idx;

	idx = (virt >> level2shift(level)) & (MAX_PTE_ENTRIES - 1);
	for (i = idx; size; i++) {
		u64 next_size;

		if (level >= 1 &&
		    size >= map_size && !(virt & (map_size - 1))) {
			virt += map_size;
			size -= map_size;

			continue;
		}

		/* Going one level down */
		(*cntp)++;
		next_size = min(map_size - (virt & (map_size - 1)), size);

		count_range(virt, next_size, level + 1, cntp);

		virt += next_size;
		size -= next_size;
	}
}

static int count_ranges(void)
{
	int i, count = 0, level = 0;
	u64 va_bits;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		level = 1;

	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		count_range(mem_map[i].virt, mem_map[i].size, level, &count);

	return count;
}

#define ALL_ATTRS (3 << 8 | PMD_ATTRMASK)
#define PTE_IS_TABLE(pte, level) (pte_type(&(pte)) == PTE_TYPE_TABLE && (level) < 3)

enum walker_state {
	WALKER_STATE_START = 0,
	WALKER_STATE_TABLE,
	WALKER_STATE_REGION, /* block or page, depending on level */
};

/**
 * __pagetable_walk() - Walk through the pagetable and call cb() for each memory region
 *
 * This is a software implementation of the ARMv8-A MMU translation table walk, as per
 * section D5.4 of the ARMv8-A Architecture Reference Manual. It recursively walks the
 * 4 or 3 levels of the page table and calls the callback function for each discrete
 * region of memory (that being the discovery of a new table, a collection of blocks
 * with the same attributes, or of pages with the same attributes).
 *
 * U-Boot picks the smallest number of virtual address (VA) bits that it can based on the
 * memory map configured by the board. If this is less than 39 then the MMU will only use
 * 3 levels of translation instead of 4, skipping level 0.
 *
 * Each level has 512 64-bit entries. Each entry includes attribute bits and
 * an address. When the attribute bits indicate a table, the address is the physical
 * address of the table, so we can recursively call __pagetable_walk() on it (after calling
 * @cb). If instead they indicate a block or page, we record the start address and attributes
 * and continue walking until we find a region with different attributes or the end of the
 * table; in either case we call @cb with the start and end address of the region.
 *
 * This approach can be used to fully emulate the MMU's translation table walk, as per
 * Figure D5-25 of the ARMv8-A Architecture Reference Manual.
 *
 * @addr: The address of the table to walk
 * @tcr: The TCR register value
 * @level: The current level of the table
 * @cb: The callback function to call for each region
 * @priv: Private data to pass to the callback function
 */
static void __pagetable_walk(u64 addr, u64 tcr, int level, pte_walker_cb_t cb, void *priv)
{
	u64 *table = (u64 *)addr;
	u64 attrs, last_attrs = 0, last_addr = 0, entry_start = 0;
	int i;
	u64 va_bits = 64 - (tcr & (BIT(6) - 1));
	static enum walker_state state[4] = { 0 };
	static bool exit;

	if (!level) {
		exit = false;
		if (va_bits < 39)
			level = 1;
	}

	state[level] = WALKER_STATE_START;

	/* Walk through the table entries */
	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		u64 pte = table[i];
		u64 _addr = pte & GENMASK_ULL(va_bits, PAGE_SHIFT);

		if (exit)
			return;

		if (pte_type(&pte) == PTE_TYPE_FAULT)
			continue;

		attrs = pte & ALL_ATTRS;
		/* If we're currently inside a block or set of pages */
		if (state[level] > WALKER_STATE_START && state[level] != WALKER_STATE_TABLE) {
			/*
			 * Continue walking if this entry has the same attributes as the last and
			 * is one page/block away -- it's a contiguous region.
			 */
			if (attrs == last_attrs && _addr == last_addr + (1 << level2shift(level))) {
				last_attrs = attrs;
				last_addr = _addr;
				continue;
			} else {
				/* We either hit a table or a new region */
				exit = cb(entry_start, last_addr + (1 << level2shift(level)),
					  va_bits, level, priv);
				if (exit)
					return;
				state[level] = WALKER_STATE_START;
			}
		}
		last_attrs = attrs;
		last_addr = _addr;

		if (PTE_IS_TABLE(pte, level)) {
			/* Data past the end of the table may be garbage */
			if (!_addr || (pte & 0xfff) > 0x3ff)
				return;
			state[level] = WALKER_STATE_TABLE;
			/* Signify the start of a table */
			exit = cb(pte, 0, va_bits, level, priv);
			if (exit)
				return;

			/* Go down a level */
			__pagetable_walk(_addr, tcr, level + 1, cb, priv);
			state[level] = WALKER_STATE_START;
		} else if (pte_type(&pte) == PTE_TYPE_BLOCK || pte_type(&pte) == PTE_TYPE_PAGE) {
			/* We found a block or page, start walking */
			entry_start = pte;
			state[level] = WALKER_STATE_REGION;
		}
	}

	if (state[level] > WALKER_STATE_START)
		exit = cb(entry_start, last_addr + (1 << level2shift(level)), va_bits, level, priv);
}

static void pretty_print_pte_type(u64 pte)
{
	switch (pte_type(&pte)) {
	case PTE_TYPE_FAULT:
		printf(" %-5s", "Fault");
		break;
	case PTE_TYPE_BLOCK:
		printf(" %-5s", "Block");
		break;
	case PTE_TYPE_PAGE:
		printf(" %-5s", "Pages");
		break;
	default:
		printf(" %-5s", "Unk");
	}
}

static void pretty_print_table_attrs(u64 pte)
{
	int ap = (pte & PTE_TABLE_AP) >> 61;

	printf(" | %2s %10s",
	       (ap & 2) ? "RO" : "",
	       (ap & 1) ? "!EL0" : "");
	printf(" | %3s %2s %2s",
	       (pte & PTE_TABLE_PXN) ? "PXN" : "",
	       (pte & PTE_TABLE_XN) ? "XN" : "",
	       (pte & PTE_TABLE_NS) ? "NS" : "");
}

static void pretty_print_block_attrs(u64 pte)
{
	u64 attrs = pte & PMD_ATTRINDX_MASK;
	u64 perm_attrs = pte & PMD_ATTRMASK;
	char mem_attrs[16] = { 0 };
	int cnt = 0;

	if (perm_attrs & PTE_BLOCK_PXN)
		cnt += snprintf(mem_attrs + cnt, sizeof(mem_attrs) - cnt, "PXN ");
	if (perm_attrs & PTE_BLOCK_UXN)
		cnt += snprintf(mem_attrs + cnt, sizeof(mem_attrs) - cnt, "UXN ");
	if (perm_attrs & PTE_BLOCK_RO)
		cnt += snprintf(mem_attrs + cnt, sizeof(mem_attrs) - cnt, "RO");
	if (!mem_attrs[0])
		snprintf(mem_attrs, sizeof(mem_attrs), "RWX ");

	printf(" | %-10s", mem_attrs);

	switch (attrs) {
	case PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE):
		printf(" | %-13s", "Device-nGnRnE");
		break;
	case PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRE):
		printf(" | %-13s", "Device-nGnRE");
		break;
	case PTE_BLOCK_MEMTYPE(MT_DEVICE_GRE):
		printf(" | %-13s", "Device-GRE");
		break;
	case PTE_BLOCK_MEMTYPE(MT_NORMAL_NC):
		printf(" | %-13s", "Normal-NC");
		break;
	case PTE_BLOCK_MEMTYPE(MT_NORMAL):
		printf(" | %-13s", "Normal");
		break;
	default:
		printf(" | %-13s", "Unknown");
	}
}

static void pretty_print_block_memtype(u64 pte)
{
	u64 share = pte & (3 << 8);

	switch (share) {
	case PTE_BLOCK_NON_SHARE:
		printf(" | %-16s", "Non-shareable");
		break;
	case PTE_BLOCK_OUTER_SHARE:
		printf(" | %-16s", "Outer-shareable");
		break;
	case PTE_BLOCK_INNER_SHARE:
		printf(" | %-16s", "Inner-shareable");
		break;
	default:
		printf(" | %-16s", "Unknown");
	}
}

static void print_pte(u64 pte, int level)
{
	if (PTE_IS_TABLE(pte, level)) {
		printf(" %-5s", "Table");
		printf(" %-12s", "|");
		pretty_print_table_attrs(pte);
	} else {
		pretty_print_pte_type(pte);
		pretty_print_block_attrs(pte);
		pretty_print_block_memtype(pte);
	}
	printf("\n");
}

/**
 * pagetable_print_entry() - Callback function to print a single pagetable region
 *
 * This is the default callback used by @dump_pagetable(). It does some basic pretty
 * printing (see example in the U-Boot arm64 documentation). It can be replaced by
 * a custom callback function if more detailed information is needed.
 *
 * @start_attrs: The start address and attributes of the region (or table address)
 * @end: The end address of the region (or 0 if it's a table)
 * @va_bits: The number of bits used for the virtual address
 * @level: The level of the region
 * @priv: Private data for the callback (unused)
 */
static bool pagetable_print_entry(u64 start_attrs, u64 end, int va_bits, int level, void *priv)
{
	u64 _addr = start_attrs & GENMASK_ULL(va_bits, PAGE_SHIFT);
	int indent = va_bits < 39 ? level - 1 : level;

	printf("%*s", indent * 2, "");
	if (PTE_IS_TABLE(start_attrs, level))
		printf("[%#016llx]%19s", _addr, "");
	else
		printf("[%#016llx - %#016llx]", _addr, end);

	printf("%*s | ", (3 - level) * 2, "");
	print_pte(start_attrs, level);

	return false;
}

void walk_pagetable(u64 ttbr, u64 tcr, pte_walker_cb_t cb, void *priv)
{
	__pagetable_walk(ttbr, tcr, 0, cb, priv);
}
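
/*
 * A minimal sketch of a custom callback, counting leaf regions (the
 * callback returns true to abort the walk early):
 *
 *	static bool count_cb(u64 start_attrs, u64 end, int va_bits,
 *			     int level, void *priv)
 *	{
 *		if (end)	// end == 0 marks a table, not a region
 *			(*(int *)priv)++;
 *		return false;
 *	}
 *
 *	int count = 0;
 *	walk_pagetable(gd->arch.tlb_addr, get_tcr(NULL, NULL), count_cb, &count);
 */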

void dump_pagetable(u64 ttbr, u64 tcr)
{
	u64 va_bits = 64 - (tcr & (BIT(6) - 1));

	printf("Walking pagetable at %p, va_bits: %lld. Using %d levels\n", (void *)ttbr,
	       va_bits, va_bits < 39 ? 3 : 4);
	walk_pagetable(ttbr, tcr, pagetable_print_entry, NULL);
}
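
/*
 * Typical use, dumping the page tables U-Boot is currently running on:
 *
 *	dump_pagetable(gd->arch.tlb_addr, get_tcr(NULL, NULL));
 */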

/* Returns the estimated required size of all page tables */
__weak u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size;

	/* Account for all page tables we would need to cover our memory map */
	size = one_pt * count_ranges();

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
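
/*
 * E.g. with 4K tables and count_ranges() == 10 this yields 10 * 4K,
 * doubled for the emergency copy, plus 4 spare tables: 96K total.
 */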

void setup_pgtables(void)
{
	int i;

	if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
		panic("Page table pointer not setup.");

	/*
	 * Allocate the first level we're on with invalid entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
	 * Lv0 page table, otherwise it's the entry Lv1 page table.
	 */
	create_table();

	/* Now add all MMU table entries one after another to the table */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		add_map(&mem_map[i]);
}

static void setup_all_pgtables(void)
{
	u64 tlb_addr = gd->arch.tlb_addr;
	u64 tlb_size = gd->arch.tlb_size;

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
			     (uintptr_t)gd->arch.tlb_addr;
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	setup_pgtables();
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	gd->arch.tlb_addr = tlb_addr;
	gd->arch.tlb_size = tlb_size;
}

/* To activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
	int el;

	/* Set up page tables only once */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(NULL, NULL),
			  MEMORY_ATTRIBUTES);

	/* Enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
#ifndef CONFIG_CMO_BY_VA_ONLY
	__asm_invalidate_dcache_all();
	__asm_invalidate_l3_dcache();
#else
	apply_cmo_to_mappings(invalidate_dcache_range);
#endif
}

/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using the stack.
 * __asm_flush_l3_dcache returns the timeout status.
 */
inline void flush_dcache_all(void)
{
#ifndef CONFIG_CMO_BY_VA_ONLY
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_dcache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("flushing dcache successfully.\n");
#else
	apply_cmo_to_mappings(flush_dcache_range);
#endif
}

#ifndef CONFIG_SYS_DISABLE_DCACHE_OPS
/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_invalidate_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}
#else
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}
#endif /* CONFIG_SYS_DISABLE_DCACHE_OPS */

void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	/* Set up page tables only once (it is done also by mmu_setup()) */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	set_sctlr(get_sctlr() | CR_C);
}

void dcache_disable(void)
{
	unsigned long sctlr;

	sctlr = get_sctlr();

	/* If the cache isn't enabled there is no need to disable it */
	if (!(sctlr & CR_C))
		return;

	if (IS_ENABLED(CONFIG_CMO_BY_VA_ONLY)) {
		/*
		 * When invalidating by VA, do it *before* turning the MMU
		 * off, so that at least our stack is coherent.
		 */
		flush_dcache_all();
	}

	set_sctlr(sctlr & ~(CR_C|CR_M));

	if (!IS_ENABLED(CONFIG_CMO_BY_VA_ONLY))
		flush_dcache_all();

	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");

	return NULL;
}

static bool is_aligned(u64 addr, u64 size, u64 align)
{
	return !(addr & (align - 1)) && !(size & (align - 1));
}

/* Use flag to indicate if attrs has more than d-cache attributes */
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 *pte = find_pte(start, level);

	/* Can we just modify the current level block PTE? */
	if (is_aligned(start, size, levelsize)) {
		if (flag) {
			*pte &= ~PMD_ATTRMASK;
			*pte |= attrs & PMD_ATTRMASK;
		} else {
			*pte &= ~PMD_ATTRINDX_MASK;
			*pte |= attrs & PMD_ATTRINDX_MASK;
		}
		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

		return levelsize;
	}

	/* Unaligned or doesn't fit, maybe split block into table */
	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

	/* Maybe we need to split the block into a table */
	if (pte_type(pte) == PTE_TYPE_BLOCK)
		split_block(pte, level);

	/* And then double-check it became a table or already is one */
	if (pte_type(pte) != PTE_TYPE_TABLE)
		panic("PTE %p (%llx) for addr=%llx should be a table",
		      pte, *pte, start);

	/* Roll on to the next page table level */
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 attrs = PMD_ATTRINDX(option >> 2);
	u64 real_start = start;
	u64 real_size = size;

	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

	if (!gd->arch.tlb_emerg)
		panic("Emergency page table not setup.");

	/*
	 * We can not modify page tables that we're currently running on,
	 * so we first need to switch to the "emergency" page tables where
	 * we can safely modify our primary page tables and then switch back
	 */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	while (size > 0) {
		int level;
		u64 r;

		for (level = 1; level < 4; level++) {
			/* Set d-cache attributes only */
			r = set_one_region(start, size, attrs, false, level);
			if (r) {
				/* PTE successfully replaced */
				size -= r;
				start += r;
				break;
			}
		}
	}

	/* We're done modifying page tables, switch back to our primary ones */
	__asm_switch_ttbr(gd->arch.tlb_addr);

	/*
	 * Make sure there's nothing stale in dcache for a region that might
	 * have caches off now
	 */
	flush_dcache_range(real_start, real_start + real_size);
}
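
/*
 * Example (address is illustrative): mark 16M of frame buffer as uncached:
 *
 *	mmu_set_region_dcache_behaviour(0x80000000, SZ_16M, DCACHE_OFF);
 */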

void mmu_change_region_attr_nobreak(phys_addr_t addr, size_t siz, u64 attrs)
{
	int level;
	u64 r, size, start;

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set the new permissions
	 */
	start = addr;
	size = siz;
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to new attributes */
			r = set_one_region(start, size, attrs, true, level);
			if (r) {
				/* PTE successfully updated */
				size -= r;
				start += r;
				break;
			}
		}
	}
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();
}

/*
 * Modify the MMU tables for a region with updated PXN/UXN/memory type/valid
 * bits. The process is break-before-make: the target region is marked
 * invalid while the change is made.
 */
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
{
	int level;
	u64 r, size, start;

	start = addr;
	size = siz;
	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to "invalid".
	 */
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to fault */
			r = set_one_region(start, size, PTE_TYPE_FAULT, true,
					   level);
			if (r) {
				/* PTE successfully invalidated */
				size -= r;
				start += r;
				break;
			}
		}
	}

	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();

	mmu_change_region_attr_nobreak(addr, siz, attrs);
}

int pgprot_set_attrs(phys_addr_t addr, size_t size, enum pgprot_attrs perm)
{
	u64 attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_INNER_SHARE | PTE_TYPE_VALID;

	switch (perm) {
	case MMU_ATTR_RO:
		attrs |= PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_RO;
		break;
	case MMU_ATTR_RX:
		attrs |= PTE_BLOCK_RO;
		break;
	case MMU_ATTR_RW:
		attrs |= PTE_BLOCK_PXN | PTE_BLOCK_UXN;
		break;
	default:
		log_err("Unknown attribute %d\n", perm);
		return -EINVAL;
	}

	mmu_change_region_attr_nobreak(addr, size, attrs);

	return 0;
}
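
/*
 * Example (region and symbols are illustrative): make a text segment
 * read-only but executable:
 *
 *	pgprot_set_attrs(text_start, text_size, MMU_ATTR_RX);
 */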

#else	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

/*
 * For SPL builds, we may want to not have dcache enabled. Any real U-Boot
 * running, however, really wants to have dcache and the MMU active. Check
 * that everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_XPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF)

void icache_enable(void)
{
	invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

int mmu_status(void)
{
	return (get_sctlr() & CR_M) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
	__asm_invalidate_l3_icache();
}

#else	/* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

int mmu_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

/*
 * Enable dCache & iCache. Whether the caches are actually enabled depends
 * on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF.
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}

void arch_dump_mem_attrs(void)
{
	dump_pagetable(gd->arch.tlb_addr, get_tcr(NULL, NULL));
}