// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * (C) Copyright 2016
 * Alexander Graf <agraf@suse.de>
 */

#include <common.h>
#include <cpu_func.h>
#include <hang.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)

/*
 * With 4k page granule, a virtual address is split into 4 lookup parts
 * spanning 9 bits each:
 *
 *    _______________________________________________
 *   |       |       |       |       |       |       |
 *   |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *   |_______|_______|_______|_______|_______|_______|
 *     63-48   47-39   38-30   29-21   20-12   11-00
 *
 *             mask        page size
 *
 * Lv0: FF8000000000       --
 * Lv1:   7FC0000000       1G
 * Lv2:     3FE00000       2M
 * Lv3:       1FF000       4K
 * off:          FFF
 */

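/*
 * Worked example: VA 0x40200000 (1 GiB + 2 MiB) decodes as
 *
 *	Lv0 idx = (va >> 39) & 0x1FF = 0
 *	Lv1 idx = (va >> 30) & 0x1FF = 1
 *	Lv2 idx = (va >> 21) & 0x1FF = 1
 *	Lv3 idx = (va >> 12) & 0x1FF = 0
 *	off     =  va & 0xFFF        = 0
 *
 * so a 2M block entry covering it sits at index 1 of the Lv2 table
 * reached via Lv0[0] and Lv1[1].
 */
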
static int get_effective_el(void)
{
	int el = current_el();

	if (el == 2) {
		u64 hcr_el2;

		/*
		 * If we are using the EL2&0 translation regime, the TCR_EL2
		 * looks like the EL1 version, even though we are in EL2.
		 */
		__asm__ ("mrs %0, HCR_EL2\n" : "=r" (hcr_el2));
		if (hcr_el2 & BIT(HCR_EL2_E2H_BIT))
			return 1;
	}

	return el;
}

u64 get_tcr(u64 *pips, u64 *pva_bits)
{
	int el = get_effective_el();
	u64 max_addr = 0;
	u64 ips, va_bits;
	u64 tcr;
	int i;

	/* Find the largest address we need to support */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);

	/* Calculate the maximum physical (and thus virtual) address */
	if (max_addr > (1ULL << 44)) {
		ips = 5;
		va_bits = 48;
	} else if (max_addr > (1ULL << 42)) {
		ips = 4;
		va_bits = 44;
	} else if (max_addr > (1ULL << 40)) {
		ips = 3;
		va_bits = 42;
	} else if (max_addr > (1ULL << 36)) {
		ips = 2;
		va_bits = 40;
	} else if (max_addr > (1ULL << 32)) {
		ips = 1;
		va_bits = 36;
	} else {
		ips = 0;
		va_bits = 32;
	}

	if (el == 1) {
		tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
		if (gd->arch.has_hafdbs)
			tcr |= TCR_HA | TCR_HD;
	} else if (el == 2) {
		tcr = TCR_EL2_RSVD | (ips << 16);
	} else {
		tcr = TCR_EL3_RSVD | (ips << 16);
	}

	/* PTWs cacheable, inner/outer WBWA and inner shareable */
	tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
	tcr |= TCR_T0SZ(va_bits);

	if (pips)
		*pips = ips;
	if (pva_bits)
		*pva_bits = va_bits;

	return tcr;
}
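
/*
 * Example: a mem_map whose highest address is below 4 GiB ends up with
 * ips = 0 and va_bits = 32, i.e. TCR is programmed with a 32-bit output
 * address size and T0SZ = 64 - 32 = 32.
 */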

#define MAX_PTE_ENTRIES 512

static int pte_type(u64 *pte)
{
	return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
	/* Page offset is 12 bits wide, every level translates 9 bits */
	return (12 + 9 * (3 - level));
}

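/*
 * With the 4k granule this gives: level 0 -> shift 39 (512G per entry),
 * level 1 -> 30 (1G), level 2 -> 21 (2M), level 3 -> 12 (4K).
 */
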
static u64 *find_pte(u64 addr, int level)
{
	int start_level = 0;
	u64 *pte;
	u64 idx;
	u64 va_bits;
	int i;

	debug("addr=%llx level=%d\n", addr, level);

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	if (level < start_level)
		return NULL;

	/* Walk through all page table levels to find our PTE */
	pte = (u64 *)gd->arch.tlb_addr;
	for (i = start_level; i < 4; i++) {
		idx = (addr >> level2shift(i)) & 0x1FF;
		pte += idx;
		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

		/* Found it */
		if (i == level)
			return pte;
		/* PTE is not a table (either invalid or block), can't traverse */
		if (pte_type(pte) != PTE_TYPE_TABLE)
			return NULL;
		/* Off to the next level */
		pte = (u64 *)(*pte & 0x0000fffffffff000ULL);
	}

	/* Should never reach here */
	return NULL;
}

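/*
 * Example (assuming va_bits >= 39, i.e. start_level 0):
 * find_pte(0x40200000, 2) walks Lv0[0] and Lv1[1] and returns a pointer
 * to the level 2 entry; it returns NULL if a higher level already maps
 * the address with a block, or if the address is unmapped.
 */
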
#ifdef CONFIG_CMO_BY_VA_ONLY
static void __cmo_on_leaves(void (*cmo_fn)(unsigned long, unsigned long),
			    u64 pte, int level, u64 base)
{
	u64 *ptep;
	int i;

	ptep = (u64 *)(pte & GENMASK_ULL(47, PAGE_SHIFT));
	for (i = 0; i < PAGE_SIZE / sizeof(u64); i++) {
		u64 end, va = base + i * BIT(level2shift(level));
		u64 type, attrs;

		pte = ptep[i];
		type = pte & PTE_TYPE_MASK;
		attrs = pte & PMD_ATTRINDX_MASK;
		debug("PTE %llx at level %d VA %llx\n", pte, level, va);

		/* Not valid? next! */
		if (!(type & PTE_TYPE_VALID))
			continue;

		/* Not a leaf? Recurse on the next level */
		if (!(type == PTE_TYPE_BLOCK ||
		      (level == 3 && type == PTE_TYPE_PAGE))) {
			__cmo_on_leaves(cmo_fn, pte, level + 1, va);
			continue;
		}

		/*
		 * From this point on, this must be a leaf.
		 *
		 * Start excluding non-memory mappings
		 */
		if (attrs != PTE_BLOCK_MEMTYPE(MT_NORMAL) &&
		    attrs != PTE_BLOCK_MEMTYPE(MT_NORMAL_NC))
			continue;

		if (gd->arch.has_hafdbs && (pte & (PTE_RDONLY | PTE_DBM)) != PTE_DBM)
			continue;

		end = va + BIT(level2shift(level)) - 1;

		/* No intersection with RAM? */
		if (end < gd->ram_base ||
		    va >= (gd->ram_base + gd->ram_size))
			continue;

		/*
		 * OK, we have a partial RAM mapping. However, this
		 * can cover *more* than the RAM. Yes, u-boot is
		 * *that* braindead. Compute the intersection we care
		 * about, and not a byte more.
		 */
		va = max(va, (u64)gd->ram_base);
		end = min(end, gd->ram_base + gd->ram_size);

		debug("Flush PTE %llx at level %d: %llx-%llx\n",
		      pte, level, va, end);
		cmo_fn(va, end);
	}
}

static void apply_cmo_to_mappings(void (*cmo_fn)(unsigned long, unsigned long))
{
	u64 va_bits;
	int sl = 0;

	if (!gd->arch.tlb_addr)
		return;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		sl = 1;

	__cmo_on_leaves(cmo_fn, gd->arch.tlb_addr, sl, 0);
}
#else
static inline void apply_cmo_to_mappings(void *dummy) {}
#endif

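/*
 * invalidate_dcache_all() and flush_dcache_all() below use this as, e.g.,
 * apply_cmo_to_mappings(flush_dcache_range), so that cache maintenance by
 * VA touches exactly the RAM-backed leaf mappings and nothing else.
 */
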
/* Creates and returns a new full table (512 entries) */
static u64 *create_table(void)
{
	u64 *new_table = (u64 *)gd->arch.tlb_fillptr;
	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

	/* Allocate MAX_PTE_ENTRIES pte entries */
	gd->arch.tlb_fillptr += pt_len;

	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
		      "Please increase the size in get_page_table_size()",
		      gd->arch.tlb_fillptr - gd->arch.tlb_addr,
		      gd->arch.tlb_size);

	/* Mark all entries as invalid */
	memset(new_table, 0, pt_len);

	return new_table;
}

static void set_pte_table(u64 *pte, u64 *table)
{
	/* Point *pte to the new table */
	debug("Setting %p to addr=%p\n", pte, table);
	*pte = PTE_TYPE_TABLE | (ulong)table;
}

/* Splits a block PTE into a table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
	u64 old_pte = *pte;
	u64 *new_table;
	u64 i = 0;
	/* level describes the parent level, we need the child ones */
	int levelshift = level2shift(level + 1);

	if (pte_type(pte) != PTE_TYPE_BLOCK)
		panic("PTE %p (%llx) is not a block. Some driver code wants to "
		      "modify dcache settings for a range not covered in "
		      "mem_map.", pte, old_pte);

	new_table = create_table();
	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		new_table[i] = old_pte | (i << levelshift);

		/* Level 3 block PTEs have the table type */
		if ((level + 1) == 3)
			new_table[i] |= PTE_TYPE_TABLE;

		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
	}

	/* Set the new table into effect */
	set_pte_table(pte, new_table);
}

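/*
 * Example: splitting a 2M block at level 2 yields a level 3 table whose
 * 512 entries map the same 2M as 4K pages with identical attributes;
 * individual pages can then be given new attributes afterwards.
 */
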
static void map_range(u64 virt, u64 phys, u64 size, int level,
		      u64 *table, u64 attrs)
{
	u64 map_size = BIT_ULL(level2shift(level));
	int i, idx;

	idx = (virt >> level2shift(level)) & (MAX_PTE_ENTRIES - 1);
	for (i = idx; size; i++) {
		u64 next_size, *next_table;

		if (level >= 1 &&
		    size >= map_size && !(virt & (map_size - 1))) {
			if (level == 3)
				table[i] = phys | attrs | PTE_TYPE_PAGE;
			else
				table[i] = phys | attrs;

			virt += map_size;
			phys += map_size;
			size -= map_size;

			continue;
		}

		/* Going one level down */
		if (pte_type(&table[i]) == PTE_TYPE_FAULT)
			set_pte_table(&table[i], create_table());

		next_table = (u64 *)(table[i] & GENMASK_ULL(47, PAGE_SHIFT));
		next_size = min(map_size - (virt & (map_size - 1)), size);

		map_range(virt, phys, next_size, level + 1, next_table, attrs);

		virt += next_size;
		phys += next_size;
		size -= next_size;
	}
}

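/*
 * Example: mapping 3M at a 2M-aligned VA from level 2 emits one 2M block
 * entry for the first 2M, then recurses to level 3 for the remaining 1M,
 * which becomes 256 4K page entries in a freshly created table.
 */
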
static void add_map(struct mm_region *map)
{
	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	u64 va_bits;
	int level = 0;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		level = 1;

	if (gd->arch.has_hafdbs)
		attrs |= PTE_DBM | PTE_RDONLY;

	map_range(map->virt, map->phys, map->size, level,
		  (u64 *)gd->arch.tlb_addr, attrs);
}

static void count_range(u64 virt, u64 size, int level, int *cntp)
{
	u64 map_size = BIT_ULL(level2shift(level));
	int i, idx;

	idx = (virt >> level2shift(level)) & (MAX_PTE_ENTRIES - 1);
	for (i = idx; size; i++) {
		u64 next_size;

		if (level >= 1 &&
		    size >= map_size && !(virt & (map_size - 1))) {
			virt += map_size;
			size -= map_size;

			continue;
		}

		/* Going one level down */
		(*cntp)++;
		next_size = min(map_size - (virt & (map_size - 1)), size);

		count_range(virt, next_size, level + 1, cntp);

		virt += next_size;
		size -= next_size;
	}
}

static int count_ranges(void)
{
	int i, count = 0, level = 0;
	u64 va_bits;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		level = 1;

	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		count_range(mem_map[i].virt, mem_map[i].size, level, &count);

	return count;
}

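/*
 * count_range() descends exactly like map_range() but only counts the
 * tables that descent would allocate, so count_ranges() is in effect a
 * dry run of setup_pgtables() for sizing purposes.
 */
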
/* Returns the estimated required size of all page tables */
__weak u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size, mmfr1;

	asm volatile("mrs %0, id_aa64mmfr1_el1" : "=r" (mmfr1));
	if ((mmfr1 & 0xf) == 2)
		gd->arch.has_hafdbs = true;
	else
		gd->arch.has_hafdbs = false;

	/* Account for all page tables we would need to cover our memory map */
	size = one_pt * count_ranges();

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}

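/*
 * Example sizing: one table is 512 * 8 = 4 KiB, so a memory map for which
 * count_ranges() reports four tables reserves 4 * 4K * 2 + 4 * 4K = 48 KiB.
 */
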
void setup_pgtables(void)
{
	int i;

	if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
		panic("Page table pointer not setup.");

	/*
	 * Allocate the first level we're on with invalid entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
	 * Lv0 page table, otherwise it's the entry Lv1 page table.
	 */
	create_table();

	/* Now add all MMU table entries one after another to the table */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		add_map(&mem_map[i]);
}

static void setup_all_pgtables(void)
{
	u64 tlb_addr = gd->arch.tlb_addr;
	u64 tlb_size = gd->arch.tlb_size;

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
			     (uintptr_t)gd->arch.tlb_addr;
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	setup_pgtables();
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	gd->arch.tlb_addr = tlb_addr;
	gd->arch.tlb_size = tlb_size;
}

/* to activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
	int el;

	/* Set up page tables only once */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(NULL, NULL),
			  MEMORY_ATTRIBUTES);

	/* enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
#ifndef CONFIG_CMO_BY_VA_ONLY
	__asm_invalidate_dcache_all();
	__asm_invalidate_l3_dcache();
#else
	apply_cmo_to_mappings(invalidate_dcache_range);
#endif
}

/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using the stack.
 * __asm_flush_l3_dcache() returns the timeout status.
 */
inline void flush_dcache_all(void)
{
#ifndef CONFIG_CMO_BY_VA_ONLY
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_dcache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("flushing dcache successfully.\n");
#else
	apply_cmo_to_mappings(flush_dcache_range);
#endif
}

#ifndef CONFIG_SYS_DISABLE_DCACHE_OPS
/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_invalidate_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}
#else
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}
#endif /* CONFIG_SYS_DISABLE_DCACHE_OPS */

void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	/* Set up page tables only once (it is done also by mmu_setup()) */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	set_sctlr(get_sctlr() | CR_C);
}

void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* If the cache isn't enabled there is no need to disable it */
	if (!(sctlr & CR_C))
		return;

	if (IS_ENABLED(CONFIG_CMO_BY_VA_ONLY)) {
		/*
		 * When invalidating by VA, do it *before* turning the MMU
		 * off, so that at least our stack is coherent.
		 */
		flush_dcache_all();
	}

	set_sctlr(sctlr & ~(CR_C|CR_M));

	if (!IS_ENABLED(CONFIG_CMO_BY_VA_ONLY))
		flush_dcache_all();

	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");

	return NULL;
}

static bool is_aligned(u64 addr, u64 size, u64 align)
{
	return !(addr & (align - 1)) && !(size & (align - 1));
}

/* Use flag to indicate if attrs has more than d-cache attributes */
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 *pte = find_pte(start, level);

	/* Can we just modify the current level block PTE? */
	if (is_aligned(start, size, levelsize)) {
		if (flag) {
			*pte &= ~PMD_ATTRMASK;
			*pte |= attrs & PMD_ATTRMASK;
		} else {
			*pte &= ~PMD_ATTRINDX_MASK;
			*pte |= attrs & PMD_ATTRINDX_MASK;
		}
		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

		return levelsize;
	}

	/* Unaligned or doesn't fit, maybe split the block into a table */
	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

	/* Maybe we need to split the block into a table */
	if (pte_type(pte) == PTE_TYPE_BLOCK)
		split_block(pte, level);

	/* And then double-check it became a table or already is one */
	if (pte_type(pte) != PTE_TYPE_TABLE)
		panic("PTE %p (%llx) for addr=%llx should be a table",
		      pte, *pte, start);

	/* Roll on to the next page table level */
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 attrs = PMD_ATTRINDX(option >> 2);
	u64 real_start = start;
	u64 real_size = size;

	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

	if (!gd->arch.tlb_emerg)
		panic("Emergency page table not setup.");

	/*
	 * We cannot modify page tables that we're currently running on,
	 * so we first need to switch to the "emergency" page tables where
	 * we can safely modify our primary page tables and then switch back
	 */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	/*
	 * Loop through the address range until we find a page granule that
	 * fits our alignment constraints, then set it to the new cache
	 * attributes
	 */
	while (size > 0) {
		int level;
		u64 r;

		for (level = 1; level < 4; level++) {
			/* Set d-cache attributes only */
			r = set_one_region(start, size, attrs, false, level);
			if (r) {
				/* PTE successfully replaced */
				size -= r;
				start += r;
				break;
			}
		}
	}

	/* We're done modifying page tables, switch back to our primary ones */
	__asm_switch_ttbr(gd->arch.tlb_addr);

	/*
	 * Make sure there's nothing stale in dcache for a region that might
	 * have caches off now
	 */
	flush_dcache_range(real_start, real_start + real_size);
}

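/*
 * Caller sketch (names hypothetical): a display driver that wants an
 * uncached framebuffer would do
 *
 *	mmu_set_region_dcache_behaviour(fb_base, fb_size, DCACHE_OFF);
 *
 * with fb_base/fb_size at least 4K-aligned so the loop above can find
 * page granules that fit.
 */
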
/*
 * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
 * The process is break-before-make: the target region will be marked as
 * invalid while it is being changed.
 */
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
{
	int level;
	u64 r, size, start;

	start = addr;
	size = siz;
	/*
	 * Loop through the address range until we find a page granule that
	 * fits our alignment constraints, then set it to "invalid".
	 */
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to fault */
			r = set_one_region(start, size, PTE_TYPE_FAULT, true,
					   level);
			if (r) {
				/* PTE successfully invalidated */
				size -= r;
				start += r;
				break;
			}
		}
	}

	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();

	/*
	 * Loop through the address range until we find a page granule that
	 * fits our alignment constraints, then set it to the new cache
	 * attributes
	 */
	start = addr;
	size = siz;
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to new attributes */
			r = set_one_region(start, size, attrs, true, level);
			if (r) {
				/* PTE successfully updated */
				size -= r;
				start += r;
				break;
			}
		}
	}
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();
}

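/*
 * Caller sketch (illustrative): remapping a region as normal, never-
 * executable memory could pass
 *
 *	mmu_change_region_attr(base, size,
 *			       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
 *			       PTE_BLOCK_PXN | PTE_BLOCK_UXN |
 *			       PTE_TYPE_VALID);
 *
 * set_one_region() runs with flag = true here, so all bits under
 * PMD_ATTRMASK (PXN/UXN/memory type/valid) are replaced.
 */
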
#else	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

/*
 * For SPL builds, we may want to not have dcache enabled. Any real U-Boot
 * running however really wants to have dcache and the MMU active. Check that
 * everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_SPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF)

void icache_enable(void)
{
	invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

int mmu_status(void)
{
	return (get_sctlr() & CR_M) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
	__asm_invalidate_l3_icache();
}

#else	/* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

int mmu_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif	/* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

/*
 * Enable dCache & iCache; whether the caches are actually enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}
857}