// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * (C) Copyright 2016
 * Alexander Graf <agraf@suse.de>
 */

#include <common.h>
#include <cpu_func.h>
#include <hang.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)

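/*
 * All loops over mem_map[] below rely on the board providing a
 * struct mm_region array terminated by an entry whose size and attrs
 * are both zero. A minimal, illustrative map (names and addresses are
 * examples only) might look like:
 *
 *	struct mm_region example_mem_map[] = {
 *		{
 *			.virt = 0x0UL,
 *			.phys = 0x0UL,
 *			.size = 0x80000000UL,
 *			.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
 *				 PTE_BLOCK_INNER_SHARE
 *		},
 *		{ 0 }	(terminating entry)
 *	};
 */
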
/*
 * With 4k page granule, a virtual address is split into 4 lookup parts
 * spanning 9 bits each:
 *
 *    _______________________________________________
 *   |       |       |       |       |       |       |
 *   |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *   |_______|_______|_______|_______|_______|_______|
 *     63-48   47-39   38-30   29-21   20-12   11-00
 *
 *             mask        page size
 *
 * Lv0: FF8000000000       --
 * Lv1:   7FC0000000       1G
 * Lv2:     3FE00000       2M
 * Lv3:       1FF000       4K
 * off:          FFF
 */
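
/*
 * Example: VA 0x40001234 is looked up as Lv0 index 0, Lv1 index 1,
 * Lv2 index 0, Lv3 index 1, offset 0x234, i.e. offset 0x234 into the
 * second 4K page above the 1 GB boundary.
 */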
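/*
 * Returns the exception level whose translation regime is in effect:
 * at EL2 with HCR_EL2.E2H set, the EL2&0 regime applies and TCR_EL2
 * has the EL1-style layout, so EL1 is reported in that case.
 */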
static int get_effective_el(void)
{
	int el = current_el();

	if (el == 2) {
		u64 hcr_el2;

		/*
		 * If we are using the EL2&0 translation regime, the TCR_EL2
		 * looks like the EL1 version, even though we are in EL2.
		 */
		__asm__ ("mrs %0, HCR_EL2\n" : "=r" (hcr_el2));
		if (hcr_el2 & BIT(HCR_EL2_E2H_BIT))
			return 1;
	}

	return el;
}
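/*
 * Compute the TCR value for the current (effective) exception level,
 * sized so that the largest address in mem_map[] is covered; e.g. a map
 * topping out at 8 GB yields ips = 1 and va_bits = 36. Optionally
 * reports the IPS field and VA width through *pips and *pva_bits.
 */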
u64 get_tcr(u64 *pips, u64 *pva_bits)
{
	int el = get_effective_el();
	u64 max_addr = 0;
	u64 ips, va_bits;
	u64 tcr;
	int i;

	/* Find the largest address we need to support */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);

	/* Calculate the maximum physical (and thus virtual) address */
	if (max_addr > (1ULL << 44)) {
		ips = 5;
		va_bits = 48;
	} else if (max_addr > (1ULL << 42)) {
		ips = 4;
		va_bits = 44;
	} else if (max_addr > (1ULL << 40)) {
		ips = 3;
		va_bits = 42;
	} else if (max_addr > (1ULL << 36)) {
		ips = 2;
		va_bits = 40;
	} else if (max_addr > (1ULL << 32)) {
		ips = 1;
		va_bits = 36;
	} else {
		ips = 0;
		va_bits = 32;
	}

	if (el == 1) {
		tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
	} else if (el == 2) {
		tcr = TCR_EL2_RSVD | (ips << 16);
	} else {
		tcr = TCR_EL3_RSVD | (ips << 16);
	}

	/* PTWs cacheable, inner/outer WBWA and inner shareable */
	tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
	tcr |= TCR_T0SZ(va_bits);

	if (pips)
		*pips = ips;
	if (pva_bits)
		*pva_bits = va_bits;

	return tcr;
}

#define MAX_PTE_ENTRIES	512

static int pte_type(u64 *pte)
{
	return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
	/* The page offset is 12 bits wide, every level translates 9 bits */
	return (12 + 9 * (3 - level));
}
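/*
 * Walks the page tables from the root and returns a pointer to the PTE
 * describing <addr> at <level>, or NULL if <level> is above the starting
 * lookup level or the walk hits a block or invalid entry first.
 */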
static u64 *find_pte(u64 addr, int level)
{
	int start_level = 0;
	u64 *pte;
	u64 idx;
	u64 va_bits;
	int i;

	debug("addr=%llx level=%d\n", addr, level);

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	if (level < start_level)
		return NULL;

	/* Walk through all page table levels to find our PTE */
	pte = (u64*)gd->arch.tlb_addr;
	for (i = start_level; i < 4; i++) {
		idx = (addr >> level2shift(i)) & 0x1FF;
		pte += idx;
		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

		/* Found it */
		if (i == level)
			return pte;
		/* PTE is not a table (either invalid or block), can't traverse */
		if (pte_type(pte) != PTE_TYPE_TABLE)
			return NULL;
		/* Off to the next level */
		pte = (u64*)(*pte & 0x0000fffffffff000ULL);
	}

	/* Should never reach here */
	return NULL;
}

/* Creates and returns a new full table (512 entries) */
static u64 *create_table(void)
{
	u64 *new_table = (u64*)gd->arch.tlb_fillptr;
	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

	/* Allocate MAX_PTE_ENTRIES pte entries */
	gd->arch.tlb_fillptr += pt_len;

	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
		      "Please increase the size in get_page_table_size()",
			gd->arch.tlb_fillptr - gd->arch.tlb_addr,
			gd->arch.tlb_size);

	/* Mark all entries as invalid */
	memset(new_table, 0, pt_len);

	return new_table;
}

static void set_pte_table(u64 *pte, u64 *table)
{
	/* Point *pte to the new table */
	debug("Setting %p to addr=%p\n", pte, table);
	*pte = PTE_TYPE_TABLE | (ulong)table;
}

/* Splits a block PTE into a table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
	u64 old_pte = *pte;
	u64 *new_table;
	u64 i = 0;
	/* level describes the parent level, we need the child ones */
	int levelshift = level2shift(level + 1);

	if (pte_type(pte) != PTE_TYPE_BLOCK)
		panic("PTE %p (%llx) is not a block. Some driver code wants to "
		      "modify dcache settings for a range not covered in "
		      "mem_map.", pte, old_pte);

	new_table = create_table();
	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		new_table[i] = old_pte | (i << levelshift);

		/* Level 3 block PTEs have the table type */
		if ((level + 1) == 3)
			new_table[i] |= PTE_TYPE_TABLE;

		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
	}

	/* Set the new table into effect */
	set_pte_table(pte, new_table);
}

/* Add one mm_region map entry to the page tables */
static void add_map(struct mm_region *map)
{
	u64 *pte;
	u64 virt = map->virt;
	u64 phys = map->phys;
	u64 size = map->size;
	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	u64 blocksize;
	int level;
	u64 *new_table;

	while (size) {
		pte = find_pte(virt, 0);
		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
			debug("Creating table for virt 0x%llx\n", virt);
			new_table = create_table();
			set_pte_table(pte, new_table);
		}

		for (level = 1; level < 4; level++) {
			pte = find_pte(virt, level);
			if (!pte)
				panic("pte not found\n");

			blocksize = 1ULL << level2shift(level);
			debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
			      virt, size, blocksize);
			if (size >= blocksize && !(virt & (blocksize - 1))) {
				/* Page fits, create block PTE */
				debug("Setting PTE %p to block virt=%llx\n",
				      pte, virt);
				if (level == 3)
					*pte = phys | attrs | PTE_TYPE_PAGE;
				else
					*pte = phys | attrs;
				virt += blocksize;
				phys += blocksize;
				size -= blocksize;
				break;
			} else if (pte_type(pte) == PTE_TYPE_FAULT) {
				/* Page doesn't fit, create subpages */
				debug("Creating subtable for virt 0x%llx blksize=%llx\n",
				      virt, blocksize);
				new_table = create_table();
				set_pte_table(pte, new_table);
			} else if (pte_type(pte) == PTE_TYPE_BLOCK) {
				debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n",
				      virt, blocksize);
				split_block(pte, level);
			}
		}
	}
}

enum pte_type {
	PTE_INVAL,
	PTE_BLOCK,
	PTE_LEVEL,
};

/*
 * This is a recursively called function to count the number of
 * page tables we need to cover a particular PTE range. If you
 * call this with level = -1 you basically get the full 48 bit
 * coverage.
 */
static int count_required_pts(u64 addr, int level, u64 maxaddr)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 levelmask = levelsize - 1;
	u64 levelend = addr + levelsize;
	int r = 0;
	int i;
	enum pte_type pte_type = PTE_INVAL;

	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
		struct mm_region *map = &mem_map[i];
		u64 start = map->virt;
		u64 end = start + map->size;

		/* Check if the PTE would overlap with the map */
		if (max(addr, start) <= min(levelend, end)) {
			start = max(addr, start);
			end = min(levelend, end);

			/* We need a sub-pt for this level */
			if ((start & levelmask) || (end & levelmask)) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* Lv0 cannot do block PTEs, so do levels here too */
			if (level <= 0) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* PTE is active, but fits into a block */
			pte_type = PTE_BLOCK;
		}
	}

	/*
	 * Block PTEs at this level are already covered by the parent page
	 * table, so we only need to count sub page tables.
	 */
	if (pte_type == PTE_LEVEL) {
		int sublevel = level + 1;
		u64 sublevelsize = 1ULL << level2shift(sublevel);

		/* Account for the new sub page table ... */
		r = 1;

		/* ... and for all child page tables that one might have */
		for (i = 0; i < MAX_PTE_ENTRIES; i++) {
			r += count_required_pts(addr, sublevel, maxaddr);
			addr += sublevelsize;

			if (addr >= maxaddr) {
				/*
				 * We reached the end of address space, no need
				 * to look any further.
				 */
				break;
			}
		}
	}

	return r;
}

/* Returns the estimated required size of all page tables */
__weak u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;
	u64 va_bits;
	int start_level = 0;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	/* Account for all page tables we would need to cover our memory map */
	size = one_pt * count_required_pts(0, start_level - 1, 1ULL << va_bits);

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
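/*
 * Build one complete set of page tables for mem_map[] at the current
 * fill pointer, starting from an empty root table.
 */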
void setup_pgtables(void)
{
	int i;

	if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
		panic("Page table pointer not setup.");

	/*
	 * Allocate the first level we're on with invalid entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
	 * Lv0 page table, otherwise it's the entry Lv1 page table.
	 */
	create_table();

	/* Now add all MMU table entries one after another to the table */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		add_map(&mem_map[i]);
}
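/*
 * Create the primary page tables plus a second, "emergency" copy that we
 * can run from while modifying the primary ones (see
 * mmu_set_region_dcache_behaviour()).
 */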
static void setup_all_pgtables(void)
{
	u64 tlb_addr = gd->arch.tlb_addr;
	u64 tlb_size = gd->arch.tlb_size;

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
			     (uintptr_t)gd->arch.tlb_addr;
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	setup_pgtables();
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	gd->arch.tlb_addr = tlb_addr;
	gd->arch.tlb_size = tlb_size;
}

/* to activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
	int el;

	/* Set up page tables only once */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(NULL, NULL),
			  MEMORY_ATTRIBUTES);

	/* enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
	__asm_invalidate_l3_dcache();
}

/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using stack.
 * __asm_flush_l3_dcache returns the timeout status.
 */
inline void flush_dcache_all(void)
{
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_dcache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("flushing dcache successfully.\n");
}

#ifndef CONFIG_SYS_DISABLE_DCACHE_OPS
/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_invalidate_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}
#else
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}
#endif /* CONFIG_SYS_DISABLE_DCACHE_OPS */

void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	set_sctlr(get_sctlr() | CR_C);
}

void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* if cache isn't enabled no need to disable */
	if (!(sctlr & CR_C))
		return;

	set_sctlr(sctlr & ~(CR_C|CR_M));

	flush_dcache_all();
	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");

	return NULL;
}

static bool is_aligned(u64 addr, u64 size, u64 align)
{
	return !(addr & (align - 1)) && !(size & (align - 1));
}
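/*
 * Update the PTE covering <start> at <level> when the region is aligned
 * to that level's block size; returns the bytes covered, or 0 when the
 * caller must descend one level (splitting a block into a table first
 * if needed).
 */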
/* Use flag to indicate if attrs has more than d-cache attributes */
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 *pte = find_pte(start, level);

	/* Can we just modify the current level block PTE? */
	if (is_aligned(start, size, levelsize)) {
		if (flag) {
			*pte &= ~PMD_ATTRMASK;
			*pte |= attrs & PMD_ATTRMASK;
		} else {
			*pte &= ~PMD_ATTRINDX_MASK;
			*pte |= attrs & PMD_ATTRINDX_MASK;
		}
		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

		return levelsize;
	}

	/* Unaligned or doesn't fit, maybe split block into table */
	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

	/* Maybe we need to split the block into a table */
	if (pte_type(pte) == PTE_TYPE_BLOCK)
		split_block(pte, level);

	/* And then double-check it became a table or already is one */
	if (pte_type(pte) != PTE_TYPE_TABLE)
		panic("PTE %p (%llx) for addr=%llx should be a table",
		      pte, *pte, start);

	/* Roll on to the next page table level */
	return 0;
}
577
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 attrs = PMD_ATTRINDX(option >> 2);
	u64 real_start = start;
	u64 real_size = size;

	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

	if (!gd->arch.tlb_emerg)
		panic("Emergency page table not setup.");

	/*
	 * We cannot modify page tables that we're currently running on,
	 * so we first need to switch to the "emergency" page tables where
	 * we can safely modify our primary page tables and then switch back
	 */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	while (size > 0) {
		int level;
		u64 r;

		for (level = 1; level < 4; level++) {
			/* Set d-cache attributes only */
			r = set_one_region(start, size, attrs, false, level);
			if (r) {
				/* PTE successfully replaced */
				size -= r;
				start += r;
				break;
			}
		}
	}

	/* We're done modifying page tables, switch back to our primary ones */
	__asm_switch_ttbr(gd->arch.tlb_addr);

	/*
	 * Make sure there's nothing stale in dcache for a region that might
	 * have caches off now
	 */
	flush_dcache_range(real_start, real_start + real_size);
}
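
/*
 * A typical call from board code might look like this (illustrative;
 * fb_base is a made-up name for a 2 MB-aligned frame buffer address):
 *
 *	mmu_set_region_dcache_behaviour(fb_base, 2 << 20, DCACHE_OFF);
 *
 * which remaps that 2 MB block as uncached.
 */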

/*
 * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
 * The process is break-before-make. The target region will be marked as
 * invalid during the process of changing.
 */
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
{
	int level;
	u64 r, size, start;

	start = addr;
	size = siz;
	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to "invalid".
	 */
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to fault */
			r = set_one_region(start, size, PTE_TYPE_FAULT, true,
					   level);
			if (r) {
				/* PTE successfully invalidated */
				size -= r;
				start += r;
				break;
			}
		}
	}

	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	start = addr;
	size = siz;
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to new attributes */
			r = set_one_region(start, size, attrs, true, level);
			if (r) {
				/* PTE successfully updated */
				size -= r;
				start += r;
				break;
			}
		}
	}
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();
}

#else /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

/*
 * For SPL builds, we may want to not have dcache enabled. However, any real
 * U-Boot running really wants to have dcache and the MMU active. Check that
 * everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_SPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF)

void icache_enable(void)
{
	invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

int mmu_status(void)
{
	return (get_sctlr() & CR_M) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
	__asm_invalidate_l3_icache();
}

#else /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

int mmu_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

/*
 * Enable dCache & iCache; whether the caches are actually enabled depends
 * on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}