// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * (C) Copyright 2016
 * Alexander Graf <agraf@suse.de>
 */

#include <common.h>
#include <cpu_func.h>
#include <hang.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)

/*
 * With 4k page granule, a virtual address is split into 4 lookup parts
 * spanning 9 bits each:
 *
 *   _______________________________________________
 *  |       |       |       |       |       |       |
 *  |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *  |_______|_______|_______|_______|_______|_______|
 *   63-48   47-39   38-30   29-21   20-12   11-00
 *
 *             mask        page size
 *
 * Lv0: FF8000000000       --
 * Lv1:   7FC0000000       1G
 * Lv2:     3FE00000       2M
 * Lv3:       1FF000       4K
 * off:          FFF
 */
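
/*
 * Worked example (illustrative, address chosen arbitrarily): with the 4k
 * granule above, VA 0x0000004012345678 decodes as
 *
 *	Lv0 index = (va >> 39) & 0x1FF = 0x000
 *	Lv1 index = (va >> 30) & 0x1FF = 0x100
 *	Lv2 index = (va >> 21) & 0x1FF = 0x091
 *	Lv3 index = (va >> 12) & 0x1FF = 0x145
 *	offset    =  va        & 0xFFF = 0x678
 */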

u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
{
	u64 max_addr = 0;
	u64 ips, va_bits;
	u64 tcr;
	int i;

	/* Find the largest address we need to support */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);

	/* Calculate the maximum physical (and thus virtual) address */
	if (max_addr > (1ULL << 44)) {
		ips = 5;
		va_bits = 48;
	} else if (max_addr > (1ULL << 42)) {
		ips = 4;
		va_bits = 44;
	} else if (max_addr > (1ULL << 40)) {
		ips = 3;
		va_bits = 42;
	} else if (max_addr > (1ULL << 36)) {
		ips = 2;
		va_bits = 40;
	} else if (max_addr > (1ULL << 32)) {
		ips = 1;
		va_bits = 36;
	} else {
		ips = 0;
		va_bits = 32;
	}

	if (el == 1) {
		tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
	} else if (el == 2) {
		tcr = TCR_EL2_RSVD | (ips << 16);
	} else {
		tcr = TCR_EL3_RSVD | (ips << 16);
	}

	/* PTWs cacheable, inner/outer WBWA and inner shareable */
	tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
	tcr |= TCR_T0SZ(va_bits);

	if (pips)
		*pips = ips;
	if (pva_bits)
		*pva_bits = va_bits;

	return tcr;
}
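
/*
 * Worked example (illustrative): for a memory map whose highest mapped
 * address is 0x240000000 (9 GiB), the ladder above picks ips = 1 and
 * va_bits = 36, so at EL2 the function returns
 * TCR_EL2_RSVD | (1 << 16) | TCR_TG0_4K | TCR_SHARED_INNER |
 * TCR_ORGN_WBWA | TCR_IRGN_WBWA | TCR_T0SZ(36).
 */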

#define MAX_PTE_ENTRIES 512

static int pte_type(u64 *pte)
{
	return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
	/* The page offset is 12 bits wide, every level translates 9 bits */
	return (12 + 9 * (3 - level));
}
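
/*
 * Illustration: level2shift(0..3) returns 39, 30, 21 and 12, i.e. the bit
 * positions of the Lv0..Lv3 indices in the diagram at the top of this file.
 */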

static u64 *find_pte(u64 addr, int level)
{
	int start_level = 0;
	u64 *pte;
	u64 idx;
	u64 va_bits;
	int i;

	debug("addr=%llx level=%d\n", addr, level);

	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	if (level < start_level)
		return NULL;

	/* Walk through all page table levels to find our PTE */
	pte = (u64 *)gd->arch.tlb_addr;
	for (i = start_level; i < 4; i++) {
		idx = (addr >> level2shift(i)) & 0x1FF;
		pte += idx;
		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

		/* Found it */
		if (i == level)
			return pte;
		/* PTE is not a table (either invalid or block), can't traverse */
		if (pte_type(pte) != PTE_TYPE_TABLE)
			return NULL;
		/* Off to the next level */
		pte = (u64 *)(*pte & 0x0000fffffffff000ULL);
	}

	/* Should never reach here */
	return NULL;
}
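
/*
 * Usage sketch (illustrative): find_pte(0x80000000, 2) walks from the root
 * table down to level 2 and returns a pointer to the level-2 entry covering
 * that address, or NULL if the walk hits a block or invalid entry first.
 */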

/* Creates and returns a new full table (512 entries) */
static u64 *create_table(void)
{
	u64 *new_table = (u64 *)gd->arch.tlb_fillptr;
	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

	/* Allocate MAX_PTE_ENTRIES pte entries */
	gd->arch.tlb_fillptr += pt_len;

	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
		      "Please increase the size in get_page_table_size()",
		      gd->arch.tlb_fillptr - gd->arch.tlb_addr,
		      gd->arch.tlb_size);

	/* Mark all entries as invalid */
	memset(new_table, 0, pt_len);

	return new_table;
}

static void set_pte_table(u64 *pte, u64 *table)
{
	/* Point *pte to the new table */
	debug("Setting %p to addr=%p\n", pte, table);
	*pte = PTE_TYPE_TABLE | (ulong)table;
}

/* Splits a block PTE into a table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
	u64 old_pte = *pte;
	u64 *new_table;
	u64 i = 0;
	/* level describes the parent level, we need the child ones */
	int levelshift = level2shift(level + 1);

	if (pte_type(pte) != PTE_TYPE_BLOCK)
		panic("PTE %p (%llx) is not a block. Some driver code wants to "
		      "modify dcache settings for a range not covered in "
		      "mem_map.", pte, old_pte);

	new_table = create_table();
	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		new_table[i] = old_pte | (i << levelshift);

		/* Level 3 block PTEs have the table type */
		if ((level + 1) == 3)
			new_table[i] |= PTE_TYPE_TABLE;

		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
	}

	/* Set the new table into effect */
	set_pte_table(pte, new_table);
}
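
/*
 * Worked example (illustrative): splitting a 1 GiB level-1 block mapping
 * phys 0x40000000 yields a table of 512 level-2 entries covering
 * 0x40000000 + i * 0x200000 (2 MiB each), all inheriting the old block's
 * attribute bits.
 */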

/* Add one mm_region map entry to the page tables */
static void add_map(struct mm_region *map)
{
	u64 *pte;
	u64 virt = map->virt;
	u64 phys = map->phys;
	u64 size = map->size;
	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	u64 blocksize;
	int level;
	u64 *new_table;

	while (size) {
		pte = find_pte(virt, 0);
		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
			debug("Creating table for virt 0x%llx\n", virt);
			new_table = create_table();
			set_pte_table(pte, new_table);
		}

		for (level = 1; level < 4; level++) {
			pte = find_pte(virt, level);
			if (!pte)
				panic("pte not found\n");

			blocksize = 1ULL << level2shift(level);
			debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
			      virt, size, blocksize);
			if (size >= blocksize && !(virt & (blocksize - 1))) {
				/* Page fits, create block PTE */
				debug("Setting PTE %p to block virt=%llx\n",
				      pte, virt);
				if (level == 3)
					*pte = phys | attrs | PTE_TYPE_PAGE;
				else
					*pte = phys | attrs;
				virt += blocksize;
				phys += blocksize;
				size -= blocksize;
				break;
			} else if (pte_type(pte) == PTE_TYPE_FAULT) {
				/* Page doesn't fit, create subpages */
				debug("Creating subtable for virt 0x%llx blksize=%llx\n",
				      virt, blocksize);
				new_table = create_table();
				set_pte_table(pte, new_table);
			} else if (pte_type(pte) == PTE_TYPE_BLOCK) {
				debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n",
				      virt, blocksize);
				split_block(pte, level);
			}
		}
	}
}
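
/*
 * Illustrative mem_map (hypothetical board; the array is normally defined
 * in board or SoC code, not in this file, and its terminator is the
 * all-zero entry). add_map() above consumes one such entry at a time:
 *
 *	static struct mm_region board_mem_map[] = {
 *		{
 *			.virt = 0x0UL,
 *			.phys = 0x0UL,
 *			.size = 0x80000000UL,
 *			.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
 *				 PTE_BLOCK_INNER_SHARE,
 *		}, {
 *			.virt = 0x80000000UL,
 *			.phys = 0x80000000UL,
 *			.size = 0x80000000UL,
 *			.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
 *				 PTE_BLOCK_NON_SHARE |
 *				 PTE_BLOCK_PXN | PTE_BLOCK_UXN,
 *		}, {
 *			0,
 *		}
 *	};
 *	struct mm_region *mem_map = board_mem_map;
 */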

enum pte_type {
	PTE_INVAL,
	PTE_BLOCK,
	PTE_LEVEL,
};

/*
 * Recursively counts the number of page tables we need to cover a
 * particular PTE range. Calling it with level = -1 yields the full
 * 48-bit coverage.
 */
static int count_required_pts(u64 addr, int level, u64 maxaddr)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 levelmask = levelsize - 1;
	u64 levelend = addr + levelsize;
	int r = 0;
	int i;
	enum pte_type pte_type = PTE_INVAL;

	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
		struct mm_region *map = &mem_map[i];
		u64 start = map->virt;
		u64 end = start + map->size;

		/* Check if the PTE would overlap with the map */
		if (max(addr, start) <= min(levelend, end)) {
			start = max(addr, start);
			end = min(levelend, end);

			/* We need a sub-pt for this level */
			if ((start & levelmask) || (end & levelmask)) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* Lv0 cannot do block PTEs, so do levels here too */
			if (level <= 0) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* PTE is active, but fits into a block */
			pte_type = PTE_BLOCK;
		}
	}

	/*
	 * Block PTEs at this level are already covered by the parent page
	 * table, so we only need to count sub page tables.
	 */
	if (pte_type == PTE_LEVEL) {
		int sublevel = level + 1;
		u64 sublevelsize = 1ULL << level2shift(sublevel);

		/* Account for the new sub page table ... */
		r = 1;

		/* ... and for all child page tables that one might have */
		for (i = 0; i < MAX_PTE_ENTRIES; i++) {
			r += count_required_pts(addr, sublevel, maxaddr);
			addr += sublevelsize;

			if (addr >= maxaddr) {
				/*
				 * We reached the end of address space, no need
				 * to look any further.
				 */
				break;
			}
		}
	}

	return r;
}
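
/*
 * Note (illustrative): level2shift(-1) is 48, so the level = -1 call made
 * by get_page_table_size() below treats the whole 48-bit address space as
 * one pseudo-entry and recurses down from there.
 */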

/* Returns the estimated required size of all page tables */
__weak u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;
	u64 va_bits;
	int start_level = 0;

	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	/* Account for all page tables we would need to cover our memory map */
	size = one_pt * count_required_pts(0, start_level - 1, 1ULL << va_bits);

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
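
/*
 * Arithmetic example (illustrative): one_pt is 512 * 8 = 4 KiB. A memory
 * map that needs 4 page tables thus reserves 4 * 4 KiB, doubled for the
 * emergency copy and padded with 4 spare tables: 32 KiB + 16 KiB = 48 KiB.
 */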

void setup_pgtables(void)
{
	int i;

	if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
		panic("Page table pointer not set up.");

	/*
	 * Allocate the first level we're on with invalid entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
	 * Lv0 page table, otherwise it's the entry Lv1 page table.
	 */
	create_table();

	/* Now add all MMU table entries one after another to the table */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		add_map(&mem_map[i]);
}

static void setup_all_pgtables(void)
{
	u64 tlb_addr = gd->arch.tlb_addr;
	u64 tlb_size = gd->arch.tlb_size;

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
			     (uintptr_t)gd->arch.tlb_addr;
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	setup_pgtables();
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	gd->arch.tlb_addr = tlb_addr;
	gd->arch.tlb_size = tlb_size;
}

/* to activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
	int el;

	/* Set up page tables only once */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	/* enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
	__asm_invalidate_l3_dcache();
}

/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using stack.
 * __asm_flush_l3_dcache returns the status of the timeout.
 */
inline void flush_dcache_all(void)
{
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_dcache();
	if (ret)
		debug("flushing dcache returned 0x%x\n", ret);
	else
		debug("flushing dcache succeeded\n");
}

#ifndef CONFIG_SYS_DISABLE_DCACHE_OPS
/*
 * Invalidates the given range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_invalidate_dcache_range(start, stop);
}

/*
 * Flushes (cleans & invalidates) the given range in all levels of
 * D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}
#else
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}
#endif /* CONFIG_SYS_DISABLE_DCACHE_OPS */

void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	set_sctlr(get_sctlr() | CR_C);
}

void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* if cache isn't enabled no need to disable */
	if (!(sctlr & CR_C))
		return;

	set_sctlr(sctlr & ~(CR_C|CR_M));

	flush_dcache_all();
	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");

	return NULL;
}

static bool is_aligned(u64 addr, u64 size, u64 align)
{
	return !(addr & (align - 1)) && !(size & (align - 1));
}

/* Use flag to indicate if attrs has more than d-cache attributes */
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 *pte = find_pte(start, level);

	/* Can we just modify the current level block PTE? */
	if (is_aligned(start, size, levelsize)) {
		if (flag) {
			*pte &= ~PMD_ATTRMASK;
			*pte |= attrs & PMD_ATTRMASK;
		} else {
			*pte &= ~PMD_ATTRINDX_MASK;
			*pte |= attrs & PMD_ATTRINDX_MASK;
		}
		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

		return levelsize;
	}

	/* Unaligned or doesn't fit, maybe split block into table */
	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

	/* Maybe we need to split the block into a table */
	if (pte_type(pte) == PTE_TYPE_BLOCK)
		split_block(pte, level);

	/* And then double-check it became a table or already is one */
	if (pte_type(pte) != PTE_TYPE_TABLE)
		panic("PTE %p (%llx) for addr=%llx should be a table",
		      pte, *pte, start);

	/* Roll on to the next page table level */
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 attrs = PMD_ATTRINDX(option >> 2);
	u64 real_start = start;
	u64 real_size = size;

	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

	if (!gd->arch.tlb_emerg)
		panic("Emergency page table not set up.");

	/*
	 * We cannot modify page tables that we're currently running on,
	 * so we first need to switch to the "emergency" page tables where
	 * we can safely modify our primary page tables and then switch back
	 */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	while (size > 0) {
		int level;
		u64 r;

		for (level = 1; level < 4; level++) {
			/* Set d-cache attributes only */
			r = set_one_region(start, size, attrs, false, level);
			if (r) {
				/* PTE successfully replaced */
				size -= r;
				start += r;
				break;
			}
		}
	}

	/* We're done modifying page tables, switch back to our primary ones */
	__asm_switch_ttbr(gd->arch.tlb_addr);

	/*
	 * Make sure there's nothing stale in dcache for a region that might
	 * have caches off now
	 */
	flush_dcache_range(real_start, real_start + real_size);
}
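
/*
 * Usage sketch (illustrative; `buf` is a hypothetical 2 MiB-aligned
 * buffer): a driver that needs an uncached 2 MiB window could call
 *
 *	mmu_set_region_dcache_behaviour((phys_addr_t)buf, SZ_2M, DCACHE_OFF);
 *
 * The region must already be covered by mem_map, otherwise split_block()
 * will panic when asked to split a non-block PTE.
 */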

/*
 * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
 * The process is break-before-make: the target region is marked invalid
 * while the change is underway.
 */
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
{
	int level;
	u64 r, size, start;

	start = addr;
	size = siz;
	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to "invalid".
	 */
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to fault */
			r = set_one_region(start, size, PTE_TYPE_FAULT, true,
					   level);
			if (r) {
				/* PTE successfully invalidated */
				size -= r;
				start += r;
				break;
			}
		}
	}

	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	start = addr;
	size = siz;
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to new attributes */
			r = set_one_region(start, size, attrs, true, level);
			if (r) {
				/* PTE successfully updated */
				size -= r;
				start += r;
				break;
			}
		}
	}
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();
}
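
/*
 * Usage sketch (illustrative; the attribute combination is an example
 * only): remap a 2 MiB region as normal, inner-shareable, execute-never
 * memory:
 *
 *	mmu_change_region_attr(base, SZ_2M,
 *			       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
 *			       PTE_BLOCK_INNER_SHARE | PTE_TYPE_VALID |
 *			       PTE_BLOCK_PXN | PTE_BLOCK_UXN);
 */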

#else /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

/*
 * For SPL builds, we may want to not have dcache enabled. Any real U-Boot
 * running, however, really wants to have dcache and the MMU active. Check
 * that everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_SPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF)

void icache_enable(void)
{
	invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

int mmu_status(void)
{
	return (get_sctlr() & CR_M) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
	__asm_invalidate_l3_icache();
}

#else /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

int mmu_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

/*
 * Enable dCache & iCache; whether the caches actually get enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}