// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * (C) Copyright 2016
 * Alexander Graf <agraf@suse.de>
 */

#include <common.h>
#include <cpu_func.h>
#include <hang.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)

/*
 * With 4k page granule, a virtual address is split into 4 lookup parts
 * spanning 9 bits each:
 *
 *   _______________________________________________
 *  |       |       |       |       |       |       |
 *  |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *  |_______|_______|_______|_______|_______|_______|
 *    63-48   47-39   38-30   29-21   20-12   11-00
 *
 *             mask        page size
 *
 * Lv0: FF8000000000       --
 * Lv1:   7FC0000000       1G
 * Lv2:     3FE00000       2M
 * Lv3:       1FF000       4K
 * off:          FFF
 */

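/*
 * Worked example (illustrative, address made up): the virtual address
 * 0x40_0020_1000 decomposes as
 *
 *   Lv0 index = (addr >> 39) & 0x1FF = 0
 *   Lv1 index = (addr >> 30) & 0x1FF = 256
 *   Lv2 index = (addr >> 21) & 0x1FF = 1
 *   Lv3 index = (addr >> 12) & 0x1FF = 1
 *   offset    =  addr & 0xFFF        = 0
 *
 * so it can be reached through a 1G block PTE in slot 256 of the Lv1
 * table or, if that entry is a table descriptor, through the Lv2/Lv3
 * entries below it.
 */
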
u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
{
        u64 max_addr = 0;
        u64 ips, va_bits;
        u64 tcr;
        int i;

        /* Find the largest address we need to support */
        for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
                max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);

        /* Calculate the maximum physical (and thus virtual) address */
        if (max_addr > (1ULL << 44)) {
                ips = 5;
                va_bits = 48;
        } else if (max_addr > (1ULL << 42)) {
                ips = 4;
                va_bits = 44;
        } else if (max_addr > (1ULL << 40)) {
                ips = 3;
                va_bits = 42;
        } else if (max_addr > (1ULL << 36)) {
                ips = 2;
                va_bits = 40;
        } else if (max_addr > (1ULL << 32)) {
                ips = 1;
                va_bits = 36;
        } else {
                ips = 0;
                va_bits = 32;
        }

        if (el == 1) {
                tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
        } else if (el == 2) {
                tcr = TCR_EL2_RSVD | (ips << 16);
        } else {
                tcr = TCR_EL3_RSVD | (ips << 16);
        }

        /* PTWs cacheable, inner/outer WBWA and inner shareable */
        tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
        tcr |= TCR_T0SZ(va_bits);

        if (pips)
                *pips = ips;
        if (pva_bits)
                *pva_bits = va_bits;

        return tcr;
}
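
/*
 * Illustrative sketch (numbers assumed): on a board whose mem_map ends at
 * 8 GiB (max_addr = 1ULL << 33), the chain above selects ips = 1 and
 * va_bits = 36, so at EL2 the returned value is
 *
 *   TCR_EL2_RSVD | (1 << 16) | TCR_TG0_4K | TCR_SHARED_INNER |
 *   TCR_ORGN_WBWA | TCR_IRGN_WBWA | TCR_T0SZ(36)
 *
 * i.e. 36-bit address ranges with 4k granule and write-back cacheable
 * page table walks.
 */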

#define MAX_PTE_ENTRIES 512

static int pte_type(u64 *pte)
{
        return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
        /* The page offset is 12 bits wide; every level translates 9 bits */
        return (12 + 9 * (3 - level));
}
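
/*
 * For reference: level2shift(0) = 39, (1) = 30, (2) = 21 and (3) = 12,
 * so a block entry at level 1 maps 1 << 30 = 1G and one at level 2 maps
 * 1 << 21 = 2M, matching the table at the top of this file.
 */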

static u64 *find_pte(u64 addr, int level)
{
        int start_level = 0;
        u64 *pte;
        u64 idx;
        u64 va_bits;
        int i;

        debug("addr=%llx level=%d\n", addr, level);

        get_tcr(0, NULL, &va_bits);
        if (va_bits < 39)
                start_level = 1;

        if (level < start_level)
                return NULL;

        /* Walk through all page table levels to find our PTE */
        pte = (u64 *)gd->arch.tlb_addr;
        for (i = start_level; i < 4; i++) {
                idx = (addr >> level2shift(i)) & 0x1FF;
                pte += idx;
                debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

                /* Found it */
                if (i == level)
                        return pte;
                /* PTE is not a table (either invalid or a block), can't traverse */
                if (pte_type(pte) != PTE_TYPE_TABLE)
                        return NULL;
                /* Off to the next level */
                pte = (u64 *)(*pte & 0x0000fffffffff000ULL);
        }

        /* Should never reach here */
        return NULL;
}
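
/*
 * Usage note (restating the walk above): find_pte(addr, 2) returns a
 * pointer to the 2M-granularity entry covering addr only if every level
 * above it holds a table descriptor; if addr is already covered by a 1G
 * block at level 1, the walk stops there and NULL is returned.
 */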

/* Returns and creates a new full table (512 entries) */
static u64 *create_table(void)
{
        u64 *new_table = (u64 *)gd->arch.tlb_fillptr;
        u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

        /* Allocate MAX_PTE_ENTRIES pte entries */
        gd->arch.tlb_fillptr += pt_len;

        if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
                panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
                      "Please increase the size in get_page_table_size()",
                      gd->arch.tlb_fillptr - gd->arch.tlb_addr,
                      gd->arch.tlb_size);

        /* Mark all entries as invalid */
        memset(new_table, 0, pt_len);

        return new_table;
}

static void set_pte_table(u64 *pte, u64 *table)
{
        /* Point *pte to the new table */
        debug("Setting %p to addr=%p\n", pte, table);
        *pte = PTE_TYPE_TABLE | (ulong)table;
}

/* Splits a block PTE into a table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
        u64 old_pte = *pte;
        u64 *new_table;
        u64 i = 0;
        /* level describes the parent level, we need the child ones */
        int levelshift = level2shift(level + 1);

        if (pte_type(pte) != PTE_TYPE_BLOCK)
                panic("PTE %p (%llx) is not a block. Some driver code wants to "
                      "modify dcache settings for a range not covered in "
                      "mem_map.", pte, old_pte);

        new_table = create_table();
        debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

        for (i = 0; i < MAX_PTE_ENTRIES; i++) {
                new_table[i] = old_pte | (i << levelshift);

                /* Level 3 block PTEs have the table type */
                if ((level + 1) == 3)
                        new_table[i] |= PTE_TYPE_TABLE;

                debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
        }

        /* Set the new table into effect */
        set_pte_table(pte, new_table);
}
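
/*
 * Example (derived from the loop above): splitting a 1G block PTE at
 * level 1 allocates a level-2 table whose 512 entries inherit the old
 * attributes and step the output address in 2M increments:
 * new_table[i] = old_pte | (i << 21).
 */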

/* Add one mm_region map entry to the page tables */
static void add_map(struct mm_region *map)
{
        u64 *pte;
        u64 virt = map->virt;
        u64 phys = map->phys;
        u64 size = map->size;
        u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
        u64 blocksize;
        int level;
        u64 *new_table;

        while (size) {
                pte = find_pte(virt, 0);
                if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
                        debug("Creating table for virt 0x%llx\n", virt);
                        new_table = create_table();
                        set_pte_table(pte, new_table);
                }

                for (level = 1; level < 4; level++) {
                        pte = find_pte(virt, level);
                        if (!pte)
                                panic("pte not found\n");

                        blocksize = 1ULL << level2shift(level);
                        debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
                              virt, size, blocksize);
                        if (size >= blocksize && !(virt & (blocksize - 1))) {
                                /* Page fits, create block PTE */
                                debug("Setting PTE %p to block virt=%llx\n",
                                      pte, virt);
                                if (level == 3)
                                        *pte = phys | attrs | PTE_TYPE_PAGE;
                                else
                                        *pte = phys | attrs;
                                virt += blocksize;
                                phys += blocksize;
                                size -= blocksize;
                                break;
                        } else if (pte_type(pte) == PTE_TYPE_FAULT) {
                                /* Page doesn't fit, create subpages */
                                debug("Creating subtable for virt 0x%llx blksize=%llx\n",
                                      virt, blocksize);
                                new_table = create_table();
                                set_pte_table(pte, new_table);
                        } else if (pte_type(pte) == PTE_TYPE_BLOCK) {
                                debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n",
                                      virt, blocksize);
                                split_block(pte, level);
                        }
                }
        }
}
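
/*
 * Worked example (values assumed): a mem_map entry with virt = phys =
 * 0x40000000 and size = 0x80000000 is 1G-aligned, so the level-1 fit
 * check succeeds twice and the region becomes two 1G block PTEs. An
 * entry of size 0x200000 at the same address fails the level-1 check,
 * gets a level-2 subtable and is mapped with a single 2M block instead.
 */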

enum pte_type {
        PTE_INVAL,
        PTE_BLOCK,
        PTE_LEVEL,
};

/*
 * This is a recursively called function to count the number of
 * page tables we need to cover a particular PTE range. If you
 * call this with level = -1 you basically get the full 48 bit
 * coverage.
 */
static int count_required_pts(u64 addr, int level, u64 maxaddr)
{
        int levelshift = level2shift(level);
        u64 levelsize = 1ULL << levelshift;
        u64 levelmask = levelsize - 1;
        u64 levelend = addr + levelsize;
        int r = 0;
        int i;
        enum pte_type pte_type = PTE_INVAL;

        for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
                struct mm_region *map = &mem_map[i];
                u64 start = map->virt;
                u64 end = start + map->size;

                /* Check if the PTE would overlap with the map */
                if (max(addr, start) <= min(levelend, end)) {
                        start = max(addr, start);
                        end = min(levelend, end);

                        /* We need a sub-pt for this level */
                        if ((start & levelmask) || (end & levelmask)) {
                                pte_type = PTE_LEVEL;
                                break;
                        }

                        /* Lv0 cannot do block PTEs, so do levels here too */
                        if (level <= 0) {
                                pte_type = PTE_LEVEL;
                                break;
                        }

                        /* PTE is active, but fits into a block */
                        pte_type = PTE_BLOCK;
                }
        }

        /*
         * Block PTEs at this level are already covered by the parent page
         * table, so we only need to count sub page tables.
         */
        if (pte_type == PTE_LEVEL) {
                int sublevel = level + 1;
                u64 sublevelsize = 1ULL << level2shift(sublevel);

                /* Account for the new sub page table ... */
                r = 1;

                /* ... and for all child page tables that one might have */
                for (i = 0; i < MAX_PTE_ENTRIES; i++) {
                        r += count_required_pts(addr, sublevel, maxaddr);
                        addr += sublevelsize;

                        if (addr >= maxaddr) {
                                /*
                                 * We reached the end of address space, no need
                                 * to look any further.
                                 */
                                break;
                        }
                }
        }

        return r;
}

/* Returns the estimated required size of all page tables */
__weak u64 get_page_table_size(void)
{
        u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
        u64 size = 0;
        u64 va_bits;
        int start_level = 0;

        get_tcr(0, NULL, &va_bits);
        if (va_bits < 39)
                start_level = 1;

        /* Account for all page tables we would need to cover our memory map */
        size = one_pt * count_required_pts(0, start_level - 1, 1ULL << va_bits);

        /*
         * We need to duplicate our page table once to have an emergency pt to
         * resort to when splitting page tables later on
         */
        size *= 2;

        /*
         * We may need to split page tables later on if dcache settings change,
         * so reserve up to 4 (random pick) page tables for that.
         */
        size += one_pt * 4;

        return size;
}
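
/*
 * Sizing sketch (table count assumed): one table is 512 * 8 = 4k bytes.
 * A memory map that needs, say, 6 tables reserves 6 * 4k, doubled for
 * the emergency copy and padded with 4 spare tables:
 * 2 * 24k + 16k = 64k in total.
 */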

void setup_pgtables(void)
{
        int i;

        if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
                panic("Page table pointer not setup.");

        /*
         * Allocate the first level we're on with invalid entries.
         * If the starting level is 0 (va_bits >= 39), then this is our
         * Lv0 page table, otherwise it's the entry Lv1 page table.
         */
        create_table();

        /* Now add all MMU table entries one after another to the table */
        for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
                add_map(&mem_map[i]);
}

static void setup_all_pgtables(void)
{
        u64 tlb_addr = gd->arch.tlb_addr;
        u64 tlb_size = gd->arch.tlb_size;

        /* Reset the fill ptr */
        gd->arch.tlb_fillptr = tlb_addr;

        /* Create normal system page tables */
        setup_pgtables();

        /* Create emergency page tables */
        gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
                             (uintptr_t)gd->arch.tlb_addr;
        gd->arch.tlb_addr = gd->arch.tlb_fillptr;
        setup_pgtables();
        gd->arch.tlb_emerg = gd->arch.tlb_addr;
        gd->arch.tlb_addr = tlb_addr;
        gd->arch.tlb_size = tlb_size;
}

/* to activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
        int el;

        /* Set up page tables only once */
        if (!gd->arch.tlb_fillptr)
                setup_all_pgtables();

        el = current_el();
        set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
                          MEMORY_ATTRIBUTES);

        /* enable the mmu */
        set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
        __asm_invalidate_dcache_all();
        __asm_invalidate_l3_dcache();
}

/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using the stack.
 * __asm_flush_l3_dcache returns a non-zero status on timeout.
 */
inline void flush_dcache_all(void)
{
        int ret;

        __asm_flush_dcache_all();
        ret = __asm_flush_l3_dcache();
        if (ret)
                debug("flushing dcache returned 0x%x\n", ret);
        else
                debug("flushing dcache succeeded\n");
}

#ifndef CONFIG_SYS_DISABLE_DCACHE_OPS
/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
        __asm_invalidate_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
        __asm_flush_dcache_range(start, stop);
}
#else
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}
#endif /* CONFIG_SYS_DISABLE_DCACHE_OPS */

void dcache_enable(void)
{
        /* The data cache is not active unless the mmu is enabled */
        if (!(get_sctlr() & CR_M)) {
                invalidate_dcache_all();
                __asm_invalidate_tlb_all();
                mmu_setup();
        }

        set_sctlr(get_sctlr() | CR_C);
}

void dcache_disable(void)
{
        uint32_t sctlr;

        sctlr = get_sctlr();

        /* if cache isn't enabled no need to disable */
        if (!(sctlr & CR_C))
                return;

        set_sctlr(sctlr & ~(CR_C|CR_M));

        flush_dcache_all();
        __asm_invalidate_tlb_all();
}

int dcache_status(void)
{
        return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void)
{
        puts("No page table offset defined\n");

        return NULL;
}

static bool is_aligned(u64 addr, u64 size, u64 align)
{
        return !(addr & (align - 1)) && !(size & (align - 1));
}

/* Use flag to indicate if attrs has more than d-cache attributes */
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
{
        int levelshift = level2shift(level);
        u64 levelsize = 1ULL << levelshift;
        u64 *pte = find_pte(start, level);

        /* Can we just modify the current level block PTE? */
        if (is_aligned(start, size, levelsize)) {
                if (flag) {
                        *pte &= ~PMD_ATTRMASK;
                        *pte |= attrs & PMD_ATTRMASK;
                } else {
                        *pte &= ~PMD_ATTRINDX_MASK;
                        *pte |= attrs & PMD_ATTRINDX_MASK;
                }
                debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

                return levelsize;
        }

        /* Unaligned or doesn't fit, maybe split block into table */
        debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

        /* Maybe we need to split the block into a table */
        if (pte_type(pte) == PTE_TYPE_BLOCK)
                split_block(pte, level);

        /* And then double-check it became a table or already is one */
        if (pte_type(pte) != PTE_TYPE_TABLE)
                panic("PTE %p (%llx) for addr=%llx should be a table",
                      pte, *pte, start);

        /* Roll on to the next page table level */
        return 0;
}
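
/*
 * Return-value contract (restated for clarity): a non-zero return is the
 * number of bytes successfully re-attributed at this level; 0 means the
 * region did not fit, and the caller must retry one level deeper, where
 * a freshly split table now provides finer-grained entries.
 */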

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
                                     enum dcache_option option)
{
        u64 attrs = PMD_ATTRINDX(option >> 2);
        u64 real_start = start;
        u64 real_size = size;

        debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

        if (!gd->arch.tlb_emerg)
                panic("Emergency page table not setup.");

        /*
         * We cannot modify page tables that we're currently running on,
         * so we first need to switch to the "emergency" page tables where
         * we can safely modify our primary page tables and then switch back
         */
        __asm_switch_ttbr(gd->arch.tlb_emerg);

        /*
         * Loop through the address range until we find a page granule that fits
         * our alignment constraints, then set it to the new cache attributes
         */
        while (size > 0) {
                int level;
                u64 r;

                for (level = 1; level < 4; level++) {
                        /* Set d-cache attributes only */
                        r = set_one_region(start, size, attrs, false, level);
                        if (r) {
                                /* PTE successfully replaced */
                                size -= r;
                                start += r;
                                break;
                        }
                }
        }

        /* We're done modifying page tables, switch back to our primary ones */
        __asm_switch_ttbr(gd->arch.tlb_addr);

        /*
         * Make sure there's nothing stale in dcache for a region that might
         * have caches off now
         */
        flush_dcache_range(real_start, real_start + real_size);
}
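
/*
 * Hedged usage sketch (address and size are made up): a driver that
 * wants an uncached 2M window at 0x80000000 would call
 *
 *   mmu_set_region_dcache_behaviour(0x80000000, 0x200000, DCACHE_OFF);
 *
 * Since both address and size are 2M-aligned, the loop resolves this
 * with a single level-2 block PTE and no block splitting.
 */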

/*
 * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
 * The process is break-before-make: the target region is first marked
 * invalid, then remapped with the new attributes.
 */
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
{
        int level;
        u64 r, size, start;

        start = addr;
        size = siz;
        /*
         * Loop through the address range until we find a page granule that fits
         * our alignment constraints, then set it to "invalid".
         */
        while (size > 0) {
                for (level = 1; level < 4; level++) {
                        /* Set PTE to fault */
                        r = set_one_region(start, size, PTE_TYPE_FAULT, true,
                                           level);
                        if (r) {
                                /* PTE successfully invalidated */
                                size -= r;
                                start += r;
                                break;
                        }
                }
        }

        flush_dcache_range(gd->arch.tlb_addr,
                           gd->arch.tlb_addr + gd->arch.tlb_size);
        __asm_invalidate_tlb_all();

        /*
         * Loop through the address range until we find a page granule that fits
         * our alignment constraints, then set it to the new cache attributes
         */
        start = addr;
        size = siz;
        while (size > 0) {
                for (level = 1; level < 4; level++) {
                        /* Set PTE to new attributes */
                        r = set_one_region(start, size, attrs, true, level);
                        if (r) {
                                /* PTE successfully updated */
                                size -= r;
                                start += r;
                                break;
                        }
                }
        }
        flush_dcache_range(gd->arch.tlb_addr,
                           gd->arch.tlb_addr + gd->arch.tlb_size);
        __asm_invalidate_tlb_all();
}
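
/*
 * Hedged usage sketch (attribute combination assumed, modelled on how
 * platform code typically remaps DDR as normal memory):
 *
 *   mmu_change_region_attr(base, size,
 *                          PTE_BLOCK_MEMTYPE(MT_NORMAL) |
 *                          PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS |
 *                          PTE_TYPE_VALID);
 *
 * The flag=true path of set_one_region() then rewrites the full PMD
 * attribute mask rather than just the d-cache index bits.
 */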

#else	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

/*
 * For SPL builds, we may want to not have dcache enabled. Any real U-Boot,
 * however, really wants to have dcache and the MMU active. Check that
 * everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_SPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
        return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
                                     enum dcache_option option)
{
}

#endif /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF)

void icache_enable(void)
{
        invalidate_icache_all();
        set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
        set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
        return (get_sctlr() & CR_I) != 0;
}

void invalidate_icache_all(void)
{
        __asm_invalidate_icache_all();
        __asm_invalidate_l3_icache();
}

#else	/* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
        return 0;
}

void invalidate_icache_all(void)
{
}

#endif /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

/*
 * Enable dcache & icache; whether the caches are actually enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void __weak enable_caches(void)
{
        icache_enable();
        dcache_enable();
}