/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * (C) Copyright 2016
 * Alexander Graf <agraf@suse.de>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_SYS_DCACHE_OFF

/*
 * With 4k page granule, a virtual address is split into 4 lookup parts
 * spanning 9 bits each:
 *
 *   _______________________________________________
 *  |       |       |       |       |       |       |
 *  |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *  |_______|_______|_______|_______|_______|_______|
 *   63-48   47-39   38-30   29-21   20-12   11-00
 *
 *             mask        page size
 *
 * Lv0: FF8000000000       --
 * Lv1:   7FC0000000       1G
 * Lv2:     3FE00000       2M
 * Lv3:       1FF000       4K
 * off:          FFF
 */
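
/*
 * Worked example (illustration only, not from the original sources):
 * VA 0x40200000 (1GiB + 2MiB) decodes to Lv0 index 0, Lv1 index 1
 * ((va >> 30) & 0x1FF), Lv2 index 1 ((va >> 21) & 0x1FF), Lv3 index 0
 * and page offset 0, so a 2MiB block PTE covering it lives in slot 1
 * of the Lv2 table.
 */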

#ifdef CONFIG_SYS_FULL_VA
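/*
 * Derive the TCR_ELx value for exception level <el> from the global
 * mem_map[]: find the highest physical address we map, pick the smallest
 * IPS/VA width that still covers it, and optionally hand the chosen IPS
 * encoding and VA bit count back through pips/pva_bits.
 */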
static u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
{
	u64 max_addr = 0;
	u64 ips, va_bits;
	u64 tcr;
	int i;

	/* Find the largest address we need to support */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		max_addr = max(max_addr, mem_map[i].base + mem_map[i].size);

	/* Calculate the maximum physical (and thus virtual) address */
	if (max_addr > (1ULL << 44)) {
		ips = 5;
		va_bits = 48;
	} else if (max_addr > (1ULL << 42)) {
		ips = 4;
		va_bits = 44;
	} else if (max_addr > (1ULL << 40)) {
		ips = 3;
		va_bits = 42;
	} else if (max_addr > (1ULL << 36)) {
		ips = 2;
		va_bits = 40;
	} else if (max_addr > (1ULL << 32)) {
		ips = 1;
		va_bits = 36;
	} else {
		ips = 0;
		va_bits = 32;
	}

	if (el == 1) {
		tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
	} else if (el == 2) {
		tcr = TCR_EL2_RSVD | (ips << 16);
	} else {
		tcr = TCR_EL3_RSVD | (ips << 16);
	}

	/* PTWs cacheable, inner/outer WBWA and inner shareable */
	tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
	tcr |= TCR_T0SZ(va_bits);

	if (pips)
		*pips = ips;
	if (pva_bits)
		*pva_bits = va_bits;

	return tcr;
}

#define MAX_PTE_ENTRIES 512

static int pte_type(u64 *pte)
{
	return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
	/* Page offset is 12 bits wide, every level translates 9 bits */
	return (12 + 9 * (3 - level));
}

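/*
 * Walk the page tables at gd->arch.tlb_addr and return a pointer to the
 * PTE that maps <addr> at lookup level <level>, or NULL if the walk hits
 * a block or invalid entry before reaching that level.
 */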
static u64 *find_pte(u64 addr, int level)
{
	int start_level = 0;
	u64 *pte;
	u64 idx;
	u64 va_bits;
	int i;

	debug("addr=%llx level=%d\n", addr, level);

	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	if (level < start_level)
		return NULL;

	/* Walk through all page table levels to find our PTE */
	pte = (u64 *)gd->arch.tlb_addr;
	for (i = start_level; i < 4; i++) {
		idx = (addr >> level2shift(i)) & 0x1FF;
		pte += idx;
		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

		/* Found it */
		if (i == level)
			return pte;
		/* PTE is no table (either invalid or block), can't traverse */
		if (pte_type(pte) != PTE_TYPE_TABLE)
			return NULL;
		/* Off to the next level */
		pte = (u64 *)(*pte & 0x0000fffffffff000ULL);
	}

	/* Should never reach here */
	return NULL;
}

/* Creates and returns a new full table (512 entries) */
static u64 *create_table(void)
{
	u64 *new_table = (u64 *)gd->arch.tlb_fillptr;
	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

	/* Allocate MAX_PTE_ENTRIES pte entries */
	gd->arch.tlb_fillptr += pt_len;

	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
		      "Please increase the size in get_page_table_size()",
		      gd->arch.tlb_fillptr - gd->arch.tlb_addr,
		      gd->arch.tlb_size);

	/* Mark all entries as invalid */
	memset(new_table, 0, pt_len);

	return new_table;
}

static void set_pte_table(u64 *pte, u64 *table)
{
	/* Point *pte to the new table */
	debug("Setting %p to addr=%p\n", pte, table);
	*pte = PTE_TYPE_TABLE | (ulong)table;
}

/* Add one mm_region map entry to the page tables */
static void add_map(struct mm_region *map)
{
	u64 *pte;
	u64 addr = map->base;
	u64 size = map->size;
	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	u64 blocksize;
	int level;
	u64 *new_table;

	while (size) {
		pte = find_pte(addr, 0);
		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
			debug("Creating table for addr 0x%llx\n", addr);
			new_table = create_table();
			set_pte_table(pte, new_table);
		}

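		/*
		 * Greedy descent: at each level, map the region with a
		 * block PTE if the remaining size fits one naturally
		 * aligned block, otherwise create a sub-table and retry
		 * with the next smaller block size.
		 */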
		for (level = 1; level < 4; level++) {
			pte = find_pte(addr, level);
			blocksize = 1ULL << level2shift(level);
			debug("Checking if pte fits for addr=%llx size=%llx "
			      "blocksize=%llx\n", addr, size, blocksize);
			if (size >= blocksize && !(addr & (blocksize - 1))) {
				/* Page fits, create block PTE */
				debug("Setting PTE %p to block addr=%llx\n",
				      pte, addr);
				*pte = addr | attrs;
				addr += blocksize;
				size -= blocksize;
				break;
			} else if ((pte_type(pte) == PTE_TYPE_FAULT)) {
				/* Page doesn't fit, create subpages */
				debug("Creating subtable for addr 0x%llx "
				      "blksize=%llx\n", addr, blocksize);
				new_table = create_table();
				set_pte_table(pte, new_table);
			}
		}
	}
}


/* Splits a block PTE into a table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
	u64 old_pte = *pte;
	u64 *new_table;
	u64 i = 0;
	/* level describes the parent level, we need the child ones */
	int levelshift = level2shift(level + 1);

	if (pte_type(pte) != PTE_TYPE_BLOCK)
		panic("PTE %p (%llx) is not a block. Some driver code wants to "
		      "modify dcache settings for a range not covered in "
		      "mem_map.", pte, old_pte);

	new_table = create_table();
	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		new_table[i] = old_pte | (i << levelshift);

		/* Level 3 block PTEs have the table type */
		if ((level + 1) == 3)
			new_table[i] |= PTE_TYPE_TABLE;

		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
	}

	/* Set the new table into effect */
	set_pte_table(pte, new_table);
}

enum pte_type {
	PTE_INVAL,
	PTE_BLOCK,
	PTE_LEVEL,
};

/*
 * This function is called recursively to count the number of page
 * tables we need to cover a particular PTE range. Calling it with
 * level = -1 yields the full 48 bit coverage.
 */
static int count_required_pts(u64 addr, int level, u64 maxaddr)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 levelmask = levelsize - 1;
	u64 levelend = addr + levelsize;
	int r = 0;
	int i;
	enum pte_type pte_type = PTE_INVAL;

	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
		struct mm_region *map = &mem_map[i];
		u64 start = map->base;
		u64 end = start + map->size;

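		/*
		 * Half-open intervals [addr, levelend) and [start, end)
		 * strictly overlap when max(addr, start) < min(levelend,
		 * end); the <= below also counts merely touching ranges,
		 * which at worst over-reserves a few page tables.
		 */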
		/* Check if the PTE would overlap with the map */
		if (max(addr, start) <= min(levelend, end)) {
			start = max(addr, start);
			end = min(levelend, end);

			/* We need a sub-pt for this level */
			if ((start & levelmask) || (end & levelmask)) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* Lv0 cannot do block PTEs, so do levels here too */
			if (level <= 0) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* PTE is active, but fits into a block */
			pte_type = PTE_BLOCK;
		}
	}

	/*
	 * Block PTEs at this level are already covered by the parent page
	 * table, so we only need to count sub page tables.
	 */
	if (pte_type == PTE_LEVEL) {
		int sublevel = level + 1;
		u64 sublevelsize = 1ULL << level2shift(sublevel);

		/* Account for the new sub page table ... */
		r = 1;

		/* ... and for all child page tables that one might have */
		for (i = 0; i < MAX_PTE_ENTRIES; i++) {
			r += count_required_pts(addr, sublevel, maxaddr);
			addr += sublevelsize;

			if (addr >= maxaddr) {
				/*
				 * We reached the end of address space, no
				 * need to look any further.
				 */
				break;
			}
		}
	}

	return r;
}

/* Returns the estimated required size of all page tables */
u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;
	u64 va_bits;
	int start_level = 0;

	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	/* Account for all page tables we would need to cover our memory map */
	size = one_pt * count_required_pts(0, start_level - 1, 1ULL << va_bits);

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (arbitrarily chosen) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}

static void setup_pgtables(void)
{
	int i;

	/*
	 * Allocate the first level we're on with invalid entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
	 * Lv0 page table, otherwise it's the entry Lv1 page table.
	 */
	create_table();

	/* Now add all MMU table entries one after another to the table */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		add_map(&mem_map[i]);

	/* Create the same thing once more for our emergency page table */
	create_table();
}

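/*
 * Build two complete sets of page tables back to back in the TLB area:
 * the primary set at gd->arch.tlb_addr and an identical "emergency"
 * copy (recorded in gd->arch.tlb_emerg) that we can run from while the
 * primary set is being modified.
 */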
static void setup_all_pgtables(void)
{
	u64 tlb_addr = gd->arch.tlb_addr;

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	setup_pgtables();
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	gd->arch.tlb_addr = tlb_addr;
}

#else

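/*
 * Install a section (block) descriptor for <section> with the given
 * memory type and extra attribute bits at <index> of <page_table>.
 */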
inline void set_pgtable_section(u64 *page_table, u64 index, u64 section,
				u64 memory_type, u64 attribute)
{
	u64 value;

	value = section | PMD_TYPE_SECT | PMD_SECT_AF;
	value |= PMD_ATTRINDX(memory_type);
	value |= attribute;
	page_table[index] = value;
}

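/* Install a table descriptor pointing at the next-level table */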
inline void set_pgtable_table(u64 *page_table, u64 index, u64 *table_addr)
{
	u64 value;

	value = (u64)table_addr | PMD_TYPE_TABLE;
	page_table[index] = value;
}
#endif

/* To activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
#ifndef CONFIG_SYS_FULL_VA
	bd_t *bd = gd->bd;
	u64 *page_table = (u64 *)gd->arch.tlb_addr, i, j;
#endif
	int el;

#ifdef CONFIG_SYS_FULL_VA
	/* Set up page tables only once */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);
#else
	/* Set up an identity mapping for all spaces */
	for (i = 0; i < (PGTABLE_SIZE >> 3); i++) {
		set_pgtable_section(page_table, i, i << SECTION_SHIFT,
				    MT_DEVICE_NGNRNE, PMD_SECT_NON_SHARE);
	}

	/* Set up an identity mapping for all RAM space */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		ulong start = bd->bi_dram[i].start;
		ulong end = bd->bi_dram[i].start + bd->bi_dram[i].size;
		for (j = start >> SECTION_SHIFT;
		     j < end >> SECTION_SHIFT; j++) {
			set_pgtable_section(page_table, j, j << SECTION_SHIFT,
					    MT_NORMAL, PMD_SECT_NON_SHARE);
		}
	}

	/* load TTBR0 */
	el = current_el();
	if (el == 1) {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL1_RSVD | TCR_FLAGS | TCR_EL1_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	} else if (el == 2) {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL2_RSVD | TCR_FLAGS | TCR_EL2_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	} else {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL3_RSVD | TCR_FLAGS | TCR_EL3_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	}
#endif

	/* Enable the MMU */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
}

/*
 * Performs a clean & invalidate of the entire data cache at all levels.
 * This function needs to be inline to avoid using stack.
 * __asm_flush_l3_cache returns the timeout status.
 */
inline void flush_dcache_all(void)
{
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_cache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("flushing dcache successfully.\n");
}

/*
 * Invalidates range in all levels of D-cache/unified cache
 */
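/* Note: implemented as a clean+invalidate via __asm_flush_dcache_range() */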
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

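/* Enable the data cache, bringing up the MMU first if it is still off */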
void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	set_sctlr(get_sctlr() | CR_C);
}

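/* Disable the data cache and the MMU, then flush out any dirty lines */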
void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* if cache isn't enabled no need to disable */
	if (!(sctlr & CR_C))
		return;

	set_sctlr(sctlr & ~(CR_C|CR_M));

	flush_dcache_all();
	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");

	return NULL;
}

#ifndef CONFIG_SYS_FULL_VA
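/*
 * Rewrite the attribute index of every section descriptor covering
 * [start, start + size) in the legacy flat page table, then flush the
 * TLB and the affected cache lines so the new attributes take effect.
 */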
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 *page_table = arch_get_page_table();
	u64 upto, end;

	if (page_table == NULL)
		return;

	end = ALIGN(start + size, (1 << MMU_SECTION_SHIFT)) >>
	      MMU_SECTION_SHIFT;
	start = start >> MMU_SECTION_SHIFT;
	for (upto = start; upto < end; upto++) {
		page_table[upto] &= ~PMD_ATTRINDX_MASK;
		page_table[upto] |= PMD_ATTRINDX(option);
	}
	asm volatile("dsb sy");
	__asm_invalidate_tlb_all();
	asm volatile("dsb sy");
	asm volatile("isb");
	start = start << MMU_SECTION_SHIFT;
	end = end << MMU_SECTION_SHIFT;
	flush_dcache_range(start, end);
	asm volatile("dsb sy");
}
#else
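/* True when addr and size are both multiples of align (a power of two) */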
static bool is_aligned(u64 addr, u64 size, u64 align)
{
	return !(addr & (align - 1)) && !(size & (align - 1));
}

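/*
 * Try to apply <attrs> to the region at <start> with a single block PTE
 * on <level>. Returns the number of bytes covered on success, or 0 after
 * splitting the block so the caller descends one level.
 */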
static u64 set_one_region(u64 start, u64 size, u64 attrs, int level)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 *pte = find_pte(start, level);

	/* Can we just modify the current level block PTE? */
	if (is_aligned(start, size, levelsize)) {
		*pte &= ~PMD_ATTRINDX_MASK;
		*pte |= attrs;
		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

		return levelsize;
	}

	/* Unaligned or doesn't fit, maybe split block into table */
	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

	/* Maybe we need to split the block into a table */
	if (pte_type(pte) == PTE_TYPE_BLOCK)
		split_block(pte, level);

	/* And then double-check it became a table or already is one */
	if (pte_type(pte) != PTE_TYPE_TABLE)
		panic("PTE %p (%llx) for addr=%llx should be a table",
		      pte, *pte, start);

	/* Roll on to the next page table level */
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 attrs = PMD_ATTRINDX(option);
	u64 real_start = start;
	u64 real_size = size;

	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

	/*
	 * We cannot modify page tables that we're currently running on,
	 * so we first need to switch to the "emergency" page tables where
	 * we can safely modify our primary page tables and then switch back
	 */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	/*
	 * Loop through the address range until we find a page granule that
	 * fits our alignment constraints, then set it to the new cache
	 * attributes
	 */
	while (size > 0) {
		int level;
		u64 r;

		for (level = 1; level < 4; level++) {
			r = set_one_region(start, size, attrs, level);
			if (r) {
				/* PTE successfully replaced */
				size -= r;
				start += r;
				break;
			}
		}
	}

	/* We're done modifying page tables, switch back to our primary ones */
	__asm_switch_ttbr(gd->arch.tlb_addr);

	/*
	 * Make sure there's nothing stale in dcache for a region that might
	 * have caches off now
	 */
	flush_dcache_range(real_start, real_start + real_size);
}
#endif

#else	/* CONFIG_SYS_DCACHE_OFF */

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif	/* CONFIG_SYS_DCACHE_OFF */

#ifndef CONFIG_SYS_ICACHE_OFF

void icache_enable(void)
{
	__asm_invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
}

#else	/* CONFIG_SYS_ICACHE_OFF */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif	/* CONFIG_SYS_ICACHE_OFF */

/*
 * Enable dCache & iCache; whether the caches actually get enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}