// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * (C) Copyright 2016
 * Alexander Graf <agraf@suse.de>
 */

#include <common.h>
#include <cpu_func.h>
#include <hang.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)

/*
 * With 4k page granule, a virtual address is split into 4 lookup parts
 * spanning 9 bits each:
 *
 *    _______________________________________________
 *   |       |       |       |       |       |       |
 *   |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *   |_______|_______|_______|_______|_______|_______|
 *     63-48   47-39   38-30   29-21   20-12   11-00
 *
 *             mask        page size
 *
 * Lv0: FF8000000000       --
 * Lv1:   7FC0000000       1G
 * Lv2:     3FE00000       2M
 * Lv3:       1FF000       4K
 * off:          FFF
 */
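/*
 * Worked example (illustrative): for the virtual address 0x8000_0000
 * (2 GiB) the lookup indices are Lv0 = 0, Lv1 = (0x8000_0000 >> 30) &
 * 0x1FF = 2, Lv2 = 0, Lv3 = 0 and off = 0, so a 1 GiB block mapping
 * covering this address lives in entry 2 of the level 1 table.
 */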

static int get_effective_el(void)
{
	int el = current_el();

	if (el == 2) {
		u64 hcr_el2;

		/*
		 * If we are using the EL2&0 translation regime, the TCR_EL2
		 * looks like the EL1 version, even though we are in EL2.
		 */
		__asm__ ("mrs %0, HCR_EL2\n" : "=r" (hcr_el2));
		if (hcr_el2 & BIT(HCR_EL2_E2H_BIT))
			return 1;
	}

	return el;
}

u64 get_tcr(u64 *pips, u64 *pva_bits)
{
	int el = get_effective_el();
	u64 max_addr = 0;
	u64 ips, va_bits;
	u64 tcr;
	int i;

	/* Find the largest address we need to support */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);

	/* Calculate the maximum physical (and thus virtual) address */
	if (max_addr > (1ULL << 44)) {
		ips = 5;
		va_bits = 48;
	} else if (max_addr > (1ULL << 42)) {
		ips = 4;
		va_bits = 44;
	} else if (max_addr > (1ULL << 40)) {
		ips = 3;
		va_bits = 42;
	} else if (max_addr > (1ULL << 36)) {
		ips = 2;
		va_bits = 40;
	} else if (max_addr > (1ULL << 32)) {
		ips = 1;
		va_bits = 36;
	} else {
		ips = 0;
		va_bits = 32;
	}

	if (el == 1) {
		tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
	} else if (el == 2) {
		tcr = TCR_EL2_RSVD | (ips << 16);
	} else {
		tcr = TCR_EL3_RSVD | (ips << 16);
	}

	/* PTWs cacheable, inner/outer WBWA and inner shareable */
	tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
	tcr |= TCR_T0SZ(va_bits);

	if (pips)
		*pips = ips;
	if (pva_bits)
		*pva_bits = va_bits;

	return tcr;
}
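/*
 * Worked example (illustrative): a mem_map whose highest mapping ends
 * at 8 GiB gives max_addr = 1ULL << 33, so ips = 1 (36-bit PA range)
 * and va_bits = 36; at EL1 the returned TCR then carries
 * T0SZ = 64 - 36 = 28 and IPS = 1, plus the 4K granule and WBWA
 * inner-shareable walk attributes set above.
 */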

#define MAX_PTE_ENTRIES 512

static int pte_type(u64 *pte)
{
	return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
	/* The page offset is 12 bits wide; every level translates 9 bits */
	return (12 + 9 * (3 - level));
}
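/*
 * For example: level 1 -> shift 30 (1 GiB blocks), level 2 -> shift 21
 * (2 MiB blocks), level 3 -> shift 12 (4 KiB pages).
 */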

static u64 *find_pte(u64 addr, int level)
{
	int start_level = 0;
	u64 *pte;
	u64 idx;
	u64 va_bits;
	int i;

	debug("addr=%llx level=%d\n", addr, level);

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	if (level < start_level)
		return NULL;

	/* Walk through all page table levels to find our PTE */
	pte = (u64 *)gd->arch.tlb_addr;
	for (i = start_level; i < 4; i++) {
		idx = (addr >> level2shift(i)) & 0x1FF;
		pte += idx;
		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

		/* Found it */
		if (i == level)
			return pte;
		/* PTE is not a table (either invalid or block), can't traverse */
		if (pte_type(pte) != PTE_TYPE_TABLE)
			return NULL;
		/* Off to the next level */
		pte = (u64 *)(*pte & 0x0000fffffffff000ULL);
	}

	/* Should never reach here */
	return NULL;
}

#ifdef CONFIG_CMO_BY_VA_ONLY
static void __cmo_on_leaves(void (*cmo_fn)(unsigned long, unsigned long),
			    u64 pte, int level, u64 base)
{
	u64 *ptep;
	int i;

	ptep = (u64 *)(pte & GENMASK_ULL(47, PAGE_SHIFT));
	for (i = 0; i < PAGE_SIZE / sizeof(u64); i++) {
		u64 end, va = base + i * BIT(level2shift(level));
		u64 type, attrs;

		pte = ptep[i];
		type = pte & PTE_TYPE_MASK;
		attrs = pte & PMD_ATTRINDX_MASK;
		debug("PTE %llx at level %d VA %llx\n", pte, level, va);

		/* Not valid? next! */
		if (!(type & PTE_TYPE_VALID))
			continue;

		/* Not a leaf? Recurse on the next level */
		if (!(type == PTE_TYPE_BLOCK ||
		      (level == 3 && type == PTE_TYPE_PAGE))) {
			__cmo_on_leaves(cmo_fn, pte, level + 1, va);
			continue;
		}

		/*
		 * From this point on, this must be a leaf.
		 *
		 * Start by excluding non-memory mappings
		 */
		if (attrs != PTE_BLOCK_MEMTYPE(MT_NORMAL) &&
		    attrs != PTE_BLOCK_MEMTYPE(MT_NORMAL_NC))
			continue;

		end = va + BIT(level2shift(level)) - 1;

		/* No intersection with RAM? */
		if (end < gd->ram_base ||
		    va >= (gd->ram_base + gd->ram_size))
			continue;

		/*
		 * OK, we have a partial RAM mapping. However, this
		 * can cover *more* than the RAM. Yes, u-boot is
		 * *that* braindead. Compute the intersection we care
		 * about, and not a byte more.
		 */
		va = max(va, (u64)gd->ram_base);
		end = min(end, gd->ram_base + gd->ram_size);

		debug("Flush PTE %llx at level %d: %llx-%llx\n",
		      pte, level, va, end);
		cmo_fn(va, end);
	}
}

static void apply_cmo_to_mappings(void (*cmo_fn)(unsigned long, unsigned long))
{
	u64 va_bits;
	int sl = 0;

	if (!gd->arch.tlb_addr)
		return;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		sl = 1;

	__cmo_on_leaves(cmo_fn, gd->arch.tlb_addr, sl, 0);
}
#else
static inline void apply_cmo_to_mappings(void *dummy) {}
#endif

/* Creates and returns a new full table (512 entries) */
static u64 *create_table(void)
{
	u64 *new_table = (u64 *)gd->arch.tlb_fillptr;
	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

	/* Allocate MAX_PTE_ENTRIES pte entries */
	gd->arch.tlb_fillptr += pt_len;

	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
		      "Please increase the size in get_page_table_size()",
		      gd->arch.tlb_fillptr - gd->arch.tlb_addr,
		      gd->arch.tlb_size);

	/* Mark all entries as invalid */
	memset(new_table, 0, pt_len);

	return new_table;
}

static void set_pte_table(u64 *pte, u64 *table)
{
	/* Point *pte to the new table */
	debug("Setting %p to addr=%p\n", pte, table);
	*pte = PTE_TYPE_TABLE | (ulong)table;
}

/* Splits a block PTE into a table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
	u64 old_pte = *pte;
	u64 *new_table;
	u64 i = 0;
	/* level describes the parent level, we need the child ones */
	int levelshift = level2shift(level + 1);

	if (pte_type(pte) != PTE_TYPE_BLOCK)
		panic("PTE %p (%llx) is not a block. Some driver code wants to "
		      "modify dcache settings for a range not covered in "
		      "mem_map.", pte, old_pte);

	new_table = create_table();
	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		new_table[i] = old_pte | (i << levelshift);

		/* Level 3 block PTEs have the table type */
		if ((level + 1) == 3)
			new_table[i] |= PTE_TYPE_TABLE;

		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
	}

	/* Set the new table into effect */
	set_pte_table(pte, new_table);
}
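/*
 * Example (illustrative): splitting a 1 GiB level 1 block yields a new
 * level 2 table whose 512 entries each map 2 MiB with the original
 * block's attributes, so the translation is unchanged until a caller
 * modifies one of the sub-entries.
 */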

/* Add one mm_region map entry to the page tables */
static void add_map(struct mm_region *map)
{
	u64 *pte;
	u64 virt = map->virt;
	u64 phys = map->phys;
	u64 size = map->size;
	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	u64 blocksize;
	int level;
	u64 *new_table;

	while (size) {
		pte = find_pte(virt, 0);
		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
			debug("Creating table for virt 0x%llx\n", virt);
			new_table = create_table();
			set_pte_table(pte, new_table);
		}

		for (level = 1; level < 4; level++) {
			pte = find_pte(virt, level);
			if (!pte)
				panic("pte not found\n");

			blocksize = 1ULL << level2shift(level);
			debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
			      virt, size, blocksize);
			if (size >= blocksize && !(virt & (blocksize - 1))) {
				/* Page fits, create block PTE */
				debug("Setting PTE %p to block virt=%llx\n",
				      pte, virt);
				if (level == 3)
					*pte = phys | attrs | PTE_TYPE_PAGE;
				else
					*pte = phys | attrs;
				virt += blocksize;
				phys += blocksize;
				size -= blocksize;
				break;
			} else if (pte_type(pte) == PTE_TYPE_FAULT) {
				/* Page doesn't fit, create subpages */
				debug("Creating subtable for virt 0x%llx blksize=%llx\n",
				      virt, blocksize);
				new_table = create_table();
				set_pte_table(pte, new_table);
			} else if (pte_type(pte) == PTE_TYPE_BLOCK) {
				debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n",
				      virt, blocksize);
				split_block(pte, level);
			}
		}
	}
}

enum pte_type {
	PTE_INVAL,
	PTE_BLOCK,
	PTE_LEVEL,
};

/*
 * This is a recursively called function to count the number of
 * page tables we need to cover a particular PTE range. If you
 * call this with level = -1 you basically get the full 48-bit
 * coverage.
 */
static int count_required_pts(u64 addr, int level, u64 maxaddr)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 levelmask = levelsize - 1;
	u64 levelend = addr + levelsize;
	int r = 0;
	int i;
	enum pte_type pte_type = PTE_INVAL;

	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
		struct mm_region *map = &mem_map[i];
		u64 start = map->virt;
		u64 end = start + map->size;

		/* Check if the PTE would overlap with the map */
		if (max(addr, start) <= min(levelend, end)) {
			start = max(addr, start);
			end = min(levelend, end);

			/* We need a sub-pt for this level */
			if ((start & levelmask) || (end & levelmask)) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* Lv0 cannot do block PTEs, so do levels here too */
			if (level <= 0) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* PTE is active, but fits into a block */
			pte_type = PTE_BLOCK;
		}
	}

	/*
	 * Block PTEs at this level are already covered by the parent page
	 * table, so we only need to count sub page tables.
	 */
	if (pte_type == PTE_LEVEL) {
		int sublevel = level + 1;
		u64 sublevelsize = 1ULL << level2shift(sublevel);

		/* Account for the new sub page table ... */
		r = 1;

		/* ... and for all child page tables that one might have */
		for (i = 0; i < MAX_PTE_ENTRIES; i++) {
			r += count_required_pts(addr, sublevel, maxaddr);
			addr += sublevelsize;

			if (addr >= maxaddr) {
				/*
				 * We reached the end of address space, no need
				 * to look any further.
				 */
				break;
			}
		}
	}

	return r;
}
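/*
 * Worked example (illustrative): with va_bits = 40 and a single 4 GiB
 * region mapped at 0, the level -1 call counts one level 0 table, the
 * level 0 recursion counts one level 1 table (level 0 cannot hold
 * blocks), and the four 1 GiB-aligned level 1 entries become block
 * PTEs needing no further tables: two tables in total.
 */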

/* Returns the estimated required size of all page tables */
__weak u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;
	u64 va_bits;
	int start_level = 0;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	/* Account for all page tables we would need to cover our memory map */
	size = one_pt * count_required_pts(0, start_level - 1, 1ULL << va_bits);

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
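/*
 * Sizing example (illustrative, continuing the two-table count above):
 * one_pt = 512 * 8 = 4 KiB, so size = 2 * 4 KiB, doubled for the
 * emergency copy to 16 KiB, plus 4 * 4 KiB of split slack = 32 KiB.
 */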

void setup_pgtables(void)
{
	int i;

	if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
		panic("Page table pointer not set up.");

	/*
	 * Allocate the first level we're on with invalidated entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
	 * Lv0 page table, otherwise it's the entry Lv1 page table.
	 */
	create_table();

	/* Now add all MMU table entries one after another to the table */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		add_map(&mem_map[i]);
}

static void setup_all_pgtables(void)
{
	u64 tlb_addr = gd->arch.tlb_addr;
	u64 tlb_size = gd->arch.tlb_size;

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
			     (uintptr_t)gd->arch.tlb_addr;
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	setup_pgtables();
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	gd->arch.tlb_addr = tlb_addr;
	gd->arch.tlb_size = tlb_size;
}
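/*
 * Illustrative layout: with tlb_size = 32 KiB and the primary tables
 * consuming the first 8 KiB, the emergency copy is built starting at
 * tlb_addr + 8 KiB and remembered in gd->arch.tlb_emerg, while
 * tlb_addr and tlb_size are restored to describe the primary set.
 */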

/* to activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
	int el;

	/* Set up page tables only once */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(NULL, NULL),
			  MEMORY_ATTRIBUTES);

	/* enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
#ifndef CONFIG_CMO_BY_VA_ONLY
	__asm_invalidate_dcache_all();
	__asm_invalidate_l3_dcache();
#else
	apply_cmo_to_mappings(invalidate_dcache_range);
#endif
}

/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using the stack.
 * __asm_flush_l3_dcache returns the timeout status.
 */
inline void flush_dcache_all(void)
{
#ifndef CONFIG_CMO_BY_VA_ONLY
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_dcache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("flushed dcache successfully.\n");
#else
	apply_cmo_to_mappings(flush_dcache_range);
#endif
}

#ifndef CONFIG_SYS_DISABLE_DCACHE_OPS
/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_invalidate_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}
#else
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}
#endif /* CONFIG_SYS_DISABLE_DCACHE_OPS */

void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	/* Set up page tables only once (this is also done by mmu_setup()) */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	set_sctlr(get_sctlr() | CR_C);
}

void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* if cache isn't enabled, no need to disable */
	if (!(sctlr & CR_C))
		return;

	if (IS_ENABLED(CONFIG_CMO_BY_VA_ONLY)) {
		/*
		 * When invalidating by VA, do it *before* turning the MMU
		 * off, so that at least our stack is coherent.
		 */
		flush_dcache_all();
	}

	set_sctlr(sctlr & ~(CR_C|CR_M));

	if (!IS_ENABLED(CONFIG_CMO_BY_VA_ONLY))
		flush_dcache_all();

	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");

	return NULL;
}

static bool is_aligned(u64 addr, u64 size, u64 align)
{
	return !(addr & (align - 1)) && !(size & (align - 1));
}

/* Use flag to indicate whether attrs carry more than d-cache attributes */
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 *pte = find_pte(start, level);

	/* Can we just modify the current level block PTE? */
	if (is_aligned(start, size, levelsize)) {
		if (flag) {
			*pte &= ~PMD_ATTRMASK;
			*pte |= attrs & PMD_ATTRMASK;
		} else {
			*pte &= ~PMD_ATTRINDX_MASK;
			*pte |= attrs & PMD_ATTRINDX_MASK;
		}
		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

		return levelsize;
	}

	/* Unaligned or doesn't fit, maybe split block into table */
	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

	/* Maybe we need to split the block into a table */
	if (pte_type(pte) == PTE_TYPE_BLOCK)
		split_block(pte, level);

	/* And then double-check it became a table or already is one */
	if (pte_type(pte) != PTE_TYPE_TABLE)
		panic("PTE %p (%llx) for addr=%llx should be a table",
		      pte, *pte, start);

	/* Roll on to the next page table level */
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 attrs = PMD_ATTRINDX(option >> 2);
	u64 real_start = start;
	u64 real_size = size;

	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

	if (!gd->arch.tlb_emerg)
		panic("Emergency page table not set up.");

	/*
	 * We cannot modify page tables that we're currently running on,
	 * so we first need to switch to the "emergency" page tables where
	 * we can safely modify our primary page tables and then switch back
	 */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	while (size > 0) {
		int level;
		u64 r;

		for (level = 1; level < 4; level++) {
			/* Set d-cache attributes only */
			r = set_one_region(start, size, attrs, false, level);
			if (r) {
				/* PTE successfully replaced */
				size -= r;
				start += r;
				break;
			}
		}
	}

	/* We're done modifying page tables, switch back to our primary ones */
	__asm_switch_ttbr(gd->arch.tlb_addr);

	/*
	 * Make sure there's nothing stale in dcache for a region that might
	 * have caches off now
	 */
	flush_dcache_range(real_start, real_start + real_size);
}
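/*
 * Usage sketch (illustrative; the buffer name and size are assumptions):
 * a driver sharing a 2 MiB aligned buffer with a device could drop
 * caching on it and later restore write-back behaviour:
 *
 *	mmu_set_region_dcache_behaviour((phys_addr_t)dma_buf, SZ_2M,
 *					DCACHE_OFF);
 *	... device accesses dma_buf ...
 *	mmu_set_region_dcache_behaviour((phys_addr_t)dma_buf, SZ_2M,
 *					DCACHE_WRITEBACK);
 *
 * A 2 MiB aligned region lets the loop above patch a single level 2
 * block PTE instead of splitting tables.
 */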

/*
 * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
 * The process is break-before-make: the target region will be marked as
 * invalid while it is being changed.
 */
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
{
	int level;
	u64 r, size, start;

	start = addr;
	size = siz;
	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to "invalid".
	 */
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to fault */
			r = set_one_region(start, size, PTE_TYPE_FAULT, true,
					   level);
			if (r) {
				/* PTE successfully invalidated */
				size -= r;
				start += r;
				break;
			}
		}
	}

	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	start = addr;
	size = siz;
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to new attributes */
			r = set_one_region(start, size, attrs, true, level);
			if (r) {
				/* PTE successfully updated */
				size -= r;
				start += r;
				break;
			}
		}
	}
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();
}
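/*
 * Usage sketch (illustrative; the region base and size are assumptions):
 * remap a hypothetical 1 MiB region as non-executable normal memory:
 *
 *	mmu_change_region_attr(base, SZ_1M,
 *			       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
 *			       PTE_BLOCK_INNER_SHARE |
 *			       PTE_BLOCK_PXN | PTE_BLOCK_UXN |
 *			       PTE_TYPE_VALID);
 */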

#else /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

/*
 * For SPL builds, we may want to not have dcache enabled. Any real U-Boot
 * run, however, really wants to have dcache and the MMU active. Check
 * that everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_SPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF)

void icache_enable(void)
{
	invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

int mmu_status(void)
{
	return (get_sctlr() & CR_M) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
	__asm_invalidate_l3_icache();
}

#else /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

int mmu_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

/*
 * Enable dcache & icache; whether the caches are actually enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}