// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * (C) Copyright 2016
 * Alexander Graf <agraf@suse.de>
 */

#include <cpu_func.h>
#include <hang.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <linux/errno.h>

DECLARE_GLOBAL_DATA_PTR;

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)

/*
 * With 4k page granule, a virtual address is split into 4 lookup parts
 * spanning 9 bits each:
 *
 *   _______________________________________________
 *  |       |       |       |       |       |       |
 *  |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *  |_______|_______|_______|_______|_______|_______|
 *   63-48   47-39   38-30   29-21   20-12   11-00
 *
 *             mask        page size
 *
 * Lv0: FF8000000000       --
 * Lv1:   7FC0000000       1G
 * Lv2:     3FE00000       2M
 * Lv3:       1FF000       4K
 * off:          FFF
 */
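
/*
 * Worked example (illustrative, not from the original source): the address
 * 0x40200000 decomposes into the lookup indices
 *   Lv0 = (0x40200000 >> 39) & 0x1FF = 0
 *   Lv1 = (0x40200000 >> 30) & 0x1FF = 1	(1G slot 1)
 *   Lv2 = (0x40200000 >> 21) & 0x1FF = 1	(2M slot 1)
 *   Lv3 = (0x40200000 >> 12) & 0x1FF = 0
 *   off =  0x40200000 & 0xFFF           = 0
 */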

static int get_effective_el(void)
{
	int el = current_el();

	if (el == 2) {
		u64 hcr_el2;

		/*
		 * If we are using the EL2&0 translation regime, the TCR_EL2
		 * looks like the EL1 version, even though we are in EL2.
		 */
		__asm__ ("mrs %0, HCR_EL2\n" : "=r" (hcr_el2));
		if (hcr_el2 & BIT(HCR_EL2_E2H_BIT))
			return 1;
	}

	return el;
}

u64 get_tcr(u64 *pips, u64 *pva_bits)
{
	int el = get_effective_el();
	u64 max_addr = 0;
	u64 ips, va_bits;
	u64 tcr;
	int i;

	/* Find the largest address we need to support */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);

	/* Calculate the maximum physical (and thus virtual) address */
	if (max_addr > (1ULL << 44)) {
		ips = 5;
		va_bits = 48;
	} else if (max_addr > (1ULL << 42)) {
		ips = 4;
		va_bits = 44;
	} else if (max_addr > (1ULL << 40)) {
		ips = 3;
		va_bits = 42;
	} else if (max_addr > (1ULL << 36)) {
		ips = 2;
		va_bits = 40;
	} else if (max_addr > (1ULL << 32)) {
		ips = 1;
		va_bits = 36;
	} else {
		ips = 0;
		va_bits = 32;
	}

	if (el == 1) {
		tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
	} else if (el == 2) {
		tcr = TCR_EL2_RSVD | (ips << 16);
	} else {
		tcr = TCR_EL3_RSVD | (ips << 16);
	}

	/* PTWs cacheable, inner/outer WBWA and inner shareable */
	tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
	tcr |= TCR_T0SZ(va_bits);

	if (pips)
		*pips = ips;
	if (pva_bits)
		*pva_bits = va_bits;

	return tcr;
}
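
/*
 * Worked example (illustrative, not from the original source): a board whose
 * largest mem_map entry ends at 0x2_0000_0000 (8GiB) has max_addr above
 * 1ULL << 32 but not above 1ULL << 36, so it gets ips = 1 and va_bits = 36,
 * i.e. a 36-bit VA space whose page tables start at level 1.
 */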

#define MAX_PTE_ENTRIES 512

static int pte_type(u64 *pte)
{
	return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
	/* Page is 12 bits wide, every level translates 9 bits */
	return (12 + 9 * (3 - level));
}
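
/*
 * For reference, level2shift() gives: level 0 -> shift 39 (512G entries),
 * level 1 -> shift 30 (1G), level 2 -> shift 21 (2M), level 3 -> shift 12
 * (4K pages).
 */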

static u64 *find_pte(u64 addr, int level)
{
	int start_level = 0;
	u64 *pte;
	u64 idx;
	u64 va_bits;
	int i;

	debug("addr=%llx level=%d\n", addr, level);

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	if (level < start_level)
		return NULL;

	/* Walk through all page table levels to find our PTE */
	pte = (u64*)gd->arch.tlb_addr;
	for (i = start_level; i < 4; i++) {
		idx = (addr >> level2shift(i)) & 0x1FF;
		pte += idx;
		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

		/* Found it */
		if (i == level)
			return pte;
		/* PTE is not a table (either invalid or block), can't traverse */
		if (pte_type(pte) != PTE_TYPE_TABLE)
			return NULL;
		/* Off to the next level */
		pte = (u64*)(*pte & 0x0000fffffffff000ULL);
	}

	/* Should never reach here */
	return NULL;
}

#ifdef CONFIG_CMO_BY_VA_ONLY
static void __cmo_on_leaves(void (*cmo_fn)(unsigned long, unsigned long),
			    u64 pte, int level, u64 base)
{
	u64 *ptep;
	int i;

	ptep = (u64 *)(pte & GENMASK_ULL(47, PAGE_SHIFT));
	for (i = 0; i < PAGE_SIZE / sizeof(u64); i++) {
		u64 end, va = base + i * BIT(level2shift(level));
		u64 type, attrs;

		pte = ptep[i];
		type = pte & PTE_TYPE_MASK;
		attrs = pte & PMD_ATTRINDX_MASK;
		debug("PTE %llx at level %d VA %llx\n", pte, level, va);

		/* Not valid? next! */
		if (!(type & PTE_TYPE_VALID))
			continue;

		/* Not a leaf? Recurse on the next level */
		if (!(type == PTE_TYPE_BLOCK ||
		      (level == 3 && type == PTE_TYPE_PAGE))) {
			__cmo_on_leaves(cmo_fn, pte, level + 1, va);
			continue;
		}

		/*
		 * From this point, this must be a leaf.
		 *
		 * Start excluding non-memory mappings
		 */
		if (attrs != PTE_BLOCK_MEMTYPE(MT_NORMAL) &&
		    attrs != PTE_BLOCK_MEMTYPE(MT_NORMAL_NC))
			continue;

		end = va + BIT(level2shift(level)) - 1;

		/* No intersection with RAM? */
		if (end < gd->ram_base ||
		    va >= (gd->ram_base + gd->ram_size))
			continue;

		/*
		 * OK, we have a partial RAM mapping. However, this
		 * can cover *more* than the RAM. Yes, u-boot is
		 * *that* braindead. Compute the intersection we care
		 * about, and not a byte more.
		 */
		va = max(va, (u64)gd->ram_base);
		end = min(end, gd->ram_base + gd->ram_size);

		debug("Flush PTE %llx at level %d: %llx-%llx\n",
		      pte, level, va, end);
		cmo_fn(va, end);
	}
}

static void apply_cmo_to_mappings(void (*cmo_fn)(unsigned long, unsigned long))
{
	u64 va_bits;
	int sl = 0;

	if (!gd->arch.tlb_addr)
		return;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		sl = 1;

	__cmo_on_leaves(cmo_fn, gd->arch.tlb_addr, sl, 0);
}
#else
static inline void apply_cmo_to_mappings(void *dummy) {}
#endif

/* Returns and creates a new full table (512 entries) */
static u64 *create_table(void)
{
	u64 *new_table = (u64*)gd->arch.tlb_fillptr;
	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

	/* Allocate MAX_PTE_ENTRIES pte entries */
	gd->arch.tlb_fillptr += pt_len;

	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
		      "Please increase the size in get_page_table_size()",
		      gd->arch.tlb_fillptr - gd->arch.tlb_addr,
		      gd->arch.tlb_size);

	/* Mark all entries as invalid */
	memset(new_table, 0, pt_len);

	return new_table;
}

static void set_pte_table(u64 *pte, u64 *table)
{
	/* Point *pte to the new table */
	debug("Setting %p to addr=%p\n", pte, table);
	*pte = PTE_TYPE_TABLE | (ulong)table;
}

/* Splits a block PTE into a table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
	u64 old_pte = *pte;
	u64 *new_table;
	u64 i = 0;
	/* level describes the parent level, we need the child ones */
	int levelshift = level2shift(level + 1);

	if (pte_type(pte) != PTE_TYPE_BLOCK)
		panic("PTE %p (%llx) is not a block. Some driver code wants to "
		      "modify dcache settings for a range not covered in "
		      "mem_map.", pte, old_pte);

	new_table = create_table();
	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		new_table[i] = old_pte | (i << levelshift);

		/* Level 3 block PTEs have the table type */
		if ((level + 1) == 3)
			new_table[i] |= PTE_TYPE_TABLE;

		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
	}

	/* Set the new table into effect */
	set_pte_table(pte, new_table);
}
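
/*
 * Illustrative example (not from the original source): splitting a 1G block
 * PTE at level 1 allocates one new table of 512 level-2 entries, each a 2M
 * block covering one 2M slice of the old 1G mapping with identical
 * attributes; the caller then modifies only the entries that need
 * different attributes.
 */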

static void map_range(u64 virt, u64 phys, u64 size, int level,
		      u64 *table, u64 attrs)
{
	u64 map_size = BIT_ULL(level2shift(level));
	int i, idx;

	idx = (virt >> level2shift(level)) & (MAX_PTE_ENTRIES - 1);
	for (i = idx; size; i++) {
		u64 next_size, *next_table;

		if (level >= 1 &&
		    size >= map_size && !(virt & (map_size - 1))) {
			if (level == 3)
				table[i] = phys | attrs | PTE_TYPE_PAGE;
			else
				table[i] = phys | attrs;

			virt += map_size;
			phys += map_size;
			size -= map_size;

			continue;
		}

		/* Going one level down */
		if (pte_type(&table[i]) == PTE_TYPE_FAULT)
			set_pte_table(&table[i], create_table());
		else if (pte_type(&table[i]) != PTE_TYPE_TABLE)
			split_block(&table[i], level);

		next_table = (u64 *)(table[i] & GENMASK_ULL(47, PAGE_SHIFT));
		next_size = min(map_size - (virt & (map_size - 1)), size);

		map_range(virt, phys, next_size, level + 1, next_table, attrs);

		virt += next_size;
		phys += next_size;
		size -= next_size;
	}
}

void mmu_map_region(phys_addr_t addr, u64 size, bool emergency)
{
	u64 va_bits;
	int level = 0;
	u64 attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_INNER_SHARE;

	attrs |= PTE_TYPE_BLOCK | PTE_BLOCK_AF;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		level = 1;

	if (emergency)
		map_range(addr, addr, size, level,
			  (u64 *)gd->arch.tlb_emerg, attrs);

	/* Switch pagetables while we update the primary one */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	map_range(addr, addr, size, level,
		  (u64 *)gd->arch.tlb_addr, attrs);

	__asm_switch_ttbr(gd->arch.tlb_addr);
}
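
/*
 * Usage sketch (illustrative; the address and size are made up): to add an
 * identity mapping for a 2M region at 0x80000000 to both the primary and
 * the emergency tables, a caller could do
 *
 *	mmu_map_region(0x80000000, SZ_2M, true);
 *
 * With emergency == false only the primary tables are updated.
 */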

static void add_map(struct mm_region *map)
{
	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	u64 va_bits;
	int level = 0;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		level = 1;

	map_range(map->virt, map->phys, map->size, level,
		  (u64 *)gd->arch.tlb_addr, attrs);
}

static void count_range(u64 virt, u64 size, int level, int *cntp)
{
	u64 map_size = BIT_ULL(level2shift(level));
	int i, idx;

	idx = (virt >> level2shift(level)) & (MAX_PTE_ENTRIES - 1);
	for (i = idx; size; i++) {
		u64 next_size;

		if (level >= 1 &&
		    size >= map_size && !(virt & (map_size - 1))) {
			virt += map_size;
			size -= map_size;

			continue;
		}

		/* Going one level down */
		(*cntp)++;
		next_size = min(map_size - (virt & (map_size - 1)), size);

		count_range(virt, next_size, level + 1, cntp);

		virt += next_size;
		size -= next_size;
	}
}

static int count_ranges(void)
{
	int i, count = 0, level = 0;
	u64 va_bits;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		level = 1;

	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		count_range(mem_map[i].virt, mem_map[i].size, level, &count);

	return count;
}

#define ALL_ATTRS (3 << 8 | PMD_ATTRMASK)
#define PTE_IS_TABLE(pte, level) (pte_type(&(pte)) == PTE_TYPE_TABLE && (level) < 3)

enum walker_state {
	WALKER_STATE_START = 0,
	WALKER_STATE_TABLE,
	WALKER_STATE_REGION, /* block or page, depending on level */
};

/**
 * __pagetable_walk() - Walk through the pagetable and call cb() for each memory region
 *
 * This is a software implementation of the ARMv8-A MMU translation table walk, as per
 * section D5.4 of the ARMv8-A Architecture Reference Manual. It recursively walks the
 * 4 or 3 levels of the page table and calls the callback function for each discrete
 * region of memory (that being the discovery of a new table, a collection of blocks
 * with the same attributes, or of pages with the same attributes).
 *
 * U-Boot picks the smallest number of virtual address (VA) bits that it can based on the
 * memory map configured by the board. If this is less than 39 then the MMU will only use
 * 3 levels of translation instead of 4 - skipping level 0.
 *
 * Each level has 512 entries of 64-bits each. Each entry includes attribute bits and
 * an address. When the attribute bits indicate a table, the address is the physical
 * address of the table, so we can recursively call __pagetable_walk() on it (after calling
 * @cb). If instead they indicate a block or page, we record the start address and attributes
 * and continue walking until we find a region with different attributes, or the end of the
 * table; in either case we call @cb with the start and end address of the region.
 *
 * This approach can be used to fully emulate the MMU's translation table walk, as per
 * Figure D5-25 of the ARMv8-A Architecture Reference Manual.
 *
 * @addr: The address of the table to walk
 * @tcr: The TCR register value
 * @level: The current level of the table
 * @cb: The callback function to call for each region
 * @priv: Private data to pass to the callback function
 */
static void __pagetable_walk(u64 addr, u64 tcr, int level, pte_walker_cb_t cb, void *priv)
{
	u64 *table = (u64 *)addr;
	u64 attrs, last_attrs = 0, last_addr = 0, entry_start = 0;
	int i;
	u64 va_bits = 64 - (tcr & (BIT(6) - 1));
	static enum walker_state state[4] = { 0 };
	static bool exit;

	if (!level) {
		exit = false;
		if (va_bits < 39)
			level = 1;
	}

	state[level] = WALKER_STATE_START;

	/* Walk through the table entries */
	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		u64 pte = table[i];
		u64 _addr = pte & GENMASK_ULL(va_bits, PAGE_SHIFT);

		if (exit)
			return;

		if (pte_type(&pte) == PTE_TYPE_FAULT)
			continue;

		attrs = pte & ALL_ATTRS;
		/* If we're currently inside a block or set of pages */
		if (state[level] > WALKER_STATE_START && state[level] != WALKER_STATE_TABLE) {
			/*
			 * Continue walking if this entry has the same attributes as the last and
			 * is one page/block away -- it's a contiguous region.
			 */
			if (attrs == last_attrs && _addr == last_addr + (1 << level2shift(level))) {
				last_attrs = attrs;
				last_addr = _addr;
				continue;
			} else {
				/* We either hit a table or a new region */
				exit = cb(entry_start, last_addr + (1 << level2shift(level)),
					  va_bits, level, priv);
				if (exit)
					return;
				state[level] = WALKER_STATE_START;
			}
		}
		last_attrs = attrs;
		last_addr = _addr;

		if (PTE_IS_TABLE(pte, level)) {
			/* After the end of the table might be corrupted data */
			if (!_addr || (pte & 0xfff) > 0x3ff)
				return;
			state[level] = WALKER_STATE_TABLE;
			/* Signify the start of a table */
			exit = cb(pte, 0, va_bits, level, priv);
			if (exit)
				return;

			/* Go down a level */
			__pagetable_walk(_addr, tcr, level + 1, cb, priv);
			state[level] = WALKER_STATE_START;
		} else if (pte_type(&pte) == PTE_TYPE_BLOCK || pte_type(&pte) == PTE_TYPE_PAGE) {
			/* We found a block or page, start walking */
			entry_start = pte;
			state[level] = WALKER_STATE_REGION;
		}
	}

	if (state[level] > WALKER_STATE_START)
		exit = cb(entry_start, last_addr + (1 << level2shift(level)), va_bits, level, priv);
}

static void pretty_print_pte_type(u64 pte)
{
	switch (pte_type(&pte)) {
	case PTE_TYPE_FAULT:
		printf(" %-5s", "Fault");
		break;
	case PTE_TYPE_BLOCK:
		printf(" %-5s", "Block");
		break;
	case PTE_TYPE_PAGE:
		printf(" %-5s", "Pages");
		break;
	default:
		printf(" %-5s", "Unk");
	}
}

static void pretty_print_table_attrs(u64 pte)
{
	int ap = (pte & PTE_TABLE_AP) >> 61;

	printf(" | %2s %10s",
	       (ap & 2) ? "RO" : "",
	       (ap & 1) ? "!EL0" : "");
	printf(" | %3s %2s %2s",
	       (pte & PTE_TABLE_PXN) ? "PXN" : "",
	       (pte & PTE_TABLE_XN) ? "XN" : "",
	       (pte & PTE_TABLE_NS) ? "NS" : "");
}

static void pretty_print_block_attrs(u64 pte)
{
	u64 attrs = pte & PMD_ATTRINDX_MASK;
	u64 perm_attrs = pte & PMD_ATTRMASK;
	char mem_attrs[16] = { 0 };
	int cnt = 0;

	if (perm_attrs & PTE_BLOCK_PXN)
		cnt += snprintf(mem_attrs + cnt, sizeof(mem_attrs) - cnt, "PXN ");
	if (perm_attrs & PTE_BLOCK_UXN)
		cnt += snprintf(mem_attrs + cnt, sizeof(mem_attrs) - cnt, "UXN ");
	if (perm_attrs & PTE_BLOCK_RO)
		cnt += snprintf(mem_attrs + cnt, sizeof(mem_attrs) - cnt, "RO");
	if (!mem_attrs[0])
		snprintf(mem_attrs, sizeof(mem_attrs), "RWX ");

	printf(" | %-10s", mem_attrs);

	switch (attrs) {
	case PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE):
		printf(" | %-13s", "Device-nGnRnE");
		break;
	case PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRE):
		printf(" | %-13s", "Device-nGnRE");
		break;
	case PTE_BLOCK_MEMTYPE(MT_DEVICE_GRE):
		printf(" | %-13s", "Device-GRE");
		break;
	case PTE_BLOCK_MEMTYPE(MT_NORMAL_NC):
		printf(" | %-13s", "Normal-NC");
		break;
	case PTE_BLOCK_MEMTYPE(MT_NORMAL):
		printf(" | %-13s", "Normal");
		break;
	default:
		printf(" | %-13s", "Unknown");
	}
}

static void pretty_print_block_memtype(u64 pte)
{
	u64 share = pte & (3 << 8);

	switch (share) {
	case PTE_BLOCK_NON_SHARE:
		printf(" | %-16s", "Non-shareable");
		break;
	case PTE_BLOCK_OUTER_SHARE:
		printf(" | %-16s", "Outer-shareable");
		break;
	case PTE_BLOCK_INNER_SHARE:
		printf(" | %-16s", "Inner-shareable");
		break;
	default:
		printf(" | %-16s", "Unknown");
	}
}

static void print_pte(u64 pte, int level)
{
	if (PTE_IS_TABLE(pte, level)) {
		printf(" %-5s", "Table");
		printf(" %-12s", "|");
		pretty_print_table_attrs(pte);
	} else {
		pretty_print_pte_type(pte);
		pretty_print_block_attrs(pte);
		pretty_print_block_memtype(pte);
	}
	printf("\n");
}

/**
 * pagetable_print_entry() - Callback function to print a single pagetable region
 *
 * This is the default callback used by @dump_pagetable(). It does some basic pretty
 * printing (see example in the U-Boot arm64 documentation). It can be replaced by
 * a custom callback function if more detailed information is needed.
 *
 * @start_attrs: The start address and attributes of the region (or table address)
 * @end: The end address of the region (or 0 if it's a table)
 * @va_bits: The number of bits used for the virtual address
 * @level: The level of the region
 * @priv: Private data for the callback (unused)
 */
static bool pagetable_print_entry(u64 start_attrs, u64 end, int va_bits, int level, void *priv)
{
	u64 _addr = start_attrs & GENMASK_ULL(va_bits, PAGE_SHIFT);
	int indent = va_bits < 39 ? level - 1 : level;

	printf("%*s", indent * 2, "");
	if (PTE_IS_TABLE(start_attrs, level))
		printf("[%#016llx]%19s", _addr, "");
	else
		printf("[%#016llx - %#016llx]", _addr, end);

	printf("%*s | ", (3 - level) * 2, "");
	print_pte(start_attrs, level);

	return false;
}

void walk_pagetable(u64 ttbr, u64 tcr, pte_walker_cb_t cb, void *priv)
{
	__pagetable_walk(ttbr, tcr, 0, cb, priv);
}

void dump_pagetable(u64 ttbr, u64 tcr)
{
	u64 va_bits = 64 - (tcr & (BIT(6) - 1));

	printf("Walking pagetable at %p, va_bits: %lld. Using %d levels\n", (void *)ttbr,
	       va_bits, va_bits < 39 ? 3 : 4);
	walk_pagetable(ttbr, tcr, pagetable_print_entry, NULL);
}
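
/*
 * Illustrative custom callback (a sketch, not part of the original source):
 * a walker callback that only counts leaf regions could look like
 *
 *	static bool count_leaves(u64 start_attrs, u64 end, int va_bits,
 *				 int level, void *priv)
 *	{
 *		int *count = priv;
 *
 *		// end == 0 marks the start of a table, not a leaf region
 *		if (end)
 *			(*count)++;
 *		return false;	// keep walking
 *	}
 *
 * and would be driven with
 *
 *	walk_pagetable(gd->arch.tlb_addr, get_tcr(NULL, NULL),
 *		       count_leaves, &count);
 */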

/* Returns the estimated required size of all page tables */
__weak u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size;

	/* Account for all page tables we would need to cover our memory map */
	size = one_pt * count_ranges();

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
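
/*
 * Worked example (illustrative numbers): with one_pt = 512 * 8 = 4KiB and a
 * memory map for which count_ranges() returns 10, the estimate is
 * 10 * 4KiB * 2 + 4 * 4KiB = 96KiB of page table memory.
 */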

void setup_pgtables(void)
{
	int i;

	if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
		panic("Page table pointer not setup.");

	/*
	 * Allocate the first level we're on with invalid entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
	 * Lv0 page table, otherwise it's the entry Lv1 page table.
	 */
	create_table();

	/* Now add all MMU table entries one after another to the table */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		add_map(&mem_map[i]);
}

static void setup_all_pgtables(void)
{
	u64 tlb_addr = gd->arch.tlb_addr;
	u64 tlb_size = gd->arch.tlb_size;

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
			     (uintptr_t)gd->arch.tlb_addr;
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	setup_pgtables();
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	gd->arch.tlb_addr = tlb_addr;
	gd->arch.tlb_size = tlb_size;
}

/* to activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
	int el;

	/* Set up page tables only once */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(NULL, NULL),
			  MEMORY_ATTRIBUTES);

	/* enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
#ifndef CONFIG_CMO_BY_VA_ONLY
	__asm_invalidate_dcache_all();
	__asm_invalidate_l3_dcache();
#else
	apply_cmo_to_mappings(invalidate_dcache_range);
#endif
}

/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using stack.
 * __asm_flush_l3_dcache returns the status of the timeout
 */
inline void flush_dcache_all(void)
{
#ifndef CONFIG_CMO_BY_VA_ONLY
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_dcache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("flushing dcache successfully.\n");
#else
	apply_cmo_to_mappings(flush_dcache_range);
#endif
}

#ifndef CONFIG_SYS_DISABLE_DCACHE_OPS
/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_invalidate_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}
#else
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}
#endif /* CONFIG_SYS_DISABLE_DCACHE_OPS */

void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	/* Set up page tables only once (it is done also by mmu_setup()) */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	set_sctlr(get_sctlr() | CR_C);
}

void dcache_disable(void)
{
	unsigned long sctlr;

	sctlr = get_sctlr();

	/* if cache isn't enabled no need to disable */
	if (!(sctlr & CR_C))
		return;

	if (IS_ENABLED(CONFIG_CMO_BY_VA_ONLY)) {
		/*
		 * When invalidating by VA, do it *before* turning the MMU
		 * off, so that at least our stack is coherent.
		 */
		flush_dcache_all();
	}

	set_sctlr(sctlr & ~(CR_C|CR_M));

	if (!IS_ENABLED(CONFIG_CMO_BY_VA_ONLY))
		flush_dcache_all();

	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void) {
	puts("No page table offset defined\n");

	return NULL;
}

static bool is_aligned(u64 addr, u64 size, u64 align)
{
	return !(addr & (align - 1)) && !(size & (align - 1));
}

/* Use flag to indicate if attrs has more than d-cache attributes */
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 *pte = find_pte(start, level);

	/* Can we just modify the current level block PTE? */
	if (is_aligned(start, size, levelsize)) {
		if (flag) {
			*pte &= ~PMD_ATTRMASK;
			*pte |= attrs & PMD_ATTRMASK;
		} else {
			*pte &= ~PMD_ATTRINDX_MASK;
			*pte |= attrs & PMD_ATTRINDX_MASK;
		}
		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

		return levelsize;
	}

	/* Unaligned or doesn't fit, maybe split block into table */
	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

	/* Maybe we need to split the block into a table */
	if (pte_type(pte) == PTE_TYPE_BLOCK)
		split_block(pte, level);

	/* And then double-check it became a table or already is one */
	if (pte_type(pte) != PTE_TYPE_TABLE)
		panic("PTE %p (%llx) for addr=%llx should be a table",
		      pte, *pte, start);

	/* Roll on to the next page table level */
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 attrs = PMD_ATTRINDX(option >> 2);
	u64 real_start = start;
	u64 real_size = size;

	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

	if (!gd->arch.tlb_emerg)
		panic("Emergency page table not setup.");

	/*
	 * We can not modify page tables that we're currently running on,
	 * so we first need to switch to the "emergency" page tables where
	 * we can safely modify our primary page tables and then switch back
	 */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	while (size > 0) {
		int level;
		u64 r;

		for (level = 1; level < 4; level++) {
			/* Set d-cache attributes only */
			r = set_one_region(start, size, attrs, false, level);
			if (r) {
				/* PTE successfully replaced */
				size -= r;
				start += r;
				break;
			}
		}
	}

	/* We're done modifying page tables, switch back to our primary ones */
	__asm_switch_ttbr(gd->arch.tlb_addr);

	/*
	 * Make sure there's nothing stale in dcache for a region that might
	 * have caches off now
	 */
	flush_dcache_range(real_start, real_start + real_size);
}
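
/*
 * Usage sketch (illustrative; the address and size are made up): a driver
 * that needs an uncached 2M buffer at 0x90000000 could use
 *
 *	mmu_set_region_dcache_behaviour(0x90000000, SZ_2M, DCACHE_OFF);
 *
 * where DCACHE_OFF is one of the enum dcache_option values declared in
 * asm/system.h.
 */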

void mmu_change_region_attr_nobreak(phys_addr_t addr, size_t siz, u64 attrs)
{
	int level;
	u64 r, size, start;

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints and set the new permissions
	 */
	start = addr;
	size = siz;
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to new attributes */
			r = set_one_region(start, size, attrs, true, level);
			if (r) {
				/* PTE successfully updated */
				size -= r;
				start += r;
				break;
			}
		}
	}
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();
}

/*
 * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
 * The process is break-before-make. The target region will be marked as
 * invalid during the process of changing.
 */
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
{
	int level;
	u64 r, size, start;

	start = addr;
	size = siz;
	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to "invalid".
	 */
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to fault */
			r = set_one_region(start, size, PTE_TYPE_FAULT, true,
					   level);
			if (r) {
				/* PTE successfully invalidated */
				size -= r;
				start += r;
				break;
			}
		}
	}

	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();

	mmu_change_region_attr_nobreak(addr, siz, attrs);
}

int pgprot_set_attrs(phys_addr_t addr, size_t size, enum pgprot_attrs perm)
{
	u64 attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_INNER_SHARE | PTE_TYPE_VALID;

	switch (perm) {
	case MMU_ATTR_RO:
		attrs |= PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_RO;
		break;
	case MMU_ATTR_RX:
		attrs |= PTE_BLOCK_RO;
		break;
	case MMU_ATTR_RW:
		attrs |= PTE_BLOCK_PXN | PTE_BLOCK_UXN;
		break;
	default:
		log_err("Unknown attribute %d\n", perm);
		return -EINVAL;
	}

	mmu_change_region_attr_nobreak(addr, size, attrs);

	return 0;
}
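
/*
 * Usage sketch (illustrative; the symbols code_start/code_size are made up):
 * to protect a region holding code, e.g. after relocation, a caller could do
 *
 *	pgprot_set_attrs(code_start, code_size, MMU_ATTR_RX);
 *
 * which leaves the region executable but read-only, without
 * break-before-make.
 */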

#else	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

/*
 * For SPL builds, we may want to not have dcache enabled. Any real U-Boot
 * running however really wants to have dcache and the MMU active. Check that
 * everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_XPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF)

void icache_enable(void)
{
	invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

int mmu_status(void)
{
	return (get_sctlr() & CR_M) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
	__asm_invalidate_l3_icache();
}

#else	/* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

int mmu_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

/*
 * Enable dCache & iCache; whether a cache is actually enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}

void arch_dump_mem_attrs(void)
{
	dump_pagetable(gd->arch.tlb_addr, get_tcr(NULL, NULL));
}