/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_SYS_DCACHE_OFF

#ifdef CONFIG_SYS_FULL_VA
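/*
 * Install a level 1 table descriptor at @index that points at the
 * level 2 table whose address is given in @ptl2_entry.
 */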
static void set_ptl1_entry(u64 index, u64 ptl2_entry)
{
	u64 *pgd = (u64 *)gd->arch.tlb_addr;
	u64 value;

	value = ptl2_entry | PTL1_TYPE_TABLE;
	pgd[index] = value;
}

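/*
 * Install a level 2 block descriptor at index @bfn in the level 2 table
 * at @ptl1, mapping physical @address with the attributes @memory_attrs.
 */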
static void set_ptl2_block(u64 ptl1, u64 bfn, u64 address, u64 memory_attrs)
{
	u64 *pmd = (u64 *)ptl1;
	u64 value;

	value = address | PTL2_TYPE_BLOCK | PTL2_BLOCK_AF;
	value |= memory_attrs;
	pmd[bfn] = value;
}

static struct mm_region mem_map[] = CONFIG_SYS_MEM_MAP;

#define PTL1_ENTRIES CONFIG_SYS_PTL1_ENTRIES
#define PTL2_ENTRIES CONFIG_SYS_PTL2_ENTRIES

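/*
 * Build the two-level translation tables from mem_map: every level 1
 * entry whose covered range starts a region in mem_map gets a level 2
 * table filled with block descriptors; all other level 1 entries are
 * left invalid.
 */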
static void setup_pgtables(void)
{
	int l1_e, l2_e;
	unsigned long pmd = 0;
	unsigned long address;

	/* Setup the PMD pointers */
	for (l1_e = 0; l1_e < CONFIG_SYS_MEM_MAP_SIZE; l1_e++) {
		gd->arch.pmd_addr[l1_e] = gd->arch.tlb_addr +
						PTL1_ENTRIES * sizeof(u64);
		gd->arch.pmd_addr[l1_e] += PTL2_ENTRIES * sizeof(u64) * l1_e;
		gd->arch.pmd_addr[l1_e] = ALIGN(gd->arch.pmd_addr[l1_e],
						0x10000UL);
	}

	/* Setup the page tables */
	for (l1_e = 0; l1_e < PTL1_ENTRIES; l1_e++) {
		if (mem_map[pmd].base ==
			(uintptr_t)l1_e << PTL2_BITS) {
			set_ptl1_entry(l1_e, gd->arch.pmd_addr[pmd]);

			for (l2_e = 0; l2_e < PTL2_ENTRIES; l2_e++) {
				address = mem_map[pmd].base
					+ (uintptr_t)l2_e * BLOCK_SIZE;
				set_ptl2_block(gd->arch.pmd_addr[pmd], l2_e,
					       address, mem_map[pmd].attrs);
			}

			pmd++;
		} else {
			set_ptl1_entry(l1_e, 0);
		}
	}
}

#else

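/*
 * Write a section (block) descriptor for @section into @page_table at
 * @index, using MAIR index @memory_type plus the extra @attribute bits.
 */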
inline void set_pgtable_section(u64 *page_table, u64 index, u64 section,
				u64 memory_type, u64 attribute)
{
	u64 value;

	value = section | PMD_TYPE_SECT | PMD_SECT_AF;
	value |= PMD_ATTRINDX(memory_type);
	value |= attribute;
	page_table[index] = value;
}

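/*
 * Install a table descriptor in @page_table at @index, pointing at the
 * next-level table @table_addr.
 */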
inline void set_pgtable_table(u64 *page_table, u64 index, u64 *table_addr)
{
	u64 value;

	value = (u64)table_addr | PMD_TYPE_TABLE;
	page_table[index] = value;
}
#endif

/* to activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
#ifndef CONFIG_SYS_FULL_VA
	bd_t *bd = gd->bd;
	u64 *page_table = (u64 *)gd->arch.tlb_addr, i, j;
#endif
	int el;

#ifdef CONFIG_SYS_FULL_VA
	unsigned long coreid = read_mpidr() & CONFIG_COREID_MASK;

	/* Set up page tables only on BSP */
	if (coreid == BSP_COREID)
		setup_pgtables();
#else
	/* Setup an identity-mapping for all spaces */
	for (i = 0; i < (PGTABLE_SIZE >> 3); i++) {
		set_pgtable_section(page_table, i, i << SECTION_SHIFT,
				    MT_DEVICE_NGNRNE, PMD_SECT_NON_SHARE);
	}

	/* Setup an identity-mapping for all RAM space */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		ulong start = bd->bi_dram[i].start;
		ulong end = bd->bi_dram[i].start + bd->bi_dram[i].size;
		for (j = start >> SECTION_SHIFT;
		     j < end >> SECTION_SHIFT; j++) {
			set_pgtable_section(page_table, j, j << SECTION_SHIFT,
					    MT_NORMAL, PMD_SECT_NON_SHARE);
		}
	}

#endif
	/* load TTBR0, TCR and MAIR for the current exception level */
	el = current_el();
	if (el == 1) {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL1_RSVD | TCR_FLAGS | TCR_EL1_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	} else if (el == 2) {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL2_RSVD | TCR_FLAGS | TCR_EL2_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	} else {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL3_RSVD | TCR_FLAGS | TCR_EL3_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	}
	/* enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
}

/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using the stack.
 * __asm_flush_l3_cache returns the timeout status.
 */
inline void flush_dcache_all(void)
{
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_cache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("flushing dcache successfully.\n");
}

/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

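/*
 * Enable the data cache; if the MMU is not yet on, invalidate the caches
 * and TLBs, set up the page tables and enable the MMU first.
 */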
void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	set_sctlr(get_sctlr() | CR_C);
}

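/*
 * Disable the data cache and the MMU, then flush the data cache and
 * invalidate the TLBs so memory stays consistent with caching off.
 */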
void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* if cache isn't enabled no need to disable */
	if (!(sctlr & CR_C))
		return;

	set_sctlr(sctlr & ~(CR_C|CR_M));

	flush_dcache_all();
	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

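/*
 * Weak default: boards that want per-region dcache control override this
 * to return the base address of their page table.
 */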
u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");

	return NULL;
}

#ifndef CONFIG_SYS_FULL_VA
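/*
 * Change the cacheability attribute of the MMU sections covering
 * [start, start + size) and flush the affected range.
 *
 * Hypothetical usage example (addresses made up): mark a 2 MiB DMA
 * buffer at 0x80000000 as non-cacheable:
 *
 *	mmu_set_region_dcache_behaviour(0x80000000, 0x200000, DCACHE_OFF);
 */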
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 *page_table = arch_get_page_table();
	u64 upto, end;

	if (page_table == NULL)
		return;

	end = ALIGN(start + size, (1 << MMU_SECTION_SHIFT)) >>
	      MMU_SECTION_SHIFT;
	start = start >> MMU_SECTION_SHIFT;
	for (upto = start; upto < end; upto++) {
		page_table[upto] &= ~PMD_ATTRINDX_MASK;
		page_table[upto] |= PMD_ATTRINDX(option);
	}
	asm volatile("dsb sy");
	__asm_invalidate_tlb_all();
	asm volatile("dsb sy");
	asm volatile("isb");
	start = start << MMU_SECTION_SHIFT;
	end = end << MMU_SECTION_SHIFT;
	flush_dcache_range(start, end);
	asm volatile("dsb sy");
}
#endif

#else	/* CONFIG_SYS_DCACHE_OFF */

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif	/* CONFIG_SYS_DCACHE_OFF */

#ifndef CONFIG_SYS_ICACHE_OFF

void icache_enable(void)
{
	__asm_invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
}

#else	/* CONFIG_SYS_ICACHE_OFF */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif	/* CONFIG_SYS_ICACHE_OFF */

/*
 * Enable dCache & iCache; whether the caches are actually enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}