Tom Rini | 10e4779 | 2018-05-06 17:58:06 -0400 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0+ |
wdenk | edc48b6 | 2002-09-08 17:56:50 +0000 | [diff] [blame] | 2 | /* |
| 3 | * (C) Copyright 2002 |
| 4 | * Wolfgang Denk, DENX Software Engineering, wd@denx.de. |
wdenk | edc48b6 | 2002-09-08 17:56:50 +0000 | [diff] [blame] | 5 | */ |
| 6 | |
| 7 | /* for now: just dummy functions to satisfy the linker */ |
| 8 | |
Tom Rini | dec7ea0 | 2024-05-20 13:35:03 -0600 | [diff] [blame^] | 9 | #include <config.h> |
Simon Glass | 6333448 | 2019-11-14 12:57:39 -0700 | [diff] [blame] | 10 | #include <cpu_func.h> |
Simon Glass | 0f2af88 | 2020-05-10 11:40:05 -0600 | [diff] [blame] | 11 | #include <log.h> |
Thierry Reding | c97d974 | 2014-12-09 22:25:22 -0700 | [diff] [blame] | 12 | #include <malloc.h> |
Simon Glass | 274e0b0 | 2020-05-10 11:39:56 -0600 | [diff] [blame] | 13 | #include <asm/cache.h> |
Simon Glass | 3ba929a | 2020-10-30 21:38:53 -0600 | [diff] [blame] | 14 | #include <asm/global_data.h> |
wdenk | f806271 | 2005-01-09 23:16:25 +0000 | [diff] [blame] | 15 | |
Ovidiu Panait | 68b371a | 2020-03-29 20:57:39 +0300 | [diff] [blame] | 16 | DECLARE_GLOBAL_DATA_PTR; |
| 17 | |
Wu, Josh | 2219026 | 2015-07-27 11:40:17 +0800 | [diff] [blame] | 18 | /* |
| 19 | * Flush range from all levels of d-cache/unified-cache. |
| 20 | * Affects the range [start, start + size - 1]. |
| 21 | */ |
Jeroen Hofstee | d746077 | 2014-06-23 22:07:04 +0200 | [diff] [blame] | 22 | __weak void flush_cache(unsigned long start, unsigned long size) |
wdenk | edc48b6 | 2002-09-08 17:56:50 +0000 | [diff] [blame] | 23 | { |
Wu, Josh | 2219026 | 2015-07-27 11:40:17 +0800 | [diff] [blame] | 24 | flush_dcache_range(start, start + size); |
wdenk | edc48b6 | 2002-09-08 17:56:50 +0000 | [diff] [blame] | 25 | } |
Aneesh V | 3bda377 | 2011-06-16 23:30:50 +0000 | [diff] [blame] | 26 | |
| 27 | /* |
| 28 | * Default implementation: |
| 29 | * do a range flush for the entire range |
| 30 | */ |
Jeroen Hofstee | d746077 | 2014-06-23 22:07:04 +0200 | [diff] [blame] | 31 | __weak void flush_dcache_all(void) |
Aneesh V | 3bda377 | 2011-06-16 23:30:50 +0000 | [diff] [blame] | 32 | { |
| 33 | flush_cache(0, ~0); |
| 34 | } |
Aneesh V | fffbb97 | 2011-08-16 04:33:05 +0000 | [diff] [blame] | 35 | |
| 36 | /* |
| 37 | * Default implementation of enable_caches() |
| 38 | * Real implementation should be in platform code |
| 39 | */ |
Jeroen Hofstee | d746077 | 2014-06-23 22:07:04 +0200 | [diff] [blame] | 40 | __weak void enable_caches(void) |
Aneesh V | fffbb97 | 2011-08-16 04:33:05 +0000 | [diff] [blame] | 41 | { |
| 42 | puts("WARNING: Caches not enabled\n"); |
| 43 | } |
Thierry Reding | c97d974 | 2014-12-09 22:25:22 -0700 | [diff] [blame] | 44 | |
Wu, Josh | aaa3545 | 2015-07-27 11:40:16 +0800 | [diff] [blame] | 45 | __weak void invalidate_dcache_range(unsigned long start, unsigned long stop) |
| 46 | { |
| 47 | /* An empty stub, real implementation should be in platform code */ |
| 48 | } |
| 49 | __weak void flush_dcache_range(unsigned long start, unsigned long stop) |
| 50 | { |
| 51 | /* An empty stub, real implementation should be in platform code */ |
| 52 | } |
| 53 | |
Simon Glass | 8540658 | 2016-06-19 19:43:01 -0600 | [diff] [blame] | 54 | int check_cache_range(unsigned long start, unsigned long stop) |
| 55 | { |
| 56 | int ok = 1; |
| 57 | |
| 58 | if (start & (CONFIG_SYS_CACHELINE_SIZE - 1)) |
| 59 | ok = 0; |
| 60 | |
| 61 | if (stop & (CONFIG_SYS_CACHELINE_SIZE - 1)) |
| 62 | ok = 0; |
| 63 | |
| 64 | if (!ok) { |
Simon Glass | 143997a | 2016-06-19 19:43:05 -0600 | [diff] [blame] | 65 | warn_non_spl("CACHE: Misaligned operation at range [%08lx, %08lx]\n", |
| 66 | start, stop); |
Simon Glass | 8540658 | 2016-06-19 19:43:01 -0600 | [diff] [blame] | 67 | } |
| 68 | |
| 69 | return ok; |
| 70 | } |
| 71 | |
#ifdef CONFIG_SYS_NONCACHED_MEMORY
/*
 * Reserve one MMU section worth of address space below the malloc() area that
 * will be mapped uncached.
 */
static unsigned long noncached_start;	/* base of the noncached window */
static unsigned long noncached_end;	/* exclusive end of the window */
static unsigned long noncached_next;	/* bump-allocator cursor for noncached_alloc() */
| 80 | |
/*
 * Map the reserved noncached window with the d-cache disabled.
 * No-op when the d-cache is configured off (nothing to bypass then).
 */
void noncached_set_region(void)
{
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
	mmu_set_region_dcache_behaviour(noncached_start,
					noncached_end - noncached_start,
					DCACHE_OFF);
#endif
}
| 89 | |
/*
 * Carve the noncached window out of the address space just below the
 * malloc() area, initialize the allocator cursor and apply the uncached
 * mapping. Returns 0 (always succeeds).
 */
int noncached_init(void)
{
	phys_addr_t start, end;
	size_t size;

	/* If this calculation changes, update board_f.c:reserve_noncached() */
	end = ALIGN(mem_malloc_start, MMU_SECTION_SIZE) - MMU_SECTION_SIZE;
	size = ALIGN(CONFIG_SYS_NONCACHED_MEMORY, MMU_SECTION_SIZE);
	start = end - size;

	debug("mapping memory %pa-%pa non-cached\n", &start, &end);

	noncached_start = start;
	noncached_end = end;
	noncached_next = start;

	noncached_set_region();

	return 0;
}
| 110 | |
| 111 | phys_addr_t noncached_alloc(size_t size, size_t align) |
| 112 | { |
| 113 | phys_addr_t next = ALIGN(noncached_next, align); |
| 114 | |
| 115 | if (next >= noncached_end || (noncached_end - next) < size) |
| 116 | return 0; |
| 117 | |
| 118 | debug("allocated %zu bytes of uncached memory @%pa\n", size, &next); |
| 119 | noncached_next = next + size; |
| 120 | |
| 121 | return next; |
| 122 | } |
| 123 | #endif /* CONFIG_SYS_NONCACHED_MEMORY */ |
Albert ARIBAUD | a382322 | 2015-10-23 18:06:40 +0200 | [diff] [blame] | 124 | |
#if CONFIG_IS_ENABLED(SYS_THUMB_BUILD)
/*
 * Invalidate the entire L2 cache through the implementation-defined
 * CP15 c15 operation. NOTE(review): this encoding is core-specific —
 * confirm it matches the target CPU before relying on it elsewhere.
 */
void invalidate_l2_cache(void)
{
	unsigned int val = 0;

	asm volatile("mcr p15, 1, %0, c15, c11, 0 @ invl l2 cache"
		: : "r" (val) : "cc");
	isb();	/* make sure the invalidate completes before continuing */
}
#endif
Ovidiu Panait | 68b371a | 2020-03-29 20:57:39 +0300 | [diff] [blame] | 135 | |
/*
 * Arch hook for reserving MMU/page-table memory during relocation;
 * simply dispatches to the (weak, board-overridable) arm_reserve_mmu().
 */
int arch_reserve_mmu(void)
{
	return arm_reserve_mmu();
}
| 140 | |
/*
 * Default MMU reservation: carve PGTABLE_SIZE bytes for the TLB/page
 * tables out of the top of relocatable memory (gd->relocaddr), aligned
 * down to 64 kB. Does nothing when both caches are configured off.
 * Returns 0 (always succeeds).
 */
__weak int arm_reserve_mmu(void)
{
#if !(CONFIG_IS_ENABLED(SYS_ICACHE_OFF) && CONFIG_IS_ENABLED(SYS_DCACHE_OFF))
	/* reserve TLB table */
	gd->arch.tlb_size = PGTABLE_SIZE;
	gd->relocaddr -= gd->arch.tlb_size;

	/* round down to next 64 kB limit */
	gd->relocaddr &= ~(0x10000 - 1);

	gd->arch.tlb_addr = gd->relocaddr;
	debug("TLB table from %08lx to %08lx\n", gd->arch.tlb_addr,
	      gd->arch.tlb_addr + gd->arch.tlb_size);

#ifdef CFG_SYS_MEM_RESERVE_SECURE
	/*
	 * Record the allocated tlb_addr in case gd->arch.tlb_addr is later
	 * overwritten with a location inside secure RAM.
	 */
	gd->arch.tlb_allocated = gd->arch.tlb_addr;
#endif

	if (IS_ENABLED(CONFIG_CMO_BY_VA_ONLY)) {
		/*
		 * As invalidate_dcache_all() will be called before
		 * mmu_setup(), we should make sure that the PTs are
		 * already in a valid state.
		 */
		memset((void *)gd->arch.tlb_addr, 0, gd->arch.tlb_size);
	}
#endif

	return 0;
}