// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2003
 * Wolfgang Denk, DENX Software Engineering, <wd@denx.de>
 */

#include <cpu_func.h>
#include <malloc.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cm.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/mipsregs.h>
#include <asm/system.h>
#include <linux/bug.h>

DECLARE_GLOBAL_DATA_PTR;

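/*
 * probe_l2() - probe for an L2 cache and record its line size
 *
 * Walk the CP0 Config register chain to determine whether an L2 cache is
 * present and store its line size in gd->arch.l2_line_size. On release 6
 * cores the Config5.L2C bit indicates a Coherence Manager controlled L2,
 * whose line size is read via mips_cm_l2_line_size(); otherwise the size
 * is decoded from the Config2.SL field. Compiles to an empty function
 * unless CONFIG_MIPS_L2_CACHE is enabled.
 */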
static void probe_l2(void)
{
#ifdef CONFIG_MIPS_L2_CACHE
	unsigned long conf2, sl;
	bool l2c = false;

	if (!(read_c0_config1() & MIPS_CONF_M))
		return;

	conf2 = read_c0_config2();

	if (__mips_isa_rev >= 6) {
		l2c = conf2 & MIPS_CONF_M;
		if (l2c)
			l2c = read_c0_config3() & MIPS_CONF_M;
		if (l2c)
			l2c = read_c0_config4() & MIPS_CONF_M;
		if (l2c)
			l2c = read_c0_config5() & MIPS_CONF5_L2C;
	}

	if (l2c && IS_ENABLED(CONFIG_MIPS_CM)) {
		gd->arch.l2_line_size = mips_cm_l2_line_size();
	} else if (l2c) {
		/* We don't know how to retrieve L2 config on this system */
		BUG();
	} else {
		sl = (conf2 & MIPS_CONF2_SL) >> MIPS_CONF2_SL_SHF;
		gd->arch.l2_line_size = sl ? (2 << sl) : 0;
	}
#endif
}

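/*
 * mips_cache_probe() - probe L1 cache line sizes
 *
 * With CONFIG_SYS_CACHE_SIZE_AUTO the I- and D-cache line sizes are
 * decoded from the Config1 IL/DL fields (a non-zero field value n means
 * a line size of 2 << n bytes, zero means no cache) and stored in the
 * global data, then the L2 cache is probed. Without it only the L2
 * probe runs and the compile-time line sizes are used instead.
 */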
void mips_cache_probe(void)
{
#ifdef CONFIG_SYS_CACHE_SIZE_AUTO
	unsigned long conf1, il, dl;

	conf1 = read_c0_config1();

	il = (conf1 & MIPS_CONF1_IL) >> MIPS_CONF1_IL_SHF;
	dl = (conf1 & MIPS_CONF1_DL) >> MIPS_CONF1_DL_SHF;

	gd->arch.l1i_line_size = il ? (2 << il) : 0;
	gd->arch.l1d_line_size = dl ? (2 << dl) : 0;
#endif
	probe_l2();
}

static inline unsigned long icache_line_size(void)
{
#ifdef CONFIG_SYS_CACHE_SIZE_AUTO
	return gd->arch.l1i_line_size;
#else
	return CONFIG_SYS_ICACHE_LINE_SIZE;
#endif
}

static inline unsigned long dcache_line_size(void)
{
#ifdef CONFIG_SYS_CACHE_SIZE_AUTO
	return gd->arch.l1d_line_size;
#else
	return CONFIG_SYS_DCACHE_LINE_SIZE;
#endif
}

static inline unsigned long scache_line_size(void)
{
#ifdef CONFIG_MIPS_L2_CACHE
	return gd->arch.l2_line_size;
#else
	return CONFIG_SYS_SCACHE_LINE_SIZE;
#endif
}

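/*
 * cache_loop() - issue one or more cache ops over an address range
 *
 * Align start and end down to the line size, then issue every op listed
 * in 'ops' on each line in the range via mips_cache(). Does nothing when
 * lsize is zero, i.e. when the corresponding cache is not present.
 */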
#define cache_loop(start, end, lsize, ops...) do {			\
	const void *addr = (const void *)(start & ~(lsize - 1));	\
	const void *aend = (const void *)((end - 1) & ~(lsize - 1));	\
	const unsigned int cache_ops[] = { ops };			\
	unsigned int i;							\
									\
	if (!lsize)							\
		break;							\
									\
	for (; addr <= aend; addr += lsize) {				\
		for (i = 0; i < ARRAY_SIZE(cache_ops); i++)		\
			mips_cache(cache_ops[i], addr);			\
	}								\
} while (0)

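/*
 * flush_cache() - make a range of memory coherent with the I-cache
 *
 * Write back and invalidate the D-cache (and the L2, if present) and
 * invalidate the I-cache for [start_addr, start_addr + size). When the
 * I- and D-cache line sizes match and there is no L2, both ops are
 * issued in a single pass over the range. The trailing sync() and
 * instruction hazard barrier ensure the ops have completed and that no
 * stale instructions remain in the pipeline.
 */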
void __weak flush_cache(ulong start_addr, ulong size)
{
	unsigned long ilsize = icache_line_size();
	unsigned long dlsize = dcache_line_size();
	unsigned long slsize = scache_line_size();

	/* aend will be miscalculated when size is zero, so we return here */
	if (size == 0)
		return;

	if ((ilsize == dlsize) && !slsize) {
		/* flush I-cache & D-cache simultaneously */
		cache_loop(start_addr, start_addr + size, ilsize,
			   HIT_WRITEBACK_INV_D, HIT_INVALIDATE_I);
		goto ops_done;
	}

	/* flush D-cache */
	cache_loop(start_addr, start_addr + size, dlsize, HIT_WRITEBACK_INV_D);

	/* flush L2 cache */
	cache_loop(start_addr, start_addr + size, slsize, HIT_WRITEBACK_INV_SD);

	/* flush I-cache */
	cache_loop(start_addr, start_addr + size, ilsize, HIT_INVALIDATE_I);

ops_done:
	/* ensure cache ops complete before any further memory accesses */
	sync();

	/* ensure the pipeline doesn't contain now-invalid instructions */
	instruction_hazard_barrier();
}

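/*
 * flush_dcache_range() - write back and invalidate data caches
 *
 * Write back and invalidate the D-cache and, if present, the L2 cache
 * for [start_addr, stop), then sync() so the data is visible to other
 * observers such as DMA masters before returning.
 */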
void __weak flush_dcache_range(ulong start_addr, ulong stop)
{
	unsigned long lsize = dcache_line_size();
	unsigned long slsize = scache_line_size();

	/* aend will be miscalculated when size is zero, so we return here */
	if (start_addr == stop)
		return;

	cache_loop(start_addr, stop, lsize, HIT_WRITEBACK_INV_D);

	/* flush L2 cache */
	cache_loop(start_addr, stop, slsize, HIT_WRITEBACK_INV_SD);

	/* ensure cache ops complete before any further memory accesses */
	sync();
}

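/*
 * invalidate_dcache_range() - invalidate data caches without write back
 *
 * Invalidate the L2 cache (if present) before the L1 D-cache so a line
 * cannot be refilled from a stale L2 copy after the L1 invalidation.
 * Any dirty data in the range is discarded rather than written back.
 */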
void __weak invalidate_dcache_range(ulong start_addr, ulong stop)
{
	unsigned long lsize = dcache_line_size();
	unsigned long slsize = scache_line_size();

	/* aend will be miscalculated when size is zero, so we return here */
	if (start_addr == stop)
		return;

	/* invalidate L2 cache */
	cache_loop(start_addr, stop, slsize, HIT_INVALIDATE_SD);

	cache_loop(start_addr, stop, lsize, HIT_INVALIDATE_D);

	/* ensure cache ops complete before any further memory accesses */
	sync();
}

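/*
 * dcache_status() - report whether kseg0 is currently cacheable
 *
 * Returns non-zero unless the Config.K0 cacheability attribute is set
 * to uncached.
 */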
int dcache_status(void)
{
	unsigned int cca = read_c0_config() & CONF_CM_CMASK;

	return cca != CONF_CM_UNCACHED;
}

void dcache_enable(void)
{
	puts("Not supported!\n");
}

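/*
 * dcache_disable() - switch kseg0 to uncached
 *
 * Sets the Config.K0 cacheability attribute to uncached; the hazard
 * barrier ensures instructions fetched after the change observe the
 * new attribute.
 */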
void dcache_disable(void)
{
	/* change CCA to uncached */
	change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);

	/* ensure the pipeline doesn't contain now-invalid instructions */
	instruction_hazard_barrier();
}

#ifdef CONFIG_SYS_NONCACHED_MEMORY
static unsigned long noncached_start;
static unsigned long noncached_end;
static unsigned long noncached_next;

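/*
 * noncached_set_region() - nothing to do on MIPS: uncached accesses go
 * through the kseg1 window (see the CKSEG1ADDR() use in noncached_alloc()
 * below), so no MMU or TLB setup is required and this remains a stub.
 */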
void noncached_set_region(void)
{
}

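/*
 * noncached_init() - reserve the region used for uncached allocations
 *
 * Reserves CONFIG_SYS_NONCACHED_MEMORY bytes immediately below the
 * malloc area, aligned to MMU_SECTION_SIZE, for use by noncached_alloc().
 * The layout must match board_f.c:reserve_noncached().
 */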
int noncached_init(void)
{
	phys_addr_t start, end;
	size_t size;

	/* If this calculation changes, update board_f.c:reserve_noncached() */
	end = ALIGN(mem_malloc_start, MMU_SECTION_SIZE) - MMU_SECTION_SIZE;
	size = ALIGN(CONFIG_SYS_NONCACHED_MEMORY, MMU_SECTION_SIZE);
	start = end - size;

	debug("mapping memory %pa-%pa non-cached\n", &start, &end);

	noncached_start = start;
	noncached_end = end;
	noncached_next = start;

	return 0;
}

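/*
 * noncached_alloc() - allocate uncached memory
 *
 * A simple bump allocator over the region set up by noncached_init():
 * returns the next 'align'-aligned chunk of 'size' bytes as an uncached
 * kseg1 address, or 0 once the region is exhausted. There is no way to
 * free an allocation. Illustrative use (names are hypothetical):
 *
 *	desc_ring = (struct my_desc *)noncached_alloc(ring_bytes,
 *						      ARCH_DMA_MINALIGN);
 */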
phys_addr_t noncached_alloc(size_t size, size_t align)
{
	phys_addr_t next = ALIGN(noncached_next, align);

	if (next >= noncached_end || (noncached_end - next) < size)
		return 0;

	debug("allocated %zu bytes of uncached memory @%pa\n", size, &next);
	noncached_next = next + size;

	return CKSEG1ADDR(next);
}
#endif /* CONFIG_SYS_NONCACHED_MEMORY */