// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2003
 * Wolfgang Denk, DENX Software Engineering, <wd@denx.de>
 */

#include <common.h>
#include <cpu_func.h>
#include <malloc.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cm.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/mipsregs.h>
#include <asm/system.h>
#include <linux/bug.h>

DECLARE_GLOBAL_DATA_PTR;

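/*
 * probe_l2() - probe the L2 (secondary) cache line size
 *
 * Determine the L2 cache line size and store it in gd->arch.l2_line_size.
 * On MIPSr6 cores the Config5.L2C bit indicates that the L2 configuration
 * is provided by the Coherence Manager rather than Config2, in which case
 * the line size is read via mips_cm_l2_line_size(). Otherwise the SL field
 * of Config2 is used. Does nothing unless CONFIG_MIPS_L2_CACHE is enabled.
 */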
static void probe_l2(void)
{
#ifdef CONFIG_MIPS_L2_CACHE
	unsigned long conf2, sl;
	bool l2c = false;

	if (!(read_c0_config1() & MIPS_CONF_M))
		return;

	conf2 = read_c0_config2();

	if (__mips_isa_rev >= 6) {
		l2c = conf2 & MIPS_CONF_M;
		if (l2c)
			l2c = read_c0_config3() & MIPS_CONF_M;
		if (l2c)
			l2c = read_c0_config4() & MIPS_CONF_M;
		if (l2c)
			l2c = read_c0_config5() & MIPS_CONF5_L2C;
	}

	if (l2c && IS_ENABLED(CONFIG_MIPS_CM)) {
		gd->arch.l2_line_size = mips_cm_l2_line_size();
	} else if (l2c) {
		/* We don't know how to retrieve L2 config on this system */
		BUG();
	} else {
		sl = (conf2 & MIPS_CONF2_SL) >> MIPS_CONF2_SL_SHF;
		gd->arch.l2_line_size = sl ? (2 << sl) : 0;
	}
#endif
}

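/*
 * mips_cache_probe() - probe the L1 & L2 cache line sizes
 *
 * With CONFIG_SYS_CACHE_SIZE_AUTO the L1 I- & D-cache line sizes are read
 * from the IL & DL fields of the Config1 register and stored in global data
 * for later use by the cache maintenance functions.
 */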
void mips_cache_probe(void)
{
#ifdef CONFIG_SYS_CACHE_SIZE_AUTO
	unsigned long conf1, il, dl;

	conf1 = read_c0_config1();

	il = (conf1 & MIPS_CONF1_IL) >> MIPS_CONF1_IL_SHF;
	dl = (conf1 & MIPS_CONF1_DL) >> MIPS_CONF1_DL_SHF;

	gd->arch.l1i_line_size = il ? (2 << il) : 0;
	gd->arch.l1d_line_size = dl ? (2 << dl) : 0;
#endif
	probe_l2();
}

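/*
 * The *cache_line_size() helpers return the probed line sizes when automatic
 * cache probing is enabled, and the fixed Kconfig values otherwise. A line
 * size of 0 indicates that the respective cache is not present and causes
 * cache_loop() to skip it.
 */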
static inline unsigned long icache_line_size(void)
{
#ifdef CONFIG_SYS_CACHE_SIZE_AUTO
	return gd->arch.l1i_line_size;
#else
	return CONFIG_SYS_ICACHE_LINE_SIZE;
#endif
}

static inline unsigned long dcache_line_size(void)
{
#ifdef CONFIG_SYS_CACHE_SIZE_AUTO
	return gd->arch.l1d_line_size;
#else
	return CONFIG_SYS_DCACHE_LINE_SIZE;
#endif
}

static inline unsigned long scache_line_size(void)
{
#ifdef CONFIG_MIPS_L2_CACHE
	return gd->arch.l2_line_size;
#else
	return CONFIG_SYS_SCACHE_LINE_SIZE;
#endif
}

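/*
 * cache_loop() - perform one or more cache ops over an address range
 *
 * Align the start & end addresses down to cache line boundaries, then issue
 * each of the given cache ops on every line in the range, e.g.:
 *
 *	cache_loop(start, end, dlsize, HIT_WRITEBACK_INV_D);
 *
 * A line size of zero (cache not present) makes this a no-op.
 */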
#define cache_loop(start, end, lsize, ops...) do { \
	const void *addr = (const void *)(start & ~(lsize - 1)); \
	const void *aend = (const void *)((end - 1) & ~(lsize - 1)); \
	const unsigned int cache_ops[] = { ops }; \
	unsigned int i; \
	\
	if (!lsize) \
		break; \
	\
	for (; addr <= aend; addr += lsize) { \
		for (i = 0; i < ARRAY_SIZE(cache_ops); i++) \
			mips_cache(cache_ops[i], addr); \
	} \
} while (0)

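/*
 * flush_cache() - write back & invalidate the caches for an address range
 *
 * Write back & invalidate the D-cache (and L2, if present), then invalidate
 * the I-cache, so that newly written code or data is visible to subsequent
 * instruction fetches. When the I- & D-cache line sizes match and there is
 * no L2, both ops are issued in a single pass over the range.
 */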
void __weak flush_cache(ulong start_addr, ulong size)
{
	unsigned long ilsize = icache_line_size();
	unsigned long dlsize = dcache_line_size();
	unsigned long slsize = scache_line_size();

	/* aend will be miscalculated when size is zero, so we return here */
	if (size == 0)
		return;

	if ((ilsize == dlsize) && !slsize) {
		/* flush I-cache & D-cache simultaneously */
		cache_loop(start_addr, start_addr + size, ilsize,
			   HIT_WRITEBACK_INV_D, HIT_INVALIDATE_I);
		goto ops_done;
	}

	/* flush D-cache */
	cache_loop(start_addr, start_addr + size, dlsize, HIT_WRITEBACK_INV_D);

	/* flush L2 cache */
	cache_loop(start_addr, start_addr + size, slsize, HIT_WRITEBACK_INV_SD);

	/* flush I-cache */
	cache_loop(start_addr, start_addr + size, ilsize, HIT_INVALIDATE_I);

ops_done:
	/* ensure cache ops complete before any further memory accesses */
	sync();

	/* ensure the pipeline doesn't contain now-invalid instructions */
	instruction_hazard_barrier();
}

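/*
 * flush_dcache_range() - write back & invalidate the D-cache for a range
 *
 * Write back & invalidate the D-cache (and L2, if present) for the range
 * [start_addr, stop). The I-cache is not touched.
 */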
void __weak flush_dcache_range(ulong start_addr, ulong stop)
{
	unsigned long lsize = dcache_line_size();
	unsigned long slsize = scache_line_size();

	/* aend will be miscalculated when size is zero, so we return here */
	if (start_addr == stop)
		return;

	cache_loop(start_addr, stop, lsize, HIT_WRITEBACK_INV_D);

	/* flush L2 cache */
	cache_loop(start_addr, stop, slsize, HIT_WRITEBACK_INV_SD);

	/* ensure cache ops complete before any further memory accesses */
	sync();
}

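/*
 * invalidate_dcache_range() - invalidate the D-cache for a range
 *
 * Invalidate the L2 (if present) and then the D-cache for the range
 * [start_addr, stop) without writing dirty lines back, typically used
 * before reading a buffer that a device has written by DMA.
 */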
void __weak invalidate_dcache_range(ulong start_addr, ulong stop)
{
	unsigned long lsize = dcache_line_size();
	unsigned long slsize = scache_line_size();

	/* aend will be miscalculated when size is zero, so we return here */
	if (start_addr == stop)
		return;

	/* invalidate L2 cache */
	cache_loop(start_addr, stop, slsize, HIT_INVALIDATE_SD);

	cache_loop(start_addr, stop, lsize, HIT_INVALIDATE_D);

	/* ensure cache ops complete before any further memory accesses */
	sync();
}

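/* Report whether the D-cache is enabled, i.e. whether kseg0 is cached */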
int dcache_status(void)
{
	unsigned int cca = read_c0_config() & CONF_CM_CMASK;

	return cca != CONF_CM_UNCACHED;
}

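/* Re-enabling the caches after dcache_disable() is not implemented */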
void dcache_enable(void)
{
	puts("Not supported!\n");
}

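/*
 * Disable the D-cache by switching the kseg0 cacheability attribute (CCA)
 * to uncached.
 */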
void dcache_disable(void)
{
	/* change CCA to uncached */
	change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);

	/* ensure the pipeline doesn't contain now-invalid instructions */
	instruction_hazard_barrier();
}

#ifdef CONFIG_SYS_NONCACHED_MEMORY
static unsigned long noncached_start;
static unsigned long noncached_end;
static unsigned long noncached_next;

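/*
 * noncached_set_region() - nothing to do here: the noncached pool is
 * accessed through KSEG1 (see noncached_alloc()), so no MMU or region
 * setup is required.
 */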
void noncached_set_region(void)
{
}

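/*
 * noncached_init() - reserve a pool of uncached memory
 *
 * Carve CONFIG_SYS_NONCACHED_MEMORY bytes out of the memory just below the
 * malloc region, aligned to MMU_SECTION_SIZE, for later allocation via
 * noncached_alloc().
 */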
int noncached_init(void)
{
	phys_addr_t start, end;
	size_t size;

	/* If this calculation changes, update board_f.c:reserve_noncached() */
	end = ALIGN(mem_malloc_start, MMU_SECTION_SIZE) - MMU_SECTION_SIZE;
	size = ALIGN(CONFIG_SYS_NONCACHED_MEMORY, MMU_SECTION_SIZE);
	start = end - size;

	debug("mapping memory %pa-%pa non-cached\n", &start, &end);

	noncached_start = start;
	noncached_end = end;
	noncached_next = start;

	return 0;
}

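/*
 * noncached_alloc() - allocate from the uncached pool
 *
 * Return a KSEG1 (uncached) address for @size bytes aligned to @align, or 0
 * if the pool set up by noncached_init() is exhausted. Allocations cannot
 * be freed.
 */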
phys_addr_t noncached_alloc(size_t size, size_t align)
{
	phys_addr_t next = ALIGN(noncached_next, align);

	if (next >= noncached_end || (noncached_end - next) < size)
		return 0;

	debug("allocated %zu bytes of uncached memory @%pa\n", size, &next);
	noncached_next = next + size;

	return CKSEG1ADDR(next);
}
#endif /* CONFIG_SYS_NONCACHED_MEMORY */