blob: b6e4abbad0f62bfb030b882f0453478cbf23ea9d [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +09002/*
3 * Copyright (C) 2012-2014 Panasonic Corporation
4 * Copyright (C) 2015-2016 Socionext Inc.
5 * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +09006 */
7
8#include <common.h>
Simon Glass1d91ba72019-11-14 12:57:37 -07009#include <cpu_func.h>
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +090010#include <linux/io.h>
Masahiro Yamada5ffada92016-08-10 16:08:36 +090011#include <linux/kernel.h>
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +090012#include <asm/armv7.h>
Masahiro Yamada5ffada92016-08-10 16:08:36 +090013#include <asm/processor.h>
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +090014
Masahiro Yamada487b5fe2016-08-10 16:08:37 +090015#include "cache-uniphier.h"
Masahiro Yamadaf846a2b2016-08-10 16:08:44 +090016
/* control registers */
#define UNIPHIER_SSCC		0x500c0000	/* Control Register */
#define UNIPHIER_SSCC_BST	(0x1 << 20)	/* UCWG burst read */
#define UNIPHIER_SSCC_ACT	(0x1 << 19)	/* Inst-Data separate */
#define UNIPHIER_SSCC_WTG	(0x1 << 18)	/* WT gathering on */
#define UNIPHIER_SSCC_PRD	(0x1 << 17)	/* enable pre-fetch */
#define UNIPHIER_SSCC_ON	(0x1 << 0)	/* enable cache */
#define UNIPHIER_SSCLPDAWCR	0x500c0030	/* Unified/Data Active Way Control */
#define UNIPHIER_SSCLPIAWCR	0x500c0034	/* Instruction Active Way Control */

/* revision registers */
#define UNIPHIER_SSCID		0x503c0100	/* ID Register */

/* operation registers */
#define UNIPHIER_SSCOPE		0x506c0244	/* Cache Operation Primitive Entry */
#define UNIPHIER_SSCOPE_CM_INV			0x0	/* invalidate */
#define UNIPHIER_SSCOPE_CM_CLEAN		0x1	/* clean */
#define UNIPHIER_SSCOPE_CM_FLUSH		0x2	/* flush */
#define UNIPHIER_SSCOPE_CM_SYNC			0x8	/* sync (drain bufs) */
#define UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH	0x9	/* flush p-fetch buf */
#define UNIPHIER_SSCOQM		0x506c0248	/* Cache Operation Queue Mode */
#define UNIPHIER_SSCOQM_TID_MASK		(0x3 << 21)
#define UNIPHIER_SSCOQM_TID_LRU_DATA		(0x0 << 21)
#define UNIPHIER_SSCOQM_TID_LRU_INST		(0x1 << 21)
#define UNIPHIER_SSCOQM_TID_WAY			(0x2 << 21)
#define UNIPHIER_SSCOQM_S_MASK			(0x3 << 17)
#define UNIPHIER_SSCOQM_S_RANGE			(0x0 << 17)
#define UNIPHIER_SSCOQM_S_ALL			(0x1 << 17)
#define UNIPHIER_SSCOQM_S_WAY			(0x2 << 17)
#define UNIPHIER_SSCOQM_CE			(0x1 << 15)	/* notify completion */
#define UNIPHIER_SSCOQM_CW			(0x1 << 14)
#define UNIPHIER_SSCOQM_CM_MASK			(0x7)
#define UNIPHIER_SSCOQM_CM_INV			0x0	/* invalidate */
#define UNIPHIER_SSCOQM_CM_CLEAN		0x1	/* clean */
#define UNIPHIER_SSCOQM_CM_FLUSH		0x2	/* flush */
#define UNIPHIER_SSCOQM_CM_PREFETCH		0x3	/* prefetch to cache */
#define UNIPHIER_SSCOQM_CM_PREFETCH_BUF		0x4	/* prefetch to pf-buf */
#define UNIPHIER_SSCOQM_CM_TOUCH		0x5	/* touch */
#define UNIPHIER_SSCOQM_CM_TOUCH_ZERO		0x6	/* touch to zero */
#define UNIPHIER_SSCOQM_CM_TOUCH_DIRTY		0x7	/* touch with dirty */
#define UNIPHIER_SSCOQAD	0x506c024c	/* Cache Operation Queue Address */
#define UNIPHIER_SSCOQSZ	0x506c0250	/* Cache Operation Queue Size */
#define UNIPHIER_SSCOQMASK	0x506c0254	/* Cache Operation Queue Address Mask */
#define UNIPHIER_SSCOQWN	0x506c0258	/* Cache Operation Queue Way Number */
#define UNIPHIER_SSCOPPQSEF	0x506c025c	/* Cache Operation Queue Set Complete */
#define UNIPHIER_SSCOPPQSEF_FE	(0x1 << 1)
#define UNIPHIER_SSCOPPQSEF_OE	(0x1 << 0)
#define UNIPHIER_SSCOLPQS	0x506c0260	/* Cache Operation Queue Status */
#define UNIPHIER_SSCOLPQS_EF	(0x1 << 2)
#define UNIPHIER_SSCOLPQS_EST	(0x1 << 1)
#define UNIPHIER_SSCOLPQS_QST	(0x1 << 0)

#define UNIPHIER_SSC_LINE_SIZE	128
#define UNIPHIER_SSC_RANGE_OP_MAX_SIZE	(0x00400000 - (UNIPHIER_SSC_LINE_SIZE))

/*
 * Helper predicates on the operation word.  The macro argument is fully
 * parenthesized so that compound expressions (e.g. "a | b") expand safely.
 */
/* true if the operation needs the address/size queue registers set */
#define UNIPHIER_SSCOQAD_IS_NEEDED(op) \
		(((op) & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)
/* true if the operation needs the target-way queue register set */
#define UNIPHIER_SSCOQWM_IS_NEEDED(op) \
		((((op) & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_WAY) || \
		 (((op) & UNIPHIER_SSCOQM_TID_MASK) == UNIPHIER_SSCOQM_TID_WAY))
78/* uniphier_cache_sync - perform a sync point for a particular cache level */
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +090079static void uniphier_cache_sync(void)
80{
81 /* drain internal buffers */
82 writel(UNIPHIER_SSCOPE_CM_SYNC, UNIPHIER_SSCOPE);
83 /* need a read back to confirm */
84 readl(UNIPHIER_SSCOPE);
85}
86
Masahiro Yamada5ffada92016-08-10 16:08:36 +090087/**
88 * uniphier_cache_maint_common - run a queue operation
89 *
90 * @start: start address of range operation (don't care for "all" operation)
91 * @size: data size of range operation (don't care for "all" operation)
Masahiro Yamada487b5fe2016-08-10 16:08:37 +090092 * @ways: target ways (don't care for operations other than pre-fetch, touch
Masahiro Yamada5ffada92016-08-10 16:08:36 +090093 * @operation: flags to specify the desired cache operation
94 */
Masahiro Yamada487b5fe2016-08-10 16:08:37 +090095static void uniphier_cache_maint_common(u32 start, u32 size, u32 ways,
96 u32 operation)
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +090097{
98 /* clear the complete notification flag */
99 writel(UNIPHIER_SSCOLPQS_EF, UNIPHIER_SSCOLPQS);
100
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900101 do {
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900102 /* set cache operation */
103 writel(UNIPHIER_SSCOQM_CE | operation, UNIPHIER_SSCOQM);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900104
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900105 /* set address range if needed */
106 if (likely(UNIPHIER_SSCOQAD_IS_NEEDED(operation))) {
107 writel(start, UNIPHIER_SSCOQAD);
108 writel(size, UNIPHIER_SSCOQSZ);
109 }
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900110
111 /* set target ways if needed */
112 if (unlikely(UNIPHIER_SSCOQWM_IS_NEEDED(operation)))
113 writel(ways, UNIPHIER_SSCOQWN);
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900114 } while (unlikely(readl(UNIPHIER_SSCOPPQSEF) &
115 (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900116
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900117 /* wait until the operation is completed */
118 while (likely(readl(UNIPHIER_SSCOLPQS) != UNIPHIER_SSCOLPQS_EF))
119 cpu_relax();
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900120}
121
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900122static void uniphier_cache_maint_all(u32 operation)
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900123{
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900124 uniphier_cache_maint_common(0, 0, 0, UNIPHIER_SSCOQM_S_ALL | operation);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900125
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900126 uniphier_cache_sync();
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900127}
128
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900129static void uniphier_cache_maint_range(u32 start, u32 end, u32 ways,
130 u32 operation)
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900131{
132 u32 size;
133
134 /*
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900135 * If the start address is not aligned,
136 * perform a cache operation for the first cache-line
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900137 */
138 start = start & ~(UNIPHIER_SSC_LINE_SIZE - 1);
139
140 size = end - start;
141
142 if (unlikely(size >= (u32)(-UNIPHIER_SSC_LINE_SIZE))) {
143 /* this means cache operation for all range */
144 uniphier_cache_maint_all(operation);
145 return;
146 }
147
148 /*
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900149 * If the end address is not aligned,
150 * perform a cache operation for the last cache-line
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900151 */
152 size = ALIGN(size, UNIPHIER_SSC_LINE_SIZE);
153
154 while (size) {
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900155 u32 chunk_size = min_t(u32, size, UNIPHIER_SSC_RANGE_OP_MAX_SIZE);
156
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900157 uniphier_cache_maint_common(start, chunk_size, ways,
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900158 UNIPHIER_SSCOQM_S_RANGE | operation);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900159
160 start += chunk_size;
161 size -= chunk_size;
162 }
163
164 uniphier_cache_sync();
165}
166
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900167void uniphier_cache_prefetch_range(u32 start, u32 end, u32 ways)
168{
169 uniphier_cache_maint_range(start, end, ways,
170 UNIPHIER_SSCOQM_TID_WAY |
171 UNIPHIER_SSCOQM_CM_PREFETCH);
172}
173
174void uniphier_cache_touch_range(u32 start, u32 end, u32 ways)
175{
176 uniphier_cache_maint_range(start, end, ways,
177 UNIPHIER_SSCOQM_TID_WAY |
178 UNIPHIER_SSCOQM_CM_TOUCH);
179}
180
181void uniphier_cache_touch_zero_range(u32 start, u32 end, u32 ways)
182{
183 uniphier_cache_maint_range(start, end, ways,
184 UNIPHIER_SSCOQM_TID_WAY |
185 UNIPHIER_SSCOQM_CM_TOUCH_ZERO);
186}
187
Masahiro Yamada983b3d32016-08-10 16:08:47 +0900188void uniphier_cache_inv_way(u32 ways)
189{
190 uniphier_cache_maint_common(0, 0, ways,
191 UNIPHIER_SSCOQM_S_WAY |
192 UNIPHIER_SSCOQM_CM_INV);
193}
194
Masahiro Yamadaf98afb62016-08-10 16:08:48 +0900195void uniphier_cache_set_active_ways(int cpu, u32 active_ways)
196{
197 void __iomem *base = (void __iomem *)UNIPHIER_SSCC + 0xc00;
198
199 switch (readl(UNIPHIER_SSCID)) { /* revision */
Masahiro Yamadaf98afb62016-08-10 16:08:48 +0900200 case 0x12: /* LD4 */
201 case 0x16: /* sld8 */
202 base = (void __iomem *)UNIPHIER_SSCC + 0x840;
203 break;
204 default:
205 base = (void __iomem *)UNIPHIER_SSCC + 0xc00;
206 break;
207 }
208
209 writel(active_ways, base + 4 * cpu);
210}
211
Masahiro Yamadadf649012016-08-10 16:08:42 +0900212static void uniphier_cache_endisable(int enable)
213{
214 u32 tmp;
215
216 tmp = readl(UNIPHIER_SSCC);
217 if (enable)
218 tmp |= UNIPHIER_SSCC_ON;
219 else
220 tmp &= ~UNIPHIER_SSCC_ON;
221 writel(tmp, UNIPHIER_SSCC);
222}
223
/* uniphier_cache_enable - turn the outer cache on */
void uniphier_cache_enable(void)
{
	uniphier_cache_endisable(1);
}
228
/* uniphier_cache_disable - turn the outer cache off */
void uniphier_cache_disable(void)
{
	uniphier_cache_endisable(0);
}
233
Masahiro Yamadabf44dde2016-08-10 16:08:45 +0900234#ifdef CONFIG_CACHE_UNIPHIER
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900235void v7_outer_cache_flush_all(void)
236{
237 uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
238}
239
240void v7_outer_cache_inval_all(void)
241{
242 uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
243}
244
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900245void v7_outer_cache_flush_range(u32 start, u32 end)
246{
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900247 uniphier_cache_maint_range(start, end, 0, UNIPHIER_SSCOQM_CM_FLUSH);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900248}
249
250void v7_outer_cache_inval_range(u32 start, u32 end)
251{
252 if (start & (UNIPHIER_SSC_LINE_SIZE - 1)) {
253 start &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900254 uniphier_cache_maint_range(start, UNIPHIER_SSC_LINE_SIZE, 0,
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900255 UNIPHIER_SSCOQM_CM_FLUSH);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900256 start += UNIPHIER_SSC_LINE_SIZE;
257 }
258
259 if (start >= end) {
260 uniphier_cache_sync();
261 return;
262 }
263
264 if (end & (UNIPHIER_SSC_LINE_SIZE - 1)) {
265 end &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900266 uniphier_cache_maint_range(end, UNIPHIER_SSC_LINE_SIZE, 0,
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900267 UNIPHIER_SSCOQM_CM_FLUSH);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900268 }
269
270 if (start >= end) {
271 uniphier_cache_sync();
272 return;
273 }
274
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900275 uniphier_cache_maint_range(start, end, 0, UNIPHIER_SSCOQM_CM_INV);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900276}
277
278void v7_outer_cache_enable(void)
279{
Masahiro Yamadaf98afb62016-08-10 16:08:48 +0900280 uniphier_cache_set_active_ways(0, U32_MAX); /* activate all ways */
Masahiro Yamadadf649012016-08-10 16:08:42 +0900281 uniphier_cache_enable();
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900282}
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900283
/* v7_outer_cache_disable - switch the outer cache off */
void v7_outer_cache_disable(void)
{
	uniphier_cache_disable();
}
Masahiro Yamada2c8df192016-08-10 16:08:38 +0900288#endif
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900289
/* enable_caches - U-Boot hook: turn on the data cache for this platform */
void enable_caches(void)
{
	dcache_enable();
}