// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2012-2014 Panasonic Corporation
 * Copyright (C) 2015-2016 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 */

#include <common.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/armv7.h>
#include <asm/processor.h>

#include "cache-uniphier.h"
Masahiro Yamadaf846a2b2016-08-10 16:08:44 +090015
16/* control registers */
17#define UNIPHIER_SSCC 0x500c0000 /* Control Register */
18#define UNIPHIER_SSCC_BST (0x1 << 20) /* UCWG burst read */
19#define UNIPHIER_SSCC_ACT (0x1 << 19) /* Inst-Data separate */
20#define UNIPHIER_SSCC_WTG (0x1 << 18) /* WT gathering on */
21#define UNIPHIER_SSCC_PRD (0x1 << 17) /* enable pre-fetch */
22#define UNIPHIER_SSCC_ON (0x1 << 0) /* enable cache */
23#define UNIPHIER_SSCLPDAWCR 0x500c0030 /* Unified/Data Active Way Control */
24#define UNIPHIER_SSCLPIAWCR 0x500c0034 /* Instruction Active Way Control */
25
26/* revision registers */
27#define UNIPHIER_SSCID 0x503c0100 /* ID Register */
28
29/* operation registers */
30#define UNIPHIER_SSCOPE 0x506c0244 /* Cache Operation Primitive Entry */
31#define UNIPHIER_SSCOPE_CM_INV 0x0 /* invalidate */
32#define UNIPHIER_SSCOPE_CM_CLEAN 0x1 /* clean */
33#define UNIPHIER_SSCOPE_CM_FLUSH 0x2 /* flush */
34#define UNIPHIER_SSCOPE_CM_SYNC 0x8 /* sync (drain bufs) */
35#define UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH 0x9 /* flush p-fetch buf */
36#define UNIPHIER_SSCOQM 0x506c0248
37#define UNIPHIER_SSCOQM_TID_MASK (0x3 << 21)
38#define UNIPHIER_SSCOQM_TID_LRU_DATA (0x0 << 21)
39#define UNIPHIER_SSCOQM_TID_LRU_INST (0x1 << 21)
40#define UNIPHIER_SSCOQM_TID_WAY (0x2 << 21)
41#define UNIPHIER_SSCOQM_S_MASK (0x3 << 17)
42#define UNIPHIER_SSCOQM_S_RANGE (0x0 << 17)
43#define UNIPHIER_SSCOQM_S_ALL (0x1 << 17)
44#define UNIPHIER_SSCOQM_S_WAY (0x2 << 17)
45#define UNIPHIER_SSCOQM_CE (0x1 << 15) /* notify completion */
46#define UNIPHIER_SSCOQM_CW (0x1 << 14)
47#define UNIPHIER_SSCOQM_CM_MASK (0x7)
48#define UNIPHIER_SSCOQM_CM_INV 0x0 /* invalidate */
49#define UNIPHIER_SSCOQM_CM_CLEAN 0x1 /* clean */
50#define UNIPHIER_SSCOQM_CM_FLUSH 0x2 /* flush */
51#define UNIPHIER_SSCOQM_CM_PREFETCH 0x3 /* prefetch to cache */
52#define UNIPHIER_SSCOQM_CM_PREFETCH_BUF 0x4 /* prefetch to pf-buf */
53#define UNIPHIER_SSCOQM_CM_TOUCH 0x5 /* touch */
54#define UNIPHIER_SSCOQM_CM_TOUCH_ZERO 0x6 /* touch to zero */
55#define UNIPHIER_SSCOQM_CM_TOUCH_DIRTY 0x7 /* touch with dirty */
56#define UNIPHIER_SSCOQAD 0x506c024c /* Cache Operation Queue Address */
57#define UNIPHIER_SSCOQSZ 0x506c0250 /* Cache Operation Queue Size */
58#define UNIPHIER_SSCOQMASK 0x506c0254 /* Cache Operation Queue Address Mask */
59#define UNIPHIER_SSCOQWN 0x506c0258 /* Cache Operation Queue Way Number */
60#define UNIPHIER_SSCOPPQSEF 0x506c025c /* Cache Operation Queue Set Complete */
61#define UNIPHIER_SSCOPPQSEF_FE (0x1 << 1)
62#define UNIPHIER_SSCOPPQSEF_OE (0x1 << 0)
63#define UNIPHIER_SSCOLPQS 0x506c0260 /* Cache Operation Queue Status */
64#define UNIPHIER_SSCOLPQS_EF (0x1 << 2)
65#define UNIPHIER_SSCOLPQS_EST (0x1 << 1)
66#define UNIPHIER_SSCOLPQS_QST (0x1 << 0)
67
68#define UNIPHIER_SSC_LINE_SIZE 128
69#define UNIPHIER_SSC_RANGE_OP_MAX_SIZE (0x00400000 - (UNIPHIER_SSC_LINE_SIZE))
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +090070
Masahiro Yamada5ffada92016-08-10 16:08:36 +090071#define UNIPHIER_SSCOQAD_IS_NEEDED(op) \
72 ((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)
Masahiro Yamada487b5fe2016-08-10 16:08:37 +090073#define UNIPHIER_SSCOQWM_IS_NEEDED(op) \
Masahiro Yamada983b3d32016-08-10 16:08:47 +090074 (((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_WAY) || \
75 ((op & UNIPHIER_SSCOQM_TID_MASK) == UNIPHIER_SSCOQM_TID_WAY))
Masahiro Yamada5ffada92016-08-10 16:08:36 +090076
77/* uniphier_cache_sync - perform a sync point for a particular cache level */
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +090078static void uniphier_cache_sync(void)
79{
80 /* drain internal buffers */
81 writel(UNIPHIER_SSCOPE_CM_SYNC, UNIPHIER_SSCOPE);
82 /* need a read back to confirm */
83 readl(UNIPHIER_SSCOPE);
84}
85
Masahiro Yamada5ffada92016-08-10 16:08:36 +090086/**
87 * uniphier_cache_maint_common - run a queue operation
88 *
89 * @start: start address of range operation (don't care for "all" operation)
90 * @size: data size of range operation (don't care for "all" operation)
Masahiro Yamada487b5fe2016-08-10 16:08:37 +090091 * @ways: target ways (don't care for operations other than pre-fetch, touch
Masahiro Yamada5ffada92016-08-10 16:08:36 +090092 * @operation: flags to specify the desired cache operation
93 */
Masahiro Yamada487b5fe2016-08-10 16:08:37 +090094static void uniphier_cache_maint_common(u32 start, u32 size, u32 ways,
95 u32 operation)
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +090096{
97 /* clear the complete notification flag */
98 writel(UNIPHIER_SSCOLPQS_EF, UNIPHIER_SSCOLPQS);
99
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900100 do {
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900101 /* set cache operation */
102 writel(UNIPHIER_SSCOQM_CE | operation, UNIPHIER_SSCOQM);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900103
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900104 /* set address range if needed */
105 if (likely(UNIPHIER_SSCOQAD_IS_NEEDED(operation))) {
106 writel(start, UNIPHIER_SSCOQAD);
107 writel(size, UNIPHIER_SSCOQSZ);
108 }
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900109
110 /* set target ways if needed */
111 if (unlikely(UNIPHIER_SSCOQWM_IS_NEEDED(operation)))
112 writel(ways, UNIPHIER_SSCOQWN);
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900113 } while (unlikely(readl(UNIPHIER_SSCOPPQSEF) &
114 (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900115
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900116 /* wait until the operation is completed */
117 while (likely(readl(UNIPHIER_SSCOLPQS) != UNIPHIER_SSCOLPQS_EF))
118 cpu_relax();
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900119}
120
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900121static void uniphier_cache_maint_all(u32 operation)
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900122{
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900123 uniphier_cache_maint_common(0, 0, 0, UNIPHIER_SSCOQM_S_ALL | operation);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900124
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900125 uniphier_cache_sync();
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900126}
127
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900128static void uniphier_cache_maint_range(u32 start, u32 end, u32 ways,
129 u32 operation)
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900130{
131 u32 size;
132
133 /*
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900134 * If the start address is not aligned,
135 * perform a cache operation for the first cache-line
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900136 */
137 start = start & ~(UNIPHIER_SSC_LINE_SIZE - 1);
138
139 size = end - start;
140
141 if (unlikely(size >= (u32)(-UNIPHIER_SSC_LINE_SIZE))) {
142 /* this means cache operation for all range */
143 uniphier_cache_maint_all(operation);
144 return;
145 }
146
147 /*
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900148 * If the end address is not aligned,
149 * perform a cache operation for the last cache-line
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900150 */
151 size = ALIGN(size, UNIPHIER_SSC_LINE_SIZE);
152
153 while (size) {
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900154 u32 chunk_size = min_t(u32, size, UNIPHIER_SSC_RANGE_OP_MAX_SIZE);
155
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900156 uniphier_cache_maint_common(start, chunk_size, ways,
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900157 UNIPHIER_SSCOQM_S_RANGE | operation);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900158
159 start += chunk_size;
160 size -= chunk_size;
161 }
162
163 uniphier_cache_sync();
164}
165
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900166void uniphier_cache_prefetch_range(u32 start, u32 end, u32 ways)
167{
168 uniphier_cache_maint_range(start, end, ways,
169 UNIPHIER_SSCOQM_TID_WAY |
170 UNIPHIER_SSCOQM_CM_PREFETCH);
171}
172
173void uniphier_cache_touch_range(u32 start, u32 end, u32 ways)
174{
175 uniphier_cache_maint_range(start, end, ways,
176 UNIPHIER_SSCOQM_TID_WAY |
177 UNIPHIER_SSCOQM_CM_TOUCH);
178}
179
180void uniphier_cache_touch_zero_range(u32 start, u32 end, u32 ways)
181{
182 uniphier_cache_maint_range(start, end, ways,
183 UNIPHIER_SSCOQM_TID_WAY |
184 UNIPHIER_SSCOQM_CM_TOUCH_ZERO);
185}
186
Masahiro Yamada983b3d32016-08-10 16:08:47 +0900187void uniphier_cache_inv_way(u32 ways)
188{
189 uniphier_cache_maint_common(0, 0, ways,
190 UNIPHIER_SSCOQM_S_WAY |
191 UNIPHIER_SSCOQM_CM_INV);
192}
193
Masahiro Yamadaf98afb62016-08-10 16:08:48 +0900194void uniphier_cache_set_active_ways(int cpu, u32 active_ways)
195{
196 void __iomem *base = (void __iomem *)UNIPHIER_SSCC + 0xc00;
197
198 switch (readl(UNIPHIER_SSCID)) { /* revision */
Masahiro Yamadaf98afb62016-08-10 16:08:48 +0900199 case 0x12: /* LD4 */
200 case 0x16: /* sld8 */
201 base = (void __iomem *)UNIPHIER_SSCC + 0x840;
202 break;
203 default:
204 base = (void __iomem *)UNIPHIER_SSCC + 0xc00;
205 break;
206 }
207
208 writel(active_ways, base + 4 * cpu);
209}
210
Masahiro Yamadadf649012016-08-10 16:08:42 +0900211static void uniphier_cache_endisable(int enable)
212{
213 u32 tmp;
214
215 tmp = readl(UNIPHIER_SSCC);
216 if (enable)
217 tmp |= UNIPHIER_SSCC_ON;
218 else
219 tmp &= ~UNIPHIER_SSCC_ON;
220 writel(tmp, UNIPHIER_SSCC);
221}
222
223void uniphier_cache_enable(void)
224{
225 uniphier_cache_endisable(1);
226}
227
228void uniphier_cache_disable(void)
229{
230 uniphier_cache_endisable(0);
231}
232
Masahiro Yamadabf44dde2016-08-10 16:08:45 +0900233#ifdef CONFIG_CACHE_UNIPHIER
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900234void v7_outer_cache_flush_all(void)
235{
236 uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
237}
238
239void v7_outer_cache_inval_all(void)
240{
241 uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
242}
243
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900244void v7_outer_cache_flush_range(u32 start, u32 end)
245{
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900246 uniphier_cache_maint_range(start, end, 0, UNIPHIER_SSCOQM_CM_FLUSH);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900247}
248
249void v7_outer_cache_inval_range(u32 start, u32 end)
250{
251 if (start & (UNIPHIER_SSC_LINE_SIZE - 1)) {
252 start &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900253 uniphier_cache_maint_range(start, UNIPHIER_SSC_LINE_SIZE, 0,
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900254 UNIPHIER_SSCOQM_CM_FLUSH);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900255 start += UNIPHIER_SSC_LINE_SIZE;
256 }
257
258 if (start >= end) {
259 uniphier_cache_sync();
260 return;
261 }
262
263 if (end & (UNIPHIER_SSC_LINE_SIZE - 1)) {
264 end &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900265 uniphier_cache_maint_range(end, UNIPHIER_SSC_LINE_SIZE, 0,
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900266 UNIPHIER_SSCOQM_CM_FLUSH);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900267 }
268
269 if (start >= end) {
270 uniphier_cache_sync();
271 return;
272 }
273
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900274 uniphier_cache_maint_range(start, end, 0, UNIPHIER_SSCOQM_CM_INV);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900275}
276
277void v7_outer_cache_enable(void)
278{
Masahiro Yamadaf98afb62016-08-10 16:08:48 +0900279 uniphier_cache_set_active_ways(0, U32_MAX); /* activate all ways */
Masahiro Yamadadf649012016-08-10 16:08:42 +0900280 uniphier_cache_enable();
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900281}
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900282
283void v7_outer_cache_disable(void)
284{
Masahiro Yamadadf649012016-08-10 16:08:42 +0900285 uniphier_cache_disable();
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900286}
Masahiro Yamada2c8df192016-08-10 16:08:38 +0900287#endif
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900288
289void enable_caches(void)
290{
291 dcache_enable();
292}