blob: 658969b049118aa7f958d0b384bc7b65cb046bab [file] [log] [blame]
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +09001/*
2 * Copyright (C) 2012-2014 Panasonic Corporation
3 * Copyright (C) 2015-2016 Socionext Inc.
4 * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
5 *
6 * SPDX-License-Identifier: GPL-2.0+
7 */
8
9#include <common.h>
10#include <linux/io.h>
Masahiro Yamada5ffada92016-08-10 16:08:36 +090011#include <linux/kernel.h>
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +090012#include <asm/armv7.h>
Masahiro Yamada5ffada92016-08-10 16:08:36 +090013#include <asm/processor.h>
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +090014
Masahiro Yamada487b5fe2016-08-10 16:08:37 +090015#include "cache-uniphier.h"
Masahiro Yamadaf846a2b2016-08-10 16:08:44 +090016
17/* control registers */
18#define UNIPHIER_SSCC 0x500c0000 /* Control Register */
19#define UNIPHIER_SSCC_BST (0x1 << 20) /* UCWG burst read */
20#define UNIPHIER_SSCC_ACT (0x1 << 19) /* Inst-Data separate */
21#define UNIPHIER_SSCC_WTG (0x1 << 18) /* WT gathering on */
22#define UNIPHIER_SSCC_PRD (0x1 << 17) /* enable pre-fetch */
23#define UNIPHIER_SSCC_ON (0x1 << 0) /* enable cache */
24#define UNIPHIER_SSCLPDAWCR 0x500c0030 /* Unified/Data Active Way Control */
25#define UNIPHIER_SSCLPIAWCR 0x500c0034 /* Instruction Active Way Control */
26
27/* revision registers */
28#define UNIPHIER_SSCID 0x503c0100 /* ID Register */
29
30/* operation registers */
31#define UNIPHIER_SSCOPE 0x506c0244 /* Cache Operation Primitive Entry */
32#define UNIPHIER_SSCOPE_CM_INV 0x0 /* invalidate */
33#define UNIPHIER_SSCOPE_CM_CLEAN 0x1 /* clean */
34#define UNIPHIER_SSCOPE_CM_FLUSH 0x2 /* flush */
35#define UNIPHIER_SSCOPE_CM_SYNC 0x8 /* sync (drain bufs) */
36#define UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH 0x9 /* flush p-fetch buf */
37#define UNIPHIER_SSCOQM 0x506c0248
38#define UNIPHIER_SSCOQM_TID_MASK (0x3 << 21)
39#define UNIPHIER_SSCOQM_TID_LRU_DATA (0x0 << 21)
40#define UNIPHIER_SSCOQM_TID_LRU_INST (0x1 << 21)
41#define UNIPHIER_SSCOQM_TID_WAY (0x2 << 21)
42#define UNIPHIER_SSCOQM_S_MASK (0x3 << 17)
43#define UNIPHIER_SSCOQM_S_RANGE (0x0 << 17)
44#define UNIPHIER_SSCOQM_S_ALL (0x1 << 17)
45#define UNIPHIER_SSCOQM_S_WAY (0x2 << 17)
46#define UNIPHIER_SSCOQM_CE (0x1 << 15) /* notify completion */
47#define UNIPHIER_SSCOQM_CW (0x1 << 14)
48#define UNIPHIER_SSCOQM_CM_MASK (0x7)
49#define UNIPHIER_SSCOQM_CM_INV 0x0 /* invalidate */
50#define UNIPHIER_SSCOQM_CM_CLEAN 0x1 /* clean */
51#define UNIPHIER_SSCOQM_CM_FLUSH 0x2 /* flush */
52#define UNIPHIER_SSCOQM_CM_PREFETCH 0x3 /* prefetch to cache */
53#define UNIPHIER_SSCOQM_CM_PREFETCH_BUF 0x4 /* prefetch to pf-buf */
54#define UNIPHIER_SSCOQM_CM_TOUCH 0x5 /* touch */
55#define UNIPHIER_SSCOQM_CM_TOUCH_ZERO 0x6 /* touch to zero */
56#define UNIPHIER_SSCOQM_CM_TOUCH_DIRTY 0x7 /* touch with dirty */
57#define UNIPHIER_SSCOQAD 0x506c024c /* Cache Operation Queue Address */
58#define UNIPHIER_SSCOQSZ 0x506c0250 /* Cache Operation Queue Size */
59#define UNIPHIER_SSCOQMASK 0x506c0254 /* Cache Operation Queue Address Mask */
60#define UNIPHIER_SSCOQWN 0x506c0258 /* Cache Operation Queue Way Number */
61#define UNIPHIER_SSCOPPQSEF 0x506c025c /* Cache Operation Queue Set Complete */
62#define UNIPHIER_SSCOPPQSEF_FE (0x1 << 1)
63#define UNIPHIER_SSCOPPQSEF_OE (0x1 << 0)
64#define UNIPHIER_SSCOLPQS 0x506c0260 /* Cache Operation Queue Status */
65#define UNIPHIER_SSCOLPQS_EF (0x1 << 2)
66#define UNIPHIER_SSCOLPQS_EST (0x1 << 1)
67#define UNIPHIER_SSCOLPQS_QST (0x1 << 0)
68
69#define UNIPHIER_SSC_LINE_SIZE 128
70#define UNIPHIER_SSC_RANGE_OP_MAX_SIZE (0x00400000 - (UNIPHIER_SSC_LINE_SIZE))
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +090071
/* does this operation need an address range programmed in SSCOQAD/SSCOQSZ? */
#define UNIPHIER_SSCOQAD_IS_NEEDED(op) \
		(((op) & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)
/* does this operation need a target way mask programmed in SSCOQWN? */
#define UNIPHIER_SSCOQWM_IS_NEEDED(op) \
		((((op) & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_WAY) || \
		 (((op) & UNIPHIER_SSCOQM_TID_MASK) == UNIPHIER_SSCOQM_TID_WAY))
Masahiro Yamada5ffada92016-08-10 16:08:36 +090077
78/* uniphier_cache_sync - perform a sync point for a particular cache level */
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +090079static void uniphier_cache_sync(void)
80{
81 /* drain internal buffers */
82 writel(UNIPHIER_SSCOPE_CM_SYNC, UNIPHIER_SSCOPE);
83 /* need a read back to confirm */
84 readl(UNIPHIER_SSCOPE);
85}
86
Masahiro Yamada5ffada92016-08-10 16:08:36 +090087/**
88 * uniphier_cache_maint_common - run a queue operation
89 *
90 * @start: start address of range operation (don't care for "all" operation)
91 * @size: data size of range operation (don't care for "all" operation)
Masahiro Yamada487b5fe2016-08-10 16:08:37 +090092 * @ways: target ways (don't care for operations other than pre-fetch, touch
Masahiro Yamada5ffada92016-08-10 16:08:36 +090093 * @operation: flags to specify the desired cache operation
94 */
Masahiro Yamada487b5fe2016-08-10 16:08:37 +090095static void uniphier_cache_maint_common(u32 start, u32 size, u32 ways,
96 u32 operation)
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +090097{
98 /* clear the complete notification flag */
99 writel(UNIPHIER_SSCOLPQS_EF, UNIPHIER_SSCOLPQS);
100
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900101 do {
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900102 /* set cache operation */
103 writel(UNIPHIER_SSCOQM_CE | operation, UNIPHIER_SSCOQM);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900104
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900105 /* set address range if needed */
106 if (likely(UNIPHIER_SSCOQAD_IS_NEEDED(operation))) {
107 writel(start, UNIPHIER_SSCOQAD);
108 writel(size, UNIPHIER_SSCOQSZ);
109 }
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900110
111 /* set target ways if needed */
112 if (unlikely(UNIPHIER_SSCOQWM_IS_NEEDED(operation)))
113 writel(ways, UNIPHIER_SSCOQWN);
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900114 } while (unlikely(readl(UNIPHIER_SSCOPPQSEF) &
115 (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900116
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900117 /* wait until the operation is completed */
118 while (likely(readl(UNIPHIER_SSCOLPQS) != UNIPHIER_SSCOLPQS_EF))
119 cpu_relax();
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900120}
121
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900122static void uniphier_cache_maint_all(u32 operation)
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900123{
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900124 uniphier_cache_maint_common(0, 0, 0, UNIPHIER_SSCOQM_S_ALL | operation);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900125
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900126 uniphier_cache_sync();
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900127}
128
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900129static void uniphier_cache_maint_range(u32 start, u32 end, u32 ways,
130 u32 operation)
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900131{
132 u32 size;
133
134 /*
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900135 * If the start address is not aligned,
136 * perform a cache operation for the first cache-line
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900137 */
138 start = start & ~(UNIPHIER_SSC_LINE_SIZE - 1);
139
140 size = end - start;
141
142 if (unlikely(size >= (u32)(-UNIPHIER_SSC_LINE_SIZE))) {
143 /* this means cache operation for all range */
144 uniphier_cache_maint_all(operation);
145 return;
146 }
147
148 /*
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900149 * If the end address is not aligned,
150 * perform a cache operation for the last cache-line
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900151 */
152 size = ALIGN(size, UNIPHIER_SSC_LINE_SIZE);
153
154 while (size) {
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900155 u32 chunk_size = min_t(u32, size, UNIPHIER_SSC_RANGE_OP_MAX_SIZE);
156
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900157 uniphier_cache_maint_common(start, chunk_size, ways,
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900158 UNIPHIER_SSCOQM_S_RANGE | operation);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900159
160 start += chunk_size;
161 size -= chunk_size;
162 }
163
164 uniphier_cache_sync();
165}
166
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900167void uniphier_cache_prefetch_range(u32 start, u32 end, u32 ways)
168{
169 uniphier_cache_maint_range(start, end, ways,
170 UNIPHIER_SSCOQM_TID_WAY |
171 UNIPHIER_SSCOQM_CM_PREFETCH);
172}
173
174void uniphier_cache_touch_range(u32 start, u32 end, u32 ways)
175{
176 uniphier_cache_maint_range(start, end, ways,
177 UNIPHIER_SSCOQM_TID_WAY |
178 UNIPHIER_SSCOQM_CM_TOUCH);
179}
180
181void uniphier_cache_touch_zero_range(u32 start, u32 end, u32 ways)
182{
183 uniphier_cache_maint_range(start, end, ways,
184 UNIPHIER_SSCOQM_TID_WAY |
185 UNIPHIER_SSCOQM_CM_TOUCH_ZERO);
186}
187
Masahiro Yamada983b3d32016-08-10 16:08:47 +0900188void uniphier_cache_inv_way(u32 ways)
189{
190 uniphier_cache_maint_common(0, 0, ways,
191 UNIPHIER_SSCOQM_S_WAY |
192 UNIPHIER_SSCOQM_CM_INV);
193}
194
Masahiro Yamadaf98afb62016-08-10 16:08:48 +0900195void uniphier_cache_set_active_ways(int cpu, u32 active_ways)
196{
197 void __iomem *base = (void __iomem *)UNIPHIER_SSCC + 0xc00;
198
199 switch (readl(UNIPHIER_SSCID)) { /* revision */
200 case 0x11: /* sLD3 */
201 base = (void __iomem *)UNIPHIER_SSCC + 0x870;
202 break;
203 case 0x12: /* LD4 */
204 case 0x16: /* sld8 */
205 base = (void __iomem *)UNIPHIER_SSCC + 0x840;
206 break;
207 default:
208 base = (void __iomem *)UNIPHIER_SSCC + 0xc00;
209 break;
210 }
211
212 writel(active_ways, base + 4 * cpu);
213}
214
Masahiro Yamadadf649012016-08-10 16:08:42 +0900215static void uniphier_cache_endisable(int enable)
216{
217 u32 tmp;
218
219 tmp = readl(UNIPHIER_SSCC);
220 if (enable)
221 tmp |= UNIPHIER_SSCC_ON;
222 else
223 tmp &= ~UNIPHIER_SSCC_ON;
224 writel(tmp, UNIPHIER_SSCC);
225}
226
/* enable the outer cache */
void uniphier_cache_enable(void)
{
	uniphier_cache_endisable(1);
}
231
/* disable the outer cache */
void uniphier_cache_disable(void)
{
	uniphier_cache_endisable(0);
}
236
Masahiro Yamadabf44dde2016-08-10 16:08:45 +0900237#ifdef CONFIG_CACHE_UNIPHIER
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900238void v7_outer_cache_flush_all(void)
239{
240 uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
241}
242
243void v7_outer_cache_inval_all(void)
244{
245 uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
246}
247
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900248void v7_outer_cache_flush_range(u32 start, u32 end)
249{
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900250 uniphier_cache_maint_range(start, end, 0, UNIPHIER_SSCOQM_CM_FLUSH);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900251}
252
253void v7_outer_cache_inval_range(u32 start, u32 end)
254{
255 if (start & (UNIPHIER_SSC_LINE_SIZE - 1)) {
256 start &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900257 uniphier_cache_maint_range(start, UNIPHIER_SSC_LINE_SIZE, 0,
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900258 UNIPHIER_SSCOQM_CM_FLUSH);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900259 start += UNIPHIER_SSC_LINE_SIZE;
260 }
261
262 if (start >= end) {
263 uniphier_cache_sync();
264 return;
265 }
266
267 if (end & (UNIPHIER_SSC_LINE_SIZE - 1)) {
268 end &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900269 uniphier_cache_maint_range(end, UNIPHIER_SSC_LINE_SIZE, 0,
Masahiro Yamada5ffada92016-08-10 16:08:36 +0900270 UNIPHIER_SSCOQM_CM_FLUSH);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900271 }
272
273 if (start >= end) {
274 uniphier_cache_sync();
275 return;
276 }
277
Masahiro Yamada487b5fe2016-08-10 16:08:37 +0900278 uniphier_cache_maint_range(start, end, 0, UNIPHIER_SSCOQM_CM_INV);
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900279}
280
281void v7_outer_cache_enable(void)
282{
Masahiro Yamadaf98afb62016-08-10 16:08:48 +0900283 uniphier_cache_set_active_ways(0, U32_MAX); /* activate all ways */
Masahiro Yamadadf649012016-08-10 16:08:42 +0900284 uniphier_cache_enable();
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900285}
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900286
/* switch the outer cache off */
void v7_outer_cache_disable(void)
{
	uniphier_cache_disable();
}
Masahiro Yamada2c8df192016-08-10 16:08:38 +0900291#endif
Masahiro Yamadaa7c901f2016-07-22 13:38:31 +0900292
/* U-Boot hook: enable the data cache */
void enable_caches(void)
{
	dcache_enable();
}