/*
 * Copyright (C) 2012-2014 Panasonic Corporation
 * Copyright (C) 2015 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/armv7.h>
#include <mach/ssc-regs.h>

#ifdef CONFIG_UNIPHIER_L2CACHE_ON
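/*
 * Issue a maintenance operation on the entire outer cache (SSC) and
 * busy-wait until the controller reports completion.
 */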
static void uniphier_cache_maint_all(u32 operation)
{
	/* try until the command is successfully set */
	do {
		writel(SSCOQM_S_ALL | SSCOQM_CE | operation, SSCOQM);
	} while (readl(SSCOPPQSEF) & (SSCOPPQSEF_FE | SSCOPPQSEF_OE));

	/* wait until the operation is completed */
	while (readl(SSCOLPQS) != SSCOLPQS_EF)
		;

	/* clear the complete notification flag */
	writel(SSCOLPQS_EF, SSCOLPQS);

	writel(SSCOPE_CM_SYNC, SSCOPE);	/* drain internal buffers */
	readl(SSCOPE);			/* need a read back to confirm */
}

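/*
 * The v7_outer_cache_*() functions below implement the outer cache
 * hooks declared in <asm/armv7.h> (included above), so the generic
 * ARMv7 cache code can maintain the L2 together with the L1.
 */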
void v7_outer_cache_flush_all(void)
{
	uniphier_cache_maint_all(SSCOQM_CM_WB_INV);
}

void v7_outer_cache_inval_all(void)
{
	uniphier_cache_maint_all(SSCOQM_CM_INV);
}

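/*
 * Issue a single queued maintenance operation on an address range.
 * The caller is expected to align the range to cache lines and to keep
 * the size within SSC_RANGE_OP_MAX_SIZE, as uniphier_cache_maint_range()
 * below does.
 */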
static void __uniphier_cache_maint_range(u32 start, u32 size, u32 operation)
{
	/* try until the command is successfully set */
	do {
		writel(SSCOQM_S_ADDRESS | SSCOQM_CE | operation, SSCOQM);
		writel(start, SSCOQAD);
		writel(size, SSCOQSZ);

	} while (readl(SSCOPPQSEF) & (SSCOPPQSEF_FE | SSCOPPQSEF_OE));

	/* wait until the operation is completed */
	while (readl(SSCOLPQS) != SSCOLPQS_EF)
		;

	/* clear the complete notification flag */
	writel(SSCOLPQS_EF, SSCOLPQS);
}

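/*
 * Perform a maintenance operation on an arbitrary address range:
 * align the range to cache lines, fall back to a whole-cache operation
 * when the range covers the entire 32-bit address space, and otherwise
 * split the work into chunks of at most SSC_RANGE_OP_MAX_SIZE bytes.
 */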
static void uniphier_cache_maint_range(u32 start, u32 end, u32 operation)
{
	u32 size;

	/*
	 * If the start address is not aligned to a cache line, round it
	 * down so that the operation covers the first (partial) cache line.
	 */
	start = start & ~(SSC_LINE_SIZE - 1);

	if (start == 0 && end >= (u32)(-SSC_LINE_SIZE)) {
		/*
		 * The range covers the entire address space;
		 * operate on the whole cache instead.
		 */
		uniphier_cache_maint_all(operation);
		return;
	}

	/*
	 * If the end address is not aligned to a cache line, round the size
	 * up so that the operation covers the last (partial) cache line.
	 */
	size = (end - start + SSC_LINE_SIZE - 1) & ~(SSC_LINE_SIZE - 1);

	while (size) {
		u32 chunk_size = size > SSC_RANGE_OP_MAX_SIZE ?
						SSC_RANGE_OP_MAX_SIZE : size;
		__uniphier_cache_maint_range(start, chunk_size, operation);

		start += chunk_size;
		size -= chunk_size;
	}

	writel(SSCOPE_CM_SYNC, SSCOPE);	/* drain internal buffers */
	readl(SSCOPE);			/* need a read back to confirm */
}

void v7_outer_cache_flush_range(u32 start, u32 end)
{
	uniphier_cache_maint_range(start, end, SSCOQM_CM_WB_INV);
}

void v7_outer_cache_inval_range(u32 start, u32 end)
{
	uniphier_cache_maint_range(start, end, SSCOQM_CM_INV);
}

void v7_outer_cache_enable(void)
{
	u32 tmp;
	tmp = readl(SSCC);
	tmp |= SSCC_ON;
	writel(tmp, SSCC);
}
#endif

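/*
 * Note: this function is compiled even when CONFIG_UNIPHIER_L2CACHE_ON
 * is not set (it is outside the #ifdef above), presumably so that the
 * outer cache can always be turned off.
 */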
void v7_outer_cache_disable(void)
{
	u32 tmp;
	tmp = readl(SSCC);
	tmp &= ~SSCC_ON;
	writel(tmp, SSCC);
}

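/*
 * enable_caches() is the hook invoked from U-Boot's generic
 * initialization code; this platform only enables the data cache here.
 */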
void enable_caches(void)
{
	dcache_enable();
}