/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one level of cache.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9: clobbered
 */
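/*
 * Editorial note: per the ARMv8 ARM, the DC (C)ISW operand packs the
 * cache level into bits [3:1], the way number left-justified at bit
 * (32 - log2(ways)), and the set number starting at log2(line size).
 * A hypothetical caller cleaning & invalidating level 0 would do:
 *
 *	mov	x0, #0			// cache level 0
 *	mov	x1, #0			// 0 = clean & invalidate
 *	bl	__asm_dcache_level
 */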
ENTRY(__asm_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- max number of #ways */
	clz	w5, w3			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- max number of #sets */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

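/*
 * Editorial worked example (hypothetical 32 KB, 4-way cache with 64-byte
 * lines): x2 = 6 (log2 of the line size), x3 = 3 (ways - 1), x5 = clz(3)
 * = 30, x4 = 127 (sets - 1). Each iteration below ORs level, way and set
 * into x9 to form the operand for the dc instruction.
 */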
loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f
	dc	isw, x9
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_dcache_level)

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache by SET/WAY.
 */
ENTRY(__asm_dcache_all)
	mov	x1, x0
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0 <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

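/*
 * Editorial note: each 3-bit Ctype field in clidr_el1 describes one cache
 * level: 0 = no cache, 1 = instruction only, 2 = data only, 3 = separate
 * I and D, 4 = unified. Values below 2 mean the level holds no data
 * cache, hence the b.lt skip below.
 */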
loop_level:
	lsl	x12, x0, #1
	add	x12, x12, x0		/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)

ENTRY(__asm_flush_dcache_all)
	mov	x0, #0
	b	__asm_dcache_all
ENDPROC(__asm_flush_dcache_all)

ENTRY(__asm_invalidate_dcache_all)
	mov	x0, #0x1
	b	__asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
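/*
 * Illustrative usage (editorial): these entry points are declared to C
 * code (e.g. in arch/arm/include/asm/system.h) as
 * void __asm_flush_dcache_all(void) and friends; from assembly a call is
 * simply:
 *
 *	bl	__asm_flush_dcache_all
 */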

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	lsr	x3, x3, #16
	and	x3, x3, #0xf
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
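	/*
	 * Editorial worked example: ctr_el0 bits [19:16] (DminLine) hold
	 * log2 of the smallest data cache line in 4-byte words, so for
	 * 64-byte lines DminLine is 4 and x2 = 4 << 4 = 64. The start
	 * address is aligned down to that line size before the loop.
	 */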
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0	/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
 */
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
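/*
 * Editorial note: ic ialluis invalidates all instruction caches to the
 * Point of Unification across the Inner Shareable domain, so other cores
 * in an SMP system are covered as well.
 *
 * The l3 helpers below are weak no-op stubs that simply return success;
 * platforms with an outer (L3/system) cache are expected to override
 * them.
 */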

ENTRY(__asm_invalidate_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_dcache)
	.weak	__asm_invalidate_l3_dcache

ENTRY(__asm_flush_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_dcache)
	.weak	__asm_flush_l3_dcache

ENTRY(__asm_invalidate_l3_icache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_icache)
	.weak	__asm_invalidate_l3_icache

/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 */
ENTRY(__asm_switch_ttbr)
	/* x2 = SCTLR (alive throughout the function) */
	switch_el x4, 3f, 2f, 1f
3:	mrs	x2, sctlr_el3
	b	0f
2:	mrs	x2, sctlr_el2
	b	0f
1:	mrs	x2, sctlr_el1
0:

	/* Unset CR_M | CR_C | CR_I from SCTLR to disable the MMU and all caches */
	movn	x1, #(CR_M | CR_C | CR_I)
	and	x1, x2, x1
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x1
	b	0f
2:	msr	sctlr_el2, x1
	b	0f
1:	msr	sctlr_el1, x1
0:	isb

	/* This call only clobbers x30 (lr) and x9 (unused) */
	mov	x3, x30
	bl	__asm_invalidate_tlb_all

	/* From here on we're running safely with caches disabled */

	/* Set TTBR to our first argument */
	switch_el x4, 3f, 2f, 1f
3:	msr	ttbr0_el3, x0
	b	0f
2:	msr	ttbr0_el2, x0
	b	0f
1:	msr	ttbr0_el1, x0
0:	isb

	/* Restore original SCTLR and thus enable caches again */
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x2
	b	0f
2:	msr	sctlr_el2, x2
	b	0f
1:	msr	sctlr_el1, x2
0:	isb

	ret	x3
ENDPROC(__asm_switch_ttbr)
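/*
 * Illustrative usage sketch (editorial), matching the prototype in the
 * header comment above; new_page_table is a hypothetical symbol:
 *
 *	ldr	x0, =new_page_table
 *	bl	__asm_switch_ttbr
 *
 * The caller must tolerate running briefly with the MMU and caches
 * disabled while the switch takes place.
 */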