/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

#ifndef CONFIG_CMO_BY_VA_ONLY
/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one level of cache.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9: clobbered
 */
.pushsection .text.__asm_dcache_level, "ax"
ENTRY(__asm_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	ubfx	x2, x6, #0, #3		/* x2 <- log2(cache line size)-4 */
	ubfx	x3, x6, #3, #10		/* x3 <- number of cache ways - 1 */
	ubfx	x4, x6, #13, #15	/* x4 <- number of cache sets - 1 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	clz	w5, w3			/* bit position of #ways */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

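	/*
	 * Iterate over every set and way.  The DC (C)ISW operand is
	 * built from the level (x12, already shifted left by one),
	 * the way index shifted up to the top bits (by x5) and the
	 * set index shifted past the line-length offset (by x2).
	 */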
loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f
	dc	isw, x9
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_dcache_level)
.popsection

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache by SET/WAY.
 */
.pushsection .text.__asm_dcache_all, "ax"
ENTRY(__asm_dcache_all)
	mov	x1, x0
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	ubfx	x11, x10, #24, #3	/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0 <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

loop_level:
	add	x12, x0, x0, lsl #1	/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
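	/* CLIDR Ctype: 0 - no cache, 1 - icache only, >= 2 - dcache present */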
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)
.popsection

.pushsection .text.__asm_flush_dcache_all, "ax"
ENTRY(__asm_flush_dcache_all)
	mov	x0, #0
	b	__asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
.popsection

.pushsection .text.__asm_invalidate_dcache_all, "ax"
ENTRY(__asm_invalidate_dcache_all)
	mov	x0, #0x1
	b	__asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
.popsection

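/*
 * Weak defaults: platforms with an external L3/system cache are
 * expected to override these in SoC-specific code.
 */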
.pushsection .text.__asm_flush_l3_dcache, "ax"
WEAK(__asm_flush_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_dcache)
.popsection

.pushsection .text.__asm_invalidate_l3_icache, "ax"
WEAK(__asm_invalidate_l3_icache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_icache)
.popsection

#else	/* CONFIG_CMO_BY_VA_ONLY */

/*
 * Define these as strong symbols so that they actively clash with any
 * implementation accidentally linked in while CONFIG_CMO_BY_VA_ONLY
 * is selected.
 */

.pushsection .text.__asm_invalidate_l3_icache, "ax"
ENTRY(__asm_invalidate_l3_icache)
	mov	x0, xzr
	ret
ENDPROC(__asm_invalidate_l3_icache)
.popsection
.pushsection .text.__asm_flush_l3_dcache, "ax"
ENTRY(__asm_flush_l3_dcache)
	mov	x0, xzr
	ret
ENDPROC(__asm_flush_l3_dcache)
.popsection
#endif	/* CONFIG_CMO_BY_VA_ONLY */

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
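	/*
	 * CTR_EL0.DminLine (bits [19:16]) is log2 of the smallest
	 * D-cache line size in words, so the line size in bytes is
	 * 4 << DminLine.
	 */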
	mrs	x3, ctr_el0
	ubfx	x3, x3, #16, #4
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
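	/* x0 <- start address rounded down to a cache line boundary */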
1:	dc	civac, x0		/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)
.popsection
/*
 * void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
.pushsection .text.__asm_invalidate_dcache_range, "ax"
ENTRY(__asm_invalidate_dcache_range)
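	/*
	 * Note: the range is widened to cache line boundaries, so a
	 * dirty line straddling start or end is invalidated as well
	 * and its contents are lost.
	 */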
	mrs	x3, ctr_el0
	ubfx	x3, x3, #16, #4
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	ivac, x0		/* invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_invalidate_dcache_range)
.popsection

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
 */
.pushsection .text.__asm_invalidate_icache_all, "ax"
ENTRY(__asm_invalidate_icache_all)
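	/* IC IALLUIS: invalidate all I-caches to PoU, Inner Shareable */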
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
.popsection

.pushsection .text.__asm_invalidate_l3_dcache, "ax"
WEAK(__asm_invalidate_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_dcache)
.popsection

/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 */
.pushsection .text.__asm_switch_ttbr, "ax"
ENTRY(__asm_switch_ttbr)
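	/*
	 * Sequence: save the current SCTLR, disable MMU and caches,
	 * invalidate the TLBs, install the new TTBR0, then restore
	 * the original SCTLR.
	 */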
	/* x2 = SCTLR (alive throughout the function) */
	switch_el x4, 3f, 2f, 1f
3:	mrs	x2, sctlr_el3
	b	0f
2:	mrs	x2, sctlr_el2
	b	0f
1:	mrs	x2, sctlr_el1
0:

	/* Unset CR_M | CR_C | CR_I from SCTLR to disable all caches */
	movn	x1, #(CR_M | CR_C | CR_I)
	and	x1, x2, x1
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x1
	b	0f
2:	msr	sctlr_el2, x1
	b	0f
1:	msr	sctlr_el1, x1
0:	isb

	/* This call only clobbers x30 (lr) and x9 (unused) */
	mov	x3, x30
	bl	__asm_invalidate_tlb_all

	/* From here on we're running safely with caches disabled */

	/* Set TTBR to our first argument */
	switch_el x4, 3f, 2f, 1f
3:	msr	ttbr0_el3, x0
	b	0f
2:	msr	ttbr0_el2, x0
	b	0f
1:	msr	ttbr0_el1, x0
0:	isb

	/* Restore original SCTLR and thus enable caches again */
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x2
	b	0f
2:	msr	sctlr_el2, x2
	b	0f
1:	msr	sctlr_el1, x2
0:	isb

	ret	x3
ENDPROC(__asm_switch_ttbr)
.popsection