/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

#ifndef CONFIG_CMO_BY_VA_ONLY
/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one level of cache.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x16: FEAT_CCIDX
 * x2~x9: clobbered
 */
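/*
 * Illustrative example (cache geometry assumed, not specific to any SoC):
 * for a 32 KiB, 4-way L1 data cache with 64-byte lines (128 sets), the
 * loop below builds the DC (C)ISW operand as
 *   (way << clz(#ways - 1)) | (set << log2(line size)) | (level << 1)
 * so way 2, set 5, level 0 gives (2 << 30) | (5 << 6) | 0 = 0x80000140.
 */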
.pushsection .text.__asm_dcache_level, "ax"
ENTRY(__asm_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	ubfx	x2, x6, #0, #3		/* x2 <- log2(cache line size)-4 */
	cbz	x16, 3f			/* check for FEAT_CCIDX */
	ubfx	x3, x6, #3, #21		/* x3 <- number of cache ways - 1 */
	ubfx	x4, x6, #32, #24	/* x4 <- number of cache sets - 1 */
	b	4f
3:
	ubfx	x3, x6, #3, #10		/* x3 <- number of cache ways - 1 */
	ubfx	x4, x6, #13, #15	/* x4 <- number of cache sets - 1 */
4:
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	clz	w5, w3			/* bit position of #ways */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f
	dc	isw, x9
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_dcache_level)
.popsection

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache by SET/WAY.
 */
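/*
 * Worked example (cache hierarchy assumed for illustration): a core with
 * separate L1 I/D caches (Ctype = 0b011) and a unified L2 (Ctype = 0b100)
 * and nothing beyond reports CLIDR_EL1[5:0] = 0b100011 with LoC = 2, so
 * the level loop below calls __asm_dcache_level for levels 0 and 1 and
 * skips any level whose 3-bit Ctype field is below 2 (no cache, or
 * instruction cache only).
 */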
.pushsection .text.__asm_dcache_all, "ax"
ENTRY(__asm_dcache_all)
	mov	x1, x0
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	ubfx	x11, x10, #24, #3	/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mrs	x16, s3_0_c0_c7_2	/* read value of id_aa64mmfr2_el1 */
	ubfx	x16, x16, #20, #4	/* save FEAT_CCIDX identifier in x16 */
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

loop_level:
	add	x12, x0, x0, lsl #1	/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)
.popsection

.pushsection .text.__asm_flush_dcache_all, "ax"
ENTRY(__asm_flush_dcache_all)
	mov	x0, #0
	b	__asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
.popsection

.pushsection .text.__asm_invalidate_dcache_all, "ax"
ENTRY(__asm_invalidate_dcache_all)
	mov	x0, #0x1
	b	__asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
.popsection
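/*
 * Sketch of how these set/way entry points are assumed to be seen from C
 * (prototypes assumed here for illustration, not defined in this file):
 *
 *   void __asm_flush_dcache_all(void);       clean & invalidate by set/way
 *   void __asm_invalidate_dcache_all(void);  invalidate only
 */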

.pushsection .text.__asm_flush_l3_dcache, "ax"
WEAK(__asm_flush_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_dcache)
.popsection

.pushsection .text.__asm_invalidate_l3_icache, "ax"
WEAK(__asm_invalidate_l3_icache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_icache)
.popsection

#else	/* CONFIG_CMO_BY_VA_ONLY */

/*
 * Define these as strong symbols so that they actively clash with a
 * platform implementation accidentally left in a build that selects
 * CONFIG_CMO_BY_VA_ONLY.
 */

.pushsection .text.__asm_invalidate_l3_icache, "ax"
ENTRY(__asm_invalidate_l3_icache)
	mov	x0, xzr
	ret
ENDPROC(__asm_invalidate_l3_icache)
.popsection
.pushsection .text.__asm_flush_l3_dcache, "ax"
ENTRY(__asm_flush_l3_dcache)
	mov	x0, xzr
	ret
ENDPROC(__asm_flush_l3_dcache)
.popsection
#endif	/* CONFIG_CMO_BY_VA_ONLY */

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
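/*
 * Worked example (line size assumed for illustration): CTR_EL0[19:16] is
 * DminLine, the log2 of the smallest data cache line size in words, so
 * with DminLine = 4 the line size computed below is 4 << 4 = 64 bytes and
 * the start address is rounded down to a 64-byte boundary before the
 * clean & invalidate loop.
 */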
.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	ubfx	x3, x3, #16, #4
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0	/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)
.popsection
/*
 * void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
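/*
 * Note: invalidation discards data, and the start address is rounded down
 * to a cache-line boundary below, so callers should normally pass a range
 * whose start and end are cache-line aligned; otherwise dirty data that
 * shares the first or last line with the buffer may be lost.
 */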
.pushsection .text.__asm_invalidate_dcache_range, "ax"
ENTRY(__asm_invalidate_dcache_range)
	mrs	x3, ctr_el0
	ubfx	x3, x3, #16, #4
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	ivac, x0	/* invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_invalidate_dcache_range)
.popsection

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all instruction cache entries.
 */
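/*
 * ic ialluis invalidates all instruction caches to the Point of
 * Unification across the Inner Shareable domain; the isb that follows
 * ensures later instruction fetches see the effect.
 */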
.pushsection .text.__asm_invalidate_icache_all, "ax"
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
.popsection

.pushsection .text.__asm_invalidate_l3_dcache, "ax"
WEAK(__asm_invalidate_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_dcache)
.popsection

/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 */
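/*
 * Sketch of assumed usage from C (prototype assumed for illustration,
 * not defined in this file):
 *
 *   extern void __asm_switch_ttbr(ulong new_ttbr);
 *   __asm_switch_ttbr((ulong)new_page_table);
 *
 * The MMU and caches are disabled while the TTBR is swapped, so the code
 * performing the call has to remain reachable and valid both with
 * translation off and under the new page table.
 */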
.pushsection .text.__asm_switch_ttbr, "ax"
ENTRY(__asm_switch_ttbr)
	/* x2 = SCTLR (alive throughout the function) */
	switch_el x4, 3f, 2f, 1f
3:	mrs	x2, sctlr_el3
	b	0f
2:	mrs	x2, sctlr_el2
	b	0f
1:	mrs	x2, sctlr_el1
0:

	/* Clear CR_M | CR_C | CR_I in SCTLR to disable the MMU and caches */
	movn	x1, #(CR_M | CR_C | CR_I)
	and	x1, x2, x1
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x1
	b	0f
2:	msr	sctlr_el2, x1
	b	0f
1:	msr	sctlr_el1, x1
0:	isb

	/* This call only clobbers x30 (lr) and x9 (unused) */
	mov	x3, x30
	bl	__asm_invalidate_tlb_all

	/* From here on we're running safely with caches disabled */

	/* Set TTBR to our first argument */
	switch_el x4, 3f, 2f, 1f
3:	msr	ttbr0_el3, x0
	b	0f
2:	msr	ttbr0_el2, x0
	b	0f
1:	msr	ttbr0_el1, x0
0:	isb

	/* Restore original SCTLR and thus enable caches again */
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x2
	b	0f
2:	msr	sctlr_el2, x2
	b	0f
1:	msr	sctlr_el1, x2
0:	isb

	ret	x3
ENDPROC(__asm_switch_ttbr)
.popsection