/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one level of cache.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9: clobbered
 */
.pushsection .text.__asm_dcache_level, "ax"
ENTRY(__asm_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- max number of #ways */
	clz	w5, w3			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- max number of #sets */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

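	/*
	 * Per the ARMv8 ARM, the operand for dc isw/cisw encodes the
	 * way in the top bits (hence the clz above), the set starting
	 * at the line-length offset, and the level in bits [3:1].
	 * The nested loops below build that value for every set/way.
	 */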
loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f
	dc	isw, x9
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_dcache_level)
.popsection

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache by SET/WAY.
 */
.pushsection .text.__asm_dcache_all, "ax"
ENTRY(__asm_dcache_all)
	mov	x1, x0
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

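	/*
	 * clidr_el1 has a 3-bit Ctype field per level: 0 = no cache,
	 * 1 = icache only, 2 = dcache only, 3 = separate i/d,
	 * 4 = unified; only levels with Ctype >= 2 hold data to flush.
	 */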
loop_level:
	lsl	x12, x0, #1
	add	x12, x12, x0		/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)
.popsection

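/*
 * void __asm_flush_dcache_all(void)
 *
 * clean & invalidate all levels of data cache by SET/WAY.
 */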
.pushsection .text.__asm_flush_dcache_all, "ax"
ENTRY(__asm_flush_dcache_all)
	mov	x0, #0
	b	__asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
.popsection

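/*
 * void __asm_invalidate_dcache_all(void)
 *
 * invalidate all levels of data cache by SET/WAY, without cleaning.
 */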
.pushsection .text.__asm_invalidate_dcache_all, "ax"
ENTRY(__asm_invalidate_dcache_all)
	mov	x0, #0x1
	b	__asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
.popsection

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	lsr	x3, x3, #16
	and	x3, x3, #0xf
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

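	/*
	 * ctr_el0 bits [19:16] (DminLine) hold log2 of the smallest
	 * dcache line in words, so 4 << DminLine is its size in bytes.
	 */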
	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0		/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)
.popsection
/*
 * void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
.pushsection .text.__asm_invalidate_dcache_range, "ax"
ENTRY(__asm_invalidate_dcache_range)
	mrs	x3, ctr_el0
	ubfm	x3, x3, #16, #19
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
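	/*
	 * dc ivac discards whole lines: if start or end is not
	 * line-aligned, neighbouring data sharing those lines is
	 * lost as well, so callers should pass aligned boundaries.
	 */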
1:	dc	ivac, x0		/* invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_invalidate_dcache_range)
.popsection

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
 */
.pushsection .text.__asm_invalidate_icache_all, "ax"
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
.popsection

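/*
 * The L3 maintenance hooks below are weak no-op stubs that return
 * success; platforms with an external L3 cache can override them.
 */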
.pushsection .text.__asm_invalidate_l3_dcache, "ax"
ENTRY(__asm_invalidate_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_dcache)
	.weak	__asm_invalidate_l3_dcache
.popsection

.pushsection .text.__asm_flush_l3_dcache, "ax"
ENTRY(__asm_flush_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_dcache)
	.weak	__asm_flush_l3_dcache
.popsection

.pushsection .text.__asm_invalidate_l3_icache, "ax"
ENTRY(__asm_invalidate_l3_icache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_icache)
	.weak	__asm_invalidate_l3_icache
.popsection

/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 */
.pushsection .text.__asm_switch_ttbr, "ax"
ENTRY(__asm_switch_ttbr)
	/* x2 = SCTLR (alive throughout the function) */
	switch_el x4, 3f, 2f, 1f
3:	mrs	x2, sctlr_el3
	b	0f
2:	mrs	x2, sctlr_el2
	b	0f
1:	mrs	x2, sctlr_el1
0:

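	/*
	 * CR_M, CR_C and CR_I are the SCTLR MMU, dcache and icache
	 * enable bits; movn builds their inverted mask in one step.
	 */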
	/* Unset CR_M | CR_C | CR_I from SCTLR to disable all caches */
	movn	x1, #(CR_M | CR_C | CR_I)
	and	x1, x2, x1
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x1
	b	0f
2:	msr	sctlr_el2, x1
	b	0f
1:	msr	sctlr_el1, x1
0:	isb

	/* This call only clobbers x30 (lr) and x9 (unused) */
	mov	x3, x30
	bl	__asm_invalidate_tlb_all

	/* From here on we're running safely with caches disabled */

	/* Set TTBR to our first argument */
	switch_el x4, 3f, 2f, 1f
3:	msr	ttbr0_el3, x0
	b	0f
2:	msr	ttbr0_el2, x0
	b	0f
1:	msr	ttbr0_el1, x0
0:	isb

	/* Restore original SCTLR and thus enable caches again */
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x2
	b	0f
2:	msr	sctlr_el2, x2
	b	0f
1:	msr	sctlr_el1, x2
0:	isb

	ret	x3
ENDPROC(__asm_switch_ttbr)
.popsection