/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

/*
 * void __asm_dcache_level(level)
 *
 * Flush or invalidate one level of the data cache by set/way.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9: clobbered
 */
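/*
 * The loop below builds the operand for "dc cisw/isw" as described in
 * the ARMv8 ARM: the cache level goes in bits [3:1], the way number is
 * packed into the most significant bits (shifted left by clz(#ways - 1),
 * i.e. 32 minus the number of way bits), and the set number is shifted
 * left by log2(line size in bytes). x12, x5 and x2 hold exactly those
 * three positions.
 */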
.pushsection .text.__asm_dcache_level, "ax"
ENTRY(__asm_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- max number of #ways */
	clz	w5, w3			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- max number of #sets */
	/* x12 <- cache level << 1 */
	/* x2  <- line length offset */
	/* x3  <- number of cache ways - 1 */
	/* x4  <- number of cache sets - 1 */
	/* x5  <- bit position of #ways */
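	/*
	 * Worked example (illustrative numbers, not a real probe): for a
	 * 32 KiB, 4-way cache with 64-byte lines, ccsidr_el1 reports a
	 * line size field of 2 (log2(64) - 4), 3 for #ways - 1 and 127
	 * for #sets - 1 (32 KiB / (4 * 64)). That gives x2 = 6 and
	 * x5 = clz(3) = 30, so each cisw value below carries the set in
	 * bits [12:6], the way in bits [31:30] and the level in [3:1].
	 */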

loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f
	dc	isw, x9
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_dcache_level)
.popsection

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * Flush or invalidate the entire data cache by SET/WAY.
 */
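/*
 * clidr_el1 layout (ARMv8 ARM): LoC lives in bits [26:24] and the
 * cache type for level n in bits [3n+2:3n], where 0 means no cache,
 * 1 instruction only, 2 data only, 3 separate I and D, and 4 unified.
 * The "cmp/b.lt skip" below therefore skips any level that has no
 * data or unified cache to maintain.
 */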
.pushsection .text.__asm_dcache_all, "ax"
ENTRY(__asm_dcache_all)
	mov	x1, x0
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

loop_level:
	lsl	x12, x0, #1
	add	x12, x12, x0		/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)
.popsection

.pushsection .text.__asm_flush_dcache_all, "ax"
ENTRY(__asm_flush_dcache_all)
	mov	x0, #0
	b	__asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
.popsection

.pushsection .text.__asm_invalidate_dcache_all, "ax"
ENTRY(__asm_invalidate_dcache_all)
	mov	x0, #0x1
	b	__asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
.popsection
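/*
 * A minimal sketch of how the two entry points above are typically
 * reached from C in U-Boot (assuming the usual prototypes from
 * asm/system.h; __asm_flush_l3_dcache is the weak hook defined
 * further down):
 *
 *	void flush_dcache_all(void)
 *	{
 *		__asm_flush_dcache_all();
 *		__asm_flush_l3_dcache();
 *	}
 *
 *	void invalidate_dcache_all(void)
 *	{
 *		__asm_invalidate_dcache_all();
 *	}
 */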

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
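/*
 * ctr_el0 bits [19:16] (DminLine) hold log2 of the smallest data
 * cache line size in words, so "4 << DminLine" below yields the
 * minimum line size in bytes, e.g. DminLine = 4 gives 64 bytes.
 */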
.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	lsr	x3, x3, #16
	and	x3, x3, #0xf
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0	/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)
.popsection

/*
 * void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
.pushsection .text.__asm_invalidate_dcache_range, "ax"
ENTRY(__asm_invalidate_dcache_range)
	mrs	x3, ctr_el0
	ubfm	x3, x3, #16, #19	/* x3 <- DminLine, as lsr/and above */
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	ivac, x0	/* invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_invalidate_dcache_range)
.popsection
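/*
 * Illustrative use of the two range helpers around a DMA transfer,
 * via the usual U-Boot C wrappers (buf, len and dma_to_device() are
 * hypothetical, shown only to make the clean-vs-invalidate split
 * concrete; start/end should be cache-line aligned or neighbouring
 * data may be lost on invalidate):
 *
 *	flush_dcache_range(buf, buf + len);	// CPU wrote, device reads
 *	dma_to_device(buf, len);
 *
 *	invalidate_dcache_range(buf, buf + len);// device wrote, CPU reads
 *	data = *(u32 *)buf;
 */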

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
 */
.pushsection .text.__asm_invalidate_icache_all, "ax"
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
.popsection
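/*
 * "ic ialluis" invalidates all instruction caches to the point of
 * unification across the inner shareable domain. A typical sequence
 * after copying code into RAM would be, assuming the usual U-Boot C
 * wrappers:
 *
 *	flush_dcache_range(dst, dst + size);	// push new code out of D-cache
 *	invalidate_icache_all();		// drop stale instructions
 */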

.pushsection .text.__asm_invalidate_l3_dcache, "ax"
ENTRY(__asm_invalidate_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_dcache)
	.weak	__asm_invalidate_l3_dcache
.popsection

.pushsection .text.__asm_flush_l3_dcache, "ax"
ENTRY(__asm_flush_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_dcache)
	.weak	__asm_flush_l3_dcache
.popsection

.pushsection .text.__asm_invalidate_l3_icache, "ax"
ENTRY(__asm_invalidate_l3_icache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_icache)
	.weak	__asm_invalidate_l3_icache
.popsection
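/*
 * The three stubs above are weak no-ops that merely report success.
 * An SoC with an external L3/system cache can override them by
 * linking in a strong definition of the same name, in assembly or C.
 * A hypothetical sketch (soc_l3_flush() is an invented controller
 * accessor, not a real API):
 *
 *	int __asm_flush_l3_dcache(void)
 *	{
 *		return soc_l3_flush();	// 0 on success
 *	}
 */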

/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 */
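/*
 * Outline of the sequence below: save the active SCTLR, disable the
 * MMU and both caches, invalidate the TLBs, install the new table in
 * ttbr0 of the current EL, then restore SCTLR. Switching ttbr0 with
 * translation still enabled could mix stale and new mappings, so the
 * window with the MMU off is what makes the switch safe.
 */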
.pushsection .text.__asm_switch_ttbr, "ax"
ENTRY(__asm_switch_ttbr)
	/* x2 = SCTLR (alive throughout the function) */
	switch_el x4, 3f, 2f, 1f
3:	mrs	x2, sctlr_el3
	b	0f
2:	mrs	x2, sctlr_el2
	b	0f
1:	mrs	x2, sctlr_el1
0:

	/* Unset CR_M | CR_C | CR_I from SCTLR to disable all caches */
	movn	x1, #(CR_M | CR_C | CR_I)
	and	x1, x2, x1
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x1
	b	0f
2:	msr	sctlr_el2, x1
	b	0f
1:	msr	sctlr_el1, x1
0:	isb

	/* This call only clobbers x30 (lr) and x9 (unused) */
	mov	x3, x30
	bl	__asm_invalidate_tlb_all

	/* From here on we're running safely with caches disabled */

	/* Set TTBR to our first argument */
	switch_el x4, 3f, 2f, 1f
3:	msr	ttbr0_el3, x0
	b	0f
2:	msr	ttbr0_el2, x0
	b	0f
1:	msr	ttbr0_el1, x0
0:	isb

	/* Restore original SCTLR and thus enable caches again */
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x2
	b	0f
2:	msr	sctlr_el2, x2
	b	0f
1:	msr	sctlr_el1, x2
0:	isb

	ret	x3
ENDPROC(__asm_switch_ttbr)
.popsection