/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one level of cache.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9: clobbered
 */
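/*
 * A worked example of the set/way operand computed below, using an
 * illustrative (not core-specific) cache configuration: for a 4-way,
 * 64-byte-line cache at level 0, ccsidr_el1 yields x3 = 3 (ways - 1),
 * so "clz w5, w3" gives 30 and the way index lands in bits [31:30];
 * the line-size field is 2, so x2 = 6 and the set index starts at
 * bit 6; the level (x12 = 0) occupies bits [3:1], matching the
 * DC (C)ISW operand layout described in the ARMv8 ARM.
 */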
.pushsection .text.__asm_dcache_level, "ax"
ENTRY(__asm_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	ubfx	x2, x6, #0, #3		/* x2 <- log2(cache line size)-4 */
	ubfx	x3, x6, #3, #10		/* x3 <- number of cache ways - 1 */
	ubfx	x4, x6, #13, #15	/* x4 <- number of cache sets - 1 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	clz	w5, w3			/* bit position of #ways */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f
	dc	isw, x9
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_dcache_level)
.popsection

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data caches by SET/WAY.
 */
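/*
 * clidr_el1 packs one 3-bit Ctype field per cache level starting at
 * bit 0 (hence the "tripled cache level" shift below) and LoC in bits
 * [26:24]. Ctype values of 2 (data), 3 (separate) and 4 (unified)
 * denote a data or unified cache; 0 (none) and 1 (icache only) are
 * skipped. As an illustrative example, a core with split L1 caches
 * and a unified L2 reports Ctype1 = 3, Ctype2 = 4 and LoC = 2.
 */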
.pushsection .text.__asm_dcache_all, "ax"
ENTRY(__asm_dcache_all)
	mov	x1, x0
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	ubfx	x11, x10, #24, #3	/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

loop_level:
	add	x12, x0, x0, lsl #1	/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)
.popsection

.pushsection .text.__asm_flush_dcache_all, "ax"
ENTRY(__asm_flush_dcache_all)
	mov	x0, #0
	b	__asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
.popsection

.pushsection .text.__asm_invalidate_dcache_all, "ax"
ENTRY(__asm_invalidate_dcache_all)
	mov	x0, #0x1
	b	__asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
.popsection

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
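/*
 * The line size below is derived from ctr_el0.DminLine (bits [19:16]),
 * the log2 of the smallest data cache line in words, so the size in
 * bytes is 4 << DminLine. As an illustrative example, a core with
 * 64-byte lines reports DminLine = 4, giving x2 = 64.
 */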
.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	ubfx	x3, x3, #16, #4
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0	/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)
.popsection

/*
 * void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
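/*
 * Note that "bic" aligns the start address down to a line boundary and
 * "dc ivac" always operates on whole lines, so bytes that merely share
 * a line with the range are invalidated as well; callers should use
 * line-aligned, line-sized buffers to avoid losing unrelated dirty data.
 */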
.pushsection .text.__asm_invalidate_dcache_range, "ax"
ENTRY(__asm_invalidate_dcache_range)
	mrs	x3, ctr_el0
	ubfx	x3, x3, #16, #4
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	ivac, x0	/* invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_invalidate_dcache_range)
.popsection

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
 */
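/*
 * "ic ialluis" invalidates all instruction caches to the point of
 * unification across the Inner Shareable domain, i.e. it affects every
 * core in that domain, not only the calling one.
 */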
.pushsection .text.__asm_invalidate_icache_all, "ax"
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
.popsection

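/*
 * The next three functions are weak no-op defaults: the architecture
 * offers no generic way to maintain an external L3/system cache, so
 * platforms that have one are expected to override them. Returning 0
 * reports success.
 */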
.pushsection .text.__asm_invalidate_l3_dcache, "ax"
WEAK(__asm_invalidate_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_dcache)
.popsection

.pushsection .text.__asm_flush_l3_dcache, "ax"
WEAK(__asm_flush_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_dcache)
.popsection

.pushsection .text.__asm_invalidate_l3_icache, "ax"
WEAK(__asm_invalidate_l3_icache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_icache)
.popsection

/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 */
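/*
 * The sequence is: save SCTLR, clear its M/C/I bits to switch the MMU
 * and caches off, invalidate the TLBs, install the new TTBR0, then
 * restore the original SCTLR. Running uncached for the duration of the
 * switch is what makes replacing the live page table safe.
 */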
.pushsection .text.__asm_switch_ttbr, "ax"
ENTRY(__asm_switch_ttbr)
	/* x2 = SCTLR (alive throughout the function) */
	switch_el x4, 3f, 2f, 1f
3:	mrs	x2, sctlr_el3
	b	0f
2:	mrs	x2, sctlr_el2
	b	0f
1:	mrs	x2, sctlr_el1
0:

	/* Unset CR_M | CR_C | CR_I from SCTLR to disable all caches */
	movn	x1, #(CR_M | CR_C | CR_I)
	and	x1, x2, x1
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x1
	b	0f
2:	msr	sctlr_el2, x1
	b	0f
1:	msr	sctlr_el1, x1
0:	isb

	/* This call only clobbers x30 (lr) and x9 (unused) */
	mov	x3, x30
	bl	__asm_invalidate_tlb_all

	/* From here on we're running safely with caches disabled */

	/* Set TTBR to our first argument */
	switch_el x4, 3f, 2f, 1f
3:	msr	ttbr0_el3, x0
	b	0f
2:	msr	ttbr0_el2, x0
	b	0f
1:	msr	ttbr0_el1, x0
0:	isb

	/* Restore original SCTLR and thus enable caches again */
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x2
	b	0f
2:	msr	sctlr_el2, x2
	b	0f
1:	msr	sctlr_el1, x2
0:	isb

	ret	x3
ENDPROC(__asm_switch_ttbr)
.popsection