/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one level of the data cache by set/way.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9: clobbered
 */
ENTRY(__asm_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- max number of #ways */
	clz	w5, w3			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- max number of #sets */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f
	dc	isw, x9
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_dcache_level)
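
/*
 * For reference, the word passed to dc isw/cisw above follows the
 * ARMv8 ARM set/way encoding: cache level in bits [3:1], way index in
 * the uppermost bits, set index starting at log2(line length). A rough
 * C sketch of one iteration (names are illustrative only, nothing here
 * is defined in U-Boot):
 *
 *	setway = (level << 1)
 *	       | ((u64)way << __builtin_clz(ways - 1))
 *	       | ((u64)set << line_shift);
 *	asm volatile("dc cisw, %0" : : "r" (setway));
 */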

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache by SET/WAY.
 */
ENTRY(__asm_dcache_all)
	mov	x1, x0
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0 <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

loop_level:
	lsl	x12, x0, #1
	add	x12, x12, x0		/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache only */
	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)
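
/*
 * The "tripled cache level" shift above indexes CLIDR_EL1, which holds
 * one 3-bit Ctype field per cache level (level 1 in bits [2:0]). A C
 * sketch of the decode, with level counted from 0 (names illustrative):
 *
 *	ctype = (clidr >> (3 * level)) & 7;
 *	// 0: none, 1: icache only, 2: dcache only,
 *	// 3: separate i/d, 4: unified
 *
 * Levels whose Ctype is below 2 carry no data cache, hence the
 * "cmp x12, #2; b.lt skip" in the loop.
 */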

ENTRY(__asm_flush_dcache_all)
	mov	x0, #0
	b	__asm_dcache_all
ENDPROC(__asm_flush_dcache_all)

ENTRY(__asm_invalidate_dcache_all)
	mov	x0, #0x1
	b	__asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
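
/*
 * These two wrappers are the C-callable entry points; their prototypes
 * (assumed here, they live in a header rather than in this file) would
 * be:
 *
 *	void __asm_flush_dcache_all(void);
 *	void __asm_invalidate_dcache_all(void);
 */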

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	lsr	x3, x3, #16
	and	x3, x3, #0xf
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0	/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)
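
/*
 * The three instructions reading ctr_el0 above extract DminLine,
 * CTR_EL0 bits [19:16]: log2 of the smallest data cache line, counted
 * in 4-byte words. The same line size in C would be (sketch, names
 * illustrative):
 *
 *	line_bytes = 4 << ((ctr_el0 >> 16) & 0xf);
 */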

/*
 * void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
ENTRY(__asm_invalidate_dcache_range)
	mrs	x3, ctr_el0
	ubfm	x3, x3, #16, #19
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	ivac, x0	/* invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_invalidate_dcache_range)
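
/*
 * Note that dc ivac discards whole lines, so callers of
 * __asm_invalidate_dcache_range should pass cache-line-aligned
 * start/end addresses: invalidating a partially owned line also drops
 * neighbouring data sharing that line.
 */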

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
 */
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
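
/*
 * ic ialluis invalidates all instruction caches to the Point of
 * Unification across the Inner Shareable domain; the following isb
 * forces later instructions to be refetched.
 */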

ENTRY(__asm_invalidate_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_dcache)
	.weak	__asm_invalidate_l3_dcache

ENTRY(__asm_flush_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_dcache)
	.weak	__asm_flush_l3_dcache

ENTRY(__asm_invalidate_l3_icache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_icache)
	.weak	__asm_invalidate_l3_icache
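
/*
 * The stubs above report success without touching any L3 cache; they
 * are declared weak so that boards or SoCs with an external L3 (for
 * example one behind a separate cache controller) can override them
 * with real maintenance routines.
 */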

/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 */
ENTRY(__asm_switch_ttbr)
	/* x2 = SCTLR (alive throughout the function) */
	switch_el x4, 3f, 2f, 1f
3:	mrs	x2, sctlr_el3
	b	0f
2:	mrs	x2, sctlr_el2
	b	0f
1:	mrs	x2, sctlr_el1
0:

	/* Clear CR_M | CR_C | CR_I in SCTLR to disable the MMU and caches */
	movn	x1, #(CR_M | CR_C | CR_I)
	and	x1, x2, x1
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x1
	b	0f
2:	msr	sctlr_el2, x1
	b	0f
1:	msr	sctlr_el1, x1
0:	isb

	/* This call only clobbers x30 (lr) and x9 (unused) */
	mov	x3, x30
	bl	__asm_invalidate_tlb_all

	/* From here on we're running safely with caches disabled */

	/* Set TTBR to our first argument */
	switch_el x4, 3f, 2f, 1f
3:	msr	ttbr0_el3, x0
	b	0f
2:	msr	ttbr0_el2, x0
	b	0f
1:	msr	ttbr0_el1, x0
0:	isb

	/* Restore original SCTLR and thus enable caches again */
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x2
	b	0f
2:	msr	sctlr_el2, x2
	b	0f
1:	msr	sctlr_el1, x2
0:	isb

	ret	x3
ENDPROC(__asm_switch_ttbr)
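
/*
 * Hypothetical C-side usage once a new set of page tables has been
 * written out (the prototype is assumed here, not defined in this
 * file):
 *
 *	extern void __asm_switch_ttbr(u64 new_ttbr);
 *
 *	__asm_switch_ttbr((u64)new_tables);
 *
 * Because instruction fetches bypass translation while the MMU is off,
 * the caller must be executing from identity-mapped memory across the
 * switch.
 */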