/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <version.h>
#include <asm/macro.h>
#include <linux/linkage.h>

/*
 * void __asm_flush_dcache_level(level)
 *
 * Clean and invalidate one level of data cache by set/way.
 *
 * x0: cache level
 * x1~x9: clobbered
 */
ENTRY(__asm_flush_dcache_level)
	lsl	x1, x0, #1
	msr	csselr_el1, x1		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- number of cache ways - 1 */
	clz	w5, w3			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- number of cache sets - 1 */
	/* x1 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

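	/*
	 * The loops below assemble the operand for "dc cisw" (set/way
	 * format, ARMv8 ARM):
	 *   bits [3:1]     cache level (x1 already holds level << 1)
	 *   bits [31:32-A] way number, i.e. the way shifted left by x5
	 *   bits [B-1:L]   set number, i.e. the set shifted left by x2
	 * where A = ceil(log2(#ways)) and L = log2(line length in bytes).
	 */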
loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x1, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	dc	cisw, x9		/* clean & invalidate by set/way */
	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_flush_dcache_level)

/*
 * void __asm_flush_dcache_all(void)
 *
 * Clean and invalidate all data cache by set/way.
 */
ENTRY(__asm_flush_dcache_all)
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0 <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

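	/*
	 * clidr_el1 encodes a 3-bit cache type field per level. A value
	 * below 2 means no cache or an instruction cache only at that
	 * level, so there is nothing to clean there; 2 or above means a
	 * data or unified cache is present.
	 */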
loop_level:
	lsl	x1, x0, #1
	add	x1, x1, x0		/* x1 <- tripled cache level */
	lsr	x1, x10, x1
	and	x1, x1, #7		/* x1 <- cache type */
	cmp	x1, #2
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_flush_dcache_level
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_flush_dcache_all)
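
/*
 * A minimal usage sketch (hypothetical C caller, not part of this file):
 * the entry points above are meant to be reached from the C-level cache
 * maintenance code, roughly as follows.
 *
 *	void __asm_flush_dcache_all(void);
 *
 *	void flush_dcache_all(void)
 *	{
 *		__asm_flush_dcache_all();
 *	}
 */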

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * Clean and invalidate data cache in the range [start, end).
 *
 * x0: start address
 * x1: end address
 */
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	lsr	x3, x3, #16
	and	x3, x3, #0xf
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */
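	/*
	 * ctr_el0 bits [19:16] (DminLine) give log2 of the smallest
	 * data cache line size in words, so 4 << DminLine is that size
	 * in bytes; e.g. DminLine = 4 means 64-byte lines.
	 */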

	/* x2 <- minimal cache line size in cache system */
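	/* round the start address down to a cache line boundary */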
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0		/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)

/*
 * void __asm_invalidate_icache_all(void)
 *
 * Invalidate all instruction cache entries.
 */
ENTRY(__asm_invalidate_icache_all)
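	/*
	 * "ic ialluis" invalidates all instruction caches to the point
	 * of unification across the Inner Shareable domain.
	 */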
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)