blob: a5b918c3848e2de08b066ed63ff4563bb45c31fe [file] [log] [blame]
Achin Gupta4f6ad662013-10-25 09:08:21 +01001/*
Dan Handleye83b0ca2014-01-14 18:17:09 +00002 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
Achin Gupta4f6ad662013-10-25 09:08:21 +01003 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
Dan Handley2bd4ef22014-04-09 13:14:54 +010031#include <arch.h>
Achin Gupta4f6ad662013-10-25 09:08:21 +010032#include <asm_macros.S>
33
34 .globl dcisw
35 .globl dccisw
36 .globl dccsw
37 .globl dccvac
38 .globl dcivac
39 .globl dccivac
40 .globl dccvau
41 .globl dczva
42 .globl flush_dcache_range
43 .globl inv_dcache_range
44 .globl dcsw_op_louis
45 .globl dcsw_op_all
46
	/* ------------------------------------------
	 * void dcisw(uint64_t setway)
	 * Invalidate a data cache line by set/way.
	 * x0 = Set/Way/Level value encoded for the
	 * DC ISW instruction.
	 * ------------------------------------------
	 */
func dcisw
	dc	isw, x0
	ret
50
51
	/* ------------------------------------------
	 * void dccisw(uint64_t setway)
	 * Clean and invalidate a data cache line by
	 * set/way. x0 = Set/Way/Level value encoded
	 * for the DC CISW instruction.
	 * ------------------------------------------
	 */
func dccisw
	dc	cisw, x0
	ret
55
56
	/* ------------------------------------------
	 * void dccsw(uint64_t setway)
	 * Clean a data cache line by set/way.
	 * x0 = Set/Way/Level value encoded for the
	 * DC CSW instruction.
	 * ------------------------------------------
	 */
func dccsw
	dc	csw, x0
	ret
60
61
	/* ------------------------------------------
	 * void dccvac(uint64_t va)
	 * Clean a data cache line by virtual address
	 * to the Point of Coherency. x0 = VA.
	 * ------------------------------------------
	 */
func dccvac
	dc	cvac, x0
	ret
65
66
	/* ------------------------------------------
	 * void dcivac(uint64_t va)
	 * Invalidate a data cache line by virtual
	 * address to the Point of Coherency. x0 = VA.
	 * Note: discards any dirty data in the line.
	 * ------------------------------------------
	 */
func dcivac
	dc	ivac, x0
	ret
70
71
	/* ------------------------------------------
	 * void dccivac(uint64_t va)
	 * Clean and invalidate a data cache line by
	 * virtual address to the Point of Coherency.
	 * x0 = VA.
	 * ------------------------------------------
	 */
func dccivac
	dc	civac, x0
	ret
75
76
	/* ------------------------------------------
	 * void dccvau(uint64_t va)
	 * Clean a data cache line by virtual address
	 * to the Point of Unification. x0 = VA.
	 * ------------------------------------------
	 */
func dccvau
	dc	cvau, x0
	ret
80
81
	/* ------------------------------------------
	 * void dczva(uint64_t va)
	 * Zero a block of memory by virtual address
	 * (DC ZVA). x0 = VA. Block size is given by
	 * DCZID_EL0; caller must ensure the op is
	 * permitted (SCTLR/HCR DZE controls).
	 * ------------------------------------------
	 */
func dczva
	dc	zva, x0
	ret
85
86
87 /* ------------------------------------------
88 * Clean+Invalidate from base address till
89 * size. 'x0' = addr, 'x1' = size
90 * ------------------------------------------
91 */
Andrew Thoelke38bde412014-03-18 13:46:55 +000092func flush_dcache_range
Achin Gupta4f6ad662013-10-25 09:08:21 +010093 dcache_line_size x2, x3
94 add x1, x0, x1
95 sub x3, x2, #1
96 bic x0, x0, x3
97flush_loop:
98 dc civac, x0
99 add x0, x0, x2
100 cmp x0, x1
101 b.lo flush_loop
102 dsb sy
103 ret
104
105
106 /* ------------------------------------------
107 * Invalidate from base address till
108 * size. 'x0' = addr, 'x1' = size
109 * ------------------------------------------
110 */
Andrew Thoelke38bde412014-03-18 13:46:55 +0000111func inv_dcache_range
Achin Gupta4f6ad662013-10-25 09:08:21 +0100112 dcache_line_size x2, x3
113 add x1, x0, x1
114 sub x3, x2, #1
115 bic x0, x0, x3
116inv_loop:
117 dc ivac, x0
118 add x0, x0, x2
119 cmp x0, x1
120 b.lo inv_loop
121 dsb sy
122 ret
123
124
	/* ---------------------------------------------------------------
	 * Data cache operations by set/way to the level specified
	 *
	 * The main function, do_dcsw_op requires:
	 * x0: The operation type (0-2), as defined in arch.h
	 * x3: The last cache level to operate on
	 * x9: clidr_el1
	 * and will carry out the operation on each data cache from level 0
	 * to the level in x3 in sequence
	 *
	 * The dcsw_op macro sets up the x3 and x9 parameters based on
	 * clidr_el1 cache information before invoking the main function
	 *
	 * Clobbers: x0-x2, x4-x11, x14, x16, x17 and the condition flags.
	 * ---------------------------------------------------------------
	 */

	.macro	dcsw_op shift, fw, ls
	mrs	x9, clidr_el1		// x9 = cache hierarchy info for do_dcsw_op
	ubfx	x3, x9, \shift, \fw	// extract the LoUIS/LoC level field
	lsl	x3, x3, \ls		// scale it to the CSSELR-style level
					// count that do_dcsw_op iterates over
	b	do_dcsw_op
	.endm

func do_dcsw_op
	cbz	x3, exit		// no cache levels to operate on
	mov	x10, xzr		// x10 = current level in CSSELR format
					// (level << 1), starting at level 0
	adr	x14, dcsw_loop_table	// compute inner loop address
	add	x14, x14, x0, lsl #5	// inner loop is 8x32-bit instructions,
					// so index the table by op type * 32
	mov	x0, x9			// x0 = clidr_el1 (x0 no longer needed)
	mov	w8, #1			// constant 1, shifted below to build
					// the set/way loop decrements
loop1:
	add	x2, x10, x10, lsr #1	// work out 3x current cache level
	lsr	x1, x0, x2		// extract cache type bits from clidr
	and	x1, x1, #7		// mask the bits for current cache only
	cmp	x1, #2			// see what cache we have at this level
	b.lt	level_done		// nothing to do if no cache or icache

	msr	csselr_el1, x10		// select current cache level in csselr
	isb				// isb to sych the new cssr&csidr
	mrs	x1, ccsidr_el1		// read the new ccsidr
	and	x2, x1, #7		// extract the length of the cache lines
	add	x2, x2, #4		// add 4 (line length offset)
	ubfx	x4, x1, #3, #10		// maximum way number
	clz	w5, w4			// bit position of way size increment
	lsl	w9, w4, w5		// w9 = aligned max way number
	lsl	w16, w8, w5		// w16 = way number loop decrement
	orr	w9, w10, w9		// w9 = combine way and cache number
	ubfx	w6, w1, #13, #15	// w6 = max set number
	lsl	w17, w8, w2		// w17 = set number loop decrement
	dsb	sy			// barrier before we start this level
	br	x14			// jump to DC operation specific loop

	/*
	 * Each expansion of dcsw_loop must be exactly 8 instructions
	 * (32 bytes) so that the 'add x14, x14, x0, lsl #5' above can
	 * index dcsw_loop_table by operation type.
	 */
	.macro	dcsw_loop _op
loop2_\_op:
	lsl	w7, w6, w2		// w7 = aligned max set number

loop3_\_op:
	orr	w11, w9, w7		// combine cache, way and set number
	dc	\_op, x11
	subs	w7, w7, w17		// decrement set number
	b.ge	loop3_\_op

	subs	x9, x9, x16		// decrement way number
	b.ge	loop2_\_op

	b	level_done
	.endm

level_done:
	add	x10, x10, #2		// increment cache number
	cmp	x3, x10
	b.gt	loop1
	msr	csselr_el1, xzr		// select cache level 0 in csselr
	dsb	sy			// barrier to complete final cache operation
	isb
exit:
	ret

dcsw_loop_table:
	dcsw_loop isw
	dcsw_loop cisw
	dcsw_loop csw
206
Achin Gupta4f6ad662013-10-25 09:08:21 +0100207
	/* ---------------------------------------------------------------
	 * void dcsw_op_louis(unsigned int op_type)
	 * Apply the set/way operation in x0 (0-2, as defined in arch.h)
	 * to all data cache levels up to the Level of Unification Inner
	 * Shareable from CLIDR_EL1. Tail-branches into do_dcsw_op.
	 * ---------------------------------------------------------------
	 */
func dcsw_op_louis
	dcsw_op	#LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
Achin Gupta4f6ad662013-10-25 09:08:21 +0100210
211
	/* ---------------------------------------------------------------
	 * void dcsw_op_all(unsigned int op_type)
	 * Apply the set/way operation in x0 (0-2, as defined in arch.h)
	 * to all data cache levels up to the Level of Coherency from
	 * CLIDR_EL1. Tail-branches into do_dcsw_op.
	 * ---------------------------------------------------------------
	 */
func dcsw_op_all
	dcsw_op	#LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT