/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>

	.globl	dcisw
	.globl	dccisw
	.globl	dccsw
	.globl	dccvac
	.globl	dcivac
	.globl	dccivac
	.globl	dccvau
	.globl	dczva
	.globl	flush_dcache_range
	.globl	inv_dcache_range
	.globl	dcsw_op_louis
	.globl	dcsw_op_all

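	/* -----------------------------------------------------------------
	 * Thin wrappers around the individual DC maintenance instructions.
	 * Each takes its operand in x0: a virtual address for the VA-based
	 * operations (cvac, ivac, civac, cvau, zva) or a set/way/level
	 * value for the set/way operations (isw, cisw, csw), and completes
	 * the operation with a dsb and an isb before returning.
	 * -----------------------------------------------------------------
	 */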
func dcisw
	dc	isw, x0
	dsb	sy
	isb
	ret


func dccisw
	dc	cisw, x0
	dsb	sy
	isb
	ret


func dccsw
	dc	csw, x0
	dsb	sy
	isb
	ret


func dccvac
	dc	cvac, x0
	dsb	sy
	isb
	ret


func dcivac
	dc	ivac, x0
	dsb	sy
	isb
	ret


func dccivac
	dc	civac, x0
	dsb	sy
	isb
	ret


func dccvau
	dc	cvau, x0
	dsb	sy
	isb
	ret


func dczva
	dc	zva, x0
	dsb	sy
	isb
	ret


	/* ------------------------------------------
	 * Clean and invalidate the data cache lines
	 * covering the region [addr, addr + size).
	 * 'x0' = addr, 'x1' = size
	 * ------------------------------------------
	 */
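	/*
	 * Example usage (a sketch, not part of this file; 'dma_buf' and
	 * 'DMA_BUF_SIZE' are hypothetical symbols): make a buffer visible
	 * to a non-coherent device before starting an outbound transfer.
	 *
	 *	ldr	x0, =dma_buf		// x0 = base address of the region
	 *	mov	x1, #DMA_BUF_SIZE	// x1 = size of the region in bytes
	 *	bl	flush_dcache_range
	 */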
func flush_dcache_range
	dcache_line_size x2, x3
	add	x1, x0, x1
	sub	x3, x2, #1
	bic	x0, x0, x3
flush_loop:
	dc	civac, x0
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	flush_loop
	dsb	sy
	ret


	/* ------------------------------------------
	 * Invalidate the data cache lines covering
	 * the region [addr, addr + size).
	 * 'x0' = addr, 'x1' = size
	 * ------------------------------------------
	 */
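	/*
	 * Example usage (a sketch, not part of this file; 'rx_buf' and
	 * 'RX_BUF_SIZE' are hypothetical symbols): discard stale lines
	 * before reading data that a non-coherent device has written.
	 *
	 *	ldr	x0, =rx_buf		// x0 = base address of the region
	 *	mov	x1, #RX_BUF_SIZE	// x1 = size of the region in bytes
	 *	bl	inv_dcache_range
	 *
	 * Note that whole cache lines are invalidated: if the base address
	 * or size is not cache-line aligned, dirty data sharing the first
	 * or last line with this region is discarded as well.
	 */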
func inv_dcache_range
	dcache_line_size x2, x3
	add	x1, x0, x1
	sub	x3, x2, #1
	bic	x0, x0, x3
inv_loop:
	dc	ivac, x0
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	inv_loop
	dsb	sy
	ret


	/* ---------------------------------------------------------------
	 * Data cache operations by set/way up to the level specified.
	 *
	 * The main function, do_dcsw_op, requires:
	 * x0: the operation type (0-2), as defined in arch.h
	 * x3: the last cache level to operate on
	 * x9: clidr_el1
	 * and carries out the operation on each data cache, in sequence,
	 * from level 0 up to the level given in x3.
	 *
	 * The dcsw_op macro sets up the x3 and x9 parameters from the
	 * clidr_el1 cache information before branching to the main
	 * function.
	 * ---------------------------------------------------------------
	 */
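	/*
	 * Example usage (a sketch; DCISW/DCCISW/DCCSW are assumed to be
	 * the 0/1/2 operation-type encodings from arch.h, matching the
	 * order of dcsw_loop_table below):
	 *
	 *	mov	x0, #DCCISW		// clean and invalidate by set/way...
	 *	bl	dcsw_op_all		// ...on every level up to the LoC
	 */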

	.macro	dcsw_op shift, fw, ls
	mrs	x9, clidr_el1
	ubfx	x3, x9, \shift, \fw
	lsl	x3, x3, \ls
	b	do_dcsw_op
	.endm

func do_dcsw_op
	cbz	x3, exit
	mov	x10, xzr
	adr	x14, dcsw_loop_table	// compute inner loop address
	add	x14, x14, x0, lsl #5	// inner loop is 8x32-bit instructions
	mov	x0, x9
	mov	w8, #1
loop1:
	add	x2, x10, x10, lsr #1	// work out 3x current cache level
	lsr	x1, x0, x2		// extract cache type bits from clidr
	and	x1, x1, #7		// mask the bits for current cache only
	cmp	x1, #2			// see what cache we have at this level
	b.lt	level_done		// nothing to do if no cache or icache

	msr	csselr_el1, x10		// select current cache level in csselr
	isb				// isb to sync the new csselr and ccsidr
	mrs	x1, ccsidr_el1		// read the new ccsidr
	and	x2, x1, #7		// extract the length of the cache lines
	add	x2, x2, #4		// add 4 (line length offset)
	ubfx	x4, x1, #3, #10		// maximum way number
	clz	w5, w4			// bit position of way size increment
	lsl	w9, w4, w5		// w9 = aligned max way number
	lsl	w16, w8, w5		// w16 = way number loop decrement
	orr	w9, w10, w9		// w9 = combine way and cache number
	ubfx	w6, w1, #13, #15	// w6 = max set number
	lsl	w17, w8, w2		// w17 = set number loop decrement
	dsb	sy			// barrier before we start this level
	br	x14			// jump to DC operation specific loop

	.macro	dcsw_loop _op
loop2_\_op:
	lsl	w7, w6, w2		// w7 = aligned max set number

loop3_\_op:
	orr	w11, w9, w7		// combine cache, way and set number
	dc	\_op, x11
	subs	w7, w7, w17		// decrement set number
	b.ge	loop3_\_op

	subs	x9, x9, x16		// decrement way number
	b.ge	loop2_\_op

	b	level_done
	.endm

level_done:
	add	x10, x10, #2		// increment cache number
	cmp	x3, x10
	b.gt	loop1
	msr	csselr_el1, xzr		// select cache level 0 in csselr
	dsb	sy			// barrier to complete final cache operation
	isb
exit:
	ret

dcsw_loop_table:
	dcsw_loop isw
	dcsw_loop cisw
	dcsw_loop csw


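	/* ---------------------------------------------------------------
	 * Perform the data cache operation passed in x0, by set/way, on
	 * all levels up to the Level of Unification Inner Shareable
	 * (clidr_el1.LoUIS).
	 * ---------------------------------------------------------------
	 */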
func dcsw_op_louis
	dcsw_op	#LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT


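	/* ---------------------------------------------------------------
	 * Perform the data cache operation passed in x0, by set/way, on
	 * all levels up to the Level of Coherency (clidr_el1.LoC).
	 * ---------------------------------------------------------------
	 */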
func dcsw_op_all
	dcsw_op	#LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT