blob: c33ade28d82b87473be0cb7483d970e147ac1406 [file] [log] [blame]
Achin Gupta4f6ad662013-10-25 09:08:21 +01001/*
Dan Handleye83b0ca2014-01-14 18:17:09 +00002 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
Achin Gupta4f6ad662013-10-25 09:08:21 +01003 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
Dan Handley2bd4ef22014-04-09 13:14:54 +010031#include <arch.h>
Andrew Thoelke38bde412014-03-18 13:46:55 +000032#include <asm_macros.S>
Achin Gupta4a826dd2013-11-25 14:00:56 +000033
Achin Gupta4f6ad662013-10-25 09:08:21 +010034 .globl enable_irq
35 .globl disable_irq
36
37 .globl enable_fiq
38 .globl disable_fiq
39
40 .globl enable_serror
41 .globl disable_serror
42
Sandrine Bailleux37382742013-11-18 17:26:59 +000043 .globl enable_debug_exceptions
44 .globl disable_debug_exceptions
45
Achin Gupta4f6ad662013-10-25 09:08:21 +010046 .globl read_daif
47 .globl write_daif
48
49 .globl read_spsr
50 .globl read_spsr_el1
51 .globl read_spsr_el2
52 .globl read_spsr_el3
53
54 .globl write_spsr
55 .globl write_spsr_el1
56 .globl write_spsr_el2
57 .globl write_spsr_el3
58
59 .globl read_elr
60 .globl read_elr_el1
61 .globl read_elr_el2
62 .globl read_elr_el3
63
64 .globl write_elr
65 .globl write_elr_el1
66 .globl write_elr_el2
67 .globl write_elr_el3
68
69 .globl get_afflvl_shift
70 .globl mpidr_mask_lower_afflvls
71 .globl dsb
72 .globl isb
73 .globl sev
74 .globl wfe
75 .globl wfi
76 .globl eret
77 .globl smc
78
Sandrine Bailleux65f546a2013-11-28 09:43:06 +000079 .globl zeromem16
80 .globl memcpy16
Achin Gupta4f6ad662013-10-25 09:08:21 +010081
Achin Gupta4f6ad662013-10-25 09:08:21 +010082
/*
 * uint32_t get_afflvl_shift(uint32_t afflvl);
 *
 * Return the bit position of the affinity-<afflvl> field within an
 * MPIDR value.  In:  x0 = affinity level (0-3).  Out: x0 = shift.
 * Clobbers: x1.
 * Aff0-Aff2 are consecutive fields; Aff3 sits one field further up,
 * hence level 3 is bumped to index 4 before scaling.
 * (assumes MPIDR_AFFLVL_SHIFT is the log2 of the field width -- defined
 * in arch.h, not visible here -- TODO confirm)
 */
func get_afflvl_shift
	cmp	x0, #3
	cinc	x0, x0, eq		/* afflvl 3 -> field index 4 */
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1		/* shift = index << MPIDR_AFFLVL_SHIFT */
	ret
89
/*
 * uint64_t mpidr_mask_lower_afflvls(uint64_t mpidr, uint32_t afflvl);
 *
 * Clear every affinity field of 'mpidr' below affinity level 'afflvl'
 * by shifting the value down past those fields and back up.
 * In:  x0 = mpidr, x1 = affinity level (0-3).
 * Out: x0 = masked mpidr.  Clobbers: x1, x2.
 */
func mpidr_mask_lower_afflvls
	cmp	x1, #3
	cinc	x1, x1, eq		/* afflvl 3 -> field index 4, as in get_afflvl_shift */
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2		/* x2 = bit position of the afflvl field */
	lsr	x0, x0, x2		/* drop all lower fields... */
	lsl	x0, x0, x2		/* ...and restore alignment with zeroes below */
	ret
98
	/* -----------------------------------------------------
	 * Asynchronous exception manipulation accessors
	 * -----------------------------------------------------
	 */

/*
 * void enable_irq(void);
 * Unmask IRQs at the current EL by clearing PSTATE.I (DAIF).
 * Clobbers: nothing.
 */
func enable_irq
	msr	daifclr, #DAIF_IRQ_BIT
	ret
106
107
/*
 * void enable_fiq(void);
 * Unmask FIQs at the current EL by clearing PSTATE.F (DAIF).
 * Clobbers: nothing.
 */
func enable_fiq
	msr	daifclr, #DAIF_FIQ_BIT
	ret
111
112
/*
 * void enable_serror(void);
 * Unmask SError/asynchronous aborts by clearing PSTATE.A (DAIF).
 * Clobbers: nothing.
 */
func enable_serror
	msr	daifclr, #DAIF_ABT_BIT
	ret
116
117
/*
 * void enable_debug_exceptions(void);
 * Unmask debug exceptions by clearing PSTATE.D (DAIF).
 * Clobbers: nothing.
 */
func enable_debug_exceptions
	msr	daifclr, #DAIF_DBG_BIT
	ret
121
122
/*
 * void disable_irq(void);
 * Mask IRQs at the current EL by setting PSTATE.I (DAIF).
 * Clobbers: nothing.
 */
func disable_irq
	msr	daifset, #DAIF_IRQ_BIT
	ret
126
127
/*
 * void disable_fiq(void);
 * Mask FIQs at the current EL by setting PSTATE.F (DAIF).
 * Clobbers: nothing.
 */
func disable_fiq
	msr	daifset, #DAIF_FIQ_BIT
	ret
131
132
/*
 * void disable_serror(void);
 * Mask SError/asynchronous aborts by setting PSTATE.A (DAIF).
 * Clobbers: nothing.
 */
func disable_serror
	msr	daifset, #DAIF_ABT_BIT
	ret
136
137
/*
 * void disable_debug_exceptions(void);
 * Mask debug exceptions by setting PSTATE.D (DAIF).
 * Clobbers: nothing.
 */
func disable_debug_exceptions
	msr	daifset, #DAIF_DBG_BIT
	ret
141
142
/*
 * uint32_t read_daif(void);
 * Out: x0 = current PSTATE.DAIF mask bits.  Clobbers: x0 only.
 */
func read_daif
	mrs	x0, daif
	ret
146
147
/*
 * void write_daif(uint32_t daif);
 * In: x0 = new PSTATE.DAIF mask bits (all four masks written at once).
 * Clobbers: nothing.
 */
func write_daif
	msr	daif, x0
	ret
151
152
/*
 * uint64_t read_spsr(void);
 * Return SPSR_ELx for the current exception level, dispatching on
 * CurrentEL, by tail-branching to the per-EL accessor.  Out: x0.
 * NOTE(review): there is no default case -- if CurrentEL matched none
 * of EL1/EL2/EL3, execution would fall through into read_spsr_el1
 * below.  Presumably unreachable because this code never runs at EL0;
 * confirm.
 */
func read_spsr
	mrs	x0, CurrentEl
	cmp	x0, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	read_spsr_el1
	cmp	x0, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	read_spsr_el2
	cmp	x0, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	read_spsr_el3
161
162
/*
 * uint64_t read_spsr_el1(void);
 * Out: x0 = SPSR_EL1.  Clobbers: x0 only.
 */
func read_spsr_el1
	mrs	x0, spsr_el1
	ret
166
167
/*
 * uint64_t read_spsr_el2(void);
 * Out: x0 = SPSR_EL2.  Clobbers: x0 only.
 */
func read_spsr_el2
	mrs	x0, spsr_el2
	ret
171
172
/*
 * uint64_t read_spsr_el3(void);
 * Out: x0 = SPSR_EL3.  Clobbers: x0 only.
 */
func read_spsr_el3
	mrs	x0, spsr_el3
	ret
176
177
/*
 * void write_spsr(uint64_t spsr);
 * Write x0 into SPSR_ELx for the current exception level, dispatching
 * on CurrentEL.  In: x0 = value.  Clobbers: x1.
 * NOTE(review): no default case -- an unmatched CurrentEL would fall
 * through into write_spsr_el1 below.  Presumably unreachable (never
 * runs at EL0); confirm.
 */
func write_spsr
	mrs	x1, CurrentEl
	cmp	x1, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	write_spsr_el1
	cmp	x1, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	write_spsr_el2
	cmp	x1, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	write_spsr_el3
186
187
/*
 * void write_spsr_el1(uint64_t spsr);
 * In: x0 = value for SPSR_EL1.  Clobbers: nothing.
 */
func write_spsr_el1
	msr	spsr_el1, x0
	ret
191
192
/*
 * void write_spsr_el2(uint64_t spsr);
 * In: x0 = value for SPSR_EL2.  Clobbers: nothing.
 */
func write_spsr_el2
	msr	spsr_el2, x0
	ret
196
197
/*
 * void write_spsr_el3(uint64_t spsr);
 * In: x0 = value for SPSR_EL3.  Clobbers: nothing.
 */
func write_spsr_el3
	msr	spsr_el3, x0
	ret
201
202
/*
 * uint64_t read_elr(void);
 * Return ELR_ELx for the current exception level, dispatching on
 * CurrentEL.  Out: x0.
 * NOTE(review): no default case -- an unmatched CurrentEL would fall
 * through into read_elr_el1 below.  Presumably unreachable (never runs
 * at EL0); confirm.
 */
func read_elr
	mrs	x0, CurrentEl
	cmp	x0, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	read_elr_el1
	cmp	x0, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	read_elr_el2
	cmp	x0, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	read_elr_el3
211
212
/*
 * uint64_t read_elr_el1(void);
 * Out: x0 = ELR_EL1.  Clobbers: x0 only.
 */
func read_elr_el1
	mrs	x0, elr_el1
	ret
216
217
/*
 * uint64_t read_elr_el2(void);
 * Out: x0 = ELR_EL2.  Clobbers: x0 only.
 */
func read_elr_el2
	mrs	x0, elr_el2
	ret
221
222
/*
 * uint64_t read_elr_el3(void);
 * Out: x0 = ELR_EL3.  Clobbers: x0 only.
 */
func read_elr_el3
	mrs	x0, elr_el3
	ret
226
227
/*
 * void write_elr(uint64_t elr);
 * Write x0 into ELR_ELx for the current exception level, dispatching
 * on CurrentEL.  In: x0 = value.  Clobbers: x1.
 * NOTE(review): no default case -- an unmatched CurrentEL would fall
 * through into write_elr_el1 below.  Presumably unreachable (never
 * runs at EL0); confirm.
 */
func write_elr
	mrs	x1, CurrentEl
	cmp	x1, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	write_elr_el1
	cmp	x1, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	write_elr_el2
	cmp	x1, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	write_elr_el3
236
237
/*
 * void write_elr_el1(uint64_t elr);
 * In: x0 = value for ELR_EL1.  Clobbers: nothing.
 */
func write_elr_el1
	msr	elr_el1, x0
	ret
241
242
/*
 * void write_elr_el2(uint64_t elr);
 * In: x0 = value for ELR_EL2.  Clobbers: nothing.
 */
func write_elr_el2
	msr	elr_el2, x0
	ret
246
247
/*
 * void write_elr_el3(uint64_t elr);
 * In: x0 = value for ELR_EL3.  Clobbers: nothing.
 */
func write_elr_el3
	msr	elr_el3, x0
	ret
251
252
/*
 * void dsb(void);
 * Full-system data synchronization barrier (dsb sy).
 */
func dsb
	dsb	sy
	ret
256
257
/*
 * void isb(void);
 * Instruction synchronization barrier.
 */
func isb
	isb
	ret
261
262
/*
 * void sev(void);
 * Send event to all PEs (wakes cores waiting in wfe).
 */
func sev
	sev
	ret
266
267
/*
 * void wfe(void);
 * Wait for event: suspend execution until an event is signalled.
 */
func wfe
	wfe
	ret
271
272
/*
 * void wfi(void);
 * Wait for interrupt: suspend execution until an interrupt is pending.
 */
func wfi
	wfi
	ret
276
277
/*
 * void eret(void);
 * Perform an exception return: restores PSTATE from SPSR_ELx and
 * branches to ELR_ELx.  Control never comes back to the caller, so
 * no 'ret' follows.
 */
func eret
	eret
280
281
/*
 * void smc(...);
 * Issue SMC #0; arguments/results travel in x0-x7 per the SMC calling
 * convention (caller-prepared; nothing is marshalled here).
 * NOTE(review): there is no 'ret' after the smc -- if the secure
 * monitor ever returned to the next instruction, execution would fall
 * through into the code that follows this function.  Presumably all
 * call sites expect no return; confirm.
 */
func smc
	smc	#0
Sandrine Bailleux65f546a2013-11-28 09:43:06 +0000284
/* -----------------------------------------------------------------------
 * void zeromem16(void *mem, unsigned int length);
 *
 * Initialise a memory region to 0.
 * The memory address must be 16-byte aligned.
 * In:  x0 = mem, x1 = length in bytes.
 * Clobbers: x0, x2, x3.  Preserves x1.
 * -----------------------------------------------------------------------
 */
func zeromem16
	add	x2, x0, x1		/* x2 = first byte past the region */
/* zero 16 bytes at a time */
z_loop16:
	sub	x3, x2, x0		/* x3 = bytes remaining */
	cmp	x3, #16
	b.lt	z_loop1			/* < 16 left: finish byte by byte */
	stp	xzr, xzr, [x0], #16	/* store a zero pair, advance x0 */
	b	z_loop16
/* zero byte per byte */
z_loop1:
	cmp	x0, x2			/* reached the end? */
	b.eq	z_end
	strb	wzr, [x0], #1
	b	z_loop1
z_end:	ret
308
309
/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * In:  x0 = dest, x1 = src, x2 = length in bytes.
 * Clobbers: x0-x4.
 * --------------------------------------------------------------------------
 */
func memcpy16
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lt	m_loop1			/* < 16 left: finish byte by byte */
	ldp	x3, x4, [x1], #16	/* load a 16-byte pair, advance src */
	stp	x3, x4, [x0], #16	/* store it, advance dest */
	sub	x2, x2, #16
	b	m_loop16
/* copy byte per byte */
m_loop1:
	cbz	x2, m_end		/* nothing left -> return */
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
334m_end: ret