/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>

	.globl	enable_irq
	.globl	disable_irq

	.globl	enable_fiq
	.globl	disable_fiq

	.globl	enable_serror
	.globl	disable_serror

	.globl	enable_debug_exceptions
	.globl	disable_debug_exceptions

	.globl	read_daif
	.globl	write_daif

	.globl	read_spsr
	.globl	read_spsr_el1
	.globl	read_spsr_el2
	.globl	read_spsr_el3

	.globl	write_spsr
	.globl	write_spsr_el1
	.globl	write_spsr_el2
	.globl	write_spsr_el3

	.globl	read_elr
	.globl	read_elr_el1
	.globl	read_elr_el2
	.globl	read_elr_el3

	.globl	write_elr
	.globl	write_elr_el1
	.globl	write_elr_el2
	.globl	write_elr_el3

	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	dsb
	.globl	isb
	.globl	sev
	.globl	wfe
	.globl	wfi
	.globl	eret
	.globl	smc

	.globl	zeromem16
	.globl	memcpy16

	.globl	disable_mmu_el3
	.globl	disable_mmu_icache_el3

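	/* -----------------------------------------------------
	 * Returns (in x0) the bit position of the affinity
	 * level field passed in x0 within an MPIDR. Affinity
	 * levels 0-2 occupy bits [7:0], [15:8] and [23:16];
	 * level 3 occupies bits [39:32], hence the conditional
	 * increment before the shift.
	 * -----------------------------------------------------
	 */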
func get_afflvl_shift
	cmp	x0, #3
	cinc	x0, x0, eq
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1
	ret

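	/* -----------------------------------------------------
	 * Clears the affinity fields of the mpidr in x0 below
	 * the affinity level in x1, by shifting the value
	 * right and then left again by that level's bit
	 * position.
	 * -----------------------------------------------------
	 */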
func mpidr_mask_lower_afflvls
	cmp	x1, #3
	cinc	x1, x1, eq
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2
	lsr	x0, x0, x2
	lsl	x0, x0, x2
	ret

	/* -----------------------------------------------------
	 * Asynchronous exception manipulation accessors
	 * -----------------------------------------------------
	 */
func enable_irq
	msr	daifclr, #DAIF_IRQ_BIT
	ret


func enable_fiq
	msr	daifclr, #DAIF_FIQ_BIT
	ret


func enable_serror
	msr	daifclr, #DAIF_ABT_BIT
	ret


func enable_debug_exceptions
	msr	daifclr, #DAIF_DBG_BIT
	ret


func disable_irq
	msr	daifset, #DAIF_IRQ_BIT
	ret


func disable_fiq
	msr	daifset, #DAIF_FIQ_BIT
	ret


func disable_serror
	msr	daifset, #DAIF_ABT_BIT
	ret


func disable_debug_exceptions
	msr	daifset, #DAIF_DBG_BIT
	ret

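	/* -----------------------------------------------------
	 * Raw PSTATE.DAIF read/write accessors
	 * -----------------------------------------------------
	 */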
func read_daif
	mrs	x0, daif
	ret


func write_daif
	msr	daif, x0
	ret

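	/* -----------------------------------------------------
	 * SPSR accessors. read_spsr/write_spsr check CurrentEL
	 * and tail-call the accessor for the running exception
	 * level; the _elX variants access a specific register
	 * and can also be called directly.
	 * -----------------------------------------------------
	 */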
func read_spsr
	mrs	x0, CurrentEl
	cmp	x0, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	read_spsr_el1
	cmp	x0, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	read_spsr_el2
	cmp	x0, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	read_spsr_el3


func read_spsr_el1
	mrs	x0, spsr_el1
	ret


func read_spsr_el2
	mrs	x0, spsr_el2
	ret


func read_spsr_el3
	mrs	x0, spsr_el3
	ret

func write_spsr
	mrs	x1, CurrentEl
	cmp	x1, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	write_spsr_el1
	cmp	x1, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	write_spsr_el2
	cmp	x1, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	write_spsr_el3


func write_spsr_el1
	msr	spsr_el1, x0
	ret


func write_spsr_el2
	msr	spsr_el2, x0
	ret


func write_spsr_el3
	msr	spsr_el3, x0
	ret

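	/* -----------------------------------------------------
	 * ELR accessors, mirroring the SPSR accessors above:
	 * read_elr/write_elr dispatch on CurrentEL; the _elX
	 * variants access a specific register.
	 * -----------------------------------------------------
	 */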
func read_elr
	mrs	x0, CurrentEl
	cmp	x0, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	read_elr_el1
	cmp	x0, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	read_elr_el2
	cmp	x0, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	read_elr_el3


func read_elr_el1
	mrs	x0, elr_el1
	ret


func read_elr_el2
	mrs	x0, elr_el2
	ret


func read_elr_el3
	mrs	x0, elr_el3
	ret

func write_elr
	mrs	x1, CurrentEl
	cmp	x1, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	write_elr_el1
	cmp	x1, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	write_elr_el2
	cmp	x1, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	write_elr_el3


func write_elr_el1
	msr	elr_el1, x0
	ret


func write_elr_el2
	msr	elr_el2, x0
	ret


func write_elr_el3
	msr	elr_el3, x0
	ret

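	/* -----------------------------------------------------
	 * Wrappers exposing individual instructions to C
	 * callers. Note that eret and smc transfer control
	 * away and are not followed by a ret.
	 * -----------------------------------------------------
	 */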
func dsb
	dsb	sy
	ret


func isb
	isb
	ret


func sev
	sev
	ret


func wfe
	wfe
	ret


func wfi
	wfi
	ret


func eret
	eret


func smc
	smc	#0

/* -----------------------------------------------------------------------
 * void zeromem16(void *mem, unsigned int length);
 *
 * Initialise a memory region to 0.
 * The memory address must be 16-byte aligned.
 * -----------------------------------------------------------------------
 */
func zeromem16
	add	x2, x0, x1
/* zero 16 bytes at a time */
z_loop16:
	sub	x3, x2, x0
	cmp	x3, #16
	b.lt	z_loop1
	stp	xzr, xzr, [x0], #16
	b	z_loop16
/* zero byte by byte */
z_loop1:
	cmp	x0, x2
	b.eq	z_end
	strb	wzr, [x0], #1
	b	z_loop1
z_end:	ret


/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy16
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lt	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy byte by byte */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:	ret

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL3
 * This is implemented in assembler to ensure that the data cache is cleaned
 * and invalidated after the MMU is disabled without any intervening cacheable
 * data accesses
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
	mrs	x0, sctlr_el3
	bic	x0, x0, x1
	msr	sctlr_el3, x0
	isb				// ensure MMU is off
	mov	x0, #DCCISW		// DCache clean and invalidate
	b	dcsw_op_all

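/* ---------------------------------------------------------------------------
 * Disable the MMU and the instruction cache at EL3
 * Reuses the sequence above, with SCTLR_I_BIT added to the clear mask
 * ---------------------------------------------------------------------------
 */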
func disable_mmu_icache_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu