/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>

	.globl	enable_irq
	.globl	disable_irq

	.globl	enable_fiq
	.globl	disable_fiq

	.globl	enable_serror
	.globl	disable_serror

	.globl	enable_debug_exceptions
	.globl	disable_debug_exceptions

	.globl	read_daif
	.globl	write_daif

	.globl	read_spsr_el1
	.globl	read_spsr_el2
	.globl	read_spsr_el3

	.globl	write_spsr_el1
	.globl	write_spsr_el2
	.globl	write_spsr_el3

	.globl	read_elr_el1
	.globl	read_elr_el2
	.globl	read_elr_el3

	.globl	write_elr_el1
	.globl	write_elr_el2
	.globl	write_elr_el3

	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	dsb
	.globl	isb
	.globl	sev
	.globl	wfe
	.globl	wfi
	.globl	eret
	.globl	smc

	.globl	zeromem16
	.globl	memcpy16

	.globl	disable_mmu_el3
	.globl	disable_mmu_icache_el3

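	/* -----------------------------------------------------
	 * Compute the bit shift of an affinity level's field
	 * within an MPIDR value. The level (0 to 3) is passed in
	 * x0 and the shift is returned in x0. Level 3 is bumped
	 * to 4 before scaling by MPIDR_AFFLVL_SHIFT, since Aff3
	 * lives at bits [39:32] of MPIDR_EL1, one field above
	 * Aff2.
	 * -----------------------------------------------------
	 */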
func get_afflvl_shift
	cmp	x0, #3
	cinc	x0, x0, eq
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1
	ret

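	/* -----------------------------------------------------
	 * Mask off all affinity fields of the MPIDR in x0 below
	 * the affinity level passed in x1, by shifting the value
	 * right and then left again by that field's shift. The
	 * masked MPIDR is returned in x0.
	 * -----------------------------------------------------
	 */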
func mpidr_mask_lower_afflvls
	cmp	x1, #3
	cinc	x1, x1, eq
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2
	lsr	x0, x0, x2
	lsl	x0, x0, x2
	ret

	/* -----------------------------------------------------
	 * Asynchronous exception manipulation accessors
	 * -----------------------------------------------------
	 */
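	/* As a rough sketch, C callers are assumed to declare
	 * these accessors along the following lines (the actual
	 * declarations live in the C headers, not in this file):
	 *
	 *	void enable_irq(void);
	 *	void disable_irq(void);
	 *	unsigned long read_daif(void);
	 *	void write_daif(unsigned long daif);
	 */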
func enable_irq
	msr	daifclr, #DAIF_IRQ_BIT
	ret


func enable_fiq
	msr	daifclr, #DAIF_FIQ_BIT
	ret


func enable_serror
	msr	daifclr, #DAIF_ABT_BIT
	ret


func enable_debug_exceptions
	msr	daifclr, #DAIF_DBG_BIT
	ret


func disable_irq
	msr	daifset, #DAIF_IRQ_BIT
	ret


func disable_fiq
	msr	daifset, #DAIF_FIQ_BIT
	ret


func disable_serror
	msr	daifset, #DAIF_ABT_BIT
	ret


func disable_debug_exceptions
	msr	daifset, #DAIF_DBG_BIT
	ret


func read_daif
	mrs	x0, daif
	ret


func write_daif
	msr	daif, x0
	ret

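	/* -----------------------------------------------------
	 * Exception state accessors: read and write SPSR_ELx and
	 * ELR_ELx. Each helper takes or returns the register
	 * value in x0.
	 * -----------------------------------------------------
	 */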
func read_spsr_el1
	mrs	x0, spsr_el1
	ret


func read_spsr_el2
	mrs	x0, spsr_el2
	ret


func read_spsr_el3
	mrs	x0, spsr_el3
	ret


func write_spsr_el1
	msr	spsr_el1, x0
	ret


func write_spsr_el2
	msr	spsr_el2, x0
	ret


func write_spsr_el3
	msr	spsr_el3, x0
	ret


func read_elr_el1
	mrs	x0, elr_el1
	ret


func read_elr_el2
	mrs	x0, elr_el2
	ret


func read_elr_el3
	mrs	x0, elr_el3
	ret


func write_elr_el1
	msr	elr_el1, x0
	ret


func write_elr_el2
	msr	elr_el2, x0
	ret


func write_elr_el3
	msr	elr_el3, x0
	ret

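	/* -----------------------------------------------------
	 * Thin wrappers exposing individual instructions
	 * (barriers, wfe/wfi, sev, eret, smc) to C callers.
	 * -----------------------------------------------------
	 */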
func dsb
	dsb	sy
	ret


func isb
	isb
	ret


func sev
	sev
	ret


func wfe
	wfe
	ret


func wfi
	wfi
	ret


func eret
	eret

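	/* Issue an SMC with function identifier 0. Note that
	 * there is no ret after the instruction: the helper
	 * appears to assume that the secure monitor never
	 * returns control to the instruction following the smc.
	 */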
func smc
	smc	#0

/* -----------------------------------------------------------------------
 * void zeromem16(void *mem, unsigned int length);
 *
 * Initialise a memory region to 0.
 * The memory address must be 16-byte aligned.
 * -----------------------------------------------------------------------
 */
func zeromem16
	add	x2, x0, x1
/* zero 16 bytes at a time */
z_loop16:
	sub	x3, x2, x0
	cmp	x3, #16
	b.lt	z_loop1
	stp	xzr, xzr, [x0], #16
	b	z_loop16
/* zero the remaining bytes one by one */
z_loop1:
	cmp	x0, x2
	b.eq	z_end
	strb	wzr, [x0], #1
	b	z_loop1
z_end:	ret

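/* A minimal usage sketch from C, assuming a hypothetical 16-byte
 * aligned buffer (neither the symbol nor the declaration is
 * provided by this file):
 *
 *	extern void zeromem16(void *mem, unsigned int length);
 *	zeromem16(scratch_buf, sizeof(scratch_buf));
 */
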
/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy16
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lt	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy the remaining bytes one by one */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:	ret

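/* Design note: memcpy16 copies in 16-byte ldp/stp pairs while at
 * least 16 bytes remain, then falls back to a byte-by-byte loop
 * for the tail. Because both addresses must be 16-byte aligned,
 * only the final length % 16 bytes ever take the slow path.
 */
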
/* ---------------------------------------------------------------------------
 * Disable the MMU at EL3.
 * This is implemented in assembler to ensure that the data cache is cleaned
 * and invalidated after the MMU is disabled, without any intervening cacheable
 * data accesses.
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
	mrs	x0, sctlr_el3
	bic	x0, x0, x1
	msr	sctlr_el3, x0
	isb				// ensure MMU is off
	mov	x0, #DCCISW		// DCache clean and invalidate
	b	dcsw_op_all

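/* Variant that also clears SCTLR_EL3.I, disabling the MMU, the
 * data cache and the instruction cache. It reuses the tail of
 * disable_mmu_el3 via the do_disable_mmu label; both entry points
 * presumably return to their caller through the ret at the end of
 * dcsw_op_all (defined elsewhere).
 */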
func disable_mmu_icache_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu