/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <runtime_svc.h>

	.globl	enable_irq
	.globl	disable_irq

	.globl	enable_fiq
	.globl	disable_fiq

	.globl	enable_serror
	.globl	disable_serror

	.globl	enable_debug_exceptions
	.globl	disable_debug_exceptions

	.globl	read_daif
	.globl	write_daif

	.globl	read_spsr
	.globl	read_spsr_el1
	.globl	read_spsr_el2
	.globl	read_spsr_el3

	.globl	write_spsr
	.globl	write_spsr_el1
	.globl	write_spsr_el2
	.globl	write_spsr_el3

	.globl	read_elr
	.globl	read_elr_el1
	.globl	read_elr_el2
	.globl	read_elr_el3

	.globl	write_elr
	.globl	write_elr_el1
	.globl	write_elr_el2
	.globl	write_elr_el3

	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	dsb
	.globl	isb
	.globl	sev
	.globl	wfe
	.globl	wfi
	.globl	eret
	.globl	smc

	.globl	zeromem16
	.globl	memcpy16

	.section	.text, "ax"

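	/* -----------------------------------------------------
	 * Convert the affinity level in x0 (0 to 3) into the
	 * shift, returned in x0, of that level's affinity field
	 * within an MPIDR value. Level 3 is incremented by one
	 * first to account for the gap between the Aff2 and Aff3
	 * fields in MPIDR_EL1.
	 * -----------------------------------------------------
	 */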
get_afflvl_shift: ; .type get_afflvl_shift, %function
	cmp	x0, #3
	cinc	x0, x0, eq
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1
	ret

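	/* -----------------------------------------------------
	 * Zero the affinity fields of the MPIDR in x0 that lie
	 * below the affinity level passed in x1, by shifting the
	 * value right and then left by the same amount.
	 * -----------------------------------------------------
	 */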
mpidr_mask_lower_afflvls: ; .type mpidr_mask_lower_afflvls, %function
	cmp	x1, #3
	cinc	x1, x1, eq
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2
	lsr	x0, x0, x2
	lsl	x0, x0, x2
	ret

	/* -----------------------------------------------------
	 * Asynchronous exception manipulation accessors
	 * -----------------------------------------------------
	 */
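	/* The enable_* routines clear and the disable_* routines
	 * set the corresponding PSTATE bit through the
	 * DAIFClr/DAIFSet views, so no read-modify-write of DAIF
	 * is needed.
	 */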
enable_irq: ; .type enable_irq, %function
	msr	daifclr, #DAIF_IRQ_BIT
	ret


enable_fiq: ; .type enable_fiq, %function
	msr	daifclr, #DAIF_FIQ_BIT
	ret


enable_serror: ; .type enable_serror, %function
	msr	daifclr, #DAIF_ABT_BIT
	ret


enable_debug_exceptions:
	msr	daifclr, #DAIF_DBG_BIT
	ret


disable_irq: ; .type disable_irq, %function
	msr	daifset, #DAIF_IRQ_BIT
	ret


disable_fiq: ; .type disable_fiq, %function
	msr	daifset, #DAIF_FIQ_BIT
	ret


disable_serror: ; .type disable_serror, %function
	msr	daifset, #DAIF_ABT_BIT
	ret


disable_debug_exceptions:
	msr	daifset, #DAIF_DBG_BIT
	ret


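	/* -----------------------------------------------------
	 * Accessors for the whole DAIF mask: read_daif returns
	 * PSTATE.DAIF in x0, write_daif writes x0 back to it.
	 * -----------------------------------------------------
	 */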
read_daif: ; .type read_daif, %function
	mrs	x0, daif
	ret


write_daif: ; .type write_daif, %function
	msr	daif, x0
	ret


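	/* -----------------------------------------------------
	 * SPSR accessors. read_spsr and write_spsr use CurrentEL
	 * to dispatch to the SPSR of the exception level they
	 * are executing at; the _elX variants access a fixed
	 * exception level directly.
	 * -----------------------------------------------------
	 */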
read_spsr: ; .type read_spsr, %function
	mrs	x0, CurrentEl
	cmp	x0, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	read_spsr_el1
	cmp	x0, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	read_spsr_el2
	cmp	x0, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	read_spsr_el3


read_spsr_el1: ; .type read_spsr_el1, %function
	mrs	x0, spsr_el1
	ret


read_spsr_el2: ; .type read_spsr_el2, %function
	mrs	x0, spsr_el2
	ret


read_spsr_el3: ; .type read_spsr_el3, %function
	mrs	x0, spsr_el3
	ret


write_spsr: ; .type write_spsr, %function
	mrs	x1, CurrentEl
	cmp	x1, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	write_spsr_el1
	cmp	x1, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	write_spsr_el2
	cmp	x1, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	write_spsr_el3


write_spsr_el1: ; .type write_spsr_el1, %function
	msr	spsr_el1, x0
	isb
	ret


write_spsr_el2: ; .type write_spsr_el2, %function
	msr	spsr_el2, x0
	isb
	ret


write_spsr_el3: ; .type write_spsr_el3, %function
	msr	spsr_el3, x0
	isb
	ret


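	/* -----------------------------------------------------
	 * ELR accessors, following the same pattern as the SPSR
	 * accessors above: dispatch on CurrentEL, or access a
	 * fixed exception level directly.
	 * -----------------------------------------------------
	 */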
read_elr: ; .type read_elr, %function
	mrs	x0, CurrentEl
	cmp	x0, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	read_elr_el1
	cmp	x0, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	read_elr_el2
	cmp	x0, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	read_elr_el3


read_elr_el1: ; .type read_elr_el1, %function
	mrs	x0, elr_el1
	ret


read_elr_el2: ; .type read_elr_el2, %function
	mrs	x0, elr_el2
	ret


read_elr_el3: ; .type read_elr_el3, %function
	mrs	x0, elr_el3
	ret


write_elr: ; .type write_elr, %function
	mrs	x1, CurrentEl
	cmp	x1, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	write_elr_el1
	cmp	x1, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	write_elr_el2
	cmp	x1, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	write_elr_el3


write_elr_el1: ; .type write_elr_el1, %function
	msr	elr_el1, x0
	isb
	ret


write_elr_el2: ; .type write_elr_el2, %function
	msr	elr_el2, x0
	isb
	ret


write_elr_el3: ; .type write_elr_el3, %function
	msr	elr_el3, x0
	isb
	ret


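	/* -----------------------------------------------------
	 * Single-instruction wrappers (barriers, hints, eret and
	 * smc) so that these instructions can be issued from C
	 * code.
	 * -----------------------------------------------------
	 */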
dsb: ; .type dsb, %function
	dsb	sy
	ret


isb: ; .type isb, %function
	isb
	ret


sev: ; .type sev, %function
	sev
	ret


wfe: ; .type wfe, %function
	wfe
	ret


wfi: ; .type wfi, %function
	wfi
	ret


eret: ; .type eret, %function
	eret


smc: ; .type smc, %function
	smc	#0

/* -----------------------------------------------------------------------
 * void zeromem16(void *mem, unsigned int length);
 *
 * Initialise a memory region to 0.
 * The memory address must be 16-byte aligned.
 * -----------------------------------------------------------------------
 */
zeromem16:
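/* x2 = first address past the end of the region (mem + length) */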
	add	x2, x0, x1
/* zero 16 bytes at a time */
z_loop16:
	sub	x3, x2, x0
	cmp	x3, #16
	b.lt	z_loop1
	stp	xzr, xzr, [x0], #16
	b	z_loop16
/* zero byte per byte */
z_loop1:
	cmp	x0, x2
	b.eq	z_end
	strb	wzr, [x0], #1
	b	z_loop1
z_end:	ret


/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
memcpy16:
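/* x2 holds the number of bytes left to copy */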
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lt	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy byte per byte */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:	ret