blob: 3001a45ec8b12c1a14579004b9e0c729d64efde6 [file] [log] [blame]
Achin Gupta4f6ad662013-10-25 09:08:21 +01001/*
Dan Handleyab2d31e2013-12-02 19:25:12 +00002 * Copyright (c) 2013, ARM Limited and Contributors. All rights reserved.
Achin Gupta4f6ad662013-10-25 09:08:21 +01003 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <arch_helpers.h>
Achin Gupta4a826dd2013-11-25 14:00:56 +000032#include <runtime_svc.h>
33
34 .globl save_regs
35 .globl restore_regs
Achin Gupta4f6ad662013-10-25 09:08:21 +010036
37 .globl enable_irq
38 .globl disable_irq
39
40 .globl enable_fiq
41 .globl disable_fiq
42
43 .globl enable_serror
44 .globl disable_serror
45
Sandrine Bailleux37382742013-11-18 17:26:59 +000046 .globl enable_debug_exceptions
47 .globl disable_debug_exceptions
48
Achin Gupta4f6ad662013-10-25 09:08:21 +010049 .globl read_daif
50 .globl write_daif
51
52 .globl read_spsr
53 .globl read_spsr_el1
54 .globl read_spsr_el2
55 .globl read_spsr_el3
56
57 .globl write_spsr
58 .globl write_spsr_el1
59 .globl write_spsr_el2
60 .globl write_spsr_el3
61
62 .globl read_elr
63 .globl read_elr_el1
64 .globl read_elr_el2
65 .globl read_elr_el3
66
67 .globl write_elr
68 .globl write_elr_el1
69 .globl write_elr_el2
70 .globl write_elr_el3
71
72 .globl get_afflvl_shift
73 .globl mpidr_mask_lower_afflvls
74 .globl dsb
75 .globl isb
76 .globl sev
77 .globl wfe
78 .globl wfi
79 .globl eret
80 .globl smc
81
Sandrine Bailleux65f546a2013-11-28 09:43:06 +000082 .globl zeromem16
83 .globl memcpy16
Achin Gupta4f6ad662013-10-25 09:08:21 +010084
85 .section .text, "ax"
86
/* -----------------------------------------------------
 * save_regs: spill the caller's general purpose context
 * onto the stack in the gp_regs layout (GPREGS_* offsets
 * presumably provided via runtime_svc.h -- TODO confirm).
 * Saves x0-x28, SP_EL0 and SPSR_EL3; x29/x30 are NOT
 * saved here -- presumably preserved by the caller.
 * Clobbers: x0 (reused as scratch after being saved).
 * -----------------------------------------------------
 */
save_regs:; .type save_regs, %function
	sub	sp, sp, #GPREGS_FP_OFF		/* carve out the gp_regs frame */
	stp	x0, x1, [sp, #GPREGS_X0_OFF]
	stp	x2, x3, [sp, #GPREGS_X2_OFF]
	stp	x4, x5, [sp, #GPREGS_X4_OFF]
	stp	x6, x7, [sp, #GPREGS_X6_OFF]
	stp	x8, x9, [sp, #GPREGS_X8_OFF]
	stp	x10, x11, [sp, #GPREGS_X10_OFF]
	stp	x12, x13, [sp, #GPREGS_X12_OFF]
	stp	x14, x15, [sp, #GPREGS_X14_OFF]
	stp	x16, x17, [sp, #GPREGS_X16_OFF]
	stp	x18, x19, [sp, #GPREGS_X18_OFF]
	stp	x20, x21, [sp, #GPREGS_X20_OFF]
	stp	x22, x23, [sp, #GPREGS_X22_OFF]
	stp	x24, x25, [sp, #GPREGS_X24_OFF]
	stp	x26, x27, [sp, #GPREGS_X26_OFF]
	mrs	x0, sp_el0
	stp	x28, x0, [sp, #GPREGS_X28_OFF]	/* x28 and SP_EL0 share one pair slot */
	mrs	x0, spsr_el3
	str	w0, [sp, #GPREGS_SPSR_OFF]	/* SPSR stored as 32 bits (w0) */
	ret
108
109
/* -----------------------------------------------------
 * restore_regs: exact inverse of save_regs. Reloads
 * SPSR_EL3, SP_EL0 and x0-x28 from the gp_regs frame on
 * the stack, then pops the frame. x9 is used as scratch
 * for the system-register moves before being finally
 * reloaded from its own slot.
 * -----------------------------------------------------
 */
restore_regs:; .type restore_regs, %function
	ldr	w9, [sp, #GPREGS_SPSR_OFF]	/* SPSR was saved as 32 bits */
	msr	spsr_el3, x9
	ldp	x28, x9, [sp, #GPREGS_X28_OFF]	/* second word of the pair is SP_EL0 */
	msr	sp_el0, x9
	ldp	x26, x27, [sp, #GPREGS_X26_OFF]
	ldp	x24, x25, [sp, #GPREGS_X24_OFF]
	ldp	x22, x23, [sp, #GPREGS_X22_OFF]
	ldp	x20, x21, [sp, #GPREGS_X20_OFF]
	ldp	x18, x19, [sp, #GPREGS_X18_OFF]
	ldp	x16, x17, [sp, #GPREGS_X16_OFF]
	ldp	x14, x15, [sp, #GPREGS_X14_OFF]
	ldp	x12, x13, [sp, #GPREGS_X12_OFF]
	ldp	x10, x11, [sp, #GPREGS_X10_OFF]
	ldp	x8, x9, [sp, #GPREGS_X8_OFF]	/* x9 scratch finally overwritten here */
	ldp	x6, x7, [sp, #GPREGS_X6_OFF]
	ldp	x4, x5, [sp, #GPREGS_X4_OFF]
	ldp	x2, x3, [sp, #GPREGS_X2_OFF]
	ldp	x0, x1, [sp, #GPREGS_X0_OFF]
	add	sp, sp, #GPREGS_FP_OFF		/* drop the frame */
	ret
131
/* -----------------------------------------------------
 * get_afflvl_shift
 * In:  x0 = affinity level (0..3)
 * Out: x0 = left-shift to reach that affinity field in
 *      an MPIDR value.
 * Level 3 is bumped to index 4 (cinc when x0 == 3)
 * because Aff3 presumably sits one field higher in
 * MPIDR_EL1 (bits [39:32]) -- TODO confirm.
 * Clobbers: x1, flags.
 * -----------------------------------------------------
 */
get_afflvl_shift:; .type get_afflvl_shift, %function
	cmp	x0, #3
	cinc	x0, x0, eq		/* level 3 -> field index 4 */
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1		/* index * field width */
	ret
138
/* -----------------------------------------------------
 * mpidr_mask_lower_afflvls
 * In:  x0 = MPIDR value, x1 = affinity level (0..3)
 * Out: x0 = MPIDR with every affinity field BELOW the
 *      given level cleared (shift right then left by
 *      the field's bit position).
 * Level 3 bumped to index 4, same as get_afflvl_shift.
 * Clobbers: x1, x2, flags.
 * -----------------------------------------------------
 */
mpidr_mask_lower_afflvls:; .type mpidr_mask_lower_afflvls, %function
	cmp	x1, #3
	cinc	x1, x1, eq		/* level 3 -> field index 4 */
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2		/* x2 = bit position of the field */
	lsr	x0, x0, x2		/* clear everything below it */
	lsl	x0, x0, x2
	ret
147
148 /* -----------------------------------------------------
149 * Asynchronous exception manipulation accessors
150 * -----------------------------------------------------
151 */
152enable_irq:; .type enable_irq, %function
153 msr daifclr, #DAIF_IRQ_BIT
154 ret
155
156
157enable_fiq:; .type enable_fiq, %function
158 msr daifclr, #DAIF_FIQ_BIT
159 ret
160
161
162enable_serror:; .type enable_serror, %function
163 msr daifclr, #DAIF_ABT_BIT
164 ret
165
166
Sandrine Bailleux37382742013-11-18 17:26:59 +0000167enable_debug_exceptions:
168 msr daifclr, #DAIF_DBG_BIT
169 ret
170
171
Achin Gupta4f6ad662013-10-25 09:08:21 +0100172disable_irq:; .type disable_irq, %function
173 msr daifset, #DAIF_IRQ_BIT
174 ret
175
176
177disable_fiq:; .type disable_fiq, %function
178 msr daifset, #DAIF_FIQ_BIT
179 ret
180
181
182disable_serror:; .type disable_serror, %function
183 msr daifset, #DAIF_ABT_BIT
184 ret
185
186
Sandrine Bailleux37382742013-11-18 17:26:59 +0000187disable_debug_exceptions:
188 msr daifset, #DAIF_DBG_BIT
189 ret
190
191
/* -----------------------------------------------------
 * read_daif / write_daif: raw accessors for the whole
 * PSTATE DAIF exception-mask field.
 * read:  x0 <- DAIF.   write: DAIF <- x0.
 * -----------------------------------------------------
 */
read_daif:; .type read_daif, %function
	mrs	x0, daif
	ret


write_daif:; .type write_daif, %function
	msr	daif, x0
	ret
200
201
/* -----------------------------------------------------
 * read_spsr: return SPSR_ELx for the current exception
 * level, dispatching on CurrentEL; each EL-specific
 * helper below returns directly to read_spsr's caller.
 * NOTE(review): no default case -- if none of EL1/2/3
 * matches (i.e. EL0), execution falls through into
 * read_spsr_el1. Presumably these are never called from
 * EL0 -- confirm against callers.
 * -----------------------------------------------------
 */
read_spsr:; .type read_spsr, %function
	mrs	x0, CurrentEl
	cmp	x0, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	read_spsr_el1
	cmp	x0, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	read_spsr_el2
	cmp	x0, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	read_spsr_el3


read_spsr_el1:; .type read_spsr_el1, %function
	mrs	x0, spsr_el1
	ret


read_spsr_el2:; .type read_spsr_el2, %function
	mrs	x0, spsr_el2
	ret


read_spsr_el3:; .type read_spsr_el3, %function
	mrs	x0, spsr_el3
	ret
225
226
/* -----------------------------------------------------
 * write_spsr: write x0 into SPSR_ELx for the current
 * exception level, dispatching on CurrentEL (x1 used as
 * scratch so the value in x0 is preserved). The isb in
 * each helper ensures the system-register write is
 * visible to subsequent instructions before returning.
 * NOTE(review): no default case -- an unmatched
 * CurrentEL (EL0) falls through into write_spsr_el1;
 * presumably unreachable -- confirm against callers.
 * -----------------------------------------------------
 */
write_spsr:; .type write_spsr, %function
	mrs	x1, CurrentEl
	cmp	x1, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	write_spsr_el1
	cmp	x1, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	write_spsr_el2
	cmp	x1, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	write_spsr_el3


write_spsr_el1:; .type write_spsr_el1, %function
	msr	spsr_el1, x0
	isb
	ret


write_spsr_el2:; .type write_spsr_el2, %function
	msr	spsr_el2, x0
	isb
	ret


write_spsr_el3:; .type write_spsr_el3, %function
	msr	spsr_el3, x0
	isb
	ret
253
254
/* -----------------------------------------------------
 * read_elr: return ELR_ELx (exception return address)
 * for the current exception level, dispatching on
 * CurrentEL; each helper returns directly.
 * NOTE(review): no default case -- an unmatched
 * CurrentEL (EL0) falls through into read_elr_el1;
 * presumably unreachable -- confirm against callers.
 * -----------------------------------------------------
 */
read_elr:; .type read_elr, %function
	mrs	x0, CurrentEl
	cmp	x0, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	read_elr_el1
	cmp	x0, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	read_elr_el2
	cmp	x0, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	read_elr_el3


read_elr_el1:; .type read_elr_el1, %function
	mrs	x0, elr_el1
	ret


read_elr_el2:; .type read_elr_el2, %function
	mrs	x0, elr_el2
	ret


read_elr_el3:; .type read_elr_el3, %function
	mrs	x0, elr_el3
	ret
278
279
/* -----------------------------------------------------
 * write_elr: write x0 into ELR_ELx for the current
 * exception level (x1 used as scratch to keep x0
 * intact). The isb makes the write visible to
 * subsequent instructions before returning.
 * NOTE(review): no default case -- an unmatched
 * CurrentEL (EL0) falls through into write_elr_el1;
 * presumably unreachable -- confirm against callers.
 * -----------------------------------------------------
 */
write_elr:; .type write_elr, %function
	mrs	x1, CurrentEl
	cmp	x1, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	write_elr_el1
	cmp	x1, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	write_elr_el2
	cmp	x1, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	write_elr_el3


write_elr_el1:; .type write_elr_el1, %function
	msr	elr_el1, x0
	isb
	ret


write_elr_el2:; .type write_elr_el2, %function
	msr	elr_el2, x0
	isb
	ret


write_elr_el3:; .type write_elr_el3, %function
	msr	elr_el3, x0
	isb
	ret
306
307
/* -----------------------------------------------------
 * Thin C-callable wrappers around single instructions:
 * memory barriers and event/wait hints. No arguments,
 * no return value, no clobbers.
 * -----------------------------------------------------
 */
dsb:; .type dsb, %function
	dsb	sy			/* full-system data synchronization barrier */
	ret


isb:; .type isb, %function
	isb				/* flush the pipeline */
	ret


sev:; .type sev, %function
	sev				/* send event to all PEs */
	ret


wfe:; .type wfe, %function
	wfe				/* wait for event */
	ret


wfi:; .type wfi, %function
	wfi				/* wait for interrupt */
	ret
331
332
/* -----------------------------------------------------
 * eret: perform an exception return using the current
 * ELR/SPSR -- never returns to its caller, hence no ret.
 * smc: issue SMC #0 to trap to EL3.
 * NOTE(review): no ret follows the smc -- presumably
 * control never resumes at the following instruction
 * (the handler erets elsewhere) -- confirm against
 * callers before relying on this.
 * -----------------------------------------------------
 */
eret:; .type eret, %function
	eret


smc:; .type smc, %function
	smc	#0
Sandrine Bailleux65f546a2013-11-28 09:43:06 +0000339
/* -----------------------------------------------------------------------
 * void zeromem16(void *mem, unsigned int length);
 *
 * Zero-fill 'length' bytes starting at 'mem'.
 * The base address must be 16-byte aligned.
 * x0 = cursor, x1 = bytes left to clear (both volatile under AAPCS64).
 * -----------------------------------------------------------------------
 */
zeromem16:
z_zero16:
	cmp	x1, #16			/* at least a full 16-byte chunk left? */
	b.lt	z_zero1
	stp	xzr, xzr, [x0], #16	/* clear 16 bytes per iteration */
	sub	x1, x1, #16
	b	z_zero16
z_zero1:
	cbz	x1, z_done		/* tail: clear the rest byte by byte */
	strb	wzr, [x0], #1
	sub	x1, x1, #1
	b	z_zero1
z_done:
	ret
363
364
/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy 'length' bytes from 'src' to 'dest'. The areas must not overlap.
 * Both addresses must be 16-byte aligned.
 * x0 = dest cursor, x1 = src cursor, x5 = one past the last source byte.
 * Clobbers: x3, x4, x5 (all volatile under AAPCS64).
 * --------------------------------------------------------------------------
 */
memcpy16:
	add	x5, x1, x2		/* x5 = end of the source region */
m_copy16:
	sub	x4, x5, x1		/* x4 = bytes remaining */
	cmp	x4, #16
	b.lt	m_copy1
	ldp	x3, x4, [x1], #16	/* move 16 bytes per iteration */
	stp	x3, x4, [x0], #16
	b	m_copy16
m_copy1:
	cmp	x1, x5			/* tail: copy the rest byte by byte */
	b.eq	m_done
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	b	m_copy1
m_done:
	ret
389m_end: ret