/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	eret
	.globl	smc

	.globl	zeromem16
	.globl	memcpy16

	.globl	disable_mmu_el3
	.globl	disable_mmu_icache_el3

#if SUPPORT_VFP
	.globl	enable_vfp
#endif

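/* -----------------------------------------------------------------------
 * Compute the bit shift of an affinity field within an MPIDR.
 *
 * Input : x0 = affinity level (0 to 3)
 * Output: x0 = left shift, in bits, of that affinity field. Affinity
 *         level 3 occupies MPIDR bits [39:32], so the level is first
 *         incremented by one to skip the intervening byte.
 * -----------------------------------------------------------------------
 */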
func get_afflvl_shift
	cmp	x0, #3
	cinc	x0, x0, eq
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1
	ret
endfunc get_afflvl_shift

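/* -----------------------------------------------------------------------
 * Clear the affinity fields below a given level in an MPIDR value.
 *
 * Input : x0 = MPIDR, x1 = affinity level (0 to 3)
 * Output: x0 = MPIDR with every affinity field below the requested level
 *         zeroed (the value is shifted right, then left again, by the
 *         shift of that level).
 * -----------------------------------------------------------------------
 */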
func mpidr_mask_lower_afflvls
	cmp	x1, #3
	cinc	x1, x1, eq
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2
	lsr	x0, x0, x2
	lsl	x0, x0, x2
	ret
endfunc mpidr_mask_lower_afflvls

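/* -----------------------------------------------------------------------
 * Perform an exception return from the current exception level, using
 * the target state already programmed into the ELR and SPSR registers.
 * -----------------------------------------------------------------------
 */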
func eret
	eret
endfunc eret

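/* -----------------------------------------------------------------------
 * Issue an SMC. The function identifier and arguments are expected to be
 * in the registers (typically x0-x7) as set up by the caller.
 * -----------------------------------------------------------------------
 */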
func smc
	smc	#0
endfunc smc

/* -----------------------------------------------------------------------
 * void zeromem16(void *mem, unsigned int length);
 *
 * Initialise a memory region to 0.
 * The memory address must be 16-byte aligned.
 * -----------------------------------------------------------------------
 */
func zeromem16
#if ASM_ASSERTION
	tst	x0, #0xf
	ASM_ASSERT(eq)
#endif
	add	x2, x0, x1
/* zero 16 bytes at a time */
z_loop16:
	sub	x3, x2, x0
	cmp	x3, #16
	b.lt	z_loop1
	stp	xzr, xzr, [x0], #16
	b	z_loop16
/* zero byte per byte */
z_loop1:
	cmp	x0, x2
	b.eq	z_end
	strb	wzr, [x0], #1
	b	z_loop1
z_end:
	ret
endfunc zeromem16

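/* -----------------------------------------------------------------------
 * Illustrative call sequence for zeromem16 (a sketch, not taken from the
 * original sources; scratch_buf is a hypothetical symbol assumed to be
 * 16-byte aligned):
 *
 *	ldr	x0, =scratch_buf	// base of the region to clear
 *	mov	x1, #256		// length in bytes
 *	bl	zeromem16
 * -----------------------------------------------------------------------
 */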

/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy16
#if ASM_ASSERTION
	orr	x3, x0, x1
	tst	x3, #0xf
	ASM_ASSERT(eq)
#endif
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lt	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy byte per byte */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:
	ret
endfunc memcpy16

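/* --------------------------------------------------------------------------
 * Illustrative call sequence for memcpy16 (a sketch, not taken from the
 * original sources; dst_buf and src_buf are hypothetical, non-overlapping,
 * 16-byte aligned symbols):
 *
 *	ldr	x0, =dst_buf		// destination
 *	ldr	x1, =src_buf		// source
 *	mov	x2, #64			// length in bytes
 *	bl	memcpy16
 * --------------------------------------------------------------------------
 */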
/* ---------------------------------------------------------------------------
 * Disable the MMU at EL3.
 * This is implemented in assembler to ensure that the data cache is cleaned
 * and invalidated after the MMU is disabled without any intervening cacheable
 * data accesses.
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
	mrs	x0, sctlr_el3
	bic	x0, x0, x1
	msr	sctlr_el3, x0
	isb				// ensure MMU is off
	mov	x0, #DCCISW		// DCache clean and invalidate
	b	dcsw_op_all
endfunc disable_mmu_el3

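/* ---------------------------------------------------------------------------
 * Disable the MMU and the instruction cache at EL3.
 * Shares the tail of disable_mmu_el3 above, additionally clearing the
 * SCTLR_EL3.I bit before the data cache is cleaned and invalidated.
 * ---------------------------------------------------------------------------
 */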
func disable_mmu_icache_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu
endfunc disable_mmu_icache_el3

/* ---------------------------------------------------------------------------
 * Enable the use of VFP at EL3.
 * ---------------------------------------------------------------------------
 */
#if SUPPORT_VFP
func enable_vfp
	mrs	x0, cpacr_el1
	orr	x0, x0, #CPACR_VFP_BITS
	msr	cpacr_el1, x0
	mrs	x0, cptr_el3
	mov	x1, #AARCH64_CPTR_TFP
	bic	x0, x0, x1
	msr	cptr_el3, x0
	isb
	ret
endfunc enable_vfp
#endif
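
/* ---------------------------------------------------------------------------
 * Illustrative use of enable_vfp (a sketch, not taken from the original
 * sources): the routine takes no arguments and clobbers x0 and x1, so a
 * caller would typically invoke it once during early EL3 initialisation,
 * before any floating point or Advanced SIMD registers are touched:
 *
 *	bl	enable_vfp
 * ---------------------------------------------------------------------------
 */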