/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>

	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	eret
	.globl	smc

	.globl	zeromem16
	.globl	memcpy16

	.globl	disable_mmu_el3
	.globl	disable_mmu_icache_el3

#if SUPPORT_VFP
	.globl	enable_vfp
#endif

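/* -----------------------------------------------------------------------
 * Return (in x0) the bit shift of the MPIDR affinity field for the
 * affinity level passed in x0. Each affinity field is 8 bits wide and
 * affinity level 3 lives at MPIDR bits [39:32], hence the conditional
 * increment: e.g. level 1 yields a shift of 8, level 3 a shift of 32.
 * -----------------------------------------------------------------------
 */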
func get_afflvl_shift
	cmp	x0, #3
	cinc	x0, x0, eq
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1
	ret

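/* -----------------------------------------------------------------------
 * Mask out (zero) all MPIDR affinity fields below the affinity level
 * passed in x1, returning the masked MPIDR in x0. The level is converted
 * to a bit shift as in get_afflvl_shift, then the lower bits are cleared
 * with a shift-right/shift-left pair.
 * -----------------------------------------------------------------------
 */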
func mpidr_mask_lower_afflvls
	cmp	x1, #3
	cinc	x1, x1, eq
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2
	lsr	x0, x0, x2
	lsl	x0, x0, x2
	ret

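/* -----------------------------------------------------------------------
 * Perform an exception return from the current exception level, using
 * the ELR and SPSR values the caller has already programmed.
 * -----------------------------------------------------------------------
 */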
func eret
	eret

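/* -----------------------------------------------------------------------
 * Issue an SMC, with the function identifier and arguments already
 * loaded into x0-x7 by the caller.
 * -----------------------------------------------------------------------
 */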
func smc
	smc	#0

/* -----------------------------------------------------------------------
 * void zeromem16(void *mem, unsigned int length);
 *
 * Initialise a memory region to 0.
 * The memory address must be 16-byte aligned.
 * -----------------------------------------------------------------------
 */
func zeromem16
	add	x2, x0, x1
/* zero 16 bytes at a time */
z_loop16:
	sub	x3, x2, x0
	cmp	x3, #16
	b.lt	z_loop1
	stp	xzr, xzr, [x0], #16
	b	z_loop16
/* zero the remaining bytes one by one */
z_loop1:
	cmp	x0, x2
	b.eq	z_end
	strb	wzr, [x0], #1
	b	z_loop1
z_end:	ret
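
/*
 * A minimal usage sketch (hypothetical call site; `scratch_buf` is an
 * assumed 16-byte aligned buffer, not defined in this file):
 *
 *	adr	x0, scratch_buf		// mem
 *	mov	x1, #256		// length in bytes
 *	bl	zeromem16
 */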

/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy16
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lt	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy the remaining bytes one by one */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:	ret
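
/*
 * A minimal usage sketch (hypothetical call site; `src_buf` and
 * `dst_buf` are assumed 16-byte aligned, non-overlapping buffers not
 * defined in this file):
 *
 *	adr	x0, dst_buf		// dest
 *	adr	x1, src_buf		// src
 *	mov	x2, #64			// length in bytes
 *	bl	memcpy16
 */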

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL3.
 * This is implemented in assembler to ensure that the data cache is cleaned
 * and invalidated after the MMU is disabled, without any intervening cacheable
 * data accesses.
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
	mrs	x0, sctlr_el3
	bic	x0, x0, x1
	msr	sctlr_el3, x0
	isb				// ensure MMU is off
	mov	x0, #DCCISW		// DCache clean and invalidate
	b	dcsw_op_all


func disable_mmu_icache_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu

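/*
 * Note: both routines above branch (rather than call) into dcsw_op_all,
 * so the set/way clean and invalidate returns directly to the original
 * caller via the still-intact link register. A minimal sketch of the
 * (hypothetical) C-side declarations:
 *
 *	void disable_mmu_el3(void);
 *	void disable_mmu_icache_el3(void);
 */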

/* ---------------------------------------------------------------------------
 * Enable the use of VFP at EL3
 * ---------------------------------------------------------------------------
 */
#if SUPPORT_VFP
func enable_vfp
	mrs	x0, cpacr_el1
	orr	x0, x0, #CPACR_VFP_BITS		// allow FP/SIMD access at EL1 and EL0
	msr	cpacr_el1, x0
	mrs	x0, cptr_el3
	mov	x1, #AARCH64_CPTR_TFP		// TFP traps FP/SIMD accesses to EL3
	bic	x0, x0, x1			// clear the trap bit
	msr	cptr_el3, x0
	isb
	ret
#endif
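
/*
 * A minimal usage sketch (hypothetical call site): since the symbol is
 * only assembled when SUPPORT_VFP is set, callers must be guarded the
 * same way.
 *
 *	#if SUPPORT_VFP
 *		bl	enable_vfp
 *	#endif
 */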