/*
 * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	.globl	smc
	.globl	zeromem
	.globl	zero_normalmem
	.globl	memcpy4
	.globl	disable_mmu_icache_secure
	.globl	disable_mmu_secure

func smc
	/*
	 * Under the AArch32 AAPCS only the first four arguments are
	 * passed in registers r0-r3; the remaining ones (r4-r6) are
	 * passed on the stack, so they have to be loaded into registers
	 * r4-r6 explicitly here.
	 * Clobbers: r4-r6
	 */
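	/*
	 * Illustration only: a hypothetical C-side declaration (the real
	 * prototype lives in the project's C headers and may differ)
	 * would look something like
	 *
	 *   void smc(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3,
	 *            uint32_t r4, uint32_t r5, uint32_t r6);
	 *
	 * so that, per the AAPCS, arguments 5-7 end up on the caller's
	 * stack and can be reloaded by the ldm below.
	 */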
	ldm	sp, {r4, r5, r6}
	smc	#0
endfunc smc

/* -----------------------------------------------------------------------
 * void zeromem(void *mem, unsigned int length)
 *
 * Initialise a region in normal memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * -----------------------------------------------------------------------
 */
func zeromem
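	/*
	 * Overview of the code below:
	 *  1. Zero byte by byte until the cursor reaches an 8-byte
	 *     aligned address (or fall back entirely to the byte loop).
	 *  2. Zero 8 bytes at a time with stmia up to the last 8-byte
	 *     aligned address before stop_address.
	 *  3. Zero the remaining tail byte by byte.
	 */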
	/*
	 * Readable names for registers
	 *
	 * Registers r0, r1 and r2 are also set by zeromem which
	 * branches into the fallback path directly, so cursor, length and
	 * stop_address should not be retargeted to other registers.
	 */
	cursor       .req r0  /* Start address and then current address */
	length       .req r1  /* Length in bytes of the region to zero out */
	/*
	 * Reusing the r1 register as length is only used at the beginning of
	 * the function.
	 */
	stop_address .req r1  /* Address past the last zeroed byte */
	zeroreg1     .req r2  /* Source register filled with 0 */
	zeroreg2     .req r3  /* Source register filled with 0 */
	tmp          .req r12 /* Temporary scratch register */

	mov	zeroreg1, #0

	/* stop_address is the address past the last byte to zero */
	add	stop_address, cursor, length

	/*
	 * Length cannot be used anymore as it shares the same register with
	 * stop_address.
	 */
	.unreq	length

	/*
	 * If the start address is already aligned to 8 bytes, skip this loop.
	 */
	tst	cursor, #(8-1)
	beq	.Lzeromem_8bytes_aligned

	/* Calculate the next address aligned to 8 bytes */
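	/*
	 * orr sets the low three bits of the address, so the following
	 * addition of 1 rounds it up to the next multiple of 8. The
	 * "adds" form updates the flags, so a wrap-around to 0 (region
	 * reaching the top of the address space) is caught by the beq
	 * below.
	 */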
	orr	tmp, cursor, #(8-1)
	adds	tmp, tmp, #1
	/* If it overflows, fall back to byte per byte zeroing */
	beq	.Lzeromem_1byte_aligned
	/* If the next aligned address is after the stop address, fall back */
	cmp	tmp, stop_address
	bhs	.Lzeromem_1byte_aligned

	/* zero byte per byte */
1:
	strb	zeroreg1, [cursor], #1
	cmp	cursor, tmp
	bne	1b

	/* zero 8 bytes at a time */
.Lzeromem_8bytes_aligned:

	/* Calculate the last 8 bytes aligned address. */
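	/*
	 * bic clears the low three bits, i.e. rounds stop_address down
	 * to a multiple of 8; any remaining tail bytes are zeroed by the
	 * byte loop at .Lzeromem_1byte_aligned.
	 */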
	bic	tmp, stop_address, #(8-1)

	cmp	cursor, tmp
	bhs	2f

	mov	zeroreg2, #0
1:
	stmia	cursor!, {zeroreg1, zeroreg2}
	cmp	cursor, tmp
	blo	1b
2:

	/* zero byte per byte */
.Lzeromem_1byte_aligned:
	cmp	cursor, stop_address
	beq	2f
1:
	strb	zeroreg1, [cursor], #1
	cmp	cursor, stop_address
	bne	1b
2:
	bx	lr

	.unreq	cursor
	/*
	 * length is already unreq'ed to reuse the register for another
	 * variable.
	 */
	.unreq	stop_address
	.unreq	zeroreg1
	.unreq	zeroreg2
	.unreq	tmp
endfunc zeromem

/*
 * AArch32 does not have special ways of zeroing normal memory as AArch64 does
 * using the DC ZVA instruction, so we just alias zero_normalmem to zeromem.
 */
.equ	zero_normalmem, zeromem

/* --------------------------------------------------------------------------
 * void memcpy4(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 4-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy4
#if ENABLE_ASSERTIONS
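	/* Assert that both dest (r0) and src (r1) are 4-byte aligned */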
	orr	r3, r0, r1
	tst	r3, #0x3
	ASM_ASSERT(eq)
#endif
/* copy 4 bytes at a time */
m_loop4:
	cmp	r2, #4
	blo	m_loop1
	ldr	r3, [r1], #4
	str	r3, [r0], #4
	subs	r2, r2, #4
	bne	m_loop4
	bx	lr

/* copy byte per byte */
m_loop1:
	ldrb	r3, [r1], #1
	strb	r3, [r0], #1
	subs	r2, r2, #1
	bne	m_loop1
	bx	lr
endfunc memcpy4

/* ---------------------------------------------------------------------------
 * Disable the MMU in Secure State
 * ---------------------------------------------------------------------------
 */

func disable_mmu_secure
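	/*
	 * r1 holds the SCTLR bits that do_disable_mmu will clear:
	 * M (MMU enable) and C (data cache enable).
	 */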
	mov	r1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
#if ERRATA_A9_794073
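	/*
	 * Workaround for Cortex-A9 erratum 794073: invalidate the whole
	 * branch predictor (write to BPIALL) before disabling the MMU;
	 * the dsb waits for the invalidation to complete.
	 */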
	stcopr	r0, BPIALL
	dsb
#endif
	ldcopr	r0, SCTLR
	bic	r0, r0, r1
	stcopr	r0, SCTLR
	isb				// ensure MMU is off
	dsb	sy
	bx	lr
endfunc disable_mmu_secure


func disable_mmu_icache_secure
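	/*
	 * Same sequence as disable_mmu_secure, but the mask also clears
	 * the I bit so the instruction cache is disabled as well.
	 */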
	ldr	r1, =(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu
endfunc disable_mmu_icache_secure