/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	.globl	smc
	.globl	zeromem
	.globl	zero_normalmem
	.globl	memcpy4
	.globl	disable_mmu_icache_secure
	.globl	disable_mmu_secure

func smc
	/*
	 * On AArch32 only the first four arguments are passed in registers
	 * r0-r3; the remaining ones (r4-r6 here) are passed on the stack,
	 * so load them into r4-r6 explicitly before issuing the SMC.
	 * Clobbers: r4-r6
	 */
	ldm	sp, {r4, r5, r6}
	smc	#0
endfunc smc
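
/*
 * Usage sketch: the matching C prototype is assumed to take all seven
 * SMC arguments, along the lines of
 *
 *	void smc(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3,
 *		 uint32_t r4, uint32_t r5, uint32_t r6);
 *
 * Under the AAPCS the first four arguments arrive in r0-r3 and the last
 * three on the stack, which is exactly what the ldm above reloads. The
 * prototype shown is illustrative; the actual declaration lives in a
 * header elsewhere in the tree.
 */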

/* -----------------------------------------------------------------------
 * void zeromem(void *mem, unsigned int length)
 *
 * Initialise a region in normal memory to 0. This function complies with
 * the AAPCS and can be called from C code.
 *
 * -----------------------------------------------------------------------
 */
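/*
 * The implementation zeroes any unaligned head byte by byte up to an
 * 8-byte boundary, zeroes 8 bytes at a time from there, and finishes
 * the remaining tail byte by byte.
 */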
func zeromem
	/*
	 * Readable names for registers
	 *
	 * Registers r0, r1 and r2 are also set by zeromem which
	 * branches into the fallback path directly, so cursor, length and
	 * stop_address should not be retargeted to other registers.
	 */
	cursor       .req r0 /* Start address and then current address */
	length       .req r1 /* Length in bytes of the region to zero out */
	/*
	 * length is only needed at the beginning of the function, so its
	 * register (r1) can be reused for stop_address afterwards.
	 */
	stop_address .req r1 /* Address past the last zeroed byte */
	zeroreg1     .req r2 /* Source register filled with 0 */
	zeroreg2     .req r3 /* Source register filled with 0 */
	tmp          .req r12 /* Temporary scratch register */

	mov	zeroreg1, #0

	/* stop_address is the address past the last byte to zero */
	add	stop_address, cursor, length

	/*
	 * length cannot be used anymore as it shares its register with
	 * stop_address.
	 */
	.unreq	length

	/*
	 * If the start address is already aligned to 8 bytes, skip this loop.
	 */
	tst	cursor, #(8-1)
	beq	.Lzeromem_8bytes_aligned

	/*
	 * Calculate the next address aligned to 8 bytes: setting the low
	 * three bits and adding 1 rounds cursor up to the next 8-byte
	 * boundary. adds updates the flags so a wrap around to zero is
	 * caught by the following branch.
	 */
	orr	tmp, cursor, #(8-1)
	adds	tmp, tmp, #1
	/* If it overflows, fall back to byte per byte zeroing */
	beq	.Lzeromem_1byte_aligned
	/* If the next aligned address is at or past the stop address, fall back */
	cmp	tmp, stop_address
	bhs	.Lzeromem_1byte_aligned

	/* zero byte per byte */
1:
	strb	zeroreg1, [cursor], #1
	cmp	cursor, tmp
	bne	1b

	/* zero 8 bytes at a time */
.Lzeromem_8bytes_aligned:

	/* Calculate the last 8-byte aligned address */
	bic	tmp, stop_address, #(8-1)

	cmp	cursor, tmp
	bhs	2f

	mov	zeroreg2, #0
1:
	stmia	cursor!, {zeroreg1, zeroreg2}
	cmp	cursor, tmp
	blo	1b
2:

	/* zero byte per byte */
.Lzeromem_1byte_aligned:
	cmp	cursor, stop_address
	beq	2f
1:
	strb	zeroreg1, [cursor], #1
	cmp	cursor, stop_address
	bne	1b
2:
	bx	lr

	.unreq	cursor
	/*
	 * length is already unreq'ed to reuse its register for another
	 * variable.
	 */
	.unreq	stop_address
	.unreq	zeroreg1
	.unreq	zeroreg2
	.unreq	tmp
endfunc zeromem

/*
 * AArch32 has no special way of zeroing normal memory comparable to the
 * AArch64 DC ZVA instruction, so zero_normalmem is simply an alias for
 * zeromem.
 */
.equ	zero_normalmem, zeromem
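
/*
 * Usage sketch (illustrative, not taken from this file): from C, zeroing
 * a buffer in normal memory looks like
 *
 *	static unsigned char buf[64];		// hypothetical buffer
 *	zero_normalmem(buf, sizeof(buf));
 *
 * Calling zeromem directly has the same effect here, as the two symbols
 * are aliased above.
 */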

/* --------------------------------------------------------------------------
 * void memcpy4(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 4-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy4
#if ENABLE_ASSERTIONS
	/* Assert that both the source and destination are 4-byte aligned */
	orr	r3, r0, r1
	tst	r3, #0x3
	ASM_ASSERT(eq)
#endif
	/* copy 4 bytes at a time */
m_loop4:
	cmp	r2, #4
	blo	m_loop1
	ldr	r3, [r1], #4
	str	r3, [r0], #4
	sub	r2, r2, #4
	b	m_loop4
	/* copy byte per byte */
m_loop1:
	cmp	r2, #0
	beq	m_end
	ldrb	r3, [r1], #1
	strb	r3, [r0], #1
	subs	r2, r2, #1
	bne	m_loop1
m_end:
	bx	lr
endfunc memcpy4
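
/*
 * Usage sketch (illustrative): both pointers must be word-aligned, e.g.
 *
 *	uint32_t src[4], dst[4];	// hypothetical, naturally aligned
 *	memcpy4(dst, src, sizeof(src));
 *
 * When built with ENABLE_ASSERTIONS, misaligned pointers trip the
 * ASM_ASSERT above.
 */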

/* ---------------------------------------------------------------------------
 * Disable the MMU in Secure State
 * ---------------------------------------------------------------------------
 */

func disable_mmu_secure
	/* r1 holds the set of SCTLR bits to clear on the shared path below */
	mov	r1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
	ldcopr	r0, SCTLR
	bic	r0, r0, r1
	stcopr	r0, SCTLR
	isb				// ensure MMU is off
	dsb	sy
	bx	lr
endfunc disable_mmu_secure


func disable_mmu_icache_secure
	/* Also clear the I-cache enable bit, then reuse the common path */
	ldr	r1, =(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu
endfunc disable_mmu_icache_secure
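
/*
 * Usage note (a sketch of intent, not a contract from this file): these
 * helpers clear SCTLR.M and SCTLR.C (plus SCTLR.I for the icache variant)
 * in the Secure state, e.g. just before handing control to another image:
 *
 *	disable_mmu_icache_secure();	// MMU, D-cache and I-cache off
 *
 * They only flip the enable bits; any cache clean/invalidate maintenance
 * is assumed to be the caller's responsibility.
 */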