/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	eret
	.globl	smc

	.globl	zeromem16
	.globl	memcpy16

	.globl	disable_mmu_el3
	.globl	disable_mmu_icache_el3

#if SUPPORT_VFP
	.globl	enable_vfp
#endif

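/* -----------------------------------------------------------------------
 * Compute the shift, in bits, of an affinity level field within an
 * MPIDR value: returns aff_lvl * 8 in x0, except that affinity level 3
 * (passed as 3 in x0) yields 32, since Aff3 occupies MPIDR_EL1[39:32].
 * -----------------------------------------------------------------------
 */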
func get_afflvl_shift
	cmp	x0, #3
	cinc	x0, x0, eq
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1
	ret

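/* -----------------------------------------------------------------------
 * Mask out all affinity fields below the given level: x0 holds the
 * MPIDR value and x1 the affinity level. The lower fields are cleared
 * by shifting right then left by the field's bit position; level 3
 * maps to a shift of 32 so that Aff0-Aff2 and the intervening bits
 * are all cleared. The result is returned in x0.
 * -----------------------------------------------------------------------
 */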
func mpidr_mask_lower_afflvls
	cmp	x1, #3
	cinc	x1, x1, eq
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2
	lsr	x0, x0, x2
	lsl	x0, x0, x2
	ret

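/* -----------------------------------------------------------------------
 * Simple wrapper that executes an ERET, performing an exception return
 * using the current ELR and SPSR of this exception level.
 * -----------------------------------------------------------------------
 */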
func eret
	eret

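/* -----------------------------------------------------------------------
 * Simple wrapper that issues SMC #0, with the function identifier and
 * arguments expected to be already loaded in x0-x7 by the caller, per
 * the SMC calling convention.
 * -----------------------------------------------------------------------
 */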
func smc
	smc	#0

/* -----------------------------------------------------------------------
 * void zeromem16(void *mem, unsigned int length);
 *
 * Initialise a memory region to 0.
 * The memory address must be 16-byte aligned.
 * -----------------------------------------------------------------------
 */
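/* A minimal usage sketch from C, assuming a suitably aligned buffer
 * (the buffer name and size are illustrative only):
 *
 *   static uint8_t page_buf[4096] __attribute__((aligned(16)));
 *   zeromem16(page_buf, sizeof(page_buf));
 */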
func zeromem16
#if ASM_ASSERTION
	tst	x0, #0xf
	ASM_ASSERT(eq)
#endif
	add	x2, x0, x1
/* zero 16 bytes at a time */
z_loop16:
	sub	x3, x2, x0
	cmp	x3, #16
	b.lt	z_loop1
	stp	xzr, xzr, [x0], #16
	b	z_loop16
/* zero the trailing bytes one at a time */
z_loop1:
	cmp	x0, x2
	b.eq	z_end
	strb	wzr, [x0], #1
	b	z_loop1
z_end:	ret

/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length);
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas must not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
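/* A minimal usage sketch from C, assuming both buffers are 16-byte
 * aligned and non-overlapping (names are illustrative only):
 *
 *   memcpy16(dst_buf, src_buf, sizeof(src_buf));
 */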
func memcpy16
#if ASM_ASSERTION
	orr	x3, x0, x1
	tst	x3, #0xf
	ASM_ASSERT(eq)
#endif
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lt	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy the trailing bytes one at a time */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:	ret

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL3.
 * This is implemented in assembler to ensure that the data cache is cleaned
 * and invalidated after the MMU is disabled, without any intervening
 * cacheable data accesses.
 * ---------------------------------------------------------------------------
 */
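/* Both variants share the do_disable_mmu sequence below and tail-call
 * dcsw_op_all, so the set/way clean-and-invalidate completes before
 * control returns to the caller. A minimal usage sketch from C:
 *
 *   disable_mmu_el3();	// MMU and D-cache off, D-cache flushed
 */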
func disable_mmu_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
	mrs	x0, sctlr_el3
	bic	x0, x0, x1
	msr	sctlr_el3, x0
	isb				// ensure MMU is off
	mov	x0, #DCCISW		// DCache clean and invalidate
	b	dcsw_op_all		// tail-call; returns to our caller


func disable_mmu_icache_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu

/* ---------------------------------------------------------------------------
 * Enable the use of VFP at EL3, by setting the CPACR_EL1.FPEN bits and
 * clearing the CPTR_EL3.TFP bit so that FP/SIMD accesses are not trapped.
 * ---------------------------------------------------------------------------
 */
#if SUPPORT_VFP
func enable_vfp
	mrs	x0, cpacr_el1
	orr	x0, x0, #CPACR_VFP_BITS	// don't trap FP/SIMD at EL1/EL0
	msr	cpacr_el1, x0
	mrs	x0, cptr_el3
	mov	x1, #AARCH64_CPTR_TFP
	bic	x0, x0, x1		// don't trap FP/SIMD to EL3
	msr	cptr_el3, x0
	isb
	ret
#endif