/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef __ASM_MACROS_S__
#define __ASM_MACROS_S__

#include <arch.h>
#include <asm_macros_common.S>
#include <spinlock.h>

	/* Save x29 (frame pointer) and x30 (link register) and set up the frame */
	.macro	func_prologue
	stp	x29, x30, [sp, #-0x10]!
	mov	x29, sp
	.endm

	/* Restore x29 and x30, unwinding the frame set up by func_prologue */
	.macro	func_epilogue
	ldp	x29, x30, [sp], #0x10
	.endm
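
	/*
	 * Illustrative pairing (a sketch, not assembled here; 'my_func' is a
	 * hypothetical name and 'func'/'endfunc' are assumed to come from
	 * asm_macros_common.S):
	 *
	 *	func my_func
	 *		func_prologue		// push x29/x30, set frame pointer
	 *		...			// body may now use 'bl' safely
	 *		func_epilogue		// pop x29/x30
	 *		ret
	 *	endfunc my_func
	 */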


	/*
	 * Compute the data cache line size in bytes: CTR_EL0.DminLine
	 * (bits [19:16]) holds log2 of the line size in words.
	 */
	.macro	dcache_line_size reg, tmp
	mrs	\tmp, ctr_el0
	ubfx	\tmp, \tmp, #16, #4
	mov	\reg, #4
	lsl	\reg, \reg, \tmp
	.endm
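
	/*
	 * Worked example (a sketch, not assembled): the line size in bytes
	 * is 4 << DminLine. With DminLine = 4:
	 *
	 *	dcache_line_size x2, x3		// x2 = 4 << 4 = 64 bytes
	 */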


	/*
	 * Compute the instruction cache line size in bytes: CTR_EL0.IminLine
	 * (bits [3:0]) holds log2 of the line size in words.
	 */
	.macro	icache_line_size reg, tmp
	mrs	\tmp, ctr_el0
	and	\tmp, \tmp, #0xf
	mov	\reg, #4
	lsl	\reg, \reg, \tmp
	.endm


	/*
	 * Branch to 'label' unless the exception being handled is an
	 * AArch64 SMC, as reported by ESR_EL3.EC. Clobbers x0.
	 */
	.macro	smc_check label
	mrs	x0, esr_el3
	ubfx	x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x0, #EC_AARCH64_SMC
	b.ne	\label
	.endm
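
	/*
	 * Illustrative use (a sketch; 'unexpected_sync' is a hypothetical
	 * handler label): divert anything that is not an AArch64 SMC before
	 * falling through to the SMC handling path.
	 *
	 *	smc_check unexpected_sync	// branches away if EC != SMC64
	 */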

	/*
	 * Declare the exception vector table, enforcing it is aligned on a
	 * 2KB boundary, as required by the ARMv8 architecture.
	 * Zero-fill the padding bytes so that they decode as illegal
	 * AArch64 instructions. This increases security and robustness,
	 * and potentially facilitates debugging.
	 */
	.macro	vector_base label
	.section .vectors, "ax"
	.align 11, 0
	\label:
	.endm

	/*
	 * Create an entry in the exception vector table, enforcing it is
	 * aligned on a 128-byte boundary, as required by the ARMv8
	 * architecture. Zero-fill the padding bytes so that they decode as
	 * illegal AArch64 instructions. This increases security and
	 * robustness, and potentially facilitates debugging.
	 */
	.macro	vector_entry label
	.section .vectors, "ax"
	.align 7, 0
	\label:
	.endm
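
	/*
	 * Typical pairing of the two macros above (a sketch; the label
	 * names are hypothetical):
	 *
	 *	vector_base my_exceptions	// 2KB-aligned table start
	 *
	 *	vector_entry sync_sp_el0	// 128-byte-aligned entry
	 *		b	.		// handler body, at most 32 insns
	 */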

	/*
	 * This macro verifies that the given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry label as its parameter.
	 */
	.macro	check_vector_size since
	.if (. - \since) > (32 * 4)
	.error "Vector exceeds 32 instructions"
	.endif
	.endm
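
	/*
	 * Example (a sketch; the label and handler names are hypothetical):
	 * place the check right after the last instruction of the entry it
	 * guards.
	 *
	 *	vector_entry sync_sp_el0
	 *		b	report_unhandled_exception
	 *	check_vector_size sync_sp_el0	// errors out if > 32 * 4 bytes
	 */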

#if ENABLE_PLAT_COMPAT
	/*
	 * This macro calculates the base address of an MP stack using the
	 * platform_get_core_pos() index, the name of the stack storage and
	 * the size of each stack.
	 * In: X0 = MPIDR of CPU whose stack is wanted
	 * Out: X0 = physical address of stack base
	 * Clobber: X30, X1, X2
	 */
	.macro	get_mp_stack _name, _size
	bl	platform_get_core_pos
	ldr	x2, =(\_name + \_size)
	mov	x1, #\_size
	madd	x0, x0, x1, x2
	.endm
#endif

	/*
	 * This macro calculates the base address of the current CPU's MP
	 * stack using the plat_my_core_pos() index, the name of the stack
	 * storage and the size of each stack.
	 * Out: X0 = physical address of stack base
	 * Clobber: X30, X1, X2
	 */
	.macro	get_my_mp_stack _name, _size
	bl	plat_my_core_pos
	ldr	x2, =(\_name + \_size)
	mov	x1, #\_size
	madd	x0, x0, x1, x2
	.endm
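
	/*
	 * Worked example of the address arithmetic above (a sketch; the
	 * storage name and size are hypothetical): with _name =
	 * platform_normal_stacks, _size = 0x400 and plat_my_core_pos() = 2,
	 *
	 *	X0 = platform_normal_stacks + 0x400 + (2 * 0x400)
	 *
	 * i.e. the end of core 2's stack region, suitable as the initial
	 * value of a full-descending stack pointer.
	 */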

	/*
	 * This macro calculates the base address of a UP stack using the
	 * name of the stack storage and the size of the stack.
	 * Out: X0 = physical address of stack base
	 */
	.macro	get_up_stack _name, _size
	ldr	x0, =(\_name + \_size)
	.endm

	/*
	 * Helper macro to generate the best mov/movk combinations
	 * according to the value to be moved. The 16-bit chunk of '_val'
	 * starting at bit '_shift' is tested and, if non-zero, moved into
	 * '_reg' without affecting the other bits.
	 */
	.macro	_mov_imm16 _reg, _val, _shift
	.if (\_val >> \_shift) & 0xffff
	.if (\_val & ((1 << \_shift) - 1))
	movk	\_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
	.else
	mov	\_reg, \_val & (0xffff << \_shift)
	.endif
	.endif
	.endm

	/*
	 * Helper macro to load arbitrary values into 32 or 64-bit
	 * registers, generating the best mov/movk combinations. Many base
	 * addresses are 64KB aligned; in that case the macro avoids
	 * emitting an instruction to update bits 15:0.
	 */
	.macro	mov_imm _reg, _val
	.if (\_val) == 0
	mov	\_reg, #0
	.else
	_mov_imm16	\_reg, (\_val), 0
	_mov_imm16	\_reg, (\_val), 16
	_mov_imm16	\_reg, (\_val), 32
	_mov_imm16	\_reg, (\_val), 48
	.endif
	.endm
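
	/*
	 * Example expansion (a sketch, not assembled): for the 64KB-aligned
	 * value below, the bits 15:0 chunk is zero, so no instruction is
	 * spent on it.
	 *
	 *	mov_imm	x0, 0x432187650000
	 *	// expands to:
	 *	//	mov	x0, #0x87650000
	 *	//	movk	x0, #0x4321, LSL #32
	 */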

	/*
	 * Macro to mark instances where we're jumping to a function and
	 * don't expect a return. To provide the function being jumped to
	 * with additional information, we use the 'bl' instruction to jump
	 * rather than 'b'.
	 *
	 * Debuggers infer the location of a call from where LR points to,
	 * which is usually the instruction after 'bl'. If this macro
	 * expansion happens to be the last location in a function, LR will
	 * point to a location beyond the function, thereby misleading the
	 * debugger's back trace. We therefore insert a 'nop' after the
	 * function call for debug builds, unless the 'skip_nop' parameter
	 * is non-zero.
	 */
	.macro	no_ret _func:req, skip_nop=0
	bl	\_func
#if DEBUG
	.ifeq \skip_nop
	nop
	.endif
#endif
	.endm
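
	/*
	 * Illustrative use (a sketch; 'plat_panic_handler' is assumed to be
	 * a function that never returns):
	 *
	 *	no_ret	plat_panic_handler	// 'bl' plus trailing nop on DEBUG
	 */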

	/*
	 * Reserve space for a spin lock in an assembly file.
	 */
	.macro	define_asm_spinlock _name:req
	.align	SPINLOCK_ASM_ALIGN
	\_name:
	.space	SPINLOCK_ASM_SIZE
	.endm
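
	/*
	 * Illustrative use (a sketch; 'console_lock' is a hypothetical
	 * name): reserve the lock storage, then pass its address to the
	 * spinlock routines declared in spinlock.h.
	 *
	 *	define_asm_spinlock console_lock
	 *
	 *	adrp	x0, console_lock
	 *	add	x0, x0, :lo12:console_lock
	 *	bl	spin_lock
	 */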

#endif /* __ASM_MACROS_S__ */