/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef __ASM_MACROS_S__
#define __ASM_MACROS_S__

#include <arch.h>
#include <asm_macros_common.S>
#include <spinlock.h>


	/*
	 * Set up a basic stack frame: save the frame pointer and link
	 * register on the stack and make x29 point at the new frame.
	 */
	.macro	func_prologue
	stp	x29, x30, [sp, #-0x10]!
	mov	x29, sp
	.endm

	/*
	 * Tear down the stack frame created by func_prologue.
	 */
	.macro	func_epilogue
	ldp	x29, x30, [sp], #0x10
	.endm
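	/*
	 * A minimal usage sketch for the two macros above. The function
	 * and callee names are hypothetical and only illustrate where the
	 * prologue and epilogue are expected to appear:
	 *
	 *	my_helper:
	 *		func_prologue
	 *		bl	some_other_func
	 *		func_epilogue
	 *		ret
	 */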


	/*
	 * Compute, in bytes, the line size of the smallest data cache
	 * implemented, from CTR_EL0.DminLine (log2 of the number of
	 * words in that line).
	 */
	.macro	dcache_line_size  reg, tmp
	mrs	\tmp, ctr_el0
	ubfx	\tmp, \tmp, #16, #4
	mov	\reg, #4
	lsl	\reg, \reg, \tmp
	.endm
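	/*
	 * Example: a possible clean-to-PoC loop built on dcache_line_size.
	 * The register choices and the 'clean_loop' label are arbitrary
	 * and purely illustrative:
	 *
	 *	dcache_line_size x2, x3		// x2 = line size in bytes
	 *  clean_loop:
	 *	dc	cvac, x0		// x0 = line-aligned cursor
	 *	add	x0, x0, x2
	 *	cmp	x0, x1			// x1 = end of buffer
	 *	b.lo	clean_loop
	 *	dsb	sy
	 */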


	/*
	 * Compute, in bytes, the line size of the smallest instruction
	 * cache implemented, from CTR_EL0.IminLine (log2 of the number
	 * of words in that line).
	 */
	.macro	icache_line_size  reg, tmp
	mrs	\tmp, ctr_el0
	and	\tmp, \tmp, #0xf
	mov	\reg, #4
	lsl	\reg, \reg, \tmp
	.endm


	/*
	 * Check that the current exception was caused by an AArch64 SMC:
	 * read the exception class from ESR_EL3 and branch to the given
	 * label if the exception is anything else. Clobbers x0.
	 */
	.macro	smc_check label
	mrs	x0, esr_el3
	ubfx	x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x0, #EC_AARCH64_SMC
	b.ne	$label
	.endm
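	/*
	 * Example (illustrative only): at the top of a lower-EL synchronous
	 * exception handler, divert anything that is not an AArch64 SMC.
	 * The 'unexpected_sync' label is a hypothetical handler.
	 *
	 *	smc_check unexpected_sync
	 *	... continue handling the SMC, arguments in x0-x17 ...
	 */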

	/*
	 * Declare the exception vector table, enforcing it is aligned on a
	 * 2KB boundary, as required by the ARMv8 architecture.
	 * Use zero bytes as the fill value to be stored in the padding bytes
	 * so that it inserts illegal AArch64 instructions. This increases
	 * security, robustness and potentially facilitates debugging.
	 */
	.macro vector_base  label
	.section .vectors, "ax"
	.align 11, 0
	\label:
	.endm

	/*
	 * Create an entry in the exception vector table, enforcing it is
	 * aligned on a 128-byte boundary, as required by the ARMv8 architecture.
	 * Use zero bytes as the fill value to be stored in the padding bytes
	 * so that it inserts illegal AArch64 instructions. This increases
	 * security, robustness and potentially facilitates debugging.
	 */
	.macro vector_entry  label
	.section .vectors, "ax"
	.align 7, 0
	\label:
	.endm
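	/*
	 * Illustrative use of the two macros above; the table, entry and
	 * handler names are hypothetical:
	 *
	 *	vector_base my_exception_vectors
	 *
	 *	vector_entry my_sync_exception_sp_el0
	 *		b	my_sync_handler
	 *	... at most 32 instructions per entry ...
	 */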

	/*
	 * This macro verifies that the given vector doesn't exceed the
	 * architectural limit of 32 instructions. This is meant to be placed
	 * immediately after the last instruction in the vector. It takes the
	 * vector entry as the parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm
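	/*
	 * For instance, closing the hypothetical entry shown above:
	 *
	 *	vector_entry my_sync_exception_sp_el0
	 *		b	my_sync_handler
	 *	check_vector_size my_sync_exception_sp_el0
	 */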

#if ENABLE_PLAT_COMPAT
	/*
	 * This macro calculates the base address of an MP stack using the
	 * platform_get_core_pos() index, the name of the stack storage and
	 * the size of each stack
	 * In: X0 = MPIDR of CPU whose stack is wanted
	 * Out: X0 = physical address of stack base
	 * Clobber: X30, X1, X2
	 */
	.macro get_mp_stack _name, _size
	bl	platform_get_core_pos
	ldr	x2, =(\_name + \_size)
	mov	x1, #\_size
	madd	x0, x0, x1, x2
	.endm
#endif

	/*
	 * This macro calculates the base address of the current CPU's MP stack
	 * using the plat_my_core_pos() index, the name of the stack storage
	 * and the size of each stack
	 * Out: X0 = physical address of stack base
	 * Clobber: X30, X1, X2
	 */
	.macro get_my_mp_stack _name, _size
	bl	plat_my_core_pos
	ldr	x2, =(\_name + \_size)
	mov	x1, #\_size
	madd	x0, x0, x1, x2
	.endm
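	/*
	 * Typical (illustrative) use during early boot, assuming a per-CPU
	 * stack array named 'platform_normal_stacks' of PLATFORM_STACK_SIZE
	 * bytes each; both names are platform-specific assumptions:
	 *
	 *	get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
	 *	mov	sp, x0
	 */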

	/*
	 * This macro calculates the base address of a UP stack using the
	 * name of the stack storage and the size of the stack
	 * Out: X0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	ldr	x0, =(\_name + \_size)
	.endm

	/*
	 * Helper macro to generate the best mov/movk combinations according
	 * to the value to be moved. The 16 bits from '_shift' are tested and,
	 * if not zero, they are moved into '_reg' without affecting
	 * other bits.
	 */
	.macro _mov_imm16 _reg, _val, _shift
		.if (\_val >> \_shift) & 0xffff
			.if (\_val & (1 << \_shift - 1))
				movk	\_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
			.else
				mov	\_reg, \_val & (0xffff << \_shift)
			.endif
		.endif
	.endm

	/*
	 * Helper macro to load arbitrary values into 32 or 64-bit registers,
	 * generating the best mov/movk combinations. Many base addresses are
	 * 64KB aligned; in that case the macro eliminates the update of bits
	 * 15:0 altogether.
	 */
	.macro mov_imm _reg, _val
		.if (\_val) == 0
			mov	\_reg, #0
		.else
			_mov_imm16	\_reg, (\_val), 0
			_mov_imm16	\_reg, (\_val), 16
			_mov_imm16	\_reg, (\_val), 32
			_mov_imm16	\_reg, (\_val), 48
		.endif
	.endm
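	/*
	 * For example (the constants are arbitrary illustrations),
	 *
	 *	mov_imm	x0, 0x82001000
	 *
	 * expands to:
	 *
	 *	mov	x0, 0x1000
	 *	movk	x0, 0x8200, LSL 16
	 *
	 * whereas a 64KB-aligned value such as 0x82000000 collapses to a
	 * single 'mov' and skips the bits 15:0 update entirely.
	 */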

	/*
	 * Macro to mark instances where we're jumping to a function and don't
	 * expect a return. To provide the function being jumped to with
	 * additional information, we use the 'bl' instruction for the jump
	 * rather than 'b'.
	 *
	 * Debuggers infer the location of a call from where LR points to,
	 * which is usually the instruction after 'bl'. If this macro expansion
	 * happens to be the last location in a function, that will cause the
	 * LR to point to a location beyond the function, thereby misleading
	 * the debugger's back trace. We therefore insert a 'nop' after the
	 * function call for debug builds, unless the 'skip_nop' parameter is
	 * non-zero.
	 */
	.macro no_ret _func:req, skip_nop=0
		bl	\_func
#if DEBUG
		.ifeq \skip_nop
			nop
		.endif
#endif
	.endm
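	/*
	 * Example (the callee is only an illustration): jumping to a
	 * handler that is never expected to return, instead of a plain 'b':
	 *
	 *	no_ret	plat_panic_handler
	 */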

	/*
	 * Reserve space for a spin lock in an assembly file.
	 */
	.macro define_asm_spinlock _name:req
	.align	SPINLOCK_ASM_ALIGN
	\_name:
	.space	SPINLOCK_ASM_SIZE
	.endm
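	/*
	 * Illustrative use (the lock name and section placement are
	 * assumptions); the lock can then be taken with the spinlock API
	 * from spinlock.h, e.g. spin_lock()/spin_unlock():
	 *
	 *	.data
	 *	define_asm_spinlock my_console_lock
	 */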

#endif /* __ASM_MACROS_S__ */