/*
 * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef ASM_MACROS_S
#define ASM_MACROS_S

#include <arch.h>
#include <common/asm_macros_common.S>
#include <lib/spinlock.h>

/*
 * TLBI instruction with type specifier that implements the workaround for
 * erratum 813419 of Cortex-A57.
 */
#if ERRATA_A57_813419
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc; \
	dsb	ish; \
	stcopr	_reg, _coproc
#else
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc
#endif

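	/*
	 * Illustrative use (assuming TLBIALL is the "invalidate entire TLB"
	 * coprocessor encoding provided by arch.h; that operation does not
	 * interpret the value in the scratch register):
	 *
	 *	mov	r0, #0
	 *	TLB_INVALIDATE(r0, TLBIALL)
	 */
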
#define WORD_SIZE	4

	/*
	 * Coprocessor register accessors
	 */
	.macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
	mrc	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	.macro ldcopr16 reg1, reg2, coproc, opc1, CRm
	mrrc	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm

	.macro stcopr reg, coproc, opc1, CRn, CRm, opc2
	mcr	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	.macro stcopr16 reg1, reg2, coproc, opc1, CRm
	mcrr	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm

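	/*
	 * Illustrative use of the accessors above, assuming SCTLR and
	 * SCTLR_C_BIT carry the encoding and bit definition from arch.h:
	 *
	 *	ldcopr	r0, SCTLR		@ read SCTLR into r0
	 *	orr	r0, r0, #SCTLR_C_BIT	@ set the data cache enable bit
	 *	stcopr	r0, SCTLR		@ write r0 back to SCTLR
	 *
	 * The *16 variants take a register pair and a CRm-only encoding for
	 * 64-bit coprocessor registers.
	 */
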
	/* Cache line size helpers */
	.macro dcache_line_size reg, tmp
	ldcopr	\tmp, CTR
	ubfx	\tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH
	mov	\reg, #WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm

	.macro icache_line_size reg, tmp
	ldcopr	\tmp, CTR
	and	\tmp, \tmp, #CTR_IMINLINE_MASK
	mov	\reg, #WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm

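	/*
	 * Example (register choice is arbitrary): align a base address in r0
	 * down to the data cache line size before a clean/invalidate loop:
	 *
	 *	dcache_line_size r2, r3		@ r2 = line size in bytes
	 *	sub	r3, r2, #1
	 *	bic	r0, r0, r3		@ r0 = line-aligned base address
	 */
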
	/*
	 * Declare the exception vector table, enforcing it is aligned on a
	 * 32 byte boundary.
	 */
	.macro vector_base label
	.section .vectors, "ax"
	.align 5
	\label:
	.endm

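	/*
	 * Example layout of a table built with vector_base (the table and
	 * handler names below are placeholders):
	 *
	 *	vector_base my_vector_table
	 *		b	my_reset_handler
	 *		b	my_undef_handler
	 *		b	my_svc_handler
	 *		...remaining entries...
	 */
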
	/*
	 * This macro calculates the base address of the current CPU's
	 * multi-processor (MP) stack using the plat_my_core_pos() index,
	 * the name of the stack storage and the size of each stack.
	 * Out: r0 = physical address of stack base
	 * Clobber: r14, r1, r2
	 */
	.macro get_my_mp_stack _name, _size
	bl	plat_my_core_pos
	ldr	r2, =(\_name + \_size)
	mov	r1, #\_size
	mla	r0, r0, r1, r2
	.endm

	/*
	 * This macro calculates the base address of a uniprocessor (UP)
	 * stack using the name of the stack storage and the size of the
	 * stack.
	 * Out: r0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	ldr	r0, =(\_name + \_size)
	.endm

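	/*
	 * Example (hypothetical symbol names): with a per-CPU stack array
	 * "platform_normal_stacks" providing PLATFORM_STACK_SIZE bytes per
	 * core, the runtime stack could be set up as:
	 *
	 *	get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
	 *	mov	sp, r0
	 */
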
#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
	/*
	 * Macro for mitigating against speculative execution.
	 * ARMv7 cores without the Virtualization Extensions do not support
	 * the eret instruction.
	 */
	.macro exception_return
	movs	pc, lr
	dsb	nsh
	isb
	.endm

#else
	/*
	 * Macro for mitigating against speculative execution beyond ERET.
	 */
	.macro exception_return
	eret
	dsb	nsh
	isb
	.endm
#endif

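	/*
	 * Example: use exception_return instead of a bare 'eret' (or
	 * 'movs pc, lr' on ARMv7 without the Virtualization Extensions) when
	 * leaving an exception handler, so the barrier sequence against
	 * speculation past the return is always emitted:
	 *
	 *	<restore general purpose registers>
	 *	exception_return
	 */
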
#if (ARM_ARCH_MAJOR == 7)
	/* ARMv7 does not support the stl instruction */
	.macro stl _reg, _write_lock
	dmb
	str	\_reg, \_write_lock
	dsb
	.endm
#endif

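	/*
	 * Example (register/operand choice is arbitrary): release a write
	 * lock with store-release semantics; on ARMv7 the macro above
	 * substitutes a dmb/str/dsb sequence for the missing instruction:
	 *
	 *	mov	r1, #0
	 *	stl	r1, [r0]
	 */
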
	/*
	 * Helper macro to generate the best mov/movw/movt combination
	 * for the value to be moved.
	 */
	.macro mov_imm _reg, _val
		.if ((\_val) & 0xffff0000) == 0
			mov	\_reg, #(\_val)
		.else
			movw	\_reg, #((\_val) & 0xffff)
			movt	\_reg, #((\_val) >> 16)
		.endif
	.endm

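	/*
	 * Example expansions (values are arbitrary):
	 *
	 *	mov_imm	r0, 0x1000	->	mov	r0, #0x1000
	 *	mov_imm	r1, 0x12345678	->	movw	r1, #0x5678
	 *					movt	r1, #0x1234
	 */
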
	/*
	 * Macro to mark instances where we're jumping to a function and don't
	 * expect a return. To provide the function being jumped to with
	 * additional information, we use 'bl' instruction to jump rather than
	 * 'b'.
	 *
	 * Debuggers infer the location of a call from where LR points to,
	 * which is usually the instruction after 'bl'. If this macro expansion
	 * happens to be the last location in a function, that'll cause the LR
	 * to point to a location beyond the function, thereby misleading the
	 * debugger back trace. We therefore insert a 'nop' after the function
	 * call for debug builds, unless the 'skip_nop' parameter is non-zero.
	 */
	.macro no_ret _func:req, skip_nop=0
	bl	\_func
#if DEBUG
	.ifeq \skip_nop
	nop
	.endif
#endif
	.endm

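	/*
	 * Example (handler name is illustrative): a terminal call that is
	 * not expected to return:
	 *
	 *	no_ret	plat_panic_handler
	 */
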
	/*
	 * Reserve space for a spin lock in an assembly file.
	 */
	.macro define_asm_spinlock _name:req
	.align	SPINLOCK_ASM_ALIGN
	\_name:
	.space	SPINLOCK_ASM_SIZE
	.endm

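	/*
	 * Example (lock name is illustrative): reserve a lock here and
	 * acquire it from assembly through the C spinlock API, which takes
	 * the lock address in r0 per the AAPCS:
	 *
	 *	define_asm_spinlock my_lock
	 *	...
	 *	ldr	r0, =my_lock
	 *	bl	spin_lock
	 */
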
	/*
	 * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
	 * and the top 32 bits of `_val` into `_reg_h`. If either the bottom
	 * or top word of `_val` is zero, the corresponding OR operation
	 * is skipped.
	 */
	.macro orr64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			orr	\_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			orr	\_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm

	/*
	 * Helper macro to bitwise-clear bits in `_reg_l` and
	 * `_reg_h` given a 64 bit immediate `_val`. The set bits
	 * in the bottom word of `_val` dictate which bits from
	 * `_reg_l` should be cleared. Similarly, the set bits in
	 * the top word of `_val` dictate which bits from `_reg_h`
	 * should be cleared. If either the bottom or top word of
	 * `_val` is zero, the corresponding BIC operation is skipped.
	 */
	.macro bic64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			bic	\_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			bic	\_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm

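	/*
	 * Example read-modify-write of a 64-bit coprocessor register using
	 * the accessors and helpers above (the CPUACTLR encoding and bit
	 * name are illustrative):
	 *
	 *	ldcopr16	r0, r1, CORTEX_A57_CPUACTLR
	 *	orr64_imm	r0, r1, CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_DMB
	 *	stcopr16	r0, r1, CORTEX_A57_CPUACTLR
	 */
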
	/*
	 * Helper macro for carrying out division in software when
	 * hardware division is not supported. \top holds the dividend
	 * on entry and the remainder after the macro has executed.
	 * \bot holds the divisor. \div holds the quotient and \temp is a
	 * temporary register used in the calculation.
	 * The division algorithm has been obtained from:
	 * http://www.keil.com/support/man/docs/armasm/armasm_dom1359731155623.htm
	 */
	.macro softudiv div:req, top:req, bot:req, temp:req

	mov	\temp, \bot
	cmp	\temp, \top, lsr #1
div1:
	movls	\temp, \temp, lsl #1
	cmp	\temp, \top, lsr #1
	bls	div1
	mov	\div, #0

div2:
	cmp	\top, \temp
	subcs	\top, \top, \temp
	adc	\div, \div, \div
	mov	\temp, \temp, lsr #1
	cmp	\temp, \bot
	bhs	div2
	.endm
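
	/*
	 * Example (register assignment is arbitrary): divide r0 by r1 when
	 * no hardware divider is available. Afterwards r2 holds the
	 * quotient, r0 the remainder, and r3 is used as scratch:
	 *
	 *	softudiv r2, r0, r1, r3
	 */
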
#endif /* ASM_MACROS_S */