/*
 * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef ASM_MACROS_S
#define ASM_MACROS_S

#include <arch.h>
#include <common/asm_macros_common.S>
#include <lib/spinlock.h>

/*
 * TLBI instruction with type specifier that implements the workaround for
 * erratum 813419 of Cortex-A57.
 */
#if ERRATA_A57_813419
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc;	\
	dsb	ish;		\
	stcopr	_reg, _coproc
#else
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc
#endif

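/*
 * Usage sketch (illustrative, not part of this header): invalidate all
 * unified TLB entries, assuming TLBIALL carries the CP15 operand list
 * defined in arch.h. With ERRATA_A57_813419 the write is issued twice
 * around a dsb ish, as the erratum workaround requires:
 *
 *	mov	r0, #0
 *	TLB_INVALIDATE(r0, TLBIALL)
 */
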
#define WORD_SIZE	4

	/*
	 * Coprocessor register accessors
	 */
	.macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
	mrc	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	.macro ldcopr16 reg1, reg2, coproc, opc1, CRm
	mrrc	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm

	.macro stcopr reg, coproc, opc1, CRn, CRm, opc2
	mcr	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	.macro stcopr16 reg1, reg2, coproc, opc1, CRm
	mcrr	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm

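	/*
	 * Usage sketch (illustrative): read-modify-write a CP15 register,
	 * assuming the SCTLR operand list and SCTLR_C_BIT come from arch.h:
	 *
	 *	ldcopr	r0, SCTLR
	 *	orr	r0, r0, #SCTLR_C_BIT
	 *	stcopr	r0, SCTLR
	 *	isb
	 */
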
	/* Cache line size helpers */
	.macro dcache_line_size reg, tmp
	ldcopr	\tmp, CTR
	ubfx	\tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH
	mov	\reg, #WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm

	.macro icache_line_size reg, tmp
	ldcopr	\tmp, CTR
	and	\tmp, \tmp, #CTR_IMINLINE_MASK
	mov	\reg, #WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm

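	/*
	 * Usage sketch (illustrative): clean a buffer one D-cache line at a
	 * time. The register roles and the DCCMVAC operand list from arch.h
	 * are assumptions:
	 *
	 *	dcache_line_size r2, r3		@ r2 = line size in bytes
	 * 1:	stcopr	r0, DCCMVAC		@ clean line containing [r0]
	 *	add	r0, r0, r2
	 *	cmp	r0, r1			@ r1 = end of buffer
	 *	blo	1b
	 *	dsb	sy
	 */
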
	/*
	 * Declare the exception vector table, ensuring it is aligned on a
	 * 32-byte boundary.
	 */
	.macro vector_base label
	.section .vectors, "ax"
	.align 5
	\label:
	.endm

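	/*
	 * Usage sketch (illustrative): the AArch32 vector table is eight
	 * 4-byte entries, so one branch per exception type fits within the
	 * 32-byte alignment. Handler names here are hypothetical:
	 *
	 *	vector_base my_vector_table
	 *		b	my_reset_handler	@ reset
	 *		b	my_undef_handler	@ undefined instruction
	 *		b	my_smc_handler		@ supervisor call
	 *		...
	 */
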
	/*
	 * This macro calculates the base address of the current CPU's
	 * multi-processor (MP) stack using the plat_my_core_pos() index,
	 * the name of the stack storage and the size of each stack.
	 * Out: r0 = physical address of stack base
	 * Clobber: r14, r1, r2
	 */
	.macro get_my_mp_stack _name, _size
	bl	plat_my_core_pos
	ldr	r2, =(\_name + \_size)
	mov	r1, #\_size
	mla	r0, r0, r1, r2
	.endm

	/*
	 * This macro calculates the base address of a uniprocessor (UP)
	 * stack using the name of the stack storage and the size of the
	 * stack.
	 * Out: r0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	ldr	r0, =(\_name + \_size)
	.endm

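	/*
	 * Usage sketch (illustrative): set up the current CPU's stack at
	 * entry. The storage name and size symbol are assumptions, modelled
	 * on common platform code:
	 *
	 *	get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
	 *	mov	sp, r0
	 */
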
#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
	/*
	 * Macro for mitigating against speculative execution.
	 * ARMv7 cores without the Virtualization extension do not support
	 * the eret instruction.
	 */
	.macro exception_return
	movs	pc, lr
	dsb	nsh
	isb
	.endm

#else
	/*
	 * Macro for mitigating against speculative execution beyond ERET.
	 * If possible, use the Speculation Barrier instruction defined in
	 * Armv8.5.
	 */
	.macro exception_return
	eret
#if ARM_ARCH_AT_LEAST(8, 5)
	sb
#else
	dsb	nsh
	isb
#endif
	.endm
#endif

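	/*
	 * Usage sketch (illustrative): at the end of an exception handler,
	 * restore the saved context and return; the macro above picks the
	 * correct return sequence and speculation barrier at build time:
	 *
	 *	@ ... restore GP registers and SPSR ...
	 *	exception_return
	 */
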
#if (ARM_ARCH_MAJOR == 7)
	/* ARMv7 does not support the stl instruction */
	.macro stl _reg, _write_lock
	dmb
	str	\_reg, \_write_lock
	dsb
	.endm
#endif

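	/*
	 * Usage sketch (illustrative): release a lock with store-release
	 * semantics; on ARMv7 the macro above stands in for the ARMv8 stl
	 * instruction. The register holding the lock address is arbitrary:
	 *
	 *	mov	r1, #0
	 *	stl	r1, [r2]	@ r2 = address of the lock word
	 */
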
	/*
	 * Helper macro to generate the best mov/movw/movt combination
	 * according to the value to be moved.
	 */
	.macro mov_imm _reg, _val
	.if ((\_val) & 0xffff0000) == 0
	mov	\_reg, #(\_val)
	.else
	movw	\_reg, #((\_val) & 0xffff)
	movt	\_reg, #((\_val) >> 16)
	.endif
	.endm

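	/*
	 * Usage sketch (illustrative): a value that fits in 16 bits
	 * assembles to a single mov, while a wider one becomes a
	 * movw/movt pair:
	 *
	 *	mov_imm	r0, 0x1000		@ mov  r0, #0x1000
	 *	mov_imm	r1, 0x80001000		@ movw r1, #0x1000
	 *					@ movt r1, #0x8000
	 */
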
	/*
	 * Macro to mark instances where we're jumping to a function and
	 * don't expect a return. To provide the function being jumped to
	 * with additional information, we use the 'bl' instruction to jump
	 * rather than 'b'.
	 *
	 * Debuggers infer the location of a call from where LR points to,
	 * which is usually the instruction after 'bl'. If this macro
	 * expansion happens to be the last location in a function, that
	 * will cause the LR to point to a location beyond the function,
	 * thereby misleading the debugger's backtrace. We therefore insert
	 * a 'nop' after the function call for debug builds, unless the
	 * 'skip_nop' parameter is non-zero.
	 */
	.macro no_ret _func:req, skip_nop=0
	bl	\_func
#if DEBUG
	.ifeq \skip_nop
	nop
	.endif
#endif
	.endm

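	/*
	 * Usage sketch (illustrative): terminate a code path that must not
	 * return, assuming plat_panic_handler never returns:
	 *
	 *	no_ret	plat_panic_handler
	 */
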
	/*
	 * Reserve space for a spin lock in an assembly file.
	 */
	.macro define_asm_spinlock _name:req
	.align	SPINLOCK_ASM_ALIGN
	\_name:
	.space	SPINLOCK_ASM_SIZE
	.endm

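	/*
	 * Usage sketch (illustrative): reserve a lock in a writable data
	 * section and operate on it elsewhere via spin_lock()/spin_unlock()
	 * from lib/spinlock.h. The section and lock names are hypothetical:
	 *
	 *	.section .bss.asm_locks, "aw"
	 *	define_asm_spinlock asm_console_lock
	 */
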
	/*
	 * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
	 * and the top 32 bits of `_val` into `_reg_h`. If either the bottom
	 * or top word of `_val` is zero, the corresponding OR operation
	 * is skipped.
	 */
	.macro orr64_imm _reg_l, _reg_h, _val
	.if (\_val >> 32)
	orr	\_reg_h, \_reg_h, #(\_val >> 32)
	.endif
	.if (\_val & 0xffffffff)
	orr	\_reg_l, \_reg_l, #(\_val & 0xffffffff)
	.endif
	.endm

	/*
	 * Helper macro to bitwise-clear bits in `_reg_l` and
	 * `_reg_h` given a 64-bit immediate `_val`. The set bits
	 * in the bottom word of `_val` dictate which bits from
	 * `_reg_l` should be cleared. Similarly, the set bits in
	 * the top word of `_val` dictate which bits from `_reg_h`
	 * should be cleared. If either the bottom or top word of
	 * `_val` is zero, the corresponding BIC operation is skipped.
	 */
	.macro bic64_imm _reg_l, _reg_h, _val
	.if (\_val >> 32)
	bic	\_reg_h, \_reg_h, #(\_val >> 32)
	.endif
	.if (\_val & 0xffffffff)
	bic	\_reg_l, \_reg_l, #(\_val & 0xffffffff)
	.endif
	.endm

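	/*
	 * Usage sketch (illustrative): set and clear bits in a 64-bit value
	 * held as a register pair, e.g. one read with ldcopr16. The
	 * CNTVOFF_64 operand-list name and the masks are assumptions:
	 *
	 *	ldcopr16  r0, r1, CNTVOFF_64	@ r1:r0 = 64-bit value
	 *	orr64_imm r0, r1, 0x100000001	@ set bits 0 and 32
	 *	bic64_imm r0, r1, 0x200000002	@ clear bits 1 and 33
	 *	stcopr16  r0, r1, CNTVOFF_64
	 */
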
	/*
	 * Helper macro for carrying out division in software when
	 * hardware division is not supported. \top holds the dividend
	 * on entry and the remainder on exit. \bot holds the divisor.
	 * \div receives the quotient and \temp is a temporary register
	 * used in the calculation. The division algorithm has been
	 * obtained from:
	 * http://www.keil.com/support/man/docs/armasm/armasm_dom1359731155623.htm
	 */
	.macro softudiv div:req, top:req, bot:req, temp:req

	mov	\temp, \bot
	cmp	\temp, \top, lsr #1
div1:
	movls	\temp, \temp, lsl #1
	cmp	\temp, \top, lsr #1
	bls	div1
	mov	\div, #0

div2:
	cmp	\top, \temp
	subcs	\top, \top, \temp
	adc	\div, \div, \div
	mov	\temp, \temp, lsr #1
	cmp	\temp, \bot
	bhs	div2
	.endm
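
	/*
	 * Usage sketch (illustrative): compute 100 / 7 entirely in
	 * software; afterwards r0 holds the quotient (14) and r1 the
	 * remainder (2). Note the fixed div1/div2 labels mean the macro
	 * can be expanded at most once per assembly file:
	 *
	 *	mov	r1, #100	@ dividend
	 *	mov	r2, #7		@ divisor
	 *	softudiv r0, r1, r2, r3
	 */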
#endif /* ASM_MACROS_S */