/*
 * Copyright (c) 2016-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef ASM_MACROS_S
#define ASM_MACROS_S

#include <arch.h>
#include <common/asm_macros_common.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/spinlock.h>

/*
 * TLBI instruction with type specifier that implements the workaround for
 * errata 813419 of Cortex-A57.
 *
 * The workaround issues the TLB maintenance write twice, with a DSB ISH
 * in between, so the invalidation reliably takes effect on affected
 * revisions of the core. When the erratum workaround is not compiled in,
 * a single system-register write is emitted.
 */
#if ERRATA_A57_813419
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc; \
	dsb	ish; \
	stcopr	_reg, _coproc
#else
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc
#endif

	/*
	 * Coprocessor register accessors.
	 *
	 * ldcopr: read a 32-bit coprocessor register into \reg using MRC.
	 * The remaining arguments select the register (they are normally
	 * supplied via a single accessor macro from arch.h, e.g. CTR).
	 */
	.macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
	mrc	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	/*
	 * Read a 64-bit coprocessor register using MRRC: \reg1 receives the
	 * low word and \reg2 the high word.
	 */
	.macro ldcopr16 reg1, reg2, coproc, opc1, CRm
	mrrc	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm

	/* Write \reg to a 32-bit coprocessor register using MCR. */
	.macro stcopr reg, coproc, opc1, CRn, CRm, opc2
	mcr	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	/*
	 * Write the \reg1 (low word) / \reg2 (high word) pair to a 64-bit
	 * coprocessor register using MCRR.
	 */
	.macro stcopr16 reg1, reg2, coproc, opc1, CRm
	mcrr	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm

	/*
	 * Cache line size helpers.
	 *
	 * dcache_line_size: return in \reg the size in bytes of the smallest
	 * data cache line, computed as CPU_WORD_SIZE << CTR.DminLine (the
	 * DminLine field of CTR holds log2 of the line size in words).
	 * Clobbers \tmp.
	 */
	.macro dcache_line_size reg, tmp
	ldcopr	\tmp, CTR
	ubfx	\tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH
	mov	\reg, #CPU_WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm

	/*
	 * Return in \reg the size in bytes of the smallest instruction cache
	 * line: CPU_WORD_SIZE << CTR.IminLine. IminLine occupies the least
	 * significant bits of CTR, so a mask suffices (no shift needed).
	 * Clobbers \tmp.
	 */
	.macro icache_line_size reg, tmp
	ldcopr	\tmp, CTR
	and	\tmp, \tmp, #CTR_IMINLINE_MASK
	mov	\reg, #CPU_WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm

	/*
	 * Declare the exception vector table, enforcing it is aligned on a
	 * 32 byte boundary (.align 5 is a power-of-two alignment of 2^5, as
	 * required for the vector base address).
	 */
	.macro vector_base label
	.section .vectors, "ax"
	.align 5
	\label:
	.endm

	/*
	 * This macro calculates the base address of the current CPU's multi
	 * processor(MP) stack using the plat_my_core_pos() index, the name of
	 * the stack storage and the size of each stack.
	 * The computed address is \_name + \_size * (index + 1), i.e. the
	 * upper end of this CPU's slot in the stack array (stacks descend
	 * from it).
	 * Out: r0 = physical address of stack base
	 * Clobber: r14, r1, r2
	 */
	.macro get_my_mp_stack _name, _size
	bl	plat_my_core_pos
	ldr	r2, =(\_name + \_size)
	mov	r1, #\_size
	mla	r0, r0, r1, r2	/* r0 = index * size + (name + size) */
	.endm

	/*
	 * This macro calculates the base address of a uniprocessor(UP) stack
	 * using the name of the stack storage and the size of the stack,
	 * i.e. the upper end of the reserved region: \_name + \_size.
	 * Out: r0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	ldr	r0, =(\_name + \_size)
	.endm

#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
	/*
	 * Macro for mitigating against speculative execution.
	 * ARMv7 cores without Virtualization extension do not support the
	 * eret instruction, so an exception return is performed with
	 * "movs pc, lr" instead. The barriers that follow are never
	 * architecturally executed; they only bound speculation past the
	 * return.
	 */
	.macro exception_return
	movs	pc, lr
	dsb	nsh
	isb
	.endm

#else
	/*
	 * Macro for mitigating against speculative execution beyond ERET. Uses the
	 * speculation barrier instruction introduced by FEAT_SB, if it's enabled.
	 * Otherwise a DSB NSH + ISB pair after the ERET serves the same
	 * purpose. None of the barrier instructions are architecturally
	 * executed: they exist solely to stop speculation past the return.
	 */
	.macro exception_return
	eret
#if ENABLE_FEAT_SB
	sb
#else
	dsb	nsh
	isb
#endif
	.endm
#endif

#if (ARM_ARCH_MAJOR == 7)
	/*
	 * ARMv7 does not support the stl (store-release) instruction, so
	 * emulate its ordering guarantees by bracketing a plain store with
	 * explicit barriers.
	 */
	.macro stl _reg, _write_lock
	dmb
	str	\_reg, \_write_lock
	dsb
	.endm
#endif

	/*
	 * Helper macro to generate the best mov/movw/movt combinations
	 * according to the value to be moved. A single mov is emitted when
	 * the upper 16 bits of \_val are zero; otherwise a movw/movt pair
	 * builds the full 32-bit value. \_val must be an assembly-time
	 * constant (it is evaluated by .if).
	 */
	.macro mov_imm _reg, _val
		.if ((\_val) & 0xffff0000) == 0
			mov	\_reg, #(\_val)
		.else
			movw	\_reg, #((\_val) & 0xffff)
			movt	\_reg, #((\_val) >> 16)
		.endif
	.endm

	/*
	 * Macro to mark instances where we're jumping to a function and don't
	 * expect a return. To provide the function being jumped to with
	 * additional information, we use 'bl' instruction to jump rather than
	 * 'b'.
	 *
	 * Debuggers infer the location of a call from where LR points to, which
	 * is usually the instruction after 'bl'. If this macro expansion
	 * happens to be the last location in a function, that'll cause the LR
	 * to point a location beyond the function, thereby misleading debugger
	 * back trace. We therefore insert a 'nop' after the function call for
	 * debug builds, unless 'skip_nop' parameter is non-zero.
	 */
	.macro no_ret _func:req, skip_nop=0
	bl	\_func
#if DEBUG
	.ifeq \skip_nop
	nop
	.endif
#endif
	.endm

	/*
	 * Reserve space for a spin lock in assembly file.
	 * Alignment and size come from SPINLOCK_ASM_ALIGN/SPINLOCK_ASM_SIZE,
	 * exported by <lib/spinlock.h> (included at the top of this file).
	 */
	.macro define_asm_spinlock _name:req
	.align	SPINLOCK_ASM_ALIGN
	\_name:
	.space	SPINLOCK_ASM_SIZE
	.endm

	/*
	 * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
	 * and the top 32 bits of `_val` into `_reg_h`. If either the bottom
	 * or top word of `_val` is zero, the corresponding OR operation
	 * is skipped.
	 * `_val` must be an assembly-time constant.
	 * NOTE(review): each non-zero 32-bit half must also be encodable as
	 * an ARM modified immediate, otherwise assembly fails — confirm for
	 * new call sites.
	 */
	.macro orr64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			orr \_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			orr \_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm

	/*
	 * Helper macro to bitwise-clear bits in `_reg_l` and
	 * `_reg_h` given a 64 bit immediate `_val`. The set bits
	 * in the bottom word of `_val` dictate which bits from
	 * `_reg_l` should be cleared. Similarly, the set bits in
	 * the top word of `_val` dictate which bits from `_reg_h`
	 * should be cleared. If either the bottom or top word of
	 * `_val` is zero, the corresponding BIC operation is skipped.
	 * `_val` must be an assembly-time constant.
	 * NOTE(review): each non-zero 32-bit half must also be encodable as
	 * an ARM modified immediate, otherwise assembly fails — confirm for
	 * new call sites.
	 */
	.macro bic64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			bic \_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			bic \_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm

Usama Arifb69ac082018-12-12 17:08:33 +0000209 /*
210 * Helper macro for carrying out division in software when
211 * hardware division is not suported. \top holds the dividend
212 * in the function call and the remainder after
213 * the function is executed. \bot holds the divisor. \div holds
214 * the quotient and \temp is a temporary registed used in calcualtion.
215 * The division algorithm has been obtained from:
216 * http://www.keil.com/support/man/docs/armasm/armasm_dom1359731155623.htm
217 */
218 .macro softudiv div:req,top:req,bot:req,temp:req
219
220 mov \temp, \bot
221 cmp \temp, \top, lsr #1
222div1:
223 movls \temp, \temp, lsl #1
224 cmp \temp, \top, lsr #1
225 bls div1
226 mov \div, #0
227
228div2:
229 cmp \top, \temp
230 subcs \top, \top,\temp
231 ADC \div, \div, \div
232 mov \temp, \temp, lsr #1
233 cmp \temp, \bot
234 bhs div2
235 .endm
#endif /* ASM_MACROS_S */