/*
 * Copyright (c) 2016-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef ASM_MACROS_S
#define ASM_MACROS_S

#include <arch.h>
#include <common/asm_macros_common.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/spinlock.h>

/*
 * TLBI instruction with type specifier that implements the workaround for
 * errata 813419 of Cortex-A57.
 */
#if ERRATA_A57_813419
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr _reg, _coproc; \
	dsb ish; \
	stcopr _reg, _coproc
#else
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr _reg, _coproc
#endif
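/*
 * Illustrative usage sketch (not part of the original header), assuming the
 * TLBIALL coprocessor register definition from arch.h; the register value is
 * ignored for a TLBIALL operation:
 *
 *	mov	r0, #0
 *	TLB_INVALIDATE(r0, TLBIALL)
 */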

	/*
	 * Coprocessor register accessors
	 */
	.macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
	mrc \coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	.macro ldcopr16 reg1, reg2, coproc, opc1, CRm
	mrrc \coproc, \opc1, \reg1, \reg2, \CRm
	.endm

	.macro stcopr reg, coproc, opc1, CRn, CRm, opc2
	mcr \coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	.macro stcopr16 reg1, reg2, coproc, opc1, CRm
	mcrr \coproc, \opc1, \reg1, \reg2, \CRm
	.endm
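	/*
	 * Illustrative usage sketch (not part of the original header): the
	 * accessors expand to mrc/mcr (or mrrc/mcrr for 64-bit transfers)
	 * using the coprocessor register definitions from arch.h, e.g.
	 * reading the Cache Type Register into r0:
	 *
	 *	ldcopr	r0, CTR
	 */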

	/* Cache line size helpers */
	.macro dcache_line_size reg, tmp
	ldcopr \tmp, CTR
	ubfx \tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH
	mov \reg, #CPU_WORD_SIZE
	lsl \reg, \reg, \tmp
	.endm

	.macro icache_line_size reg, tmp
	ldcopr \tmp, CTR
	and \tmp, \tmp, #CTR_IMINLINE_MASK
	mov \reg, #CPU_WORD_SIZE
	lsl \reg, \reg, \tmp
	.endm
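	/*
	 * Worked example (illustrative, assuming CPU_WORD_SIZE is the 4-byte
	 * AArch32 word): CTR.DminLine/IminLine hold log2 of the line length
	 * in words, so the helpers compute 4 << log2(words). With DminLine
	 * equal to 4, dcache_line_size returns 4 << 4 = 64 bytes.
	 */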

	/*
	 * Declare the exception vector table, enforcing it is aligned on a
	 * 32 byte boundary.
	 */
	.macro vector_base label
	.section .vectors, "ax"
	.align 5
	\label:
	.endm

	/*
	 * This macro calculates the base address of the current CPU's
	 * multiprocessor (MP) stack using the plat_my_core_pos() index, the
	 * name of the stack storage and the size of each stack.
	 * Out: r0 = physical address of stack base
	 * Clobber: r14, r1, r2
	 */
	.macro get_my_mp_stack _name, _size
	bl plat_my_core_pos
	ldr r2, =(\_name + \_size)
	mov r1, #\_size
	mla r0, r0, r1, r2
	.endm
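	/*
	 * Worked example (illustrative): with \_size = 0x800 and
	 * plat_my_core_pos() returning 2, the mla yields
	 * r0 = 2 * 0x800 + (\_name + 0x800) = \_name + 0x1800, i.e. the
	 * upper end of core 2's stack, from which the stack descends.
	 */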

	/*
	 * This macro calculates the base address of a uniprocessor (UP) stack
	 * using the name of the stack storage and the size of the stack.
	 * Out: r0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	ldr r0, =(\_name + \_size)
	.endm

#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
	/*
	 * Macro for mitigating against speculative execution.
	 * ARMv7 cores without the Virtualization extension do not support the
	 * eret instruction.
	 */
	.macro exception_return
	movs pc, lr
	dsb nsh
	isb
	.endm

#else
	/*
	 * Macro for mitigating against speculative execution beyond ERET. Uses
	 * the speculation barrier instruction introduced by FEAT_SB, if it's
	 * enabled.
	 */
	.macro exception_return
	eret
#if ENABLE_FEAT_SB
	sb
#else
	dsb nsh
	isb
#endif
	.endm
#endif
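	/*
	 * Illustrative usage sketch (not part of the original header): the
	 * macro is intended to replace a bare eret (or movs pc, lr) at the
	 * end of an exception handler so that the barrier sequence is always
	 * emitted:
	 *
	 *	exception_return
	 */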

	/* Macro for error synchronization */
	.macro synchronize_errors
	/* Complete any stores that may return an abort */
	dsb sy
	/* Synchronise the CPU context with the completion of the dsb */
	isb
	.endm
130
Etienne Carriere70b1c2f2017-11-05 22:55:47 +0100131#if (ARM_ARCH_MAJOR == 7)
132 /* ARMv7 does not support stl instruction */
133 .macro stl _reg, _write_lock
134 dmb
135 str \_reg, \_write_lock
136 dsb
137 .endm
138#endif
139
Jeenu Viswambharan68aef102016-11-30 15:21:11 +0000140 /*
Etienne Carriere97ad6ce2017-09-01 10:22:20 +0200141 * Helper macro to generate the best mov/movw/movt combinations
142 * according to the value to be moved.
143 */
144 .macro mov_imm _reg, _val
145 .if ((\_val) & 0xffff0000) == 0
146 mov \_reg, #(\_val)
147 .else
148 movw \_reg, #((\_val) & 0xffff)
149 movt \_reg, #((\_val) >> 16)
150 .endif
151 .endm
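	/*
	 * Illustrative expansions (not part of the original header):
	 *
	 *	mov_imm	r0, 0x20	->  mov  r0, #0x20
	 *	mov_imm	r1, 0x80000123	->  movw r1, #0x0123
	 *				    movt r1, #0x8000
	 */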

	/*
	 * Macro to mark instances where we're jumping to a function and don't
	 * expect a return. To provide the function being jumped to with
	 * additional information, we use the 'bl' instruction to jump rather
	 * than 'b'.
	 *
	 * Debuggers infer the location of a call from where LR points to, which
	 * is usually the instruction after 'bl'. If this macro expansion
	 * happens to be the last location in a function, that'll cause the LR
	 * to point to a location beyond the function, thereby misleading the
	 * debugger back trace. We therefore insert a 'nop' after the function
	 * call for debug builds, unless the 'skip_nop' parameter is non-zero.
	 */
	.macro no_ret _func:req, skip_nop=0
	bl \_func
#if DEBUG
	.ifeq \skip_nop
	nop
	.endif
#endif
	.endm
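	/*
	 * Illustrative usage sketch (not part of the original header),
	 * jumping to a handler that is not expected to return:
	 *
	 *	no_ret	plat_panic_handler
	 */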
174
Jeenu Viswambharan54ec86a2017-01-19 14:23:36 +0000175 /*
176 * Reserve space for a spin lock in assembly file.
177 */
178 .macro define_asm_spinlock _name:req
179 .align SPINLOCK_ASM_ALIGN
180 \_name:
181 .space SPINLOCK_ASM_SIZE
182 .endm
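	/*
	 * Illustrative usage sketch (not part of the original header):
	 * 'my_lock' is a hypothetical name; the resulting lock can be taken
	 * with the spin_lock()/spin_unlock() helpers from lib/spinlock.h:
	 *
	 *	define_asm_spinlock my_lock
	 */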
183
Yatharth Kochara9f776c2016-11-10 16:17:51 +0000184 /*
185 * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
186 * and the top 32 bits of `_val` into `_reg_h`. If either the bottom
187 * or top word of `_val` is zero, the corresponding OR operation
188 * is skipped.
189 */
190 .macro orr64_imm _reg_l, _reg_h, _val
191 .if (\_val >> 32)
192 orr \_reg_h, \_reg_h, #(\_val >> 32)
193 .endif
194 .if (\_val & 0xffffffff)
195 orr \_reg_l, \_reg_l, #(\_val & 0xffffffff)
196 .endif
197 .endm
198
199 /*
200 * Helper macro to bitwise-clear bits in `_reg_l` and
201 * `_reg_h` given a 64 bit immediate `_val`. The set bits
202 * in the bottom word of `_val` dictate which bits from
203 * `_reg_l` should be cleared. Similarly, the set bits in
204 * the top word of `_val` dictate which bits from `_reg_h`
205 * should be cleared. If either the bottom or top word of
206 * `_val` is zero, the corresponding BIC operation is skipped.
207 */
208 .macro bic64_imm _reg_l, _reg_h, _val
209 .if (\_val >> 32)
210 bic \_reg_h, \_reg_h, #(\_val >> 32)
211 .endif
212 .if (\_val & 0xffffffff)
213 bic \_reg_l, \_reg_l, #(\_val & 0xffffffff)
214 .endif
215 .endm
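	/*
	 * Worked example (illustrative): with the low word in r0 and the high
	 * word in r1, setting bit 32 and clearing bit 0 of the 64-bit value
	 * could read:
	 *
	 *	orr64_imm	r0, r1, (1 << 32)
	 *	bic64_imm	r0, r1, 0x1
	 *
	 * The first expands to a single orr on r1 (the low-word orr is
	 * skipped); the second to a single bic on r0.
	 */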

	/*
	 * Helper macro for carrying out division in software when
	 * hardware division is not supported. \top holds the dividend
	 * in the function call and the remainder after
	 * the function is executed. \bot holds the divisor. \div holds
	 * the quotient and \temp is a temporary register used in the
	 * calculation. The division algorithm has been obtained from:
	 * http://www.keil.com/support/man/docs/armasm/armasm_dom1359731155623.htm
	 */
	.macro softudiv div:req, top:req, bot:req, temp:req

	mov \temp, \bot
	cmp \temp, \top, lsr #1
div1:
	movls \temp, \temp, lsl #1
	cmp \temp, \top, lsr #1
	bls div1
	mov \div, #0

div2:
	cmp \top, \temp
	subcs \top, \top, \temp
	adc \div, \div, \div
	mov \temp, \temp, lsr #1
	cmp \temp, \bot
	bhs div2
	.endm
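	/*
	 * Worked example (illustrative): softudiv r0, r1, r2, r3 with
	 * r1 = 100 (\top) and r2 = 7 (\bot) first scales the divisor copy in
	 * r3 (\temp) up to 56, then runs four subtract-and-shift iterations,
	 * leaving the quotient 14 in r0 (\div) and the remainder 2 in r1.
	 */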

	/*
	 * Helper macro equivalent to an adr <reg>, <symbol> instruction for a
	 * <symbol> within the range +/- 4 GB.
	 */
	.macro adr_l, dst, sym
	adrp \dst, \sym
	add \dst, \dst, :lo12:\sym
	.endm
#endif /* ASM_MACROS_S */