/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef __ASM_MACROS_S__
#define __ASM_MACROS_S__

#include <arch.h>
#include <asm_macros_common.S>
#include <spinlock.h>

#define WORD_SIZE	4

	/*
	 * Coprocessor register accessors
	 */
	.macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
	mrc	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	.macro ldcopr16 reg1, reg2, coproc, opc1, CRm
	mrrc	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm

	.macro stcopr reg, coproc, opc1, CRn, CRm, opc2
	mcr	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	.macro stcopr16 reg1, reg2, coproc, opc1, CRm
	mcrr	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm
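
	/*
	 * Illustrative usage of the accessors above (shown as a comment,
	 * not assembled here), assuming SCTLR and SCTLR_C_BIT are defined
	 * as coprocessor operand lists and bit masks in arch.h:
	 *
	 *	ldcopr	r0, SCTLR		@ r0 := SCTLR
	 *	orr	r0, r0, #SCTLR_C_BIT	@ enable data cache bit
	 *	stcopr	r0, SCTLR		@ SCTLR := r0
	 *
	 * A 64-bit coprocessor register would be read into a register pair
	 * with ldcopr16 (mrrc) and written with stcopr16 (mcrr).
	 */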

	/* Cache line size helpers */
	.macro dcache_line_size reg, tmp
	ldcopr	\tmp, CTR
	ubfx	\tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH
	mov	\reg, #WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm

	.macro icache_line_size reg, tmp
	ldcopr	\tmp, CTR
	and	\tmp, \tmp, #CTR_IMINLINE_MASK
	mov	\reg, #WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm
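
	/*
	 * Example (illustrative): computing the data cache line size into
	 * r2, clobbering r3 as the temporary, before a cache maintenance
	 * loop.
	 *
	 *	dcache_line_size r2, r3
	 *	@ r2 now holds the line size in bytes: CTR.DminLine is the
	 *	@ log2 of the line size in words, hence WORD_SIZE << DminLine.
	 */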

	/*
	 * Declare the exception vector table, ensuring that it is aligned
	 * on a 32 byte boundary.
	 */
	.macro vector_base label
	.section .vectors, "ax"
	.align 5
	\label:
	.endm
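
	/*
	 * Illustrative declaration of a vector table with vector_base;
	 * the label and handler names below are hypothetical.
	 *
	 *	vector_base example_vector_table
	 *		b	example_reset_handler	@ offset 0x00: reset
	 *		b	report_exception	@ offset 0x04: undef
	 *		...				@ remaining entries
	 */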

	/*
	 * This macro calculates the base address of the current CPU's
	 * multiprocessor (MP) stack using the plat_my_core_pos() index,
	 * the name of the stack storage and the size of each stack.
	 * Out: r0 = physical address of stack base
	 * Clobber: r14, r1, r2
	 */
	.macro get_my_mp_stack _name, _size
	bl	plat_my_core_pos
	ldr	r2, =(\_name + \_size)
	mov	r1, #\_size
	mla	r0, r0, r1, r2
	.endm

	/*
	 * This macro calculates the base address of a uniprocessor (UP)
	 * stack using the name of the stack storage and the size of the
	 * stack.
	 * Out: r0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	ldr	r0, =(\_name + \_size)
	.endm
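
	/*
	 * Illustrative use of the stack helpers from a cold boot entry
	 * point. The stack symbol and size below are assumptions: the
	 * storage would be reserved elsewhere by the platform code.
	 *
	 *	get_my_mp_stack	platform_normal_stacks, PLATFORM_STACK_SIZE
	 *	mov	sp, r0		@ switch to this CPU's stack
	 */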

#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
	/*
	 * ARMv7 cores without the Virtualization Extensions do not support
	 * the eret instruction.
	 */
	.macro eret
	movs	pc, lr
	.endm
#endif

#if (ARM_ARCH_MAJOR == 7)
	/* ARMv7 does not support the stl instruction */
	.macro stl _reg, _write_lock
	dmb
	str	\_reg, \_write_lock
	dsb
	.endm
#endif
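
	/*
	 * Example (illustrative): releasing a lock with a store-release.
	 * On ARMv8-A AArch32 'stl' is a native instruction; on ARMv7 the
	 * macro above expands to dmb/str/dsb instead.
	 *
	 *	mov	r1, #0
	 *	stl	r1, [r0]	@ r0 holds the lock address
	 */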

	/*
	 * Helper macro to generate the best mov/movw/movt combination
	 * according to the value to be moved.
	 */
	.macro mov_imm _reg, _val
		.if ((\_val) & 0xffff0000) == 0
			mov	\_reg, #(\_val)
		.else
			movw	\_reg, #((\_val) & 0xffff)
			movt	\_reg, #((\_val) >> 16)
		.endif
	.endm
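
	/*
	 * Example (illustrative): the .if above emits a single 'mov' when
	 * the upper 16 bits of the value are zero, and a movw/movt pair
	 * otherwise.
	 *
	 *	mov_imm	r0, 0x1000	@ expands to: mov  r0, #0x1000
	 *	mov_imm	r1, 0x80001000	@ expands to: movw r1, #0x1000
	 *				@             movt r1, #0x8000
	 */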

	/*
	 * Macro to mark instances where we're jumping to a function and
	 * don't expect a return. To provide the function being jumped to
	 * with additional information, we use a 'bl' instruction to jump
	 * rather than 'b'.
	 *
	 * Debuggers infer the location of a call from where LR points to,
	 * which is usually the instruction after 'bl'. If this macro
	 * expansion happens to be the last location in a function, that
	 * will cause the LR to point to a location beyond the function,
	 * thereby misleading the debugger back trace. We therefore insert
	 * a 'nop' after the function call for debug builds, unless the
	 * 'skip_nop' parameter is non-zero.
	 */
	.macro no_ret _func:req, skip_nop=0
	bl	\_func
#if DEBUG
	.ifeq \skip_nop
	nop
	.endif
#endif
	.endm
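
	/*
	 * Example (illustrative): jumping to a function that never
	 * returns, e.g. a panic handler. On debug builds the trailing
	 * nop keeps LR pointing inside the calling function, so back
	 * traces resolve to the caller.
	 *
	 *	no_ret	plat_panic_handler
	 */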

	/*
	 * Reserve space for a spin lock in an assembly file.
	 */
	.macro define_asm_spinlock _name:req
	.align	SPINLOCK_ASM_ALIGN
	\_name:
	.space	SPINLOCK_ASM_SIZE
	.endm
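
	/*
	 * Example (illustrative, with a hypothetical lock name): reserving
	 * a lock which assembly or C code can then acquire through the
	 * spinlock API, passing its address in r0.
	 *
	 *	define_asm_spinlock my_asm_lock
	 *	...
	 *	ldr	r0, =my_asm_lock
	 *	bl	spin_lock
	 */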

	/*
	 * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
	 * and the top 32 bits of `_val` into `_reg_h`. If either the
	 * bottom or top word of `_val` is zero, the corresponding OR
	 * operation is skipped.
	 */
	.macro orr64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			orr \_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			orr \_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm

	/*
	 * Helper macro to bitwise-clear bits in `_reg_l` and `_reg_h`
	 * given a 64 bit immediate `_val`. The set bits in the bottom
	 * word of `_val` dictate which bits from `_reg_l` should be
	 * cleared. Similarly, the set bits in the top word of `_val`
	 * dictate which bits from `_reg_h` should be cleared. If either
	 * the bottom or top word of `_val` is zero, the corresponding
	 * BIC operation is skipped.
	 */
	.macro bic64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			bic \_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			bic \_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm
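
	/*
	 * Example (illustrative): setting and clearing bits of a 64-bit
	 * value held in an r0 (low word) / r1 (high word) register pair,
	 * e.g. one read with ldcopr16.
	 *
	 *	orr64_imm r0, r1, 0x100000001	@ orr r1, r1, #1 (bit 32)
	 *					@ orr r0, r0, #1 (bit 0)
	 *	bic64_imm r0, r1, 0xff		@ high word untouched;
	 *					@ bic r0, r0, #0xff
	 */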

#endif /* __ASM_MACROS_S__ */