/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef __ASM_MACROS_S__
#define __ASM_MACROS_S__

#include <arch.h>
#include <asm_macros_common.S>
#include <spinlock.h>

#define WORD_SIZE       4

        /*
         * Coprocessor register accessors
         */
        .macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
        mrc     \coproc, \opc1, \reg, \CRn, \CRm, \opc2
        .endm

        .macro ldcopr16 reg1, reg2, coproc, opc1, CRm
        mrrc    \coproc, \opc1, \reg1, \reg2, \CRm
        .endm

        .macro stcopr reg, coproc, opc1, CRn, CRm, opc2
        mcr     \coproc, \opc1, \reg, \CRn, \CRm, \opc2
        .endm

        .macro stcopr16 reg1, reg2, coproc, opc1, CRm
        mcrr    \coproc, \opc1, \reg1, \reg2, \CRm
        .endm
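
        /*
         * Illustrative use of the accessors above, assuming SCTLR is one
         * of the coprocessor register definitions provided by arch.h
         * (coproc, opc1, CRn, CRm, opc2):
         *   ldcopr  r0, SCTLR         @ read SCTLR into r0
         *   stcopr  r0, SCTLR         @ write r0 back to SCTLR
         */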

        /* Cache line size helpers */
        .macro  dcache_line_size reg, tmp
        ldcopr  \tmp, CTR
        ubfx    \tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH
        mov     \reg, #WORD_SIZE
        lsl     \reg, \reg, \tmp
        .endm

        .macro  icache_line_size reg, tmp
        ldcopr  \tmp, CTR
        and     \tmp, \tmp, #CTR_IMINLINE_MASK
        mov     \reg, #WORD_SIZE
        lsl     \reg, \reg, \tmp
        .endm
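
        /*
         * For example, to leave the data cache line size (in bytes) in
         * r0, using r1 as scratch:
         *   dcache_line_size r0, r1
         */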

        /*
         * Declare the exception vector table, enforcing it is aligned on a
         * 32 byte boundary.
         */
        .macro vector_base label
        .section .vectors, "ax"
        .align 5
        \label:
        .endm
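
        /*
         * Typical use: declare the table, then lay out one branch per
         * exception vector (the handler names here are only placeholders):
         *   vector_base plat_exception_vectors
         *       b       reset_handler
         *       b       undef_handler
         *       b       svc_handler
         *       ...
         */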

        /*
         * This macro calculates the base address of the current CPU's
         * multiprocessor (MP) stack using the plat_my_core_pos() index,
         * the name of the stack storage and the size of each stack.
         * Out: r0 = physical address of stack base
         * Clobber: r14, r1, r2
         */
        .macro get_my_mp_stack _name, _size
        bl      plat_my_core_pos
        ldr     r2, =(\_name + \_size)
        mov     r1, #\_size
        mla     r0, r0, r1, r2
        .endm

        /*
         * This macro calculates the base address of a uniprocessor (UP)
         * stack using the name of the stack storage and the size of the
         * stack.
         * Out: r0 = physical address of stack base
         */
        .macro get_up_stack _name, _size
        ldr     r0, =(\_name + \_size)
        .endm
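
        /*
         * Sketch of a plat_get_my_stack implementation built on the MP
         * stack macro above, assuming platform_normal_stacks names the
         * stack storage, PLATFORM_STACK_SIZE is the per-CPU stack size,
         * and func/endfunc come from asm_macros_common.S:
         *   func plat_get_my_stack
         *       get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
         *       bx      lr
         *   endfunc plat_get_my_stack
         */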

#if (ARM_ARCH_MAJOR == 7)
        /* ARMv7 does not support the stl instruction */
        .macro stl _reg, _write_lock
        dmb
        str     \_reg, \_write_lock
        dsb
        .endm
#endif
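
        /*
         * For example, releasing a lock whose address is in r0 with
         * store-release semantics (the real stl instruction on ARMv8,
         * the barrier-based emulation above on ARMv7):
         *   mov     r1, #0
         *   stl     r1, [r0]
         */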

        /*
         * Helper macro to generate the best mov/movw/movt combinations
         * according to the value to be moved.
         */
        .macro mov_imm _reg, _val
                .if ((\_val) & 0xffff0000) == 0
                        mov     \_reg, #(\_val)
                .else
                        movw    \_reg, #((\_val) & 0xffff)
                        movt    \_reg, #((\_val) >> 16)
                .endif
        .endm
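
        /*
         * For example, 0x1000 has no bits set in its top halfword and
         * becomes a single mov, while 0x12345678 expands to a movw/movt
         * pair:
         *   mov_imm r0, 0x1000
         *   mov_imm r1, 0x12345678
         */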

        /*
         * Macro to mark instances where we're jumping to a function and
         * don't expect a return. To provide the function being jumped to
         * with additional information, we use the 'bl' instruction to
         * jump rather than 'b'.
         *
         * Debuggers infer the location of a call from where LR points to,
         * which is usually the instruction after 'bl'. If this macro
         * expansion happens to be the last location in a function, that
         * will cause the LR to point to a location beyond the function,
         * thereby misleading the debugger's back trace. We therefore
         * insert a 'nop' after the function call for debug builds, unless
         * the 'skip_nop' parameter is non-zero.
         */
        .macro no_ret _func:req, skip_nop=0
        bl      \_func
#if DEBUG
        .ifeq \skip_nop
        nop
        .endif
#endif
        .endm
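
        /*
         * For example, to hand control to a panic handler that is not
         * expected to return (plat_panic_handler here is only an
         * illustration):
         *   no_ret  plat_panic_handler
         */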

        /*
         * Reserve space for a spin lock in an assembly file.
         */
        .macro define_asm_spinlock _name:req
        .align  SPINLOCK_ASM_ALIGN
        \_name:
        .space  SPINLOCK_ASM_SIZE
        .endm
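
        /*
         * For example, to define a lock and later take it (the name
         * my_lock is illustrative; spin_lock is assumed to be the lock
         * routine belonging to the spinlock.h interface included above):
         *   define_asm_spinlock my_lock
         *   ...
         *   ldr     r0, =my_lock
         *   bl      spin_lock
         */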

        /*
         * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
         * and the top 32 bits of `_val` into `_reg_h`. If either the
         * bottom or top word of `_val` is zero, the corresponding OR
         * operation is skipped.
         */
        .macro orr64_imm _reg_l, _reg_h, _val
                .if (\_val >> 32)
                        orr \_reg_h, \_reg_h, #(\_val >> 32)
                .endif
                .if (\_val & 0xffffffff)
                        orr \_reg_l, \_reg_l, #(\_val & 0xffffffff)
                .endif
        .endm
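
        /*
         * For example, with a 64-bit value held in r0 (low word) and r1
         * (high word), setting bit 0 of each half:
         *   orr64_imm r0, r1, 0x100000001
         * expands to 'orr r1, r1, #1' and 'orr r0, r0, #1'.
         */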

        /*
         * Helper macro to bitwise-clear bits in `_reg_l` and `_reg_h`
         * given a 64 bit immediate `_val`. The set bits in the bottom
         * word of `_val` dictate which bits from `_reg_l` should be
         * cleared. Similarly, the set bits in the top word of `_val`
         * dictate which bits from `_reg_h` should be cleared. If either
         * the bottom or top word of `_val` is zero, the corresponding
         * BIC operation is skipped.
         */
        .macro bic64_imm _reg_l, _reg_h, _val
                .if (\_val >> 32)
                        bic \_reg_h, \_reg_h, #(\_val >> 32)
                .endif
                .if (\_val & 0xffffffff)
                        bic \_reg_l, \_reg_l, #(\_val & 0xffffffff)
                .endif
        .endm
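
        /*
         * For example, with a 64-bit value held in r0 (low word) and r1
         * (high word), clearing bits 1:0 of each half:
         *   bic64_imm r0, r1, 0x300000003
         * expands to 'bic r1, r1, #3' and 'bic r0, r0, #3'.
         */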

#endif /* __ASM_MACROS_S__ */