/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef __ASM_MACROS_S__
#define __ASM_MACROS_S__

#include <arch.h>
#include <asm_macros_common.S>
#include <spinlock.h>

#define WORD_SIZE	4

	/*
	 * Coprocessor register accessors
	 */
	.macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
	mrc	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	.macro ldcopr16 reg1, reg2, coproc, opc1, CRm
	mrrc	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm

	.macro stcopr reg, coproc, opc1, CRn, CRm, opc2
	mcr	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	.macro stcopr16 reg1, reg2, coproc, opc1, CRm
	mcrr	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm

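	/*
	 * Illustrative usage (a sketch, assuming arch.h defines SCTLR as the
	 * "p15, 0, c1, c0, 0" operand list and SCTLR_C_BIT as the data cache
	 * enable bit): read-modify-write of a 32-bit CP15 register.
	 *   ldcopr	r0, SCTLR		@ r0 = SCTLR
	 *   orr	r0, r0, #SCTLR_C_BIT	@ set the C bit
	 *   stcopr	r0, SCTLR		@ SCTLR = r0
	 */
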
	/* Cache line size helpers */
	.macro dcache_line_size reg, tmp
	ldcopr	\tmp, CTR
	ubfx	\tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH
	mov	\reg, #WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm

	.macro icache_line_size reg, tmp
	ldcopr	\tmp, CTR
	and	\tmp, \tmp, #CTR_IMINLINE_MASK
	mov	\reg, #WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm

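	/*
	 * Illustrative usage (a sketch): CTR.DminLine holds log2 of the
	 * number of words in the smallest data cache line, so the macro
	 * yields WORD_SIZE << DminLine bytes.
	 *   dcache_line_size r2, r3	@ r2 = line size in bytes, r3 clobbered
	 */
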
	/*
	 * Declare the exception vector table, ensuring it is aligned on a
	 * 32-byte boundary.
	 */
	.macro vector_base label
	.section .vectors, "ax"
	.align 5
	\label:
	.endm

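	/*
	 * Illustrative usage (a sketch, with hypothetical label and handler
	 * names); the eight AArch32 vector entries follow the label:
	 *   vector_base plat_vector_table
	 *	b	reset_handler		@ Reset
	 *	b	undef_handler		@ Undefined instruction
	 *	...				@ remaining vector entries
	 */
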
	/*
	 * This macro calculates the base address of the current CPU's
	 * multi-processor (MP) stack using the plat_my_core_pos() index,
	 * the name of the stack storage and the size of each stack.
	 * Out: r0 = physical address of stack base
	 * Clobber: r14, r1, r2
	 */
	.macro get_my_mp_stack _name, _size
	bl	plat_my_core_pos
	ldr	r2, =(\_name + \_size)
	mov	r1, #\_size
	mla	r0, r0, r1, r2
	.endm

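	/*
	 * Illustrative usage (a sketch, assuming platform_normal_stacks and
	 * PLATFORM_STACK_SIZE are provided by the platform): set up the
	 * current CPU's stack pointer. Stacks grow downwards, so the value
	 * returned is the high end of this CPU's stack region.
	 *   get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
	 *   mov	sp, r0
	 */
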
	/*
	 * This macro calculates the base address of a uniprocessor (UP)
	 * stack using the name of the stack storage and the size of the
	 * stack.
	 * Out: r0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	ldr	r0, =(\_name + \_size)
	.endm

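	/*
	 * Illustrative usage (a sketch, with hypothetical symbol names):
	 * initialise the stack pointer in a single-CPU image.
	 *   get_up_stack bl1_stack, BL1_STACK_SIZE
	 *   mov	sp, r0
	 */
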
	/*
	 * Helper macro to generate the best mov/movw/movt combination
	 * according to the value to be moved.
	 */
	.macro mov_imm _reg, _val
		.if ((\_val) & 0xffff0000) == 0
			mov	\_reg, #(\_val)
		.else
			movw	\_reg, #((\_val) & 0xffff)
			movt	\_reg, #((\_val) >> 16)
		.endif
	.endm

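	/*
	 * Illustrative expansions (a sketch):
	 *   mov_imm	r0, 0x1000	@ top half zero -> mov  r0, #0x1000
	 *   mov_imm	r1, 0x12345678	@ otherwise     -> movw r1, #0x5678
	 *				@                  movt r1, #0x1234
	 */
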
	/*
	 * Macro to mark instances where we're jumping to a function and don't
	 * expect a return. To provide the function being jumped to with
	 * additional information, we use the 'bl' instruction to jump rather
	 * than 'b'.
	 *
	 * Debuggers infer the location of a call from where LR points to,
	 * which is usually the instruction after 'bl'. If this macro expansion
	 * happens to be the last location in a function, that'll cause the LR
	 * to point to a location beyond the function, thereby misleading the
	 * debugger's backtrace. We therefore insert a 'nop' after the function
	 * call for debug builds, unless the 'skip_nop' parameter is non-zero.
	 */
	.macro no_ret _func:req, skip_nop=0
	bl	\_func
#if DEBUG
	.ifeq \skip_nop
	nop
	.endif
#endif
	.endm

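	/*
	 * Illustrative usage (hypothetical function name):
	 *   no_ret	plat_panic_handler	@ hand over control, never return
	 */
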
	/*
	 * Reserve space for a spin lock in an assembly file.
	 */
	.macro define_asm_spinlock _name:req
	.align	SPINLOCK_ASM_ALIGN
	\_name:
	.space	SPINLOCK_ASM_SIZE
	.endm

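	/*
	 * Illustrative usage (hypothetical lock name); the resulting symbol
	 * can then be passed to the spin_lock()/spin_unlock() helpers
	 * declared in spinlock.h:
	 *   define_asm_spinlock reg_lock
	 */
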
	/*
	 * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
	 * and the top 32 bits of `_val` into `_reg_h`. If either the bottom
	 * or top word of `_val` is zero, the corresponding OR operation
	 * is skipped.
	 */
	.macro orr64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			orr	\_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			orr	\_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm

	/*
	 * Helper macro to bitwise-clear bits in `_reg_l` and
	 * `_reg_h` given a 64 bit immediate `_val`. The set bits
	 * in the bottom word of `_val` dictate which bits from
	 * `_reg_l` should be cleared. Similarly, the set bits in
	 * the top word of `_val` dictate which bits from `_reg_h`
	 * should be cleared. If either the bottom or top word of
	 * `_val` is zero, the corresponding BIC operation is skipped.
	 */
	.macro bic64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			bic	\_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			bic	\_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm

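	/*
	 * Illustrative usage (a sketch, assuming arch.h provides CNTVOFF_64
	 * as the "coproc, opc1, CRm" operand list of the 64-bit CNTVOFF
	 * register): set bit 32 and clear the bottom byte of a 64-bit value
	 * held in the r1:r0 pair.
	 *   ldcopr16	r0, r1, CNTVOFF_64	@ r1:r0 = CNTVOFF
	 *   orr64_imm	r0, r1, 0x100000000
	 *   bic64_imm	r0, r1, 0xff
	 *   stcopr16	r0, r1, CNTVOFF_64	@ CNTVOFF = r1:r0
	 */
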
#endif /* __ASM_MACROS_S__ */