/*
 * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __ASM_MACROS_S__
#define __ASM_MACROS_S__

#include <arch.h>


	/* Save the frame pointer and link register and set up a frame record */
	.macro	func_prologue
	stp	x29, x30, [sp, #-0x10]!
	mov	x29, sp
	.endm

	/* Restore the frame pointer and link register saved by func_prologue */
	.macro	func_epilogue
	ldp	x29, x30, [sp], #0x10
	.endm


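	/*
	 * Illustrative use of the prologue/epilogue pair (sketch only; the
	 * function and callee names below are hypothetical and not defined
	 * in this file):
	 *
	 *	my_func:
	 *	func_prologue			// x29/x30 saved, frame set up
	 *	bl	some_helper		// safe to clobber the link register
	 *	func_epilogue			// x29/x30 restored
	 *	ret
	 */
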
	/*
	 * Write the smallest D-cache line size, in bytes, to \reg; \tmp is
	 * clobbered. CTR_EL0.DminLine (bits [19:16]) gives log2 of that size
	 * in words.
	 */
	.macro	dcache_line_size  reg, tmp
	mrs	\tmp, ctr_el0
	ubfx	\tmp, \tmp, #16, #4
	mov	\reg, #4
	lsl	\reg, \reg, \tmp
	.endm


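	/*
	 * Illustrative use (sketch only; register assignments assumed, not
	 * taken from this file): clean and invalidate a buffer by VA, with
	 * x0 = start address (assumed line-aligned) and x1 = end address.
	 *
	 *	dcache_line_size x2, x3
	 *	1:
	 *	dc	civac, x0
	 *	add	x0, x0, x2
	 *	cmp	x0, x1
	 *	b.lo	1b
	 *	dsb	sy
	 */
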
	/*
	 * Write the smallest I-cache line size, in bytes, to \reg; \tmp is
	 * clobbered. CTR_EL0.IminLine (bits [3:0]) gives log2 of that size
	 * in words.
	 */
	.macro	icache_line_size  reg, tmp
	mrs	\tmp, ctr_el0
	and	\tmp, \tmp, #0xf
	mov	\reg, #4
	lsl	\reg, \reg, \tmp
	.endm


	/*
	 * Branch to \label unless the exception that brought us to EL3 was an
	 * SMC executed in AArch64 state. Clobbers x0.
	 */
	.macro	smc_check label
	mrs	x0, esr_el3
	ubfx	x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x0, #EC_AARCH64_SMC
	b.ne	\label
	.endm


	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes the
	 * vector entry label as its parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

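	/*
	 * Illustrative use (sketch only; the labels below are hypothetical
	 * and not defined in this file):
	 *
	 *	sync_exception_sp_el0:
	 *	b	report_unhandled_exception
	 *	check_vector_size sync_exception_sp_el0
	 */
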
	/*
	 * This macro is used to create a function label and place the
	 * code into a separate text section based on the function name
	 * to enable elimination of unused code during linking
	 */
	.macro func _name
	.section .text.\_name, "ax"
	.type \_name, %function
	.func \_name
	\_name:
	.endm

	/*
	 * This macro is used to mark the end of a function.
	 */
	.macro endfunc _name
	.endfunc
	.size \_name, . - \_name
	.endm

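	/*
	 * Illustrative use of func/endfunc (sketch only; the function name is
	 * hypothetical): the code ends up in its own ".text.my_helper"
	 * section, so the linker can drop it if it is never referenced.
	 *
	 *	func my_helper
	 *	mov	x0, #0
	 *	ret
	 *	endfunc my_helper
	 */
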
	/*
	 * This macro declares an array of 1 or more stacks, properly
	 * aligned and in the requested section
	 */
#define STACK_ALIGN	6

	.macro declare_stack _name, _section, _size, _count
	.if ((\_size & ((1 << STACK_ALIGN) - 1)) <> 0)
	  .error "Stack size not correctly aligned"
	.endif
	.section    \_section, "aw", %nobits
	.align STACK_ALIGN
	\_name:
	.space ((\_count) * (\_size)), 0
	.endm

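	/*
	 * Illustrative use (sketch only; the symbol, section and size names
	 * are assumed, not defined in this file): reserve one 64-byte-aligned
	 * stack per core in a NOBITS section.
	 *
	 *	declare_stack platform_normal_stacks, tzfw_normal_stacks, PLATFORM_STACK_SIZE, PLATFORM_CORE_COUNT
	 */
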
#if ENABLE_PLAT_COMPAT
	/*
	 * This macro calculates the base address of an MP stack using the
	 * platform_get_core_pos() index, the name of the stack storage and
	 * the size of each stack
	 * In: X0 = MPIDR of CPU whose stack is wanted
	 * Out: X0 = physical address of stack base
	 * Clobber: X30, X1, X2
	 */
	.macro get_mp_stack _name, _size
	bl  platform_get_core_pos
	ldr x2, =(\_name + \_size)
	mov x1, #\_size
	madd x0, x0, x1, x2
	.endm
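	/*
	 * Illustrative use (sketch only; the stack symbol and size macro are
	 * assumed): switch to the stack belonging to the current CPU.
	 *
	 *	mrs	x0, mpidr_el1
	 *	get_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
	 *	mov	sp, x0
	 */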
#endif

	/*
	 * This macro calculates the base address of the current CPU's MP stack
	 * using the plat_my_core_pos() index, the name of the stack storage
	 * and the size of each stack
	 * Out: X0 = physical address of stack base
	 * Clobber: X30, X1, X2
	 */
	.macro get_my_mp_stack _name, _size
	bl  plat_my_core_pos
	ldr x2, =(\_name + \_size)
	mov x1, #\_size
	madd x0, x0, x1, x2
	.endm

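	/*
	 * Illustrative use (sketch only; the stack symbol and size macro are
	 * assumed): point SP at this CPU's stack without needing the MPIDR.
	 *
	 *	get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
	 *	mov	sp, x0
	 */
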
	/*
	 * This macro calculates the base address of a UP stack using the
	 * name of the stack storage and the size of the stack
	 * Out: X0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	ldr x0, =(\_name + \_size)
	.endm

	/*
	 * Helper macro to generate the best mov/movk combination according to
	 * the value to be moved. The 16 bits at bit position '_shift' are
	 * tested and, if not zero, are moved into '_reg' without affecting
	 * the other bits.
	 */
	.macro _mov_imm16 _reg, _val, _shift
		.if (\_val >> \_shift) & 0xffff
			.if (\_val & (1 << \_shift - 1))
				movk	\_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
			.else
				mov	\_reg, \_val & (0xffff << \_shift)
			.endif
		.endif
	.endm

	/*
	 * Helper macro to load arbitrary values into 32 or 64-bit registers,
	 * generating the best mov/movk combinations. Many base addresses are
	 * 64KB aligned; in that case the macro avoids updating bits 15:0.
	 */
	.macro mov_imm _reg, _val
		.if (\_val) == 0
			mov	\_reg, #0
		.else
			_mov_imm16	\_reg, (\_val), 0
			_mov_imm16	\_reg, (\_val), 16
			_mov_imm16	\_reg, (\_val), 32
			_mov_imm16	\_reg, (\_val), 48
		.endif
	.endm
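	/*
	 * Illustrative expansions (sketch only; example values assumed):
	 *
	 *	mov_imm	x0, 0x82000000	// 64KB aligned: expands to a single
	 *				// "mov x0, #0x82000000"
	 *	mov_imm	x1, 0x12345678	// expands to "mov x1, #0x5678"
	 *				// followed by "movk x1, #0x1234, LSL 16"
	 */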

#endif /* __ASM_MACROS_S__ */