/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>


	.macro	func_prologue
	stp	x29, x30, [sp, #-0x10]!
	mov	x29, sp
	.endm

	.macro	func_epilogue
	ldp	x29, x30, [sp], #0x10
	.endm

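	/*
	 * Usage sketch (illustrative only, not part of this file): a
	 * function that needs a frame, e.g. because it makes further
	 * calls that clobber the link register. "my_helper" and
	 * "some_other_func" are hypothetical names.
	 *
	 * my_helper:
	 *	func_prologue
	 *	bl	some_other_func
	 *	func_epilogue
	 *	ret
	 */
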
	/*
	 * Return the minimum D-cache line size, in bytes, in \reg.
	 * CTR_EL0.DminLine (bits [19:16]) holds log2 of the line size
	 * in words, so the line size is 4 << DminLine bytes.
	 */
	.macro	dcache_line_size  reg, tmp
	mrs	\tmp, ctr_el0
	ubfx	\tmp, \tmp, #16, #4
	mov	\reg, #4
	lsl	\reg, \reg, \tmp
	.endm

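	/*
	 * Usage sketch (illustrative only): clean a buffer to the point
	 * of coherency one D-cache line at a time; x0 = base, x1 = end.
	 * Register choices and the local label are hypothetical.
	 *
	 *	dcache_line_size x2, x3
	 * 1:
	 *	dc	cvac, x0
	 *	add	x0, x0, x2
	 *	cmp	x0, x1
	 *	b.lo	1b
	 *	dsb	sy
	 */
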
	/*
	 * Return the minimum I-cache line size, in bytes, in \reg.
	 * CTR_EL0.IminLine (bits [3:0]) holds log2 of the line size
	 * in words.
	 */
	.macro	icache_line_size  reg, tmp
	mrs	\tmp, ctr_el0
	and	\tmp, \tmp, #0xf
	mov	\reg, #4
	lsl	\reg, \reg, \tmp
	.endm

	/*
	 * Branch to \label unless the exception syndrome in ESR_EL3
	 * indicates an SMC from AArch64. Clobbers x0.
	 */
	.macro	smc_check label
	mrs	x0, esr_el3
	ubfx	x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x0, #EC_AARCH64_SMC
	b.ne	\label
	.endm

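	/*
	 * Usage sketch (illustrative only): at the top of a synchronous
	 * exception vector, divert anything that is not an AArch64 SMC.
	 * "unexpected_sync_exception" is a hypothetical label.
	 *
	 * sync_exception_aarch64:
	 *	smc_check unexpected_sync_exception
	 *	... handle the SMC ...
	 */
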
	/*
	 * This macro verifies that a given vector entry doesn't exceed
	 * the architectural limit of 32 instructions. It is meant to be
	 * placed immediately after the last instruction in the vector.
	 * It takes the vector entry label as its parameter.
	 */
	.macro check_vector_size since
	.if (. - \since) > (32 * 4)
	.error "Vector exceeds 32 instructions"
	.endif
	.endm

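	/*
	 * Usage sketch (illustrative only): pair the label that opens a
	 * vector entry with check_vector_size after its last instruction.
	 * "report_unhandled_exception" is a hypothetical handler.
	 *
	 * sync_exception_sp_el0:
	 *	b	report_unhandled_exception
	 *	check_vector_size sync_exception_sp_el0
	 */
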
	/*
	 * This macro is used to create a function label and place the
	 * code into a separate text section based on the function name
	 * to enable elimination of unused code during linking
	 */
	.macro func _name
	.section .text.\_name, "ax"
	.type \_name, %function
	\_name:
	.endm

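	/*
	 * Usage sketch (illustrative only): each function lands in its
	 * own .text.<name> section, so linking with --gc-sections can
	 * drop any that are never referenced.
	 *
	 *	func	my_unused_helper
	 *	ret
	 */
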
	/* ---------------------------------------------
	 * Find the type of reset and jump to handler
	 * if present. If the handler is null then it is
	 * a cold boot. The primary CPU will set up the
	 * platform while the secondaries wait for
	 * their turn to be woken up
	 * ---------------------------------------------
	 */
	.macro	wait_for_entrypoint
wait_for_entrypoint:
	mrs	x0, mpidr_el1
	bl	platform_get_entrypoint
	cbnz	x0, do_warm_boot
	mrs	x0, mpidr_el1
	bl	platform_is_primary_cpu
	cbnz	x0, do_cold_boot

	/* ---------------------------------------------
	 * Perform any platform specific secondary cpu
	 * actions
	 * ---------------------------------------------
	 */
	bl	plat_secondary_cold_boot_setup
	b	wait_for_entrypoint

do_warm_boot:
	/* ---------------------------------------------
	 * Jump to BL31 for all warm boot init.
	 * ---------------------------------------------
	 */
	blr	x0

do_cold_boot:
	.endm

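	/*
	 * Usage sketch (illustrative only): invoked early in a reset
	 * path; code placed after the macro runs only on a cold boot on
	 * the primary CPU. "reset_handler" is a hypothetical label.
	 *
	 * reset_handler:
	 *	wait_for_entrypoint
	 *	... primary-CPU cold boot initialisation ...
	 */
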
	/*
	 * This macro declares an array of 1 or more stacks, properly
	 * aligned and in the requested section
	 */
#define STACK_ALIGN	6	/* stacks are aligned to 2^6 = 64 bytes */

	.macro declare_stack _name, _section, _size, _count
	.if ((\_size & ((1 << STACK_ALIGN) - 1)) <> 0)
	.error "Stack size not correctly aligned"
	.endif
	.section \_section, "aw", %nobits
	.align STACK_ALIGN
	\_name:
	.space ((\_count) * (\_size)), 0
	.endm

	/*
	 * This macro calculates the base address of an MP stack using
	 * the platform_get_core_pos() index, the name of the stack
	 * storage and the size of each stack.
	 * In: X0 = MPIDR of CPU whose stack is wanted
	 * Out: X0 = physical address of stack base
	 * Clobber: X30, X1, X2
	 */
	.macro get_mp_stack _name, _size
	bl	platform_get_core_pos
	ldr	x2, =(\_name + \_size)
	mov	x1, #\_size
	madd	x0, x0, x1, x2
	.endm

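	/*
	 * Usage sketch (illustrative only): declare one 0x400-byte stack
	 * per CPU, then resolve the calling CPU's stack base and install
	 * it. "platform_normal_stacks", the "tzfw_normal_stacks" section
	 * name and PLATFORM_CORE_COUNT are hypothetical.
	 *
	 *	declare_stack platform_normal_stacks, tzfw_normal_stacks, \
	 *		0x400, PLATFORM_CORE_COUNT
	 *
	 *	mrs	x0, mpidr_el1
	 *	get_mp_stack platform_normal_stacks, 0x400
	 *	mov	sp, x0
	 */
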
	/*
	 * This macro calculates the base address of a UP stack using the
	 * name of the stack storage and the size of the stack
	 * Out: X0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	ldr	x0, =(\_name + \_size)
	.endm

	/*
	 * Helper macro to generate the best mov/movk combinations
	 * according to the value to be moved. The 16 bits from '_shift'
	 * are tested and, if not zero, they are moved into '_reg'
	 * without affecting the other bits.
	 */
	.macro _mov_imm16 _reg, _val, _shift
		.if (\_val >> \_shift) & 0xffff
			.if (\_val & ((1 << \_shift) - 1))
				movk	\_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
			.else
				mov	\_reg, \_val & (0xffff << \_shift)
			.endif
		.endif
	.endm

	/*
	 * Helper macro to load arbitrary values into 32 or 64-bit
	 * registers, generating the best mov/movk combinations. Many
	 * base addresses are 64KB aligned; in that case the macro will
	 * skip updating bits 15:0.
	 */
	.macro mov_imm _reg, _val
		.if (\_val) == 0
			mov	\_reg, #0
		.else
			_mov_imm16	\_reg, (\_val), 0
			_mov_imm16	\_reg, (\_val), 16
			_mov_imm16	\_reg, (\_val), 32
			_mov_imm16	\_reg, (\_val), 48
		.endif
	.endm
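
	/*
	 * Usage sketch (illustrative only): "mov_imm x0, 0x11002200"
	 * expands to
	 *	mov	x0, #0x2200
	 *	movk	x0, #0x1100, lsl #16
	 * while a 64KB-aligned value such as 0x1a200000 needs only a
	 * single mov, since bits 15:0 are zero and are never touched.
	 */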