/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>


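	/*
	 * Convenience macros to create and destroy a basic stack frame:
	 * the prologue saves the frame pointer (x29) and link register
	 * (x30) and points x29 at the new frame record; the epilogue
	 * restores both registers.
	 */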
	.macro	func_prologue
	stp	x29, x30, [sp, #-0x10]!
	mov	x29, sp
	.endm

	.macro	func_epilogue
	ldp	x29, x30, [sp], #0x10
	.endm


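	/*
	 * Compute the smallest data cache line size in bytes: CTR_EL0
	 * bits [19:16] (DminLine) give log2 of the line size in words,
	 * so the result placed in 'reg' is 4 << DminLine.
	 */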
	.macro	dcache_line_size  reg, tmp
	mrs	\tmp, ctr_el0
	ubfx	\tmp, \tmp, #16, #4
	mov	\reg, #4
	lsl	\reg, \reg, \tmp
	.endm


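	/*
	 * Compute the smallest instruction cache line size in bytes:
	 * CTR_EL0 bits [3:0] (IminLine) give log2 of the line size in
	 * words, so the result placed in 'reg' is 4 << IminLine.
	 */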
	.macro	icache_line_size  reg, tmp
	mrs	\tmp, ctr_el0
	and	\tmp, \tmp, #0xf
	mov	\reg, #4
	lsl	\reg, \reg, \tmp
	.endm


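	/*
	 * Branch to 'label' unless the exception taken to EL3 was caused
	 * by an SMC executed in AArch64 state, as reported by the
	 * exception class field of ESR_EL3. Clobbers x0.
	 */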
	.macro	smc_check label
	mrs	x0, esr_el3
	ubfx	x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x0, #EC_AARCH64_SMC
	b.ne	\label
	.endm


	/*
	 * This macro verifies that a given vector entry doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes the
	 * vector entry label as its parameter.
	 */
	.macro check_vector_size since
	.if (. - \since) > (32 * 4)
	  .error "Vector exceeds 32 instructions"
	.endif
	.endm
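
	/*
	 * Illustrative usage only (not part of the original file; the vector
	 * and handler labels below are hypothetical). The macro follows the
	 * last instruction of a 32-instruction (128-byte) vector entry:
	 *
	 *	.align 7
	 *	sync_exception_sp_el0:
	 *		b	handle_sync_exception
	 *	check_vector_size sync_exception_sp_el0
	 */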

	/*
	 * This macro is used to create a function label and place the
	 * code into a separate text section based on the function name
	 * to enable elimination of unused code during linking
	 */
	.macro func _name
	.section .text.\_name, "ax"
	.type \_name, %function
	.func \_name
	\_name:
	.endm

	/*
	 * This macro is used to mark the end of a function.
	 */
	.macro endfunc _name
	.endfunc
	.size \_name, . - \_name
	.endm
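
	/*
	 * Illustrative usage only (not part of the original file; the
	 * function name is hypothetical). Each function gets its own
	 * .text.<name> section, which the linker can discard if unused:
	 *
	 *	func my_helper
	 *		ret
	 *	endfunc my_helper
	 */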

	/* ---------------------------------------------
	 * Find the type of reset and jump to the handler
	 * if one is present. If the handler is null then
	 * this is a cold boot. The primary CPU sets up
	 * the platform while the secondaries wait for
	 * their turn to be woken up.
	 * ---------------------------------------------
	 */
	.macro	wait_for_entrypoint
wait_for_entrypoint:
	mrs	x0, mpidr_el1
	bl	platform_get_entrypoint
	cbnz	x0, do_warm_boot
	mrs	x0, mpidr_el1
	bl	platform_is_primary_cpu
	cbnz	x0, do_cold_boot

	/* ---------------------------------------------
	 * Perform any platform specific secondary cpu
	 * actions
	 * ---------------------------------------------
	 */
	bl	plat_secondary_cold_boot_setup
	b	wait_for_entrypoint

	do_warm_boot:
	/* ---------------------------------------------
	 * Jump to BL31 for all warm boot init.
	 * ---------------------------------------------
	 */
	blr	x0

	do_cold_boot:
	.endm
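
	/*
	 * Illustrative usage only (not part of the original file; the labels
	 * below are hypothetical). The macro is expected to sit at the start
	 * of a reset handler, so that code placed after it only runs on a
	 * primary-CPU cold boot:
	 *
	 *	reset_handler:
	 *		wait_for_entrypoint
	 *		bl	do_primary_cold_boot_setup
	 */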

	/*
	 * This macro declares an array of 1 or more stacks, properly
	 * aligned and in the requested section
	 */
#define STACK_ALIGN	6

	.macro declare_stack _name, _section, _size, _count
	.if ((\_size & ((1 << STACK_ALIGN) - 1)) <> 0)
	  .error "Stack size not correctly aligned"
	.endif
	.section \_section, "aw", %nobits
	.align STACK_ALIGN
	\_name:
	.space ((\_count) * (\_size)), 0
	.endm
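
	/*
	 * Illustrative usage only (not part of the original file; the stack
	 * name, section name and sizes are hypothetical). This declares four
	 * 4KB stacks, 64-byte aligned, in a NOBITS section:
	 *
	 *	declare_stack platform_normal_stacks, tzfw_normal_stacks, \
	 *		0x1000, 4
	 */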

	/*
	 * This macro calculates the base address of an MP stack using the
	 * platform_get_core_pos() index, the name of the stack storage and
	 * the size of each stack
	 * In:      X0 = MPIDR of CPU whose stack is wanted
	 * Out:     X0 = physical address of stack base
	 * Clobber: X30, X1, X2
	 */
	.macro get_mp_stack _name, _size
	bl	platform_get_core_pos
	ldr	x2, =(\_name + \_size)
	mov	x1, #\_size
	madd	x0, x0, x1, x2
	.endm
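
	/*
	 * Illustrative note (not part of the original file; the stack name
	 * and size are the hypothetical ones used above). The madd computes
	 *
	 *	x0 = platform_normal_stacks + 0x1000 + core_pos * 0x1000
	 *
	 * i.e. the address just past the top of the calling CPU's stack,
	 * which can be loaded straight into SP since stacks grow down:
	 *
	 *	mrs	x0, mpidr_el1
	 *	get_mp_stack platform_normal_stacks, 0x1000
	 *	mov	sp, x0
	 */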

	/*
	 * This macro calculates the base address of a UP stack using the
	 * name of the stack storage and the size of the stack
	 * Out: X0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	ldr	x0, =(\_name + \_size)
	.endm
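
	/*
	 * Illustrative usage only (not part of the original file; the stack
	 * name and size are hypothetical):
	 *
	 *	get_up_stack platform_normal_stacks, 0x1000
	 *	mov	sp, x0
	 */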

	/*
	 * Helper macro to generate the best mov/movk combinations according
	 * to the value to be moved. The 16 bits from '_shift' are tested
	 * and, if not zero, they are moved into '_reg' without affecting
	 * the other bits.
	 */
	.macro _mov_imm16 _reg, _val, _shift
		.if (\_val >> \_shift) & 0xffff
			.if (\_val & ((1 << \_shift) - 1))
				movk	\_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
			.else
				mov	\_reg, \_val & (0xffff << \_shift)
			.endif
		.endif
	.endm

	/*
	 * Helper macro to load arbitrary values into 32 or 64-bit registers,
	 * generating the best mov/movk combinations. Many base addresses are
	 * 64KB aligned; in that case the macro skips emitting an instruction
	 * for bits 15:0.
	 */
	.macro mov_imm _reg, _val
		.if (\_val) == 0
			mov	\_reg, #0
		.else
			_mov_imm16	\_reg, (\_val), 0
			_mov_imm16	\_reg, (\_val), 16
			_mov_imm16	\_reg, (\_val), 32
			_mov_imm16	\_reg, (\_val), 48
		.endif
	.endm
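
	/*
	 * Illustrative expansion (not part of the original file): for a value
	 * such as 0x80001234, "mov_imm x0, 0x80001234" expands to
	 *
	 *	mov	x0, #0x1234
	 *	movk	x0, #0x8000, LSL 16
	 *
	 * while a 64KB-aligned value such as 0x80000000 needs only a single
	 *
	 *	mov	x0, #0x80000000
	 */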