/*
 * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __EL3_COMMON_MACROS_S__
#define __EL3_COMMON_MACROS_S__

#include <arch.h>
#include <asm_macros.S>

	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
	.macro el3_arch_init_common _exception_vectors
	/* ---------------------------------------------------------------------
	 * Enable the instruction cache, stack pointer and data access alignment
	 * checks
	 * ---------------------------------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el3
	orr	x0, x0, x1
	msr	sctlr_el3, x0
	isb

#if IMAGE_BL31
	/* ---------------------------------------------------------------------
	 * Initialise the per-cpu cache pointer to the CPU.
	 * This is done early to enable crash reporting to have access to crash
	 * stack. Since crash reporting depends on cpu_data to report the
	 * unhandled exception, not doing so can lead to recursive exceptions
	 * due to a NULL TPIDR_EL3.
	 * ---------------------------------------------------------------------
	 */
	bl	init_cpu_data_ptr
#endif /* IMAGE_BL31 */

	/* ---------------------------------------------------------------------
	 * Set the exception vectors.
	 * ---------------------------------------------------------------------
	 */
	adr	x0, \_exception_vectors
	msr	vbar_el3, x0
	isb

	/* ---------------------------------------------------------------------
	 * Set the RES1 bits in SCR_EL3 early. Set the EA bit to catch both
	 * External Aborts and SError Interrupts in EL3 and also the SIF bit
	 * to disable instruction fetches from Non-secure memory.
	 * ---------------------------------------------------------------------
	 */
	mov	x0, #(SCR_RES1_BITS | SCR_EA_BIT | SCR_SIF_BIT)
	msr	scr_el3, x0
	/* ---------------------------------------------------------------------
	 * Enable External Aborts and SError Interrupts now that the exception
	 * vectors have been set up.
	 * ---------------------------------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------------------------------
	 * The initial state of the Architectural feature trap register
	 * (CPTR_EL3) is unknown and it must be set to a known state. All
	 * feature traps are disabled. Some bits in this register are marked as
	 * reserved and should not be modified.
	 *
	 * CPTR_EL3.TCPAC: This causes a direct access to the CPACR_EL1 from EL1
	 *  or the CPTR_EL2 from EL2 to trap to EL3 unless it is trapped at EL2.
	 *
	 * CPTR_EL3.TTA: This causes access to the Trace functionality to trap
	 *  to EL3 when executed from EL0, EL1, EL2, or EL3. If system register
	 *  access to trace functionality is not supported, this bit is RES0.
	 *
	 * CPTR_EL3.TFP: This causes instructions that access the registers
	 *  associated with Floating Point and Advanced SIMD execution to trap
	 *  to EL3 when executed from any exception level, unless trapped to EL1
	 *  or EL2.
	 * ---------------------------------------------------------------------
	 */
	mrs	x0, cptr_el3
	bic	w0, w0, #TCPAC_BIT
	bic	w0, w0, #TTA_BIT
	bic	w0, w0, #TFP_BIT
	msr	cptr_el3, x0
	.endm

/* -----------------------------------------------------------------------------
 * This is the superset of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL31.
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter allows some of these actions
 * to be enabled or disabled.
 *
 * _set_endian:
 *	Whether the macro needs to configure the endianness of data accesses.
 *
 * _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps to the platform entrypoint address.
 *
 * _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be set to 0 to skip this step.
 *
 * _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 * _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 * _exception_vectors:
 *	Address of the exception vectors to program in the VBAR_EL3 register.
 * -----------------------------------------------------------------------------
 */
	.macro el3_entrypoint_common					\
		_set_endian, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors

	.if \_set_endian
		/* -------------------------------------------------------------
		 * Set the CPU endianness before doing anything that might
		 * involve memory reads or writes.
		 * -------------------------------------------------------------
		 */
		mrs	x0, sctlr_el3
		bic	x0, x0, #SCTLR_EE_BIT
		msr	sctlr_el3, x0
		isb
	.endif /* _set_endian */

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two.
		 * Query the platform entrypoint address; if it is not zero then
		 * this is a warm boot, so jump to that address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cbz	x0, do_cold_boot
		br	x0

	do_cold_boot:
	.endif /* _warm_boot_mailbox */

	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor specific actions upon reset e.g. cache, TLB
	 * invalidations etc.
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler

	el3_arch_init_common \_exception_vectors

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cbnz	w0, do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		bl	el3_panic

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialize memory now. Secondary CPU initialization won't get to this
	 * point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#if IMAGE_BL31
		/* -------------------------------------------------------------
		 * Invalidate the RW memory used by the BL31 image. This
		 * includes the data and NOBITS sections. This is done to
		 * safeguard against possible corruption of this memory by
		 * dirty cache lines in a system cache as a result of use by
		 * an earlier boot loader stage.
		 * -------------------------------------------------------------
		 */
		adr	x0, __RW_START__
		adr	x1, __RW_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#endif /* IMAGE_BL31 */

		ldr	x0, =__BSS_START__
		ldr	x1, =__BSS_SIZE__
		bl	zeromem16

#if USE_COHERENT_MEM
		ldr	x0, =__COHERENT_RAM_START__
		ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
		bl	zeromem16
#endif

#if IMAGE_BL1
		ldr	x0, =__DATA_RAM_START__
		ldr	x1, =__DATA_ROM_START__
		ldr	x2, =__DATA_SIZE__
		bl	memcpy16
#endif
	.endif /* _init_c_runtime */
263
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +0100264 /* ---------------------------------------------------------------------
265 * Use SP_EL0 for the C runtime stack.
266 * ---------------------------------------------------------------------
267 */
268 msr spsel, #0
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +0100269
270 /* ---------------------------------------------------------------------
271 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
272 * the MMU is enabled. There is no risk of reading stale stack memory
273 * after enabling the MMU as only the primary CPU is running at the
274 * moment.
275 * ---------------------------------------------------------------------
276 */
Soby Mathew3700a922015-07-13 11:21:11 +0100277 bl plat_set_my_stack
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +0100278 .endm
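
	/*
	 * Usage sketch (illustrative only, kept inside a comment so that this
	 * header emits no code). A BL image entrypoint is expected to invoke
	 * el3_entrypoint_common with keyword arguments; the parameter values
	 * and the runtime_exceptions vector name below are assumptions chosen
	 * for the example, not requirements of this macro:
	 *
	 *	el3_entrypoint_common				\
	 *		_set_endian=0				\
	 *		_warm_boot_mailbox=1			\
	 *		_secondary_cold_boot=1			\
	 *		_init_memory=0				\
	 *		_init_c_runtime=1			\
	 *		_exception_vectors=runtime_exceptions
	 *
	 * Each BL image's entrypoint code picks its own values depending on
	 * which of the optional actions it needs.
	 */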

#endif /* __EL3_COMMON_MACROS_S__ */