/*
 * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __EL3_COMMON_MACROS_S__
#define __EL3_COMMON_MACROS_S__

#include <arch.h>
#include <asm_macros.S>

37 /*
38 * Helper macro to initialise EL3 registers we care about.
39 */
40 .macro el3_arch_init_common _exception_vectors
41 /* ---------------------------------------------------------------------
42 * Enable the instruction cache, stack pointer and data access alignment
43 * checks
44 * ---------------------------------------------------------------------
45 */
46 mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
47 mrs x0, sctlr_el3
48 orr x0, x0, x1
49 msr sctlr_el3, x0
50 isb
51
Masahiro Yamada441bfdd2016-12-25 23:36:24 +090052#ifdef IMAGE_BL31
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +010053 /* ---------------------------------------------------------------------
54 * Initialise the per-cpu cache pointer to the CPU.
55 * This is done early to enable crash reporting to have access to crash
56 * stack. Since crash reporting depends on cpu_data to report the
57 * unhandled exception, not doing so can lead to recursive exceptions
58 * due to a NULL TPIDR_EL3.
59 * ---------------------------------------------------------------------
60 */
61 bl init_cpu_data_ptr
62#endif /* IMAGE_BL31 */
63
64 /* ---------------------------------------------------------------------
65 * Set the exception vectors.
66 * ---------------------------------------------------------------------
67 */
68 adr x0, \_exception_vectors
69 msr vbar_el3, x0
70 isb
71
72 /* ---------------------------------------------------------------------
Soby Mathew074e05a2016-04-04 12:34:24 +010073 * Early set RES1 bits in SCR_EL3. Set EA bit to catch both
74 * External Aborts and SError Interrupts in EL3 and also the SIF bit
75 * to disable instruction fetches from Non-secure memory.
Gerald Lejeune632d6df2016-03-22 09:29:23 +010076 * ---------------------------------------------------------------------
77 */
Soby Mathew074e05a2016-04-04 12:34:24 +010078 mov x0, #(SCR_RES1_BITS | SCR_EA_BIT | SCR_SIF_BIT)
Gerald Lejeune632d6df2016-03-22 09:29:23 +010079 msr scr_el3, x0
David Cunado5f55e282016-10-31 17:37:34 +000080
81 /* ---------------------------------------------------------------------
dp-arm595d0d52017-02-08 11:51:50 +000082 * Disable secure self-hosted invasive debug.
David Cunado5f55e282016-10-31 17:37:34 +000083 * ---------------------------------------------------------------------
84 */
dp-arm595d0d52017-02-08 11:51:50 +000085 mov_imm x0, MDCR_DEF_VAL
86 msr mdcr_el3, x0
David Cunado5f55e282016-10-31 17:37:34 +000087
Gerald Lejeune632d6df2016-03-22 09:29:23 +010088 /* ---------------------------------------------------------------------
89 * Enable External Aborts and SError Interrupts now that the exception
90 * vectors have been setup.
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +010091 * ---------------------------------------------------------------------
92 */
93 msr daifclr, #DAIF_ABT_BIT
94
95 /* ---------------------------------------------------------------------
96 * The initial state of the Architectural feature trap register
97 * (CPTR_EL3) is unknown and it must be set to a known state. All
98 * feature traps are disabled. Some bits in this register are marked as
99 * reserved and should not be modified.
100 *
101 * CPTR_EL3.TCPAC: This causes a direct access to the CPACR_EL1 from EL1
102 * or the CPTR_EL2 from EL2 to trap to EL3 unless it is trapped at EL2.
103 *
104 * CPTR_EL3.TTA: This causes access to the Trace functionality to trap
105 * to EL3 when executed from EL0, EL1, EL2, or EL3. If system register
106 * access to trace functionality is not supported, this bit is RES0.
107 *
108 * CPTR_EL3.TFP: This causes instructions that access the registers
109 * associated with Floating Point and Advanced SIMD execution to trap
110 * to EL3 when executed from any exception level, unless trapped to EL1
111 * or EL2.
112 * ---------------------------------------------------------------------
113 */
114 mrs x0, cptr_el3
115 bic w0, w0, #TCPAC_BIT
116 bic w0, w0, #TTA_BIT
117 bic w0, w0, #TFP_BIT
118 msr cptr_el3, x0
119 .endm
120
121/* -----------------------------------------------------------------------------
122 * This is the super set of actions that need to be performed during a cold boot
Juan Castillo7d199412015-12-14 09:35:25 +0000123 * or a warm boot in EL3. This code is shared by BL1 and BL31.
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +0100124 *
125 * This macro will always perform reset handling, architectural initialisations
126 * and stack setup. The rest of the actions are optional because they might not
127 * be needed, depending on the context in which this macro is called. This is
128 * why this macro is parameterised ; each parameter allows to enable/disable
129 * some actions.
130 *
131 * _set_endian:
132 * Whether the macro needs to configure the endianness of data accesses.
133 *
134 * _warm_boot_mailbox:
135 * Whether the macro needs to detect the type of boot (cold/warm). The
136 * detection is based on the platform entrypoint address : if it is zero
137 * then it is a cold boot, otherwise it is a warm boot. In the latter case,
138 * this macro jumps on the platform entrypoint address.
139 *
140 * _secondary_cold_boot:
141 * Whether the macro needs to identify the CPU that is calling it: primary
142 * CPU or secondary CPU. The primary CPU will be allowed to carry on with
143 * the platform initialisations, while the secondaries will be put in a
144 * platform-specific state in the meantime.
145 *
146 * If the caller knows this macro will only be called by the primary CPU
147 * then this parameter can be defined to 0 to skip this step.
148 *
149 * _init_memory:
150 * Whether the macro needs to initialise the memory.
151 *
152 * _init_c_runtime:
153 * Whether the macro needs to initialise the C runtime environment.
154 *
155 * _exception_vectors:
156 * Address of the exception vectors to program in the VBAR_EL3 register.
157 * -----------------------------------------------------------------------------
158 */
159 .macro el3_entrypoint_common \
160 _set_endian, _warm_boot_mailbox, _secondary_cold_boot, \
161 _init_memory, _init_c_runtime, _exception_vectors
162
163 .if \_set_endian
164 /* -------------------------------------------------------------
165 * Set the CPU endianness before doing anything that might
166 * involve memory reads or writes.
167 * -------------------------------------------------------------
168 */
169 mrs x0, sctlr_el3
170 bic x0, x0, #SCTLR_EE_BIT
171 msr sctlr_el3, x0
172 isb
173 .endif /* _set_endian */
174
175 .if \_warm_boot_mailbox
176 /* -------------------------------------------------------------
177 * This code will be executed for both warm and cold resets.
178 * Now is the time to distinguish between the two.
179 * Query the platform entrypoint address and if it is not zero
180 * then it means it is a warm boot so jump to this address.
181 * -------------------------------------------------------------
182 */
Soby Mathew3700a922015-07-13 11:21:11 +0100183 bl plat_get_my_entrypoint
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +0100184 cbz x0, do_cold_boot
185 br x0
186
187 do_cold_boot:
188 .endif /* _warm_boot_mailbox */
189
Antonio Nino Diaz4357b412016-02-23 12:04:58 +0000190 /* ---------------------------------------------------------------------
191 * It is a cold boot.
192 * Perform any processor specific actions upon reset e.g. cache, TLB
193 * invalidations etc.
194 * ---------------------------------------------------------------------
195 */
196 bl reset_handler
197
198 el3_arch_init_common \_exception_vectors
199
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +0100200 .if \_secondary_cold_boot
201 /* -------------------------------------------------------------
Antonio Nino Diaz4357b412016-02-23 12:04:58 +0000202 * Check if this is a primary or secondary CPU cold boot.
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +0100203 * The primary CPU will set up the platform while the
204 * secondaries are placed in a platform-specific state until the
205 * primary CPU performs the necessary actions to bring them out
206 * of that state and allows entry into the OS.
207 * -------------------------------------------------------------
208 */
Soby Mathew3700a922015-07-13 11:21:11 +0100209 bl plat_is_my_cpu_primary
Soby Matheweb3bbf12015-06-08 12:32:50 +0100210 cbnz w0, do_primary_cold_boot
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +0100211
212 /* This is a cold boot on a secondary CPU */
213 bl plat_secondary_cold_boot_setup
214 /* plat_secondary_cold_boot_setup() is not supposed to return */
Antonio Nino Diaz1f21bcf2016-02-01 13:57:25 +0000215 bl el3_panic
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +0100216
217 do_primary_cold_boot:
218 .endif /* _secondary_cold_boot */
219
220 /* ---------------------------------------------------------------------
Antonio Nino Diaz4357b412016-02-23 12:04:58 +0000221 * Initialize memory now. Secondary CPU initialization won't get to this
222 * point.
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +0100223 * ---------------------------------------------------------------------
224 */
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +0100225
226 .if \_init_memory
227 bl platform_mem_init
228 .endif /* _init_memory */
229
230 /* ---------------------------------------------------------------------
231 * Init C runtime environment:
232 * - Zero-initialise the NOBITS sections. There are 2 of them:
233 * - the .bss section;
234 * - the coherent memory section (if any).
235 * - Relocate the data section from ROM to RAM, if required.
236 * ---------------------------------------------------------------------
237 */
238 .if \_init_c_runtime
Masahiro Yamada441bfdd2016-12-25 23:36:24 +0900239#ifdef IMAGE_BL31
Achin Guptae9c4a642015-09-11 16:03:13 +0100240 /* -------------------------------------------------------------
241 * Invalidate the RW memory used by the BL31 image. This
242 * includes the data and NOBITS sections. This is done to
243 * safeguard against possible corruption of this memory by
244 * dirty cache lines in a system cache as a result of use by
245 * an earlier boot loader stage.
246 * -------------------------------------------------------------
247 */
248 adr x0, __RW_START__
249 adr x1, __RW_END__
250 sub x1, x1, x0
251 bl inv_dcache_range
252#endif /* IMAGE_BL31 */
253
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +0100254 ldr x0, =__BSS_START__
255 ldr x1, =__BSS_SIZE__
Douglas Raillard21362a92016-12-02 13:51:54 +0000256 bl zeromem
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +0100257
258#if USE_COHERENT_MEM
259 ldr x0, =__COHERENT_RAM_START__
260 ldr x1, =__COHERENT_RAM_UNALIGNED_SIZE__
Douglas Raillard21362a92016-12-02 13:51:54 +0000261 bl zeromem
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +0100262#endif
263
Masahiro Yamada441bfdd2016-12-25 23:36:24 +0900264#ifdef IMAGE_BL1
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +0100265 ldr x0, =__DATA_RAM_START__
266 ldr x1, =__DATA_ROM_START__
267 ldr x2, =__DATA_SIZE__
268 bl memcpy16
269#endif
270 .endif /* _init_c_runtime */
271
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +0100272 /* ---------------------------------------------------------------------
273 * Use SP_EL0 for the C runtime stack.
274 * ---------------------------------------------------------------------
275 */
276 msr spsel, #0
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +0100277
278 /* ---------------------------------------------------------------------
279 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
280 * the MMU is enabled. There is no risk of reading stale stack memory
281 * after enabling the MMU as only the primary CPU is running at the
282 * moment.
283 * ---------------------------------------------------------------------
284 */
Soby Mathew3700a922015-07-13 11:21:11 +0100285 bl plat_set_my_stack
Douglas Raillard306593d2017-02-24 18:14:15 +0000286
287#if STACK_PROTECTOR_ENABLED
288 .if \_init_c_runtime
289 bl update_stack_protector_canary
290 .endif /* _init_c_runtime */
291#endif
Sandrine Bailleuxacde8b02015-05-19 11:54:45 +0100292 .endm

#endif /* __EL3_COMMON_MACROS_S__ */