/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef __EL3_COMMON_MACROS_S__
#define __EL3_COMMON_MACROS_S__

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
	.macro el3_arch_init_common _exception_vectors
	/* ---------------------------------------------------------------------
	 * Enable the instruction cache and alignment checks
	 * ---------------------------------------------------------------------
	 */
	ldr	r1, =(SCTLR_RES1 | SCTLR_I_BIT | SCTLR_A_BIT)
	ldcopr	r0, SCTLR
	orr	r0, r0, r1
	stcopr	r0, SCTLR
	isb

	/* ---------------------------------------------------------------------
	 * Set the exception vectors (VBAR/MVBAR).
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =\_exception_vectors
	stcopr	r0, VBAR
	stcopr	r0, MVBAR
	isb

	/* -----------------------------------------------------
	 * Set the SIF bit to disable instruction fetches from
	 * Non-secure memory while in Secure state.
	 * -----------------------------------------------------
	 */
	ldcopr	r0, SCR
	orr	r0, r0, #SCR_SIF_BIT
	stcopr	r0, SCR

	/* -----------------------------------------------------
	 * Enable asynchronous data aborts now that the
	 * exception vectors have been set up.
	 * -----------------------------------------------------
	 */
	cpsie	a
	isb

	/*
	 * Allow Non-secure access to the Advanced SIMD registers (CP10/CP11)
	 * and to the Trace functionality (NSACR).
	 */
	ldcopr	r0, NSACR
	bic	r0, r0, #NSASEDIS_BIT
	bic	r0, r0, #NSTRCDIS_BIT
	orr	r0, r0, #(NASCR_CP10_BIT | NASCR_CP11_BIT)
	stcopr	r0, NSACR
	isb

	/*
	 * Enable access to the Advanced SIMD, floating-point and Trace
	 * functionality (CPACR).
	 */
	ldcopr	r0, CPACR
	bic	r0, r0, #ASEDIS_BIT
	bic	r0, r0, #TRCDIS_BIT
	orr	r0, r0, #CPACR_ENABLE_FP_ACCESS
	stcopr	r0, CPACR
	isb

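	/*
	 * Set the EN bit in FPEXC so that Advanced SIMD and floating-point
	 * instructions can be executed; while EN is 0, most of these
	 * instructions are UNDEFINED.
	 */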
	vmrs	r0, FPEXC
	orr	r0, r0, #FPEXC_EN_BIT
	vmsr	FPEXC, r0
	isb

	/* Disable secure self-hosted invasive debug. */
	ldr	r0, =SDCR_DEF_VAL
	stcopr	r0, SDCR

	.endm

/* -----------------------------------------------------------------------------
 * This is the superset of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL32 (SP_MIN).
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter allows an action to be
 * enabled or disabled.
 *
 * _set_endian:
 *	Whether the macro needs to configure the endianness of data accesses.
 *
 * _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps to the platform entrypoint address.
 *
 * _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be set to 0 to skip this step.
 *
 * _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 * _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 * _exception_vectors:
 *	Address of the exception vectors to program in the VBAR and MVBAR
 *	registers.
 * -----------------------------------------------------------------------------
 */
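/*
 * For illustration only: a typical invocation of this macro from an image
 * entrypoint might look as shown below. The parameter values and the vector
 * table symbol used here are assumptions for the example, not a prescription;
 * each call site (BL1, SP_MIN) passes its own values.
 *
 *	el3_entrypoint_common					\
 *		_set_endian=1					\
 *		_warm_boot_mailbox=1				\
 *		_secondary_cold_boot=1				\
 *		_init_memory=1					\
 *		_init_c_runtime=1				\
 *		_exception_vectors=sp_min_vector_table
 */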
	.macro el3_entrypoint_common					\
		_set_endian, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors

	/* Make sure we are in Secure Mode */
#if ENABLE_ASSERTIONS
	ldcopr	r0, SCR
	tst	r0, #SCR_NS_BIT
	ASM_ASSERT(eq)
#endif

	.if \_set_endian
	/* -------------------------------------------------------------
	 * Set the CPU endianness before doing anything that might
	 * involve memory reads or writes.
	 * -------------------------------------------------------------
	 */
	ldcopr	r0, SCTLR
	bic	r0, r0, #SCTLR_EE_BIT
	stcopr	r0, SCTLR
	isb
	.endif /* _set_endian */

	/* Switch to monitor mode */
	cps	#MODE32_mon
	isb

	.if \_warm_boot_mailbox
	/* -------------------------------------------------------------
	 * This code will be executed for both warm and cold resets.
	 * Now is the time to distinguish between the two.
	 * Query the platform entrypoint address; if it is not zero
	 * then this is a warm boot, so jump to that address.
	 * -------------------------------------------------------------
	 */
	bl	plat_get_my_entrypoint
	cmp	r0, #0
	bxne	r0
	.endif /* _warm_boot_mailbox */

	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor-specific actions upon reset, e.g. cache and
	 * TLB invalidations.
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler

	el3_arch_init_common \_exception_vectors

	.if \_secondary_cold_boot
	/* -------------------------------------------------------------
	 * Check if this is a primary or secondary CPU cold boot.
	 * The primary CPU will set up the platform while the
	 * secondaries are placed in a platform-specific state until the
	 * primary CPU performs the necessary actions to bring them out
	 * of that state and allows entry into the OS.
	 * -------------------------------------------------------------
	 */
	bl	plat_is_my_cpu_primary
	cmp	r0, #0
	bne	do_primary_cold_boot

	/* This is a cold boot on a secondary CPU */
	bl	plat_secondary_cold_boot_setup
	/* plat_secondary_cold_boot_setup() is not supposed to return */
	no_ret	plat_panic_handler

do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialise the memory now. Secondary CPUs do not reach this point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
	bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#ifdef IMAGE_BL32
	/* -----------------------------------------------------------------
	 * Invalidate the RW memory used by the BL32 (SP_MIN) image. This
	 * includes the data and NOBITS sections. This is done to
	 * safeguard against possible corruption of this memory by
	 * dirty cache lines in a system cache as a result of use by
	 * an earlier boot loader stage.
	 * -----------------------------------------------------------------
	 */
	ldr	r0, =__RW_START__
	ldr	r1, =__RW_END__
	sub	r1, r1, r0
	bl	inv_dcache_range
#endif /* IMAGE_BL32 */

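	/* Zero-fill the .bss section */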
	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_SIZE__
	bl	zeromem

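	/* Zero-fill the coherent memory section, if present */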
#if USE_COHERENT_MEM
	ldr	r0, =__COHERENT_RAM_START__
	ldr	r1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem
#endif /* USE_COHERENT_MEM */

#ifdef IMAGE_BL1
	/* -----------------------------------------------------
	 * Copy the data section from ROM to RAM.
	 * -----------------------------------------------------
	 */
	ldr	r0, =__DATA_RAM_START__
	ldr	r1, =__DATA_ROM_START__
	ldr	r2, =__DATA_SIZE__
	bl	memcpy4
#endif /* IMAGE_BL1 */
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

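	/*
	 * Update the stack protector canary now that a stack is available
	 * and the C runtime environment has been initialised.
	 */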
#if STACK_PROTECTOR_ENABLED
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif /* STACK_PROTECTOR_ENABLED */
	.endm

#endif /* __EL3_COMMON_MACROS_S__ */