/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <el3_common_macros.S>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <smcc_macros.S>
#include <xlat_tables.h>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint


vector_base sp_min_vector_table
	b	sp_min_entrypoint
	b	plat_panic_handler	/* Undef */
	b	handle_smc		/* Syscall */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	plat_panic_handler	/* FIQ */


/*
 * The Cold boot/Reset entrypoint for SP_MIN
 */
func sp_min_entrypoint
#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------
	 * The preceding bootloader has populated r0 with a pointer to a
	 * 'bl_params_t' structure and r1 with a pointer to a
	 * platform-specific structure.
	 * ---------------------------------------------------------------
	 */
	mov	r11, r0
	mov	r12, r1

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already set up the CPU
	 * endianness and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_set_endian=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
	mov	r0, r11
	mov	r1, r12
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_set_endian=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
	 * to run so there's no argument to relay from a previous bootloader.
	 * Zero the arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	r0, #0
	mov	r1, #0
#endif /* RESET_TO_SP_MIN */

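	/*
	 * The early platform setup consumes the arguments in r0/r1, after
	 * which the platform port performs its architectural setup (such as
	 * setting up the translation tables).
	 */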
	bl	sp_min_early_platform_setup
	bl	sp_min_plat_arch_setup

	/* Call the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
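	/*
	 * clean_dcache_range() expects the base address in r0 and the size
	 * in bytes in r1, hence the base is subtracted from the end address
	 * below.
	 */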
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context
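	/* cm_get_context() returns the Non-secure 'cpu_context' pointer in r0 */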

	/* Restore the SCR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb
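	/*
	 * In Monitor mode, SCR.NS selects the banked copy of several CP15
	 * registers, so with the SCR restored (and the write synchronised by
	 * the ISB above) the SCTLR access below targets the Non-secure bank.
	 */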

	/* Restore the SCTLR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx
	/* The other cpu_context registers have been copied to smc context */
	b	sp_min_exit
endfunc sp_min_entrypoint


/*
 * SMC handling function for SP_MIN.
 */
func handle_smc
	smcc_save_gp_mode_regs

	/* r0 points to smc_context */
	mov	r2, r0				/* handle */
	ldcopr	r0, SCR

	/*
	 * Save the SCR on the stack. r1 is also pushed to maintain the
	 * 8-byte stack alignment requirement.
	 */
	push	{r0, r1}
	and	r3, r0, #SCR_NS_BIT		/* flags */

	/* Switch to Secure Mode */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb
	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 is issued */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f	/* SMC32 is detected */
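	/* SMC64 calls are not supported here; report SMC_UNK to the caller */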
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	2f	/* Skip handling the SMC */
1:
	mov	r1, #0				/* cookie */
	bl	handle_runtime_svc
2:
	/* r0 points to smc context */

	/* Restore SCR from stack */
	pop	{r1, r2}
	stcopr	r1, SCR
	isb

	b	sp_min_exit
endfunc handle_smc


/*
 * The Warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
	 *   programming the reset address do we need to set the CPU endianness.
	 *   In other cases, we assume this has been taken care of by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot, we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs, this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment,
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_set_endian=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table

	/* --------------------------------------------
	 * Enable the MMU with the DCache disabled,
	 * which makes it safe to use stacks allocated
	 * in normal memory. All memory accesses are
	 * marked nGnRnE when the MMU is disabled, so
	 * all stack writes make it to memory. When the
	 * MMU is enabled but the DCache is disabled,
	 * all memory accesses are marked Non-cacheable,
	 * so used stack memory is guaranteed to be
	 * visible immediately after the MMU is enabled.
	 * Enabling the DCache at the same time as the
	 * MMU could instead lead to speculatively
	 * fetched, possibly stale stack memory being
	 * read from other caches, causing coherency
	 * issues.
	 * --------------------------------------------
	 */
	mov	r0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

	bl	sp_min_warm_boot

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context

	/* Restore the SCR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb

	/* Restore the SCTLR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx

	/* The other cpu_context registers have been copied to smc context */
	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * The function to restore the registers from the SMC context and return
 * to the mode indicated by the SPSR.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	smcc_restore_gp_mode_regs
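	/*
	 * The exception return loads the PC from LR_mon and the CPSR from
	 * SPSR_mon, dropping out of Monitor mode back to the caller.
	 */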
	eret
endfunc sp_min_exit