/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <el3_common_macros.S>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <smcc_macros.S>
#include <xlat_tables_defs.h>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint

	.macro route_fiq_to_sp_min reg
	/* -----------------------------------------------------
	 * FIQs are secure interrupts trapped by the Monitor:
	 * route them to Monitor mode (SCR.FIQ = 1) and do not
	 * allow the non-secure world to mask them (SCR.FW = 0).
	 * -----------------------------------------------------
	 */
	ldcopr	\reg, SCR
	orr	\reg, \reg, #SCR_FIQ_BIT
	bic	\reg, \reg, #SCR_FW_BIT
	stcopr	\reg, SCR
	.endm
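	/*
	 * Note: only \reg is clobbered. Callers pass whichever register is
	 * free at the call site (r4 on the cold boot path, r0 on the warm
	 * boot path below).
	 */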

	.macro clrex_on_monitor_entry
#if (ARM_ARCH_MAJOR == 7)
	/*
	 * ARMv7 architectures need to clear the exclusive access state on
	 * entry to Monitor mode: a stale reservation left by the interrupted
	 * context could otherwise let a subsequent STREX succeed spuriously.
	 */
	clrex
#endif
	.endm

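/*
 * Vector table for Monitor mode exceptions. The entries follow the AArch32
 * monitor vector layout; each slot holds a single branch instruction, and
 * the exceptions SP_MIN does not handle are routed to the platform panic
 * handler.
 */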
vector_base sp_min_vector_table
	b	sp_min_entrypoint
	b	plat_panic_handler	/* Undef */
	b	handle_smc		/* Syscall */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	handle_fiq		/* FIQ */


/*
 * The Cold boot/Reset entrypoint for SP_MIN.
 */
func sp_min_entrypoint
#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------
	 * The preceding bootloader has populated r0 with a pointer to a
	 * 'bl_params_t' structure and r1 with a pointer to a
	 * platform-specific structure.
	 * ---------------------------------------------------------------
	 */
	mov	r11, r0
	mov	r12, r1

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR, including the CPU endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer.
	 * ---------------------------------------------------------------------
	 */
	mov	r0, r11
	mov	r1, r12
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path, so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
	 * to run, so there is no argument to relay from a previous bootloader.
	 * Zero the arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	r0, #0
	mov	r1, #0
#endif /* RESET_TO_SP_MIN */

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r4
#endif

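	/*
	 * Platform hooks: sp_min_early_platform_setup() typically initialises
	 * the console and stashes the boot arguments, while
	 * sp_min_plat_arch_setup() typically sets up the translation tables
	 * and enables the MMU.
	 */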
	bl	sp_min_early_platform_setup
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	bl	smc_get_next_ctx

	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_entrypoint


/*
 * SMC handling function for SP_MIN.
 */
func handle_smc
	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smcc_save_gp_mode_regs

	clrex_on_monitor_entry

	/*
	 * `sp` still points to `smc_ctx_t`. Save it to a register
	 * and restore the C runtime stack pointer to `sp`.
	 */
	mov	r2, sp				/* handle */
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	ldr	r0, [r2, #SMC_CTX_SCR]
	and	r3, r0, #SCR_NS_BIT		/* flags */

	/* Switch to Secure Mode */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	/*
	 * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
	 * Also, the PMCR.LC field has an architecturally UNKNOWN value on
	 * reset, so set it to 1 as ARM has deprecated use of PMCR.LC=0.
	 */
	ldcopr	r0, PMCR
	orr	r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
	stcopr	r0, PMCR

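	/*
	 * r2 (handle) and r3 (flags) were set up above; r0 (smc_fid) and
	 * r1 (cookie) below complete the four arguments expected by
	 * handle_runtime_svc().
	 */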
	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 was issued */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f
	/* SMC64 is not supported: return SMC_UNK to the caller */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	sp_min_exit
1:
	/* SMC32 is detected */
	mov	r1, #0				/* cookie */
	bl	handle_runtime_svc

	/* `r0` points to `smc_ctx_t` */
	b	sp_min_exit
endfunc handle_smc

/*
 * Secure interrupt (FIQ) handling function for SP_MIN.
 */
func handle_fiq
#if !SP_MIN_WITH_SECURE_FIQ
	b	plat_panic_handler
#else
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	/* On FIQ entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smcc_save_gp_mode_regs

	clrex_on_monitor_entry

	/* Load the run-time stack */
	mov	r2, sp
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	/* Switch to Secure Mode */
	ldr	r0, [r2, #SMC_CTX_SCR]
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	/*
	 * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
	 * Also, the PMCR.LC field has an architecturally UNKNOWN value on
	 * reset, so set it to 1 as ARM has deprecated use of PMCR.LC=0.
	 */
	ldcopr	r0, PMCR
	orr	r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
	stcopr	r0, PMCR

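	/*
	 * Preserve the `smc_ctx_t` pointer (r2) across the C call; r3 is
	 * pushed only to keep the stack 8-byte aligned, as required by the
	 * AAPCS. The context pointer is popped back into r0, the argument
	 * expected by sp_min_exit below.
	 */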
	push	{r2, r3}
	bl	sp_min_fiq
	pop	{r0, r3}

	b	sp_min_exit
#endif
endfunc handle_fiq

/*
 * The Warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
	 *   programming the reset address do we need to initialise the SCTLR.
	 *   In other cases, we assume this has been taken care of by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot: we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs; this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment:
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table

	/*
	 * We're about to enable the MMU and participate in PSCI state
	 * coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs
	 * to participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform-specific programming,
	 * having caches enabled until that point might lead to coherency
	 * issues (resulting from stale data getting speculatively fetched,
	 * among other causes). Therefore we keep data caches disabled on such
	 * platforms even after enabling the MMU.
	 *
	 * On systems with hardware-assisted coherency, or on single-cluster
	 * platforms, such platform-specific programming is not required to
	 * enter coherency (the CPUs already are coherent), so there's no
	 * reason to have caches disabled either.
	 */
	mov	r0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r0
#endif

#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
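	/*
	 * Either the CPUs are already coherent (hardware-assisted coherency)
	 * or the platform has opted in via WARMBOOT_ENABLE_DCACHE_EARLY, so
	 * it is safe to turn the data cache on right after enabling the MMU.
	 */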
	ldcopr	r0, SCTLR
	orr	r0, r0, #SCTLR_C_BIT
	stcopr	r0, SCTLR
	isb
#endif

	bl	sp_min_warm_boot
	bl	smc_get_next_ctx
	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * Function to restore the registers from the SMC context and return
 * to the mode specified by the saved SPSR.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	monitor_exit
endfunc sp_min_exit