/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <bl_common.h>
#include <el3_common_macros.S>
#include <pmf_asm_macros.S>
#include <runtime_instr.h>
#include <xlat_tables.h>

	.globl	bl31_entrypoint
	.globl	bl31_warm_entrypoint

	/* -----------------------------------------------------
	 * bl31_entrypoint() is the cold boot entrypoint,
	 * executed only by the primary cpu.
	 * -----------------------------------------------------
	 */

func bl31_entrypoint
#if !RESET_TO_BL31
	/* ---------------------------------------------------------------
	 * The preceding bootloader has populated x0 with a pointer to a
	 * 'bl31_params' structure & x1 with a pointer to a
	 * platform-specific structure.
	 * ---------------------------------------------------------------
	 */
	mov	x20, x0
	mov	x21, x1
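	/*
	 * Note: x20 and x21 are callee-saved registers, so the arguments
	 * passed by the previous bootloader survive the calls made from
	 * el3_entrypoint_common below and can be handed to the platform
	 * layer afterwards.
	 */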

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
	 * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already set up the CPU
	 * endianness and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_set_endian=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
	mov	x0, x20
	mov	x1, x21
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems which have a programmable reset address,
	 * bl31_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_set_endian=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions

	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
	 * there's no argument to relay from a previous bootloader. Zero the
	 * arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	x0, 0
	mov	x1, 0
#endif /* RESET_TO_BL31 */

	/* ---------------------------------------------
	 * Perform platform specific early arch. setup
	 * ---------------------------------------------
	 */
	bl	bl31_early_platform_setup
	bl	bl31_plat_arch_setup

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	bl31_main
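	/*
	 * Note: bl31_main() returns here once the runtime services have been
	 * initialised and the entry context for the next image has been
	 * prepared, so execution falls through to the cache maintenance below
	 * and finally to el3_exit.
	 */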

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
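	/*
	 * clean_dcache_range expects the start address in x0 and the size in
	 * bytes in x1, hence the end - start subtraction below.
	 */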
	adr	x0, __DATA_START__
	adr	x1, __DATA_END__
	sub	x1, x1, x0
	bl	clean_dcache_range

	adr	x0, __BSS_START__
	adr	x1, __BSS_END__
	sub	x1, x1, x0
	bl	clean_dcache_range

	b	el3_exit
endfunc bl31_entrypoint

	/* --------------------------------------------------------------------
	 * This CPU has been physically powered up. It is either resuming from
	 * suspend or has simply been turned on. In both cases, call the BL31
	 * warmboot entrypoint
	 * --------------------------------------------------------------------
	 */
func bl31_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * This timestamp update happens with cache off. The next
	 * timestamp collection will need to do cache maintenance prior
	 * to timestamp update.
	 */
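	/*
	 * pmf_calc_timestamp_addr leaves the address of the timestamp slot for
	 * this service/timestamp ID in x0; the generic counter value is then
	 * stored to it directly.
	 */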
	pmf_calc_timestamp_addr rt_instr_svc RT_INSTR_EXIT_HW_LOW_PWR
	mrs	x1, cntpct_el0
	str	x1, [x0]
#endif

	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL31 entrypoint by
	 *   programming the reset address do we need to set the CPU endianness.
	 *   In other cases, we assume this has been taken care of by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot, we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs, this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment,
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_set_endian=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=runtime_exceptions

	/*
	 * We're about to enable MMU and participate in PSCI state coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent out of reset, having caches enabled until such time
	 * might lead to coherency issues (resulting from stale data getting
	 * speculatively fetched, among others). Therefore we keep data caches
	 * disabled while enabling the MMU, thereby forcing data accesses to
	 * have non-cacheable, nGnRnE attributes (these will always be coherent
	 * with main memory).
	 *
	 * On systems with hardware-assisted coherency, where CPUs are expected
	 * to be cache-coherent out of reset without needing explicit software
	 * intervention, PSCI need not invoke platform routines to enter
	 * coherency (as CPUs already are); and there's no reason to have caches
	 * disabled either.
	 */
#if HW_ASSISTED_COHERENCY
	mov	x0, #0
#else
	mov	x0, #DISABLE_DCACHE
#endif
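	/*
	 * x0 carries the flags argument for bl31_plat_enable_mmu:
	 * DISABLE_DCACHE requests that the data cache stay off while the MMU
	 * is enabled, per the rationale above.
	 */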
	bl	bl31_plat_enable_mmu

	bl	psci_warmboot_entrypoint

#if ENABLE_RUNTIME_INSTRUMENTATION
	pmf_calc_timestamp_addr rt_instr_svc RT_INSTR_EXIT_PSCI
	mov	x19, x0

	/*
	 * Invalidate before updating timestamp to ensure previous timestamp
	 * updates on the same cache line with caches disabled are properly
	 * seen by the same core. Without the cache invalidate, the core might
	 * write into a stale cache line.
	 */
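	/*
	 * inv_dcache_range takes the timestamp address (still in x0) and the
	 * size in x1. x19 and x20 are callee-saved and preserve the timestamp
	 * address and the return address (x30) across the call.
	 */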
	mov	x1, #PMF_TS_SIZE
	mov	x20, x30
	bl	inv_dcache_range
	mov	x30, x20

	mrs	x0, cntpct_el0
	str	x0, [x19]
#endif
	b	el3_exit
endfunc bl31_warm_entrypoint