/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 *************************************************************************/

.globl	_start
_start:
#if defined(LINUX_KERNEL_IMAGE_HEADER)
#include <asm/boot0-linux-kernel-header.h>
#elif defined(CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK)
/*
 * Various SoCs need something special and SoC-specific up front in
 * order to boot; allow them to set that up in their boot0.h file and
 * then use it here.
 */
#include <asm/arch/boot0.h>
#else
	b	reset
#endif
	.align 3

.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_SYS_TEXT_BASE

/*
 * These are defined in the linker script.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start

reset:
	/* Allow the board to save important registers */
	b	save_boot_params
.globl	save_boot_params_ret
save_boot_params_ret:

#if CONFIG_POSITION_INDEPENDENT
	/*
	 * Fix .rela.dyn relocations. This allows U-Boot to be loaded to and
	 * executed at a different address than it was linked at.
	 */
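	/*
	 * For reference, each .rela.dyn entry the loop below consumes is a
	 * 24-byte Elf64_Rela record (a sketch of the standard ELF layout,
	 * not a definition taken from this tree):
	 *
	 *	struct Elf64_Rela {
	 *		uint64_t r_offset;   link-time address to patch
	 *		uint64_t r_info;     relocation type; 1027 is R_AARCH64_RELATIVE
	 *		int64_t  r_addend;
	 *	};
	 *
	 * For an R_AARCH64_RELATIVE entry the fixup is simply:
	 *	*(r_offset + run_vs_link_offset) = r_addend + run_vs_link_offset
	 */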
pie_fixup:
	adr	x0, _start		/* x0 <- Runtime value of _start */
	ldr	x1, _TEXT_BASE		/* x1 <- Linked value of _start */
	sub	x9, x0, x1		/* x9 <- Run-vs-link offset */
	adr	x2, __rel_dyn_start	/* x2 <- Runtime &__rel_dyn_start */
	adr	x3, __rel_dyn_end	/* x3 <- Runtime &__rel_dyn_end */
pie_fix_loop:
	ldp	x0, x1, [x2], #16	/* (x0, x1) <- (Link location, fixup) */
	ldr	x4, [x2], #8		/* x4 <- addend */
	cmp	w1, #1027		/* relative fixup? */
	bne	pie_skip_reloc
	/* relative fix: store addend plus offset at dest location */
	add	x0, x0, x9
	add	x4, x4, x9
	str	x4, [x0]
pie_skip_reloc:
	cmp	x2, x3
	b.lo	pie_fix_loop
pie_fixup_done:
#endif

#ifdef CONFIG_SYS_RESET_SCTRL
	bl	reset_sctrl
#endif
	/*
	 * Could be EL3/EL2/EL1, Initial State:
	 * Little Endian, MMU Disabled, i/dCache Disabled
	 */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	mrs	x0, scr_el3
	orr	x0, x0, #0xf			/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0
	msr	cptr_el3, xzr			/* Enable FP/SIMD */
#ifdef COUNTER_FREQUENCY
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0			/* Initialize CNTFRQ */
#endif
	b	0f
2:	msr	vbar_el2, x0
	mov	x0, #0x33ff
	msr	cptr_el2, x0			/* Enable FP/SIMD */
	b	0f
1:	msr	vbar_el1, x0
	mov	x0, #3 << 20
	msr	cpacr_el1, x0			/* Enable FP/SIMD */
0:

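	/*
	 * For reference (a summary of the trap controls touched above, not
	 * definitions from this file): writing 0 to CPTR_EL3 clears TFP
	 * (bit 10); 0x33ff in CPTR_EL2 keeps the RES1 bits set with TFP
	 * (bit 10) clear; 3 << 20 in CPACR_EL1 sets FPEN (bits 21:20) to
	 * 0b11. In all three cases FP/SIMD accesses are left untrapped.
	 */
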
	/*
	 * Enable SMPEN bit for coherency.
	 * This register is not architectural but at the moment
	 * this bit should be set for A53/A57/A72.
	 */
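	/*
	 * CPUECTLR_EL1 is accessed via its implementation-defined encoding
	 * S3_1_c15_c2_1; SMPEN is bit 6, hence the 0x40 mask below.
	 */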
#ifdef CONFIG_ARMV8_SET_SMPEN
	switch_el x1, 3f, 1f, 1f
3:
	mrs	x0, S3_1_c15_c2_1	/* cpuectlr_el1 */
	orr	x0, x0, #0x40
	msr	S3_1_c15_c2_1, x0
1:
#endif

	/* Apply ARM core specific errata */
	bl	apply_core_errata

	/*
	 * Cache/BPB/TLB Invalidate
	 * i-cache is invalidated before being enabled in icache_enable()
	 * tlb is invalidated before the mmu is enabled in dcache_enable()
	 * d-cache is invalidated before being enabled in dcache_enable()
	 */

	/* Processor specific initialization */
	bl	lowlevel_init

#if defined(CONFIG_ARMV8_SPIN_TABLE) && !defined(CONFIG_SPL_BUILD)
	branch_if_master x0, x1, master_cpu
	b	spin_table_secondary_jump
	/* never return */
#elif defined(CONFIG_ARMV8_MULTIENTRY)
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs
	 */
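	/*
	 * Release protocol, as implemented by the loop below: each slave
	 * sleeps in wfe and re-reads CPU_RELEASE_ADDR after every wake-up.
	 * A value of zero means "keep waiting"; once a non-zero entry-point
	 * address has been stored there (by the master or a later stage)
	 * and the slaves are woken, each slave branches to that address.
	 */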
slave_cpu:
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu
	br	x0			/* branch to the given address */
#endif /* CONFIG_ARMV8_MULTIENTRY */
master_cpu:
	bl	_main

#ifdef CONFIG_SYS_RESET_SCTRL
reset_sctrl:
	switch_el x1, 3f, 2f, 1f
3:
	mrs	x0, sctlr_el3
	b	0f
2:
	mrs	x0, sctlr_el2
	b	0f
1:
	mrs	x0, sctlr_el1

0:
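	/*
	 * 0xfdfffffa clears SCTLR.M (bit 0), SCTLR.C (bit 2) and SCTLR.EE
	 * (bit 25), i.e. MMU off, D-cache off, little-endian data accesses,
	 * while leaving the remaining SCTLR bits untouched.
	 */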
	ldr	x1, =0xfdfffffa
	and	x0, x0, x1

	switch_el x1, 6f, 5f, 4f
6:
	msr	sctlr_el3, x0
	b	7f
5:
	msr	sctlr_el2, x0
	b	7f
4:
	msr	sctlr_el1, x0

7:
	dsb	sy
	isb
	b	__asm_invalidate_tlb_all
	ret
#endif

/*-----------------------------------------------------------------------*/

WEAK(apply_core_errata)

	mov	x29, lr			/* Save LR */
	/* For now, we support Cortex-A53 and Cortex-A57 specific errata */

	/* Check if we are running on a Cortex-A53 core */
	branch_if_a53_core x0, apply_a53_core_errata

	/* Check if we are running on a Cortex-A57 core */
	branch_if_a57_core x0, apply_a57_core_errata
0:
	mov	lr, x29			/* Restore LR */
	ret

apply_a53_core_errata:

#ifdef CONFIG_ARM_ERRATA_855873
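	/*
	 * MIDR_EL1 layout, for reference: variant in bits [23:20], revision
	 * in bits [3:0]. The two checks below skip the workaround unless
	 * the core is an r0p3 or newer part.
	 */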
	mrs	x0, midr_el1
	tst	x0, #(0xf << 20)
	b.ne	0b

	mrs	x0, midr_el1
	and	x0, x0, #0xf
	cmp	x0, #3
	b.lt	0b

	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Enable data cache clean as data cache clean/invalidate */
	orr	x0, x0, #1 << 44
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
	b 0b

apply_a57_core_errata:

#ifdef CONFIG_ARM_ERRATA_828024
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable non-allocate hint of w-b-n-a memory type */
	orr	x0, x0, #1 << 49
	/* Disable write streaming no L1-allocate threshold */
	orr	x0, x0, #3 << 25
	/* Disable write streaming no-allocate threshold */
	orr	x0, x0, #3 << 27
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_826974
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable speculative load execution ahead of a DMB */
	orr	x0, x0, #1 << 59
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833471
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * FPSCR write flush.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 38
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_829520
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * Setting the Disable Indirect Predictor bit prevents this erratum
	 * from occurring.
	 * Note that in some cases this could impact performance.
	 */
	orr	x0, x0, #1 << 4
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833069
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Clear the "Enable Invalidates of BTB" bit */
	and	x0, x0, #0xE
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
	b 0b
ENDPROC(apply_core_errata)

/*-----------------------------------------------------------------------*/

WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, 2f

	/*
	 * Slaves should wait for the master to clear the spin table.
	 * This sync prevents slaves from observing an incorrect
	 * spin-table value and jumping to the wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 */
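	/*
	 * As used below, armv8_switch_to_el2/el1 take the address to
	 * continue at in x4 and the target execution state (ES_TO_AARCH64)
	 * in x5, so the slaves resume at lowlevel_in_el2/el1 after each
	 * switch. This is a summary of how the call sites here use those
	 * registers, not a full description of the routines' interface.
	 */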
	adr	x4, lowlevel_in_el2
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el2

lowlevel_in_el2:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, lowlevel_in_el1
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el1

lowlevel_in_el1:
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

WEAK(smp_kick_all_cpus)
	/* Kick secondary CPUs up with an SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	b	gic_kick_secondary_cpus
#endif
	ret
ENDPROC(smp_kick_all_cpus)

/*-----------------------------------------------------------------------*/

ENTRY(c_runtime_cpu_setup)
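	/*
	 * Called from crt0_64.S once U-Boot has relocated itself; from here
	 * on, exceptions should use the relocated vector table, so VBAR is
	 * re-pointed at the run-time copy of "vectors".
	 */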
	/* Relocate VBAR */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:

	ret
ENDPROC(c_runtime_cpu_setup)

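/*
 * Boards can override this weak stub to stash registers handed over by
 * earlier-stage firmware; an override is expected to return by branching
 * to save_boot_params_ret, just as this default does.
 */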
WEAK(save_boot_params)
	b	save_boot_params_ret	/* back to my caller */
ENDPROC(save_boot_params)