/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (c) 2004-2008 Texas Instruments
 *
 * (C) Copyright 2002
 * Gary Jennejohn, DENX Software Engineering, <garyj@denx.de>
 */

#include <config.h>
#include <asm/psci.h>

OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
SECTIONS
{
#ifndef CONFIG_CMDLINE
	/DISCARD/ : { *(__u_boot_list_2_cmd_*) }
#endif
#if defined(CONFIG_ARMV7_SECURE_BASE) && defined(CONFIG_ARMV7_NONSEC)
	/*
	 * If CONFIG_ARMV7_SECURE_BASE is true, secure code will not
	 * bundle with u-boot, and code offsets are fixed. Secure zone
	 * only needs to be copied from the loading address to
	 * CONFIG_ARMV7_SECURE_BASE, which is the linking and running
	 * address for secure code.
	 *
	 * If CONFIG_ARMV7_SECURE_BASE is undefined, the secure zone will
	 * be included in u-boot address space, and some absolute address
	 * were used in secure code. The absolute addresses of the secure
	 * code also needs to be relocated along with the accompanying u-boot
	 * code.
	 *
	 * So DISCARD is only for CONFIG_ARMV7_SECURE_BASE.
	 */
	/DISCARD/ : { *(.rel._secure*) }
#endif
	. = 0x00000000;

	. = ALIGN(4);
	.text :
	{
		*(.__image_copy_start)
		*(.vectors)
		CPUDIR/start.o (.text*)
	}

	/* This needs to come before *(.text*) */
	.__efi_runtime_start : {
		*(.__efi_runtime_start)
	}

	.efi_runtime : {
		*(.text.efi_runtime*)
		*(.rodata.efi_runtime*)
		*(.data.efi_runtime*)
	}

	.__efi_runtime_stop : {
		*(.__efi_runtime_stop)
	}

	.text_rest :
	{
		*(.text*)
	}

#ifdef CONFIG_ARMV7_NONSEC

	/* Align the secure section only if we're going to use it in situ */
	.__secure_start
#ifndef CONFIG_ARMV7_SECURE_BASE
		ALIGN(CONSTANT(COMMONPAGESIZE))
#endif
	: {
		KEEP(*(.__secure_start))
	}

#ifndef CONFIG_ARMV7_SECURE_BASE
#define __ARMV7_SECURE_BASE
#define __ARMV7_PSCI_STACK_IN_RAM
#else
#define __ARMV7_SECURE_BASE	CONFIG_ARMV7_SECURE_BASE
#endif

	.secure_text __ARMV7_SECURE_BASE :
		AT(ADDR(.__secure_start) + SIZEOF(.__secure_start))
	{
		*(._secure.text)
	}

	.secure_data : AT(LOADADDR(.secure_text) + SIZEOF(.secure_text))
	{
		*(._secure.data)
	}

#ifdef CONFIG_ARMV7_PSCI
	.secure_stack ALIGN(ADDR(.secure_data) + SIZEOF(.secure_data),
			    CONSTANT(COMMONPAGESIZE)) (NOLOAD) :
#ifdef __ARMV7_PSCI_STACK_IN_RAM
		AT(ADDR(.secure_stack))
#else
		AT(LOADADDR(.secure_data) + SIZEOF(.secure_data))
#endif
	{
		KEEP(*(.__secure_stack_start))

		/* Skip addresses for stack */
		. = . + CONFIG_ARMV7_PSCI_NR_CPUS * ARM_PSCI_STACK_SIZE;

		/* Align end of stack section to page boundary */
		. = ALIGN(CONSTANT(COMMONPAGESIZE));

		KEEP(*(.__secure_stack_end))

#ifdef CONFIG_ARMV7_SECURE_MAX_SIZE
		/*
		 * We are not checking (__secure_end - __secure_start) here,
		 * as these are the load addresses, and do not include the
		 * stack section. Instead, use the end of the stack section
		 * and the start of the text section.
		 */
		ASSERT((. - ADDR(.secure_text)) <= CONFIG_ARMV7_SECURE_MAX_SIZE,
		       "Error: secure section exceeds secure memory size");
#endif
	}

#ifndef __ARMV7_PSCI_STACK_IN_RAM
	/* Reset VMA but don't allocate space if we have secure SRAM */
	. = LOADADDR(.secure_stack);
#endif

#endif

	.__secure_end : AT(ADDR(.__secure_end)) {
		*(.__secure_end)
		LONG(0x1d1071c);	/* Must output something to reset LMA */
	}
#endif

	. = ALIGN(4);
	.rodata : { *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.rodata*))) }

	. = ALIGN(4);
	.data : {
		*(.data*)
	}

	. = ALIGN(4);

	. = .;

	. = ALIGN(4);
	__u_boot_list : {
		KEEP(*(SORT(__u_boot_list*)));
	}

	. = ALIGN(4);

	.efi_runtime_rel_start :
	{
		*(.__efi_runtime_rel_start)
	}

	.efi_runtime_rel : {
		*(.rel*.efi_runtime)
		*(.rel*.efi_runtime.*)
	}

	.efi_runtime_rel_stop :
	{
		*(.__efi_runtime_rel_stop)
	}

	. = ALIGN(4);

	.image_copy_end :
	{
		*(.__image_copy_end)
	}

	.rel_dyn_start :
	{
		*(.__rel_dyn_start)
	}

	.rel.dyn : {
		*(.rel*)
	}

	.rel_dyn_end :
	{
		*(.__rel_dyn_end)
	}

	.end :
	{
		*(.__end)
	}

	_image_binary_end = .;

	/*
	 * Deprecated: this MMU section is used by pxa at present but
	 * should not be used by new boards/CPUs.
	 */
	. = ALIGN(4096);
	.mmutable : {
		*(.mmutable)
	}

/*
 * Compiler-generated __bss_start and __bss_end, see arch/arm/lib/bss.c
 * __bss_base and __bss_limit are for linker only (overlay ordering)
 */

	.bss_start __rel_dyn_start (OVERLAY) : {
		KEEP(*(.__bss_start));
		__bss_base = .;
	}

	.bss __bss_base (OVERLAY) : {
		*(.bss*)
		. = ALIGN(4);
		__bss_limit = .;
	}

	.bss_end __bss_limit (OVERLAY) : {
		KEEP(*(.__bss_end));
	}

	.dynsym _image_binary_end : { *(.dynsym) }
	.dynbss : { *(.dynbss) }
	.dynstr : { *(.dynstr*) }
	.dynamic : { *(.dynamic*) }
	.plt : { *(.plt*) }
	.interp : { *(.interp*) }
	.gnu.hash : { *(.gnu.hash) }
	.gnu : { *(.gnu*) }
	.ARM.exidx : { *(.ARM.exidx*) }
	.gnu.linkonce.armexidx : { *(.gnu.linkonce.armexidx.*) }
}