/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)

MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
#if SEPARATE_NOBITS_REGION
    NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
#else
#define NOBITS RAM
#endif
}
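
/*
 * Note: BL31_BASE, BL31_LIMIT and (optionally) the BL31_NOBITS_* symbols used
 * above are not defined in this file; each platform provides them, typically
 * from its platform_def.h. As a rough illustration only (the values below are
 * hypothetical, not taken from any real platform):
 *
 *   #define BL31_BASE   UL(0x04001000)
 *   #define BL31_LIMIT  UL(0x04040000)
 */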

#ifdef PLAT_EXTRA_LD_SCRIPT
#include <plat.ld.S>
#endif
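
/*
 * If a platform sets PLAT_EXTRA_LD_SCRIPT, its own <plat.ld.S> fragment is
 * spliced in here from the platform's include path. Platforms typically use
 * it to describe extra memory regions or placement constraints of their own;
 * nothing below this point assumes anything about its contents.
 */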

SECTIONS
{
    . = BL31_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31_BASE address is not aligned on a page boundary.")

    __BL31_START__ = .;

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        *bl31_entrypoint.o(.text*)
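        /*
         * bl31_entrypoint.o is listed first so that bl31_entrypoint (the
         * ENTRY symbol above) lands at BL31_BASE, the address control is
         * handed to. The remaining input sections are sorted by alignment
         * below to minimise padding between them.
         */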
        *(SORT_BY_ALIGNMENT(.text*))
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.rodata*))

        RODATA_COMMON

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>
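        /*
         * pubsub_events.h lists every event with REGISTER_PUBSUB_EVENT().
         * When it is included from a linker script, each registration is
         * expected to expand into a per-event input section bracketed by
         * start/end symbols, which the PUBLISH_EVENT()/SUBSCRIBE_TO_EVENT()
         * machinery walks at runtime (see lib/el3_runtime/pubsub.h).
         */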

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(SORT_BY_ALIGNMENT(.rodata*))

        RODATA_COMMON

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)
        __RO_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as read-only
         * and executable. No RW data from the next section may creep in.
         * Ensure that the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")
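
    /*
     * __CPU_OPS_START__/__CPU_OPS_END__ are defined by the RODATA_COMMON
     * macro (see common/bl_common.ld.h, included above), which gathers the
     * cpu_ops input sections emitted by the per-CPU declare_cpu_ops macros
     * in lib/cpus. The assert above simply checks that at least one CPU
     * library was linked in.
     */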

#if SPM_MM
#ifndef SPM_SHIM_EXCEPTIONS_VMA
#define SPM_SHIM_EXCEPTIONS_VMA RAM
#endif

    /*
     * Exception vectors of the SPM shim layer. They must be aligned to a 2K
     * boundary, but they are placed on a page of their own so that individual
     * memory permissions can be applied to them; the alignment actually
     * required is therefore 4K.
     *
     * There is no need to include this in the RO section of BL31, because
     * BL31 itself never needs to access it.
     */
    spm_shim_exceptions : ALIGN(PAGE_SIZE) {
        __SPM_SHIM_EXCEPTIONS_START__ = .;
        *(.spm_shim_exceptions)
        . = ALIGN(PAGE_SIZE);
        __SPM_SHIM_EXCEPTIONS_END__ = .;
    } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM

    PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(spm_shim_exceptions));
    . = LOADADDR(spm_shim_exceptions) + SIZEOF(spm_shim_exceptions);
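
    /*
     * The shim vectors are linked at SPM_SHIM_EXCEPTIONS_VMA (which defaults
     * to RAM above) but are always loaded inside the BL31 image (AT>RAM), so
     * __SPM_SHIM_EXCEPTIONS_LMA__ records where they actually sit in the
     * image. The location counter is then moved to the end of their load
     * addresses so that the sections that follow are placed after them in
     * RAM.
     */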
#endif

    /*
     * Define a linker symbol to mark the start of the RW memory area for
     * this image.
     */
    __RW_START__ = .;
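
    /*
     * Symbols such as __RW_START__/__RW_END__ and __BL31_START__/__BL31_END__
     * are consumed from C, typically through the IMPORT_SYM() helper in
     * include/lib/utils_def.h, roughly along the lines of (illustrative only,
     * not a quote of the actual headers):
     *
     *   IMPORT_SYM(uintptr_t, __RW_START__, BL31_RW_BASE);
     *
     * which declares the linker symbol and exposes its address as a typed
     * constant.
     */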

    DATA_SECTION >RAM

    /*
     * .rela.dyn needs to come after .data for the readelf utility to parse
     * this section correctly. Ensure 8-byte alignment so that the fields of
     * the RELA data structure are aligned.
     */
    . = ALIGN(8);
    __RELA_START__ = .;
    .rela.dyn . : {
    } >RAM
    __RELA_END__ = .;
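
    /*
     * For reference, each entry in .rela.dyn is an Elf64_Rela record made of
     * three 64-bit fields (r_offset, r_info, r_addend), which is why 8-byte
     * alignment is enough for the PIE relocation fixup code to read the
     * entries directly at runtime.
     */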

#ifdef BL31_PROGBITS_LIMIT
    ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
#endif

#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark the end of the RW memory area for this
     * image.
     */
    . = ALIGN(PAGE_SIZE);
    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")

    . = BL31_NOBITS_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31 NOBITS base address is not aligned on a page boundary.")

    __NOBITS_START__ = .;
#endif

    STACK_SECTION >NOBITS
    BSS_SECTION >NOBITS
    XLAT_TABLE_SECTION >NOBITS
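
    /*
     * STACK_SECTION, BSS_SECTION and XLAT_TABLE_SECTION are macros from
     * common/bl_common.ld.h (included at the top of this file). Each one is
     * understood to expand to the corresponding output section together with
     * its start/end symbols. All three hold zero-initialised (NOBITS) data,
     * which is why they can be redirected to the separate NOBITS region when
     * SEPARATE_NOBITS_REGION is enabled.
     */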

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned
     * (4K) to guarantee that the coherent data are stored on their own pages
     * and are not mixed with normal data. This is required to set up the
     * correct memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory.
         *
         * Each lock's data is contiguous and fully allocated by the compiler.
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as device
         * memory. No other unexpected data may creep in. Ensure that the rest
         * of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >NOBITS
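
    /*
     * The bakery_lock input sections above are produced from C: lock storage
     * is tagged with a section attribute (roughly
     * __attribute__((section("bakery_lock"))), wrapped by the
     * DEFINE_BAKERY_LOCK() helper in TF-A), so every lock the image defines
     * is collected here and ends up in the coherent (device) memory mapping.
     */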
#endif

#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark the end of the NOBITS memory area for
     * this image.
     */
    __NOBITS_END__ = .;

    ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
#else
    /*
     * Define a linker symbol to mark the end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL31_END__ = .;

    /DISCARD/ : {
        *(.dynsym .dynstr .hash .gnu.hash)
    }

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
#endif
}