/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
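
/*
 * Note: the RODATA_COMMON, DATA_SECTION, RELA_SECTION, STACK_SECTION,
 * BSS_SECTION and XLAT_TABLE_SECTION macros used below are expected to be
 * provided by common/bl_common.ld.h, which defines the output section layouts
 * shared by the BL images.
 */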

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)

MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE

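    /*
     * When SEPARATE_NOBITS_REGION is enabled, the sections placed in ">NOBITS"
     * further down (stacks, BSS, translation tables and, optionally, coherent
     * memory) get a memory region of their own; otherwise NOBITS is simply an
     * alias for RAM.
     */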
#if SEPARATE_NOBITS_REGION
    NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
#else /* SEPARATE_NOBITS_REGION */
#   define NOBITS RAM
#endif /* SEPARATE_NOBITS_REGION */
}

#ifdef PLAT_EXTRA_LD_SCRIPT
#   include <plat.ld.S>
#endif /* PLAT_EXTRA_LD_SCRIPT */
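
/*
 * The optional <plat.ld.S> included above is free to add, for example, extra
 * MEMORY regions or symbol definitions; its contents are entirely
 * platform-defined.
 */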

SECTIONS {
    RAM_REGION_START = ORIGIN(RAM);
    RAM_REGION_LENGTH = LENGTH(RAM);
    . = BL31_BASE;

    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31_BASE address is not aligned on a page boundary.")

    __BL31_START__ = .;

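    /*
     * With SEPARATE_CODE_AND_RODATA enabled, code and read-only data are laid
     * out as two separate page-aligned output sections so that they can be
     * mapped with different memory attributes (executable code vs.
     * non-executable read-only data). Otherwise both live in a single
     * read-only ".ro" section.
     */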
#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;

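        /*
         * The entry-point object is placed first so that the entry-point code
         * sits at the very start of the image, i.e. at BL31_BASE.
         */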
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(SORT(.text*)))
        *(.vectors)
        __TEXT_END_UNALIGNED__ = .;

        . = ALIGN(PAGE_SIZE);

        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;

        *(SORT_BY_ALIGNMENT(.rodata*))

#   if PLAT_EXTRA_RODATA_INCLUDES
#       include <plat.ld.rodata.inc>
#   endif /* PLAT_EXTRA_RODATA_INCLUDES */

        RODATA_COMMON

        . = ALIGN(8);

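        /*
         * Including pubsub_events.h in a linker-script context is expected to
         * emit, for each event declared there, the per-event array of
         * subscriber handler pointers used by the EL3 publish-subscribe
         * framework.
         */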
#   include <lib/el3_runtime/pubsub_events.h>
        __RODATA_END_UNALIGNED__ = .;

        . = ALIGN(PAGE_SIZE);

        __RODATA_END__ = .;
    } >RAM
#else /* SEPARATE_CODE_AND_RODATA */
    .ro . : {
        __RO_START__ = .;

        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(SORT_BY_ALIGNMENT(.rodata*))

        RODATA_COMMON

        . = ALIGN(8);

#   include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)

        __RO_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable. No RW data from the next section must creep in. Ensure
         * that the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);

        __RO_END__ = .;
    } >RAM
#endif /* SEPARATE_CODE_AND_RODATA */

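    /*
     * __CPU_OPS_START__ and __CPU_OPS_END__ are expected to bound the cpu_ops
     * descriptors collected into read-only data above; if no CPU support
     * library was linked in for this platform, the region is empty and the
     * build fails here.
     */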
    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")

#if SPM_MM
#   ifndef SPM_SHIM_EXCEPTIONS_VMA
#       define SPM_SHIM_EXCEPTIONS_VMA RAM
#   endif /* SPM_SHIM_EXCEPTIONS_VMA */

    /*
     * Exception vectors of the SPM shim layer. They only require 2KB
     * alignment, but they are given a page of their own so that individual
     * memory permissions can be set on them; the alignment actually used is
     * therefore the page size.
     *
     * There is no need to include this in the RO section of BL31 because it
     * does not need to be accessed by BL31.
     */
    .spm_shim_exceptions : ALIGN(PAGE_SIZE) {
        __SPM_SHIM_EXCEPTIONS_START__ = .;

        *(.spm_shim_exceptions)

        . = ALIGN(PAGE_SIZE);

        __SPM_SHIM_EXCEPTIONS_END__ = .;
    } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM

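    /*
     * The shim section above may be assigned a VMA different from its load
     * address, so export its LMA via __SPM_SHIM_EXCEPTIONS_LMA__ and then
     * advance the location counter past its load image so that the sections
     * that follow are placed after it in RAM.
     */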
    PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(.spm_shim_exceptions));

    . = LOADADDR(.spm_shim_exceptions) + SIZEOF(.spm_shim_exceptions);
#endif /* SPM_MM */

    __RW_START__ = .;

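    /*
     * Everything from __RW_START__ up to __RW_END__ is the read-write part of
     * the image. DATA_SECTION and RELA_SECTION are expected to expand (via
     * common/bl_common.ld.h) to the .data output section and to the .rela.dyn
     * section holding the dynamic relocations processed at runtime when BL31
     * is built as a position-independent executable.
     */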
    DATA_SECTION >RAM
    RELA_SECTION >RAM

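    /*
     * BL31_PROGBITS_LIMIT, when defined by the platform, caps only the loaded
     * (progbits) part of the image, as opposed to BL31_LIMIT, which bounds
     * the whole footprint including the NOBITS sections.
     */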
#ifdef BL31_PROGBITS_LIMIT
    ASSERT(
        . <= BL31_PROGBITS_LIMIT,
        "BL31 progbits has exceeded its limit. Consider disabling some features."
    )
#endif /* BL31_PROGBITS_LIMIT */

#if SEPARATE_NOBITS_REGION
    . = ALIGN(PAGE_SIZE);

    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")

    . = BL31_NOBITS_BASE;

    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31 NOBITS base address is not aligned on a page boundary.")

    __NOBITS_START__ = .;
#endif /* SEPARATE_NOBITS_REGION */

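    /*
     * The stack, BSS and translation-table sections below are expected to be
     * NOLOAD: they occupy no space in the loaded image and are only reserved
     * in the NOBITS memory region.
     */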
    STACK_SECTION >NOBITS
    BSS_SECTION >NOBITS
    XLAT_TABLE_SECTION >NOBITS

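    /*
     * USE_COHERENT_MEM keeps data that is accessed both with and without the
     * data caches enabled (e.g. bakery locks) in a dedicated page that gets
     * mapped as device memory, at the cost of at least one full extra page.
     */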
#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned to
     * guarantee that the coherent data are stored on their own pages and are
     * not mixed with normal data. This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;

        /*
         * Bakery locks are stored in coherent memory. Each lock's data is
         * contiguous and fully allocated by the compiler.
         */
        *(.bakery_lock)
        *(.tzfw_coherent_mem)

        __COHERENT_RAM_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as device
         * memory. No other unexpected data must creep in. Ensure the rest of
         * the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);

        __COHERENT_RAM_END__ = .;
    } >NOBITS
#endif /* USE_COHERENT_MEM */

#if SEPARATE_NOBITS_REGION
    __NOBITS_END__ = .;

    ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
#else /* SEPARATE_NOBITS_REGION */
    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
#endif /* SEPARATE_NOBITS_REGION */
    RAM_REGION_END = .;

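    /*
     * The dynamic symbol and hash tables emitted by the linker are not needed
     * at runtime, so they are discarded to keep the image small; .rela.dyn
     * itself is retained above via RELA_SECTION.
     */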
    /DISCARD/ : {
        *(.dynsym .dynstr .hash .gnu.hash)
    }
}