/*
 * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef BL_COMMON_LD_H
#define BL_COMMON_LD_H

#include <platform_def.h>

#ifdef __aarch64__
#define STRUCT_ALIGN 8
#define BSS_ALIGN 16
#else
#define STRUCT_ALIGN 4
#define BSS_ALIGN 8
#endif

#ifndef DATA_ALIGN
#define DATA_ALIGN 1
#endif

#define CPU_OPS \
	. = ALIGN(STRUCT_ALIGN); \
	__CPU_OPS_START__ = .; \
	KEEP(*(.cpu_ops)) \
	__CPU_OPS_END__ = .;

#define PARSER_LIB_DESCS \
	. = ALIGN(STRUCT_ALIGN); \
	__PARSER_LIB_DESCS_START__ = .; \
	KEEP(*(.img_parser_lib_descs)) \
	__PARSER_LIB_DESCS_END__ = .;

#define RT_SVC_DESCS \
	. = ALIGN(STRUCT_ALIGN); \
	__RT_SVC_DESCS_START__ = .; \
	KEEP(*(.rt_svc_descs)) \
	__RT_SVC_DESCS_END__ = .;

#if SPMC_AT_EL3
#define EL3_LP_DESCS \
	. = ALIGN(STRUCT_ALIGN); \
	__EL3_LP_DESCS_START__ = .; \
	KEEP(*(.el3_lp_descs)) \
	__EL3_LP_DESCS_END__ = .;
#else
#define EL3_LP_DESCS
#endif

#if ENABLE_SPMD_LP
#define SPMD_LP_DESCS \
	. = ALIGN(STRUCT_ALIGN); \
	__SPMD_LP_DESCS_START__ = .; \
	KEEP(*(.spmd_lp_descs)) \
	__SPMD_LP_DESCS_END__ = .;
#else
#define SPMD_LP_DESCS
#endif

#define PMF_SVC_DESCS \
	. = ALIGN(STRUCT_ALIGN); \
	__PMF_SVC_DESCS_START__ = .; \
	KEEP(*(.pmf_svc_descs)) \
	__PMF_SVC_DESCS_END__ = .;

#define FCONF_POPULATOR \
	. = ALIGN(STRUCT_ALIGN); \
	__FCONF_POPULATOR_START__ = .; \
	KEEP(*(.fconf_populator)) \
	__FCONF_POPULATOR_END__ = .;
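
/*
 * Note: entries land in the descriptor sections above via registration
 * macros in C code. As an illustration (simplified sketch, not the exact
 * definition; see include/common/runtime_svc.h), DECLARE_RT_SVC() emits a
 * descriptor roughly like:
 *
 *   static const rt_svc_desc_t __svc_desc_foo
 *           __section(".rt_svc_descs") __used = { ... };
 *
 * The KEEP() directives above stop the linker from discarding these
 * otherwise unreferenced descriptors when garbage collection is enabled.
 */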

/*
 * Keep the .got section in the RO region: it is patched prior to enabling
 * the MMU, and having the .got read-only is better for security. The GOT is
 * a table of addresses, so ensure pointer-size alignment.
 */
#define GOT \
	. = ALIGN(STRUCT_ALIGN); \
	__GOT_START__ = .; \
	*(.got) \
	__GOT_END__ = .;
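
/*
 * Explanatory note: the __GOT_START__/__GOT_END__ symbols (and the
 * __RELA_START__/__RELA_END__ symbols defined further below) are consumed by
 * the early boot code that applies dynamic relocations before the MMU is
 * enabled (for example, the fixup_gdt_reloc helper on AArch64). The exact
 * consumer is image- and architecture-specific.
 */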

/*
 * The base xlat table
 *
 * It is put into the rodata section if PLAT_RO_XLAT_TABLES=1,
 * or into the bss section otherwise.
 */
#define BASE_XLAT_TABLE \
	. = ALIGN(16); \
	__BASE_XLAT_TABLE_START__ = .; \
	*(.base_xlat_table) \
	__BASE_XLAT_TABLE_END__ = .;

#if PLAT_RO_XLAT_TABLES
#define BASE_XLAT_TABLE_RO BASE_XLAT_TABLE
#define BASE_XLAT_TABLE_BSS
#else
#define BASE_XLAT_TABLE_RO
#define BASE_XLAT_TABLE_BSS BASE_XLAT_TABLE
#endif

#define RODATA_COMMON \
	RT_SVC_DESCS \
	FCONF_POPULATOR \
	PMF_SVC_DESCS \
	PARSER_LIB_DESCS \
	CPU_OPS \
	GOT \
	BASE_XLAT_TABLE_RO \
	EL3_LP_DESCS \
	SPMD_LP_DESCS
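
/*
 * Illustrative use of RODATA_COMMON (a sketch only, not taken verbatim from
 * any particular BL image): a linker script would typically expand it inside
 * its read-only output section, e.g.
 *
 *   .rodata . : {
 *       __RODATA_START__ = .;
 *       *(SORT_BY_ALIGNMENT(.rodata*))
 *       RODATA_COMMON
 *       . = ALIGN(PAGE_SIZE);
 *       __RODATA_END__ = .;
 *   } >RAM
 *
 * The section symbols and the memory region name are placeholders.
 */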

/*
 * .data must be placed at a lower address than the stacks if the stack
 * protector is enabled. Alternatively, the .data.stack_protector_canary
 * section can be placed independently of the main .data section.
 */
#define DATA_SECTION \
	.data . : ALIGN(DATA_ALIGN) { \
		__DATA_START__ = .; \
		*(SORT_BY_ALIGNMENT(.data*)) \
		__DATA_END__ = .; \
	}
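
/*
 * A hypothetical sketch of how the separate canary section mentioned above
 * could be populated from C (the attribute helper and variable placement are
 * assumptions for illustration, not part of this header):
 *
 *   u_register_t __stack_chk_guard __section(".data.stack_protector_canary");
 */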

/*
 * .rela.dyn needs to come after .data for the readelf utility to parse
 * this section correctly.
 */
#if __aarch64__
#define RELA_DYN_NAME .rela.dyn
#define RELOC_SECTIONS_PATTERN *(.rela*)
#else
#define RELA_DYN_NAME .rel.dyn
#define RELOC_SECTIONS_PATTERN *(.rel*)
#endif

#define RELA_SECTION \
	RELA_DYN_NAME : ALIGN(STRUCT_ALIGN) { \
		__RELA_START__ = .; \
		RELOC_SECTIONS_PATTERN \
		__RELA_END__ = .; \
	}

#if !(defined(IMAGE_BL31) && RECLAIM_INIT_CODE)
#define STACK_SECTION \
	.stacks (NOLOAD) : { \
		__STACKS_START__ = .; \
		*(.tzfw_normal_stacks) \
		__STACKS_END__ = .; \
	}
#endif

/*
 * If the BL image doesn't use any bakery locks, __PERCPU_BAKERY_LOCK_SIZE__
 * will be zero. For this reason, the only two valid values for
 * __PERCPU_BAKERY_LOCK_SIZE__ are 0 or the platform-defined value
 * PLAT_PERCPU_BAKERY_LOCK_SIZE.
 */
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
#define BAKERY_LOCK_SIZE_CHECK \
	ASSERT((__PERCPU_BAKERY_LOCK_SIZE__ == 0) || \
	       (__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE), \
	       "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#else
#define BAKERY_LOCK_SIZE_CHECK
#endif

/*
 * Bakery locks are stored in normal .bss memory.
 *
 * Each lock's data is spread across multiple cache lines, one per CPU,
 * but multiple locks can share the same cache line.
 * The compiler will allocate enough memory for one CPU's bakery locks;
 * the remaining cache lines are allocated by the linker script.
 */
#if !USE_COHERENT_MEM
#define BAKERY_LOCK_NORMAL \
	. = ALIGN(CACHE_WRITEBACK_GRANULE); \
	__BAKERY_LOCK_START__ = .; \
	__PERCPU_BAKERY_LOCK_START__ = .; \
	*(.bakery_lock) \
	. = ALIGN(CACHE_WRITEBACK_GRANULE); \
	__PERCPU_BAKERY_LOCK_END__ = .; \
	__PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__); \
	. = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
	__BAKERY_LOCK_END__ = .; \
	BAKERY_LOCK_SIZE_CHECK
#else
#define BAKERY_LOCK_NORMAL
#endif
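
/*
 * Worked example (illustrative numbers only): with CACHE_WRITEBACK_GRANULE
 * set to 64 and PLATFORM_CORE_COUNT set to 4, if the .bakery_lock input
 * sections occupy 100 bytes, __PERCPU_BAKERY_LOCK_SIZE__ rounds up to 128
 * bytes and the ". = . + ..." statement reserves a further 3 * 128 = 384
 * bytes, so each core gets its own cache-line-aligned copy of the locks.
 */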

/*
 * Time-stamps are stored in normal .bss memory.
 *
 * The compiler will allocate enough memory for one CPU's time-stamps;
 * the remaining memory for other CPUs is allocated by the linker script.
 */
#define PMF_TIMESTAMP \
	. = ALIGN(CACHE_WRITEBACK_GRANULE); \
	__PMF_TIMESTAMP_START__ = .; \
	KEEP(*(.pmf_timestamp_array)) \
	. = ALIGN(CACHE_WRITEBACK_GRANULE); \
	__PMF_PERCPU_TIMESTAMP_END__ = .; \
	__PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__); \
	. = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
	__PMF_TIMESTAMP_END__ = .;

/*
 * The .bss section gets initialised to 0 at runtime.
 * Its base address has a larger alignment (BSS_ALIGN) for better performance
 * of the zero-initialisation code.
 */
#define BSS_SECTION \
	.bss (NOLOAD) : ALIGN(BSS_ALIGN) { \
		__BSS_START__ = .; \
		*(SORT_BY_ALIGNMENT(.bss*)) \
		*(COMMON) \
		BAKERY_LOCK_NORMAL \
		PMF_TIMESTAMP \
		BASE_XLAT_TABLE_BSS \
		__BSS_END__ = .; \
	}

/*
 * The .xlat_table section is for full, aligned page tables (4K).
 * Removing them from .bss avoids forcing 4K alignment on
 * the .bss section. The tables are initialized to zero by the translation
 * tables library.
 */
#define XLAT_TABLE_SECTION \
	.xlat_table (NOLOAD) : { \
		__XLAT_TABLE_START__ = .; \
		*(.xlat_table) \
		__XLAT_TABLE_END__ = .; \
	}
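
/*
 * Illustrative use of the NOLOAD section macros (a sketch only; the memory
 * region name is a placeholder and per-image linker scripts may differ):
 *
 *   DATA_SECTION >RAM
 *   RELA_SECTION >RAM
 *   STACK_SECTION >RAM
 *   BSS_SECTION >RAM
 *   XLAT_TABLE_SECTION >RAM
 */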

#endif /* BL_COMMON_LD_H */