/*
 * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef BL_COMMON_LD_H
#define BL_COMMON_LD_H

#include <platform_def.h>

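/*
 * The descriptor tables collected below hold pointer-sized entries, so
 * STRUCT_ALIGN matches the pointer size. BSS_ALIGN is larger so that the
 * zero-initialization code (see BSS_SECTION) can use wider stores.
 */
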
#ifdef __aarch64__
#define STRUCT_ALIGN 8
#define BSS_ALIGN 16
#else
#define STRUCT_ALIGN 4
#define BSS_ALIGN 8
#endif

#ifndef DATA_ALIGN
#define DATA_ALIGN 1
#endif

#define CPU_OPS \
        . = ALIGN(STRUCT_ALIGN); \
        __CPU_OPS_START__ = .; \
        KEEP(*(cpu_ops)) \
        __CPU_OPS_END__ = .;
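
/*
 * Illustrative sketch of how the START/END symbols can be consumed.
 * TF-A's real lookup lives in lib/cpus; cpu_ops_entry_t is a
 * hypothetical view of one entry, used here only to show the pattern:
 *
 *   extern cpu_ops_entry_t __CPU_OPS_START__[], __CPU_OPS_END__[];
 *   for (cpu_ops_entry_t *ops = __CPU_OPS_START__;
 *        ops < __CPU_OPS_END__; ops++) {
 *           ...match the entry against the running CPU's MIDR...
 *   }
 */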

#define PARSER_LIB_DESCS \
        . = ALIGN(STRUCT_ALIGN); \
        __PARSER_LIB_DESCS_START__ = .; \
        KEEP(*(.img_parser_lib_descs)) \
        __PARSER_LIB_DESCS_END__ = .;

#define RT_SVC_DESCS \
        . = ALIGN(STRUCT_ALIGN); \
        __RT_SVC_DESCS_START__ = .; \
        KEEP(*(rt_svc_descs)) \
        __RT_SVC_DESCS_END__ = .;

#define PMF_SVC_DESCS \
        . = ALIGN(STRUCT_ALIGN); \
        __PMF_SVC_DESCS_START__ = .; \
        KEEP(*(pmf_svc_descs)) \
        __PMF_SVC_DESCS_END__ = .;

#define FCONF_POPULATOR \
        . = ALIGN(STRUCT_ALIGN); \
        __FCONF_POPULATOR_START__ = .; \
        KEEP(*(.fconf_populator)) \
        __FCONF_POPULATOR_END__ = .;

/*
 * Keep the .got section in the RO region: it is patched prior to enabling
 * the MMU, and keeping it read-only afterwards is better for security.
 * The GOT is a table of addresses, so ensure pointer-size alignment.
 */
#define GOT \
        . = ALIGN(STRUCT_ALIGN); \
        __GOT_START__ = .; \
        *(.got) \
        __GOT_END__ = .;
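
/*
 * Sketch of the patching referred to above. TF-A performs it in early
 * assembly as part of the PIE fixup; this C rendering is illustrative
 * only, and run_base/link_base are hypothetical names:
 *
 *   extern uint64_t __GOT_START__[], __GOT_END__[];
 *   for (uint64_t *slot = __GOT_START__; slot < __GOT_END__; slot++)
 *           *slot += run_base - link_base;
 */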

/*
 * The base xlat table
 *
 * It is put into the rodata section if PLAT_RO_XLAT_TABLES=1,
 * or into the bss section otherwise.
 */
#define BASE_XLAT_TABLE \
        . = ALIGN(16); \
        __BASE_XLAT_TABLE_START__ = .; \
        *(base_xlat_table) \
        __BASE_XLAT_TABLE_END__ = .;

#if PLAT_RO_XLAT_TABLES
#define BASE_XLAT_TABLE_RO BASE_XLAT_TABLE
#define BASE_XLAT_TABLE_BSS
#else
#define BASE_XLAT_TABLE_RO
#define BASE_XLAT_TABLE_BSS BASE_XLAT_TABLE
#endif
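
/*
 * With PLAT_RO_XLAT_TABLES=1 the base table is emitted through
 * RODATA_COMMON below and so ends up in the read-only image; otherwise
 * BSS_SECTION pulls it into .bss and the RO alias expands to nothing.
 */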

#define RODATA_COMMON \
        RT_SVC_DESCS \
        FCONF_POPULATOR \
        PMF_SVC_DESCS \
        PARSER_LIB_DESCS \
        CPU_OPS \
        GOT \
        BASE_XLAT_TABLE_RO
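
/*
 * Usage sketch: a BL image's linker script typically expands this inside
 * its read-only output section, along these lines (illustrative only;
 * output section and memory region names vary per image and platform):
 *
 *   .rodata . : {
 *           __RODATA_START__ = .;
 *           *(SORT_BY_ALIGNMENT(.rodata*))
 *           RODATA_COMMON
 *           __RODATA_END__ = .;
 *   } >RAM
 */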

/*
 * .data must be placed at a lower address than the stacks if the stack
 * protector is enabled. Alternatively, the .data.stack_protector_canary
 * section can be placed independently of the main .data section.
 */
#define DATA_SECTION \
        .data . : ALIGN(DATA_ALIGN) { \
                __DATA_START__ = .; \
                *(SORT_BY_ALIGNMENT(.data*)) \
                __DATA_END__ = .; \
        }
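
/*
 * Illustrative use in an image script (memory region names are examples,
 * not taken from any one platform):
 *
 *   DATA_SECTION >RAM AT>ROM
 *
 * To satisfy the stack-protector constraint above, either expand this
 * before the stacks section or give .data.stack_protector_canary its own
 * output section at a lower, fixed address.
 */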

/*
 * .rela.dyn needs to come after .data for the readelf utility to parse
 * this section correctly.
 */
#if __aarch64__
#define RELA_DYN_NAME .rela.dyn
#define RELOC_SECTIONS_PATTERN *(.rela*)
#else
#define RELA_DYN_NAME .rel.dyn
#define RELOC_SECTIONS_PATTERN *(.rel*)
#endif

#define RELA_SECTION \
        RELA_DYN_NAME : ALIGN(STRUCT_ALIGN) { \
                __RELA_START__ = .; \
                RELOC_SECTIONS_PATTERN \
                __RELA_END__ = .; \
        }
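
/*
 * Sketch of how the PIE fixup consumes this region. TF-A does the real
 * fixup in early assembly; the C rendering below is illustrative only,
 * with delta as a hypothetical name for the difference between run-time
 * and link-time base, and an AArch64 Elf64_Rela layout assumed:
 *
 *   typedef struct { uint64_t r_offset, r_info; int64_t r_addend; } rela_t;
 *   extern rela_t __RELA_START__[], __RELA_END__[];
 *   enum { R_AARCH64_RELATIVE = 1027 };
 *
 *   for (rela_t *r = __RELA_START__; r < __RELA_END__; r++)
 *           if ((uint32_t)r->r_info == R_AARCH64_RELATIVE)
 *                   *(uint64_t *)(r->r_offset + delta) = r->r_addend + delta;
 */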
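
/*
 * When BL31 is built with RECLAIM_INIT_CODE, its own linker script
 * places the stacks (reusing memory reclaimed from init code), so the
 * generic definition below is suppressed.
 */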
#if !(defined(IMAGE_BL31) && RECLAIM_INIT_CODE)
#define STACK_SECTION \
        stacks (NOLOAD) : { \
                __STACKS_START__ = .; \
                *(tzfw_normal_stacks) \
                __STACKS_END__ = .; \
        }
#endif

/*
 * If a BL image doesn't use any bakery locks, __PERCPU_BAKERY_LOCK_SIZE__
 * will be zero. For this reason, the only two valid values for
 * __PERCPU_BAKERY_LOCK_SIZE__ are 0 and the platform-defined value
 * PLAT_PERCPU_BAKERY_LOCK_SIZE.
 */
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
#define BAKERY_LOCK_SIZE_CHECK \
        ASSERT((__PERCPU_BAKERY_LOCK_SIZE__ == 0) || \
               (__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE), \
               "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#else
#define BAKERY_LOCK_SIZE_CHECK
#endif

/*
 * Bakery locks are stored in normal .bss memory.
 *
 * Each lock's data is spread across multiple cache lines, one per CPU,
 * but multiple locks can share the same cache line. The compiler
 * allocates enough memory for one CPU's bakery locks; the remaining
 * cache lines are allocated by the linker script.
 */
#if !USE_COHERENT_MEM
#define BAKERY_LOCK_NORMAL \
        . = ALIGN(CACHE_WRITEBACK_GRANULE); \
        __BAKERY_LOCK_START__ = .; \
        __PERCPU_BAKERY_LOCK_START__ = .; \
        *(bakery_lock) \
        . = ALIGN(CACHE_WRITEBACK_GRANULE); \
        __PERCPU_BAKERY_LOCK_END__ = .; \
        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__); \
        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
        __BAKERY_LOCK_END__ = .; \
        BAKERY_LOCK_SIZE_CHECK
#else
#define BAKERY_LOCK_NORMAL
#endif
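
/*
 * Worked example with assumed values: if CACHE_WRITEBACK_GRANULE is 64,
 * PLATFORM_CORE_COUNT is 4 and the compiler emits 40 bytes of
 * bakery_lock data, the second ALIGN rounds the per-CPU chunk up to 64
 * bytes and the linker reserves 64 * 3 = 192 further bytes, one
 * cache-line-aligned copy per remaining CPU. CPU n's copy then starts at
 * __PERCPU_BAKERY_LOCK_START__ + n * __PERCPU_BAKERY_LOCK_SIZE__.
 */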

/*
 * Time-stamps are stored in normal .bss memory.
 *
 * The compiler allocates enough memory for one CPU's time-stamps; the
 * remaining memory for the other CPUs is allocated by the linker script.
 */
#define PMF_TIMESTAMP \
        . = ALIGN(CACHE_WRITEBACK_GRANULE); \
        __PMF_TIMESTAMP_START__ = .; \
        KEEP(*(pmf_timestamp_array)) \
        . = ALIGN(CACHE_WRITEBACK_GRANULE); \
        __PMF_PERCPU_TIMESTAMP_END__ = .; \
        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__); \
        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
        __PMF_TIMESTAMP_END__ = .;
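
/*
 * The layout mirrors the bakery locks above: one cache-line-aligned
 * block of timestamps per CPU. Illustratively (not the exact PMF
 * helper), CPU n's block starts at
 *
 *   __PMF_TIMESTAMP_START__ + n * __PERCPU_TIMESTAMP_SIZE__
 */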
189
Masahiro Yamadadd053b62020-03-26 13:16:33 +0900190
191/*
192 * The .bss section gets initialised to 0 at runtime.
193 * Its base address has bigger alignment for better performance of the
194 * zero-initialization code.
195 */
#define BSS_SECTION \
        .bss (NOLOAD) : ALIGN(BSS_ALIGN) { \
                __BSS_START__ = .; \
                *(SORT_BY_ALIGNMENT(.bss*)) \
                *(COMMON) \
                BAKERY_LOCK_NORMAL \
                PMF_TIMESTAMP \
                BASE_XLAT_TABLE_BSS \
                __BSS_END__ = .; \
        }
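
/*
 * Illustrative use of the generated symbols. TF-A zeroes .bss with its
 * zeromem routine during early boot; this sketch only shows the bounds:
 *
 *   extern char __BSS_START__[], __BSS_END__[];
 *   memset(__BSS_START__, 0, (size_t)(__BSS_END__ - __BSS_START__));
 */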

/*
 * The xlat_table section is for full, aligned page tables (4K).
 * Removing them from .bss avoids forcing 4K alignment on
 * the .bss section. The tables are initialized to zero by the translation
 * tables library.
 */
#define XLAT_TABLE_SECTION \
        xlat_table (NOLOAD) : { \
                __XLAT_TABLE_START__ = .; \
                *(xlat_table) \
                __XLAT_TABLE_END__ = .; \
        }
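
/*
 * Example of a definition that lands in this section (a sketch; the real
 * tables are allocated by the translation tables library, and the array
 * size shown is a placeholder for one 4K table of 512 8-byte entries):
 *
 *   static uint64_t table[512]
 *           __aligned(4096) __attribute__((__section__("xlat_table")));
 */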

#endif /* BL_COMMON_LD_H */