/*
 * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef BL_COMMON_LD_H
#define BL_COMMON_LD_H

#include <platform_def.h>

#ifdef __aarch64__
#define STRUCT_ALIGN	8
#define BSS_ALIGN	16
#else
#define STRUCT_ALIGN	4
#define BSS_ALIGN	8
#endif

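/*
 * Each of the following macros gathers one category of descriptor
 * structures into a contiguous, bounded region. The pattern is always
 * the same: align to STRUCT_ALIGN, define a start symbol, KEEP() the
 * input sections so link-time garbage collection (--gc-sections)
 * cannot discard them, then define an end symbol. Firmware code walks
 * the array between the two symbols. A minimal sketch of a consumer
 * (illustrative only; handle_descriptor() is a hypothetical helper,
 * not part of this header):
 *
 *   extern rt_svc_desc_t __RT_SVC_DESCS_START__[], __RT_SVC_DESCS_END__[];
 *
 *   for (rt_svc_desc_t *desc = __RT_SVC_DESCS_START__;
 *        desc < __RT_SVC_DESCS_END__; desc++)
 *           handle_descriptor(desc);
 */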
#define CPU_OPS						\
	. = ALIGN(STRUCT_ALIGN);			\
	__CPU_OPS_START__ = .;				\
	KEEP(*(cpu_ops))				\
	__CPU_OPS_END__ = .;

#define PARSER_LIB_DESCS				\
	. = ALIGN(STRUCT_ALIGN);			\
	__PARSER_LIB_DESCS_START__ = .;			\
	KEEP(*(.img_parser_lib_descs))			\
	__PARSER_LIB_DESCS_END__ = .;

#define RT_SVC_DESCS					\
	. = ALIGN(STRUCT_ALIGN);			\
	__RT_SVC_DESCS_START__ = .;			\
	KEEP(*(rt_svc_descs))				\
	__RT_SVC_DESCS_END__ = .;

#define PMF_SVC_DESCS					\
	. = ALIGN(STRUCT_ALIGN);			\
	__PMF_SVC_DESCS_START__ = .;			\
	KEEP(*(pmf_svc_descs))				\
	__PMF_SVC_DESCS_END__ = .;

#define FCONF_POPULATOR					\
	. = ALIGN(STRUCT_ALIGN);			\
	__FCONF_POPULATOR_START__ = .;			\
	KEEP(*(.fconf_populator))			\
	__FCONF_POPULATOR_END__ = .;

/*
 * Keep the .got section in the RO section as it is patched prior to enabling
 * the MMU and having the .got in RO is better for security. GOT is a table of
 * addresses so ensure pointer size alignment.
 */
#define GOT						\
	. = ALIGN(STRUCT_ALIGN);			\
	__GOT_START__ = .;				\
	*(.got)						\
	__GOT_END__ = .;

/*
 * The base xlat table
 *
 * It is put into the rodata section if PLAT_RO_XLAT_TABLES=1,
 * or into the bss section otherwise.
 */
#define BASE_XLAT_TABLE					\
	. = ALIGN(16);					\
	*(base_xlat_table)

#if PLAT_RO_XLAT_TABLES
#define BASE_XLAT_TABLE_RO	BASE_XLAT_TABLE
#define BASE_XLAT_TABLE_BSS
#else
#define BASE_XLAT_TABLE_RO
#define BASE_XLAT_TABLE_BSS	BASE_XLAT_TABLE
#endif

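/*
 * RODATA_COMMON gathers the read-only data groups above so that each
 * BL image's linker script can pull them into its .rodata output
 * section with a single macro. A minimal sketch of such a use,
 * assuming an output region named RAM (the region name and the
 * surrounding section layout are examples and vary per image):
 *
 *   .rodata : {
 *           __RODATA_START__ = .;
 *           *(SORT_BY_ALIGNMENT(.rodata*))
 *           RODATA_COMMON
 *           __RODATA_END__ = .;
 *   } >RAM
 */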
#define RODATA_COMMON					\
	RT_SVC_DESCS					\
	FCONF_POPULATOR					\
	PMF_SVC_DESCS					\
	PARSER_LIB_DESCS				\
	CPU_OPS						\
	GOT						\
	BASE_XLAT_TABLE_RO

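/*
 * Collect the per-CPU stacks, which the firmware places in the
 * tzfw_normal_stacks input section, into a dedicated NOLOAD output
 * section bounded by __STACKS_START__ and __STACKS_END__.
 */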
#define STACK_SECTION					\
	stacks (NOLOAD) : {				\
		__STACKS_START__ = .;			\
		*(tzfw_normal_stacks)			\
		__STACKS_END__ = .;			\
	}

/*
 * If a BL image does not use any bakery locks then
 * __PERCPU_BAKERY_LOCK_SIZE__ will be zero. For this reason, the only
 * two valid values for __PERCPU_BAKERY_LOCK_SIZE__ are 0 or the
 * platform-defined value PLAT_PERCPU_BAKERY_LOCK_SIZE.
 */
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
#define BAKERY_LOCK_SIZE_CHECK				\
	ASSERT((__PERCPU_BAKERY_LOCK_SIZE__ == 0) ||	\
	       (__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE), \
	       "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#else
#define BAKERY_LOCK_SIZE_CHECK
#endif

/*
 * Bakery locks are stored in normal .bss memory.
 *
 * Each lock's data is spread across multiple cache lines, one per CPU,
 * but multiple locks can share the same cache line.
 * The compiler will allocate enough memory for one CPU's bakery locks;
 * the remaining cache lines are allocated by the linker script.
 */
#if !USE_COHERENT_MEM
#define BAKERY_LOCK_NORMAL				\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__BAKERY_LOCK_START__ = .;			\
	__PERCPU_BAKERY_LOCK_START__ = .;		\
	*(bakery_lock)					\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__PERCPU_BAKERY_LOCK_END__ = .;			\
	__PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__); \
	. = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
	__BAKERY_LOCK_END__ = .;			\
	BAKERY_LOCK_SIZE_CHECK
#else
#define BAKERY_LOCK_NORMAL
#endif
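/*
 * Illustrative sizing, assuming CACHE_WRITEBACK_GRANULE = 64 and
 * PLATFORM_CORE_COUNT = 4 (both are platform choices): if one CPU's
 * bakery locks occupy 72 bytes, the ALIGN() directives round the
 * per-CPU region up to 128 bytes, so __PERCPU_BAKERY_LOCK_SIZE__ is
 * 128 and the linker reserves a further 3 * 128 = 384 bytes for the
 * remaining CPUs, giving a 512-byte region between
 * __BAKERY_LOCK_START__ and __BAKERY_LOCK_END__.
 */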

/*
 * Time-stamps are stored in normal .bss memory.
 *
 * The compiler will allocate enough memory for one CPU's time-stamps;
 * the remaining memory for other CPUs is allocated by the linker
 * script.
 */
#define PMF_TIMESTAMP					\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__PMF_TIMESTAMP_START__ = .;			\
	KEEP(*(pmf_timestamp_array))			\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__PMF_PERCPU_TIMESTAMP_END__ = .;		\
	__PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__); \
	. = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
	__PMF_TIMESTAMP_END__ = .;

/*
 * The .bss section gets initialised to 0 at runtime.
 * Its base address has a larger alignment for better performance of the
 * zero-initialization code.
 */
#define BSS_SECTION					\
	.bss (NOLOAD) : ALIGN(BSS_ALIGN) {		\
		__BSS_START__ = .;			\
		*(SORT_BY_ALIGNMENT(.bss*))		\
		*(COMMON)				\
		BAKERY_LOCK_NORMAL			\
		PMF_TIMESTAMP				\
		BASE_XLAT_TABLE_BSS			\
		__BSS_END__ = .;			\
	}

/*
 * The xlat_table section is for full, aligned page tables (4K).
 * Removing them from .bss avoids forcing 4K alignment on
 * the .bss section. The tables are initialized to zero by the translation
 * tables library.
 */
#define XLAT_TABLE_SECTION				\
	xlat_table (NOLOAD) : {				\
		*(xlat_table)				\
	}
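/*
 * The stack, .bss and xlat_table macros expand to complete output
 * section definitions, so an image's linker script instantiates them
 * at the top level of its SECTIONS command. A minimal sketch, assuming
 * a single output region named RAM (the region name and memory map are
 * hypothetical and differ per image and platform):
 *
 *   SECTIONS {
 *           ...
 *           STACK_SECTION >RAM
 *           BSS_SECTION >RAM
 *           XLAT_TABLE_SECTION >RAM
 *           ...
 *   }
 */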

#endif /* BL_COMMON_LD_H */