#! armclang -E -x c

/*
 * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#define PAGE_SIZE (1024 * 4)

LR_START BL31_BASE
{
	__BL31_START__ +0 FIXED EMPTY 0
	{
		/* placeholder */
	}

	/* BL31_BASE address must be aligned on a page boundary. */
	ScatterAssert((ImageBase(__BL31_START__) AND 0xFFF) == 0)
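	/*
	 * Note: 0xFFF masks the low 12 bits of the address, so this check
	 * passes exactly when BL31_BASE is a multiple of the 4K PAGE_SIZE
	 * defined above.
	 */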
}

LR_TEXT BL31_BASE
{
	__TEXT__ +0 FIXED
	{
		*(:gdef:bl31_entrypoint, +FIRST)
		*(.text*)
		*(.vectors)
		.ANY1(+RO-CODE)
	}

	__TEXT_EPILOGUE__ AlignExpr(+0, PAGE_SIZE) FIXED EMPTY 0
	{
		/* section delimiter */
	}
}

LR_RO_DATA +0
{
	__RODATA__ AlignExpr(ImageLimit(LR_TEXT), 0) FIXED
	{
		*(.rodata*)
		.ANY2(+RO-DATA)
	}

	/* Ensure 8-byte alignment for the descriptors and that they are included in the image */
	__RT_SVC_DESCS__ AlignExpr(ImageLimit(__RODATA__), 8) FIXED
	{
		*(rt_svc_descs)
	}
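
	/*
	 * The rt_svc_descs input section is populated from C code: each
	 * runtime service emits its descriptor into a section of that name,
	 * and the selector above is what keeps those otherwise unreferenced
	 * objects in the image. As an illustrative sketch only (this is not
	 * the exact TF-A macro, and rt_svc_desc_t is assumed here to be the
	 * descriptor type), such an entry could be emitted as:
	 *
	 *   static const rt_svc_desc_t my_svc_desc
	 *       __attribute__((__section__("rt_svc_descs"), __used__)) = {
	 *           // descriptor fields
	 *   };
	 */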

#if ENABLE_PMF
	/* Ensure 8-byte alignment for the descriptors and that they are included in the image */
	__PMF_SVC_DESCS__ AlignExpr(ImageLimit(__RT_SVC_DESCS__), 8) FIXED
	{
		*(pmf_svc_descs)
	}
#endif /* ENABLE_PMF */

	/*
	 * Ensure 8-byte alignment for cpu_ops so that its fields are also
	 * aligned.
	 */
	__CPU_OPS__ AlignExpr(+0, 8) FIXED
	{
		*(cpu_ops)
	}

	/*
	 * Keep the .got section in the RO region. It is patched prior to
	 * enabling the MMU, and keeping the GOT read-only afterwards is
	 * better for security. The GOT is a table of addresses, so ensure
	 * 8-byte alignment.
	 */
	__GOT__ AlignExpr(ImageLimit(__CPU_OPS__), 8) FIXED
	{
		*(.got)
	}
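
	/*
	 * Note (not enforced by this scatter file): when BL31 is built as a
	 * position-independent image, these GOT entries are expected to be
	 * fixed up using the dynamic relocations gathered in the __RELA__
	 * region below, before the MMU is enabled.
	 */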

	/* Place pubsub sections for events */
	__PUBSUB_EVENTS__ AlignExpr(+0, 8) EMPTY 0
	{
		/* placeholder */
	}

#include <lib/el3_runtime/pubsub_events.h>

	__RODATA_EPILOGUE__ AlignExpr(+0, PAGE_SIZE) FIXED EMPTY 0
	{
		/* section delimiter */
	}
}

	/* cpu_ops must always be defined */
	ScatterAssert(ImageLength(__CPU_OPS__) > 0)

#if SPM_MM
LR_SPM +0
{
	/*
	 * Exception vectors of the SPM shim layer. They must be aligned to a
	 * 2K boundary, but they need to be placed in a separate page so that
	 * individual memory permissions can be applied to them, so the actual
	 * alignment needed is 4K.
	 *
	 * There is no need to include this in the RO section of BL31 because
	 * BL31 does not need to access it.
	 */
	__SPM_SHIM_EXCEPTIONS__ AlignExpr(ImageLimit(LR_RO_DATA), PAGE_SIZE) FIXED
	{
		*(.spm_shim_exceptions)
	}

	__SPM_SHIM_EXCEPTIONS_EPILOGUE__ AlignExpr(ImageLimit(__SPM_SHIM_EXCEPTIONS__), PAGE_SIZE) FIXED
	{
		/* placeholder */
	}
}
#endif /* SPM_MM */

LR_RW_DATA +0
{
	__DATA__ AlignExpr(+0, 16) FIXED
	{
		*(.data*)
		*(.constdata)
		*(locale$$data)
	}
}

LR_RELA +0
{
	/*
	 * .rela.dyn needs to come after .data for the readelf utility to parse
	 * this section correctly. Ensure 8-byte alignment so that the fields
	 * of the RELA data structure are aligned.
	 */
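	/*
	 * For reference, each AArch64 RELA entry (Elf64_Rela) consists of
	 * three 64-bit fields (r_offset, r_info, r_addend), so 8-byte
	 * alignment of this region aligns every field.
	 */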
	__RELA__ AlignExpr(ImageLimit(LR_RW_DATA), 8) FIXED
	{
		*(.rela.dyn)
	}
}

#ifdef BL31_PROGBITS_LIMIT
	/* BL31 progbits has exceeded its limit. */
	ScatterAssert(ImageLimit(LR_RELA) <= BL31_PROGBITS_LIMIT)
#endif

LR_STACKS +0
{
	__STACKS__ AlignExpr(+0, 64) FIXED
	{
		*(tzfw_normal_stacks)
	}
}

#define __BAKERY_LOCK_SIZE__ (ImageLimit(__BAKERY_LOCKS_EPILOGUE__) - \
			      ImageBase(__BAKERY_LOCKS__))
#define BAKERY_LOCK_SIZE (__BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1))
#define __PMF_TIMESTAMP_SIZE__ (ImageLimit(__PMF_TIMESTAMP__) - \
				ImageBase(__PMF_TIMESTAMP__))
#define PER_CPU_TIMESTAMP_SIZE (__PMF_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1))
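
/*
 * Worked example with illustrative values (not taken from this file): with a
 * CACHE_WRITEBACK_GRANULE of 64 bytes, one cache line of bakery locks per
 * core and a PLATFORM_CORE_COUNT of 8, __BAKERY_LOCK_SIZE__ evaluates to 64
 * and BAKERY_LOCK_SIZE reserves 64 * (8 - 1) = 448 bytes of padding for the
 * remaining cores. The PMF timestamp macros follow the same pattern.
 */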

LR_BSS +0
{
	__BSS__ AlignExpr(ImageLimit(LR_STACKS), 256) FIXED
	{
		*(.bss*)
		*(COMDAT)
	}

#if !USE_COHERENT_MEM
	/*
	 * Bakery locks are stored in normal .bss memory.
	 *
	 * Each lock's data is spread across multiple cache lines, one per CPU,
	 * but multiple locks can share the same cache line. The compiler
	 * allocates enough memory for one CPU's bakery locks; the remaining
	 * cache lines are allocated by this scatter file.
	 */
	__BAKERY_LOCKS__ AlignExpr(ImageLimit(__BSS__), CACHE_WRITEBACK_GRANULE) FIXED
	{
		*(bakery_lock)
	}

	__BAKERY_LOCKS_EPILOGUE__ AlignExpr(ImageLimit(__BAKERY_LOCKS__), CACHE_WRITEBACK_GRANULE) FIXED EMPTY 0
	{
		/* section delimiter */
	}

	__PER_CPU_BAKERY_LOCKS__ ImageLimit(__BAKERY_LOCKS_EPILOGUE__) FIXED FILL 0 BAKERY_LOCK_SIZE
	{
		/* padded memory section to store per-CPU bakery locks */
	}
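
	/*
	 * Core 0's locks live in __BAKERY_LOCKS__ above and are allocated by
	 * the compiler; this zero-filled region reserves one further
	 * __BAKERY_LOCK_SIZE__-sized copy per remaining core, so each core's
	 * instance of a lock sits at a fixed per-core offset from the core 0
	 * copy.
	 */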

#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
	/* PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements */
	ScatterAssert(__BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE)
#endif /* PLAT_PERCPU_BAKERY_LOCK_SIZE */
#endif /* !USE_COHERENT_MEM */

#if ENABLE_PMF
	/*
	 * Time-stamps are stored in normal .bss memory.
	 *
	 * The compiler allocates enough memory for one CPU's time-stamps;
	 * the remaining memory for the other CPUs is allocated by this
	 * scatter file.
	 */
	__PMF_TIMESTAMP__ AlignExpr(+0, CACHE_WRITEBACK_GRANULE) FIXED EMPTY CACHE_WRITEBACK_GRANULE
	{
		/* store timestamps in this carved-out memory */
	}

	__PMF_TIMESTAMP_EPILOGUE__ AlignExpr(ImageLimit(__PMF_TIMESTAMP__), CACHE_WRITEBACK_GRANULE) FIXED EMPTY 0
	{
		/*
		 * placeholder to round the end of __PMF_TIMESTAMP__ up to a
		 * CACHE_WRITEBACK_GRANULE boundary
		 */
	}

	__PER_CPU_TIMESTAMPS__ +0 FIXED FILL 0 PER_CPU_TIMESTAMP_SIZE
	{
		/* padded memory section to store per-CPU timestamps */
	}
#endif /* ENABLE_PMF */
}

LR_XLAT_TABLE +0
{
	xlat_table +0 FIXED
	{
		*(xlat_table)
	}
}

#if USE_COHERENT_MEM
LR_COHERENT_RAM +0
{
	/*
	 * The base address of the coherent memory section must be page-aligned
	 * (4K) to guarantee that the coherent data are stored on their own
	 * pages and are not mixed with normal data. This is required to set up
	 * the correct memory attributes for the coherent data page tables.
	 */
	__COHERENT_RAM__ AlignExpr(+0, PAGE_SIZE) FIXED
	{
		/*
		 * Bakery locks are stored in coherent memory.
		 *
		 * Each lock's data is contiguous and fully allocated by the
		 * compiler.
		 */
		*(bakery_lock)
		*(tzfw_coherent_mem)
	}

	__COHERENT_RAM_EPILOGUE_UNALIGNED__ +0 FIXED EMPTY 0
	{
		/* section delimiter */
	}

	/*
	 * Memory page(s) mapped to this section will be marked as device
	 * memory. No other unexpected data must creep in. Ensure the rest
	 * of the current memory page is unused.
	 */
	__COHERENT_RAM_EPILOGUE__ AlignExpr(ImageLimit(__COHERENT_RAM__), PAGE_SIZE) FIXED EMPTY 0
	{
		/* section delimiter */
	}
}
#endif /* USE_COHERENT_MEM */

LR_END +0
{
	__BL31_END__ +0 FIXED EMPTY 0
	{
		/* placeholder */
	}

	/* BL31 image has exceeded its limit. */
	ScatterAssert(ImageLimit(__BL31_END__) <= BL31_LIMIT)
}