/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/* Pull in BL32_BASE / BL32_LIMIT and other platform constants. */
#include <platform_def.h>

/* SP_MIN is a 32-bit (AArch32) image; entry is its exception vector table. */
OUTPUT_FORMAT(elf32-littlearm)
OUTPUT_ARCH(arm)
ENTRY(sp_min_vector_table)

/*
 * The whole image lives in a single RAM region bounded by the
 * platform-defined BL32 base and limit.
 */
MEMORY {
    RAM (rwx): ORIGIN = BL32_BASE, LENGTH = BL32_LIMIT - BL32_BASE
}
40
41
SECTIONS
{
    . = BL32_BASE;
    /* The MMU setup maps the image per-page; the base must be page-aligned. */
    ASSERT(. == ALIGN(4096),
           "BL32_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    /*
     * Code and read-only data are placed in separate, page-aligned sections
     * so they can be mapped with distinct memory attributes
     * (executable vs. read-only non-executable).
     */
    .text . : {
        __TEXT_START__ = .;
        /* Keep the entrypoint code at the very start of the image. */
        *entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
        /* Pad to the next page so no rodata shares a code page. */
        . = NEXT(4096);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(4);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

        /*
         * Ensure 4-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(4);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /* Pad to the next page so no RW data shares a read-only page. */
        . = NEXT(4096);
        __RODATA_END__ = .;
    } >RAM
#else
    /* Combined read-only section: code, rodata and vectors share pages. */
    ro . : {
        __RO_START__ = .;
        /* Keep the entrypoint code at the very start of the image. */
        *entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(4);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

        /*
         * Ensure 4-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(4);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        *(.vectors)
        __RO_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable. No RW data from the next section must
         * creep in. Ensure the rest of the current memory block is unused.
         */
        . = NEXT(4096);
        __RO_END__ = .;
    } >RAM
#endif

    /* Fail the link if the platform registered no cpu_ops entries. */
    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

    /* Per-CPU stacks; NOLOAD since they need no image contents. */
    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address must be 16-byte aligned.
     */
    .bss (NOLOAD) : ALIGN(16) {
        __BSS_START__ = .;
        *(.bss*)
        *(COMMON)
#if !USE_COHERENT_MEM
        /*
         * Bakery locks are stored in normal .bss memory
         *
         * Each lock's data is spread across multiple cache lines, one per CPU,
         * but multiple locks can share the same cache line.
         * The compiler will allocate enough memory for one CPU's bakery locks,
         * the remaining cache lines are allocated by the linker script
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __BAKERY_LOCK_START__ = .;
        *(bakery_lock)
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(. - __BAKERY_LOCK_START__);
        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __BAKERY_LOCK_END__ = .;
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
        ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
               "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#endif
#endif

#if ENABLE_PMF
        /*
         * Time-stamps are stored in normal .bss memory
         *
         * The compiler will allocate enough memory for one CPU's time-stamps,
         * the remaining memory for other CPU's is allocated by the
         * linker script
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_TIMESTAMP_START__ = .;
        KEEP(*(pmf_timestamp_array))
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_PERCPU_TIMESTAMP_END__ = .;
        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __PMF_TIMESTAMP_END__ = .;
#endif /* ENABLE_PMF */

        __BSS_END__ = .;
    } >RAM

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section and eliminates the unnecessary zero init
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM

    __BSS_SIZE__ = SIZEOF(.bss);

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data. This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(4096) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory
         *
         * Each lock's data is contiguous and fully allocated by the compiler
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory. No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __COHERENT_RAM_END__ = .;
    } >RAM

    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;

    __BL32_END__ = .;
}