/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * Linker script for the BL2 image when it runs at EL3 (BL2_AT_EL3).
 * Lays BL2 out in a single RAM region [BL2_BASE, BL2_LIMIT) and exports
 * the __*_START__/__*_END__ symbols the runtime code relies on.
 */

#include <platform_def.h>
#include <xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl2_entrypoint)

MEMORY {
    RAM (rwx): ORIGIN = BL2_BASE, LENGTH = BL2_LIMIT - BL2_BASE
}


SECTIONS
{
    . = BL2_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL2_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        /*
         * The entry-point object and assembly text are placed first so
         * that the "resident" part of BL2 stays within a single page
         * (checked by the ASSERT after .rodata below).
         */
        __TEXT_RESIDENT_START__ = .;
        *bl2_el3_entrypoint.o(.text*)
        *(.text.asm.*)
        __TEXT_RESIDENT_END__ = .;
        *(.text*)
        *(.vectors)
        . = NEXT(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PARSER_LIB_DESCS_START__ = .;
        KEEP(*(.img_parser_lib_descs))
        __PARSER_LIB_DESCS_END__ = .;

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        . = NEXT(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM

    ASSERT(__TEXT_RESIDENT_END__ - __TEXT_RESIDENT_START__ <= PAGE_SIZE,
           "Resident part of BL2 has exceeded its limit.")
#else
    ro . : {
        __RO_START__ = .;
        __TEXT_RESIDENT_START__ = .;
        *bl2_el3_entrypoint.o(.text*)
        *(.text.asm.*)
        __TEXT_RESIDENT_END__ = .;
        *(.text*)
        *(.rodata*)

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PARSER_LIB_DESCS_START__ = .;
        KEEP(*(.img_parser_lib_descs))
        __PARSER_LIB_DESCS_END__ = .;

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable. No RW data from the next section must
         * creep in. Ensure the rest of the current memory page is unused.
         */
        . = NEXT(PAGE_SIZE);

        __RO_END__ = .;
    } >RAM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    /*
     * .data must be placed at a lower address than the stacks if the stack
     * protector is enabled. Alternatively, the .data.stack_protector_canary
     * section can be placed independently of the main .data section.
     */
    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss : ALIGN(16) {
        __BSS_START__ = .;
        *(SORT_BY_ALIGNMENT(.bss*))
        *(COMMON)
        __BSS_END__ = .;
    } >RAM

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section and eliminates the unnecessary zero init
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data. This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory. No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = NEXT(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL2_END__ = .;

    __BSS_SIZE__ = SIZEOF(.bss);

#if USE_COHERENT_MEM
    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    ASSERT(. <= BL2_LIMIT, "BL2 image has exceeded its limit.")
}