/*
 * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(elf32-littlearm)
OUTPUT_ARCH(arm)
ENTRY(sp_min_vector_table)

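/*
 * Note: BL32_BASE and BL32_LIMIT are expected to be provided by the platform
 * (typically via platform_def.h). They bound the single RAM region into which
 * every output section below is placed.
 */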
MEMORY {
    RAM (rwx): ORIGIN = BL32_BASE, LENGTH = BL32_LIMIT - BL32_BASE
}

#ifdef PLAT_SP_MIN_EXTRA_LD_SCRIPT
#include <plat_sp_min.ld.S>
#endif

SECTIONS
{
    . = BL32_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
        "BL32_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
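        /*
         * Match the code from entrypoint.o first so that the sp_min entry
         * code sits at the front of the .text output section, i.e. at the
         * start of the image.
         */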
        *entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    /* .ARM.extab and .ARM.exidx are only added because Clang needs them */
    .ARM.extab . : {
        *(.ARM.extab* .gnu.linkonce.armextab.*)
    } >RAM

    .ARM.exidx . : {
        *(.ARM.exidx* .gnu.linkonce.armexidx.*)
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

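        /*
         * RODATA_COMMON is a macro from bl_common.ld.h; it pulls in the
         * read-only data shared by all BL images (for example the cpu_ops
         * descriptors whose __CPU_OPS_*__ boundary symbols are checked by
         * the ASSERT further down).
         */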
        RODATA_COMMON

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        *entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        RODATA_COMMON

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)
        __RO_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable. No RW data from the next section must
         * creep in. Ensure the rest of the current memory block is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
        "cpu_ops not defined for this platform.")
    /*
     * Define a linker symbol to mark the start of the RW memory area for this
     * image.
     */
    __RW_START__ = .;

    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

#ifdef BL32_PROGBITS_LIMIT
    ASSERT(. <= BL32_PROGBITS_LIMIT, "BL32 progbits has exceeded its limit.")
#endif

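    /*
     * Per-CPU stacks. The tzfw_normal_stacks input section is expected to be
     * populated by the platform's stack declarations.
     */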
    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 8-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss (NOLOAD) : ALIGN(8) {
        __BSS_START__ = .;
        *(.bss*)
        *(COMMON)
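        /*
         * BAKERY_LOCK_NORMAL and PMF_TIMESTAMP are macros from
         * bl_common.ld.h; they reserve space in normal memory for the
         * per-CPU bakery locks (when coherent memory is not used) and for
         * the PMF timestamp storage.
         */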
        BAKERY_LOCK_NORMAL
        PMF_TIMESTAMP
        __BSS_END__ = .;
    } >RAM

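    /*
     * XLAT_TABLE_SECTION is a macro from bl_common.ld.h; it reserves the
     * memory used by the translation table library for its xlat tables.
     */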
    XLAT_TABLE_SECTION >RAM

    __BSS_SIZE__ = SIZEOF(.bss);

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data. This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory.
         *
         * Each lock's data is contiguous and fully allocated by the compiler.
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory. No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >RAM

    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    /*
     * Define a linker symbol to mark the end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;

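    /* Linker symbol marking the end of the whole BL32 image. */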
    __BL32_END__ = .;
}