/*
 * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

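/*
 * SP_MIN is the minimal AArch32 Secure Payload providing the EL3 runtime
 * firmware (BL32). The image entry point is its exception vector table.
 */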
OUTPUT_FORMAT(elf32-littlearm)
OUTPUT_ARCH(arm)
ENTRY(sp_min_vector_table)

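/*
 * The whole image is linked into a single RAM region bounded by the
 * platform-defined BL32_BASE and BL32_LIMIT.
 */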
MEMORY {
    RAM (rwx): ORIGIN = BL32_BASE, LENGTH = BL32_LIMIT - BL32_BASE
}

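/*
 * Platforms may provide an extra linker script fragment (plat_sp_min.ld.S),
 * for example to define platform-specific memory regions or sections.
 */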
#ifdef PLAT_SP_MIN_EXTRA_LD_SCRIPT
#include <plat_sp_min.ld.S>
#endif

SECTIONS
{
    . = BL32_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL32_BASE address is not aligned on a page boundary.")

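/*
 * With SEPARATE_CODE_AND_RODATA=1, code and read-only data are emitted as
 * separate page-aligned sections so they can be mapped with different memory
 * attributes; otherwise both are combined into a single read-only section.
 */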
#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        *entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    /* .ARM.extab and .ARM.exidx are only added because Clang needs them */
    .ARM.extab . : {
        *(.ARM.extab* .gnu.linkonce.armextab.*)
    } >RAM

    .ARM.exidx . : {
        *(.ARM.exidx* .gnu.linkonce.armexidx.*)
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

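        /*
         * RODATA_COMMON (from common/bl_common.ld.h) gathers read-only data
         * shared by all BL images, e.g. the cpu_ops and runtime service
         * descriptors.
         */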
        RODATA_COMMON

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        *entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        RODATA_COMMON

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)
        __RO_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable. RW data from the next section must not
         * creep in. Ensure the rest of the current memory block is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

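    /*
     * __CPU_OPS_START__/__CPU_OPS_END__ bound the cpu_ops descriptors placed
     * by RODATA_COMMON; the link fails here if no CPU library was linked in.
     */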
    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")
    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

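    /*
     * DATA_SECTION (from common/bl_common.ld.h) provides the .data section
     * layout shared by all BL images.
     */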
    DATA_SECTION >RAM

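    /*
     * BL32_PROGBITS_LIMIT, when defined by the platform, caps the loadable
     * (progbits) part of the image; the NOLOAD sections below are not
     * counted against it.
     */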
#ifdef BL32_PROGBITS_LIMIT
    ASSERT(. <= BL32_PROGBITS_LIMIT, "BL32 progbits has exceeded its limit.")
#endif

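    /*
     * The remaining common sections from common/bl_common.ld.h: boot stacks,
     * .bss and the translation tables. These occupy memory but are not part
     * of the loaded image contents.
     */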
    STACK_SECTION >RAM
    BSS_SECTION >RAM
    XLAT_TABLE_SECTION >RAM

    __BSS_SIZE__ = SIZEOF(.bss);

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data. This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory
         *
         * Each lock's data is contiguous and fully allocated by the compiler
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory. Unexpected data must not creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >RAM

    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;

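    /* __BL32_END__ marks the end of the image's total memory footprint. */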
    __BL32_END__ = .;
}