/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * Linker script for the BL2 (second-stage bootloader) image.
 *
 * Run through the C preprocessor before linking: BL2_BASE, BL2_LIMIT and
 * the build options (SEPARATE_CODE_AND_RODATA, USE_COHERENT_MEM) come from
 * platform_def.h; PAGE_SIZE comes from xlat_tables_defs.h.
 */
#include <platform_def.h>
#include <xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl2_entrypoint)

MEMORY {
    /* The whole image must fit between BL2_BASE and BL2_LIMIT. */
    RAM (rwx): ORIGIN = BL2_BASE, LENGTH = BL2_LIMIT - BL2_BASE
}


SECTIONS
{
    . = BL2_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL2_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    /*
     * Code and read-only data live in separate, page-aligned sections so
     * the MMU can map them with distinct attributes (exec vs. read-only).
     */
    .text . : {
        __TEXT_START__ = .;
        *bl2_entrypoint.o(.text*)       /* entry point must come first */
        *(.text*)
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    /* .ARM.extab and .ARM.exidx are only added because Clang needs them */
    .ARM.extab . : {
        *(.ARM.extab* .gnu.linkonce.armextab.*)
    } >RAM

    .ARM.exidx . : {
        *(.ARM.exidx* .gnu.linkonce.armexidx.*)
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PARSER_LIB_DESCS_START__ = .;
        KEEP(*(.img_parser_lib_descs))
        __PARSER_LIB_DESCS_END__ = .;

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    /* Combined read-only section: code, rodata and vectors share pages. */
    ro . : {
        __RO_START__ = .;
        *bl2_entrypoint.o(.text*)       /* entry point must come first */
        *(.text*)
        *(.rodata*)

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PARSER_LIB_DESCS_START__ = .;
        KEEP(*(.img_parser_lib_descs))
        __PARSER_LIB_DESCS_END__ = .;

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable. No RW data from the next section must
         * creep in. Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    /*
     * .data must be placed at a lower address than the stacks if the stack
     * protector is enabled. Alternatively, the .data.stack_protector_canary
     * section can be placed independently of the main .data section.
     */
    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss : ALIGN(16) {
        __BSS_START__ = .;
        *(SORT_BY_ALIGNMENT(.bss*))
        *(COMMON)
        __BSS_END__ = .;
    } >RAM

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section. The tables are initialized to zero by the translation
     * tables library.
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data. This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory. No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL2_END__ = .;

    __BSS_SIZE__ = SIZEOF(.bss);

#if USE_COHERENT_MEM
    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    /* Catch overflow of the RAM region at link time. */
    ASSERT(. <= BL2_LIMIT, "BL2 image has exceeded its limit.")
}