/*
 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <libfdt.h>

#include <platform_def.h>

#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/desc_image_load.h>
#include <common/fdt_fixup.h>
#include <common/fdt_wrappers.h>
#include <lib/optee_utils.h>
#include <lib/transfer_list.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
#if ENABLE_RME
#include <qemu_pas_def.h>
#endif

#include "qemu_private.h"

#define MAP_BL2_TOTAL		MAP_REGION_FLAT(			\
					bl2_tzram_layout.total_base,	\
					bl2_tzram_layout.total_size,	\
					MT_MEMORY | MT_RW | EL3_PAS)

#define MAP_BL2_RO		MAP_REGION_FLAT(			\
					BL_CODE_BASE,			\
					BL_CODE_END - BL_CODE_BASE,	\
					MT_CODE | EL3_PAS),		\
				MAP_REGION_FLAT(			\
					BL_RO_DATA_BASE,		\
					BL_RO_DATA_END			\
						- BL_RO_DATA_BASE,	\
					MT_RO_DATA | EL3_PAS)

#if USE_COHERENT_MEM
#define MAP_BL_COHERENT_RAM	MAP_REGION_FLAT(			\
					BL_COHERENT_RAM_BASE,		\
					BL_COHERENT_RAM_END		\
						- BL_COHERENT_RAM_BASE,	\
					MT_DEVICE | MT_RW | EL3_PAS)
#endif

/* Data structure which holds the extents of the trusted SRAM for BL2 */
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
static struct transfer_list_header *bl2_tl;

void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1,
			       u_register_t arg2, u_register_t arg3)
{
	meminfo_t *mem_layout = (void *)arg1;

	/* Initialize the console to provide early debug support */
	qemu_console_init();

	/* Setup the BL2 memory layout */
	bl2_tzram_layout = *mem_layout;

	plat_qemu_io_setup();
}

static void security_setup(void)
{
	/*
	 * This is where a TrustZone address space controller and other
	 * security-related peripherals would be configured.
	 */
}

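/*
 * Amend the preloaded device tree: add PSCI nodes and CPU enable methods,
 * reserve the RMM carve-out when RME is enabled, and (with TRANSFER_LIST)
 * record the resulting blob in the transfer list.
 */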
static void update_dt(void)
{
#if TRANSFER_LIST
	struct transfer_list_entry *te;
#endif
	int ret;
	void *fdt = (void *)(uintptr_t)ARM_PRELOADED_DTB_BASE;

	ret = fdt_open_into(fdt, fdt, PLAT_QEMU_DT_MAX_SIZE);
	if (ret < 0) {
		ERROR("Invalid Device Tree at %p: error %d\n", fdt, ret);
		return;
	}

	if (dt_add_psci_node(fdt)) {
		ERROR("Failed to add PSCI Device Tree node\n");
		return;
	}

	if (dt_add_psci_cpu_enable_methods(fdt)) {
		ERROR("Failed to add PSCI cpu enable methods in Device Tree\n");
		return;
	}

#if ENABLE_RME
	if (fdt_add_reserved_memory(fdt, "rmm", REALM_DRAM_BASE,
				    REALM_DRAM_SIZE)) {
		ERROR("Failed to reserve RMM memory in Device Tree\n");
		return;
	}

	INFO("Reserved RMM memory [0x%lx, 0x%lx] in Device tree\n",
	     (uintptr_t)REALM_DRAM_BASE,
	     (uintptr_t)REALM_DRAM_BASE + REALM_DRAM_SIZE - 1);
#endif

	ret = fdt_pack(fdt);
	if (ret < 0)
		ERROR("Failed to pack Device Tree at %p: error %d\n", fdt, ret);

#if TRANSFER_LIST
	/* create a TE */
	te = transfer_list_add(bl2_tl, TL_TAG_FDT, fdt_totalsize(fdt), fdt);
	if (!te) {
		ERROR("Failed to add FDT entry to Transfer List\n");
		return;
	}
#endif
}

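/*
 * Second-stage platform setup for BL2: initialize the firmware handoff
 * transfer list (when TRANSFER_LIST is enabled), configure security
 * peripherals and update the device tree.
 */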
void bl2_platform_setup(void)
{
#if TRANSFER_LIST
	bl2_tl = transfer_list_init((void *)(uintptr_t)FW_HANDOFF_BASE,
				    FW_HANDOFF_SIZE);
	if (!bl2_tl) {
		ERROR("Failed to initialize Transfer List at 0x%lx\n",
		      (unsigned long)FW_HANDOFF_BASE);
	}
#endif
	security_setup();
	update_dt();

	/* TODO Initialize timer */
}

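/* Refresh the transfer list checksum after its entries have been modified. */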
void qemu_bl2_sync_transfer_list(void)
{
#if TRANSFER_LIST
	transfer_list_update_checksum(bl2_tl);
#endif
}

#if ENABLE_RME
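/*
 * Set up the Granule Protection Tables that partition memory into the Root,
 * Secure, Non-secure and Realm physical address spaces, then enable Granule
 * Protection Checks.
 */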
static void bl2_plat_gpt_setup(void)
{
	/*
	 * The GPT library might modify the gpt regions structure to optimize
	 * the layout, so the array cannot be constant.
	 */
	pas_region_t pas_regions[] = {
		QEMU_PAS_ROOT,
		QEMU_PAS_SECURE,
		QEMU_PAS_GPTS,
		QEMU_PAS_NS0,
		QEMU_PAS_REALM,
		QEMU_PAS_NS1,
	};

	/*
	 * Initialize the entire protected space to GPT_GPI_ANY. With each L0
	 * entry covering 1GB (currently the only supported option), covering
	 * 256TB of RAM (48-bit PA) would require a 2MB L0 region. At the
	 * moment we use an 8KB table, which covers 1TB of RAM (40-bit PA).
	 */
	if (gpt_init_l0_tables(GPCCR_PPS_1TB, PLAT_QEMU_L0_GPT_BASE,
			       PLAT_QEMU_L0_GPT_SIZE +
			       PLAT_QEMU_GPT_BITLOCK_SIZE) < 0) {
		ERROR("gpt_init_l0_tables() failed!\n");
		panic();
	}

	/* Carve out defined PAS ranges. */
	if (gpt_init_pas_l1_tables(GPCCR_PGS_4K,
				   PLAT_QEMU_L1_GPT_BASE,
				   PLAT_QEMU_L1_GPT_SIZE,
				   pas_regions,
				   (unsigned int)(sizeof(pas_regions) /
						  sizeof(pas_region_t))) < 0) {
		ERROR("gpt_init_pas_l1_tables() failed!\n");
		panic();
	}

	INFO("Enabling Granule Protection Checks\n");
	if (gpt_enable() < 0) {
		ERROR("gpt_enable() failed!\n");
		panic();
	}
}
#endif

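/*
 * Build the translation tables covering BL2's own code, data and the optional
 * coherent/RME regions, then enable the MMU: at EL3 when RME is enabled,
 * otherwise at EL1 (AArch64) or in Supervisor/Monitor mode (AArch32).
 */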
void bl2_plat_arch_setup(void)
{
	const mmap_region_t bl_regions[] = {
		MAP_BL2_TOTAL,
		MAP_BL2_RO,
#if USE_COHERENT_MEM
		MAP_BL_COHERENT_RAM,
#endif
#if ENABLE_RME
		MAP_RMM_DRAM,
		MAP_GPT_L0_REGION,
		MAP_GPT_L1_REGION,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_qemu_get_mmap());

#if ENABLE_RME
	/* BL2 runs in EL3 when RME enabled. */
	assert(is_feat_rme_present());
	enable_mmu_el3(0);

	/* Initialise and enable granule protection after MMU. */
	bl2_plat_gpt_setup();
#else /* ENABLE_RME */

#ifdef __aarch64__
	enable_mmu_el1(0);
#else
	enable_mmu_svc_mon(0);
#endif
#endif /* ENABLE_RME */
}

/*******************************************************************************
 * Gets SPSR for BL32 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl32_entry(void)
{
#ifdef __aarch64__
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL3-2 image.
	 */
	return 0;
#else
	return SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
			   DISABLE_ALL_EXCEPTIONS);
#endif
}

/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl33_entry(void)
{
	uint32_t spsr;
#ifdef __aarch64__
	unsigned int mode;

	/* Figure out what mode we enter the non-secure world in */
	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
#else
	spsr = SPSR_MODE32(MODE32_svc,
			   plat_get_ns_image_entrypoint() & 0x1,
			   SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
#endif
	return spsr;
}

#if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
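/*
 * Walk the "arm,sp" compatible node in TB_FW_CONFIG and register each secure
 * partition package (UUID and load address) with the platform IO layer.
 */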
static int load_sps_from_tb_fw_config(struct image_info *image_info)
{
	void *dtb = (void *)image_info->image_base;
	const char *compat_str = "arm,sp";
	const struct fdt_property *uuid;
	uint32_t load_addr;
	const char *name;
	int sp_node;
	int node;

	node = fdt_node_offset_by_compatible(dtb, -1, compat_str);
	if (node < 0) {
		ERROR("Can't find %s in TB_FW_CONFIG", compat_str);
		return -1;
	}

	fdt_for_each_subnode(sp_node, dtb, node) {
		name = fdt_get_name(dtb, sp_node, NULL);
		if (name == NULL) {
			ERROR("Can't get name of node in dtb\n");
			return -1;
		}
		uuid = fdt_get_property(dtb, sp_node, "uuid", NULL);
		if (uuid == NULL) {
			ERROR("Can't find property uuid in node %s", name);
			return -1;
		}
		if (fdt_read_uint32(dtb, sp_node, "load-address",
				    &load_addr) < 0) {
			ERROR("Can't read load-address in node %s", name);
			return -1;
		}
		if (qemu_io_register_sp_pkg(name, uuid->data, load_addr) < 0) {
			return -1;
		}
	}

	return 0;
}
#endif /* defined(SPD_spmd) && SPMD_SPM_AT_SEL2 */

#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
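/*
 * Record the address of the OP-TEE pageable part in the transfer list so it
 * can be retrieved later; a no-op when TRANSFER_LIST is disabled.
 */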
static int handoff_pageable_part(uint64_t pagable_part)
{
#if TRANSFER_LIST
	struct transfer_list_entry *te;

	te = transfer_list_add(bl2_tl, TL_TAG_OPTEE_PAGABLE_PART,
			       sizeof(pagable_part), &pagable_part);
	if (!te) {
		INFO("Cannot add TE for pageable part\n");
		return -1;
	}
#endif
	return 0;
}
#endif

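/*
 * Populate the entry-point information and boot arguments for the images BL2
 * loads (BL31, BL32, BL33 and the firmware config images), according to the
 * selected SPD/SPMC and handoff mechanism.
 */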
static int qemu_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
#endif
#if defined(SPD_spmd)
	bl_mem_params_node_t *bl32_mem_params = NULL;
#endif
#if TRANSFER_LIST
	struct transfer_list_header *ns_tl = NULL;
#endif

	assert(bl_mem_params);

	switch (image_id) {
#if TRANSFER_LIST
	case BL31_IMAGE_ID:
		/*
		 * arg0 is a bl_params_t reserved for bl31_early_platform_setup2;
		 * we just need arg1 and arg3 for BL31 to update the TL from S
		 * to NS memory before it exits.
		 */
#ifdef __aarch64__
		if (GET_RW(bl_mem_params->ep_info.spsr) == MODE_RW_64) {
			bl_mem_params->ep_info.args.arg1 =
				TRANSFER_LIST_HANDOFF_X1_VALUE(REGISTER_CONVENTION_VERSION);
		} else
#endif
		{
			bl_mem_params->ep_info.args.arg1 =
				TRANSFER_LIST_HANDOFF_R1_VALUE(REGISTER_CONVENTION_VERSION);
		}

		bl_mem_params->ep_info.args.arg3 = (uintptr_t)bl2_tl;
		break;
#endif
	case BL32_IMAGE_ID:
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params);

		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params);

		err = parse_optee_header(&bl_mem_params->ep_info,
					 &pager_mem_params->image_info,
					 &paged_mem_params->image_info);
		if (err != 0) {
			WARN("OPTEE header parse error.\n");
		}

		/* Add the TL_TAG_OPTEE_PAGABLE_PART entry to the TL */
		if (handoff_pageable_part(bl_mem_params->ep_info.args.arg1)) {
			return -1;
		}
#endif

		INFO("Handoff to BL32\n");
		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl32_entry();
		if (TRANSFER_LIST &&
		    transfer_list_set_handoff_args(bl2_tl,
						   &bl_mem_params->ep_info))
			break;

		INFO("Using default arguments\n");
#if defined(SPMC_OPTEE)
		/*
		 * Explicit zeroes to unused registers since they may have
		 * been populated by parse_optee_header() above.
		 *
		 * OP-TEE expects the system DTB in x2 and TOS_FW_CONFIG in x0;
		 * the latter is filled in below for TOS_FW_CONFIG_ID and
		 * applies to any other SPMC too.
		 */
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
#elif defined(SPD_opteed)
		/*
		 * OP-TEE expects to receive the DTB address in x2.
		 * This will be copied into x2 by the dispatcher.
		 */
		bl_mem_params->ep_info.args.arg3 = ARM_PRELOADED_DTB_BASE;
#elif defined(AARCH32_SP_OPTEE)
		bl_mem_params->ep_info.args.arg0 =
			bl_mem_params->ep_info.args.arg1;
		bl_mem_params->ep_info.args.arg1 = 0;
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg3 = 0;
#endif
		break;

	case BL33_IMAGE_ID:
#ifdef AARCH32_SP_OPTEE
		/* AArch32 only core: OP-TEE expects NSec EP in register LR */
		pager_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		assert(pager_mem_params);
		pager_mem_params->ep_info.lr_svc = bl_mem_params->ep_info.pc;
#endif

		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl33_entry();

#if ARM_LINUX_KERNEL_AS_BL33
		/*
		 * According to the file ``Documentation/arm64/booting.txt`` of
		 * the Linux kernel tree, Linux expects the physical address of
		 * the device tree blob (DTB) in x0, while x1-x3 are reserved
		 * for future use and must be 0.
		 */
		bl_mem_params->ep_info.args.arg0 =
			(u_register_t)ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg1 = 0U;
		bl_mem_params->ep_info.args.arg2 = 0U;
		bl_mem_params->ep_info.args.arg3 = 0U;
#elif TRANSFER_LIST
		if (bl2_tl) {
			/* Relocate the TL to pre-allocated NS memory */
			ns_tl = transfer_list_relocate(bl2_tl,
				(void *)(uintptr_t)FW_NS_HANDOFF_BASE,
				bl2_tl->max_size);
			if (!ns_tl) {
				ERROR("Relocate TL to 0x%lx failed\n",
				      (unsigned long)FW_NS_HANDOFF_BASE);
				return -1;
			}
		}

		INFO("Handoff to BL33\n");
		if (!transfer_list_set_handoff_args(ns_tl,
						    &bl_mem_params->ep_info)) {
			INFO("Invalid TL, fallback to default arguments\n");
			bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		}
#else
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
#endif /* ARM_LINUX_KERNEL_AS_BL33 */

		break;
#ifdef SPD_spmd
#if SPMD_SPM_AT_SEL2
	case TB_FW_CONFIG_ID:
		err = load_sps_from_tb_fw_config(&bl_mem_params->image_info);
		break;
#endif
	case TOS_FW_CONFIG_ID:
		/* An SPMC expects TOS_FW_CONFIG in x0/r0 */
		bl32_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		bl32_mem_params->ep_info.args.arg0 =
			bl_mem_params->image_info.image_base;
		break;
#endif
	default:
		/* Do nothing in default case */
		break;
	}

	return err;
}

/*******************************************************************************
 * This function can be used by the platforms to update/use image
 * information for a given `image_id`.
 ******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	return qemu_bl2_handle_post_image_load(image_id);
}

uintptr_t plat_get_ns_image_entrypoint(void)
{
	return NS_IMAGE_OFFSET;
}