/*
 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <libfdt.h>

#include <platform_def.h>

#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/desc_image_load.h>
#include <common/fdt_fixup.h>
#include <common/fdt_wrappers.h>
#include <lib/optee_utils.h>
#if TRANSFER_LIST
#include <lib/transfer_list.h>
#endif
#include <lib/utils.h>
#include <plat/common/platform.h>
#if ENABLE_RME
#include <qemu_pas_def.h>
#endif

#include "qemu_private.h"

#define MAP_BL2_TOTAL           MAP_REGION_FLAT(                        \
                                        bl2_tzram_layout.total_base,    \
                                        bl2_tzram_layout.total_size,    \
                                        MT_MEMORY | MT_RW | EL3_PAS)

#define MAP_BL2_RO              MAP_REGION_FLAT(                        \
                                        BL_CODE_BASE,                   \
                                        BL_CODE_END - BL_CODE_BASE,     \
                                        MT_CODE | EL3_PAS),             \
                                MAP_REGION_FLAT(                        \
                                        BL_RO_DATA_BASE,                \
                                        BL_RO_DATA_END                  \
                                                - BL_RO_DATA_BASE,      \
                                        MT_RO_DATA | EL3_PAS)

#if USE_COHERENT_MEM
#define MAP_BL_COHERENT_RAM     MAP_REGION_FLAT(                        \
                                        BL_COHERENT_RAM_BASE,           \
                                        BL_COHERENT_RAM_END             \
                                                - BL_COHERENT_RAM_BASE, \
                                        MT_DEVICE | MT_RW | EL3_PAS)
#endif

/* Data structure which holds the extents of the trusted SRAM for BL2 */
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
#if TRANSFER_LIST
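/*
 * Transfer list used to hand off firmware information (e.g. the packed FDT)
 * to later boot stages; set up in bl2_platform_setup() below.
 */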
static struct transfer_list_header *bl2_tl;
#endif

void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1,
                               u_register_t arg2, u_register_t arg3)
{
        meminfo_t *mem_layout = (void *)arg1;

        /* Initialize the console to provide early debug support */
        qemu_console_init();

        /* Setup the BL2 memory layout */
        bl2_tzram_layout = *mem_layout;

        plat_qemu_io_setup();
}

static void security_setup(void)
{
        /*
         * This is where a TrustZone address space controller and other
         * security-related peripherals would be configured.
         */
}

static void update_dt(void)
{
#if TRANSFER_LIST
        struct transfer_list_entry *te;
#endif
        int ret;
        void *fdt = (void *)(uintptr_t)ARM_PRELOADED_DTB_BASE;

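        /*
         * fdt_open_into() re-opens the pre-loaded DTB in place with a larger
         * buffer size (PLAT_QEMU_DT_MAX_SIZE) so that nodes can be added
         * before the blob is packed again below.
         */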
        ret = fdt_open_into(fdt, fdt, PLAT_QEMU_DT_MAX_SIZE);
        if (ret < 0) {
                ERROR("Invalid Device Tree at %p: error %d\n", fdt, ret);
                return;
        }

        if (dt_add_psci_node(fdt)) {
                ERROR("Failed to add PSCI Device Tree node\n");
                return;
        }

        if (dt_add_psci_cpu_enable_methods(fdt)) {
                ERROR("Failed to add PSCI cpu enable methods in Device Tree\n");
                return;
        }

#if ENABLE_RME
        if (fdt_add_reserved_memory(fdt, "rmm", REALM_DRAM_BASE,
                                    REALM_DRAM_SIZE)) {
                ERROR("Failed to reserve RMM memory in Device Tree\n");
                return;
        }

        INFO("Reserved RMM memory [0x%lx, 0x%lx] in Device tree\n",
             (uintptr_t)REALM_DRAM_BASE,
             (uintptr_t)REALM_DRAM_BASE + REALM_DRAM_SIZE - 1);
#endif

        ret = fdt_pack(fdt);
        if (ret < 0)
                ERROR("Failed to pack Device Tree at %p: error %d\n", fdt, ret);

#if TRANSFER_LIST
        /* Add the packed FDT to the transfer list as a new transfer entry */
        te = transfer_list_add(bl2_tl, TL_TAG_FDT, fdt_totalsize(fdt), fdt);
        if (!te) {
                ERROR("Failed to add FDT entry to Transfer List\n");
                return;
        }
#endif
}

void bl2_platform_setup(void)
{
#if TRANSFER_LIST
        bl2_tl = transfer_list_init((void *)(uintptr_t)FW_HANDOFF_BASE,
                                    FW_HANDOFF_SIZE);
        if (!bl2_tl) {
                ERROR("Failed to initialize Transfer List at 0x%lx\n",
                      (unsigned long)FW_HANDOFF_BASE);
        }
#endif
        security_setup();
        update_dt();

        /* TODO Initialize timer */
}

void qemu_bl2_sync_transfer_list(void)
{
#if TRANSFER_LIST
        transfer_list_update_checksum(bl2_tl);
#endif
}

#if ENABLE_RME
static void bl2_plat_gpt_setup(void)
{
        /*
         * The GPT library might modify the gpt regions structure to optimize
         * the layout, so the array cannot be constant.
         */
        pas_region_t pas_regions[] = {
                QEMU_PAS_ROOT,
                QEMU_PAS_SECURE,
                QEMU_PAS_GPTS,
                QEMU_PAS_NS0,
                QEMU_PAS_REALM,
                QEMU_PAS_NS1,
        };

        /*
         * Initialize the entire protected space to GPT_GPI_ANY. With each L0
         * entry covering 1GB (currently the only supported option), covering
         * 256TB of RAM (48-bit PA) would require a 2MB L0 region. At the
         * moment we use an 8KB table, which covers 1TB of RAM (40-bit PA).
         */
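        /*
         * Sizing sketch, derived from the figures above (assuming the usual
         * 8-byte L0 descriptor): 1TB / 1GB = 1024 L0 entries * 8 bytes = 8KB,
         * while a 48-bit PA would need 256K entries * 8 bytes = 2MB.
         */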
        if (gpt_init_l0_tables(GPCCR_PPS_1TB, PLAT_QEMU_L0_GPT_BASE,
                               PLAT_QEMU_L0_GPT_SIZE) < 0) {
                ERROR("gpt_init_l0_tables() failed!\n");
                panic();
        }

        /* Carve out defined PAS ranges. */
        if (gpt_init_pas_l1_tables(GPCCR_PGS_4K,
                                   PLAT_QEMU_L1_GPT_BASE,
                                   PLAT_QEMU_L1_GPT_SIZE,
                                   pas_regions,
                                   (unsigned int)(sizeof(pas_regions) /
                                                  sizeof(pas_region_t))) < 0) {
                ERROR("gpt_init_pas_l1_tables() failed!\n");
                panic();
        }

        INFO("Enabling Granule Protection Checks\n");
        if (gpt_enable() < 0) {
                ERROR("gpt_enable() failed!\n");
                panic();
        }
}
#endif

void bl2_plat_arch_setup(void)
{
        const mmap_region_t bl_regions[] = {
                MAP_BL2_TOTAL,
                MAP_BL2_RO,
#if USE_COHERENT_MEM
                MAP_BL_COHERENT_RAM,
#endif
#if ENABLE_RME
                MAP_RMM_DRAM,
                MAP_GPT_L0_REGION,
                MAP_GPT_L1_REGION,
#endif
                {0}
        };

        setup_page_tables(bl_regions, plat_qemu_get_mmap());

#if ENABLE_RME
        /* BL2 runs in EL3 when RME is enabled. */
        assert(get_armv9_2_feat_rme_support() != 0U);
        enable_mmu_el3(0);

        /* Initialise and enable granule protection after the MMU is on. */
        bl2_plat_gpt_setup();
#else /* ENABLE_RME */

#ifdef __aarch64__
        enable_mmu_el1(0);
#else
        enable_mmu_svc_mon(0);
#endif
#endif /* ENABLE_RME */
}

/*******************************************************************************
 * Gets SPSR for BL32 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl32_entry(void)
{
#ifdef __aarch64__
        /*
         * The Secure Payload Dispatcher service is responsible for
         * setting the SPSR prior to entry into the BL3-2 image.
         */
        return 0;
#else
        return SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
                           DISABLE_ALL_EXCEPTIONS);
#endif
}

/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl33_entry(void)
{
        uint32_t spsr;
#ifdef __aarch64__
        unsigned int mode;

        /* Figure out what mode we enter the non-secure world in */
        mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;

        /*
         * TODO: Consider the possibility of specifying the SPSR in
         * the FIP ToC and allowing the platform to have a say as
         * well.
         */
        spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
#else
        /* Bit 0 of the entrypoint selects the Thumb instruction set */
        spsr = SPSR_MODE32(MODE32_svc,
                           plat_get_ns_image_entrypoint() & 0x1,
                           SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
#endif
        return spsr;
}

#if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
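/*
 * Register the secure partition packages described by TB_FW_CONFIG. The
 * parser below expects a node compatible with "arm,sp" whose subnodes each
 * carry a "uuid" and a "load-address" property, roughly along these lines
 * (illustrative sketch only; node names and values are made up, the real
 * schema comes from the TB_FW_CONFIG in use):
 *
 *      secure-partitions {
 *              compatible = "arm,sp";
 *              example-sp {
 *                      uuid = "aabbccdd-0000-0000-0000-000000000000";
 *                      load-address = <0x6000000>;
 *              };
 *      };
 */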
static int load_sps_from_tb_fw_config(struct image_info *image_info)
{
        void *dtb = (void *)image_info->image_base;
        const char *compat_str = "arm,sp";
        const struct fdt_property *uuid;
        uint32_t load_addr;
        const char *name;
        int sp_node;
        int node;

        node = fdt_node_offset_by_compatible(dtb, -1, compat_str);
        if (node < 0) {
                ERROR("Can't find %s in TB_FW_CONFIG\n", compat_str);
                return -1;
        }

        fdt_for_each_subnode(sp_node, dtb, node) {
                name = fdt_get_name(dtb, sp_node, NULL);
                if (name == NULL) {
                        ERROR("Can't get name of node in dtb\n");
                        return -1;
                }
                uuid = fdt_get_property(dtb, sp_node, "uuid", NULL);
                if (uuid == NULL) {
                        ERROR("Can't find property uuid in node %s\n", name);
                        return -1;
                }
                if (fdt_read_uint32(dtb, sp_node, "load-address",
                                    &load_addr) < 0) {
                        ERROR("Can't read load-address in node %s\n", name);
                        return -1;
                }
                if (qemu_io_register_sp_pkg(name, uuid->data, load_addr) < 0) {
                        return -1;
                }
        }

        return 0;
}
#endif /* defined(SPD_spmd) && SPMD_SPM_AT_SEL2 */

static int qemu_bl2_handle_post_image_load(unsigned int image_id)
{
        int err = 0;
        bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
        bl_mem_params_node_t *pager_mem_params = NULL;
        bl_mem_params_node_t *paged_mem_params = NULL;
#endif
#if defined(SPD_spmd)
        bl_mem_params_node_t *bl32_mem_params = NULL;
#endif
#if TRANSFER_LIST
        struct transfer_list_header *ns_tl = NULL;
        struct transfer_list_entry *te = NULL;
#endif

        assert(bl_mem_params);

        switch (image_id) {
        case BL32_IMAGE_ID:
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
                pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
                assert(pager_mem_params);

                paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
                assert(paged_mem_params);

                err = parse_optee_header(&bl_mem_params->ep_info,
                                         &pager_mem_params->image_info,
                                         &paged_mem_params->image_info);
                if (err != 0) {
                        WARN("OPTEE header parse error.\n");
                }
#endif

#if defined(SPMC_OPTEE)
                /*
                 * Explicitly zero unused registers, since they may have
                 * been populated by parse_optee_header() above.
                 *
                 * OP-TEE expects the system DTB in x2 and TOS_FW_CONFIG in x0;
                 * the latter is filled in below for TOS_FW_CONFIG_ID and
                 * applies to any other SPMC too.
                 */
                bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
#elif defined(SPD_opteed)
                /*
                 * OP-TEE expects to receive the DTB address in x2.
                 * This will be copied into x2 by the dispatcher.
                 */
                bl_mem_params->ep_info.args.arg3 = ARM_PRELOADED_DTB_BASE;
#elif defined(AARCH32_SP_OPTEE)
                bl_mem_params->ep_info.args.arg0 =
                                        bl_mem_params->ep_info.args.arg1;
                bl_mem_params->ep_info.args.arg1 = 0;
                bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
                bl_mem_params->ep_info.args.arg3 = 0;
#endif
                bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl32_entry();
                break;

        case BL33_IMAGE_ID:
#ifdef AARCH32_SP_OPTEE
                /* AArch32 only core: OP-TEE expects NSec EP in register LR */
                pager_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
                assert(pager_mem_params);
                pager_mem_params->ep_info.lr_svc = bl_mem_params->ep_info.pc;
#endif

                bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl33_entry();

#if ARM_LINUX_KERNEL_AS_BL33
                /*
                 * According to the file ``Documentation/arm64/booting.txt`` of
                 * the Linux kernel tree, Linux expects the physical address of
                 * the device tree blob (DTB) in x0, while x1-x3 are reserved
                 * for future use and must be 0.
                 */
                bl_mem_params->ep_info.args.arg0 =
                        (u_register_t)ARM_PRELOADED_DTB_BASE;
                bl_mem_params->ep_info.args.arg1 = 0U;
                bl_mem_params->ep_info.args.arg2 = 0U;
                bl_mem_params->ep_info.args.arg3 = 0U;
#elif TRANSFER_LIST
                if (bl2_tl) {
                        /* Relocate the TL to the pre-allocated NS memory */
                        ns_tl = transfer_list_relocate(bl2_tl,
                                        (void *)(uintptr_t)FW_NS_HANDOFF_BASE,
                                        bl2_tl->max_size);
                        if (!ns_tl) {
                                ERROR("Relocate TL to 0x%lx failed\n",
                                      (unsigned long)FW_NS_HANDOFF_BASE);
                                return -1;
                        }
                        NOTICE("Transfer list handoff to BL33\n");
                        transfer_list_dump(ns_tl);

                        te = transfer_list_find(ns_tl, TL_TAG_FDT);

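                        /*
                         * Firmware handoff register convention, as used
                         * below: x1/r1 carries the transfer list signature
                         * and register-convention version, x3/r3 the address
                         * of the relocated transfer list, and the DTB (when
                         * an FDT entry is present) goes in x0 on AArch64 or
                         * r2 on AArch32.
                         */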
                        bl_mem_params->ep_info.args.arg1 =
                                TRANSFER_LIST_SIGNATURE |
                                REGISTER_CONVENTION_VERSION_MASK;
                        bl_mem_params->ep_info.args.arg3 = (uintptr_t)ns_tl;

                        if (GET_RW(bl_mem_params->ep_info.spsr) == MODE_RW_32) {
                                /* AArch32: DTB goes in r2 */
                                bl_mem_params->ep_info.args.arg0 = 0;
                                bl_mem_params->ep_info.args.arg2 = te ?
                                        (uintptr_t)transfer_list_entry_data(te)
                                        : 0;
                        } else {
                                /* AArch64: DTB goes in x0 */
                                bl_mem_params->ep_info.args.arg0 = te ?
                                        (uintptr_t)transfer_list_entry_data(te)
                                        : 0;
                                bl_mem_params->ep_info.args.arg2 = 0;
                        }
                } else {
                        /* Legacy handoff: pass the primary CPU MPID in x0 */
                        bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
                }
#else
                /* BL33 expects to receive the primary CPU MPID (through r0) */
                bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
#endif /* ARM_LINUX_KERNEL_AS_BL33 */

                break;
#ifdef SPD_spmd
#if SPMD_SPM_AT_SEL2
        case TB_FW_CONFIG_ID:
                err = load_sps_from_tb_fw_config(&bl_mem_params->image_info);
                break;
#endif
        case TOS_FW_CONFIG_ID:
                /* An SPMC expects TOS_FW_CONFIG in x0/r0 */
                bl32_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
                bl32_mem_params->ep_info.args.arg0 =
                        bl_mem_params->image_info.image_base;
                break;
#endif
        default:
                /* Do nothing in default case */
                break;
        }

        return err;
}

/*******************************************************************************
 * This function can be used by the platforms to update/use image
 * information for a given `image_id`.
 ******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
        return qemu_bl2_handle_post_image_load(image_id);
}

uintptr_t plat_get_ns_image_entrypoint(void)
{
        return NS_IMAGE_OFFSET;
}