/*
 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <libfdt.h>

#include <platform_def.h>

#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/desc_image_load.h>
#include <common/fdt_fixup.h>
#include <common/fdt_wrappers.h>
#include <lib/optee_utils.h>
#include <lib/transfer_list.h>
#include <lib/utils.h>
#include <plat/common/platform.h>

#include "qemu_private.h"

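/*
 * MMU regions for BL2: all of BL2's trusted SRAM as read-write memory,
 * the code and read-only data sections with matching attributes and,
 * optionally, the coherent RAM region. These are consumed by
 * bl2_plat_arch_setup() below when the page tables are created.
 */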
#define MAP_BL2_TOTAL		MAP_REGION_FLAT(			\
					bl2_tzram_layout.total_base,	\
					bl2_tzram_layout.total_size,	\
					MT_MEMORY | MT_RW | EL3_PAS)

#define MAP_BL2_RO		MAP_REGION_FLAT(			\
					BL_CODE_BASE,			\
					BL_CODE_END - BL_CODE_BASE,	\
					MT_CODE | EL3_PAS),		\
				MAP_REGION_FLAT(			\
					BL_RO_DATA_BASE,		\
					BL_RO_DATA_END			\
						- BL_RO_DATA_BASE,	\
					MT_RO_DATA | EL3_PAS)

#if USE_COHERENT_MEM
#define MAP_BL_COHERENT_RAM	MAP_REGION_FLAT(			\
					BL_COHERENT_RAM_BASE,		\
					BL_COHERENT_RAM_END		\
						- BL_COHERENT_RAM_BASE,	\
					MT_DEVICE | MT_RW | EL3_PAS)
#endif

/* Data structure which holds the extents of the trusted SRAM for BL2 */
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
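/* Transfer List used to hand off boot information when TRANSFER_LIST=1 */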
static struct transfer_list_header *bl2_tl;

void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1,
			       u_register_t arg2, u_register_t arg3)
{
	meminfo_t *mem_layout = (void *)arg1;

	/* Initialize the console to provide early debug support */
	qemu_console_init();

	/* Setup the BL2 memory layout */
	bl2_tzram_layout = *mem_layout;

	plat_qemu_io_setup();
}

static void security_setup(void)
{
	/*
	 * This is where a TrustZone address space controller and other
	 * security-related peripherals would be configured.
	 */
}

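/*
 * Copy the preloaded device tree to its runtime location, add the PSCI
 * nodes (and, with RME, a reserved-memory node for the RMM), then pack it
 * and, when TRANSFER_LIST=1, record it in the Transfer List.
 */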
static void update_dt(void)
{
#if TRANSFER_LIST
	struct transfer_list_entry *te;
#endif
	int ret;
	void *fdt = (void *)(uintptr_t)ARM_PRELOADED_DTB_BASE;
	void *dst = plat_qemu_dt_runtime_address();

	ret = fdt_open_into(fdt, dst, PLAT_QEMU_DT_MAX_SIZE);
	if (ret < 0) {
		ERROR("Invalid Device Tree at %p: error %d\n", fdt, ret);
		return;
	}

	/* Apply all further updates to the copy at its runtime location */
	fdt = dst;

	if (dt_add_psci_node(fdt)) {
		ERROR("Failed to add PSCI Device Tree node\n");
		return;
	}

	if (dt_add_psci_cpu_enable_methods(fdt)) {
		ERROR("Failed to add PSCI cpu enable methods in Device Tree\n");
		return;
	}

#if ENABLE_RME
	if (fdt_add_reserved_memory(fdt, "rmm", REALM_DRAM_BASE,
				    REALM_DRAM_SIZE)) {
		ERROR("Failed to reserve RMM memory in Device Tree\n");
		return;
	}

	INFO("Reserved RMM memory [0x%lx, 0x%lx] in Device tree\n",
	     (uintptr_t)REALM_DRAM_BASE,
	     (uintptr_t)REALM_DRAM_BASE + REALM_DRAM_SIZE - 1);
#endif

	ret = fdt_pack(fdt);
	if (ret < 0)
		ERROR("Failed to pack Device Tree at %p: error %d\n", fdt, ret);

#if TRANSFER_LIST
	/* create a TE */
	te = transfer_list_add(bl2_tl, TL_TAG_FDT, fdt_totalsize(fdt), fdt);
	if (!te) {
		ERROR("Failed to add FDT entry to Transfer List\n");
		return;
	}
#endif
}

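/*
 * Perform BL2 platform setup: initialize the Transfer List (when
 * TRANSFER_LIST=1), configure security peripherals and prepare the device
 * tree handed to the next stages.
 */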
void bl2_platform_setup(void)
{
#if TRANSFER_LIST
	bl2_tl = transfer_list_init((void *)(uintptr_t)FW_HANDOFF_BASE,
				    FW_HANDOFF_SIZE);
	if (!bl2_tl) {
		ERROR("Failed to initialize Transfer List at 0x%lx\n",
		      (unsigned long)FW_HANDOFF_BASE);
	}
#endif
	security_setup();
	update_dt();

	/* TODO Initialize timer */
}

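/*
 * Recompute the Transfer List checksum. Intended to be called by platform
 * code that appends entries after bl2_platform_setup() has run.
 */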
void qemu_bl2_sync_transfer_list(void)
{
#if TRANSFER_LIST
	transfer_list_update_checksum(bl2_tl);
#endif
}

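/*
 * Create the BL2 page tables and enable the MMU. With RME, BL2 runs at EL3;
 * otherwise the MMU is enabled at EL1 (AArch64) or in Secure SVC/Monitor
 * mode (AArch32).
 */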
void bl2_plat_arch_setup(void)
{
	const mmap_region_t bl_regions[] = {
		MAP_BL2_TOTAL,
		MAP_BL2_RO,
#if USE_COHERENT_MEM
		MAP_BL_COHERENT_RAM,
#endif
#if ENABLE_RME
		MAP_RMM_DRAM,
		MAP_GPT_L0_REGION,
		MAP_GPT_L1_REGION,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_qemu_get_mmap());

#if ENABLE_RME
	/* BL2 runs in EL3 when RME enabled. */
	assert(is_feat_rme_present());
	enable_mmu_el3(0);
#else /* ENABLE_RME */

#ifdef __aarch64__
	enable_mmu_el1(0);
#else
	enable_mmu_svc_mon(0);
#endif
#endif /* ENABLE_RME */
}

/*******************************************************************************
 * Gets SPSR for BL32 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl32_entry(void)
{
#ifdef __aarch64__
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL3-2 image.
	 */
	return 0;
#else
	return SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
			   DISABLE_ALL_EXCEPTIONS);
#endif
}

/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl33_entry(void)
{
	uint32_t spsr;
#ifdef __aarch64__
	unsigned int mode;

	/* Figure out what mode we enter the non-secure world in */
	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
#else
	spsr = SPSR_MODE32(MODE32_svc,
			   plat_get_ns_image_entrypoint() & 0x1,
			   SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
#endif
	return spsr;
}

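/*
 * Walk the "arm,sp" compatible nodes of TB_FW_CONFIG and register each
 * Secure Partition package (name, UUID and load address) with the IO layer
 * so that it can be loaded later.
 */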
#if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
static int load_sps_from_tb_fw_config(struct image_info *image_info)
{
	void *dtb = (void *)image_info->image_base;
	const char *compat_str = "arm,sp";
	const struct fdt_property *uuid;
	uint32_t load_addr;
	const char *name;
	int sp_node;
	int node;

	node = fdt_node_offset_by_compatible(dtb, -1, compat_str);
	if (node < 0) {
		ERROR("Can't find %s in TB_FW_CONFIG\n", compat_str);
		return -1;
	}

	fdt_for_each_subnode(sp_node, dtb, node) {
		name = fdt_get_name(dtb, sp_node, NULL);
		if (name == NULL) {
			ERROR("Can't get name of node in dtb\n");
			return -1;
		}
		uuid = fdt_get_property(dtb, sp_node, "uuid", NULL);
		if (uuid == NULL) {
			ERROR("Can't find property uuid in node %s\n", name);
			return -1;
		}
		if (fdt_read_uint32(dtb, sp_node, "load-address",
				    &load_addr) < 0) {
			ERROR("Can't read load-address in node %s\n", name);
			return -1;
		}
		if (qemu_io_register_sp_pkg(name, uuid->data, load_addr) < 0) {
			return -1;
		}
	}

	return 0;
}
#endif /* defined(SPD_spmd) && SPMD_SPM_AT_SEL2 */

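/*
 * Record the address of OP-TEE's pageable part in the Transfer List
 * (TL_TAG_OPTEE_PAGABLE_PART); a no-op when TRANSFER_LIST is disabled.
 */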
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
static int handoff_pageable_part(uint64_t pagable_part)
{
#if TRANSFER_LIST
	struct transfer_list_entry *te;

	te = transfer_list_add(bl2_tl, TL_TAG_OPTEE_PAGABLE_PART,
			       sizeof(pagable_part), &pagable_part);
	if (!te) {
		INFO("Cannot add TE for pageable part\n");
		return -1;
	}
#endif
	return 0;
}
#endif

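/*
 * Per-image post-load fixups: set up entry point arguments, SPSR and
 * Transfer List hand-off for BL31, BL32, BL33 and the firmware config
 * images.
 */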
static int qemu_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
#endif
#if defined(SPD_spmd)
	bl_mem_params_node_t *bl32_mem_params = NULL;
#endif
#if TRANSFER_LIST
	struct transfer_list_header *ns_tl = NULL;
#endif

	assert(bl_mem_params);

	switch (image_id) {
#if TRANSFER_LIST
	case BL31_IMAGE_ID:
		/*
		 * arg0 is a bl_params_t reserved for bl31_early_platform_setup2;
		 * we just need arg1 and arg3 for BL31 to update the TL from S
		 * to NS memory before it exits.
		 */
#ifdef __aarch64__
		if (GET_RW(bl_mem_params->ep_info.spsr) == MODE_RW_64) {
			bl_mem_params->ep_info.args.arg1 =
				TRANSFER_LIST_HANDOFF_X1_VALUE(REGISTER_CONVENTION_VERSION);
		} else
#endif
		{
			bl_mem_params->ep_info.args.arg1 =
				TRANSFER_LIST_HANDOFF_R1_VALUE(REGISTER_CONVENTION_VERSION);
		}

		bl_mem_params->ep_info.args.arg3 = (uintptr_t)bl2_tl;
		break;
#endif
	case BL32_IMAGE_ID:
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params);

		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params);

		err = parse_optee_header(&bl_mem_params->ep_info,
					 &pager_mem_params->image_info,
					 &paged_mem_params->image_info);
		if (err != 0) {
			WARN("OPTEE header parse error.\n");
		}

		/* add TL_TAG_OPTEE_PAGABLE_PART entry to the TL */
		if (handoff_pageable_part(bl_mem_params->ep_info.args.arg1)) {
			return -1;
		}
#endif

		INFO("Handoff to BL32\n");
		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl32_entry();
		if (TRANSFER_LIST &&
		    transfer_list_set_handoff_args(bl2_tl,
						   &bl_mem_params->ep_info))
			break;

		INFO("Using default arguments\n");
#if defined(SPMC_OPTEE)
		/*
		 * Explicit zeroes to unused registers since they may have
		 * been populated by parse_optee_header() above.
		 *
		 * OP-TEE expects system DTB in x2 and TOS_FW_CONFIG in x0,
		 * the latter is filled in below for TOS_FW_CONFIG_ID and
		 * applies to any other SPMC too.
		 */
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
#elif defined(SPD_opteed)
		/*
		 * OP-TEE expects to receive the DTB address in x2.
		 * This will be copied into x2 by the dispatcher.
		 */
		bl_mem_params->ep_info.args.arg3 = ARM_PRELOADED_DTB_BASE;
#elif defined(AARCH32_SP_OPTEE)
		bl_mem_params->ep_info.args.arg0 =
			bl_mem_params->ep_info.args.arg1;
		bl_mem_params->ep_info.args.arg1 = 0;
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg3 = 0;
#endif
		break;

	case BL33_IMAGE_ID:
#ifdef AARCH32_SP_OPTEE
		/* AArch32 only core: OP-TEE expects NSec EP in register LR */
		pager_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		assert(pager_mem_params);
		pager_mem_params->ep_info.lr_svc = bl_mem_params->ep_info.pc;
#endif

		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl33_entry();

#if ARM_LINUX_KERNEL_AS_BL33
		/*
		 * According to the file ``Documentation/arm64/booting.txt`` of
		 * the Linux kernel tree, Linux expects the physical address of
		 * the device tree blob (DTB) in x0, while x1-x3 are reserved
		 * for future use and must be 0.
		 */
		bl_mem_params->ep_info.args.arg0 =
			(u_register_t)ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg1 = 0U;
		bl_mem_params->ep_info.args.arg2 = 0U;
		bl_mem_params->ep_info.args.arg3 = 0U;
#elif TRANSFER_LIST
		if (bl2_tl) {
			/* relocate the tl to pre-allocate NS memory */
			ns_tl = transfer_list_relocate(bl2_tl,
					(void *)(uintptr_t)FW_NS_HANDOFF_BASE,
					bl2_tl->max_size);
			if (!ns_tl) {
				ERROR("Relocate TL to 0x%lx failed\n",
				      (unsigned long)FW_NS_HANDOFF_BASE);
				return -1;
			}
		}

		INFO("Handoff to BL33\n");
		if (!transfer_list_set_handoff_args(ns_tl,
						    &bl_mem_params->ep_info)) {
			INFO("Invalid TL, fallback to default arguments\n");
			bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		}
#else
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
#endif /* ARM_LINUX_KERNEL_AS_BL33 */

		break;
#ifdef SPD_spmd
#if SPMD_SPM_AT_SEL2
	case TB_FW_CONFIG_ID:
		err = load_sps_from_tb_fw_config(&bl_mem_params->image_info);
		break;
#endif
	case TOS_FW_CONFIG_ID:
		/* An SPMC expects TOS_FW_CONFIG in x0/r0 */
		bl32_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		bl32_mem_params->ep_info.args.arg0 =
			bl_mem_params->image_info.image_base;
		break;
#endif
	default:
		/* Do nothing in default case */
		break;
	}

	return err;
}

/*******************************************************************************
 * This function can be used by the platforms to update/use image
 * information for a given `image_id`.
 ******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	return qemu_bl2_handle_post_image_load(image_id);
}

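/* Entry point of the preloaded non-secure (BL33) image */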
uintptr_t plat_get_ns_image_entrypoint(void)
{
	return NS_IMAGE_OFFSET;
}