/*
 * Copyright (c) 2020-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <common/tbbr/tbbr_img_def.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/fconf/fconf.h>
#include <lib/fconf/fconf_dyn_cfg_getter.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/el3_spmd_logical_sp.h>
#include <services/ffa_svc.h>
#include <services/spmc_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"
/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information is read from its manifest if the SPMC is not
 * at EL3. Otherwise, it is populated directly by the SPMC.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;
/*******************************************************************************
 * Returns the SPM Core context of the CPU identified by mpidr.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
{
	int core_idx = plat_core_pos_by_mpidr(mpidr);

	if (core_idx < 0) {
		ERROR("Invalid mpidr: %" PRIx64 ", returned ID: %d\n", mpidr, core_idx);
		panic();
	}

	return &spm_core_context[core_idx];
}

/*******************************************************************************
 * Helper returning the SPM Core context of the current CPU.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	return spmd_get_context_by_mpidr(read_mpidr());
}

/*******************************************************************************
 * SPM Core ID getter.
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
	return spmc_attrs.spmc_id;
}

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);

static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags);
/******************************************************************************
 * Builds an SPMD to SPMC direct message request.
 *****************************************************************************/
void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
			     unsigned long long message)
{
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
	write_ctx_reg(gpregs, CTX_GPREG_X1,
		(SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
		 spmd_spmc_id_get());
	write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
	write_ctx_reg(gpregs, CTX_GPREG_X3, message);
}
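
/*
 * Illustrative register image produced by spmd_build_spmc_message(); the
 * concrete IDs are configuration dependent, an SPMC ID of 0x8000 is assumed
 * here purely as an example:
 *   X0 = FFA_MSG_SEND_DIRECT_REQ_SMC32
 *   X1 = (SPMD_DIRECT_MSG_ENDPOINT_ID << 16) | 0x8000 (source | destination)
 *   X2 = BIT(31) | target_func (BIT(31) marks a framework message)
 *   X3 = message payload
 */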

/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#else
	cm_el1_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#else
	cm_el1_sysregs_context_save(SECURE);
#endif

	return rc;
}
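
/*
 * Note on the synchronous entry/exit pair: spmd_spm_core_enter() is an
 * assembly helper that stashes the EL3 C runtime callee-saved registers in
 * c_rt_ctx before ERET-ing into the SPMC. Execution resumes past that call
 * only once the SPMC causes spmd_spm_core_sync_exit() below to run, which
 * unwinds back to the stashed C runtime context with rc placed in x0.
 */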

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;

	VERBOSE("SPM Core init start.\n");

	/* Primary boot core enters the SPMC for initialization. */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	spmd_logical_sp_set_spmc_initialized();
	rc = spmd_logical_sp_init();
	if (rc != 0) {
		WARN("SPMD logical partitions failed to initialise.\n");
	}

	return 1;
}

/*******************************************************************************
 * spmd_secure_interrupt_handler
 * Enter the SPMC for further handling of the secure interrupt by the SPMC
 * itself or a Secure Partition.
 ******************************************************************************/
static uint64_t spmd_secure_interrupt_handler(uint32_t id,
					      uint32_t flags,
					      void *handle,
					      void *cookie)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
	unsigned int linear_id = plat_my_core_pos();
	int64_t rc;

	/* Sanity check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering SPMC */
	cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(NON_SECURE);
#endif

	/* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
	write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

	/* Mark current core as handling a secure interrupt. */
	ctx->secure_interrupt_ongoing = true;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, linear_id);
	}

	ctx->secure_interrupt_ongoing = false;

	cm_el1_sysregs_context_restore(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(NON_SECURE);
#endif
	cm_set_next_eret_context(NON_SECURE);

	SMC_RET0(&ctx->cpu_ctx);
}
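
/*
 * Sketch of the delegation flow implemented above:
 *  1. A secure interrupt fires while the normal world runs and traps to EL3.
 *  2. The non-secure context is saved and FFA_INTERRUPT is staged in x0 of
 *     the SPMC context (x1-x7 zeroed).
 *  3. spmd_spm_core_sync_entry() runs the SPMC until it signals completion,
 *     e.g. through FFA_NORMAL_WORLD_RESUME (handled in spmd_smc_handler()).
 *  4. The non-secure context is restored and the normal world resumes.
 */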

#if (EL3_EXCEPTION_HANDLING == 0)
/*******************************************************************************
 * spmd_group0_interrupt_handler_nwd
 * Group0 secure interrupts in the normal world are trapped to EL3. Delegate
 * the handling of the interrupt to the platform handler, and return only upon
 * successfully handling the Group0 interrupt.
 ******************************************************************************/
static uint64_t spmd_group0_interrupt_handler_nwd(uint32_t id,
						  uint32_t flags,
						  void *handle,
						  void *cookie)
{
	uint32_t intid;

	/* Sanity check the security state when the exception was generated. */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context. */
	assert(handle == cm_get_context(NON_SECURE));

	assert(id == INTR_ID_UNAVAILABLE);

	assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3);

	intid = plat_ic_acknowledge_interrupt();

	if (plat_spmd_handle_group0_interrupt(intid) < 0) {
		ERROR("Group0 interrupt %u not handled\n", intid);
		panic();
	}

	/* Deactivate the corresponding Group0 interrupt. */
	plat_ic_end_of_interrupt(intid);

	return 0U;
}
#endif

/*******************************************************************************
 * spmd_handle_group0_intr_swd
 * The SPMC delegates handling of a Group0 secure interrupt to EL3 firmware
 * using the FFA_EL3_INTR_HANDLE SMC call. Further, the SPMD delegates the
 * handling of the interrupt to the platform handler, and returns only upon
 * successfully handling the Group0 interrupt.
 ******************************************************************************/
static uint64_t spmd_handle_group0_intr_swd(void *handle)
{
	uint32_t intid;

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(SECURE));

	assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3);

	intid = plat_ic_acknowledge_interrupt();

	/*
	 * TODO: Currently due to a limitation in the SPMD implementation, the
	 * platform handler is expected to not delegate handling to the NWd
	 * while processing a Group0 secure interrupt.
	 */
	if (plat_spmd_handle_group0_interrupt(intid) < 0) {
		/* Group0 interrupt was not handled by the platform. */
		ERROR("Group0 interrupt %u not handled\n", intid);
		panic();
	}

	/* Deactivate the corresponding Group0 interrupt. */
	plat_ic_end_of_interrupt(intid);

	/* Return success. */
	SMC_RET8(handle, FFA_SUCCESS_SMC32, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ);
}
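
/*
 * The two Group0 paths above differ only in how EL3 is entered: the NWd
 * variant is reached through the EL3 interrupt framework while the normal
 * world runs, whereas the SWd variant is reached when the SPMC explicitly
 * invokes the FFA_EL3_INTR_HANDLE SMC (dispatched in spmd_smc_handler()).
 * Both acknowledge the INTID, pass it to the platform handler and deactivate
 * it before returning.
 */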

#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
static int spmd_dynamic_map_mem(uintptr_t base_addr, size_t size,
				unsigned int attr, uintptr_t *align_addr,
				size_t *align_size)
{
	uintptr_t base_addr_align;
	size_t mapped_size_align;
	int rc;

	/* Page align the address and size if necessary */
	base_addr_align = page_align(base_addr, DOWN);
	mapped_size_align = page_align(size, UP);

	if ((base_addr != base_addr_align) &&
	    (size == mapped_size_align)) {
		mapped_size_align += PAGE_SIZE;
	}

	/*
	 * Dynamically map the given region using its aligned base address
	 * and size.
	 */
	rc = mmap_add_dynamic_region((unsigned long long)base_addr_align,
				     base_addr_align,
				     mapped_size_align,
				     attr);
	if (rc == 0) {
		*align_addr = base_addr_align;
		*align_size = mapped_size_align;
	}

	return rc;
}
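
/*
 * Worked example for the alignment logic above, assuming 4KB pages and a
 * hypothetical base_addr of 0x88000040 with size 0x1000: the base rounds
 * down to 0x88000000 and, since the size is already page aligned, one extra
 * page is added. The mapped window [0x88000000, 0x88002000) then fully
 * covers the requested range [0x88000040, 0x88001040).
 */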

static void spmd_do_sec_cpy(uintptr_t root_base_addr, uintptr_t sec_base_addr,
			    size_t size)
{
	uintptr_t root_base_addr_align, sec_base_addr_align;
	size_t root_mapped_size_align, sec_mapped_size_align;
	int rc;

	assert(root_base_addr != 0UL);
	assert(sec_base_addr != 0UL);
	assert(size != 0UL);

	/* Map the memory with required attributes */
	rc = spmd_dynamic_map_mem(root_base_addr, size, MT_RO_DATA | MT_ROOT,
				  &root_base_addr_align,
				  &root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping", "root region",
		      root_base_addr, rc);
		panic();
	}

	rc = spmd_dynamic_map_mem(sec_base_addr, size, MT_RW_DATA | MT_SECURE,
				  &sec_base_addr_align, &sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping",
		      "secure region", sec_base_addr, rc);
		panic();
	}

	/* Do copy operation */
	(void)memcpy((void *)sec_base_addr, (void *)root_base_addr, size);

	/* Unmap root memory region */
	rc = mmap_remove_dynamic_region(root_base_addr_align,
					root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "root region", root_base_addr_align, rc);
		panic();
	}

	/* Unmap secure memory region */
	rc = mmap_remove_dynamic_region(sec_base_addr_align,
					sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "secure region", sec_base_addr_align, rc);
		panic();
	}
}
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */
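
/*
 * Rationale for the copy above: with RME, BL2 loads the SPMC manifest into
 * a root PAS region that an S-EL2 SPMC cannot read. spmd_do_sec_cpy()
 * duplicates the manifest into a secure PAS region, and spmd_spmc_init()
 * below points the SPMC entry argument (arg0) at the secure copy.
 */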

/*******************************************************************************
 * Loads the SPMC manifest and initialises the SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	cpu_context_t *cpu_ctx;
	unsigned int core_id;
	uint32_t ep_attr, flags;
	int rc;
	const struct dyn_cfg_dtb_info_t *image_info __unused;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
		spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
		SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID, ensure the high bit is set */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
	     SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("%s%x.\n", "SPM Core execution state 0x",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check if S-EL2 is supported on this system if S-EL2
	 * is required for SPM.
	 */
	if (!is_feat_sel2_supported()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {
#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
	image_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TOS_FW_CONFIG_ID);
	assert(image_info != NULL);

	if ((image_info->config_addr == 0UL) ||
	    (image_info->secondary_config_addr == 0UL) ||
	    (image_info->config_max_size == 0UL)) {
		return -EINVAL;
	}

	/* Copy manifest from root->secure region */
	spmd_do_sec_cpy(image_info->config_addr,
			image_info->secondary_config_addr,
			image_info->config_max_size);

	/* Update ep info of BL32 */
	assert(spmc_ep_info != NULL);
	spmc_ep_info->args.arg0 = image_info->secondary_config_addr;
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */

	/* Set an initial SPMC context state for all cores. */
	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
		spm_core_context[core_id].state = SPMC_STATE_OFF;

		/* Setup an initial cpu context for the SPMC. */
		cpu_ctx = &spm_core_context[core_id].cpu_ctx;
		cm_setup_context(cpu_ctx, spmc_ep_info);

		/*
		 * Pass the core linear ID to the SPMC through x4.
		 * (TF-A implementation defined behavior helping
		 * a legacy TOS migration to adopt FF-A).
		 */
		write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
	}

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	INFO("SPM Core setup done.\n");

	/*
	 * Register an interrupt handler routing secure interrupts to SPMD
	 * while the NWd is running.
	 */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					     spmd_secure_interrupt_handler,
					     flags);
	if (rc != 0) {
		panic();
	}

	/*
	 * Permit configurations where the SPM resides at S-EL1/2 and, upon a
	 * Group0 interrupt triggering while the normal world runs, the
	 * interrupt is routed either through the EHF or directly to the SPMD:
	 *
	 * EL3_EXCEPTION_HANDLING=0: the Group0 interrupt is routed to the SPMD
	 * for handling by spmd_group0_interrupt_handler_nwd.
	 *
	 * EL3_EXCEPTION_HANDLING=1: the Group0 interrupt is routed to the EHF.
	 */
#if (EL3_EXCEPTION_HANDLING == 0)
	/*
	 * Register an interrupt handler routing Group0 interrupts to SPMD
	 * while the NWd is running.
	 */
	rc = register_interrupt_type_handler(INTR_TYPE_EL3,
					     spmd_group0_interrupt_handler_nwd,
					     flags);
	if (rc != 0) {
		panic();
	}
#endif

	return 0;
}
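
/*
 * Boot-time call sequence, for reference: BL31 invokes spmd_setup(), which
 * calls spmd_spmc_init() to parse the manifest and register spmd_init() as
 * the deferred BL32 init hook. bl31_main() later runs spmd_init(), which
 * performs the first synchronous entry into the SPMC on the primary core.
 */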

/*******************************************************************************
 * Initialize the context of the SPM Core.
 ******************************************************************************/
int spmd_setup(void)
{
	int rc;
	void *spmc_manifest;

	/*
	 * If the SPMC is at EL3, then just initialise it directly. The
	 * shenanigans of when it is at a lower EL are not needed.
	 */
	if (is_spmc_at_el3()) {
		/* Allow the SPMC to populate its attributes directly. */
		spmc_populate_attrs(&spmc_attrs);

		rc = spmc_setup();
		if (rc != 0) {
			WARN("SPMC initialisation failed 0x%x.\n", rc);
		}
		return 0;
	}

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return 0;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
	if (spmc_manifest == NULL) {
		WARN("Invalid or absent SPM Core manifest.\n");
		return 0;
	}

	/* Load manifest, init SPMC */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return 0;
}

/*******************************************************************************
 * Forward FF-A SMCs to the other security state.
 ******************************************************************************/
uint64_t spmd_smc_switch_state(uint32_t smc_fid,
			       bool secure_origin,
			       uint64_t x1,
			       uint64_t x2,
			       uint64_t x3,
			       uint64_t x4,
			       void *handle)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

	/* Save incoming security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_in == NON_SECURE) {
		cm_el1_sysregs_context_save(secure_state_in);
	}
	cm_el2_sysregs_context_save(secure_state_in);
#else
	cm_el1_sysregs_context_save(secure_state_in);
#endif

	/* Restore outgoing security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_out == NON_SECURE) {
		cm_el1_sysregs_context_restore(secure_state_out);
	}
	cm_el2_sysregs_context_restore(secure_state_out);
#else
	cm_el1_sysregs_context_restore(secure_state_out);
#endif
	cm_set_next_eret_context(secure_state_out);

#if SPMD_SPM_AT_SEL2
	/*
	 * If the SPMC is at S-EL2, save additional registers x8-x17, which may
	 * be used in FF-A calls such as FFA_PARTITION_INFO_GET_REGS.
	 * Note that technically, all SPMCs can support this, but this code is
	 * under ifdef to minimize breakage in case other SPMCs do not save
	 * and restore x8-x17.
	 * We also need to pass through these registers since not all FF-A ABIs
	 * modify x8-x17, in which case, SMCCC requires that these registers be
	 * preserved, so the SPMD passes through these registers and expects the
	 * SPMC to save and restore (potentially also modify) them.
	 */
	SMC_RET18(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
			SMC_GET_GP(handle, CTX_GPREG_X5),
			SMC_GET_GP(handle, CTX_GPREG_X6),
			SMC_GET_GP(handle, CTX_GPREG_X7),
			SMC_GET_GP(handle, CTX_GPREG_X8),
			SMC_GET_GP(handle, CTX_GPREG_X9),
			SMC_GET_GP(handle, CTX_GPREG_X10),
			SMC_GET_GP(handle, CTX_GPREG_X11),
			SMC_GET_GP(handle, CTX_GPREG_X12),
			SMC_GET_GP(handle, CTX_GPREG_X13),
			SMC_GET_GP(handle, CTX_GPREG_X14),
			SMC_GET_GP(handle, CTX_GPREG_X15),
			SMC_GET_GP(handle, CTX_GPREG_X16),
			SMC_GET_GP(handle, CTX_GPREG_X17));
#else
	SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
		 SMC_GET_GP(handle, CTX_GPREG_X5),
		 SMC_GET_GP(handle, CTX_GPREG_X6),
		 SMC_GET_GP(handle, CTX_GPREG_X7));
#endif
}
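
/*
 * SMCCC note on the SMC_RET8/SMC_RET18 split above: most FF-A ABIs pass
 * arguments and results in x0-x7 only, so forwarding eight registers is
 * sufficient for an S-EL1 SPMC. With an S-EL2 SPMC, ABIs such as
 * FFA_PARTITION_INFO_GET_REGS also use x8-x17, hence the wider forwarding
 * window in that configuration.
 */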
725
726/*******************************************************************************
Marc Bonnicida2c9e12021-11-29 18:02:45 +0000727 * Forward SMCs to the other security state.
728 ******************************************************************************/
729static uint64_t spmd_smc_forward(uint32_t smc_fid,
730 bool secure_origin,
731 uint64_t x1,
732 uint64_t x2,
733 uint64_t x3,
734 uint64_t x4,
735 void *cookie,
736 void *handle,
737 uint64_t flags)
738{
739 if (is_spmc_at_el3() && !secure_origin) {
740 return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4,
741 cookie, handle, flags);
742 }
743 return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4,
744 handle);
745
746}
747
748/*******************************************************************************
J-Alves2672cde2020-05-07 18:42:25 +0100749 * Return FFA_ERROR with specified error code
Max Shvetsov745889c2020-02-27 14:54:21 +0000750 ******************************************************************************/
Raghu Krishnamurthy9d9584f2023-04-22 18:00:02 -0700751uint64_t spmd_ffa_error_return(void *handle, int error_code)
Max Shvetsov745889c2020-02-27 14:54:21 +0000752{
J-Alves64ff9932021-03-01 10:26:59 +0000753 SMC_RET8(handle, (uint32_t) FFA_ERROR,
754 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
J-Alves2672cde2020-05-07 18:42:25 +0100755 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
756 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
Achin Gupta86f23532019-10-11 15:41:16 +0100757}

/*******************************************************************************
 * spmd_check_address_in_binary_image
 ******************************************************************************/
bool spmd_check_address_in_binary_image(uint64_t address)
{
	assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));

	return ((address >= spmc_attrs.load_address) &&
		(address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
}

/******************************************************************************
 * spmd_is_spmc_message
 *****************************************************************************/
static bool spmd_is_spmc_message(unsigned int ep)
{
	if (is_spmc_at_el3()) {
		return false;
	}

	return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
		&& (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
}

/******************************************************************************
 * spmd_handle_spmc_message
 *****************************************************************************/
static int spmd_handle_spmc_message(unsigned long long msg,
		unsigned long long parm1, unsigned long long parm2,
		unsigned long long parm3, unsigned long long parm4)
{
	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
		msg, parm1, parm2, parm3, parm4);

	return -EINVAL;
}

/*******************************************************************************
 * This function forwards FF-A SMCs to either the main SPMD handler or the
 * SPMC at EL3, depending on the origin security state, if enabled.
 ******************************************************************************/
uint64_t spmd_ffa_smc_handler(uint32_t smc_fid,
			      uint64_t x1,
			      uint64_t x2,
			      uint64_t x3,
			      uint64_t x4,
			      void *cookie,
			      void *handle,
			      uint64_t flags)
{
	if (is_spmc_at_el3()) {
		/*
		 * If we have an SPMC at EL3, allow it to handle the SMC first.
		 * The SPMC will call back through to the SPMD handler if
		 * required.
		 */
		if (is_caller_secure(flags)) {
			return spmc_smc_handler(smc_fid,
						is_caller_secure(flags),
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
	}
	return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
				handle, flags);
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FF-A. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	unsigned int linear_id = plat_my_core_pos();
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int32_t ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		linear_id, smc_fid, x1, x2, x3, x4,
		SMC_GET_GP(handle, CTX_GPREG_X5),
		SMC_GET_GP(handle, CTX_GPREG_X6),
		SMC_GET_GP(handle, CTX_GPREG_X7));

	/*
	 * If an FFA_PARTITION_INFO_GET_REGS request from an EL3 SPMD logical
	 * partition is in progress, unconditionally return: no other FF-A ABI
	 * is expected to be invoked between calls to
	 * FFA_PARTITION_INFO_GET_REGS.
	 */
	if (is_spmd_logical_sp_info_regs_req_in_progress(ctx)) {
		assert(secure_origin);
		spmd_spm_core_sync_exit(0ULL);
	}

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		/*
		 * If an SPMD logical partition direct request was on-going,
		 * return back to the SPMD logical partition so the error can
		 * be consumed.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			spmd_spm_core_sync_exit(0ULL);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * If the caller is secure and the SPMC was initialized,
		 * return the SPMD's FFA_VERSION.
		 * If the caller is non-secure and the SPMC was initialized,
		 * forward to the EL3 SPMC if enabled, otherwise return
		 * the SPMC version if implemented at a lower EL.
		 * Sanity check "input_version".
		 * If the EL3 SPMC is enabled, ignore the SPMC state as
		 * this is not used.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			if (is_spmc_at_el3()) {
				/*
				 * Forward the call directly to the EL3 SPMC,
				 * if enabled, as we don't need to wrap the
				 * call in a direct request.
				 */
				return spmd_smc_forward(smc_fid, secure_origin,
							x1, x2, x3, x4, cookie,
							handle, flags);
			}

			gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
			uint64_t rc;

			if (spmc_attrs.major_version == 1 &&
			    spmc_attrs.minor_version == 0) {
				ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
						       spmc_attrs.minor_version);
				SMC_RET8(handle, (uint32_t)ret,
					 FFA_TARGET_INFO_MBZ,
					 FFA_TARGET_INFO_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
				break;
			}
			/* Save non-secure system registers context */
			cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
			cm_el2_sysregs_context_save(NON_SECURE);
#endif

			/*
			 * The incoming request has FFA_VERSION as X0 smc_fid
			 * and requested version in x1. Prepare a direct request
			 * from SPMD to SPMC with FFA_VERSION framework function
			 * identifier in X2 and requested version in X3.
			 */
			spmd_build_spmc_message(gpregs,
						SPMD_FWK_MSG_FFA_VERSION_REQ,
						input_version);

			rc = spmd_spm_core_sync_entry(ctx);

			if ((rc != 0ULL) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X0) !=
				FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X2) !=
				(FFA_FWK_MSG_BIT |
				 SPMD_FWK_MSG_FFA_VERSION_RESP))) {
				ERROR("Failed to forward FFA_VERSION\n");
				ret = FFA_ERROR_NOT_SUPPORTED;
			} else {
				ret = SMC_GET_GP(gpregs, CTX_GPREG_X3);
			}

			/*
			 * Return here after the SPMC has handled FFA_VERSION.
			 * The returned SPMC version is held in X3.
			 * Forward this version in X0 to the non-secure caller.
			 */
			return spmd_smc_forward(ret, true, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, cookie, gpregs,
						flags);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
			 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */

	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to SPM Core which will handle it if implemented.
		 */

		/* Forward SMC from Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}

		/*
		 * Return success if call was from secure world i.e. all
		 * FFA functions are supported. This is essentially a
		 * nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FFA component.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
		if (secure_origin) {
			ret = spmd_pm_secondary_ep_register(x1);

			if (ret < 0) {
				SMC_RET8(handle, FFA_ERROR_SMC64,
					 FFA_TARGET_INFO_MBZ, ret,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			} else {
				SMC_RET8(handle, FFA_SUCCESS_SMC64,
					 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			}
		}

		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* Not reached */

	case FFA_SPM_ID_GET:
		if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		/*
		 * Returns the ID of the SPMC or SPMD depending on the FF-A
		 * instance where this function is invoked.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}
		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
		/*
		 * Regardless of secure_origin, SPMD logical partitions cannot
		 * handle direct messages. They can only initiate direct
		 * messages and consume direct responses or errors.
		 */
		if (is_spmd_lp_id(ffa_endpoint_source(x1)) ||
		    is_spmd_lp_id(ffa_endpoint_destination(x1))) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_INVALID_PARAMETER);
		}

		/*
		 * When there is an ongoing SPMD logical partition direct
		 * request, there cannot be another direct request. Return
		 * an error in this case. Panicking is an option but that does
		 * not give the caller the opportunity to abort based on
		 * error codes.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_DENIED);
		}

		if (!secure_origin) {
			/* Validate source endpoint is non-secure for non-secure caller. */
			if (ffa_is_secure_world_id(ffa_endpoint_source(x1))) {
				return spmd_ffa_error_return(handle,
							     FFA_ERROR_INVALID_PARAMETER);
			}
		}
		if (secure_origin && spmd_is_spmc_message(x1)) {
			ret = spmd_handle_spmc_message(x3, x4,
				SMC_GET_GP(handle, CTX_GPREG_X5),
				SMC_GET_GP(handle, CTX_GPREG_X6),
				SMC_GET_GP(handle, CTX_GPREG_X7));

			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, ret,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
		break; /* Not reached */

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
		if (secure_origin && (spmd_is_spmc_message(x1) ||
		    is_spmd_logical_sp_dir_req_in_progress(ctx))) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
		break; /* Not reached */

	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_PARTITION_INFO_GET:
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_NOTIFICATION_BITMAP_CREATE:
	case FFA_NOTIFICATION_BITMAP_DESTROY:
	case FFA_NOTIFICATION_BIND:
	case FFA_NOTIFICATION_UNBIND:
	case FFA_NOTIFICATION_SET:
	case FFA_NOTIFICATION_GET:
	case FFA_NOTIFICATION_INFO_GET:
	case FFA_NOTIFICATION_INFO_GET_SMC64:
	case FFA_MSG_SEND2:
	case FFA_RX_ACQUIRE:
#endif
	case FFA_MSG_RUN:
		/*
		 * The above calls should be invoked only by the Normal world
		 * and must not be forwarded from the Secure world to the
		 * Normal world.
		 */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_MSG_SEND:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_MEM_FRAG_TX:
	case FFA_MEM_FRAG_RX:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * If there is an ongoing direct request from an SPMD logical
		 * partition, return an error.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_DENIED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that the
		 * SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0ULL);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_INTERRUPT:
	case FFA_MSG_YIELD:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_DENIED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_NORMAL_WORLD_RESUME:
		if (secure_origin && ctx->secure_interrupt_ongoing) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_DENIED);
		}
		break; /* Not reached */
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_PARTITION_INFO_GET_REGS_SMC64:
		if (secure_origin) {
			return spmd_el3_populate_logical_partition_info(handle, x1,
									x2, x3);
		}

		/* Call only supported with SMCCC 1.2+ */
		if (MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION) < 0x10002) {
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* Not reached */
#endif
	case FFA_EL3_INTR_HANDLE:
		if (secure_origin) {
			return spmd_handle_group0_intr_swd(handle);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}
	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}
1263}