/*
 * Copyright (c) 2020-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <common/tbbr/tbbr_img_def.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/fconf/fconf.h>
#include <lib/fconf/fconf_dyn_cfg_getter.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/el3_spmd_logical_sp.h>
#include <services/ffa_svc.h>
#include <services/spmc_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information is read from its manifest if the SPMC is not
 * at EL3. Else, it is populated from the SPMC directly.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * SPM Core context on current CPU get helper.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	return &spm_core_context[plat_my_core_pos()];
}

/*******************************************************************************
 * SPM Core ID getter.
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
	return spmc_attrs.spmc_id;
}

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);

static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags);

/******************************************************************************
 * Builds an SPMD to SPMC direct message request.
 *****************************************************************************/
void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
			     unsigned long long message)
{
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
	write_ctx_reg(gpregs, CTX_GPREG_X1,
		      (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
		      spmd_spmc_id_get());
	write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
	write_ctx_reg(gpregs, CTX_GPREG_X3, message);

	/* Zero out x4-x7 for the direct request emitted towards the SPMC. */
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);
}
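
/*
 * Illustrative note: per the FF-A specification, x1 of a direct message
 * encodes the sender ID in bits [31:16] and the receiver ID in bits [15:0],
 * so the request built above travels from the SPMD to the SPMC. BIT(31) in
 * x2 is the framework message bit, distinguishing this request from a
 * partition message.
 */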

/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#else
	cm_el1_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#else
	cm_el1_sysregs_context_save(SECURE);
#endif

	return rc;
}

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}
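
/*
 * Note: spmd_spm_core_sync_exit() is the counterpart of
 * spmd_spm_core_sync_entry(). The SMC handler further below invokes it once
 * the SPMC signals completion of a synchronous entry, e.g. through FFA_ERROR
 * or FFA_MSG_WAIT during boot, a direct response to an SPMD message, or
 * FFA_NORMAL_WORLD_RESUME after a secure interrupt.
 */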

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;

	VERBOSE("SPM Core init start.\n");

	/* Primary boot core enters the SPMC for initialization. */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	spmd_logical_sp_set_spmc_initialized();
	rc = spmd_logical_sp_init();
	if (rc != 0) {
		WARN("SPMD Logical partitions failed init.\n");
	}

	return 1;
}
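
/*
 * Note: spmd_init() is registered through bl31_register_bl32_init() in
 * spmd_spmc_init() below and appears to follow the usual BL32 init
 * convention in which a non-zero return value reports success, which is why
 * the error path above returns 0.
 */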

/*******************************************************************************
 * spmd_secure_interrupt_handler
 * Enter the SPMC for further handling of the secure interrupt by the SPMC
 * itself or a Secure Partition.
 ******************************************************************************/
static uint64_t spmd_secure_interrupt_handler(uint32_t id,
					      uint32_t flags,
					      void *handle,
					      void *cookie)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
	int64_t rc;

	/* Sanity check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering SPMC */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(NON_SECURE);
#else
	cm_el1_sysregs_context_save(NON_SECURE);

#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	/*
	 * The hint bit denoting absence of SVE live state is effectively false
	 * in this scenario where execution was trapped to EL3 due to FIQ.
	 */
	simd_ctx_save(NON_SECURE, false);
	simd_ctx_restore(SECURE);
#endif
#endif

	/* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
	write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

	/* Mark current core as handling a secure interrupt. */
	ctx->secure_interrupt_ongoing = true;

	rc = spmd_spm_core_sync_entry(ctx);

	if (rc != 0ULL) {
		ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, plat_my_core_pos());
	}

	ctx->secure_interrupt_ongoing = false;

#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(NON_SECURE);
#else
	cm_el1_sysregs_context_restore(NON_SECURE);

#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	simd_ctx_save(SECURE, false);
	simd_ctx_restore(NON_SECURE);
#endif
#endif
	cm_set_next_eret_context(NON_SECURE);

	SMC_RET0(&ctx->cpu_ctx);
}
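
/*
 * Note on the round trip above: the SPMC is entered synchronously with
 * FFA_INTERRUPT in x0 and is expected to hand control back through the
 * FFA_NORMAL_WORLD_RESUME SMC (see spmd_smc_handler()), which performs the
 * matching spmd_spm_core_sync_exit() while secure_interrupt_ongoing is set.
 */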
264
Olivier Deprez35bbcf22023-06-08 18:23:26 +0200265#if (EL3_EXCEPTION_HANDLING == 0)
Madhukar Pappireddyb494acf2023-03-02 15:34:05 -0600266/*******************************************************************************
267 * spmd_group0_interrupt_handler_nwd
268 * Group0 secure interrupt in the normal world are trapped to EL3. Delegate the
269 * handling of the interrupt to the platform handler, and return only upon
270 * successfully handling the Group0 interrupt.
271 ******************************************************************************/
272static uint64_t spmd_group0_interrupt_handler_nwd(uint32_t id,
273 uint32_t flags,
274 void *handle,
275 void *cookie)
276{
277 uint32_t intid;
278
279 /* Sanity check the security state when the exception was generated. */
280 assert(get_interrupt_src_ss(flags) == NON_SECURE);
281
282 /* Sanity check the pointer to this cpu's context. */
283 assert(handle == cm_get_context(NON_SECURE));
284
285 assert(id == INTR_ID_UNAVAILABLE);
286
287 assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3);
288
Madhukar Pappireddy2ca75702023-07-12 16:28:05 -0500289 intid = plat_ic_acknowledge_interrupt();
Madhukar Pappireddyb494acf2023-03-02 15:34:05 -0600290
291 if (plat_spmd_handle_group0_interrupt(intid) < 0) {
292 ERROR("Group0 interrupt %u not handled\n", intid);
293 panic();
294 }
295
Madhukar Pappireddy2ca75702023-07-12 16:28:05 -0500296 /* Deactivate the corresponding Group0 interrupt. */
297 plat_ic_end_of_interrupt(intid);
298
Madhukar Pappireddyb494acf2023-03-02 15:34:05 -0600299 return 0U;
300}
Olivier Deprez35bbcf22023-06-08 18:23:26 +0200301#endif
Madhukar Pappireddyb494acf2023-03-02 15:34:05 -0600302
Madhukar Pappireddy41416cc2023-03-02 16:04:38 -0600303/*******************************************************************************
304 * spmd_handle_group0_intr_swd
305 * SPMC delegates handling of Group0 secure interrupt to EL3 firmware using
306 * FFA_EL3_INTR_HANDLE SMC call. Further, SPMD delegates the handling of the
307 * interrupt to the platform handler, and returns only upon successfully
308 * handling the Group0 interrupt.
309 ******************************************************************************/
310static uint64_t spmd_handle_group0_intr_swd(void *handle)
311{
312 uint32_t intid;
313
314 /* Sanity check the pointer to this cpu's context */
315 assert(handle == cm_get_context(SECURE));
316
317 assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3);
318
Madhukar Pappireddy2ca75702023-07-12 16:28:05 -0500319 intid = plat_ic_acknowledge_interrupt();
Madhukar Pappireddy41416cc2023-03-02 16:04:38 -0600320
321 /*
322 * TODO: Currently due to a limitation in SPMD implementation, the
323 * platform handler is expected to not delegate handling to NWd while
324 * processing Group0 secure interrupt.
325 */
326 if (plat_spmd_handle_group0_interrupt(intid) < 0) {
327 /* Group0 interrupt was not handled by the platform. */
328 ERROR("Group0 interrupt %u not handled\n", intid);
329 panic();
330 }
331
Madhukar Pappireddy2ca75702023-07-12 16:28:05 -0500332 /* Deactivate the corresponding Group0 interrupt. */
333 plat_ic_end_of_interrupt(intid);
334
Madhukar Pappireddy41416cc2023-03-02 16:04:38 -0600335 /* Return success. */
336 SMC_RET8(handle, FFA_SUCCESS_SMC32, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
337 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
338 FFA_PARAM_MBZ);
339}
340
Manish V Badarkhe2f4279a2023-02-07 11:26:38 +0000341#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
342static int spmd_dynamic_map_mem(uintptr_t base_addr, size_t size,
343 unsigned int attr, uintptr_t *align_addr,
344 size_t *align_size)
345{
346 uintptr_t base_addr_align;
347 size_t mapped_size_align;
348 int rc;
349
350 /* Page aligned address and size if necessary */
351 base_addr_align = page_align(base_addr, DOWN);
352 mapped_size_align = page_align(size, UP);
353
354 if ((base_addr != base_addr_align) &&
355 (size == mapped_size_align)) {
356 mapped_size_align += PAGE_SIZE;
357 }
358
359 /*
360 * Map dynamically given region with its aligned base address and
361 * size
362 */
363 rc = mmap_add_dynamic_region((unsigned long long)base_addr_align,
364 base_addr_align,
365 mapped_size_align,
366 attr);
367 if (rc == 0) {
368 *align_addr = base_addr_align;
369 *align_size = mapped_size_align;
370 }
371
372 return rc;
373}
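
/*
 * Worked example (illustrative values): for base_addr 0x88001040 and size
 * 0x2000, base_addr_align is 0x88001000 and mapped_size_align becomes
 * 0x3000. The extra PAGE_SIZE added above covers the tail page that an
 * unaligned base pushes the region into when the size was already
 * page-aligned.
 */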

static void spmd_do_sec_cpy(uintptr_t root_base_addr, uintptr_t sec_base_addr,
			    size_t size)
{
	uintptr_t root_base_addr_align, sec_base_addr_align;
	size_t root_mapped_size_align, sec_mapped_size_align;
	int rc;

	assert(root_base_addr != 0UL);
	assert(sec_base_addr != 0UL);
	assert(size != 0UL);

	/* Map the memory with required attributes */
	rc = spmd_dynamic_map_mem(root_base_addr, size, MT_RO_DATA | MT_ROOT,
				  &root_base_addr_align,
				  &root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping", "root region",
		      root_base_addr, rc);
		panic();
	}

	rc = spmd_dynamic_map_mem(sec_base_addr, size, MT_RW_DATA | MT_SECURE,
				  &sec_base_addr_align, &sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping",
		      "secure region", sec_base_addr, rc);
		panic();
	}

	/* Do copy operation */
	(void)memcpy((void *)sec_base_addr, (void *)root_base_addr, size);

	/* Unmap root memory region */
	rc = mmap_remove_dynamic_region(root_base_addr_align,
					root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "root region", root_base_addr_align, rc);
		panic();
	}

	/* Unmap secure memory region */
	rc = mmap_remove_dynamic_region(sec_base_addr_align,
					sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "secure region", sec_base_addr_align, rc);
		panic();
	}
}
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */

/*******************************************************************************
 * Loads the SPMC manifest and initialises the SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	cpu_context_t *cpu_ctx;
	unsigned int core_id;
	uint32_t ep_attr, flags;
	int rc;
	const struct dyn_cfg_dtb_info_t *image_info __unused;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
		spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
		SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID; ensure the high bit is set. */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
	     SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("%s%x.\n", "SPM Core execution state 0x",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check if S-EL2 is supported on this system if S-EL2
	 * is required for SPM
	 */
	if (!is_feat_sel2_supported()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {

#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
	image_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TOS_FW_CONFIG_ID);
	assert(image_info != NULL);

	if ((image_info->config_addr == 0UL) ||
	    (image_info->secondary_config_addr == 0UL) ||
	    (image_info->config_max_size == 0UL)) {
		return -EINVAL;
	}

	/* Copy manifest from root->secure region */
	spmd_do_sec_cpy(image_info->config_addr,
			image_info->secondary_config_addr,
			image_info->config_max_size);

	/* Update ep info of BL32 */
	assert(spmc_ep_info != NULL);
	spmc_ep_info->args.arg0 = image_info->secondary_config_addr;
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */
546
Olivier Deprez4ab7a4a2021-06-21 09:47:13 +0200547 /* Set an initial SPMC context state for all cores. */
548 for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
549 spm_core_context[core_id].state = SPMC_STATE_OFF;
Max Shvetsov745889c2020-02-27 14:54:21 +0000550
Olivier Deprez4ab7a4a2021-06-21 09:47:13 +0200551 /* Setup an initial cpu context for the SPMC. */
552 cpu_ctx = &spm_core_context[core_id].cpu_ctx;
553 cm_setup_context(cpu_ctx, spmc_ep_info);
Achin Gupta86f23532019-10-11 15:41:16 +0100554
Olivier Deprez4ab7a4a2021-06-21 09:47:13 +0200555 /*
556 * Pass the core linear ID to the SPMC through x4.
557 * (TF-A implementation defined behavior helping
558 * a legacy TOS migration to adopt FF-A).
559 */
560 write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
561 }
Achin Gupta86f23532019-10-11 15:41:16 +0100562
Olivier Deprez9afca122019-10-28 09:15:52 +0000563 /* Register power management hooks with PSCI */
564 psci_register_spd_pm_hook(&spmd_pm);
565
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200566 /* Register init function for deferred init. */
Achin Gupta86f23532019-10-11 15:41:16 +0100567 bl31_register_bl32_init(&spmd_init);
568
Olivier Deprez4ab7a4a2021-06-21 09:47:13 +0200569 INFO("SPM Core setup done.\n");
570
Olivier Depreza664c492020-08-05 11:27:42 +0200571 /*
572 * Register an interrupt handler routing secure interrupts to SPMD
573 * while the NWd is running.
574 */
575 flags = 0;
576 set_interrupt_rm_flag(flags, NON_SECURE);
577 rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
578 spmd_secure_interrupt_handler,
579 flags);
580 if (rc != 0) {
581 panic();
582 }
583
Madhukar Pappireddyb494acf2023-03-02 15:34:05 -0600584 /*
Olivier Deprez35bbcf22023-06-08 18:23:26 +0200585 * Permit configurations where the SPM resides at S-EL1/2 and upon a
586 * Group0 interrupt triggering while the normal world runs, the
587 * interrupt is routed either through the EHF or directly to the SPMD:
588 *
589 * EL3_EXCEPTION_HANDLING=0: the Group0 interrupt is routed to the SPMD
590 * for handling by spmd_group0_interrupt_handler_nwd.
591 *
592 * EL3_EXCEPTION_HANDLING=1: the Group0 interrupt is routed to the EHF.
593 *
594 */
595#if (EL3_EXCEPTION_HANDLING == 0)
596 /*
Madhukar Pappireddy89e84562024-03-26 09:21:25 -0500597 * If EL3 interrupts are supported by the platform, register an
598 * interrupt handler routing Group0 interrupts to SPMD while the NWd is
599 * running.
Madhukar Pappireddyb494acf2023-03-02 15:34:05 -0600600 */
Madhukar Pappireddy89e84562024-03-26 09:21:25 -0500601 if (plat_ic_has_interrupt_type(INTR_TYPE_EL3)) {
602 rc = register_interrupt_type_handler(INTR_TYPE_EL3,
603 spmd_group0_interrupt_handler_nwd,
604 flags);
605 if (rc != 0) {
606 panic();
607 }
Madhukar Pappireddyb494acf2023-03-02 15:34:05 -0600608 }
Olivier Deprez35bbcf22023-06-08 18:23:26 +0200609#endif
610
Achin Gupta86f23532019-10-11 15:41:16 +0100611 return 0;
Max Shvetsov745889c2020-02-27 14:54:21 +0000612}
Achin Gupta86f23532019-10-11 15:41:16 +0100613
Max Shvetsov745889c2020-02-27 14:54:21 +0000614/*******************************************************************************
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200615 * Initialize context of SPM Core.
Max Shvetsov745889c2020-02-27 14:54:21 +0000616 ******************************************************************************/
617int spmd_setup(void)
618{
619 int rc;
Marc Bonnici1c33cc32021-11-29 17:57:03 +0000620 void *spmc_manifest;
621
622 /*
623 * If the SPMC is at EL3, then just initialise it directly. The
624 * shenanigans of when it is at a lower EL are not needed.
625 */
626 if (is_spmc_at_el3()) {
627 /* Allow the SPMC to populate its attributes directly. */
628 spmc_populate_attrs(&spmc_attrs);
629
630 rc = spmc_setup();
631 if (rc != 0) {
Olivier Deprez3d203f42022-11-16 16:46:23 +0100632 WARN("SPMC initialisation failed 0x%x.\n", rc);
Marc Bonnici1c33cc32021-11-29 17:57:03 +0000633 }
Olivier Deprez3d203f42022-11-16 16:46:23 +0100634 return 0;
Marc Bonnici1c33cc32021-11-29 17:57:03 +0000635 }
Achin Gupta86f23532019-10-11 15:41:16 +0100636
Max Shvetsov745889c2020-02-27 14:54:21 +0000637 spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200638 if (spmc_ep_info == NULL) {
639 WARN("No SPM Core image provided by BL2 boot loader.\n");
Olivier Deprez3d203f42022-11-16 16:46:23 +0100640 return 0;
Max Shvetsov745889c2020-02-27 14:54:21 +0000641 }
642
643 /* Under no circumstances will this parameter be 0 */
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200644 assert(spmc_ep_info->pc != 0ULL);
Max Shvetsov745889c2020-02-27 14:54:21 +0000645
646 /*
647 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200648 * be used as a manifest for the SPM Core at the next lower EL/mode.
Max Shvetsov745889c2020-02-27 14:54:21 +0000649 */
Olivier Deprez69ca84a2020-02-07 15:44:43 +0100650 spmc_manifest = (void *)spmc_ep_info->args.arg0;
651 if (spmc_manifest == NULL) {
Olivier Deprez3d203f42022-11-16 16:46:23 +0100652 WARN("Invalid or absent SPM Core manifest.\n");
653 return 0;
Max Shvetsov745889c2020-02-27 14:54:21 +0000654 }
655
656 /* Load manifest, init SPMC */
Olivier Deprez69ca84a2020-02-07 15:44:43 +0100657 rc = spmd_spmc_init(spmc_manifest);
Max Shvetsov745889c2020-02-27 14:54:21 +0000658 if (rc != 0) {
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200659 WARN("Booting device without SPM initialization.\n");
Max Shvetsov745889c2020-02-27 14:54:21 +0000660 }
661
Olivier Deprez3d203f42022-11-16 16:46:23 +0100662 return 0;
Max Shvetsov745889c2020-02-27 14:54:21 +0000663}

/*******************************************************************************
 * Forward FF-A SMCs to the other security state.
 ******************************************************************************/
uint64_t spmd_smc_switch_state(uint32_t smc_fid,
			       bool secure_origin,
			       uint64_t x1,
			       uint64_t x2,
			       uint64_t x3,
			       uint64_t x4,
			       void *handle,
			       uint64_t flags)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;
	void *ctx_out;

#if SPMD_SPM_AT_SEL2
	if ((secure_state_out == SECURE) && (is_sve_hint_set(flags) == true)) {
		/*
		 * Set the SVE hint bit in x0 and pass to the lower secure EL,
		 * if it was set by the caller.
		 */
		smc_fid |= (FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT);
	}
#endif

	/* Save incoming security state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(secure_state_in);
#else
	cm_el1_sysregs_context_save(secure_state_in);
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	/* Forward the hint bit denoting the absence of SVE live state. */
	simd_ctx_save(secure_state_in, (!secure_origin && (is_sve_hint_set(flags) == true)));
#endif
#endif

	/* Restore outgoing security state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(secure_state_out);
#else
	cm_el1_sysregs_context_restore(secure_state_out);
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	simd_ctx_restore(secure_state_out);
#endif
#endif
	cm_set_next_eret_context(secure_state_out);

	ctx_out = cm_get_context(secure_state_out);
	if (smc_fid == FFA_NORMAL_WORLD_RESUME) {
		SMC_RET0(ctx_out);
	}

#if SPMD_SPM_AT_SEL2
	/*
	 * If SPMC is at SEL2, save additional registers x8-x17, which may
	 * be used in FF-A calls such as FFA_PARTITION_INFO_GET_REGS.
	 * Note that technically, all SPMCs can support this, but this code is
	 * under ifdef to minimize breakage in case other SPMCs do not save
	 * and restore x8-x17.
	 * We also need to pass through these registers since not all FF-A ABIs
	 * modify x8-x17, in which case, SMCCC requires that these registers be
	 * preserved, so the SPMD passes through these registers and expects the
	 * SPMC to save and restore (potentially also modify) them.
	 */
	SMC_RET18(ctx_out, smc_fid, x1, x2, x3, x4,
		  SMC_GET_GP(handle, CTX_GPREG_X5),
		  SMC_GET_GP(handle, CTX_GPREG_X6),
		  SMC_GET_GP(handle, CTX_GPREG_X7),
		  SMC_GET_GP(handle, CTX_GPREG_X8),
		  SMC_GET_GP(handle, CTX_GPREG_X9),
		  SMC_GET_GP(handle, CTX_GPREG_X10),
		  SMC_GET_GP(handle, CTX_GPREG_X11),
		  SMC_GET_GP(handle, CTX_GPREG_X12),
		  SMC_GET_GP(handle, CTX_GPREG_X13),
		  SMC_GET_GP(handle, CTX_GPREG_X14),
		  SMC_GET_GP(handle, CTX_GPREG_X15),
		  SMC_GET_GP(handle, CTX_GPREG_X16),
		  SMC_GET_GP(handle, CTX_GPREG_X17)
		  );

#else
	SMC_RET8(ctx_out, smc_fid, x1, x2, x3, x4,
		 SMC_GET_GP(handle, CTX_GPREG_X5),
		 SMC_GET_GP(handle, CTX_GPREG_X6),
		 SMC_GET_GP(handle, CTX_GPREG_X7));
#endif
}

/*******************************************************************************
 * Forward SMCs to the other security state.
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	if (is_spmc_at_el3() && !secure_origin) {
		return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);
	}

	return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4,
				     handle, flags);
}

/*******************************************************************************
 * Return FFA_ERROR with the specified error code.
 ******************************************************************************/
uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, (uint32_t) FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*******************************************************************************
 * spmd_check_address_in_binary_image
 ******************************************************************************/
bool spmd_check_address_in_binary_image(uint64_t address)
{
	assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));

	return ((address >= spmc_attrs.load_address) &&
		(address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
}

/******************************************************************************
 * spmd_is_spmc_message
 *****************************************************************************/
static bool spmd_is_spmc_message(unsigned int ep)
{
	if (is_spmc_at_el3()) {
		return false;
	}

	return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
		&& (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
}
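
/*
 * Example (assuming SPMD_DIRECT_MSG_ENDPOINT_ID is 0xFFFF and an SPMC ID of
 * 0x8000): a direct message whose x1 reads 0x8000FFFF carries the SPMC as
 * source and the SPMD as destination, so spmd_is_spmc_message() returns true
 * and the direct response handling below performs a synchronous exit instead
 * of forwarding the call.
 */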

/*******************************************************************************
 * This function forwards FF-A SMCs to either the main SPMD handler or the
 * SPMC at EL3, depending on the origin security state, if enabled.
 ******************************************************************************/
uint64_t spmd_ffa_smc_handler(uint32_t smc_fid,
			      uint64_t x1,
			      uint64_t x2,
			      uint64_t x3,
			      uint64_t x4,
			      void *cookie,
			      void *handle,
			      uint64_t flags)
{
	if (is_spmc_at_el3()) {
		/*
		 * If we have an SPMC at EL3, allow handling of the SMC first.
		 * The SPMC will call back through to the SPMD handler if
		 * required.
		 */
		if (is_caller_secure(flags)) {
			return spmc_smc_handler(smc_fid,
						is_caller_secure(flags),
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
	}
	return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
				handle, flags);
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		plat_my_core_pos(), smc_fid, x1, x2, x3, x4,
		SMC_GET_GP(handle, CTX_GPREG_X5),
		SMC_GET_GP(handle, CTX_GPREG_X6),
		SMC_GET_GP(handle, CTX_GPREG_X7));

	/*
	 * If there is an ongoing FFA_PARTITION_INFO_GET_REGS request from an
	 * EL3 SPMD logical partition, unconditionally return; no other FF-A
	 * ABI is expected to be called between calls to
	 * FFA_PARTITION_INFO_GET_REGS.
	 */
	if (is_spmd_logical_sp_info_regs_req_in_progress(ctx)) {
		assert(secure_origin);
		spmd_spm_core_sync_exit(0ULL);
	}

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		/*
		 * Perform a synchronous exit if:
		 * 1. An SPMD logical partition direct request was ongoing;
		 * return to the SPMD logical partition so the error can be
		 * consumed.
		 * 2. The SPMC sent FFA_ERROR in response to a power management
		 * operation sent through a direct request.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx) ||
		    ctx->psci_operation_ongoing) {
			assert(secure_origin);
			spmd_spm_core_sync_exit(0ULL);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
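		/*
		 * Per FF-A, a version is encoded as (major << 16) | minor with
		 * bit 31 reserved (MBZ); e.g. MAKE_FFA_VERSION(1, 1) yields
		 * 0x10001, which is why an input_version with bit 31 set is
		 * rejected below.
		 */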
		/*
		 * If the caller is secure and the SPMC was initialized,
		 * return the FFA_VERSION of the SPMD.
		 * If the caller is non-secure and the SPMC was initialized,
		 * forward to the EL3 SPMC if enabled, otherwise return
		 * the SPMC version if implemented at a lower EL.
		 * Sanity check "input_version".
		 * If the EL3 SPMC is enabled, ignore the SPMC state as
		 * this is not used.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			if (is_spmc_at_el3()) {
				/*
				 * Forward the call directly to the EL3 SPMC, if
				 * enabled, as we don't need to wrap the call in
				 * a direct request.
				 */
				return spmd_smc_forward(smc_fid, secure_origin,
							x1, x2, x3, x4, cookie,
							handle, flags);
			}

			gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
			uint64_t rc;

			if (spmc_attrs.major_version == 1 &&
			    spmc_attrs.minor_version == 0) {
				ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
						       spmc_attrs.minor_version);
				SMC_RET8(handle, (uint32_t)ret,
					 FFA_TARGET_INFO_MBZ,
					 FFA_TARGET_INFO_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
				break;
			}
			/* Save non-secure system registers context */
#if SPMD_SPM_AT_SEL2
			cm_el2_sysregs_context_save(NON_SECURE);
#else
			cm_el1_sysregs_context_save(NON_SECURE);
#endif

			/*
			 * The incoming request has FFA_VERSION as X0 smc_fid
			 * and requested version in x1. Prepare a direct request
			 * from SPMD to SPMC with FFA_VERSION framework function
			 * identifier in X2 and requested version in X3.
			 */
			spmd_build_spmc_message(gpregs,
						SPMD_FWK_MSG_FFA_VERSION_REQ,
						input_version);

			/*
			 * Ensure x8-x17 NS GP register values are untouched when returning
			 * from the SPMC.
			 */
			write_ctx_reg(gpregs, CTX_GPREG_X8, SMC_GET_GP(handle, CTX_GPREG_X8));
			write_ctx_reg(gpregs, CTX_GPREG_X9, SMC_GET_GP(handle, CTX_GPREG_X9));
			write_ctx_reg(gpregs, CTX_GPREG_X10, SMC_GET_GP(handle, CTX_GPREG_X10));
			write_ctx_reg(gpregs, CTX_GPREG_X11, SMC_GET_GP(handle, CTX_GPREG_X11));
			write_ctx_reg(gpregs, CTX_GPREG_X12, SMC_GET_GP(handle, CTX_GPREG_X12));
			write_ctx_reg(gpregs, CTX_GPREG_X13, SMC_GET_GP(handle, CTX_GPREG_X13));
			write_ctx_reg(gpregs, CTX_GPREG_X14, SMC_GET_GP(handle, CTX_GPREG_X14));
			write_ctx_reg(gpregs, CTX_GPREG_X15, SMC_GET_GP(handle, CTX_GPREG_X15));
			write_ctx_reg(gpregs, CTX_GPREG_X16, SMC_GET_GP(handle, CTX_GPREG_X16));
			write_ctx_reg(gpregs, CTX_GPREG_X17, SMC_GET_GP(handle, CTX_GPREG_X17));

			rc = spmd_spm_core_sync_entry(ctx);

			if ((rc != 0ULL) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X0) !=
			     FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X2) !=
			     (FFA_FWK_MSG_BIT |
			      SPMD_FWK_MSG_FFA_VERSION_RESP))) {
				ERROR("Failed to forward FFA_VERSION\n");
				ret = FFA_ERROR_NOT_SUPPORTED;
			} else {
				ret = SMC_GET_GP(gpregs, CTX_GPREG_X3);
			}

			/*
			 * x0-x4 are updated by spmd_smc_forward below.
			 * Zero out x5-x7 in the FFA_VERSION response.
			 */
			write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
			write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
			write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

			/*
			 * Return here after SPMC has handled FFA_VERSION.
			 * The returned SPMC version is held in X3.
			 * Forward this version in X0 to the non-secure caller.
			 */
			return spmd_smc_forward(ret, true, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, cookie, gpregs,
						flags);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
			 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */

	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to SPM Core which will handle it if implemented.
		 */

		/* Forward SMC from Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}

		/*
		 * Return success if call was from secure world i.e. all
		 * FFA functions are supported. This is essentially a
		 * nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FFA component.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
		if (secure_origin) {
			ret = spmd_pm_secondary_ep_register(x1);

			if (ret < 0) {
				SMC_RET8(handle, FFA_ERROR_SMC64,
					 FFA_TARGET_INFO_MBZ, ret,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			} else {
				SMC_RET8(handle, FFA_SUCCESS_SMC64,
					 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			}
		}

		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* Not reached */

	case FFA_SPM_ID_GET:
		if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		/*
		 * Returns the ID of the SPMC or SPMD depending on the FF-A
		 * instance where this function is invoked.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}
		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_REQ2_SMC64:
		/*
		 * Regardless of secure_origin, SPMD logical partitions cannot
		 * handle direct messages. They can only initiate direct
		 * messages and consume direct responses or errors.
		 */
		if (is_spmd_lp_id(ffa_endpoint_source(x1)) ||
		    is_spmd_lp_id(ffa_endpoint_destination(x1))) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_INVALID_PARAMETER);
		}

		/*
		 * When there is an ongoing SPMD logical partition direct
		 * request, there cannot be another direct request. Return an
		 * error in this case. Panicking is an option, but that does
		 * not provide the opportunity for the caller to abort based
		 * on error codes.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_DENIED);
		}

		if (!secure_origin) {
			/* Validate source endpoint is non-secure for non-secure caller. */
			if (ffa_is_secure_world_id(ffa_endpoint_source(x1))) {
				return spmd_ffa_error_return(handle,
							     FFA_ERROR_INVALID_PARAMETER);
			}
		}
		if (secure_origin && spmd_is_spmc_message(x1)) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_DENIED);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
		break; /* Not reached */

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP2_SMC64:
		if (secure_origin && (spmd_is_spmc_message(x1) ||
		    is_spmd_logical_sp_dir_req_in_progress(ctx))) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
		break; /* Not reached */
	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_PARTITION_INFO_GET:
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_NOTIFICATION_BITMAP_CREATE:
	case FFA_NOTIFICATION_BITMAP_DESTROY:
	case FFA_NOTIFICATION_BIND:
	case FFA_NOTIFICATION_UNBIND:
	case FFA_NOTIFICATION_SET:
	case FFA_NOTIFICATION_GET:
	case FFA_NOTIFICATION_INFO_GET:
	case FFA_NOTIFICATION_INFO_GET_SMC64:
	case FFA_MSG_SEND2:
	case FFA_RX_ACQUIRE:
#endif
	case FFA_MSG_RUN:
		/*
		 * The above calls should be invoked only by the Normal world
		 * and must not be forwarded from the Secure world to the
		 * Normal world.
		 */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_MSG_SEND:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_MEM_FRAG_TX:
	case FFA_MEM_FRAG_RX:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * If there is an ongoing direct request from an SPMD logical
		 * partition, return an error.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_DENIED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that the
		 * SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0ULL);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_INTERRUPT:
	case FFA_MSG_YIELD:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_DENIED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_NORMAL_WORLD_RESUME:
		if (secure_origin && ctx->secure_interrupt_ongoing) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_DENIED);
		}
		break; /* Not reached */
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_PARTITION_INFO_GET_REGS_SMC64:
		if (secure_origin) {
			return spmd_el3_populate_logical_partition_info(handle, x1,
									x2, x3);
		}

		/* Call only supported with SMCCC 1.2+ */
		if (MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION) < 0x10002) {
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* Not reached */
#endif
	case FFA_CONSOLE_LOG_SMC32:
	case FFA_CONSOLE_LOG_SMC64:
		/* This interface must not be forwarded to other worlds. */
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* not reached */

	case FFA_EL3_INTR_HANDLE:
		if (secure_origin) {
			return spmd_handle_group0_intr_swd(handle);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}
	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}