/*
 * Copyright (c) 2020-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <common/tbbr/tbbr_img_def.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/fconf/fconf.h>
#include <lib/fconf/fconf_dyn_cfg_getter.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/el3_spmd_logical_sp.h>
#include <services/ffa_svc.h>
#include <services/spmc_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information is read from its manifest if the SPMC is not
 * at EL3. Otherwise, it is populated from the SPMC directly.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * Helper to get the SPM Core context on the current CPU.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	return &spm_core_context[plat_my_core_pos()];
}

/*******************************************************************************
 * SPM Core ID getter.
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
	return spmc_attrs.spmc_id;
}

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);

static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags);

/******************************************************************************
 * Builds an SPMD to SPMC direct message request.
 *****************************************************************************/
void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
			     unsigned long long message)
{
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
	write_ctx_reg(gpregs, CTX_GPREG_X1,
		      (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
		      spmd_spmc_id_get());
	write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
	write_ctx_reg(gpregs, CTX_GPREG_X3, message);

	/* Zero out x4-x7 for the direct request emitted towards the SPMC. */
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);
}
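
/*
 * Illustrative register layout of the resulting framework direct request
 * (see the FFA_VERSION handling further below, which uses this helper):
 *   X0: FFA_MSG_SEND_DIRECT_REQ_SMC32
 *   X1: SPMD endpoint ID (source field) | SPMC ID (destination field)
 *   X2: BIT(31) framework message flag | target_func
 *       (e.g. SPMD_FWK_MSG_FFA_VERSION_REQ)
 *   X3: message payload (e.g. the requested FF-A version)
 *   X4-X7: zero
 */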

/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#else
	cm_el1_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#else
	cm_el1_sysregs_context_save(SECURE);
#endif

	return rc;
}
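
/*
 * Note: spmd_spm_core_enter()/spmd_spm_core_exit() are assembly helpers that
 * save and restore the EL3 C runtime (callee-saved) context, so execution
 * resumes in spmd_spm_core_sync_entry() when the SPMC hands control back
 * through spmd_spm_core_sync_exit() below.
 */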

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;

	VERBOSE("SPM Core init start.\n");

	/* Primary boot core enters the SPMC for initialization. */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	spmd_logical_sp_set_spmc_initialized();
	rc = spmd_logical_sp_init();
	if (rc != 0) {
		WARN("SPMD logical partitions failed to initialize.\n");
	}

	return 1;
}
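
/*
 * Note: spmd_init() is registered through bl31_register_bl32_init() below;
 * bl31_main() treats a zero return from the registered init hook as an
 * initialization failure, hence "return 0" on the error path and "return 1"
 * on success above.
 */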

/*******************************************************************************
 * spmd_secure_interrupt_handler
 * Enter the SPMC for further handling of the secure interrupt by the SPMC
 * itself or a Secure Partition.
 ******************************************************************************/
static uint64_t spmd_secure_interrupt_handler(uint32_t id,
					      uint32_t flags,
					      void *handle,
					      void *cookie)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
	int64_t rc;

	/* Sanity check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering SPMC */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(NON_SECURE);
#else
	cm_el1_sysregs_context_save(NON_SECURE);

#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	/*
	 * The hint bit denoting absence of SVE live state is effectively false
	 * in this scenario where execution was trapped to EL3 due to FIQ.
	 */
	simd_ctx_save(NON_SECURE, false);
#endif
#endif

	/* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
	write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);
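
	/*
	 * The interrupt ID is deliberately not conveyed in x1; the SPMC is
	 * expected to query the interrupt controller itself to determine the
	 * pending secure interrupt.
	 */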

	/* Mark current core as handling a secure interrupt. */
	ctx->secure_interrupt_ongoing = true;

#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	simd_ctx_restore(SECURE);
#endif
	rc = spmd_spm_core_sync_entry(ctx);

#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	simd_ctx_save(SECURE, false);
#endif
	if (rc != 0ULL) {
		ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, plat_my_core_pos());
	}

	ctx->secure_interrupt_ongoing = false;

#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(NON_SECURE);
#else
	cm_el1_sysregs_context_restore(NON_SECURE);

#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	simd_ctx_restore(NON_SECURE);
#endif
#endif
	cm_set_next_eret_context(NON_SECURE);

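	/*
	 * Return to the restored non-secure context with unmodified
	 * arguments; the normal world resumes where it was preempted by the
	 * secure interrupt.
	 */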
	SMC_RET0(&ctx->cpu_ctx);
}

#if (EL3_EXCEPTION_HANDLING == 0)
/*******************************************************************************
 * spmd_group0_interrupt_handler_nwd
 * Group0 secure interrupts triggered while the normal world runs are trapped
 * to EL3. Delegate the handling of the interrupt to the platform handler, and
 * return only upon successfully handling the Group0 interrupt.
 ******************************************************************************/
static uint64_t spmd_group0_interrupt_handler_nwd(uint32_t id,
						  uint32_t flags,
						  void *handle,
						  void *cookie)
{
	uint32_t intid;

	/* Sanity check the security state when the exception was generated. */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context. */
	assert(handle == cm_get_context(NON_SECURE));

	assert(id == INTR_ID_UNAVAILABLE);

	assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3);

	intid = plat_ic_acknowledge_interrupt();

	if (plat_spmd_handle_group0_interrupt(intid) < 0) {
		ERROR("Group0 interrupt %u not handled\n", intid);
		panic();
	}

	/* Deactivate the corresponding Group0 interrupt. */
	plat_ic_end_of_interrupt(intid);

	return 0U;
}
#endif

/*******************************************************************************
 * spmd_handle_group0_intr_swd
 * The SPMC delegates the handling of a Group0 secure interrupt to EL3 firmware
 * using the FFA_EL3_INTR_HANDLE SMC call. Further, the SPMD delegates the
 * handling of the interrupt to the platform handler, and returns only upon
 * successfully handling the Group0 interrupt.
 ******************************************************************************/
static uint64_t spmd_handle_group0_intr_swd(void *handle)
{
	uint32_t intid;

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(SECURE));

	assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3);

	intid = plat_ic_acknowledge_interrupt();

	/*
	 * TODO: Due to a current limitation in the SPMD implementation, the
	 * platform handler is expected not to delegate handling to the NWd
	 * while processing a Group0 secure interrupt.
	 */
	if (plat_spmd_handle_group0_interrupt(intid) < 0) {
		/* Group0 interrupt was not handled by the platform. */
		ERROR("Group0 interrupt %u not handled\n", intid);
		panic();
	}

	/* Deactivate the corresponding Group0 interrupt. */
	plat_ic_end_of_interrupt(intid);

	/* Return success. */
	SMC_RET8(handle, FFA_SUCCESS_SMC32, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ);
}

#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
static int spmd_dynamic_map_mem(uintptr_t base_addr, size_t size,
				unsigned int attr, uintptr_t *align_addr,
				size_t *align_size)
{
	uintptr_t base_addr_align;
	size_t mapped_size_align;
	int rc;

	/* Page-align the address and size if necessary */
	base_addr_align = page_align(base_addr, DOWN);
	mapped_size_align = page_align(size, UP);

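	/*
	 * If the base address was rounded down but the size was already
	 * page-aligned, the tail of the region now extends past the mapped
	 * size; grow the mapping by one page to cover it.
	 */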
	if ((base_addr != base_addr_align) &&
	    (size == mapped_size_align)) {
		mapped_size_align += PAGE_SIZE;
	}

	/*
	 * Dynamically map the given region using its aligned base address
	 * and size.
	 */
	rc = mmap_add_dynamic_region((unsigned long long)base_addr_align,
				     base_addr_align,
				     mapped_size_align,
				     attr);
	if (rc == 0) {
		*align_addr = base_addr_align;
		*align_size = mapped_size_align;
	}

	return rc;
}

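/*
 * Copies a region from root PAS memory into secure PAS memory. With RME
 * enabled, BL2 leaves the SPMC manifest in root memory, which the SPMC at
 * S-EL2 cannot access; spmd_spmc_init() below therefore duplicates the
 * manifest into a secure region before handing it over.
 */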
static void spmd_do_sec_cpy(uintptr_t root_base_addr, uintptr_t sec_base_addr,
			    size_t size)
{
	uintptr_t root_base_addr_align, sec_base_addr_align;
	size_t root_mapped_size_align, sec_mapped_size_align;
	int rc;

	assert(root_base_addr != 0UL);
	assert(sec_base_addr != 0UL);
	assert(size != 0UL);

	/* Map the memory with required attributes */
	rc = spmd_dynamic_map_mem(root_base_addr, size, MT_RO_DATA | MT_ROOT,
				  &root_base_addr_align,
				  &root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping", "root region",
		      root_base_addr, rc);
		panic();
	}

	rc = spmd_dynamic_map_mem(sec_base_addr, size, MT_RW_DATA | MT_SECURE,
				  &sec_base_addr_align, &sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping",
		      "secure region", sec_base_addr, rc);
		panic();
	}

	/* Do copy operation */
	(void)memcpy((void *)sec_base_addr, (void *)root_base_addr, size);

	/* Unmap root memory region */
	rc = mmap_remove_dynamic_region(root_base_addr_align,
					root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "root region", root_base_addr_align, rc);
		panic();
	}

	/* Unmap secure memory region */
	rc = mmap_remove_dynamic_region(sec_base_addr_align,
					sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "secure region", sec_base_addr_align, rc);
		panic();
	}
}
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */

/*******************************************************************************
 * Loads the SPMC manifest and initializes the SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	cpu_context_t *cpu_ctx;
	unsigned int core_id;
	uint32_t ep_attr, flags;
	int rc;
	const struct dyn_cfg_dtb_info_t *image_info __unused;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
		spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
		SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID; ensure the high bit is set */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
	     SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("%s%x.\n", "SPM Core execution state 0x",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check if S-EL2 is supported on this system if S-EL2
	 * is required for SPM
	 */
	if (!is_feat_sel2_supported()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {

#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
	image_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TOS_FW_CONFIG_ID);
	assert(image_info != NULL);

	if ((image_info->config_addr == 0UL) ||
	    (image_info->secondary_config_addr == 0UL) ||
	    (image_info->config_max_size == 0UL)) {
		return -EINVAL;
	}

	/* Copy manifest from root->secure region */
	spmd_do_sec_cpy(image_info->config_addr,
			image_info->secondary_config_addr,
			image_info->config_max_size);

	/* Update ep info of BL32 */
	assert(spmc_ep_info != NULL);
	spmc_ep_info->args.arg0 = image_info->secondary_config_addr;
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */

	/* Set an initial SPMC context state for all cores. */
	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
		spm_core_context[core_id].state = SPMC_STATE_OFF;

		/* Setup an initial cpu context for the SPMC. */
		cpu_ctx = &spm_core_context[core_id].cpu_ctx;
		cm_setup_context(cpu_ctx, spmc_ep_info);

		/*
		 * Pass the core linear ID to the SPMC through x4.
		 * (TF-A implementation defined behavior helping
		 * a legacy TOS migration to adopt FF-A).
		 */
		write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
	}

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	INFO("SPM Core setup done.\n");

	/*
	 * Register an interrupt handler routing secure interrupts to SPMD
	 * while the NWd is running.
	 */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					     spmd_secure_interrupt_handler,
					     flags);
	if (rc != 0) {
		panic();
	}

	/*
	 * Permit configurations where the SPM resides at S-EL1/2 and upon a
	 * Group0 interrupt triggering while the normal world runs, the
	 * interrupt is routed either through the EHF or directly to the SPMD:
	 *
	 * EL3_EXCEPTION_HANDLING=0: the Group0 interrupt is routed to the SPMD
	 * for handling by spmd_group0_interrupt_handler_nwd.
	 *
	 * EL3_EXCEPTION_HANDLING=1: the Group0 interrupt is routed to the EHF.
	 */
#if (EL3_EXCEPTION_HANDLING == 0)
	/*
	 * If EL3 interrupts are supported by the platform, register an
	 * interrupt handler routing Group0 interrupts to SPMD while the NWd is
	 * running.
	 */
	if (plat_ic_has_interrupt_type(INTR_TYPE_EL3)) {
		rc = register_interrupt_type_handler(INTR_TYPE_EL3,
						     spmd_group0_interrupt_handler_nwd,
						     flags);
		if (rc != 0) {
			panic();
		}
	}
#endif

	return 0;
}

/*******************************************************************************
 * Initialize context of SPM Core.
 ******************************************************************************/
int spmd_setup(void)
{
	int rc;
	void *spmc_manifest;

	/*
	 * If the SPMC is at EL3, then just initialise it directly. The
	 * shenanigans of when it is at a lower EL are not needed.
	 */
	if (is_spmc_at_el3()) {
		/* Allow the SPMC to populate its attributes directly. */
		spmc_populate_attrs(&spmc_attrs);

		rc = spmc_setup();
		if (rc != 0) {
			WARN("SPMC initialisation failed 0x%x.\n", rc);
		}
		return 0;
	}

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return 0;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
	if (spmc_manifest == NULL) {
		WARN("Invalid or absent SPM Core manifest.\n");
		return 0;
	}

	/* Load manifest, init SPMC */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return 0;
}

/*******************************************************************************
 * Forward FF-A SMCs to the other security state.
 ******************************************************************************/
uint64_t spmd_smc_switch_state(uint32_t smc_fid,
			       bool secure_origin,
			       uint64_t x1,
			       uint64_t x2,
			       uint64_t x3,
			       uint64_t x4,
			       void *handle,
			       uint64_t flags)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;
	void *ctx_out;

#if SPMD_SPM_AT_SEL2
	if ((secure_state_out == SECURE) && (is_sve_hint_set(flags) == true)) {
		/*
		 * Set the SVE hint bit in x0 and pass to the lower secure EL,
		 * if it was set by the caller.
		 */
		smc_fid |= (FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT);
	}
#endif

	/* Save incoming security state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(secure_state_in);
#else
	cm_el1_sysregs_context_save(secure_state_in);
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	/* Forward the hint bit denoting the absence of SVE live state. */
	simd_ctx_save(secure_state_in, (!secure_origin && (is_sve_hint_set(flags) == true)));
#endif
#endif

	/* Restore outgoing security state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(secure_state_out);
#else
	cm_el1_sysregs_context_restore(secure_state_out);
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	simd_ctx_restore(secure_state_out);
#endif
#endif
	cm_set_next_eret_context(secure_state_out);

	ctx_out = cm_get_context(secure_state_out);
#if SPMD_SPM_AT_SEL2
	/*
	 * If SPMC is at SEL2, save additional registers x8-x17, which may
	 * be used in FF-A calls such as FFA_PARTITION_INFO_GET_REGS.
	 * Note that technically, all SPMCs can support this, but this code is
	 * under ifdef to minimize breakage in case other SPMCs do not save
	 * and restore x8-x17.
	 * We also need to pass through these registers since not all FF-A ABIs
	 * modify x8-x17, in which case, SMCCC requires that these registers be
	 * preserved, so the SPMD passes through these registers and expects the
	 * SPMC to save and restore (potentially also modify) them.
	 */
	SMC_RET18(ctx_out, smc_fid, x1, x2, x3, x4,
		  SMC_GET_GP(handle, CTX_GPREG_X5),
		  SMC_GET_GP(handle, CTX_GPREG_X6),
		  SMC_GET_GP(handle, CTX_GPREG_X7),
		  SMC_GET_GP(handle, CTX_GPREG_X8),
		  SMC_GET_GP(handle, CTX_GPREG_X9),
		  SMC_GET_GP(handle, CTX_GPREG_X10),
		  SMC_GET_GP(handle, CTX_GPREG_X11),
		  SMC_GET_GP(handle, CTX_GPREG_X12),
		  SMC_GET_GP(handle, CTX_GPREG_X13),
		  SMC_GET_GP(handle, CTX_GPREG_X14),
		  SMC_GET_GP(handle, CTX_GPREG_X15),
		  SMC_GET_GP(handle, CTX_GPREG_X16),
		  SMC_GET_GP(handle, CTX_GPREG_X17));
#else
	SMC_RET8(ctx_out, smc_fid, x1, x2, x3, x4,
		 SMC_GET_GP(handle, CTX_GPREG_X5),
		 SMC_GET_GP(handle, CTX_GPREG_X6),
		 SMC_GET_GP(handle, CTX_GPREG_X7));
#endif
}

/*******************************************************************************
 * Forward SMCs to the other security state.
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	if (is_spmc_at_el3() && !secure_origin) {
		return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);
	}

	return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4,
				     handle, flags);
}

/*******************************************************************************
 * Return FFA_ERROR with the specified error code.
 ******************************************************************************/
uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, (uint32_t)FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*******************************************************************************
 * spmd_check_address_in_binary_image
 ******************************************************************************/
bool spmd_check_address_in_binary_image(uint64_t address)
{
	assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));

	return ((address >= spmc_attrs.load_address) &&
		(address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
}

/******************************************************************************
 * spmd_is_spmc_message
 *****************************************************************************/
static bool spmd_is_spmc_message(unsigned int ep)
{
	if (is_spmc_at_el3()) {
		return false;
	}

	return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
		&& (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
}

/******************************************************************************
 * spmd_handle_spmc_message
 *****************************************************************************/
static int spmd_handle_spmc_message(unsigned long long msg,
		unsigned long long parm1, unsigned long long parm2,
		unsigned long long parm3, unsigned long long parm4)
{
	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
		msg, parm1, parm2, parm3, parm4);

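	/*
	 * No SPMC-to-SPMD service messages are currently defined; the payload
	 * is only logged above and reported back to the SPMC as invalid.
	 */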
	return -EINVAL;
}

/*******************************************************************************
 * This function forwards FF-A SMCs to either the main SPMD handler or the
 * SPMC at EL3, depending on the origin security state, if enabled.
 ******************************************************************************/
uint64_t spmd_ffa_smc_handler(uint32_t smc_fid,
			      uint64_t x1,
			      uint64_t x2,
			      uint64_t x3,
			      uint64_t x4,
			      void *cookie,
			      void *handle,
			      uint64_t flags)
{
	if (is_spmc_at_el3()) {
		/*
		 * If we have an SPMC at EL3 allow handling of the SMC first.
		 * The SPMC will call back through to the SPMD handler if
		 * required.
		 */
		if (is_caller_secure(flags)) {
			return spmc_smc_handler(smc_fid,
						is_caller_secure(flags),
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
	}
	return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
				handle, flags);
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FF-A. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		plat_my_core_pos(), smc_fid, x1, x2, x3, x4,
		SMC_GET_GP(handle, CTX_GPREG_X5),
		SMC_GET_GP(handle, CTX_GPREG_X6),
		SMC_GET_GP(handle, CTX_GPREG_X7));

	/*
	 * If there is an on-going FFA_PARTITION_INFO_GET_REGS request from an
	 * EL3 SPMD logical partition, return unconditionally; no other FF-A
	 * ABIs are expected to be invoked between calls to
	 * FFA_PARTITION_INFO_GET_REGS.
	 */
	if (is_spmd_logical_sp_info_regs_req_in_progress(ctx)) {
		assert(secure_origin);
		spmd_spm_core_sync_exit(0ULL);
	}

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		/*
		 * If an SPMD logical partition direct request was on-going,
		 * return to the SPMD logical partition so the error can be
		 * consumed.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			spmd_spm_core_sync_exit(0ULL);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * If the caller is secure and the SPMC was initialized,
		 * return the FFA_VERSION of the SPMD.
		 * If the caller is non-secure and the SPMC was initialized,
		 * forward to the EL3 SPMC if enabled, otherwise return
		 * the SPMC version if implemented at a lower EL.
		 * Sanity check "input_version".
		 * If the EL3 SPMC is enabled, ignore the SPMC state as
		 * it is not used.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			if (is_spmc_at_el3()) {
				/*
				 * Forward the call directly to the EL3 SPMC, if
				 * enabled, as we don't need to wrap the call in
				 * a direct request.
				 */
				return spmd_smc_forward(smc_fid, secure_origin,
							x1, x2, x3, x4, cookie,
							handle, flags);
			}

			gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
			uint64_t rc;

			if (spmc_attrs.major_version == 1 &&
			    spmc_attrs.minor_version == 0) {
				ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
						       spmc_attrs.minor_version);
				SMC_RET8(handle, (uint32_t)ret,
					 FFA_TARGET_INFO_MBZ,
					 FFA_TARGET_INFO_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
				break;
			}
			/* Save non-secure system registers context */
#if SPMD_SPM_AT_SEL2
			cm_el2_sysregs_context_save(NON_SECURE);
#else
			cm_el1_sysregs_context_save(NON_SECURE);
#endif

			/*
			 * The incoming request has FFA_VERSION as X0 smc_fid
			 * and requested version in x1. Prepare a direct request
			 * from SPMD to SPMC with FFA_VERSION framework function
			 * identifier in X2 and requested version in X3.
			 */
			spmd_build_spmc_message(gpregs,
						SPMD_FWK_MSG_FFA_VERSION_REQ,
						input_version);

			/*
			 * Ensure x8-x17 NS GP register values are untouched
			 * when returning from the SPMC.
			 */
			write_ctx_reg(gpregs, CTX_GPREG_X8, SMC_GET_GP(handle, CTX_GPREG_X8));
			write_ctx_reg(gpregs, CTX_GPREG_X9, SMC_GET_GP(handle, CTX_GPREG_X9));
			write_ctx_reg(gpregs, CTX_GPREG_X10, SMC_GET_GP(handle, CTX_GPREG_X10));
			write_ctx_reg(gpregs, CTX_GPREG_X11, SMC_GET_GP(handle, CTX_GPREG_X11));
			write_ctx_reg(gpregs, CTX_GPREG_X12, SMC_GET_GP(handle, CTX_GPREG_X12));
			write_ctx_reg(gpregs, CTX_GPREG_X13, SMC_GET_GP(handle, CTX_GPREG_X13));
			write_ctx_reg(gpregs, CTX_GPREG_X14, SMC_GET_GP(handle, CTX_GPREG_X14));
			write_ctx_reg(gpregs, CTX_GPREG_X15, SMC_GET_GP(handle, CTX_GPREG_X15));
			write_ctx_reg(gpregs, CTX_GPREG_X16, SMC_GET_GP(handle, CTX_GPREG_X16));
			write_ctx_reg(gpregs, CTX_GPREG_X17, SMC_GET_GP(handle, CTX_GPREG_X17));

			rc = spmd_spm_core_sync_entry(ctx);

			if ((rc != 0ULL) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X0) !=
				FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X2) !=
				(FFA_FWK_MSG_BIT |
				 SPMD_FWK_MSG_FFA_VERSION_RESP))) {
				ERROR("Failed to forward FFA_VERSION\n");
				ret = FFA_ERROR_NOT_SUPPORTED;
			} else {
				ret = SMC_GET_GP(gpregs, CTX_GPREG_X3);
			}

			/*
			 * x0-x4 are updated by spmd_smc_forward below.
			 * Zero out x5-x7 in the FFA_VERSION response.
			 */
			write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
			write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
			write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

			/*
			 * Return here after the SPMC has handled FFA_VERSION.
			 * The returned SPMC version is held in X3.
			 * Forward this version in X0 to the non-secure caller.
			 */
			return spmd_smc_forward(ret, true, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, cookie, gpregs,
						flags);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
			 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */

	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to SPM Core which will handle it if implemented.
		 */

		/* Forward SMC from Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}

		/*
		 * Return success if call was from secure world i.e. all
		 * FFA functions are supported. This is essentially a
		 * nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FFA component.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
		if (secure_origin) {
			ret = spmd_pm_secondary_ep_register(x1);

			if (ret < 0) {
				SMC_RET8(handle, FFA_ERROR_SMC64,
					 FFA_TARGET_INFO_MBZ, ret,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			} else {
				SMC_RET8(handle, FFA_SUCCESS_SMC64,
					 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			}
		}

		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* Not reached */

	case FFA_SPM_ID_GET:
		if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		/*
		 * Returns the ID of the SPMC or SPMD depending on the FF-A
		 * instance where this function is invoked
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}
		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
		/*
		 * Regardless of secure_origin, SPMD logical partitions cannot
		 * handle direct messages. They can only initiate direct
		 * messages and consume direct responses or errors.
		 */
		if (is_spmd_lp_id(ffa_endpoint_source(x1)) ||
		    is_spmd_lp_id(ffa_endpoint_destination(x1))) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_INVALID_PARAMETER);
		}

		/*
		 * When there is an ongoing SPMD logical partition direct
		 * request, there cannot be another direct request. Return an
		 * error in this case. Panicking is an option, but it would not
		 * give the caller the opportunity to abort based on error
		 * codes.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_DENIED);
		}

		if (!secure_origin) {
			/* Validate source endpoint is non-secure for non-secure caller. */
			if (ffa_is_secure_world_id(ffa_endpoint_source(x1))) {
				return spmd_ffa_error_return(handle,
							     FFA_ERROR_INVALID_PARAMETER);
			}
		}
		if (secure_origin && spmd_is_spmc_message(x1)) {
			ret = spmd_handle_spmc_message(x3, x4,
				SMC_GET_GP(handle, CTX_GPREG_X5),
				SMC_GET_GP(handle, CTX_GPREG_X6),
				SMC_GET_GP(handle, CTX_GPREG_X7));

			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, ret,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
		break; /* Not reached */

	case FFA_MSG_SEND_DIRECT_REQ2_SMC64:
		if (!secure_origin) {
			/* Validate source endpoint is non-secure for non-secure caller. */
			if (ffa_is_secure_world_id(ffa_endpoint_source(x1))) {
				return spmd_ffa_error_return(handle,
							     FFA_ERROR_INVALID_PARAMETER);
			}
		}
		/* FFA_MSG_SEND_DIRECT_REQ2 not used for framework messages. */
		if (secure_origin && spmd_is_spmc_message(x1)) {
			return spmd_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
		break; /* Not reached */

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
		if (secure_origin && (spmd_is_spmc_message(x1) ||
		    is_spmd_logical_sp_dir_req_in_progress(ctx))) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
		break; /* Not reached */
	case FFA_MSG_SEND_DIRECT_RESP2_SMC64:
		/* Forward direct message to the other world */
		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* Not reached */
	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_PARTITION_INFO_GET:
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_NOTIFICATION_BITMAP_CREATE:
	case FFA_NOTIFICATION_BITMAP_DESTROY:
	case FFA_NOTIFICATION_BIND:
	case FFA_NOTIFICATION_UNBIND:
	case FFA_NOTIFICATION_SET:
	case FFA_NOTIFICATION_GET:
	case FFA_NOTIFICATION_INFO_GET:
	case FFA_NOTIFICATION_INFO_GET_SMC64:
	case FFA_MSG_SEND2:
	case FFA_RX_ACQUIRE:
#endif
	case FFA_MSG_RUN:
		/*
		 * The above calls should be invoked only by the Normal world
		 * and must not be forwarded from the Secure world to the
		 * Normal world.
		 */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_MSG_SEND:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_MEM_FRAG_TX:
	case FFA_MEM_FRAG_RX:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * If there is an ongoing direct request from an SPMD logical
		 * partition, return an error.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_DENIED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that the
		 * SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0ULL);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_INTERRUPT:
	case FFA_MSG_YIELD:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_DENIED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_NORMAL_WORLD_RESUME:
		if (secure_origin && ctx->secure_interrupt_ongoing) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_DENIED);
		}
		break; /* Not reached */
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_PARTITION_INFO_GET_REGS_SMC64:
		if (secure_origin) {
			return spmd_el3_populate_logical_partition_info(handle, x1,
									x2, x3);
		}

		/* Call only supported with SMCCC 1.2+ */
		if (MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION) < 0x10002) {
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* Not reached */
#endif
	case FFA_CONSOLE_LOG_SMC32:
	case FFA_CONSOLE_LOG_SMC64:
		/* This interface must not be forwarded to other worlds. */
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* not reached */

	case FFA_EL3_INTR_HANDLE:
		if (secure_origin) {
			return spmd_handle_group0_intr_swd(handle);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}
	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}