/*
 * Copyright (c) 2020-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/ffa_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information read from its manifest.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * SPM Core context accessor for the CPU identified by mpidr.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
{
	int core_idx = plat_core_pos_by_mpidr(mpidr);

	if (core_idx < 0) {
		ERROR("Invalid mpidr: %llx, returned ID: %d\n", mpidr, core_idx);
		panic();
	}

	return &spm_core_context[core_idx];
}

/*******************************************************************************
 * SPM Core context accessor for the current CPU.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	return spmd_get_context_by_mpidr(read_mpidr());
}

/*******************************************************************************
 * SPM Core ID getter.
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
	return spmc_attrs.spmc_id;
}

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);
static uint64_t spmd_ffa_error_return(void *handle,
				      int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle);

/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
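 * The current C runtime context is preserved through spmd_spm_core_enter()
 * in spmc_ctx->c_rt_ctx so that execution resumes here when the SPMC hands
 * control back via spmd_spm_core_sync_exit().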
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#else
	cm_el1_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#else
	cm_el1_sysregs_context_save(SECURE);
#endif

	return rc;
}

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Jump to the SPM Core for the first time.
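 * Registered with bl31_register_bl32_init() during spmd_spmc_init() so that
 * BL31 invokes it as the deferred BL32 init hook; a non-zero return value
 * signals successful SPMC initialization back to BL31.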
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;

	VERBOSE("SPM Core init start.\n");

	/* Primary boot core enters the SPMC for initialization. */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%llx\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	return 1;
}

/*******************************************************************************
 * spmd_secure_interrupt_handler
 * Enter the SPMC for further handling of the secure interrupt by the SPMC
 * itself or a Secure Partition.
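 * Registered from spmd_spmc_init() for S-EL1 interrupts routed to EL3 while
 * the normal world runs. The event is conveyed to the SPMC on the current
 * core through the FFA_INTERRUPT interface.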
 ******************************************************************************/
static uint64_t spmd_secure_interrupt_handler(uint32_t id,
					      uint32_t flags,
					      void *handle,
					      void *cookie)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
	unsigned int linear_id = plat_my_core_pos();
	int64_t rc;

	/* Sanity check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering SPMC */
	cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(NON_SECURE);
#endif

	/* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
	write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

	/* Mark current core as handling a secure interrupt. */
	ctx->secure_interrupt_ongoing = true;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("%s failed (%llu) on CPU%u\n", __func__, rc, linear_id);
	}

	ctx->secure_interrupt_ongoing = false;

	cm_el1_sysregs_context_restore(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(NON_SECURE);
#endif
	cm_set_next_eret_context(NON_SECURE);

	SMC_RET0(&ctx->cpu_ctx);
}

/*******************************************************************************
 * Loads SPMC manifest and inits SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	cpu_context_t *cpu_ctx;
	unsigned int core_id;
	uint32_t ep_attr, flags;
	int rc;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
		spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
		SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID. Ensure the high bit is set. */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
	     SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("%s%x.\n", "SPM Core execution state 0x",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check if S-EL2 is supported on this system if S-EL2
	 * is required for SPM
	 */
	if (!is_armv8_4_sel2_present()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {

#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

	/* Set an initial SPMC context state for all cores. */
	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
		spm_core_context[core_id].state = SPMC_STATE_OFF;

		/* Setup an initial cpu context for the SPMC. */
		cpu_ctx = &spm_core_context[core_id].cpu_ctx;
		cm_setup_context(cpu_ctx, spmc_ep_info);

		/*
		 * Pass the core linear ID to the SPMC through x4.
		 * (TF-A implementation defined behavior helping
		 * a legacy TOS migration to adopt FF-A).
		 */
		write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
	}

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	INFO("SPM Core setup done.\n");

	/*
	 * Register an interrupt handler routing secure interrupts to SPMD
	 * while the NWd is running.
	 */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					     spmd_secure_interrupt_handler,
					     flags);
	if (rc != 0) {
		panic();
	}

	return 0;
}

/*******************************************************************************
 * Initialize context of SPM Core.
 ******************************************************************************/
int spmd_setup(void)
{
	void *spmc_manifest;
	int rc;

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return -EINVAL;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
	if (spmc_manifest == NULL) {
		ERROR("Invalid or absent SPM Core manifest.\n");
		return -EINVAL;
	}

	/* Load manifest, init SPMC */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return rc;
}

/*******************************************************************************
 * Forward SMC to the other security state
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

	/* Save incoming security state */
#if SPMD_SPM_AT_SEL2
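	/*
	 * With the SPMC at S-EL2 the secure EL1 system register context is
	 * assumed to be managed by the SPMC itself, so EL1 registers are only
	 * saved here when the incoming state is non-secure.
	 */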
	if (secure_state_in == NON_SECURE) {
		cm_el1_sysregs_context_save(secure_state_in);
	}
	cm_el2_sysregs_context_save(secure_state_in);
#else
	cm_el1_sysregs_context_save(secure_state_in);
#endif

	/* Restore outgoing security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_out == NON_SECURE) {
		cm_el1_sysregs_context_restore(secure_state_out);
	}
	cm_el2_sysregs_context_restore(secure_state_out);
#else
	cm_el1_sysregs_context_restore(secure_state_out);
#endif
	cm_set_next_eret_context(secure_state_out);

	SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
		 SMC_GET_GP(handle, CTX_GPREG_X5),
		 SMC_GET_GP(handle, CTX_GPREG_X6),
		 SMC_GET_GP(handle, CTX_GPREG_X7));
}

/*******************************************************************************
 * Return FFA_ERROR with specified error code
 ******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, (uint32_t) FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*******************************************************************************
 * spmd_check_address_in_binary_image
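 * Returns true if the given address falls within the SPMC binary image
 * [load_address, load_address + binary_size) described by its manifest.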
 ******************************************************************************/
bool spmd_check_address_in_binary_image(uint64_t address)
{
	assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));

	return ((address >= spmc_attrs.load_address) &&
		(address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
}

/******************************************************************************
 * spmd_is_spmc_message
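 * Returns true when a direct message targets the SPMD endpoint and
 * originates from the SPMC, based on the sender/receiver IDs encoded in the
 * message's endpoint field (x1).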
 *****************************************************************************/
static bool spmd_is_spmc_message(unsigned int ep)
{
	return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
		&& (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
}

/******************************************************************************
 * spmd_handle_spmc_message
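 * Handles direct message requests addressed to the SPMD by the SPMC. No such
 * messages are currently implemented, so the request is logged and rejected
 * with -EINVAL.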
 *****************************************************************************/
static int spmd_handle_spmc_message(unsigned long long msg,
		unsigned long long parm1, unsigned long long parm2,
		unsigned long long parm3, unsigned long long parm4)
{
	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
		msg, parm1, parm2, parm3, parm4);

	return -EINVAL;
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	unsigned int linear_id = plat_my_core_pos();
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int32_t ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	VERBOSE("SPM(%u): 0x%x 0x%llx 0x%llx 0x%llx 0x%llx "
		"0x%llx 0x%llx 0x%llx\n",
		linear_id, smc_fid, x1, x2, x3, x4,
		SMC_GET_GP(handle, CTX_GPREG_X5),
		SMC_GET_GP(handle, CTX_GPREG_X6),
		SMC_GET_GP(handle, CTX_GPREG_X7));

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * A request with bit 31 set, or one arriving before the SPMC
		 * context has been set up (SPMC_STATE_RESET), is rejected.
		 * Otherwise a non-secure caller is returned the SPMC's
		 * version and a secure caller the SPMD's version.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (ctx->state == SPMC_STATE_RESET)) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
					       spmc_attrs.minor_version);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
			 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */

	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to SPM Core which will handle it if implemented.
		 */

		/*
		 * Check if x1 holds a valid FFA fid. This is an
		 * optimization.
		 */
		if (!is_ffa_fid(x1)) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward SMC from Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}

		/*
		 * Return success if call was from secure world i.e. all
		 * FFA functions are supported. This is essentially a
		 * nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FFA component.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
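		/*
		 * The SPMC registers the entry point at which secondary cores
		 * enter it when brought online. Only a secure caller may use
		 * this interface; otherwise FFA_ERROR_NOT_SUPPORTED is
		 * returned below.
		 */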
		if (secure_origin) {
			ret = spmd_pm_secondary_ep_register(x1);

			if (ret < 0) {
				SMC_RET8(handle, FFA_ERROR_SMC64,
					 FFA_TARGET_INFO_MBZ, ret,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			} else {
				SMC_RET8(handle, FFA_SUCCESS_SMC64,
					 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			}
		}

		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* Not reached */

	case FFA_SPM_ID_GET:
		if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		/*
		 * Returns the ID of the SPMC or SPMD depending on the FF-A
		 * instance where this function is invoked
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}
		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
		if (secure_origin && spmd_is_spmc_message(x1)) {
			ret = spmd_handle_spmc_message(x3, x4,
				SMC_GET_GP(handle, CTX_GPREG_X5),
				SMC_GET_GP(handle, CTX_GPREG_X6),
				SMC_GET_GP(handle, CTX_GPREG_X7));

			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, ret,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}
		break; /* Not reached */

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
		if (secure_origin && spmd_is_spmc_message(x1)) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}
		break; /* Not reached */

	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_PARTITION_INFO_GET:
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
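	/* Notification interfaces were introduced in FF-A v1.1. */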
	case FFA_NOTIFICATION_BITMAP_CREATE:
	case FFA_NOTIFICATION_BITMAP_DESTROY:
	case FFA_NOTIFICATION_BIND:
	case FFA_NOTIFICATION_UNBIND:
	case FFA_NOTIFICATION_SET:
	case FFA_NOTIFICATION_GET:
	case FFA_NOTIFICATION_INFO_GET:
	case FFA_NOTIFICATION_INFO_GET_SMC64:
#endif
		/*
		 * Above calls should not be forwarded from Secure world to
		 * Normal world.
		 *
		 * Fall through to forward the call to the other world
		 */
	case FFA_MSG_RUN:
		/* This interface must be invoked only by the Normal world */

		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Fall through to forward the call to the other world */
	case FFA_MSG_SEND:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * TODO: Assume that no requests originate from EL3 at the
		 * moment. This will change if a SP service is required in
		 * response to secure interrupts targeted to EL3. Until then
		 * simply forward the call to the Normal world.
		 */

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that the
		 * SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0ULL);
		}

		/* Fall through to forward the call to the other world */
	case FFA_INTERRUPT:
	case FFA_MSG_YIELD:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_NORMAL_WORLD_RESUME:
		if (secure_origin && ctx->secure_interrupt_ongoing) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_DENIED);
		}
		break; /* Not reached */

	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}