blob: 448e12d40c219616f322189ee7bebc1b033a0ef3 [file] [log] [blame]
Achin Gupta86f23532019-10-11 15:41:16 +01001/*
Daniel Boulby9460a232021-12-09 11:20:13 +00002 * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
Achin Gupta86f23532019-10-11 15:41:16 +01003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <assert.h>
8#include <errno.h>
Scott Brandene5dcf982020-08-25 13:49:32 -07009#include <inttypes.h>
10#include <stdint.h>
Achin Gupta86f23532019-10-11 15:41:16 +010011#include <string.h>
12
13#include <arch_helpers.h>
Olivier Deprez2bae35f2020-04-16 13:39:06 +020014#include <arch/aarch64/arch_features.h>
Achin Gupta86f23532019-10-11 15:41:16 +010015#include <bl31/bl31.h>
Olivier Depreza664c492020-08-05 11:27:42 +020016#include <bl31/interrupt_mgmt.h>
Achin Gupta86f23532019-10-11 15:41:16 +010017#include <common/debug.h>
18#include <common/runtime_svc.h>
19#include <lib/el3_runtime/context_mgmt.h>
20#include <lib/smccc.h>
21#include <lib/spinlock.h>
22#include <lib/utils.h>
Achin Gupta86f23532019-10-11 15:41:16 +010023#include <plat/common/common_def.h>
24#include <plat/common/platform.h>
25#include <platform_def.h>
J-Alves2672cde2020-05-07 18:42:25 +010026#include <services/ffa_svc.h>
Marc Bonnici1c33cc32021-11-29 17:57:03 +000027#include <services/spmc_svc.h>
Achin Gupta86f23532019-10-11 15:41:16 +010028#include <services/spmd_svc.h>
29#include <smccc_helpers.h>
30#include "spmd_private.h"
31
32/*******************************************************************************
33 * SPM Core context information.
34 ******************************************************************************/
Olivier Deprez2bae35f2020-04-16 13:39:06 +020035static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];
Achin Gupta86f23532019-10-11 15:41:16 +010036
37/*******************************************************************************
Marc Bonnici1c33cc32021-11-29 17:57:03 +000038 * SPM Core attribute information is read from its manifest if the SPMC is not
39 * at EL3. Else, it is populated from the SPMC directly.
Achin Gupta86f23532019-10-11 15:41:16 +010040 ******************************************************************************/
Olivier Deprez2bae35f2020-04-16 13:39:06 +020041static spmc_manifest_attribute_t spmc_attrs;
Achin Gupta86f23532019-10-11 15:41:16 +010042
43/*******************************************************************************
Max Shvetsov745889c2020-02-27 14:54:21 +000044 * SPM Core entry point information. Discovered on the primary core and reused
45 * on secondary cores.
46 ******************************************************************************/
47static entry_point_info_t *spmc_ep_info;
48
49/*******************************************************************************
Olivier Deprez73ef0dc2020-06-19 15:33:41 +020050 * SPM Core context on CPU based on mpidr.
51 ******************************************************************************/
52spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
53{
Max Shvetsovf80c64d2020-08-25 11:50:18 +010054 int core_idx = plat_core_pos_by_mpidr(mpidr);
55
56 if (core_idx < 0) {
Scott Brandene5dcf982020-08-25 13:49:32 -070057 ERROR("Invalid mpidr: %" PRIx64 ", returned ID: %d\n", mpidr, core_idx);
Max Shvetsovf80c64d2020-08-25 11:50:18 +010058 panic();
59 }
60
61 return &spm_core_context[core_idx];
Olivier Deprez73ef0dc2020-06-19 15:33:41 +020062}
63
64/*******************************************************************************
Olivier Deprez2bae35f2020-04-16 13:39:06 +020065 * SPM Core context on current CPU get helper.
66 ******************************************************************************/
67spmd_spm_core_context_t *spmd_get_context(void)
68{
Olivier Deprez73ef0dc2020-06-19 15:33:41 +020069 return spmd_get_context_by_mpidr(read_mpidr());
Olivier Deprez2bae35f2020-04-16 13:39:06 +020070}
71
72/*******************************************************************************
Olivier Deprezc7631a52020-03-23 09:53:06 +010073 * SPM Core ID getter.
74 ******************************************************************************/
75uint16_t spmd_spmc_id_get(void)
76{
77 return spmc_attrs.spmc_id;
78}
79
80/*******************************************************************************
Max Shvetsov745889c2020-02-27 14:54:21 +000081 * Static function declaration.
82 ******************************************************************************/
Olivier Deprez2bae35f2020-04-16 13:39:06 +020083static int32_t spmd_init(void);
Olivier Deprez69ca84a2020-02-07 15:44:43 +010084static int spmd_spmc_init(void *pm_addr);
J-Alves2672cde2020-05-07 18:42:25 +010085static uint64_t spmd_ffa_error_return(void *handle,
Olivier Deprez2bae35f2020-04-16 13:39:06 +020086 int error_code);
87static uint64_t spmd_smc_forward(uint32_t smc_fid,
88 bool secure_origin,
89 uint64_t x1,
90 uint64_t x2,
91 uint64_t x3,
92 uint64_t x4,
93 void *handle);
Max Shvetsov745889c2020-02-27 14:54:21 +000094
Daniel Boulby9460a232021-12-09 11:20:13 +000095/******************************************************************************
96 * Builds an SPMD to SPMC direct message request.
97 *****************************************************************************/
98void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
99 unsigned long long message)
100{
101 write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
102 write_ctx_reg(gpregs, CTX_GPREG_X1,
103 (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
104 spmd_spmc_id_get());
105 write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
106 write_ctx_reg(gpregs, CTX_GPREG_X3, message);
107}
108
109
/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 *
 * The secure world system register context is restored before entry and saved
 * again on return, so the caller's view of the SPMC context stays coherent.
 *
 * spmc_ctx	Per-CPU SPMC context to enter (must be non-NULL).
 * Returns the value the SPMC passed back through spmd_spm_core_sync_exit()
 * (0 conventionally indicates success).
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	/* Make this context the active SECURE context for this CPU. */
	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#else
	cm_el1_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC; returns when the SPMC performs a synchronous exit. */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#else
	cm_el1_sysregs_context_save(SECURE);
#endif

	return rc;
}
142
/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 *
 * rc	Value handed back to spmd_spm_core_sync_entry()'s caller in x0.
 * Never returns: control resumes in the saved C runtime context, and the
 * trailing panic() is only reachable if that jump fails.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0;
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}
163
/*******************************************************************************
 * Jump to the SPM Core for the first time.
 *
 * Deferred-init hook registered with bl31_register_bl32_init(). Runs on the
 * primary boot core and performs the first synchronous entry into the SPMC.
 *
 * Returns 1 on success and 0 on failure, per the BL32 init hook convention
 * (NOT the usual 0-on-success C convention).
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;

	VERBOSE("SPM Core init start.\n");

	/* Primary boot core enters the SPMC for initialization. */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	return 1;
}
189
/*******************************************************************************
 * spmd_secure_interrupt_handler
 * Enter the SPMC for further handling of the secure interrupt by the SPMC
 * itself or a Secure Partition.
 *
 * Registered for INTR_TYPE_S_EL1 interrupts trapped to EL3 while the
 * normal world runs. Saves the non-secure context, conveys the event to the
 * SPMC via FFA_INTERRUPT, then resumes the interrupted non-secure context.
 *
 * id/flags/cookie follow the EL3 interrupt management handler prototype;
 * handle is the interrupted (non-secure) CPU context.
 ******************************************************************************/
static uint64_t spmd_secure_interrupt_handler(uint32_t id,
					      uint32_t flags,
					      void *handle,
					      void *cookie)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
	unsigned int linear_id = plat_my_core_pos();
	int64_t rc;

	/* Sanity check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering SPMC */
	cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(NON_SECURE);
#endif

	/* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
	write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

	/* Mark current core as handling a secure interrupt. */
	ctx->secure_interrupt_ongoing = true;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, linear_id);
	}

	ctx->secure_interrupt_ongoing = false;

	/* Resume the interrupted non-secure context. */
	cm_el1_sysregs_context_restore(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(NON_SECURE);
#endif
	cm_set_next_eret_context(NON_SECURE);

	SMC_RET0(&ctx->cpu_ctx);
}
245
/*******************************************************************************
 * Loads SPMC manifest and inits SPMC.
 *
 * pm_addr	Pointer to the SPMC manifest blob handed over by BL2.
 * Returns 0 on success, the manifest-load error code or -EINVAL on validation
 * failure; panics if the secure interrupt handler cannot be registered.
 *
 * On success the SPMC entry point context is prepared for every core and
 * spmd_init() is registered for deferred (BL32) initialisation.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	cpu_context_t *cpu_ctx;
	unsigned int core_id;
	uint32_t ep_attr, flags;
	int rc;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
	     spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
	     SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID, Ensure high bit is set */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
	     SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("%s%x.\n", "SPM Core execution state 0x",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check if S-EL2 is supported on this system if S-EL2
	 * is required for SPM
	 */
	if (!is_armv8_4_sel2_present()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		/* EL3 runs big-endian; propagate endianness to the SPMC. */
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {

#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

	/* Set an initial SPMC context state for all cores. */
	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
		spm_core_context[core_id].state = SPMC_STATE_OFF;

		/* Setup an initial cpu context for the SPMC. */
		cpu_ctx = &spm_core_context[core_id].cpu_ctx;
		cm_setup_context(cpu_ctx, spmc_ep_info);

		/*
		 * Pass the core linear ID to the SPMC through x4.
		 * (TF-A implementation defined behavior helping
		 * a legacy TOS migration to adopt FF-A).
		 */
		write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
	}

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	INFO("SPM Core setup done.\n");

	/*
	 * Register an interrupt handler routing secure interrupts to SPMD
	 * while the NWd is running.
	 */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					     spmd_secure_interrupt_handler,
					     flags);
	if (rc != 0) {
		panic();
	}

	return 0;
}
Achin Gupta86f23532019-10-11 15:41:16 +0100384
/*******************************************************************************
 * Initialize context of SPM Core.
 *
 * BL31 entry point for SPMD setup. Either initialises an EL3-resident SPMC
 * directly, or locates the lower-EL SPMC image and its manifest (passed by
 * BL2 via the BL32 ep_info 'tos_fw_config' argument) and hands them to
 * spmd_spmc_init().
 *
 * Returns 0 on success, a negative error code otherwise. A non-zero return
 * lets the platform boot without SPM support (see caller's WARN path).
 ******************************************************************************/
int spmd_setup(void)
{
	int rc;
	void *spmc_manifest;

	/*
	 * If the SPMC is at EL3, then just initialise it directly. The
	 * shenanigans of when it is at a lower EL are not needed.
	 */
	if (is_spmc_at_el3()) {
		/* Allow the SPMC to populate its attributes directly. */
		spmc_populate_attrs(&spmc_attrs);

		rc = spmc_setup();
		if (rc != 0) {
			ERROR("SPMC initialisation failed 0x%x.\n", rc);
		}
		return rc;
	}

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return -EINVAL;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
	if (spmc_manifest == NULL) {
		ERROR("Invalid or absent SPM Core manifest.\n");
		return -EINVAL;
	}

	/* Load manifest, init SPMC */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return rc;
}
435
/*******************************************************************************
 * Forward SMC to the other security state
 *
 * Saves the originating world's system register context, restores the
 * destination world's context and ERETs there with x0-x7 carrying the SMC
 * arguments (x5-x7 copied from the incoming 'handle' context).
 *
 * NOTE(review): with SPMD_SPM_AT_SEL2, EL1 sysregs are only saved/restored
 * for the non-secure side here — the secure EL1 context is presumably managed
 * by the S-EL2 SPMC itself; confirm against the context management design.
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

	/* Save incoming security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_in == NON_SECURE) {
		cm_el1_sysregs_context_save(secure_state_in);
	}
	cm_el2_sysregs_context_save(secure_state_in);
#else
	cm_el1_sysregs_context_save(secure_state_in);
#endif

	/* Restore outgoing security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_out == NON_SECURE) {
		cm_el1_sysregs_context_restore(secure_state_out);
	}
	cm_el2_sysregs_context_restore(secure_state_out);
#else
	cm_el1_sysregs_context_restore(secure_state_out);
#endif
	cm_set_next_eret_context(secure_state_out);

	SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
		 SMC_GET_GP(handle, CTX_GPREG_X5),
		 SMC_GET_GP(handle, CTX_GPREG_X6),
		 SMC_GET_GP(handle, CTX_GPREG_X7));
}
476
/*******************************************************************************
 * Return FFA_ERROR with specified error code
 *
 * handle	CPU context to return through.
 * error_code	FF-A error status placed in w2; w1 and w3-w7 are MBZ per the
 *		FF-A ABI.
 ******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, (uint32_t) FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}
487
Olivier Deprez33e44122020-04-16 17:54:27 +0200488/*******************************************************************************
489 * spmd_check_address_in_binary_image
490 ******************************************************************************/
491bool spmd_check_address_in_binary_image(uint64_t address)
492{
493 assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));
494
495 return ((address >= spmc_attrs.load_address) &&
496 (address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
497}
498
Olivier Deprezebc34772020-04-16 16:59:21 +0200499/******************************************************************************
500 * spmd_is_spmc_message
501 *****************************************************************************/
502static bool spmd_is_spmc_message(unsigned int ep)
503{
504 return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
505 && (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
506}
507
/******************************************************************************
 * spmd_handle_spmc_message
 * Handler for a direct message request originating from the SPMC.
 * No SPMC-to-SPMD messages are implemented yet: the payload is only logged
 * and the request is rejected with -EINVAL.
 *****************************************************************************/
static int spmd_handle_spmc_message(unsigned long long msg,
		unsigned long long parm1, unsigned long long parm2,
		unsigned long long parm3, unsigned long long parm4)
{
	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
		msg, parm1, parm2, parm3, parm4);

	return -EINVAL;
}
520
Achin Gupta86f23532019-10-11 15:41:16 +0100521/*******************************************************************************
J-Alves2672cde2020-05-07 18:42:25 +0100522 * This function handles all SMCs in the range reserved for FFA. Each call is
Achin Gupta86f23532019-10-11 15:41:16 +0100523 * either forwarded to the other security state or handled by the SPM dispatcher
524 ******************************************************************************/
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200525uint64_t spmd_smc_handler(uint32_t smc_fid,
526 uint64_t x1,
527 uint64_t x2,
528 uint64_t x3,
529 uint64_t x4,
530 void *cookie,
531 void *handle,
Achin Gupta86f23532019-10-11 15:41:16 +0100532 uint64_t flags)
533{
Olivier Deprezeae45962021-01-19 15:06:47 +0100534 unsigned int linear_id = plat_my_core_pos();
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200535 spmd_spm_core_context_t *ctx = spmd_get_context();
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100536 bool secure_origin;
537 int32_t ret;
J-Alves4c95c702020-05-26 14:03:05 +0100538 uint32_t input_version;
Achin Gupta86f23532019-10-11 15:41:16 +0100539
540 /* Determine which security state this SMC originated from */
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100541 secure_origin = is_caller_secure(flags);
Achin Gupta86f23532019-10-11 15:41:16 +0100542
Scott Brandene5dcf982020-08-25 13:49:32 -0700543 VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
544 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
545 linear_id, smc_fid, x1, x2, x3, x4,
546 SMC_GET_GP(handle, CTX_GPREG_X5),
547 SMC_GET_GP(handle, CTX_GPREG_X6),
548 SMC_GET_GP(handle, CTX_GPREG_X7));
Achin Gupta86f23532019-10-11 15:41:16 +0100549
550 switch (smc_fid) {
J-Alves2672cde2020-05-07 18:42:25 +0100551 case FFA_ERROR:
Achin Gupta86f23532019-10-11 15:41:16 +0100552 /*
553 * Check if this is the first invocation of this interface on
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200554 * this CPU. If so, then indicate that the SPM Core initialised
Achin Gupta86f23532019-10-11 15:41:16 +0100555 * unsuccessfully.
556 */
Olivier Deprez7c016332019-10-28 09:03:13 +0000557 if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
Achin Gupta86f23532019-10-11 15:41:16 +0100558 spmd_spm_core_sync_exit(x2);
Max Shvetsov745889c2020-02-27 14:54:21 +0000559 }
Achin Gupta86f23532019-10-11 15:41:16 +0100560
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100561 return spmd_smc_forward(smc_fid, secure_origin,
Max Shvetsov745889c2020-02-27 14:54:21 +0000562 x1, x2, x3, x4, handle);
Achin Gupta86f23532019-10-11 15:41:16 +0100563 break; /* not reached */
564
J-Alves2672cde2020-05-07 18:42:25 +0100565 case FFA_VERSION:
J-Alves4c95c702020-05-26 14:03:05 +0100566 input_version = (uint32_t)(0xFFFFFFFF & x1);
Achin Gupta86f23532019-10-11 15:41:16 +0100567 /*
J-Alves4c95c702020-05-26 14:03:05 +0100568 * If caller is secure and SPMC was initialized,
569 * return FFA_VERSION of SPMD.
570 * If caller is non secure and SPMC was initialized,
571 * return SPMC's version.
572 * Sanity check to "input_version".
Achin Gupta86f23532019-10-11 15:41:16 +0100573 */
J-Alves4c95c702020-05-26 14:03:05 +0100574 if ((input_version & FFA_VERSION_BIT31_MASK) ||
575 (ctx->state == SPMC_STATE_RESET)) {
576 ret = FFA_ERROR_NOT_SUPPORTED;
577 } else if (!secure_origin) {
Daniel Boulby9460a232021-12-09 11:20:13 +0000578 gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
579 uint64_t rc;
580
581 if (spmc_attrs.major_version == 1 &&
582 spmc_attrs.minor_version == 0) {
583 ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
584 spmc_attrs.minor_version);
585 SMC_RET8(handle, (uint32_t)ret,
586 FFA_TARGET_INFO_MBZ,
587 FFA_TARGET_INFO_MBZ,
588 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
589 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
590 FFA_PARAM_MBZ);
591 break;
592 }
593 /* Save non-secure system registers context */
594 cm_el1_sysregs_context_save(NON_SECURE);
595#if SPMD_SPM_AT_SEL2
596 cm_el2_sysregs_context_save(NON_SECURE);
597#endif
598
599 /*
600 * The incoming request has FFA_VERSION as X0 smc_fid
601 * and requested version in x1. Prepare a direct request
602 * from SPMD to SPMC with FFA_VERSION framework function
603 * identifier in X2 and requested version in X3.
604 */
605 spmd_build_spmc_message(gpregs,
606 SPMD_FWK_MSG_FFA_VERSION_REQ,
607 input_version);
608
609 rc = spmd_spm_core_sync_entry(ctx);
610
611 if ((rc != 0ULL) ||
612 (SMC_GET_GP(gpregs, CTX_GPREG_X0) !=
613 FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
614 (SMC_GET_GP(gpregs, CTX_GPREG_X2) !=
615 (SPMD_FWK_MSG_BIT |
616 SPMD_FWK_MSG_FFA_VERSION_RESP))) {
617 ERROR("Failed to forward FFA_VERSION\n");
618 ret = FFA_ERROR_NOT_SUPPORTED;
619 } else {
620 ret = SMC_GET_GP(gpregs, CTX_GPREG_X3);
621 }
622
623 /*
624 * Return here after SPMC has handled FFA_VERSION.
625 * The returned SPMC version is held in X3.
626 * Forward this version in X0 to the non-secure caller.
627 */
628 return spmd_smc_forward(ret, true, FFA_PARAM_MBZ,
629 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
630 FFA_PARAM_MBZ, gpregs);
J-Alves4c95c702020-05-26 14:03:05 +0100631 } else {
J-Alves64ff9932021-03-01 10:26:59 +0000632 ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
633 FFA_VERSION_MINOR);
J-Alves4c95c702020-05-26 14:03:05 +0100634 }
635
J-Alves64ff9932021-03-01 10:26:59 +0000636 SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
637 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
638 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
Achin Gupta86f23532019-10-11 15:41:16 +0100639 break; /* not reached */
640
J-Alves2672cde2020-05-07 18:42:25 +0100641 case FFA_FEATURES:
Achin Gupta86f23532019-10-11 15:41:16 +0100642 /*
643 * This is an optional interface. Do the minimal checks and
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200644 * forward to SPM Core which will handle it if implemented.
Achin Gupta86f23532019-10-11 15:41:16 +0100645 */
646
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200647 /* Forward SMC from Normal world to the SPM Core */
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100648 if (!secure_origin) {
649 return spmd_smc_forward(smc_fid, secure_origin,
Max Shvetsov745889c2020-02-27 14:54:21 +0000650 x1, x2, x3, x4, handle);
Achin Gupta86f23532019-10-11 15:41:16 +0100651 }
Max Shvetsov745889c2020-02-27 14:54:21 +0000652
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200653 /*
654 * Return success if call was from secure world i.e. all
J-Alves2672cde2020-05-07 18:42:25 +0100655 * FFA functions are supported. This is essentially a
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200656 * nop.
657 */
J-Alves2672cde2020-05-07 18:42:25 +0100658 SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200659 SMC_GET_GP(handle, CTX_GPREG_X5),
660 SMC_GET_GP(handle, CTX_GPREG_X6),
661 SMC_GET_GP(handle, CTX_GPREG_X7));
662
Achin Gupta86f23532019-10-11 15:41:16 +0100663 break; /* not reached */
664
J-Alves2672cde2020-05-07 18:42:25 +0100665 case FFA_ID_GET:
Max Shvetsove79062e2020-03-12 15:16:40 +0000666 /*
J-Alves2672cde2020-05-07 18:42:25 +0100667 * Returns the ID of the calling FFA component.
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200668 */
Max Shvetsove79062e2020-03-12 15:16:40 +0000669 if (!secure_origin) {
J-Alves2672cde2020-05-07 18:42:25 +0100670 SMC_RET8(handle, FFA_SUCCESS_SMC32,
671 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
672 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
673 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
674 FFA_PARAM_MBZ);
Max Shvetsove79062e2020-03-12 15:16:40 +0000675 }
676
J-Alves2672cde2020-05-07 18:42:25 +0100677 SMC_RET8(handle, FFA_SUCCESS_SMC32,
678 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
679 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
680 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
681 FFA_PARAM_MBZ);
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200682
Max Shvetsove79062e2020-03-12 15:16:40 +0000683 break; /* not reached */
684
Olivier Deprezeae45962021-01-19 15:06:47 +0100685 case FFA_SECONDARY_EP_REGISTER_SMC64:
686 if (secure_origin) {
687 ret = spmd_pm_secondary_ep_register(x1);
688
689 if (ret < 0) {
690 SMC_RET8(handle, FFA_ERROR_SMC64,
691 FFA_TARGET_INFO_MBZ, ret,
692 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
693 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
694 FFA_PARAM_MBZ);
695 } else {
696 SMC_RET8(handle, FFA_SUCCESS_SMC64,
697 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
698 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
699 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
700 FFA_PARAM_MBZ);
701 }
702 }
703
704 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
705 break; /* Not reached */
706
Daniel Boulby27f35df2021-02-03 12:13:19 +0000707 case FFA_SPM_ID_GET:
708 if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
709 return spmd_ffa_error_return(handle,
710 FFA_ERROR_NOT_SUPPORTED);
711 }
712 /*
713 * Returns the ID of the SPMC or SPMD depending on the FF-A
714 * instance where this function is invoked
715 */
716 if (!secure_origin) {
717 SMC_RET8(handle, FFA_SUCCESS_SMC32,
718 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
719 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
720 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
721 FFA_PARAM_MBZ);
722 }
723 SMC_RET8(handle, FFA_SUCCESS_SMC32,
724 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
725 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
726 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
727 FFA_PARAM_MBZ);
728
729 break; /* not reached */
730
Olivier Deprez33e44122020-04-16 17:54:27 +0200731 case FFA_MSG_SEND_DIRECT_REQ_SMC32:
732 if (secure_origin && spmd_is_spmc_message(x1)) {
733 ret = spmd_handle_spmc_message(x3, x4,
734 SMC_GET_GP(handle, CTX_GPREG_X5),
735 SMC_GET_GP(handle, CTX_GPREG_X6),
736 SMC_GET_GP(handle, CTX_GPREG_X7));
737
738 SMC_RET8(handle, FFA_SUCCESS_SMC32,
739 FFA_TARGET_INFO_MBZ, ret,
740 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
741 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
742 FFA_PARAM_MBZ);
743 } else {
744 /* Forward direct message to the other world */
745 return spmd_smc_forward(smc_fid, secure_origin,
746 x1, x2, x3, x4, handle);
747 }
748 break; /* Not reached */
749
750 case FFA_MSG_SEND_DIRECT_RESP_SMC32:
751 if (secure_origin && spmd_is_spmc_message(x1)) {
Olivier Depreza664c492020-08-05 11:27:42 +0200752 spmd_spm_core_sync_exit(0ULL);
Olivier Deprez33e44122020-04-16 17:54:27 +0200753 } else {
754 /* Forward direct message to the other world */
755 return spmd_smc_forward(smc_fid, secure_origin,
756 x1, x2, x3, x4, handle);
757 }
758 break; /* Not reached */
759
J-Alves2672cde2020-05-07 18:42:25 +0100760 case FFA_RX_RELEASE:
761 case FFA_RXTX_MAP_SMC32:
762 case FFA_RXTX_MAP_SMC64:
763 case FFA_RXTX_UNMAP:
Ruari Phipps93dff702020-07-28 10:33:35 +0100764 case FFA_PARTITION_INFO_GET:
J-Alves2621cfd2021-03-11 17:46:47 +0000765#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
766 case FFA_NOTIFICATION_BITMAP_CREATE:
767 case FFA_NOTIFICATION_BITMAP_DESTROY:
768 case FFA_NOTIFICATION_BIND:
769 case FFA_NOTIFICATION_UNBIND:
770 case FFA_NOTIFICATION_SET:
771 case FFA_NOTIFICATION_GET:
772 case FFA_NOTIFICATION_INFO_GET:
773 case FFA_NOTIFICATION_INFO_GET_SMC64:
Federico Recanatieecb4b02022-02-03 17:22:37 +0100774 case FFA_MSG_SEND2:
J-Alves2621cfd2021-03-11 17:46:47 +0000775#endif
Federico Recanatieecb4b02022-02-03 17:22:37 +0100776 case FFA_MSG_RUN:
Ruari Phipps93dff702020-07-28 10:33:35 +0100777 /*
Federico Recanatieecb4b02022-02-03 17:22:37 +0100778 * Above calls should be invoked only by the Normal world and
779 * must not be forwarded from Secure world to Normal world.
Ruari Phipps93dff702020-07-28 10:33:35 +0100780 */
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100781 if (secure_origin) {
J-Alves2672cde2020-05-07 18:42:25 +0100782 return spmd_ffa_error_return(handle,
Ruari Phipps93dff702020-07-28 10:33:35 +0100783 FFA_ERROR_NOT_SUPPORTED);
Achin Gupta86f23532019-10-11 15:41:16 +0100784 }
785
786 /* Fall through to forward the call to the other world */
J-Alves2672cde2020-05-07 18:42:25 +0100787 case FFA_MSG_SEND:
J-Alves2672cde2020-05-07 18:42:25 +0100788 case FFA_MSG_SEND_DIRECT_REQ_SMC64:
J-Alves2672cde2020-05-07 18:42:25 +0100789 case FFA_MSG_SEND_DIRECT_RESP_SMC64:
790 case FFA_MEM_DONATE_SMC32:
791 case FFA_MEM_DONATE_SMC64:
792 case FFA_MEM_LEND_SMC32:
793 case FFA_MEM_LEND_SMC64:
794 case FFA_MEM_SHARE_SMC32:
795 case FFA_MEM_SHARE_SMC64:
796 case FFA_MEM_RETRIEVE_REQ_SMC32:
797 case FFA_MEM_RETRIEVE_REQ_SMC64:
798 case FFA_MEM_RETRIEVE_RESP:
799 case FFA_MEM_RELINQUISH:
800 case FFA_MEM_RECLAIM:
801 case FFA_SUCCESS_SMC32:
802 case FFA_SUCCESS_SMC64:
Achin Gupta86f23532019-10-11 15:41:16 +0100803 /*
804 * TODO: Assume that no requests originate from EL3 at the
805 * moment. This will change if a SP service is required in
806 * response to secure interrupts targeted to EL3. Until then
807 * simply forward the call to the Normal world.
808 */
809
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100810 return spmd_smc_forward(smc_fid, secure_origin,
Max Shvetsov745889c2020-02-27 14:54:21 +0000811 x1, x2, x3, x4, handle);
Achin Gupta86f23532019-10-11 15:41:16 +0100812 break; /* not reached */
813
J-Alves2672cde2020-05-07 18:42:25 +0100814 case FFA_MSG_WAIT:
Achin Gupta86f23532019-10-11 15:41:16 +0100815 /*
816 * Check if this is the first invocation of this interface on
817 * this CPU from the Secure world. If so, then indicate that the
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200818 * SPM Core initialised successfully.
Achin Gupta86f23532019-10-11 15:41:16 +0100819 */
Olivier Deprez7c016332019-10-28 09:03:13 +0000820 if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
Olivier Depreza664c492020-08-05 11:27:42 +0200821 spmd_spm_core_sync_exit(0ULL);
Achin Gupta86f23532019-10-11 15:41:16 +0100822 }
823
Max Shvetsov745889c2020-02-27 14:54:21 +0000824 /* Fall through to forward the call to the other world */
Olivier Deprezae18caf2021-04-02 11:09:10 +0200825 case FFA_INTERRUPT:
J-Alves2672cde2020-05-07 18:42:25 +0100826 case FFA_MSG_YIELD:
Achin Gupta86f23532019-10-11 15:41:16 +0100827 /* This interface must be invoked only by the Secure world */
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100828 if (!secure_origin) {
J-Alves2672cde2020-05-07 18:42:25 +0100829 return spmd_ffa_error_return(handle,
830 FFA_ERROR_NOT_SUPPORTED);
Achin Gupta86f23532019-10-11 15:41:16 +0100831 }
832
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100833 return spmd_smc_forward(smc_fid, secure_origin,
Max Shvetsov745889c2020-02-27 14:54:21 +0000834 x1, x2, x3, x4, handle);
Achin Gupta86f23532019-10-11 15:41:16 +0100835 break; /* not reached */
836
Olivier Depreza664c492020-08-05 11:27:42 +0200837 case FFA_NORMAL_WORLD_RESUME:
838 if (secure_origin && ctx->secure_interrupt_ongoing) {
839 spmd_spm_core_sync_exit(0ULL);
840 } else {
841 return spmd_ffa_error_return(handle, FFA_ERROR_DENIED);
842 }
843 break; /* Not reached */
844
Achin Gupta86f23532019-10-11 15:41:16 +0100845 default:
846 WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
J-Alves2672cde2020-05-07 18:42:25 +0100847 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
Achin Gupta86f23532019-10-11 15:41:16 +0100848 }
849}