blob: e38878432ea6de3bd9a38253961c173fd7348d89 [file] [log] [blame]
Achin Gupta86f23532019-10-11 15:41:16 +01001/*
Daniel Boulby9460a232021-12-09 11:20:13 +00002 * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
Achin Gupta86f23532019-10-11 15:41:16 +01003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <assert.h>
8#include <errno.h>
Scott Brandene5dcf982020-08-25 13:49:32 -07009#include <inttypes.h>
10#include <stdint.h>
Achin Gupta86f23532019-10-11 15:41:16 +010011#include <string.h>
12
13#include <arch_helpers.h>
Olivier Deprez2bae35f2020-04-16 13:39:06 +020014#include <arch/aarch64/arch_features.h>
Achin Gupta86f23532019-10-11 15:41:16 +010015#include <bl31/bl31.h>
Olivier Depreza664c492020-08-05 11:27:42 +020016#include <bl31/interrupt_mgmt.h>
Achin Gupta86f23532019-10-11 15:41:16 +010017#include <common/debug.h>
18#include <common/runtime_svc.h>
19#include <lib/el3_runtime/context_mgmt.h>
20#include <lib/smccc.h>
21#include <lib/spinlock.h>
22#include <lib/utils.h>
Achin Gupta86f23532019-10-11 15:41:16 +010023#include <plat/common/common_def.h>
24#include <plat/common/platform.h>
25#include <platform_def.h>
J-Alves2672cde2020-05-07 18:42:25 +010026#include <services/ffa_svc.h>
Marc Bonnici1c33cc32021-11-29 17:57:03 +000027#include <services/spmc_svc.h>
Achin Gupta86f23532019-10-11 15:41:16 +010028#include <services/spmd_svc.h>
29#include <smccc_helpers.h>
30#include "spmd_private.h"
31
/*******************************************************************************
 * SPM Core context information.
 * One context per physical CPU; indexed by the core's linear position.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information is read from its manifest if the SPMC is not
 * at EL3. Else, it is populated from the SPMC directly.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;
48
49/*******************************************************************************
Olivier Deprez73ef0dc2020-06-19 15:33:41 +020050 * SPM Core context on CPU based on mpidr.
51 ******************************************************************************/
52spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
53{
Max Shvetsovf80c64d2020-08-25 11:50:18 +010054 int core_idx = plat_core_pos_by_mpidr(mpidr);
55
56 if (core_idx < 0) {
Scott Brandene5dcf982020-08-25 13:49:32 -070057 ERROR("Invalid mpidr: %" PRIx64 ", returned ID: %d\n", mpidr, core_idx);
Max Shvetsovf80c64d2020-08-25 11:50:18 +010058 panic();
59 }
60
61 return &spm_core_context[core_idx];
Olivier Deprez73ef0dc2020-06-19 15:33:41 +020062}
63
64/*******************************************************************************
Olivier Deprez2bae35f2020-04-16 13:39:06 +020065 * SPM Core context on current CPU get helper.
66 ******************************************************************************/
67spmd_spm_core_context_t *spmd_get_context(void)
68{
Olivier Deprez73ef0dc2020-06-19 15:33:41 +020069 return spmd_get_context_by_mpidr(read_mpidr());
Olivier Deprez2bae35f2020-04-16 13:39:06 +020070}
71
72/*******************************************************************************
Olivier Deprezc7631a52020-03-23 09:53:06 +010073 * SPM Core ID getter.
74 ******************************************************************************/
75uint16_t spmd_spmc_id_get(void)
76{
77 return spmc_attrs.spmc_id;
78}
79
/*******************************************************************************
 * Static function declaration.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);
static uint64_t spmd_ffa_error_return(void *handle,
				      int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags);
Max Shvetsov745889c2020-02-27 14:54:21 +000096
Daniel Boulby9460a232021-12-09 11:20:13 +000097/******************************************************************************
98 * Builds an SPMD to SPMC direct message request.
99 *****************************************************************************/
100void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
101 unsigned long long message)
102{
103 write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
104 write_ctx_reg(gpregs, CTX_GPREG_X1,
105 (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
106 spmd_spmc_id_get());
107 write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
108 write_ctx_reg(gpregs, CTX_GPREG_X3, message);
109}
110
111
Max Shvetsov745889c2020-02-27 14:54:21 +0000112/*******************************************************************************
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200113 * This function takes an SPMC context pointer and performs a synchronous
114 * SPMC entry.
Achin Gupta86f23532019-10-11 15:41:16 +0100115 ******************************************************************************/
116uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
117{
118 uint64_t rc;
119
120 assert(spmc_ctx != NULL);
121
122 cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);
123
124 /* Restore the context assigned above */
Max Shvetsove7fd80e2020-02-25 13:55:00 +0000125#if SPMD_SPM_AT_SEL2
Max Shvetsovbdf502d2020-02-25 13:56:19 +0000126 cm_el2_sysregs_context_restore(SECURE);
Olivier Deprez9a2e5be2021-05-21 18:00:04 +0200127#else
128 cm_el1_sysregs_context_restore(SECURE);
Max Shvetsove7fd80e2020-02-25 13:55:00 +0000129#endif
Achin Gupta86f23532019-10-11 15:41:16 +0100130 cm_set_next_eret_context(SECURE);
131
Max Shvetsove7fd80e2020-02-25 13:55:00 +0000132 /* Enter SPMC */
Achin Gupta86f23532019-10-11 15:41:16 +0100133 rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);
134
135 /* Save secure state */
Max Shvetsove7fd80e2020-02-25 13:55:00 +0000136#if SPMD_SPM_AT_SEL2
Max Shvetsovbdf502d2020-02-25 13:56:19 +0000137 cm_el2_sysregs_context_save(SECURE);
Olivier Deprez9a2e5be2021-05-21 18:00:04 +0200138#else
139 cm_el1_sysregs_context_save(SECURE);
Max Shvetsove7fd80e2020-02-25 13:55:00 +0000140#endif
Achin Gupta86f23532019-10-11 15:41:16 +0100141
142 return rc;
143}
144
145/*******************************************************************************
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200146 * This function returns to the place where spmd_spm_core_sync_entry() was
Achin Gupta86f23532019-10-11 15:41:16 +0100147 * called originally.
148 ******************************************************************************/
149__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
150{
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200151 spmd_spm_core_context_t *ctx = spmd_get_context();
Achin Gupta86f23532019-10-11 15:41:16 +0100152
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200153 /* Get current CPU context from SPMC context */
Achin Gupta86f23532019-10-11 15:41:16 +0100154 assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));
155
156 /*
157 * The SPMD must have initiated the original request through a
158 * synchronous entry into SPMC. Jump back to the original C runtime
159 * context with the value of rc in x0;
160 */
161 spmd_spm_core_exit(ctx->c_rt_ctx, rc);
162
163 panic();
164}
165
166/*******************************************************************************
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200167 * Jump to the SPM Core for the first time.
Achin Gupta86f23532019-10-11 15:41:16 +0100168 ******************************************************************************/
169static int32_t spmd_init(void)
170{
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200171 spmd_spm_core_context_t *ctx = spmd_get_context();
172 uint64_t rc;
Achin Gupta86f23532019-10-11 15:41:16 +0100173
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200174 VERBOSE("SPM Core init start.\n");
Olivier Deprez7c016332019-10-28 09:03:13 +0000175
Olivier Deprez4ab7a4a2021-06-21 09:47:13 +0200176 /* Primary boot core enters the SPMC for initialization. */
177 ctx->state = SPMC_STATE_ON_PENDING;
Achin Gupta86f23532019-10-11 15:41:16 +0100178
179 rc = spmd_spm_core_sync_entry(ctx);
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200180 if (rc != 0ULL) {
Scott Brandene5dcf982020-08-25 13:49:32 -0700181 ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200182 return 0;
Achin Gupta86f23532019-10-11 15:41:16 +0100183 }
184
Olivier Deprez7c016332019-10-28 09:03:13 +0000185 ctx->state = SPMC_STATE_ON;
186
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200187 VERBOSE("SPM Core init end.\n");
Achin Gupta86f23532019-10-11 15:41:16 +0100188
189 return 1;
190}
191
192/*******************************************************************************
Olivier Depreza664c492020-08-05 11:27:42 +0200193 * spmd_secure_interrupt_handler
194 * Enter the SPMC for further handling of the secure interrupt by the SPMC
195 * itself or a Secure Partition.
196 ******************************************************************************/
197static uint64_t spmd_secure_interrupt_handler(uint32_t id,
198 uint32_t flags,
199 void *handle,
200 void *cookie)
201{
202 spmd_spm_core_context_t *ctx = spmd_get_context();
203 gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
204 unsigned int linear_id = plat_my_core_pos();
205 int64_t rc;
206
207 /* Sanity check the security state when the exception was generated */
208 assert(get_interrupt_src_ss(flags) == NON_SECURE);
209
210 /* Sanity check the pointer to this cpu's context */
211 assert(handle == cm_get_context(NON_SECURE));
212
213 /* Save the non-secure context before entering SPMC */
214 cm_el1_sysregs_context_save(NON_SECURE);
215#if SPMD_SPM_AT_SEL2
216 cm_el2_sysregs_context_save(NON_SECURE);
217#endif
218
219 /* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
220 write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
221 write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
222 write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
223 write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
224 write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
225 write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
226 write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
227 write_ctx_reg(gpregs, CTX_GPREG_X7, 0);
228
229 /* Mark current core as handling a secure interrupt. */
230 ctx->secure_interrupt_ongoing = true;
231
232 rc = spmd_spm_core_sync_entry(ctx);
233 if (rc != 0ULL) {
Olivier Deprezba100f22021-11-09 12:37:20 +0100234 ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, linear_id);
Olivier Depreza664c492020-08-05 11:27:42 +0200235 }
236
237 ctx->secure_interrupt_ongoing = false;
238
239 cm_el1_sysregs_context_restore(NON_SECURE);
240#if SPMD_SPM_AT_SEL2
241 cm_el2_sysregs_context_restore(NON_SECURE);
242#endif
243 cm_set_next_eret_context(NON_SECURE);
244
245 SMC_RET0(&ctx->cpu_ctx);
246}
247
248/*******************************************************************************
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200249 * Loads SPMC manifest and inits SPMC.
Achin Gupta86f23532019-10-11 15:41:16 +0100250 ******************************************************************************/
Olivier Deprez69ca84a2020-02-07 15:44:43 +0100251static int spmd_spmc_init(void *pm_addr)
Achin Gupta86f23532019-10-11 15:41:16 +0100252{
Olivier Deprez4ab7a4a2021-06-21 09:47:13 +0200253 cpu_context_t *cpu_ctx;
254 unsigned int core_id;
Olivier Depreza664c492020-08-05 11:27:42 +0200255 uint32_t ep_attr, flags;
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200256 int rc;
Achin Gupta86f23532019-10-11 15:41:16 +0100257
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200258 /* Load the SPM Core manifest */
Olivier Deprez69ca84a2020-02-07 15:44:43 +0100259 rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
Max Shvetsov745889c2020-02-27 14:54:21 +0000260 if (rc != 0) {
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200261 WARN("No or invalid SPM Core manifest image provided by BL2\n");
262 return rc;
Achin Gupta86f23532019-10-11 15:41:16 +0100263 }
264
265 /*
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200266 * Ensure that the SPM Core version is compatible with the SPM
267 * Dispatcher version.
Achin Gupta86f23532019-10-11 15:41:16 +0100268 */
J-Alves2672cde2020-05-07 18:42:25 +0100269 if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
270 (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
271 WARN("Unsupported FFA version (%u.%u)\n",
Achin Gupta86f23532019-10-11 15:41:16 +0100272 spmc_attrs.major_version, spmc_attrs.minor_version);
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200273 return -EINVAL;
Achin Gupta86f23532019-10-11 15:41:16 +0100274 }
275
J-Alves2672cde2020-05-07 18:42:25 +0100276 VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
Achin Gupta86f23532019-10-11 15:41:16 +0100277 spmc_attrs.minor_version);
278
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200279 VERBOSE("SPM Core run time EL%x.\n",
Max Shvetsove7fd80e2020-02-25 13:55:00 +0000280 SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);
Achin Gupta86f23532019-10-11 15:41:16 +0100281
Max Shvetsove79062e2020-03-12 15:16:40 +0000282 /* Validate the SPMC ID, Ensure high bit is set */
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200283 if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
284 SPMC_SECURE_ID_MASK) == 0U) {
285 WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
286 return -EINVAL;
Max Shvetsove79062e2020-03-12 15:16:40 +0000287 }
288
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200289 /* Validate the SPM Core execution state */
Achin Gupta86f23532019-10-11 15:41:16 +0100290 if ((spmc_attrs.exec_state != MODE_RW_64) &&
291 (spmc_attrs.exec_state != MODE_RW_32)) {
Olivier Deprez69ca84a2020-02-07 15:44:43 +0100292 WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
Achin Gupta86f23532019-10-11 15:41:16 +0100293 spmc_attrs.exec_state);
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200294 return -EINVAL;
Achin Gupta86f23532019-10-11 15:41:16 +0100295 }
296
Olivier Deprez69ca84a2020-02-07 15:44:43 +0100297 VERBOSE("%s%x.\n", "SPM Core execution state 0x",
298 spmc_attrs.exec_state);
Achin Gupta86f23532019-10-11 15:41:16 +0100299
Max Shvetsove7fd80e2020-02-25 13:55:00 +0000300#if SPMD_SPM_AT_SEL2
301 /* Ensure manifest has not requested AArch32 state in S-EL2 */
302 if (spmc_attrs.exec_state == MODE_RW_32) {
303 WARN("AArch32 state at S-EL2 is not supported.\n");
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200304 return -EINVAL;
Achin Gupta86f23532019-10-11 15:41:16 +0100305 }
306
307 /*
308 * Check if S-EL2 is supported on this system if S-EL2
309 * is required for SPM
310 */
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200311 if (!is_armv8_4_sel2_present()) {
312 WARN("SPM Core run time S-EL2 is not supported.\n");
313 return -EINVAL;
Achin Gupta86f23532019-10-11 15:41:16 +0100314 }
Max Shvetsove7fd80e2020-02-25 13:55:00 +0000315#endif /* SPMD_SPM_AT_SEL2 */
Achin Gupta86f23532019-10-11 15:41:16 +0100316
317 /* Initialise an entrypoint to set up the CPU context */
318 ep_attr = SECURE | EP_ST_ENABLE;
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200319 if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
Achin Gupta86f23532019-10-11 15:41:16 +0100320 ep_attr |= EP_EE_BIG;
Max Shvetsov745889c2020-02-27 14:54:21 +0000321 }
322
Achin Gupta86f23532019-10-11 15:41:16 +0100323 SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);
Achin Gupta86f23532019-10-11 15:41:16 +0100324
325 /*
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200326 * Populate SPSR for SPM Core based upon validated parameters from the
327 * manifest.
Achin Gupta86f23532019-10-11 15:41:16 +0100328 */
329 if (spmc_attrs.exec_state == MODE_RW_32) {
330 spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
331 SPSR_E_LITTLE,
332 DAIF_FIQ_BIT |
333 DAIF_IRQ_BIT |
334 DAIF_ABT_BIT);
335 } else {
Max Shvetsove7fd80e2020-02-25 13:55:00 +0000336
337#if SPMD_SPM_AT_SEL2
338 static const uint32_t runtime_el = MODE_EL2;
339#else
340 static const uint32_t runtime_el = MODE_EL1;
341#endif
342 spmc_ep_info->spsr = SPSR_64(runtime_el,
Achin Gupta86f23532019-10-11 15:41:16 +0100343 MODE_SP_ELX,
344 DISABLE_ALL_EXCEPTIONS);
345 }
346
Olivier Deprez4ab7a4a2021-06-21 09:47:13 +0200347 /* Set an initial SPMC context state for all cores. */
348 for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
349 spm_core_context[core_id].state = SPMC_STATE_OFF;
Max Shvetsov745889c2020-02-27 14:54:21 +0000350
Olivier Deprez4ab7a4a2021-06-21 09:47:13 +0200351 /* Setup an initial cpu context for the SPMC. */
352 cpu_ctx = &spm_core_context[core_id].cpu_ctx;
353 cm_setup_context(cpu_ctx, spmc_ep_info);
Achin Gupta86f23532019-10-11 15:41:16 +0100354
Olivier Deprez4ab7a4a2021-06-21 09:47:13 +0200355 /*
356 * Pass the core linear ID to the SPMC through x4.
357 * (TF-A implementation defined behavior helping
358 * a legacy TOS migration to adopt FF-A).
359 */
360 write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
361 }
Achin Gupta86f23532019-10-11 15:41:16 +0100362
Olivier Deprez9afca122019-10-28 09:15:52 +0000363 /* Register power management hooks with PSCI */
364 psci_register_spd_pm_hook(&spmd_pm);
365
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200366 /* Register init function for deferred init. */
Achin Gupta86f23532019-10-11 15:41:16 +0100367 bl31_register_bl32_init(&spmd_init);
368
Olivier Deprez4ab7a4a2021-06-21 09:47:13 +0200369 INFO("SPM Core setup done.\n");
370
Olivier Depreza664c492020-08-05 11:27:42 +0200371 /*
372 * Register an interrupt handler routing secure interrupts to SPMD
373 * while the NWd is running.
374 */
375 flags = 0;
376 set_interrupt_rm_flag(flags, NON_SECURE);
377 rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
378 spmd_secure_interrupt_handler,
379 flags);
380 if (rc != 0) {
381 panic();
382 }
383
Achin Gupta86f23532019-10-11 15:41:16 +0100384 return 0;
Max Shvetsov745889c2020-02-27 14:54:21 +0000385}
Achin Gupta86f23532019-10-11 15:41:16 +0100386
Max Shvetsov745889c2020-02-27 14:54:21 +0000387/*******************************************************************************
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200388 * Initialize context of SPM Core.
Max Shvetsov745889c2020-02-27 14:54:21 +0000389 ******************************************************************************/
390int spmd_setup(void)
391{
392 int rc;
Marc Bonnici1c33cc32021-11-29 17:57:03 +0000393 void *spmc_manifest;
394
395 /*
396 * If the SPMC is at EL3, then just initialise it directly. The
397 * shenanigans of when it is at a lower EL are not needed.
398 */
399 if (is_spmc_at_el3()) {
400 /* Allow the SPMC to populate its attributes directly. */
401 spmc_populate_attrs(&spmc_attrs);
402
403 rc = spmc_setup();
404 if (rc != 0) {
405 ERROR("SPMC initialisation failed 0x%x.\n", rc);
406 }
407 return rc;
408 }
Achin Gupta86f23532019-10-11 15:41:16 +0100409
Max Shvetsov745889c2020-02-27 14:54:21 +0000410 spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200411 if (spmc_ep_info == NULL) {
412 WARN("No SPM Core image provided by BL2 boot loader.\n");
413 return -EINVAL;
Max Shvetsov745889c2020-02-27 14:54:21 +0000414 }
415
416 /* Under no circumstances will this parameter be 0 */
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200417 assert(spmc_ep_info->pc != 0ULL);
Max Shvetsov745889c2020-02-27 14:54:21 +0000418
419 /*
420 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200421 * be used as a manifest for the SPM Core at the next lower EL/mode.
Max Shvetsov745889c2020-02-27 14:54:21 +0000422 */
Olivier Deprez69ca84a2020-02-07 15:44:43 +0100423 spmc_manifest = (void *)spmc_ep_info->args.arg0;
424 if (spmc_manifest == NULL) {
425 ERROR("Invalid or absent SPM Core manifest.\n");
426 return -EINVAL;
Max Shvetsov745889c2020-02-27 14:54:21 +0000427 }
428
429 /* Load manifest, init SPMC */
Olivier Deprez69ca84a2020-02-07 15:44:43 +0100430 rc = spmd_spmc_init(spmc_manifest);
Max Shvetsov745889c2020-02-27 14:54:21 +0000431 if (rc != 0) {
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200432 WARN("Booting device without SPM initialization.\n");
Max Shvetsov745889c2020-02-27 14:54:21 +0000433 }
434
Olivier Deprez69ca84a2020-02-07 15:44:43 +0100435 return rc;
Max Shvetsov745889c2020-02-27 14:54:21 +0000436}
437
438/*******************************************************************************
Marc Bonnicida2c9e12021-11-29 18:02:45 +0000439 * Forward FF-A SMCs to the other security state.
Max Shvetsov745889c2020-02-27 14:54:21 +0000440 ******************************************************************************/
Marc Bonnicida2c9e12021-11-29 18:02:45 +0000441uint64_t spmd_smc_switch_state(uint32_t smc_fid,
442 bool secure_origin,
443 uint64_t x1,
444 uint64_t x2,
445 uint64_t x3,
446 uint64_t x4,
447 void *handle)
Max Shvetsov745889c2020-02-27 14:54:21 +0000448{
Olivier Deprezebc34772020-04-16 16:59:21 +0200449 unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
450 unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100451
Max Shvetsov745889c2020-02-27 14:54:21 +0000452 /* Save incoming security state */
Max Shvetsove7fd80e2020-02-25 13:55:00 +0000453#if SPMD_SPM_AT_SEL2
Olivier Deprez9a2e5be2021-05-21 18:00:04 +0200454 if (secure_state_in == NON_SECURE) {
455 cm_el1_sysregs_context_save(secure_state_in);
456 }
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100457 cm_el2_sysregs_context_save(secure_state_in);
Olivier Deprez9a2e5be2021-05-21 18:00:04 +0200458#else
459 cm_el1_sysregs_context_save(secure_state_in);
Max Shvetsove7fd80e2020-02-25 13:55:00 +0000460#endif
Max Shvetsov745889c2020-02-27 14:54:21 +0000461
462 /* Restore outgoing security state */
Max Shvetsove7fd80e2020-02-25 13:55:00 +0000463#if SPMD_SPM_AT_SEL2
Olivier Deprez9a2e5be2021-05-21 18:00:04 +0200464 if (secure_state_out == NON_SECURE) {
465 cm_el1_sysregs_context_restore(secure_state_out);
466 }
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100467 cm_el2_sysregs_context_restore(secure_state_out);
Olivier Deprez9a2e5be2021-05-21 18:00:04 +0200468#else
469 cm_el1_sysregs_context_restore(secure_state_out);
Max Shvetsove7fd80e2020-02-25 13:55:00 +0000470#endif
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100471 cm_set_next_eret_context(secure_state_out);
Max Shvetsov745889c2020-02-27 14:54:21 +0000472
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100473 SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
Max Shvetsov745889c2020-02-27 14:54:21 +0000474 SMC_GET_GP(handle, CTX_GPREG_X5),
475 SMC_GET_GP(handle, CTX_GPREG_X6),
476 SMC_GET_GP(handle, CTX_GPREG_X7));
477}
478
/*******************************************************************************
 * Forward SMCs to the other security state.
 * A normal world call with an EL3-resident SPMC is handled in place instead
 * of performing a world switch.
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	if (is_spmc_at_el3() && !secure_origin) {
		return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);
	}

	return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4,
				     handle);
}
500
/*******************************************************************************
 * Return FFA_ERROR with specified error code.
 * Populates x0-x7 of the caller's context per the FF-A FFA_ERROR layout:
 * x0 = FFA_ERROR, x1 = target info (MBZ here), x2 = error code, x3-x7 MBZ.
 ******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, (uint32_t) FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}
511
Olivier Deprez33e44122020-04-16 17:54:27 +0200512/*******************************************************************************
513 * spmd_check_address_in_binary_image
514 ******************************************************************************/
515bool spmd_check_address_in_binary_image(uint64_t address)
516{
517 assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));
518
519 return ((address >= spmc_attrs.load_address) &&
520 (address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
521}
522
Olivier Deprezebc34772020-04-16 16:59:21 +0200523/******************************************************************************
524 * spmd_is_spmc_message
525 *****************************************************************************/
526static bool spmd_is_spmc_message(unsigned int ep)
527{
Marc Bonnicida2c9e12021-11-29 18:02:45 +0000528 if (is_spmc_at_el3()) {
529 return false;
530 }
531
Olivier Deprezebc34772020-04-16 16:59:21 +0200532 return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
533 && (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
534}
535
/******************************************************************************
 * spmd_handle_spmc_message
 * Placeholder for SPMC-to-SPMD direct message handling: logs the message and
 * rejects it. No framework messages are accepted here yet.
 *****************************************************************************/
static int spmd_handle_spmc_message(unsigned long long msg,
		unsigned long long parm1, unsigned long long parm2,
		unsigned long long parm3, unsigned long long parm4)
{
	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
		msg, parm1, parm2, parm3, parm4);

	return -EINVAL;
}
548
/*******************************************************************************
 * This function forwards FF-A SMCs to either the main SPMD handler or the
 * SPMC at EL3, depending on the origin security state, if enabled.
 ******************************************************************************/
uint64_t spmd_ffa_smc_handler(uint32_t smc_fid,
			      uint64_t x1,
			      uint64_t x2,
			      uint64_t x3,
			      uint64_t x4,
			      void *cookie,
			      void *handle,
			      uint64_t flags)
{
	/*
	 * With an SPMC at EL3, let it see secure-originated SMCs first; it
	 * calls back into the SPMD handler when required.
	 */
	if (is_spmc_at_el3() && is_caller_secure(flags)) {
		return spmc_smc_handler(smc_fid,
					is_caller_secure(flags),
					x1, x2, x3, x4, cookie,
					handle, flags);
	}

	return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
				handle, flags);
}
577
578/*******************************************************************************
J-Alves2672cde2020-05-07 18:42:25 +0100579 * This function handles all SMCs in the range reserved for FFA. Each call is
Achin Gupta86f23532019-10-11 15:41:16 +0100580 * either forwarded to the other security state or handled by the SPM dispatcher
581 ******************************************************************************/
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200582uint64_t spmd_smc_handler(uint32_t smc_fid,
583 uint64_t x1,
584 uint64_t x2,
585 uint64_t x3,
586 uint64_t x4,
587 void *cookie,
588 void *handle,
Achin Gupta86f23532019-10-11 15:41:16 +0100589 uint64_t flags)
590{
Olivier Deprezeae45962021-01-19 15:06:47 +0100591 unsigned int linear_id = plat_my_core_pos();
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200592 spmd_spm_core_context_t *ctx = spmd_get_context();
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100593 bool secure_origin;
594 int32_t ret;
J-Alves4c95c702020-05-26 14:03:05 +0100595 uint32_t input_version;
Achin Gupta86f23532019-10-11 15:41:16 +0100596
597 /* Determine which security state this SMC originated from */
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100598 secure_origin = is_caller_secure(flags);
Achin Gupta86f23532019-10-11 15:41:16 +0100599
Scott Brandene5dcf982020-08-25 13:49:32 -0700600 VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
601 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
602 linear_id, smc_fid, x1, x2, x3, x4,
603 SMC_GET_GP(handle, CTX_GPREG_X5),
604 SMC_GET_GP(handle, CTX_GPREG_X6),
605 SMC_GET_GP(handle, CTX_GPREG_X7));
Achin Gupta86f23532019-10-11 15:41:16 +0100606
607 switch (smc_fid) {
J-Alves2672cde2020-05-07 18:42:25 +0100608 case FFA_ERROR:
Achin Gupta86f23532019-10-11 15:41:16 +0100609 /*
610 * Check if this is the first invocation of this interface on
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200611 * this CPU. If so, then indicate that the SPM Core initialised
Achin Gupta86f23532019-10-11 15:41:16 +0100612 * unsuccessfully.
613 */
Olivier Deprez7c016332019-10-28 09:03:13 +0000614 if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
Achin Gupta86f23532019-10-11 15:41:16 +0100615 spmd_spm_core_sync_exit(x2);
Max Shvetsov745889c2020-02-27 14:54:21 +0000616 }
Achin Gupta86f23532019-10-11 15:41:16 +0100617
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100618 return spmd_smc_forward(smc_fid, secure_origin,
Marc Bonnicida2c9e12021-11-29 18:02:45 +0000619 x1, x2, x3, x4, cookie,
620 handle, flags);
Achin Gupta86f23532019-10-11 15:41:16 +0100621 break; /* not reached */
622
J-Alves2672cde2020-05-07 18:42:25 +0100623 case FFA_VERSION:
J-Alves4c95c702020-05-26 14:03:05 +0100624 input_version = (uint32_t)(0xFFFFFFFF & x1);
Achin Gupta86f23532019-10-11 15:41:16 +0100625 /*
J-Alves4c95c702020-05-26 14:03:05 +0100626 * If caller is secure and SPMC was initialized,
627 * return FFA_VERSION of SPMD.
628 * If caller is non secure and SPMC was initialized,
Marc Bonnici815d1012021-12-08 14:27:40 +0000629 * forward to the EL3 SPMC if enabled, otherwise return
630 * the SPMC version if implemented at a lower EL.
J-Alves4c95c702020-05-26 14:03:05 +0100631 * Sanity check to "input_version".
Marc Bonnicida2c9e12021-11-29 18:02:45 +0000632 * If the EL3 SPMC is enabled, ignore the SPMC state as
633 * this is not used.
Achin Gupta86f23532019-10-11 15:41:16 +0100634 */
J-Alves4c95c702020-05-26 14:03:05 +0100635 if ((input_version & FFA_VERSION_BIT31_MASK) ||
Marc Bonnicida2c9e12021-11-29 18:02:45 +0000636 (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
J-Alves4c95c702020-05-26 14:03:05 +0100637 ret = FFA_ERROR_NOT_SUPPORTED;
638 } else if (!secure_origin) {
Marc Bonnici815d1012021-12-08 14:27:40 +0000639 if (is_spmc_at_el3()) {
640 /*
641 * Forward the call directly to the EL3 SPMC, if
642 * enabled, as we don't need to wrap the call in
643 * a direct request.
644 */
645 return spmd_smc_forward(smc_fid, secure_origin,
646 x1, x2, x3, x4, cookie,
647 handle, flags);
648 }
649
Daniel Boulby9460a232021-12-09 11:20:13 +0000650 gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
651 uint64_t rc;
652
653 if (spmc_attrs.major_version == 1 &&
654 spmc_attrs.minor_version == 0) {
655 ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
656 spmc_attrs.minor_version);
657 SMC_RET8(handle, (uint32_t)ret,
658 FFA_TARGET_INFO_MBZ,
659 FFA_TARGET_INFO_MBZ,
660 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
661 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
662 FFA_PARAM_MBZ);
663 break;
664 }
665 /* Save non-secure system registers context */
666 cm_el1_sysregs_context_save(NON_SECURE);
667#if SPMD_SPM_AT_SEL2
668 cm_el2_sysregs_context_save(NON_SECURE);
669#endif
670
671 /*
672 * The incoming request has FFA_VERSION as X0 smc_fid
673 * and requested version in x1. Prepare a direct request
674 * from SPMD to SPMC with FFA_VERSION framework function
675 * identifier in X2 and requested version in X3.
676 */
677 spmd_build_spmc_message(gpregs,
678 SPMD_FWK_MSG_FFA_VERSION_REQ,
679 input_version);
680
681 rc = spmd_spm_core_sync_entry(ctx);
682
683 if ((rc != 0ULL) ||
684 (SMC_GET_GP(gpregs, CTX_GPREG_X0) !=
685 FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
686 (SMC_GET_GP(gpregs, CTX_GPREG_X2) !=
Marc Bonnici25f4b542022-04-12 17:18:13 +0100687 (FFA_FWK_MSG_BIT |
Daniel Boulby9460a232021-12-09 11:20:13 +0000688 SPMD_FWK_MSG_FFA_VERSION_RESP))) {
689 ERROR("Failed to forward FFA_VERSION\n");
690 ret = FFA_ERROR_NOT_SUPPORTED;
691 } else {
692 ret = SMC_GET_GP(gpregs, CTX_GPREG_X3);
693 }
694
695 /*
696 * Return here after SPMC has handled FFA_VERSION.
697 * The returned SPMC version is held in X3.
698 * Forward this version in X0 to the non-secure caller.
699 */
700 return spmd_smc_forward(ret, true, FFA_PARAM_MBZ,
701 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
Marc Bonnicida2c9e12021-11-29 18:02:45 +0000702 FFA_PARAM_MBZ, cookie, gpregs,
703 flags);
J-Alves4c95c702020-05-26 14:03:05 +0100704 } else {
J-Alves64ff9932021-03-01 10:26:59 +0000705 ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
706 FFA_VERSION_MINOR);
J-Alves4c95c702020-05-26 14:03:05 +0100707 }
708
J-Alves64ff9932021-03-01 10:26:59 +0000709 SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
710 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
711 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
Achin Gupta86f23532019-10-11 15:41:16 +0100712 break; /* not reached */
713
J-Alves2672cde2020-05-07 18:42:25 +0100714 case FFA_FEATURES:
Achin Gupta86f23532019-10-11 15:41:16 +0100715 /*
716 * This is an optional interface. Do the minimal checks and
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200717 * forward to SPM Core which will handle it if implemented.
Achin Gupta86f23532019-10-11 15:41:16 +0100718 */
719
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200720 /* Forward SMC from Normal world to the SPM Core */
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100721 if (!secure_origin) {
722 return spmd_smc_forward(smc_fid, secure_origin,
Marc Bonnicida2c9e12021-11-29 18:02:45 +0000723 x1, x2, x3, x4, cookie,
724 handle, flags);
Achin Gupta86f23532019-10-11 15:41:16 +0100725 }
Max Shvetsov745889c2020-02-27 14:54:21 +0000726
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200727 /*
728 * Return success if call was from secure world i.e. all
J-Alves2672cde2020-05-07 18:42:25 +0100729 * FFA functions are supported. This is essentially a
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200730 * nop.
731 */
J-Alves2672cde2020-05-07 18:42:25 +0100732 SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200733 SMC_GET_GP(handle, CTX_GPREG_X5),
734 SMC_GET_GP(handle, CTX_GPREG_X6),
735 SMC_GET_GP(handle, CTX_GPREG_X7));
736
Achin Gupta86f23532019-10-11 15:41:16 +0100737 break; /* not reached */
738
J-Alves2672cde2020-05-07 18:42:25 +0100739 case FFA_ID_GET:
Max Shvetsove79062e2020-03-12 15:16:40 +0000740 /*
J-Alves2672cde2020-05-07 18:42:25 +0100741 * Returns the ID of the calling FFA component.
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200742 */
Max Shvetsove79062e2020-03-12 15:16:40 +0000743 if (!secure_origin) {
J-Alves2672cde2020-05-07 18:42:25 +0100744 SMC_RET8(handle, FFA_SUCCESS_SMC32,
745 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
746 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
747 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
748 FFA_PARAM_MBZ);
Max Shvetsove79062e2020-03-12 15:16:40 +0000749 }
750
J-Alves2672cde2020-05-07 18:42:25 +0100751 SMC_RET8(handle, FFA_SUCCESS_SMC32,
752 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
753 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
754 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
755 FFA_PARAM_MBZ);
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200756
Max Shvetsove79062e2020-03-12 15:16:40 +0000757 break; /* not reached */
758
Olivier Deprezeae45962021-01-19 15:06:47 +0100759 case FFA_SECONDARY_EP_REGISTER_SMC64:
760 if (secure_origin) {
761 ret = spmd_pm_secondary_ep_register(x1);
762
763 if (ret < 0) {
764 SMC_RET8(handle, FFA_ERROR_SMC64,
765 FFA_TARGET_INFO_MBZ, ret,
766 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
767 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
768 FFA_PARAM_MBZ);
769 } else {
770 SMC_RET8(handle, FFA_SUCCESS_SMC64,
771 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
772 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
773 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
774 FFA_PARAM_MBZ);
775 }
776 }
777
778 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
779 break; /* Not reached */
780
Daniel Boulby27f35df2021-02-03 12:13:19 +0000781 case FFA_SPM_ID_GET:
782 if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
783 return spmd_ffa_error_return(handle,
784 FFA_ERROR_NOT_SUPPORTED);
785 }
786 /*
787 * Returns the ID of the SPMC or SPMD depending on the FF-A
788 * instance where this function is invoked
789 */
790 if (!secure_origin) {
791 SMC_RET8(handle, FFA_SUCCESS_SMC32,
792 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
793 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
794 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
795 FFA_PARAM_MBZ);
796 }
797 SMC_RET8(handle, FFA_SUCCESS_SMC32,
798 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
799 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
800 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
801 FFA_PARAM_MBZ);
802
803 break; /* not reached */
804
Olivier Deprez33e44122020-04-16 17:54:27 +0200805 case FFA_MSG_SEND_DIRECT_REQ_SMC32:
806 if (secure_origin && spmd_is_spmc_message(x1)) {
807 ret = spmd_handle_spmc_message(x3, x4,
808 SMC_GET_GP(handle, CTX_GPREG_X5),
809 SMC_GET_GP(handle, CTX_GPREG_X6),
810 SMC_GET_GP(handle, CTX_GPREG_X7));
811
812 SMC_RET8(handle, FFA_SUCCESS_SMC32,
813 FFA_TARGET_INFO_MBZ, ret,
814 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
815 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
816 FFA_PARAM_MBZ);
817 } else {
818 /* Forward direct message to the other world */
819 return spmd_smc_forward(smc_fid, secure_origin,
Marc Bonnicida2c9e12021-11-29 18:02:45 +0000820 x1, x2, x3, x4, cookie,
821 handle, flags);
Olivier Deprez33e44122020-04-16 17:54:27 +0200822 }
823 break; /* Not reached */
824
825 case FFA_MSG_SEND_DIRECT_RESP_SMC32:
826 if (secure_origin && spmd_is_spmc_message(x1)) {
Olivier Depreza664c492020-08-05 11:27:42 +0200827 spmd_spm_core_sync_exit(0ULL);
Olivier Deprez33e44122020-04-16 17:54:27 +0200828 } else {
829 /* Forward direct message to the other world */
830 return spmd_smc_forward(smc_fid, secure_origin,
Marc Bonnicida2c9e12021-11-29 18:02:45 +0000831 x1, x2, x3, x4, cookie,
832 handle, flags);
Olivier Deprez33e44122020-04-16 17:54:27 +0200833 }
834 break; /* Not reached */
835
J-Alves2672cde2020-05-07 18:42:25 +0100836 case FFA_RX_RELEASE:
837 case FFA_RXTX_MAP_SMC32:
838 case FFA_RXTX_MAP_SMC64:
839 case FFA_RXTX_UNMAP:
Ruari Phipps93dff702020-07-28 10:33:35 +0100840 case FFA_PARTITION_INFO_GET:
J-Alves2621cfd2021-03-11 17:46:47 +0000841#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
842 case FFA_NOTIFICATION_BITMAP_CREATE:
843 case FFA_NOTIFICATION_BITMAP_DESTROY:
844 case FFA_NOTIFICATION_BIND:
845 case FFA_NOTIFICATION_UNBIND:
846 case FFA_NOTIFICATION_SET:
847 case FFA_NOTIFICATION_GET:
848 case FFA_NOTIFICATION_INFO_GET:
849 case FFA_NOTIFICATION_INFO_GET_SMC64:
Federico Recanatieecb4b02022-02-03 17:22:37 +0100850 case FFA_MSG_SEND2:
Federico Recanati5c7c5c42022-03-18 10:30:00 +0100851 case FFA_RX_ACQUIRE:
J-Alves2621cfd2021-03-11 17:46:47 +0000852#endif
Federico Recanatieecb4b02022-02-03 17:22:37 +0100853 case FFA_MSG_RUN:
Ruari Phipps93dff702020-07-28 10:33:35 +0100854 /*
Federico Recanatieecb4b02022-02-03 17:22:37 +0100855 * Above calls should be invoked only by the Normal world and
856 * must not be forwarded from Secure world to Normal world.
Ruari Phipps93dff702020-07-28 10:33:35 +0100857 */
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100858 if (secure_origin) {
J-Alves2672cde2020-05-07 18:42:25 +0100859 return spmd_ffa_error_return(handle,
Ruari Phipps93dff702020-07-28 10:33:35 +0100860 FFA_ERROR_NOT_SUPPORTED);
Achin Gupta86f23532019-10-11 15:41:16 +0100861 }
862
863 /* Fall through to forward the call to the other world */
J-Alves2672cde2020-05-07 18:42:25 +0100864 case FFA_MSG_SEND:
J-Alves2672cde2020-05-07 18:42:25 +0100865 case FFA_MSG_SEND_DIRECT_REQ_SMC64:
J-Alves2672cde2020-05-07 18:42:25 +0100866 case FFA_MSG_SEND_DIRECT_RESP_SMC64:
867 case FFA_MEM_DONATE_SMC32:
868 case FFA_MEM_DONATE_SMC64:
869 case FFA_MEM_LEND_SMC32:
870 case FFA_MEM_LEND_SMC64:
871 case FFA_MEM_SHARE_SMC32:
872 case FFA_MEM_SHARE_SMC64:
873 case FFA_MEM_RETRIEVE_REQ_SMC32:
874 case FFA_MEM_RETRIEVE_REQ_SMC64:
875 case FFA_MEM_RETRIEVE_RESP:
876 case FFA_MEM_RELINQUISH:
877 case FFA_MEM_RECLAIM:
Marc Bonnici9fa01e92021-09-23 09:44:14 +0100878 case FFA_MEM_FRAG_TX:
879 case FFA_MEM_FRAG_RX:
J-Alves2672cde2020-05-07 18:42:25 +0100880 case FFA_SUCCESS_SMC32:
881 case FFA_SUCCESS_SMC64:
Achin Gupta86f23532019-10-11 15:41:16 +0100882 /*
883 * TODO: Assume that no requests originate from EL3 at the
884 * moment. This will change if a SP service is required in
885 * response to secure interrupts targeted to EL3. Until then
886 * simply forward the call to the Normal world.
887 */
888
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100889 return spmd_smc_forward(smc_fid, secure_origin,
Marc Bonnicida2c9e12021-11-29 18:02:45 +0000890 x1, x2, x3, x4, cookie,
891 handle, flags);
Achin Gupta86f23532019-10-11 15:41:16 +0100892 break; /* not reached */
893
J-Alves2672cde2020-05-07 18:42:25 +0100894 case FFA_MSG_WAIT:
Achin Gupta86f23532019-10-11 15:41:16 +0100895 /*
896 * Check if this is the first invocation of this interface on
897 * this CPU from the Secure world. If so, then indicate that the
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200898 * SPM Core initialised successfully.
Achin Gupta86f23532019-10-11 15:41:16 +0100899 */
Olivier Deprez7c016332019-10-28 09:03:13 +0000900 if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
Olivier Depreza664c492020-08-05 11:27:42 +0200901 spmd_spm_core_sync_exit(0ULL);
Achin Gupta86f23532019-10-11 15:41:16 +0100902 }
903
Max Shvetsov745889c2020-02-27 14:54:21 +0000904 /* Fall through to forward the call to the other world */
Olivier Deprezae18caf2021-04-02 11:09:10 +0200905 case FFA_INTERRUPT:
J-Alves2672cde2020-05-07 18:42:25 +0100906 case FFA_MSG_YIELD:
Achin Gupta86f23532019-10-11 15:41:16 +0100907 /* This interface must be invoked only by the Secure world */
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100908 if (!secure_origin) {
J-Alves2672cde2020-05-07 18:42:25 +0100909 return spmd_ffa_error_return(handle,
910 FFA_ERROR_NOT_SUPPORTED);
Achin Gupta86f23532019-10-11 15:41:16 +0100911 }
912
Olivier Deprez41ff36a2019-12-23 16:21:12 +0100913 return spmd_smc_forward(smc_fid, secure_origin,
Marc Bonnicida2c9e12021-11-29 18:02:45 +0000914 x1, x2, x3, x4, cookie,
915 handle, flags);
Achin Gupta86f23532019-10-11 15:41:16 +0100916 break; /* not reached */
917
Olivier Depreza664c492020-08-05 11:27:42 +0200918 case FFA_NORMAL_WORLD_RESUME:
919 if (secure_origin && ctx->secure_interrupt_ongoing) {
920 spmd_spm_core_sync_exit(0ULL);
921 } else {
922 return spmd_ffa_error_return(handle, FFA_ERROR_DENIED);
923 }
924 break; /* Not reached */
925
Achin Gupta86f23532019-10-11 15:41:16 +0100926 default:
927 WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
J-Alves2672cde2020-05-07 18:42:25 +0100928 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
Achin Gupta86f23532019-10-11 15:41:16 +0100929 }
930}