/*
 * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/ffa_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information read from its manifest.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * SPM Core context on current CPU get helper.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	unsigned int linear_id = plat_my_core_pos();

	return &spm_core_context[linear_id];
}

/*******************************************************************************
 * SPM Core entry point information get helper.
 ******************************************************************************/
entry_point_info_t *spmd_spmc_ep_info_get(void)
{
	return spmc_ep_info;
}

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);
static uint64_t spmd_ffa_error_return(void *handle,
				      int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle);

/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
	cm_el1_sysregs_context_restore(SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
	cm_el1_sysregs_context_save(SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#endif

	return rc;
}

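/*
 * Note: spmd_spm_core_enter() and spmd_spm_core_exit() are assumed to be the
 * SPMD's assembly helpers, following the usual TF-A dispatcher pattern. The
 * enter helper stashes the EL3 C runtime callee-saved registers in
 * spmc_ctx->c_rt_ctx before ERET'ing into the SPMC, so that
 * spmd_spm_core_sync_exit() below can unwind straight back to the point after
 * spmd_spm_core_enter() with 'rc' in x0.
 */
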
/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into the SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;
	unsigned int linear_id = plat_my_core_pos();
	unsigned int core_id;

	VERBOSE("SPM Core init start.\n");
	ctx->state = SPMC_STATE_ON_PENDING;

	/* Set the SPMC context state on other CPUs to OFF */
	for (core_id = 0; core_id < PLATFORM_CORE_COUNT; core_id++) {
		if (core_id != linear_id) {
			spm_core_context[core_id].state = SPMC_STATE_OFF;
		}
	}

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%llx\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	return 1;
}

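/*
 * Note on the init handshake: spmd_init() is registered through
 * bl31_register_bl32_init() (see spmd_spmc_init() below) and performs a
 * synchronous entry into the SPMC on the primary core. The SPMC signals the
 * outcome of its boot-time initialisation with an FF-A call: FFA_MSG_WAIT on
 * success or FFA_ERROR on failure. Both are caught in spmd_smc_handler()
 * while the context is still SPMC_STATE_ON_PENDING and routed back here via
 * spmd_spm_core_sync_exit(), after which spmd_init() reports the result with
 * its return value (1 on success, 0 on failure).
 */
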
/*******************************************************************************
 * Loads SPMC manifest and inits SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	spmd_spm_core_context_t *spm_ctx = spmd_get_context();
	uint32_t ep_attr;
	int rc;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
		spmc_attrs.minor_version);

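	/*
	 * Compatibility rule applied above: the SPMC's major version must
	 * match the SPMD's FFA_VERSION_MAJOR exactly, and its minor version
	 * must not exceed FFA_VERSION_MINOR. In other words, the SPMD accepts
	 * an equal or older minor revision of the same major revision, and
	 * nothing else.
	 */
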
	VERBOSE("SPM Core run time EL%x.\n",
		SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID; ensure the high (secure) bit is set */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
			SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

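	/*
	 * The check above relies on the convention (captured by
	 * SPMC_SECURE_ID_SHIFT/SPMC_SECURE_ID_MASK, assumed to be defined in
	 * spmd_private.h) that secure world endpoint IDs are distinguished by
	 * a set bit at SPMC_SECURE_ID_SHIFT. A manifest-provided SPMC ID with
	 * that bit clear would fall in the normal world ID space and is
	 * therefore rejected.
	 */
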
	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("%s%x.\n", "SPM Core execution state 0x",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * S-EL2 is required for the SPM in this configuration, so check
	 * that it is supported on this system.
	 */
	if (!is_armv8_4_sel2_present()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

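	/*
	 * SPMD_SPM_AT_SEL2 is the build-time switch selecting an SPMC that
	 * runs at S-EL2 (e.g. a Hafnium-based SPMC) rather than at S-EL1.
	 * S-EL2 only exists in AArch64 and requires the Armv8.4 Secure EL2
	 * extension, which is what is_armv8_4_sel2_present() is assumed to
	 * probe; both constraints are enforced above.
	 */
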
	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);
	assert(spmc_ep_info->pc == BL32_BASE);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {

#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

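	/*
	 * Summary of the entry state programmed above: an AArch32 SPMC is
	 * entered in Supervisor mode, in ARM (not Thumb) state, little-endian,
	 * with FIQ/IRQ/ABT masked; an AArch64 SPMC is entered at S-EL2 or
	 * S-EL1 (depending on SPMD_SPM_AT_SEL2) using SP_ELx with all
	 * exceptions masked. The entry point attributes are additionally
	 * marked big-endian above when SCTLR_EL3.EE is set.
	 */
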
	/* Initialise SPM Core context with this entry point information */
	cm_setup_context(&spm_ctx->cpu_ctx, spmc_ep_info);

	/* Reuse PSCI affinity states to mark this SPMC context as off */
	spm_ctx->state = AFF_STATE_OFF;

	INFO("SPM Core setup done.\n");

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	return 0;
}

/*******************************************************************************
 * Initialize context of SPM Core.
 ******************************************************************************/
int spmd_setup(void)
{
	void *spmc_manifest;
	int rc;

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return -EINVAL;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
	if (spmc_manifest == NULL) {
		ERROR("Invalid or absent SPM Core manifest.\n");
		return -EINVAL;
	}

	/* Load manifest, init SPMC */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return rc;
}

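/*
 * For reference, spmc_manifest (arg0 of the BL32 entry point, typically the
 * TOS_FW_CONFIG device tree loaded by BL2) is parsed by the platform's
 * plat_spm_core_manifest_load() into spmc_attrs. A minimal sketch of the kind
 * of node it consumes is shown below; the property names and values are
 * illustrative only and the platform's SPMC manifest binding is authoritative:
 *
 *	attribute {
 *		spmc_id = <0x8000>;
 *		maj_ver = <0x1>;
 *		min_ver = <0x0>;
 *		exec_state = <0x0>;
 *		load_address = <0x0 0x6000000>;
 *		entrypoint = <0x0 0x6000000>;
 *		binary_size = <0x80000>;
 *	};
 */
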
/*******************************************************************************
 * Forward SMC to the other security state
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle)
{
	uint32_t secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	uint32_t secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

	/* Save incoming security state */
	cm_el1_sysregs_context_save(secure_state_in);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(secure_state_in);
#endif

	/* Restore outgoing security state */
	cm_el1_sysregs_context_restore(secure_state_out);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(secure_state_out);
#endif
	cm_set_next_eret_context(secure_state_out);

	SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
		 SMC_GET_GP(handle, CTX_GPREG_X5),
		 SMC_GET_GP(handle, CTX_GPREG_X6),
		 SMC_GET_GP(handle, CTX_GPREG_X7));
}

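/*
 * The world switch above works purely on saved CPU context: the EL1 (and,
 * when SPMD_SPM_AT_SEL2 is enabled, EL2) system registers of the world that
 * issued the SMC are saved into its context, the other world's registers are
 * restored, and the next ERET is retargeted at that world. The SMC function
 * ID and x1-x4 are re-emitted unchanged while x5-x7 are copied from the
 * incoming GP register frame, so the callee observes the original eight
 * argument registers.
 */
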
/*******************************************************************************
 * Return FFA_ERROR with specified error code
 ******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

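/*
 * Register layout of the FFA_ERROR response built above, following the FF-A
 * convention: w0 = FFA_ERROR, w1 = target information (passed as MBZ here),
 * w2 = the error code, and w3-w7 MBZ.
 */
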
/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int32_t ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	INFO("SPM: 0x%x 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
	     smc_fid, x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5),
	     SMC_GET_GP(handle, CTX_GPREG_X6),
	     SMC_GET_GP(handle, CTX_GPREG_X7));

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * Sanity check the caller-supplied version (bit 31 must be
		 * zero) and make sure the SPMC context has left its reset
		 * state. Then, if the caller is in the normal world, report
		 * the SPMC's version; if the caller is in the secure world,
		 * report the SPMD's own FF-A version.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (ctx->state == SPMC_STATE_RESET)) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
					       spmc_attrs.minor_version);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, ret, FFA_TARGET_INFO_MBZ, FFA_TARGET_INFO_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */

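	/*
	 * FF-A version encoding used above: bit 31 must be zero (hence the
	 * FFA_VERSION_BIT31_MASK check on the caller-supplied value), bits
	 * [30:16] carry the major version and bits [15:0] the minor version.
	 * Assuming the usual (major << 16) | minor packing behind
	 * MAKE_FFA_VERSION(), MAKE_FFA_VERSION(1, 0) yields 0x10000 for
	 * FF-A v1.0.
	 */
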
	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to SPM Core which will handle it if implemented.
		 */

		/*
		 * Check if x1 holds a valid FFA fid. This is an
		 * optimization.
		 */
		if (!is_ffa_fid(x1)) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward SMC from Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}

		/*
		 * Return success if call was from secure world i.e. all
		 * FFA functions are supported. This is essentially a
		 * nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FFA component.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_MSG_RUN:
		/* This interface must be invoked only by the Normal world */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Fall through to forward the call to the other world */

	case FFA_PARTITION_INFO_GET:
	case FFA_MSG_SEND:
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * TODO: Assume that no requests originate from EL3 at the
		 * moment. This will change if a SP service is required in
		 * response to secure interrupts targeted to EL3. Until then
		 * simply forward the call to the Normal world.
		 */

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that the
		 * SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0);
		}

		/* Fall through to forward the call to the other world */

	case FFA_MSG_YIELD:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}