/*
 * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/ffa_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information read from its manifest.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * SPM Core context on current CPU get helper.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	unsigned int linear_id = plat_my_core_pos();

	return &spm_core_context[linear_id];
}

/*******************************************************************************
 * SPM Core entry point information get helper.
 ******************************************************************************/
entry_point_info_t *spmd_spmc_ep_info_get(void)
{
	return spmc_ep_info;
}

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);
static uint64_t spmd_ffa_error_return(void *handle,
				      int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle);

/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
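 *
 * The SPMC runs until spmd_smc_handler() observes its completion signal
 * (FFA_MSG_WAIT on success, or FFA_ERROR on failure, while this CPU is
 * still in the ON_PENDING state) and calls spmd_spm_core_sync_exit(),
 * whose argument becomes this function's return value.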
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
	cm_el1_sysregs_context_restore(SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
	cm_el1_sysregs_context_save(SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#endif

	return rc;
}

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into the SPMC. Jump back to the original C
	 * runtime context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Jump to the SPM Core for the first time.
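 *
 * Registered with bl31_register_bl32_init() in spmd_spmc_init() so that it
 * runs as the deferred BL32 init step; returns 1 on success and 0 if the
 * SPMC failed to initialise.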
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;
	unsigned int linear_id = plat_my_core_pos();
	unsigned int core_id;

	VERBOSE("SPM Core init start.\n");
	ctx->state = SPMC_STATE_ON_PENDING;

	/* Set the SPMC context state on other CPUs to OFF */
	for (core_id = 0; core_id < PLATFORM_CORE_COUNT; core_id++) {
		if (core_id != linear_id) {
			spm_core_context[core_id].state = SPMC_STATE_OFF;
		}
	}

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%llx\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	return 1;
}

/*******************************************************************************
 * Loads SPMC manifest and inits SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	spmd_spm_core_context_t *spm_ctx = spmd_get_context();
	uint32_t ep_attr;
	int rc;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
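	/*
	 * Illustrative example (the exact limits come from FFA_VERSION_MAJOR
	 * and FFA_VERSION_MINOR): if the SPMD implements FF-A v1.0, an SPMC
	 * reporting 1.0 passes this check, while 1.1 or 2.0 is rejected.
	 */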
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
		spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
		SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID: ensure the high (secure) bit is set */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
			SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("%s%x.\n", "SPM Core execution state 0x",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check if S-EL2 is supported on this system if S-EL2
	 * is required for SPM
	 */
	if (!is_armv8_4_sel2_present()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);
	assert(spmc_ep_info->pc == BL32_BASE);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
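	/*
	 * For an AArch64 SPMC this selects EL1h or EL2h (per SPMD_SPM_AT_SEL2)
	 * with all exceptions masked; for AArch32 it selects Supervisor mode,
	 * A32 state, little-endian, with aborts, IRQs and FIQs masked.
	 */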
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {

#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

	/* Initialise SPM Core context with this entry point information */
	cm_setup_context(&spm_ctx->cpu_ctx, spmc_ep_info);

	/* Reuse PSCI affinity states to mark this SPMC context as off */
	spm_ctx->state = AFF_STATE_OFF;

	INFO("SPM Core setup done.\n");

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	return 0;
}

/*******************************************************************************
 * Initialize context of SPM Core.
 ******************************************************************************/
int spmd_setup(void)
{
	void *spmc_manifest;
	int rc;

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return -EINVAL;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
	if (spmc_manifest == NULL) {
		ERROR("Invalid or absent SPM Core manifest.\n");
		return -EINVAL;
	}

	/* Load manifest, init SPMC */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return rc;
}

/*******************************************************************************
 * Forward SMC to the other security state
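 *
 * Saves the EL1 (and, when SPMD_SPM_AT_SEL2 is set, EL2) system register
 * context of the world the SMC arrived from, restores the other world's
 * context and re-issues the SMC arguments into that world via SMC_RET8.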
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle)
{
	uint32_t secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	uint32_t secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

	/* Save incoming security state */
	cm_el1_sysregs_context_save(secure_state_in);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(secure_state_in);
#endif

	/* Restore outgoing security state */
	cm_el1_sysregs_context_restore(secure_state_out);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(secure_state_out);
#endif
	cm_set_next_eret_context(secure_state_out);

	SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
		 SMC_GET_GP(handle, CTX_GPREG_X5),
		 SMC_GET_GP(handle, CTX_GPREG_X6),
		 SMC_GET_GP(handle, CTX_GPREG_X7));
}

/*******************************************************************************
 * Return FFA_ERROR with specified error code
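 *
 * The error code is returned in the third argument register (w2), with the
 * target information and the remaining parameters passed as MBZ values.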
 ******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM dispatcher
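 *
 * Calls from one world are routed to the other via spmd_smc_forward(), while
 * FFA_VERSION, FFA_ID_GET and the SPMC boot signals (FFA_ERROR/FFA_MSG_WAIT
 * received while this CPU is still in the ON_PENDING state) are handled
 * directly at EL3.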
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int32_t ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	INFO("SPM: 0x%x 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
	     smc_fid, x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5),
	     SMC_GET_GP(handle, CTX_GPREG_X6),
	     SMC_GET_GP(handle, CTX_GPREG_X7));

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * Sanity check "input_version" (bit 31 must be zero) and make
		 * sure the SPMC has left the reset state.
		 * If the caller is non-secure, return the SPMC's version.
		 * If the caller is secure, return the SPMD's FFA_VERSION.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (ctx->state == SPMC_STATE_RESET)) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			ret = MAKE_FFA_VERSION(spmc_attrs.major_version, spmc_attrs.minor_version);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, ret, FFA_TARGET_INFO_MBZ, FFA_TARGET_INFO_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */

	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to SPM Core which will handle it if implemented.
		 */

		/*
		 * Check if x1 holds a valid FFA fid. This is an
		 * optimization.
		 */
		if (!is_ffa_fid(x1)) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward SMC from Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}

		/*
		 * Return success if call was from secure world i.e. all
		 * FFA functions are supported. This is essentially a
		 * nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FFA component.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_MSG_RUN:
		/* This interface must be invoked only by the Normal world */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Fall through to forward the call to the other world */

	case FFA_PARTITION_INFO_GET:
	case FFA_MSG_SEND:
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * TODO: Assume that no requests originate from EL3 at the
		 * moment. This will change if a SP service is required in
		 * response to secure interrupts targeted to EL3. Until then
		 * simply forward the call to the Normal world.
		 */

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that the
		 * SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0);
		}

		/* Fall through to forward the call to the other world */

	case FFA_MSG_YIELD:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}