/*
 * Copyright (c) 2020-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <common/tbbr/tbbr_img_def.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/fconf/fconf.h>
#include <lib/fconf/fconf_dyn_cfg_getter.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/ffa_svc.h>
#include <services/spmc_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information is read from its manifest if the SPMC is not
 * at EL3. Else, it is populated from the SPMC directly.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;
/*******************************************************************************
 * Returns the SPM Core context for the CPU identified by mpidr.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
{
	int core_idx = plat_core_pos_by_mpidr(mpidr);

	if (core_idx < 0) {
		ERROR("Invalid mpidr: %" PRIx64 ", returned ID: %d\n", mpidr, core_idx);
		panic();
	}

	return &spm_core_context[core_idx];
}

/*******************************************************************************
 * Returns the SPM Core context on the current CPU.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	return spmd_get_context_by_mpidr(read_mpidr());
}

/*******************************************************************************
 * SPM Core ID getter.
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
	return spmc_attrs.spmc_id;
}

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);
static uint64_t spmd_ffa_error_return(void *handle,
				      int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags);

/******************************************************************************
 * Builds an SPMD to SPMC direct message request.
 *****************************************************************************/
void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
			     unsigned long long message)
{
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
	write_ctx_reg(gpregs, CTX_GPREG_X1,
		(SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
		 spmd_spmc_id_get());

	/* Bit 31 of w2 marks this as a framework message. */
	write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
	write_ctx_reg(gpregs, CTX_GPREG_X3, message);
}
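
/*
 * Illustrative sketch only: after spmd_build_spmc_message() runs, the SPMC
 * context holds a framework direct request laid out as below. The
 * FFA_VERSION forwarding path later in this file builds exactly such a
 * message:
 *
 *	gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
 *
 *	spmd_build_spmc_message(gpregs, SPMD_FWK_MSG_FFA_VERSION_REQ,
 *				input_version);
 *
 *	X0: FFA_MSG_SEND_DIRECT_REQ_SMC32
 *	X1: SPMD endpoint ID (source, upper halfword) | SPMC ID (destination)
 *	X2: framework message bit | SPMD_FWK_MSG_FFA_VERSION_REQ
 *	X3: requested FF-A version
 */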

/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#else
	cm_el1_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#else
	cm_el1_sysregs_context_save(SECURE);
#endif

	return rc;
}
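
/*
 * Note on pairing (informational): spmd_spm_core_sync_entry() parks the
 * SPMD's C runtime context and ERETs into the SPMC. The SPMC later signals
 * completion with an SMC (e.g. FFA_MSG_WAIT on cold boot, or
 * FFA_MSG_SEND_DIRECT_RESP to a framework message), upon which the SPMD
 * handler calls spmd_spm_core_sync_exit() to resume right after the
 * spmd_spm_core_enter() call above, with the SPMC-provided value in rc.
 */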

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * originally called.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;

	VERBOSE("SPM Core init start.\n");

	/* Primary boot core enters the SPMC for initialization. */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	return 1;
}
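
/*
 * Note (informational): spmd_init() intentionally returns 1 on success and
 * 0 on failure, following the convention of the BL32 init hook registered
 * through bl31_register_bl32_init() in spmd_spmc_init() below.
 */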

/*******************************************************************************
 * spmd_secure_interrupt_handler
 * Enter the SPMC for further handling of the secure interrupt by the SPMC
 * itself or a Secure Partition.
 ******************************************************************************/
static uint64_t spmd_secure_interrupt_handler(uint32_t id,
					      uint32_t flags,
					      void *handle,
					      void *cookie)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
	unsigned int linear_id = plat_my_core_pos();
	int64_t rc;

	/* Sanity check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering SPMC */
	cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(NON_SECURE);
#endif

	/* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
	write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

	/* Mark current core as handling a secure interrupt. */
	ctx->secure_interrupt_ongoing = true;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, linear_id);
	}

	ctx->secure_interrupt_ongoing = false;

	cm_el1_sysregs_context_restore(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(NON_SECURE);
#endif
	cm_set_next_eret_context(NON_SECURE);

	SMC_RET0(&ctx->cpu_ctx);
}
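
/*
 * Sequence sketch (informational): a secure interrupt that fires while the
 * normal world is running takes the following round trip through the SPMD:
 *
 *	NWd preempted -> spmd_secure_interrupt_handler()
 *	  -> FFA_INTERRUPT conveyed via spmd_spm_core_sync_entry()
 *	  -> SPMC/SP handles the interrupt
 *	  -> SPMC issues FFA_NORMAL_WORLD_RESUME
 *	  -> spmd_smc_handler() calls spmd_spm_core_sync_exit(0)
 *	  -> handler returns to the preempted normal world via SMC_RET0
 *
 * The FFA_NORMAL_WORLD_RESUME case in spmd_smc_handler() below checks
 * secure_interrupt_ongoing to ensure this resume path is only taken here.
 */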

#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
static int spmd_dynamic_map_mem(uintptr_t base_addr, size_t size,
				unsigned int attr, uintptr_t *align_addr,
				size_t *align_size)
{
	uintptr_t base_addr_align;
	size_t mapped_size_align;
	int rc;

	/* Page align the address and size if necessary */
	base_addr_align = page_align(base_addr, DOWN);
	mapped_size_align = page_align(size, UP);

	/*
	 * If the base address was rounded down but the size was already page
	 * aligned, add one page so the mapping still covers the region's tail.
	 */
	if ((base_addr != base_addr_align) &&
	    (size == mapped_size_align)) {
		mapped_size_align += PAGE_SIZE;
	}

	/*
	 * Dynamically map the given region using its aligned base address
	 * and size.
	 */
	rc = mmap_add_dynamic_region((unsigned long long)base_addr_align,
				     base_addr_align,
				     mapped_size_align,
				     attr);
	if (rc == 0) {
		*align_addr = base_addr_align;
		*align_size = mapped_size_align;
	}

	return rc;
}
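
/*
 * Worked example (illustrative, assuming a 4KB PAGE_SIZE): for
 * base_addr = 0x80000100 and size = 0x2000, the region actually spans
 * 0x80000100-0x80002100. page_align() yields base_addr_align = 0x80000000
 * and mapped_size_align = 0x2000 (already aligned), so one extra page is
 * added, mapping 0x80000000-0x80003000 and covering the whole region.
 */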

static void spmd_do_sec_cpy(uintptr_t root_base_addr, uintptr_t sec_base_addr,
			    size_t size)
{
	uintptr_t root_base_addr_align, sec_base_addr_align;
	size_t root_mapped_size_align, sec_mapped_size_align;
	int rc;

	assert(root_base_addr != 0UL);
	assert(sec_base_addr != 0UL);
	assert(size != 0UL);

	/* Map the memory with required attributes */
	rc = spmd_dynamic_map_mem(root_base_addr, size, MT_RO_DATA | MT_ROOT,
				  &root_base_addr_align,
				  &root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping", "root region",
		      root_base_addr, rc);
		panic();
	}

	rc = spmd_dynamic_map_mem(sec_base_addr, size, MT_RW_DATA | MT_SECURE,
				  &sec_base_addr_align, &sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping",
		      "secure region", sec_base_addr, rc);
		panic();
	}

	/* Do copy operation */
	(void)memcpy((void *)sec_base_addr, (void *)root_base_addr, size);

	/* Unmap root memory region */
	rc = mmap_remove_dynamic_region(root_base_addr_align,
					root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "root region", root_base_addr_align, rc);
		panic();
	}

	/* Unmap secure memory region */
	rc = mmap_remove_dynamic_region(sec_base_addr_align,
					sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "secure region", sec_base_addr_align, rc);
		panic();
	}
}
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */
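
/*
 * Background (informational): with RME enabled, BL2 loads the TOS_FW_CONFIG
 * (the SPMC manifest) into root PAS memory, which is not accessible to an
 * S-EL2 SPMC. spmd_do_sec_cpy() therefore maps both windows dynamically and
 * copies the manifest from the root region into a secure region before its
 * address is handed to the SPMC (see spmd_spmc_init() below).
 */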

/*******************************************************************************
 * Loads the SPM Core manifest and initializes the SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	cpu_context_t *cpu_ctx;
	unsigned int core_id;
	uint32_t ep_attr, flags;
	int rc;
	const struct dyn_cfg_dtb_info_t *image_info __unused;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
		spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
		SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID; ensure the high bit is set */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
	     SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("%s%x.\n", "SPM Core execution state 0x",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check that S-EL2 is supported on this system, given that S-EL2
	 * is required for SPM.
	 */
	if (!is_feat_sel2_supported()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {
#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
	image_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TOS_FW_CONFIG_ID);
	assert(image_info != NULL);

	if ((image_info->config_addr == 0UL) ||
	    (image_info->secondary_config_addr == 0UL) ||
	    (image_info->config_max_size == 0UL)) {
		return -EINVAL;
	}

	/* Copy manifest from root->secure region */
	spmd_do_sec_cpy(image_info->config_addr,
			image_info->secondary_config_addr,
			image_info->config_max_size);

	/* Update ep info of BL32 */
	assert(spmc_ep_info != NULL);
	spmc_ep_info->args.arg0 = image_info->secondary_config_addr;
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */

	/* Set an initial SPMC context state for all cores. */
	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
		spm_core_context[core_id].state = SPMC_STATE_OFF;

		/* Setup an initial cpu context for the SPMC. */
		cpu_ctx = &spm_core_context[core_id].cpu_ctx;
		cm_setup_context(cpu_ctx, spmc_ep_info);

		/*
		 * Pass the core linear ID to the SPMC through x4.
		 * (TF-A implementation defined behavior helping
		 * a legacy TOS migration to adopt FF-A).
		 */
		write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
	}

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	INFO("SPM Core setup done.\n");

	/*
	 * Register an interrupt handler routing secure interrupts to SPMD
	 * while the NWd is running.
	 */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					     spmd_secure_interrupt_handler,
					     flags);
	if (rc != 0) {
		panic();
	}

	return 0;
}

/*******************************************************************************
 * Initialize context of SPM Core.
 ******************************************************************************/
int spmd_setup(void)
{
	int rc;
	void *spmc_manifest;

	/*
	 * If the SPMC is at EL3, then just initialise it directly. The
	 * extra handling needed when it runs at a lower EL does not apply.
	 */
	if (is_spmc_at_el3()) {
		/* Allow the SPMC to populate its attributes directly. */
		spmc_populate_attrs(&spmc_attrs);

		rc = spmc_setup();
		if (rc != 0) {
			WARN("SPMC initialisation failed 0x%x.\n", rc);
		}
		return 0;
	}

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return 0;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
	if (spmc_manifest == NULL) {
		WARN("Invalid or absent SPM Core manifest.\n");
		return 0;
	}

	/* Load manifest, init SPMC */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return 0;
}
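
/*
 * Boot flow sketch (informational): spmd_setup() runs during BL31 setup but
 * does not enter the SPMC itself. It only validates the manifest, prepares
 * per-core contexts and registers spmd_init() via bl31_register_bl32_init().
 * BL31 then invokes spmd_init() later on the primary core, which performs
 * the first synchronous entry into the SPMC.
 */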

/*******************************************************************************
 * Forward FF-A SMCs to the other security state.
 ******************************************************************************/
uint64_t spmd_smc_switch_state(uint32_t smc_fid,
			       bool secure_origin,
			       uint64_t x1,
			       uint64_t x2,
			       uint64_t x3,
			       uint64_t x4,
			       void *handle)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

	/* Save incoming security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_in == NON_SECURE) {
		cm_el1_sysregs_context_save(secure_state_in);
	}
	cm_el2_sysregs_context_save(secure_state_in);
#else
	cm_el1_sysregs_context_save(secure_state_in);
#endif

	/* Restore outgoing security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_out == NON_SECURE) {
		cm_el1_sysregs_context_restore(secure_state_out);
	}
	cm_el2_sysregs_context_restore(secure_state_out);
#else
	cm_el1_sysregs_context_restore(secure_state_out);
#endif
	cm_set_next_eret_context(secure_state_out);

#if SPMD_SPM_AT_SEL2
	/*
	 * If the SPMC is at S-EL2, pass through the additional registers
	 * x8-x17, which may be used by FF-A calls such as
	 * FFA_PARTITION_INFO_GET_REGS. Technically all SPMCs could support
	 * this, but the code is kept under this ifdef to minimize breakage
	 * for SPMCs that do not save and restore x8-x17.
	 * Passing these registers through is also required because not all
	 * FF-A ABIs modify x8-x17; for those that do not, SMCCC requires the
	 * registers to be preserved. The SPMD therefore forwards them and
	 * expects the SPMC to save, restore and potentially modify them.
	 */
	SMC_RET18(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
		  SMC_GET_GP(handle, CTX_GPREG_X5),
		  SMC_GET_GP(handle, CTX_GPREG_X6),
		  SMC_GET_GP(handle, CTX_GPREG_X7),
		  SMC_GET_GP(handle, CTX_GPREG_X8),
		  SMC_GET_GP(handle, CTX_GPREG_X9),
		  SMC_GET_GP(handle, CTX_GPREG_X10),
		  SMC_GET_GP(handle, CTX_GPREG_X11),
		  SMC_GET_GP(handle, CTX_GPREG_X12),
		  SMC_GET_GP(handle, CTX_GPREG_X13),
		  SMC_GET_GP(handle, CTX_GPREG_X14),
		  SMC_GET_GP(handle, CTX_GPREG_X15),
		  SMC_GET_GP(handle, CTX_GPREG_X16),
		  SMC_GET_GP(handle, CTX_GPREG_X17)
		  );

#else
	SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
		 SMC_GET_GP(handle, CTX_GPREG_X5),
		 SMC_GET_GP(handle, CTX_GPREG_X6),
		 SMC_GET_GP(handle, CTX_GPREG_X7));
#endif
}
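
/*
 * Informational: the direction of the switch above is derived solely from
 * secure_origin. For example, a call trapped from the normal world
 * (secure_origin == false) saves the NON_SECURE context, restores the
 * SECURE context and ERETs into the SPMC with the SMC arguments propagated
 * in x0-x7 (or x0-x17 at S-EL2, as above).
 */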

/*******************************************************************************
 * Forward SMCs to the other security state.
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	if (is_spmc_at_el3() && !secure_origin) {
		return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);
	}
	return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4,
				     handle);
}

/*******************************************************************************
 * Return FFA_ERROR with the specified error code.
 ******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, (uint32_t)FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}
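
/*
 * Illustrative register layout of the FFA_ERROR response produced above,
 * per the FF-A ABI:
 *
 *	w0: FFA_ERROR function ID
 *	w1: target information (MBZ at this FF-A instance)
 *	w2: error code, e.g. FFA_ERROR_NOT_SUPPORTED
 *	w3-w7: MBZ
 */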

/*******************************************************************************
 * spmd_check_address_in_binary_image
 ******************************************************************************/
bool spmd_check_address_in_binary_image(uint64_t address)
{
	assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));

	return ((address >= spmc_attrs.load_address) &&
		(address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
}

/******************************************************************************
 * spmd_is_spmc_message
 *****************************************************************************/
static bool spmd_is_spmc_message(unsigned int ep)
{
	if (is_spmc_at_el3()) {
		return false;
	}

	return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
		&& (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
}
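
/*
 * Example (illustrative): in a direct message, w1 packs the source endpoint
 * in its upper halfword and the destination in its lower halfword. Assuming
 * a hypothetical SPMC ID of 0x8000 (high bit set, as enforced in
 * spmd_spmc_init()), a message from the SPMC to the SPMD would carry
 * ep = (0x8000 << FFA_DIRECT_MSG_SOURCE_SHIFT) | SPMD_DIRECT_MSG_ENDPOINT_ID
 * and spmd_is_spmc_message(ep) would return true.
 */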

/******************************************************************************
 * spmd_handle_spmc_message
 *****************************************************************************/
static int spmd_handle_spmc_message(unsigned long long msg,
		unsigned long long parm1, unsigned long long parm2,
		unsigned long long parm3, unsigned long long parm4)
{
	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
		msg, parm1, parm2, parm3, parm4);

	/* No SPMC to SPMD messages are currently handled. */
	return -EINVAL;
}

/*******************************************************************************
 * This function forwards FF-A SMCs to either the main SPMD handler or the
 * SPMC at EL3, depending on the origin security state, if enabled.
 ******************************************************************************/
uint64_t spmd_ffa_smc_handler(uint32_t smc_fid,
			      uint64_t x1,
			      uint64_t x2,
			      uint64_t x3,
			      uint64_t x4,
			      void *cookie,
			      void *handle,
			      uint64_t flags)
{
	if (is_spmc_at_el3()) {
		/*
		 * If we have an SPMC at EL3 allow handling of the SMC first.
		 * The SPMC will call back through to the SPMD handler if
		 * required.
		 */
		if (is_caller_secure(flags)) {
			return spmc_smc_handler(smc_fid,
						is_caller_secure(flags),
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
	}
	return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
				handle, flags);
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	unsigned int linear_id = plat_my_core_pos();
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int32_t ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		linear_id, smc_fid, x1, x2, x3, x4,
		SMC_GET_GP(handle, CTX_GPREG_X5),
		SMC_GET_GP(handle, CTX_GPREG_X6),
		SMC_GET_GP(handle, CTX_GPREG_X7));

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * If the caller is secure and the SPMC was initialized,
		 * return the SPMD's FFA_VERSION.
		 * If the caller is non-secure and the SPMC was initialized,
		 * forward to the EL3 SPMC if enabled, otherwise return
		 * the SPMC version if implemented at a lower EL.
		 * Sanity check "input_version".
		 * If the EL3 SPMC is enabled, ignore the SPMC state as
		 * it is not used.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			if (is_spmc_at_el3()) {
				/*
				 * Forward the call directly to the EL3 SPMC,
				 * if enabled, as we don't need to wrap the
				 * call in a direct request.
				 */
				return spmd_smc_forward(smc_fid, secure_origin,
							x1, x2, x3, x4, cookie,
							handle, flags);
			}

			gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
			uint64_t rc;

			if (spmc_attrs.major_version == 1 &&
			    spmc_attrs.minor_version == 0) {
				ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
						       spmc_attrs.minor_version);
				SMC_RET8(handle, (uint32_t)ret,
					 FFA_TARGET_INFO_MBZ,
					 FFA_TARGET_INFO_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
				break;
			}
			/* Save non-secure system registers context */
			cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
			cm_el2_sysregs_context_save(NON_SECURE);
#endif

			/*
			 * The incoming request has FFA_VERSION as X0 smc_fid
			 * and requested version in x1. Prepare a direct request
			 * from SPMD to SPMC with FFA_VERSION framework function
			 * identifier in X2 and requested version in X3.
			 */
			spmd_build_spmc_message(gpregs,
						SPMD_FWK_MSG_FFA_VERSION_REQ,
						input_version);

			rc = spmd_spm_core_sync_entry(ctx);

			if ((rc != 0ULL) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X0) !=
				FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X2) !=
				(FFA_FWK_MSG_BIT |
				 SPMD_FWK_MSG_FFA_VERSION_RESP))) {
				ERROR("Failed to forward FFA_VERSION\n");
				ret = FFA_ERROR_NOT_SUPPORTED;
			} else {
				ret = SMC_GET_GP(gpregs, CTX_GPREG_X3);
			}

			/*
			 * Return here after SPMC has handled FFA_VERSION.
			 * The returned SPMC version is held in X3.
			 * Forward this version in X0 to the non-secure caller.
			 */
			return spmd_smc_forward(ret, true, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, cookie, gpregs,
						flags);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
			 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */

	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to SPM Core which will handle it if implemented.
		 */

		/* Forward SMC from Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}

		/*
		 * Return success if call was from secure world i.e. all
		 * FFA functions are supported. This is essentially a
		 * nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FFA component.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
		if (secure_origin) {
			ret = spmd_pm_secondary_ep_register(x1);

			if (ret < 0) {
				SMC_RET8(handle, FFA_ERROR_SMC64,
					 FFA_TARGET_INFO_MBZ, ret,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			} else {
				SMC_RET8(handle, FFA_SUCCESS_SMC64,
					 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			}
		}

		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* Not reached */

	case FFA_SPM_ID_GET:
		if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		/*
		 * Returns the ID of the SPMC or SPMD depending on the FF-A
		 * instance where this function is invoked.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}
		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
		if (!secure_origin) {
			/* Validate source endpoint is non-secure for non-secure caller. */
			if (ffa_is_secure_world_id(ffa_endpoint_source(x1))) {
				return spmd_ffa_error_return(handle,
						FFA_ERROR_INVALID_PARAMETER);
			}
		}
		if (secure_origin && spmd_is_spmc_message(x1)) {
			ret = spmd_handle_spmc_message(x3, x4,
					SMC_GET_GP(handle, CTX_GPREG_X5),
					SMC_GET_GP(handle, CTX_GPREG_X6),
					SMC_GET_GP(handle, CTX_GPREG_X7));

			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, ret,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
		break; /* Not reached */

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
		if (secure_origin && spmd_is_spmc_message(x1)) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
		break; /* Not reached */

	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_PARTITION_INFO_GET:
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_NOTIFICATION_BITMAP_CREATE:
	case FFA_NOTIFICATION_BITMAP_DESTROY:
	case FFA_NOTIFICATION_BIND:
	case FFA_NOTIFICATION_UNBIND:
	case FFA_NOTIFICATION_SET:
	case FFA_NOTIFICATION_GET:
	case FFA_NOTIFICATION_INFO_GET:
	case FFA_NOTIFICATION_INFO_GET_SMC64:
	case FFA_MSG_SEND2:
	case FFA_RX_ACQUIRE:
#endif
	case FFA_MSG_RUN:
		/*
		 * The above calls should be invoked only by the Normal world
		 * and must not be forwarded from the Secure world to the
		 * Normal world.
		 */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_MSG_SEND:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_MEM_FRAG_TX:
	case FFA_MEM_FRAG_RX:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * TODO: Assume that no requests originate from EL3 at the
		 * moment. This will change if a SP service is required in
		 * response to secure interrupts targeted to EL3. Until then
		 * simply forward the call to the Normal world.
		 */

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that the
		 * SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0ULL);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_INTERRUPT:
	case FFA_MSG_YIELD:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_NORMAL_WORLD_RESUME:
		if (secure_origin && ctx->secure_interrupt_ongoing) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_DENIED);
		}
		break; /* Not reached */
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_PARTITION_INFO_GET_REGS_SMC64:
		if (secure_origin) {
			/* TODO: Future patches to enable support for this */
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}

		/* Call only supported with SMCCC 1.2+ */
		if (MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION) < 0x10002) {
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* Not reached */
#endif
	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}