blob: dde1622887fd8a9a5aa847bb3c4bc138e57c1104 [file] [log] [blame]
Achin Gupta86f23532019-10-11 15:41:16 +01001/*
Manish V Badarkhe2f4279a2023-02-07 11:26:38 +00002 * Copyright (c) 2020-2023, Arm Limited and Contributors. All rights reserved.
Achin Gupta86f23532019-10-11 15:41:16 +01003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <assert.h>
8#include <errno.h>
Scott Brandene5dcf982020-08-25 13:49:32 -07009#include <inttypes.h>
10#include <stdint.h>
Achin Gupta86f23532019-10-11 15:41:16 +010011#include <string.h>
12
13#include <arch_helpers.h>
Olivier Deprez2bae35f2020-04-16 13:39:06 +020014#include <arch/aarch64/arch_features.h>
Achin Gupta86f23532019-10-11 15:41:16 +010015#include <bl31/bl31.h>
Olivier Depreza664c492020-08-05 11:27:42 +020016#include <bl31/interrupt_mgmt.h>
Achin Gupta86f23532019-10-11 15:41:16 +010017#include <common/debug.h>
18#include <common/runtime_svc.h>
Manish V Badarkhe2f4279a2023-02-07 11:26:38 +000019#include <common/tbbr/tbbr_img_def.h>
Achin Gupta86f23532019-10-11 15:41:16 +010020#include <lib/el3_runtime/context_mgmt.h>
Manish V Badarkhe2f4279a2023-02-07 11:26:38 +000021#include <lib/fconf/fconf.h>
22#include <lib/fconf/fconf_dyn_cfg_getter.h>
Achin Gupta86f23532019-10-11 15:41:16 +010023#include <lib/smccc.h>
24#include <lib/spinlock.h>
25#include <lib/utils.h>
Manish V Badarkhe2f4279a2023-02-07 11:26:38 +000026#include <lib/xlat_tables/xlat_tables_v2.h>
Achin Gupta86f23532019-10-11 15:41:16 +010027#include <plat/common/common_def.h>
28#include <plat/common/platform.h>
29#include <platform_def.h>
J-Alves2672cde2020-05-07 18:42:25 +010030#include <services/ffa_svc.h>
Marc Bonnici1c33cc32021-11-29 17:57:03 +000031#include <services/spmc_svc.h>
Achin Gupta86f23532019-10-11 15:41:16 +010032#include <services/spmd_svc.h>
33#include <smccc_helpers.h>
34#include "spmd_private.h"
35
/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
/* Per-CPU SPMC context, indexed by the core's linear position. */
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information is read from its manifest if the SPMC is not
 * at EL3. Else, it is populated from the SPMC directly.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

53/*******************************************************************************
Olivier Deprez73ef0dc2020-06-19 15:33:41 +020054 * SPM Core context on CPU based on mpidr.
55 ******************************************************************************/
56spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
57{
Max Shvetsovf80c64d2020-08-25 11:50:18 +010058 int core_idx = plat_core_pos_by_mpidr(mpidr);
59
60 if (core_idx < 0) {
Scott Brandene5dcf982020-08-25 13:49:32 -070061 ERROR("Invalid mpidr: %" PRIx64 ", returned ID: %d\n", mpidr, core_idx);
Max Shvetsovf80c64d2020-08-25 11:50:18 +010062 panic();
63 }
64
65 return &spm_core_context[core_idx];
Olivier Deprez73ef0dc2020-06-19 15:33:41 +020066}
67
68/*******************************************************************************
Olivier Deprez2bae35f2020-04-16 13:39:06 +020069 * SPM Core context on current CPU get helper.
70 ******************************************************************************/
71spmd_spm_core_context_t *spmd_get_context(void)
72{
Olivier Deprez73ef0dc2020-06-19 15:33:41 +020073 return spmd_get_context_by_mpidr(read_mpidr());
Olivier Deprez2bae35f2020-04-16 13:39:06 +020074}
75
76/*******************************************************************************
Olivier Deprezc7631a52020-03-23 09:53:06 +010077 * SPM Core ID getter.
78 ******************************************************************************/
79uint16_t spmd_spmc_id_get(void)
80{
81 return spmc_attrs.spmc_id;
82}
83
/*******************************************************************************
 * Static function declaration.
 ******************************************************************************/
/* Deferred SPMC first entry, registered with bl31_register_bl32_init(). */
static int32_t spmd_init(void);
/* Parse the SPMC manifest at pm_addr and set up per-core SPMC contexts. */
static int spmd_spmc_init(void *pm_addr);
/* Populate handle's context with an FFA_ERROR return carrying error_code. */
static uint64_t spmd_ffa_error_return(void *handle,
				      int error_code);
/* Forward an SMC to the other security state (or to an EL3-resident SPMC). */
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags);

Daniel Boulby9460a232021-12-09 11:20:13 +0000101/******************************************************************************
102 * Builds an SPMD to SPMC direct message request.
103 *****************************************************************************/
104void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
105 unsigned long long message)
106{
107 write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
108 write_ctx_reg(gpregs, CTX_GPREG_X1,
109 (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
110 spmd_spmc_id_get());
111 write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
112 write_ctx_reg(gpregs, CTX_GPREG_X3, message);
113}
114
115
/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 *
 * spmc_ctx: per-CPU SPMC context to run; must not be NULL.
 * Returns the value the SPMC handed back through spmd_spm_core_sync_exit().
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	/* Make this context the current one for the secure state. */
	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#else
	cm_el1_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC; returns when the SPMC calls spmd_spm_core_sync_exit(). */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#else
	cm_el1_sysregs_context_save(SECURE);
#endif

	return rc;
}
148
/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally. It does not return to its caller; rc becomes the
 * return value of the matching synchronous entry (in x0).
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0;
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	/* Unreachable: spmd_spm_core_exit() never returns. */
	panic();
}
169
170/*******************************************************************************
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200171 * Jump to the SPM Core for the first time.
Achin Gupta86f23532019-10-11 15:41:16 +0100172 ******************************************************************************/
173static int32_t spmd_init(void)
174{
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200175 spmd_spm_core_context_t *ctx = spmd_get_context();
176 uint64_t rc;
Achin Gupta86f23532019-10-11 15:41:16 +0100177
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200178 VERBOSE("SPM Core init start.\n");
Olivier Deprez7c016332019-10-28 09:03:13 +0000179
Olivier Deprez4ab7a4a2021-06-21 09:47:13 +0200180 /* Primary boot core enters the SPMC for initialization. */
181 ctx->state = SPMC_STATE_ON_PENDING;
Achin Gupta86f23532019-10-11 15:41:16 +0100182
183 rc = spmd_spm_core_sync_entry(ctx);
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200184 if (rc != 0ULL) {
Scott Brandene5dcf982020-08-25 13:49:32 -0700185 ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200186 return 0;
Achin Gupta86f23532019-10-11 15:41:16 +0100187 }
188
Olivier Deprez7c016332019-10-28 09:03:13 +0000189 ctx->state = SPMC_STATE_ON;
190
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200191 VERBOSE("SPM Core init end.\n");
Achin Gupta86f23532019-10-11 15:41:16 +0100192
193 return 1;
194}
195
/*******************************************************************************
 * spmd_secure_interrupt_handler
 * Enter the SPMC for further handling of the secure interrupt by the SPMC
 * itself or a Secure Partition.
 * Called while the normal world runs; saves the NS context, signals the
 * SPMC via FFA_INTERRUPT, then restores and resumes the normal world.
 ******************************************************************************/
static uint64_t spmd_secure_interrupt_handler(uint32_t id,
					      uint32_t flags,
					      void *handle,
					      void *cookie)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
	unsigned int linear_id = plat_my_core_pos();
	int64_t rc;

	/* Sanity check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering SPMC */
	cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(NON_SECURE);
#endif

	/* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
	write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

	/* Mark current core as handling a secure interrupt. */
	ctx->secure_interrupt_ongoing = true;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		/* Log and continue: the NS context must still be resumed. */
		ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, linear_id);
	}

	ctx->secure_interrupt_ongoing = false;

	cm_el1_sysregs_context_restore(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(NON_SECURE);
#endif
	cm_set_next_eret_context(NON_SECURE);

	SMC_RET0(&ctx->cpu_ctx);
}
251
Manish V Badarkhe2f4279a2023-02-07 11:26:38 +0000252#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
253static int spmd_dynamic_map_mem(uintptr_t base_addr, size_t size,
254 unsigned int attr, uintptr_t *align_addr,
255 size_t *align_size)
256{
257 uintptr_t base_addr_align;
258 size_t mapped_size_align;
259 int rc;
260
261 /* Page aligned address and size if necessary */
262 base_addr_align = page_align(base_addr, DOWN);
263 mapped_size_align = page_align(size, UP);
264
265 if ((base_addr != base_addr_align) &&
266 (size == mapped_size_align)) {
267 mapped_size_align += PAGE_SIZE;
268 }
269
270 /*
271 * Map dynamically given region with its aligned base address and
272 * size
273 */
274 rc = mmap_add_dynamic_region((unsigned long long)base_addr_align,
275 base_addr_align,
276 mapped_size_align,
277 attr);
278 if (rc == 0) {
279 *align_addr = base_addr_align;
280 *align_size = mapped_size_align;
281 }
282
283 return rc;
284}
285
/*
 * Copy size bytes from a Root-world region to a Secure region.
 *
 * Both regions are mapped dynamically for the duration of the copy
 * (root as RO, secure as RW) and unmapped afterwards. Any mapping or
 * unmapping failure is fatal (panic).
 */
static void spmd_do_sec_cpy(uintptr_t root_base_addr, uintptr_t sec_base_addr,
			    size_t size)
{
	uintptr_t root_base_addr_align, sec_base_addr_align;
	size_t root_mapped_size_align, sec_mapped_size_align;
	int rc;

	assert(root_base_addr != 0UL);
	assert(sec_base_addr != 0UL);
	assert(size != 0UL);

	/* Map the memory with required attributes */
	rc = spmd_dynamic_map_mem(root_base_addr, size, MT_RO_DATA | MT_ROOT,
				  &root_base_addr_align,
				  &root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping", "root region",
		      root_base_addr, rc);
		panic();
	}

	rc = spmd_dynamic_map_mem(sec_base_addr, size, MT_RW_DATA | MT_SECURE,
				  &sec_base_addr_align, &sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping",
		      "secure region", sec_base_addr, rc);
		panic();
	}

	/* Do copy operation */
	(void)memcpy((void *)sec_base_addr, (void *)root_base_addr, size);

	/* Unmap root memory region */
	rc = mmap_remove_dynamic_region(root_base_addr_align,
					root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "root region", root_base_addr_align, rc);
		panic();
	}

	/* Unmap secure memory region */
	rc = mmap_remove_dynamic_region(sec_base_addr_align,
					sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "secure region", sec_base_addr_align, rc);
		panic();
	}
}
336#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */
337
Olivier Depreza664c492020-08-05 11:27:42 +0200338/*******************************************************************************
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200339 * Loads SPMC manifest and inits SPMC.
Achin Gupta86f23532019-10-11 15:41:16 +0100340 ******************************************************************************/
Olivier Deprez69ca84a2020-02-07 15:44:43 +0100341static int spmd_spmc_init(void *pm_addr)
Achin Gupta86f23532019-10-11 15:41:16 +0100342{
Olivier Deprez4ab7a4a2021-06-21 09:47:13 +0200343 cpu_context_t *cpu_ctx;
344 unsigned int core_id;
Olivier Depreza664c492020-08-05 11:27:42 +0200345 uint32_t ep_attr, flags;
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200346 int rc;
Manish V Badarkhe2f4279a2023-02-07 11:26:38 +0000347 const struct dyn_cfg_dtb_info_t *image_info __unused;
Achin Gupta86f23532019-10-11 15:41:16 +0100348
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200349 /* Load the SPM Core manifest */
Olivier Deprez69ca84a2020-02-07 15:44:43 +0100350 rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
Max Shvetsov745889c2020-02-27 14:54:21 +0000351 if (rc != 0) {
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200352 WARN("No or invalid SPM Core manifest image provided by BL2\n");
353 return rc;
Achin Gupta86f23532019-10-11 15:41:16 +0100354 }
355
356 /*
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200357 * Ensure that the SPM Core version is compatible with the SPM
358 * Dispatcher version.
Achin Gupta86f23532019-10-11 15:41:16 +0100359 */
J-Alves2672cde2020-05-07 18:42:25 +0100360 if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
361 (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
362 WARN("Unsupported FFA version (%u.%u)\n",
Achin Gupta86f23532019-10-11 15:41:16 +0100363 spmc_attrs.major_version, spmc_attrs.minor_version);
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200364 return -EINVAL;
Achin Gupta86f23532019-10-11 15:41:16 +0100365 }
366
J-Alves2672cde2020-05-07 18:42:25 +0100367 VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
Achin Gupta86f23532019-10-11 15:41:16 +0100368 spmc_attrs.minor_version);
369
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200370 VERBOSE("SPM Core run time EL%x.\n",
Max Shvetsove7fd80e2020-02-25 13:55:00 +0000371 SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);
Achin Gupta86f23532019-10-11 15:41:16 +0100372
Max Shvetsove79062e2020-03-12 15:16:40 +0000373 /* Validate the SPMC ID, Ensure high bit is set */
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200374 if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
375 SPMC_SECURE_ID_MASK) == 0U) {
376 WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
377 return -EINVAL;
Max Shvetsove79062e2020-03-12 15:16:40 +0000378 }
379
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200380 /* Validate the SPM Core execution state */
Achin Gupta86f23532019-10-11 15:41:16 +0100381 if ((spmc_attrs.exec_state != MODE_RW_64) &&
382 (spmc_attrs.exec_state != MODE_RW_32)) {
Olivier Deprez69ca84a2020-02-07 15:44:43 +0100383 WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
Achin Gupta86f23532019-10-11 15:41:16 +0100384 spmc_attrs.exec_state);
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200385 return -EINVAL;
Achin Gupta86f23532019-10-11 15:41:16 +0100386 }
387
Olivier Deprez69ca84a2020-02-07 15:44:43 +0100388 VERBOSE("%s%x.\n", "SPM Core execution state 0x",
389 spmc_attrs.exec_state);
Achin Gupta86f23532019-10-11 15:41:16 +0100390
Max Shvetsove7fd80e2020-02-25 13:55:00 +0000391#if SPMD_SPM_AT_SEL2
392 /* Ensure manifest has not requested AArch32 state in S-EL2 */
393 if (spmc_attrs.exec_state == MODE_RW_32) {
394 WARN("AArch32 state at S-EL2 is not supported.\n");
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200395 return -EINVAL;
Achin Gupta86f23532019-10-11 15:41:16 +0100396 }
397
398 /*
399 * Check if S-EL2 is supported on this system if S-EL2
400 * is required for SPM
401 */
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200402 if (!is_armv8_4_sel2_present()) {
403 WARN("SPM Core run time S-EL2 is not supported.\n");
404 return -EINVAL;
Achin Gupta86f23532019-10-11 15:41:16 +0100405 }
Max Shvetsove7fd80e2020-02-25 13:55:00 +0000406#endif /* SPMD_SPM_AT_SEL2 */
Achin Gupta86f23532019-10-11 15:41:16 +0100407
408 /* Initialise an entrypoint to set up the CPU context */
409 ep_attr = SECURE | EP_ST_ENABLE;
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200410 if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
Achin Gupta86f23532019-10-11 15:41:16 +0100411 ep_attr |= EP_EE_BIG;
Max Shvetsov745889c2020-02-27 14:54:21 +0000412 }
413
Achin Gupta86f23532019-10-11 15:41:16 +0100414 SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);
Achin Gupta86f23532019-10-11 15:41:16 +0100415
416 /*
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200417 * Populate SPSR for SPM Core based upon validated parameters from the
418 * manifest.
Achin Gupta86f23532019-10-11 15:41:16 +0100419 */
420 if (spmc_attrs.exec_state == MODE_RW_32) {
421 spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
422 SPSR_E_LITTLE,
423 DAIF_FIQ_BIT |
424 DAIF_IRQ_BIT |
425 DAIF_ABT_BIT);
426 } else {
Max Shvetsove7fd80e2020-02-25 13:55:00 +0000427
428#if SPMD_SPM_AT_SEL2
429 static const uint32_t runtime_el = MODE_EL2;
430#else
431 static const uint32_t runtime_el = MODE_EL1;
432#endif
433 spmc_ep_info->spsr = SPSR_64(runtime_el,
Achin Gupta86f23532019-10-11 15:41:16 +0100434 MODE_SP_ELX,
435 DISABLE_ALL_EXCEPTIONS);
436 }
437
Manish V Badarkhe2f4279a2023-02-07 11:26:38 +0000438#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
439 image_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TOS_FW_CONFIG_ID);
440 assert(image_info != NULL);
441
442 if ((image_info->config_addr == 0UL) ||
443 (image_info->secondary_config_addr == 0UL) ||
444 (image_info->config_max_size == 0UL)) {
445 return -EINVAL;
446 }
447
448 /* Copy manifest from root->secure region */
449 spmd_do_sec_cpy(image_info->config_addr,
450 image_info->secondary_config_addr,
451 image_info->config_max_size);
452
453 /* Update ep info of BL32 */
454 assert(spmc_ep_info != NULL);
455 spmc_ep_info->args.arg0 = image_info->secondary_config_addr;
456#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */
457
Olivier Deprez4ab7a4a2021-06-21 09:47:13 +0200458 /* Set an initial SPMC context state for all cores. */
459 for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
460 spm_core_context[core_id].state = SPMC_STATE_OFF;
Max Shvetsov745889c2020-02-27 14:54:21 +0000461
Olivier Deprez4ab7a4a2021-06-21 09:47:13 +0200462 /* Setup an initial cpu context for the SPMC. */
463 cpu_ctx = &spm_core_context[core_id].cpu_ctx;
464 cm_setup_context(cpu_ctx, spmc_ep_info);
Achin Gupta86f23532019-10-11 15:41:16 +0100465
Olivier Deprez4ab7a4a2021-06-21 09:47:13 +0200466 /*
467 * Pass the core linear ID to the SPMC through x4.
468 * (TF-A implementation defined behavior helping
469 * a legacy TOS migration to adopt FF-A).
470 */
471 write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
472 }
Achin Gupta86f23532019-10-11 15:41:16 +0100473
Olivier Deprez9afca122019-10-28 09:15:52 +0000474 /* Register power management hooks with PSCI */
475 psci_register_spd_pm_hook(&spmd_pm);
476
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200477 /* Register init function for deferred init. */
Achin Gupta86f23532019-10-11 15:41:16 +0100478 bl31_register_bl32_init(&spmd_init);
479
Olivier Deprez4ab7a4a2021-06-21 09:47:13 +0200480 INFO("SPM Core setup done.\n");
481
Olivier Depreza664c492020-08-05 11:27:42 +0200482 /*
483 * Register an interrupt handler routing secure interrupts to SPMD
484 * while the NWd is running.
485 */
486 flags = 0;
487 set_interrupt_rm_flag(flags, NON_SECURE);
488 rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
489 spmd_secure_interrupt_handler,
490 flags);
491 if (rc != 0) {
492 panic();
493 }
494
Achin Gupta86f23532019-10-11 15:41:16 +0100495 return 0;
Max Shvetsov745889c2020-02-27 14:54:21 +0000496}
Achin Gupta86f23532019-10-11 15:41:16 +0100497
Max Shvetsov745889c2020-02-27 14:54:21 +0000498/*******************************************************************************
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200499 * Initialize context of SPM Core.
Max Shvetsov745889c2020-02-27 14:54:21 +0000500 ******************************************************************************/
501int spmd_setup(void)
502{
503 int rc;
Marc Bonnici1c33cc32021-11-29 17:57:03 +0000504 void *spmc_manifest;
505
506 /*
507 * If the SPMC is at EL3, then just initialise it directly. The
508 * shenanigans of when it is at a lower EL are not needed.
509 */
510 if (is_spmc_at_el3()) {
511 /* Allow the SPMC to populate its attributes directly. */
512 spmc_populate_attrs(&spmc_attrs);
513
514 rc = spmc_setup();
515 if (rc != 0) {
516 ERROR("SPMC initialisation failed 0x%x.\n", rc);
517 }
518 return rc;
519 }
Achin Gupta86f23532019-10-11 15:41:16 +0100520
Max Shvetsov745889c2020-02-27 14:54:21 +0000521 spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200522 if (spmc_ep_info == NULL) {
523 WARN("No SPM Core image provided by BL2 boot loader.\n");
524 return -EINVAL;
Max Shvetsov745889c2020-02-27 14:54:21 +0000525 }
526
527 /* Under no circumstances will this parameter be 0 */
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200528 assert(spmc_ep_info->pc != 0ULL);
Max Shvetsov745889c2020-02-27 14:54:21 +0000529
530 /*
531 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200532 * be used as a manifest for the SPM Core at the next lower EL/mode.
Max Shvetsov745889c2020-02-27 14:54:21 +0000533 */
Olivier Deprez69ca84a2020-02-07 15:44:43 +0100534 spmc_manifest = (void *)spmc_ep_info->args.arg0;
535 if (spmc_manifest == NULL) {
536 ERROR("Invalid or absent SPM Core manifest.\n");
537 return -EINVAL;
Max Shvetsov745889c2020-02-27 14:54:21 +0000538 }
539
540 /* Load manifest, init SPMC */
Olivier Deprez69ca84a2020-02-07 15:44:43 +0100541 rc = spmd_spmc_init(spmc_manifest);
Max Shvetsov745889c2020-02-27 14:54:21 +0000542 if (rc != 0) {
Olivier Deprez2bae35f2020-04-16 13:39:06 +0200543 WARN("Booting device without SPM initialization.\n");
Max Shvetsov745889c2020-02-27 14:54:21 +0000544 }
545
Olivier Deprez69ca84a2020-02-07 15:44:43 +0100546 return rc;
Max Shvetsov745889c2020-02-27 14:54:21 +0000547}
548
/*******************************************************************************
 * Forward FF-A SMCs to the other security state.
 * Saves the incoming world's sysreg context, restores the outgoing
 * world's, and ERETs into it with the SMC arguments in x0-x7.
 * Does not return to the caller's world.
 ******************************************************************************/
uint64_t spmd_smc_switch_state(uint32_t smc_fid,
			       bool secure_origin,
			       uint64_t x1,
			       uint64_t x2,
			       uint64_t x3,
			       uint64_t x4,
			       void *handle)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

	/* Save incoming security state */
#if SPMD_SPM_AT_SEL2
	/* NS EL1 state is managed by EL3 only when coming from NS. */
	if (secure_state_in == NON_SECURE) {
		cm_el1_sysregs_context_save(secure_state_in);
	}
	cm_el2_sysregs_context_save(secure_state_in);
#else
	cm_el1_sysregs_context_save(secure_state_in);
#endif

	/* Restore outgoing security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_out == NON_SECURE) {
		cm_el1_sysregs_context_restore(secure_state_out);
	}
	cm_el2_sysregs_context_restore(secure_state_out);
#else
	cm_el1_sysregs_context_restore(secure_state_out);
#endif
	cm_set_next_eret_context(secure_state_out);

	/* x5-x7 are propagated unchanged from the incoming context. */
	SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
		 SMC_GET_GP(handle, CTX_GPREG_X5),
		 SMC_GET_GP(handle, CTX_GPREG_X6),
		 SMC_GET_GP(handle, CTX_GPREG_X7));
}
589
590/*******************************************************************************
Marc Bonnicida2c9e12021-11-29 18:02:45 +0000591 * Forward SMCs to the other security state.
592 ******************************************************************************/
593static uint64_t spmd_smc_forward(uint32_t smc_fid,
594 bool secure_origin,
595 uint64_t x1,
596 uint64_t x2,
597 uint64_t x3,
598 uint64_t x4,
599 void *cookie,
600 void *handle,
601 uint64_t flags)
602{
603 if (is_spmc_at_el3() && !secure_origin) {
604 return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4,
605 cookie, handle, flags);
606 }
607 return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4,
608 handle);
609
610}
611
/*******************************************************************************
 * Return FFA_ERROR with specified error code
 * handle: context of the caller that receives the error.
 * error_code: FF-A error status placed in w2.
 ******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	/* Per the FF-A spec: w0=FFA_ERROR, w1=target info (MBZ), w2=status. */
	SMC_RET8(handle, (uint32_t) FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}
622
Olivier Deprez33e44122020-04-16 17:54:27 +0200623/*******************************************************************************
624 * spmd_check_address_in_binary_image
625 ******************************************************************************/
626bool spmd_check_address_in_binary_image(uint64_t address)
627{
628 assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));
629
630 return ((address >= spmc_attrs.load_address) &&
631 (address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
632}
633
Olivier Deprezebc34772020-04-16 16:59:21 +0200634/******************************************************************************
635 * spmd_is_spmc_message
636 *****************************************************************************/
637static bool spmd_is_spmc_message(unsigned int ep)
638{
Marc Bonnicida2c9e12021-11-29 18:02:45 +0000639 if (is_spmc_at_el3()) {
640 return false;
641 }
642
Olivier Deprezebc34772020-04-16 16:59:21 +0200643 return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
644 && (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
645}
646
/******************************************************************************
 * spmd_handle_spmc_message
 * Handle a direct message request from the SPMC to the SPMD.
 * No SPMC->SPMD messages are currently implemented: the parameters are
 * traced and -EINVAL is returned unconditionally.
 *****************************************************************************/
static int spmd_handle_spmc_message(unsigned long long msg,
		unsigned long long parm1, unsigned long long parm2,
		unsigned long long parm3, unsigned long long parm4)
{
	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
		msg, parm1, parm2, parm3, parm4);

	return -EINVAL;
}
659
Achin Gupta86f23532019-10-11 15:41:16 +0100660/*******************************************************************************
Marc Bonnicida2c9e12021-11-29 18:02:45 +0000661 * This function forwards FF-A SMCs to either the main SPMD handler or the
662 * SPMC at EL3, depending on the origin security state, if enabled.
663 ******************************************************************************/
664uint64_t spmd_ffa_smc_handler(uint32_t smc_fid,
665 uint64_t x1,
666 uint64_t x2,
667 uint64_t x3,
668 uint64_t x4,
669 void *cookie,
670 void *handle,
671 uint64_t flags)
672{
673 if (is_spmc_at_el3()) {
674 /*
675 * If we have an SPMC at EL3 allow handling of the SMC first.
676 * The SPMC will call back through to SPMD handler if required.
677 */
678 if (is_caller_secure(flags)) {
679 return spmc_smc_handler(smc_fid,
680 is_caller_secure(flags),
681 x1, x2, x3, x4, cookie,
682 handle, flags);
683 }
684 }
685 return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
686 handle, flags);
687}
688
/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 *
 * smc_fid  FF-A function identifier (X0 of the SMC).
 * x1-x4    Function-specific arguments; X5-X7 are read from 'handle' where a
 *          call needs them.
 * cookie   Opaque token passed through from the EL3 runtime dispatcher.
 * handle   Pointer to the caller's saved GP register context.
 * flags    SMC flags; encode the caller's security state (see
 *          is_caller_secure()).
 *
 * Every path leaves through SMC_RET8(), spmd_smc_forward(),
 * spmd_ffa_error_return() or a sync exit to the initialisation caller; the
 * trailing 'break' statements are therefore unreachable and only document the
 * switch structure.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	unsigned int linear_id = plat_my_core_pos();
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int32_t ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		linear_id, smc_fid, x1, x2, x3, x4,
		SMC_GET_GP(handle, CTX_GPREG_X5),
		SMC_GET_GP(handle, CTX_GPREG_X6),
		SMC_GET_GP(handle, CTX_GPREG_X7));

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully. The error code (x2) is handed back through
		 * the sync exit to the context that started SPMC boot.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		/* Otherwise relay the error to the other world. */
		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_VERSION:
		/* Requested version is the low 32 bits of x1. */
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * If caller is secure and SPMC was initialized,
		 * return FFA_VERSION of SPMD.
		 * If caller is non secure and SPMC was initialized,
		 * forward to the EL3 SPMC if enabled, otherwise return
		 * the SPMC version if implemented at a lower EL.
		 * Sanity check to "input_version": bit 31 must be zero.
		 * If the EL3 SPMC is enabled, ignore the SPMC state as
		 * this is not used.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			if (is_spmc_at_el3()) {
				/*
				 * Forward the call directly to the EL3 SPMC, if
				 * enabled, as we don't need to wrap the call in
				 * a direct request.
				 */
				return spmd_smc_forward(smc_fid, secure_origin,
							x1, x2, x3, x4, cookie,
							handle, flags);
			}

			gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
			uint64_t rc;

			/*
			 * A v1.0 SPMC does not understand the framework
			 * version request below, so answer on its behalf
			 * with the version it reported at boot.
			 */
			if (spmc_attrs.major_version == 1 &&
			    spmc_attrs.minor_version == 0) {
				ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
						       spmc_attrs.minor_version);
				SMC_RET8(handle, (uint32_t)ret,
					 FFA_TARGET_INFO_MBZ,
					 FFA_TARGET_INFO_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
				break;
			}
			/* Save non-secure system registers context */
			cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
			cm_el2_sysregs_context_save(NON_SECURE);
#endif

			/*
			 * The incoming request has FFA_VERSION as X0 smc_fid
			 * and requested version in x1. Prepare a direct request
			 * from SPMD to SPMC with FFA_VERSION framework function
			 * identifier in X2 and requested version in X3.
			 */
			spmd_build_spmc_message(gpregs,
						SPMD_FWK_MSG_FFA_VERSION_REQ,
						input_version);

			/* Enter the SPMC synchronously to run the request. */
			rc = spmd_spm_core_sync_entry(ctx);

			/*
			 * A valid answer is a 32-bit direct response carrying
			 * the FFA_VERSION framework response tag in X2.
			 */
			if ((rc != 0ULL) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X0) !=
				FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X2) !=
				(FFA_FWK_MSG_BIT |
				 SPMD_FWK_MSG_FFA_VERSION_RESP))) {
				ERROR("Failed to forward FFA_VERSION\n");
				ret = FFA_ERROR_NOT_SUPPORTED;
			} else {
				ret = SMC_GET_GP(gpregs, CTX_GPREG_X3);
			}

			/*
			 * Return here after SPMC has handled FFA_VERSION.
			 * The returned SPMC version is held in X3.
			 * Forward this version in X0 to the non-secure caller.
			 */
			return spmd_smc_forward(ret, true, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, cookie, gpregs,
						flags);
		} else {
			/* Secure caller: report the SPMD's own version. */
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
			 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */

	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to SPM Core which will handle it if implemented.
		 */

		/* Forward SMC from Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}

		/*
		 * Return success if call was from secure world i.e. all
		 * FFA functions are supported. This is essentially a
		 * nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FFA component: the NS endpoint
		 * ID for a normal world caller, the SPMC ID otherwise.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
		/*
		 * Secure-world-only: record the SPMC entry point used to
		 * bring up secondary cores. x1 holds the entry point address.
		 */
		if (secure_origin) {
			ret = spmd_pm_secondary_ep_register(x1);

			if (ret < 0) {
				SMC_RET8(handle, FFA_ERROR_SMC64,
					 FFA_TARGET_INFO_MBZ, ret,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			} else {
				SMC_RET8(handle, FFA_SUCCESS_SMC64,
					 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			}
		}

		/* Non-secure callers may not register the secondary EP. */
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* Not reached */

	case FFA_SPM_ID_GET:
		/* Only defined from FF-A v1.1 onwards. */
		if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		/*
		 * Returns the ID of the SPMC or SPMD depending on the FF-A
		 * instance where this function is invoked
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}
		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
		if (!secure_origin) {
			/* Validate source endpoint is non-secure for non-secure caller. */
			if (ffa_is_secure_world_id(ffa_endpoint_source(x1))) {
				return spmd_ffa_error_return(handle,
						FFA_ERROR_INVALID_PARAMETER);
			}
		}
		/*
		 * A secure direct request targeting the SPMD endpoint is an
		 * SPMC-to-SPMD message; everything else crosses worlds.
		 */
		if (secure_origin && spmd_is_spmc_message(x1)) {
			ret = spmd_handle_spmc_message(x3, x4,
				SMC_GET_GP(handle, CTX_GPREG_X5),
				SMC_GET_GP(handle, CTX_GPREG_X6),
				SMC_GET_GP(handle, CTX_GPREG_X7));

			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, ret,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
		break; /* Not reached */

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
		/*
		 * A response from the SPMC to an SPMD-originated direct
		 * request completes the earlier sync entry; other responses
		 * cross worlds.
		 */
		if (secure_origin && spmd_is_spmc_message(x1)) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
		break; /* Not reached */

	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_PARTITION_INFO_GET:
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_NOTIFICATION_BITMAP_CREATE:
	case FFA_NOTIFICATION_BITMAP_DESTROY:
	case FFA_NOTIFICATION_BIND:
	case FFA_NOTIFICATION_UNBIND:
	case FFA_NOTIFICATION_SET:
	case FFA_NOTIFICATION_GET:
	case FFA_NOTIFICATION_INFO_GET:
	case FFA_NOTIFICATION_INFO_GET_SMC64:
	case FFA_MSG_SEND2:
	case FFA_RX_ACQUIRE:
#endif
	case FFA_MSG_RUN:
		/*
		 * Above calls should be invoked only by the Normal world and
		 * must not be forwarded from Secure world to Normal world.
		 */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_MSG_SEND:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_MEM_FRAG_TX:
	case FFA_MEM_FRAG_RX:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * TODO: Assume that no requests originate from EL3 at the
		 * moment. This will change if a SP service is required in
		 * response to secure interrupts targeted to EL3. Until then
		 * simply forward the call to the Normal world.
		 */

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that the
		 * SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0ULL);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_INTERRUPT:
	case FFA_MSG_YIELD:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_NORMAL_WORLD_RESUME:
		/*
		 * Only valid while a secure interrupt is being serviced;
		 * completes the sync entry that delivered the interrupt.
		 */
		if (secure_origin && ctx->secure_interrupt_ongoing) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_DENIED);
		}
		break; /* Not reached */

	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}