/*
 * Copyright (c) 2020-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <common/tbbr/tbbr_img_def.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/fconf/fconf.h>
#include <lib/fconf/fconf_dyn_cfg_getter.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/ffa_svc.h>
#include <services/spmc_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information is read from its manifest if the SPMC is not
 * at EL3. Else, it is populated from the SPMC directly.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * SPM Core context lookup for a given CPU, identified by its mpidr.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
{
	int core_idx = plat_core_pos_by_mpidr(mpidr);

	if (core_idx < 0) {
		ERROR("Invalid mpidr: %" PRIx64 ", returned ID: %d\n", mpidr, core_idx);
		panic();
	}

	return &spm_core_context[core_idx];
}

/*******************************************************************************
 * Helper to get the SPM Core context on the current CPU.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	return spmd_get_context_by_mpidr(read_mpidr());
}

/*******************************************************************************
 * SPM Core ID getter.
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
	return spmc_attrs.spmc_id;
}

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);
static uint64_t spmd_ffa_error_return(void *handle,
				      int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags);

/******************************************************************************
 * Builds an SPMD to SPMC direct message request.
 *****************************************************************************/
void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
			     unsigned long long message)
{
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
	write_ctx_reg(gpregs, CTX_GPREG_X1,
		      (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
		      spmd_spmc_id_get());
	write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
	write_ctx_reg(gpregs, CTX_GPREG_X3, message);
}
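
/*
 * Layout note (illustrative, not part of the build): for the FFA_VERSION
 * framework request issued later in this file, the helper above leaves the
 * SPMC GP register context as:
 *
 *	X0 = FFA_MSG_SEND_DIRECT_REQ_SMC32
 *	X1 = (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT)
 *	     | SPMC ID, i.e. source endpoint in the upper 16 bits and
 *	     destination endpoint in the lower 16 bits
 *	X2 = BIT(31) | SPMD_FWK_MSG_FFA_VERSION_REQ
 *	X3 = requested version
 *
 * BIT(31) in X2 (FFA_FWK_MSG_BIT) marks this as an FF-A framework message
 * rather than a partition message.
 */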


/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#else
	cm_el1_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#else
	cm_el1_sysregs_context_save(SECURE);
#endif

	return rc;
}

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}
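
/*
 * Usage sketch (informational): the two functions above behave as a
 * co-routine pair. A typical round trip, as used by spmd_init() and the
 * FFA_VERSION handler later in this file, is:
 *
 *	spmd_build_spmc_message(gpregs, SPMD_FWK_MSG_FFA_VERSION_REQ, ver);
 *	rc = spmd_spm_core_sync_entry(ctx);	// blocks in the SPMC
 *
 * The blocked entry completes when the SPMD, while handling the SPMC's
 * FFA_MSG_SEND_DIRECT_RESP or FFA_MSG_WAIT invocation, calls
 * spmd_spm_core_sync_exit(rc), which never returns to its caller.
 */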

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;

	VERBOSE("SPM Core init start.\n");

	/* Primary boot core enters the SPMC for initialization. */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	return 1;
}

/*******************************************************************************
 * spmd_secure_interrupt_handler
 * Enter the SPMC for further handling of the secure interrupt by the SPMC
 * itself or a Secure Partition.
 ******************************************************************************/
static uint64_t spmd_secure_interrupt_handler(uint32_t id,
					      uint32_t flags,
					      void *handle,
					      void *cookie)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
	unsigned int linear_id = plat_my_core_pos();
	int64_t rc;

	/* Sanity check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this CPU's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering SPMC */
	cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(NON_SECURE);
#endif

	/* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
	write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

	/* Mark current core as handling a secure interrupt. */
	ctx->secure_interrupt_ongoing = true;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, linear_id);
	}

	ctx->secure_interrupt_ongoing = false;

	cm_el1_sysregs_context_restore(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(NON_SECURE);
#endif
	cm_set_next_eret_context(NON_SECURE);

	SMC_RET0(&ctx->cpu_ctx);
}
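
/*
 * Flow note (informational): the handler above parks the normal world and
 * injects FFA_INTERRUPT into the SPMC register context. The SPMC is expected
 * to route the S-EL1 interrupt to the relevant Secure Partition and then
 * resume the preempted normal world with FFA_NORMAL_WORLD_RESUME, which the
 * SMC handler at the bottom of this file turns into
 * spmd_spm_core_sync_exit(0ULL), completing this synchronous entry.
 */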

#if (EL3_EXCEPTION_HANDLING == 0)
/*******************************************************************************
 * spmd_group0_interrupt_handler_nwd
 * Group0 secure interrupts triggered in the normal world are trapped to EL3.
 * Delegate the handling of the interrupt to the platform handler, and return
 * only upon successfully handling the Group0 interrupt.
 ******************************************************************************/
static uint64_t spmd_group0_interrupt_handler_nwd(uint32_t id,
						  uint32_t flags,
						  void *handle,
						  void *cookie)
{
	uint32_t intid;

	/* Sanity check the security state when the exception was generated. */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this CPU's context. */
	assert(handle == cm_get_context(NON_SECURE));

	assert(id == INTR_ID_UNAVAILABLE);

	assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3);

	intid = plat_ic_acknowledge_interrupt();

	if (plat_spmd_handle_group0_interrupt(intid) < 0) {
		ERROR("Group0 interrupt %u not handled\n", intid);
		panic();
	}

	/* Deactivate the corresponding Group0 interrupt. */
	plat_ic_end_of_interrupt(intid);

	return 0U;
}
#endif

/*******************************************************************************
 * spmd_handle_group0_intr_swd
 * The SPMC delegates handling of a Group0 secure interrupt to EL3 firmware
 * using the FFA_EL3_INTR_HANDLE SMC call. Further, the SPMD delegates the
 * handling of the interrupt to the platform handler, and returns only upon
 * successfully handling the Group0 interrupt.
 ******************************************************************************/
static uint64_t spmd_handle_group0_intr_swd(void *handle)
{
	uint32_t intid;

	/* Sanity check the pointer to this CPU's context */
	assert(handle == cm_get_context(SECURE));

	assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3);

	intid = plat_ic_acknowledge_interrupt();

	/*
	 * TODO: Currently due to a limitation in SPMD implementation, the
	 * platform handler is expected to not delegate handling to NWd while
	 * processing Group0 secure interrupt.
	 */
	if (plat_spmd_handle_group0_interrupt(intid) < 0) {
		/* Group0 interrupt was not handled by the platform. */
		ERROR("Group0 interrupt %u not handled\n", intid);
		panic();
	}

	/* Deactivate the corresponding Group0 interrupt. */
	plat_ic_end_of_interrupt(intid);

	/* Return success. */
	SMC_RET8(handle, FFA_SUCCESS_SMC32, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ);
}
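
/*
 * A minimal platform hook sketch (hypothetical, for illustration only; each
 * platform provides its own plat_spmd_handle_group0_interrupt). The two
 * handlers above only require a negative return value on failure and a
 * non-negative one on success:
 *
 *	int plat_spmd_handle_group0_interrupt(uint32_t intid)
 *	{
 *		if (intid == MY_PLAT_G0_SPI_ID) {	// hypothetical SPI ID
 *			my_plat_service_g0_irq();	// hypothetical routine
 *			return 0;
 *		}
 *		return -1;	// unknown INTID: callers above will panic()
 *	}
 */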

#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
static int spmd_dynamic_map_mem(uintptr_t base_addr, size_t size,
				unsigned int attr, uintptr_t *align_addr,
				size_t *align_size)
{
	uintptr_t base_addr_align;
	size_t mapped_size_align;
	int rc;

	/* Page align the address and size if necessary */
	base_addr_align = page_align(base_addr, DOWN);
	mapped_size_align = page_align(size, UP);

	if ((base_addr != base_addr_align) &&
	    (size == mapped_size_align)) {
		mapped_size_align += PAGE_SIZE;
	}

	/*
	 * Dynamically map the given region with its aligned base address and
	 * size.
	 */
	rc = mmap_add_dynamic_region((unsigned long long)base_addr_align,
				     base_addr_align,
				     mapped_size_align,
				     attr);
	if (rc == 0) {
		*align_addr = base_addr_align;
		*align_size = mapped_size_align;
	}

	return rc;
}
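
/*
 * Worked example (illustrative, 4KB pages): base_addr = 0x88001800 and
 * size = 0x2000. The base rounds down to 0x88001000 while the size is
 * already page aligned, so a naive mapping would end at 0x88003000 and miss
 * the tail 0x88003000-0x88003800 of the requested region. The PAGE_SIZE bump
 * above extends the mapping to 0x88004000 so the whole region is covered.
 */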

static void spmd_do_sec_cpy(uintptr_t root_base_addr, uintptr_t sec_base_addr,
			    size_t size)
{
	uintptr_t root_base_addr_align, sec_base_addr_align;
	size_t root_mapped_size_align, sec_mapped_size_align;
	int rc;

	assert(root_base_addr != 0UL);
	assert(sec_base_addr != 0UL);
	assert(size != 0UL);

	/* Map the memory with required attributes */
	rc = spmd_dynamic_map_mem(root_base_addr, size, MT_RO_DATA | MT_ROOT,
				  &root_base_addr_align,
				  &root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping", "root region",
		      root_base_addr, rc);
		panic();
	}

	rc = spmd_dynamic_map_mem(sec_base_addr, size, MT_RW_DATA | MT_SECURE,
				  &sec_base_addr_align, &sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping",
		      "secure region", sec_base_addr, rc);
		panic();
	}

	/* Do copy operation */
	(void)memcpy((void *)sec_base_addr, (void *)root_base_addr, size);

	/* Unmap root memory region */
	rc = mmap_remove_dynamic_region(root_base_addr_align,
					root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "root region", root_base_addr_align, rc);
		panic();
	}

	/* Unmap secure memory region */
	rc = mmap_remove_dynamic_region(sec_base_addr_align,
					sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "secure region", sec_base_addr_align, rc);
		panic();
	}
}
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */

/*******************************************************************************
 * Loads the SPMC manifest and initialises the SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	cpu_context_t *cpu_ctx;
	unsigned int core_id;
	uint32_t ep_attr, flags;
	int rc;
	const struct dyn_cfg_dtb_info_t *image_info __unused;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
		spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
		SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID; ensure the high bit is set */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
	     SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("%s%x.\n", "SPM Core execution state 0x",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check if S-EL2 is supported on this system if S-EL2
	 * is required for SPM.
	 */
	if (!is_feat_sel2_supported()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {

#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
	image_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TOS_FW_CONFIG_ID);
	assert(image_info != NULL);

	if ((image_info->config_addr == 0UL) ||
	    (image_info->secondary_config_addr == 0UL) ||
	    (image_info->config_max_size == 0UL)) {
		return -EINVAL;
	}

	/* Copy manifest from root->secure region */
	spmd_do_sec_cpy(image_info->config_addr,
			image_info->secondary_config_addr,
			image_info->config_max_size);

	/* Update ep info of BL32 */
	assert(spmc_ep_info != NULL);
	spmc_ep_info->args.arg0 = image_info->secondary_config_addr;
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */

	/* Set an initial SPMC context state for all cores. */
	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
		spm_core_context[core_id].state = SPMC_STATE_OFF;

		/* Setup an initial cpu context for the SPMC. */
		cpu_ctx = &spm_core_context[core_id].cpu_ctx;
		cm_setup_context(cpu_ctx, spmc_ep_info);

		/*
		 * Pass the core linear ID to the SPMC through x4.
		 * (TF-A implementation defined behavior helping
		 * a legacy TOS migration to adopt FF-A).
		 */
		write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
	}

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	INFO("SPM Core setup done.\n");

	/*
	 * Register an interrupt handler routing secure interrupts to SPMD
	 * while the NWd is running.
	 */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					     spmd_secure_interrupt_handler,
					     flags);
	if (rc != 0) {
		panic();
	}

	/*
	 * Permit configurations where the SPM resides at S-EL1/2 and, upon a
	 * Group0 interrupt triggering while the normal world runs, the
	 * interrupt is routed either through the EHF or directly to the SPMD:
	 *
	 * EL3_EXCEPTION_HANDLING=0: the Group0 interrupt is routed to the SPMD
	 * for handling by spmd_group0_interrupt_handler_nwd.
	 *
	 * EL3_EXCEPTION_HANDLING=1: the Group0 interrupt is routed to the EHF.
	 */
#if (EL3_EXCEPTION_HANDLING == 0)
	/*
	 * Register an interrupt handler routing Group0 interrupts to SPMD
	 * while the NWd is running.
	 */
	rc = register_interrupt_type_handler(INTR_TYPE_EL3,
					     spmd_group0_interrupt_handler_nwd,
					     flags);
	if (rc != 0) {
		panic();
	}
#endif

	return 0;
}

/*******************************************************************************
 * Initialize context of SPM Core.
 ******************************************************************************/
int spmd_setup(void)
{
	int rc;
	void *spmc_manifest;

	/*
	 * If the SPMC is at EL3, then just initialise it directly. The
	 * shenanigans of when it is at a lower EL are not needed.
	 */
	if (is_spmc_at_el3()) {
		/* Allow the SPMC to populate its attributes directly. */
		spmc_populate_attrs(&spmc_attrs);

		rc = spmc_setup();
		if (rc != 0) {
			WARN("SPMC initialisation failed 0x%x.\n", rc);
		}
		return 0;
	}

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return 0;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
	if (spmc_manifest == NULL) {
		WARN("Invalid or absent SPM Core manifest.\n");
		return 0;
	}

	/* Load manifest, init SPMC */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return 0;
}
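
/*
 * Boot flow note (informational): BL2 loads the SPMC image and its manifest
 * and passes the manifest address in the BL32 ep_info arg0. spmd_setup()
 * runs during BL31 initialisation, while the first actual entry into the
 * SPMC (spmd_init() above) is deferred until BL31 completes its own setup,
 * through the bl31_register_bl32_init() hook installed by spmd_spmc_init().
 */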

/*******************************************************************************
 * Forward FF-A SMCs to the other security state.
 ******************************************************************************/
uint64_t spmd_smc_switch_state(uint32_t smc_fid,
			       bool secure_origin,
			       uint64_t x1,
			       uint64_t x2,
			       uint64_t x3,
			       uint64_t x4,
			       void *handle)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

	/* Save incoming security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_in == NON_SECURE) {
		cm_el1_sysregs_context_save(secure_state_in);
	}
	cm_el2_sysregs_context_save(secure_state_in);
#else
	cm_el1_sysregs_context_save(secure_state_in);
#endif

	/* Restore outgoing security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_out == NON_SECURE) {
		cm_el1_sysregs_context_restore(secure_state_out);
	}
	cm_el2_sysregs_context_restore(secure_state_out);
#else
	cm_el1_sysregs_context_restore(secure_state_out);
#endif
	cm_set_next_eret_context(secure_state_out);

#if SPMD_SPM_AT_SEL2
	/*
	 * If the SPMC is at S-EL2, save additional registers x8-x17, which may
	 * be used in FF-A calls such as FFA_PARTITION_INFO_GET_REGS.
	 * Note that technically, all SPMCs can support this, but this code is
	 * under ifdef to minimize breakage in case other SPMCs do not save
	 * and restore x8-x17.
	 * We also need to pass through these registers since not all FF-A ABIs
	 * modify x8-x17, in which case SMCCC requires that these registers be
	 * preserved, so the SPMD passes through these registers and expects the
	 * SPMC to save and restore (potentially also modify) them.
	 */
	SMC_RET18(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
		  SMC_GET_GP(handle, CTX_GPREG_X5),
		  SMC_GET_GP(handle, CTX_GPREG_X6),
		  SMC_GET_GP(handle, CTX_GPREG_X7),
		  SMC_GET_GP(handle, CTX_GPREG_X8),
		  SMC_GET_GP(handle, CTX_GPREG_X9),
		  SMC_GET_GP(handle, CTX_GPREG_X10),
		  SMC_GET_GP(handle, CTX_GPREG_X11),
		  SMC_GET_GP(handle, CTX_GPREG_X12),
		  SMC_GET_GP(handle, CTX_GPREG_X13),
		  SMC_GET_GP(handle, CTX_GPREG_X14),
		  SMC_GET_GP(handle, CTX_GPREG_X15),
		  SMC_GET_GP(handle, CTX_GPREG_X16),
		  SMC_GET_GP(handle, CTX_GPREG_X17));
#else
	SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
		 SMC_GET_GP(handle, CTX_GPREG_X5),
		 SMC_GET_GP(handle, CTX_GPREG_X6),
		 SMC_GET_GP(handle, CTX_GPREG_X7));
#endif
}

/*******************************************************************************
 * Forward SMCs to the other security state.
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	if (is_spmc_at_el3() && !secure_origin) {
		return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);
	}

	return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4,
				     handle);
}

/*******************************************************************************
 * Return FFA_ERROR with the specified error code.
 ******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, (uint32_t)FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}
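
/*
 * Return layout sketch (per the FF-A specification): FFA_ERROR carries the
 * function ID in w0, target information in w1 (must be zero at the physical
 * FF-A instance) and the error code in w2. A caller-side check might look
 * like the following, where 'ret' names a hypothetical structure filled
 * from x0-x7 after the SMC returns:
 *
 *	if ((uint32_t)ret.x0 == FFA_ERROR &&
 *	    (int32_t)ret.x2 == FFA_ERROR_NOT_SUPPORTED) {
 *		// the requested ABI is not implemented by the callee
 *	}
 */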

/*******************************************************************************
 * spmd_check_address_in_binary_image
 ******************************************************************************/
bool spmd_check_address_in_binary_image(uint64_t address)
{
	assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));

	return ((address >= spmc_attrs.load_address) &&
		(address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
}

/******************************************************************************
 * spmd_is_spmc_message
 *****************************************************************************/
static bool spmd_is_spmc_message(unsigned int ep)
{
	if (is_spmc_at_el3()) {
		return false;
	}

	return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
		&& (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
}
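
/*
 * Endpoint encoding sketch (values illustrative): a direct message carries
 * the sender endpoint ID in the upper 16 bits of w1/x1 and the receiver in
 * the lower 16 bits. A response from the SPMC to the SPMD thus satisfies the
 * check above:
 *
 *	ep = (spmc_attrs.spmc_id << 16) | SPMD_DIRECT_MSG_ENDPOINT_ID;
 *	assert(spmd_is_spmc_message(ep));	// true unless SPMC is at EL3
 */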

/******************************************************************************
 * spmd_handle_spmc_message
 *****************************************************************************/
static int spmd_handle_spmc_message(unsigned long long msg,
		unsigned long long parm1, unsigned long long parm2,
		unsigned long long parm3, unsigned long long parm4)
{
	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
		msg, parm1, parm2, parm3, parm4);

	return -EINVAL;
}

/*******************************************************************************
 * This function forwards FF-A SMCs to either the main SPMD handler or the
 * SPMC at EL3, depending on the origin security state, if enabled.
 ******************************************************************************/
uint64_t spmd_ffa_smc_handler(uint32_t smc_fid,
			      uint64_t x1,
			      uint64_t x2,
			      uint64_t x3,
			      uint64_t x4,
			      void *cookie,
			      void *handle,
			      uint64_t flags)
{
	if (is_spmc_at_el3()) {
		/*
		 * If we have an SPMC at EL3, allow handling of the SMC first.
		 * The SPMC will call back through to the SPMD handler if
		 * required.
		 */
		if (is_caller_secure(flags)) {
			return spmc_smc_handler(smc_fid,
						is_caller_secure(flags),
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
	}
	return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
				handle, flags);
}
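
/*
 * Routing summary (informational) when the SPMC resides at EL3:
 *
 *	secure caller     -> spmc_smc_handler() (may re-enter the SPMD)
 *	non-secure caller -> spmd_smc_handler() -> spmd_smc_forward()
 *	                                        -> spmc_smc_handler()
 *
 * With an S-EL1/S-EL2 SPMC, both worlds land in spmd_smc_handler() below and
 * cross the security boundary through spmd_smc_switch_state().
 */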

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	unsigned int linear_id = plat_my_core_pos();
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int32_t ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		linear_id, smc_fid, x1, x2, x3, x4,
		SMC_GET_GP(handle, CTX_GPREG_X5),
		SMC_GET_GP(handle, CTX_GPREG_X6),
		SMC_GET_GP(handle, CTX_GPREG_X7));

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * If the caller is secure and the SPMC was initialized,
		 * return the SPMD's FFA_VERSION.
		 * If the caller is non-secure and the SPMC was initialized,
		 * forward to the EL3 SPMC if enabled, otherwise return
		 * the SPMC version if implemented at a lower EL.
		 * Sanity check "input_version".
		 * If the EL3 SPMC is enabled, ignore the SPMC state as
		 * this is not used.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			if (is_spmc_at_el3()) {
				/*
				 * Forward the call directly to the EL3 SPMC, if
				 * enabled, as we don't need to wrap the call in
				 * a direct request.
				 */
				return spmd_smc_forward(smc_fid, secure_origin,
							x1, x2, x3, x4, cookie,
							handle, flags);
			}

			gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
			uint64_t rc;

			if (spmc_attrs.major_version == 1 &&
			    spmc_attrs.minor_version == 0) {
				ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
						       spmc_attrs.minor_version);
				SMC_RET8(handle, (uint32_t)ret,
					 FFA_TARGET_INFO_MBZ,
					 FFA_TARGET_INFO_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
				break;
			}
			/* Save non-secure system registers context */
			cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
			cm_el2_sysregs_context_save(NON_SECURE);
#endif

			/*
			 * The incoming request has FFA_VERSION as X0 smc_fid
			 * and requested version in x1. Prepare a direct request
			 * from SPMD to SPMC with FFA_VERSION framework function
			 * identifier in X2 and requested version in X3.
			 */
			spmd_build_spmc_message(gpregs,
						SPMD_FWK_MSG_FFA_VERSION_REQ,
						input_version);

			rc = spmd_spm_core_sync_entry(ctx);

			if ((rc != 0ULL) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X0) !=
				FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X2) !=
				(FFA_FWK_MSG_BIT |
				 SPMD_FWK_MSG_FFA_VERSION_RESP))) {
				ERROR("Failed to forward FFA_VERSION\n");
				ret = FFA_ERROR_NOT_SUPPORTED;
			} else {
				ret = SMC_GET_GP(gpregs, CTX_GPREG_X3);
			}

			/*
			 * Return here after SPMC has handled FFA_VERSION.
			 * The returned SPMC version is held in X3.
			 * Forward this version in X0 to the non-secure caller.
			 */
			return spmd_smc_forward(ret, true, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, cookie, gpregs,
						flags);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
			 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */

	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to SPM Core which will handle it if implemented.
		 */

		/* Forward SMC from Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}

		/*
		 * Return success if call was from secure world i.e. all
		 * FFA functions are supported. This is essentially a
		 * nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FFA component.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
		if (secure_origin) {
			ret = spmd_pm_secondary_ep_register(x1);

			if (ret < 0) {
				SMC_RET8(handle, FFA_ERROR_SMC64,
					 FFA_TARGET_INFO_MBZ, ret,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			} else {
				SMC_RET8(handle, FFA_SUCCESS_SMC64,
					 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			}
		}

		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* Not reached */

	case FFA_SPM_ID_GET:
		if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		/*
		 * Returns the ID of the SPMC or SPMD depending on the FF-A
		 * instance where this function is invoked.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}
		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
		if (!secure_origin) {
			/* Validate source endpoint is non-secure for non-secure caller. */
			if (ffa_is_secure_world_id(ffa_endpoint_source(x1))) {
				return spmd_ffa_error_return(handle,
						FFA_ERROR_INVALID_PARAMETER);
			}
		}
		if (secure_origin && spmd_is_spmc_message(x1)) {
			ret = spmd_handle_spmc_message(x3, x4,
				SMC_GET_GP(handle, CTX_GPREG_X5),
				SMC_GET_GP(handle, CTX_GPREG_X6),
				SMC_GET_GP(handle, CTX_GPREG_X7));

			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, ret,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
		break; /* Not reached */

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
		if (secure_origin && spmd_is_spmc_message(x1)) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
		break; /* Not reached */

	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_PARTITION_INFO_GET:
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_NOTIFICATION_BITMAP_CREATE:
	case FFA_NOTIFICATION_BITMAP_DESTROY:
	case FFA_NOTIFICATION_BIND:
	case FFA_NOTIFICATION_UNBIND:
	case FFA_NOTIFICATION_SET:
	case FFA_NOTIFICATION_GET:
	case FFA_NOTIFICATION_INFO_GET:
	case FFA_NOTIFICATION_INFO_GET_SMC64:
	case FFA_MSG_SEND2:
	case FFA_RX_ACQUIRE:
#endif
	case FFA_MSG_RUN:
		/*
		 * Above calls should be invoked only by the Normal world and
		 * must not be forwarded from Secure world to Normal world.
		 */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_MSG_SEND:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_MEM_FRAG_TX:
	case FFA_MEM_FRAG_RX:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * TODO: Assume that no requests originate from EL3 at the
		 * moment. This will change if a SP service is required in
		 * response to secure interrupts targeted to EL3. Until then
		 * simply forward the call to the Normal world.
		 */

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that the
		 * SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0ULL);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_INTERRUPT:
	case FFA_MSG_YIELD:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_NORMAL_WORLD_RESUME:
		if (secure_origin && ctx->secure_interrupt_ongoing) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_DENIED);
		}
		break; /* Not reached */

#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_PARTITION_INFO_GET_REGS_SMC64:
		if (secure_origin) {
			/* TODO: Future patches to enable support for this */
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}

		/* Call only supported with SMCCC 1.2+ */
		if (MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION) < 0x10002) {
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* Not reached */
#endif
	case FFA_EL3_INTR_HANDLE:
		if (secure_origin) {
			return spmd_handle_group0_intr_swd(handle);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}
	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}