Marc Bonnici8e1a7552021-12-01 17:57:04 +00001/*
2 * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <assert.h>
8#include <errno.h>
9
10#include <arch_helpers.h>
11#include <bl31/bl31.h>
12#include <bl31/ehf.h>
Achin Gupta2ec5dbe2021-10-04 20:17:45 +010013#include <bl31/interrupt_mgmt.h>
Marc Bonnici8e1a7552021-12-01 17:57:04 +000014#include <common/debug.h>
15#include <common/fdt_wrappers.h>
16#include <common/runtime_svc.h>
Marc Bonnici37dd8e12021-08-17 18:00:07 +010017#include <common/uuid.h>
Marc Bonnici8e1a7552021-12-01 17:57:04 +000018#include <lib/el3_runtime/context_mgmt.h>
19#include <lib/smccc.h>
20#include <lib/utils.h>
21#include <lib/xlat_tables/xlat_tables_v2.h>
22#include <libfdt.h>
23#include <plat/common/platform.h>
Marc Bonnici9a297042022-02-14 17:06:09 +000024#include <services/el3_spmc_logical_sp.h>
Marc Bonnici8e1a7552021-12-01 17:57:04 +000025#include <services/ffa_svc.h>
26#include <services/spmc_svc.h>
27#include <services/spmd_svc.h>
28#include "spmc.h"
29
30#include <platform_def.h>
31
/* Declare the maximum number of SPs and EL3 LPs. */
#define MAX_SP_LP_PARTITIONS (SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)
34
Marc Bonnici8e1a7552021-12-01 17:57:04 +000035/*
36 * Allocate a secure partition descriptor to describe each SP in the system that
37 * does not reside at EL3.
38 */
39static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];
40
41/*
42 * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
43 * the system that interacts with a SP. It is used to track the Hypervisor
44 * buffer pair, version and ID for now. It could be extended to track VM
45 * properties when the SPMC supports indirect messaging.
46 */
47static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
48
Achin Gupta2ec5dbe2021-10-04 20:17:45 +010049static uint64_t spmc_sp_interrupt_handler(uint32_t id,
50 uint32_t flags,
51 void *handle,
52 void *cookie);
53
Marc Bonnici8e1a7552021-12-01 17:57:04 +000054/*
Marc Bonnici9a297042022-02-14 17:06:09 +000055 * Helper function to obtain the array storing the EL3
56 * Logical Partition descriptors.
57 */
58struct el3_lp_desc *get_el3_lp_array(void)
59{
60 return (struct el3_lp_desc *) EL3_LP_DESCS_START;
61}
62
63/*
Marc Bonnici8e1a7552021-12-01 17:57:04 +000064 * Helper function to obtain the descriptor of the last SP to whom control was
65 * handed to on this physical cpu. Currently, we assume there is only one SP.
66 * TODO: Expand to track multiple partitions when required.
67 */
68struct secure_partition_desc *spmc_get_current_sp_ctx(void)
69{
70 return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
71}
72
73/*
74 * Helper function to obtain the execution context of an SP on the
75 * current physical cpu.
76 */
77struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
78{
79 return &(sp->ec[get_ec_index(sp)]);
80}
81
82/* Helper function to get pointer to SP context from its ID. */
83struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
84{
Marc Bonnicie95eb7c2021-12-08 14:24:03 +000085 /* Check for Secure World Partitions. */
Marc Bonnici8e1a7552021-12-01 17:57:04 +000086 for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
87 if (sp_desc[i].sp_id == id) {
88 return &(sp_desc[i]);
89 }
90 }
91 return NULL;
92}
93
Marc Bonnici52a9cbc2021-11-24 10:32:16 +000094/*
95 * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
96 * We assume that the first descriptor is reserved for this entity.
97 */
98struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
99{
100 return &(ns_ep_desc[0]);
101}
102
Marc Bonnicia2cfa612021-11-24 10:33:48 +0000103/*
104 * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
105 * or OS kernel in the normal world or the last SP that was run.
106 */
107struct mailbox *spmc_get_mbox_desc(bool secure_origin)
108{
109 /* Obtain the RX/TX buffer pair descriptor. */
110 if (secure_origin) {
111 return &(spmc_get_current_sp_ctx()->mailbox);
112 } else {
113 return &(spmc_get_hyp_ctx()->mailbox);
114 }
115}
116
Marc Bonnici8e1a7552021-12-01 17:57:04 +0000117/******************************************************************************
118 * This function returns to the place where spmc_sp_synchronous_entry() was
119 * called originally.
120 ******************************************************************************/
121__dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
122{
123 /*
124 * The SPM must have initiated the original request through a
125 * synchronous entry into the secure partition. Jump back to the
	 * original C runtime context with the value of rc in x0.
127 */
128 spm_secure_partition_exit(ec->c_rt_ctx, rc);
129
130 panic();
131}
132
133/*******************************************************************************
134 * Return FFA_ERROR with specified error code.
135 ******************************************************************************/
136uint64_t spmc_ffa_error_return(void *handle, int error_code)
137{
138 SMC_RET8(handle, FFA_ERROR,
139 FFA_TARGET_INFO_MBZ, error_code,
140 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
141 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
142}
143
144/******************************************************************************
145 * Helper function to validate a secure partition ID to ensure it does not
146 * conflict with any other FF-A component and follows the convention to
147 * indicate it resides within the secure world.
148 ******************************************************************************/
149bool is_ffa_secure_id_valid(uint16_t partition_id)
150{
Marc Bonnici9a297042022-02-14 17:06:09 +0000151 struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
152
Marc Bonnici8e1a7552021-12-01 17:57:04 +0000153 /* Ensure the ID is not the invalid partition ID. */
154 if (partition_id == INV_SP_ID) {
155 return false;
156 }
157
158 /* Ensure the ID is not the SPMD ID. */
159 if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
160 return false;
161 }
162
163 /*
164 * Ensure the ID follows the convention to indicate it resides
165 * in the secure world.
166 */
167 if (!ffa_is_secure_world_id(partition_id)) {
168 return false;
169 }
170
171 /* Ensure we don't conflict with the SPMC partition ID. */
172 if (partition_id == FFA_SPMC_ID) {
173 return false;
174 }
175
176 /* Ensure we do not already have an SP context with this ID. */
177 if (spmc_get_sp_ctx(partition_id)) {
178 return false;
179 }
180
	/* Ensure we don't clash with any Logical SPs. */
182 for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
183 if (el3_lp_descs[i].sp_id == partition_id) {
184 return false;
185 }
186 }
187
Marc Bonnici8e1a7552021-12-01 17:57:04 +0000188 return true;
189}
190
191/*******************************************************************************
Marc Bonnici7e19db82021-11-29 17:17:29 +0000192 * This function either forwards the request to the other world or returns
193 * with an ERET depending on the source of the call.
 * We can assume that the destination is an entity at a lower exception
 * level, as any messages destined for a logical SP resident in EL3 will have
196 * already been taken care of by the SPMC before entering this function.
Marc Bonnici7e19db82021-11-29 17:17:29 +0000197 ******************************************************************************/
198static uint64_t spmc_smc_return(uint32_t smc_fid,
199 bool secure_origin,
200 uint64_t x1,
201 uint64_t x2,
202 uint64_t x3,
203 uint64_t x4,
204 void *handle,
205 void *cookie,
206 uint64_t flags,
207 uint16_t dst_id)
208{
209 /* If the destination is in the normal world always go via the SPMD. */
210 if (ffa_is_normal_world_id(dst_id)) {
211 return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
212 cookie, handle, flags);
213 }
214 /*
215 * If the caller is secure and we want to return to the secure world,
216 * ERET directly.
217 */
218 else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
219 SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
220 }
221 /* If we originated in the normal world then switch contexts. */
222 else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
223 return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
224 x3, x4, handle);
225 } else {
226 /* Unknown State. */
227 panic();
228 }
229
230 /* Shouldn't be Reached. */
231 return 0;
232}
233
234/*******************************************************************************
Marc Bonnici8eb15202021-11-29 17:05:33 +0000235 * FF-A ABI Handlers.
236 ******************************************************************************/
Marc Bonnici5eeacd52021-11-29 17:05:57 +0000237
238/*******************************************************************************
239 * Helper function to validate arg2 as part of a direct message.
240 ******************************************************************************/
241static inline bool direct_msg_validate_arg2(uint64_t x2)
242{
Marc Bonnici25f4b542022-04-12 17:18:13 +0100243 /* Check message type. */
244 if (x2 & FFA_FWK_MSG_BIT) {
245 /* We have a framework message, ensure it is a known message. */
246 if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
247 VERBOSE("Invalid message format 0x%lx.\n", x2);
248 return false;
249 }
250 } else {
		/* We have a partition message, ensure x2 is not set. */
252 if (x2 != (uint64_t) 0) {
253 VERBOSE("Arg2 MBZ for partition messages. (0x%lx).\n",
254 x2);
255 return false;
256 }
Marc Bonnici5eeacd52021-11-29 17:05:57 +0000257 }
258 return true;
259}
260
261/*******************************************************************************
262 * Handle direct request messages and route to the appropriate destination.
263 ******************************************************************************/
264static uint64_t direct_req_smc_handler(uint32_t smc_fid,
265 bool secure_origin,
266 uint64_t x1,
267 uint64_t x2,
268 uint64_t x3,
269 uint64_t x4,
270 void *cookie,
271 void *handle,
272 uint64_t flags)
273{
274 uint16_t dst_id = ffa_endpoint_destination(x1);
Marc Bonnici9a297042022-02-14 17:06:09 +0000275 struct el3_lp_desc *el3_lp_descs;
Marc Bonnici5eeacd52021-11-29 17:05:57 +0000276 struct secure_partition_desc *sp;
277 unsigned int idx;
278
279 /* Check if arg2 has been populated correctly based on message type. */
280 if (!direct_msg_validate_arg2(x2)) {
281 return spmc_ffa_error_return(handle,
282 FFA_ERROR_INVALID_PARAMETER);
283 }
284
Marc Bonnici9a297042022-02-14 17:06:09 +0000285 el3_lp_descs = get_el3_lp_array();
286
287 /* Check if the request is destined for a Logical Partition. */
288 for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
289 if (el3_lp_descs[i].sp_id == dst_id) {
290 return el3_lp_descs[i].direct_req(
291 smc_fid, secure_origin, x1, x2, x3, x4,
292 cookie, handle, flags);
293 }
294 }
295
Marc Bonnici5eeacd52021-11-29 17:05:57 +0000296 /*
	 * If the request was not targeted to an LSP and came from the secure
	 * world then it is invalid, since an SP cannot call into the Normal
	 * world and there is no other SP to call into. If there are other SPs
	 * in future then the partition runtime model would need to be
	 * validated as well.
Marc Bonnici5eeacd52021-11-29 17:05:57 +0000301 */
302 if (secure_origin) {
303 VERBOSE("Direct request not supported to the Normal World.\n");
304 return spmc_ffa_error_return(handle,
305 FFA_ERROR_INVALID_PARAMETER);
306 }
307
308 /* Check if the SP ID is valid. */
309 sp = spmc_get_sp_ctx(dst_id);
310 if (sp == NULL) {
311 VERBOSE("Direct request to unknown partition ID (0x%x).\n",
312 dst_id);
313 return spmc_ffa_error_return(handle,
314 FFA_ERROR_INVALID_PARAMETER);
315 }
316
317 /*
318 * Check that the target execution context is in a waiting state before
319 * forwarding the direct request to it.
320 */
321 idx = get_ec_index(sp);
322 if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
323 VERBOSE("SP context on core%u is not waiting (%u).\n",
324 idx, sp->ec[idx].rt_model);
325 return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
326 }
327
328 /*
329 * Everything checks out so forward the request to the SP after updating
330 * its state and runtime model.
331 */
332 sp->ec[idx].rt_state = RT_STATE_RUNNING;
333 sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
334 return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
335 handle, cookie, flags, dst_id);
336}
337
338/*******************************************************************************
339 * Handle direct response messages and route to the appropriate destination.
340 ******************************************************************************/
341static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
342 bool secure_origin,
343 uint64_t x1,
344 uint64_t x2,
345 uint64_t x3,
346 uint64_t x4,
347 void *cookie,
348 void *handle,
349 uint64_t flags)
350{
351 uint16_t dst_id = ffa_endpoint_destination(x1);
352 struct secure_partition_desc *sp;
353 unsigned int idx;
354
355 /* Check if arg2 has been populated correctly based on message type. */
356 if (!direct_msg_validate_arg2(x2)) {
357 return spmc_ffa_error_return(handle,
358 FFA_ERROR_INVALID_PARAMETER);
359 }
360
361 /* Check that the response did not originate from the Normal world. */
362 if (!secure_origin) {
363 VERBOSE("Direct Response not supported from Normal World.\n");
364 return spmc_ffa_error_return(handle,
365 FFA_ERROR_INVALID_PARAMETER);
366 }
367
368 /*
369 * Check that the response is either targeted to the Normal world or the
370 * SPMC e.g. a PM response.
371 */
372 if ((dst_id != FFA_SPMC_ID) && ffa_is_secure_world_id(dst_id)) {
373 VERBOSE("Direct response to invalid partition ID (0x%x).\n",
374 dst_id);
375 return spmc_ffa_error_return(handle,
376 FFA_ERROR_INVALID_PARAMETER);
377 }
378
379 /* Obtain the SP descriptor and update its runtime state. */
380 sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
381 if (sp == NULL) {
382 VERBOSE("Direct response to unknown partition ID (0x%x).\n",
383 dst_id);
384 return spmc_ffa_error_return(handle,
385 FFA_ERROR_INVALID_PARAMETER);
386 }
387
388 /* Sanity check state is being tracked correctly in the SPMC. */
389 idx = get_ec_index(sp);
390 assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
391
392 /* Ensure SP execution context was in the right runtime model. */
393 if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
394 VERBOSE("SP context on core%u not handling direct req (%u).\n",
395 idx, sp->ec[idx].rt_model);
396 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
397 }
398
399 /* Update the state of the SP execution context. */
400 sp->ec[idx].rt_state = RT_STATE_WAITING;
401
402 /*
	 * If the receiver is the SPMC then return to it with a synchronous
	 * exit, otherwise forward the response to the Normal world.
405 */
406 if (dst_id == FFA_SPMC_ID) {
407 spmc_sp_synchronous_exit(&sp->ec[idx], x4);
408 /* Should not get here. */
409 panic();
410 }
411
412 return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
413 handle, cookie, flags, dst_id);
414}
415
Marc Bonnici8eb15202021-11-29 17:05:33 +0000416/*******************************************************************************
417 * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
418 * cycles.
419 ******************************************************************************/
420static uint64_t msg_wait_handler(uint32_t smc_fid,
421 bool secure_origin,
422 uint64_t x1,
423 uint64_t x2,
424 uint64_t x3,
425 uint64_t x4,
426 void *cookie,
427 void *handle,
428 uint64_t flags)
429{
430 struct secure_partition_desc *sp;
431 unsigned int idx;
432
433 /*
	 * Check that the request did not originate from the Normal world as
435 * only the secure world can call this ABI.
436 */
437 if (!secure_origin) {
438 VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
439 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
440 }
441
442 /* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
443 sp = spmc_get_current_sp_ctx();
444 if (sp == NULL) {
445 return spmc_ffa_error_return(handle,
446 FFA_ERROR_INVALID_PARAMETER);
447 }
448
449 /*
450 * Get the execution context of the SP that invoked FFA_MSG_WAIT.
451 */
452 idx = get_ec_index(sp);
453
454 /* Ensure SP execution context was in the right runtime model. */
455 if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
456 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
457 }
458
459 /* Sanity check the state is being tracked correctly in the SPMC. */
460 assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
461
462 /*
463 * Perform a synchronous exit if the partition was initialising. The
464 * state is updated after the exit.
465 */
466 if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
467 spmc_sp_synchronous_exit(&sp->ec[idx], x4);
468 /* Should not get here */
469 panic();
470 }
471
472 /* Update the state of the SP execution context. */
473 sp->ec[idx].rt_state = RT_STATE_WAITING;
474
475 /* Resume normal world if a secure interrupt was handled. */
476 if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
477 /* FFA_MSG_WAIT can only be called from the secure world. */
478 unsigned int secure_state_in = SECURE;
479 unsigned int secure_state_out = NON_SECURE;
480
481 cm_el1_sysregs_context_save(secure_state_in);
482 cm_el1_sysregs_context_restore(secure_state_out);
483 cm_set_next_eret_context(secure_state_out);
484 SMC_RET0(cm_get_context(secure_state_out));
485 }
486
487 /* Forward the response to the Normal world. */
488 return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
489 handle, cookie, flags, FFA_NWD_ID);
490}
491
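/*
 * Handle FFA_ERROR. This is currently only expected from an SP during its
 * initialisation, in which case a synchronous exit is performed to report the
 * failure to the SPMC; any other usage is rejected as not supported.
 */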
Marc Bonnicib4e99842021-12-10 09:21:56 +0000492static uint64_t ffa_error_handler(uint32_t smc_fid,
493 bool secure_origin,
494 uint64_t x1,
495 uint64_t x2,
496 uint64_t x3,
497 uint64_t x4,
498 void *cookie,
499 void *handle,
500 uint64_t flags)
501{
502 struct secure_partition_desc *sp;
503 unsigned int idx;
504
505 /* Check that the response did not originate from the Normal world. */
506 if (!secure_origin) {
507 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
508 }
509
510 /* Get the descriptor of the SP that invoked FFA_ERROR. */
511 sp = spmc_get_current_sp_ctx();
512 if (sp == NULL) {
513 return spmc_ffa_error_return(handle,
514 FFA_ERROR_INVALID_PARAMETER);
515 }
516
517 /* Get the execution context of the SP that invoked FFA_ERROR. */
518 idx = get_ec_index(sp);
519
520 /*
521 * We only expect FFA_ERROR to be received during SP initialisation
522 * otherwise this is an invalid call.
523 */
524 if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
525 ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
526 spmc_sp_synchronous_exit(&sp->ec[idx], x2);
527 /* Should not get here. */
528 panic();
529 }
530
531 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
532}
533
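/*
 * Handle FFA_VERSION. Record the FF-A version requested by the caller and
 * return the version implemented by the SPMC.
 */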
Marc Bonnicie95eb7c2021-12-08 14:24:03 +0000534static uint64_t ffa_version_handler(uint32_t smc_fid,
535 bool secure_origin,
536 uint64_t x1,
537 uint64_t x2,
538 uint64_t x3,
539 uint64_t x4,
540 void *cookie,
541 void *handle,
542 uint64_t flags)
543{
544 uint32_t requested_version = x1 & FFA_VERSION_MASK;
545
546 if (requested_version & FFA_VERSION_BIT31_MASK) {
547 /* Invalid encoding, return an error. */
548 SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
549 /* Execution stops here. */
550 }
551
552 /* Determine the caller to store the requested version. */
553 if (secure_origin) {
554 /*
555 * Ensure that the SP is reporting the same version as
556 * specified in its manifest. If these do not match there is
557 * something wrong with the SP.
		 * TODO: Should we abort the SP? For now assert this is not
		 * the case.
560 */
561 assert(requested_version ==
562 spmc_get_current_sp_ctx()->ffa_version);
563 } else {
564 /*
565 * If this is called by the normal world, record this
566 * information in its descriptor.
567 */
568 spmc_get_hyp_ctx()->ffa_version = requested_version;
569 }
570
571 SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
572 FFA_VERSION_MINOR));
573}
574
Marc Bonnici8eb15202021-11-29 17:05:33 +0000575/*******************************************************************************
Marc Bonnici73fbe8f2021-12-09 11:32:30 +0000576 * Helper function to obtain the FF-A version of the calling partition.
577 ******************************************************************************/
578uint32_t get_partition_ffa_version(bool secure_origin)
579{
580 if (secure_origin) {
581 return spmc_get_current_sp_ctx()->ffa_version;
582 } else {
583 return spmc_get_hyp_ctx()->ffa_version;
584 }
585}
586
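/*
 * Handle FFA_RXTX_MAP. Validate the buffer pair provided by the caller, map
 * it into the EL3 translation regime and record it in the relevant mailbox
 * descriptor.
 */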
Marc Bonnici0cf1a152021-08-25 12:09:37 +0100587static uint64_t rxtx_map_handler(uint32_t smc_fid,
588 bool secure_origin,
589 uint64_t x1,
590 uint64_t x2,
591 uint64_t x3,
592 uint64_t x4,
593 void *cookie,
594 void *handle,
595 uint64_t flags)
596{
597 int ret;
598 uint32_t error_code;
599 uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
600 struct mailbox *mbox;
601 uintptr_t tx_address = x1;
602 uintptr_t rx_address = x2;
603 uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
604 uint32_t buf_size = page_count * FFA_PAGE_SIZE;
605
606 /*
607 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
608 * indirect messaging with SPs. Check if the Hypervisor has invoked this
609 * ABI on behalf of a VM and reject it if this is the case.
610 */
611 if (tx_address == 0 || rx_address == 0) {
612 WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
613 return spmc_ffa_error_return(handle,
614 FFA_ERROR_INVALID_PARAMETER);
615 }
616
617 /* Ensure the specified buffers are not the same. */
618 if (tx_address == rx_address) {
619 WARN("TX Buffer must not be the same as RX Buffer.\n");
620 return spmc_ffa_error_return(handle,
621 FFA_ERROR_INVALID_PARAMETER);
622 }
623
624 /* Ensure the buffer size is not 0. */
625 if (buf_size == 0U) {
626 WARN("Buffer size must not be 0\n");
627 return spmc_ffa_error_return(handle,
628 FFA_ERROR_INVALID_PARAMETER);
629 }
630
631 /*
632 * Ensure the buffer size is a multiple of the translation granule size
633 * in TF-A.
634 */
635 if (buf_size % PAGE_SIZE != 0U) {
636 WARN("Buffer size must be aligned to translation granule.\n");
637 return spmc_ffa_error_return(handle,
638 FFA_ERROR_INVALID_PARAMETER);
639 }
640
641 /* Obtain the RX/TX buffer pair descriptor. */
642 mbox = spmc_get_mbox_desc(secure_origin);
643
644 spin_lock(&mbox->lock);
645
646 /* Check if buffers have already been mapped. */
647 if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
648 WARN("RX/TX Buffers already mapped (%p/%p)\n",
649 (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
650 error_code = FFA_ERROR_DENIED;
651 goto err;
652 }
653
654 /* memmap the TX buffer as read only. */
655 ret = mmap_add_dynamic_region(tx_address, /* PA */
656 tx_address, /* VA */
657 buf_size, /* size */
658 mem_atts | MT_RO_DATA); /* attrs */
659 if (ret != 0) {
660 /* Return the correct error code. */
661 error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
662 FFA_ERROR_INVALID_PARAMETER;
663 WARN("Unable to map TX buffer: %d\n", error_code);
664 goto err;
665 }
666
667 /* memmap the RX buffer as read write. */
668 ret = mmap_add_dynamic_region(rx_address, /* PA */
669 rx_address, /* VA */
670 buf_size, /* size */
671 mem_atts | MT_RW_DATA); /* attrs */
672
673 if (ret != 0) {
674 error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
675 FFA_ERROR_INVALID_PARAMETER;
676 WARN("Unable to map RX buffer: %d\n", error_code);
677 /* Unmap the TX buffer again. */
678 mmap_remove_dynamic_region(tx_address, buf_size);
679 goto err;
680 }
681
682 mbox->tx_buffer = (void *) tx_address;
683 mbox->rx_buffer = (void *) rx_address;
684 mbox->rxtx_page_count = page_count;
685 spin_unlock(&mbox->lock);
686
687 SMC_RET1(handle, FFA_SUCCESS_SMC32);
688 /* Execution stops here. */
689err:
690 spin_unlock(&mbox->lock);
691 return spmc_ffa_error_return(handle, error_code);
692}
693
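/*
 * Handle FFA_RXTX_UNMAP. Unmap the caller's RX/TX buffer pair from the EL3
 * translation regime and clear the mailbox descriptor.
 */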
694static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
695 bool secure_origin,
696 uint64_t x1,
697 uint64_t x2,
698 uint64_t x3,
699 uint64_t x4,
700 void *cookie,
701 void *handle,
702 uint64_t flags)
703{
704 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
705 uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
706
707 /*
708 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
709 * indirect messaging with SPs. Check if the Hypervisor has invoked this
710 * ABI on behalf of a VM and reject it if this is the case.
711 */
712 if (x1 != 0UL) {
713 return spmc_ffa_error_return(handle,
714 FFA_ERROR_INVALID_PARAMETER);
715 }
716
717 spin_lock(&mbox->lock);
718
719 /* Check if buffers are currently mapped. */
720 if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
721 spin_unlock(&mbox->lock);
722 return spmc_ffa_error_return(handle,
723 FFA_ERROR_INVALID_PARAMETER);
724 }
725
726 /* Unmap RX Buffer */
727 if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
728 buf_size) != 0) {
729 WARN("Unable to unmap RX buffer!\n");
730 }
731
732 mbox->rx_buffer = 0;
733
734 /* Unmap TX Buffer */
735 if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
736 buf_size) != 0) {
737 WARN("Unable to unmap TX buffer!\n");
738 }
739
740 mbox->tx_buffer = 0;
741 mbox->rxtx_page_count = 0;
742
743 spin_unlock(&mbox->lock);
744 SMC_RET1(handle, FFA_SUCCESS_SMC32);
745}
746
Marc Bonnici37dd8e12021-08-17 18:00:07 +0100747/*
748 * Collate the partition information in a v1.1 partition information
 * descriptor format; this will be converted later if required.
750 */
751static int partition_info_get_handler_v1_1(uint32_t *uuid,
752 struct ffa_partition_info_v1_1
753 *partitions,
754 uint32_t max_partitions,
755 uint32_t *partition_count)
756{
757 uint32_t index;
758 struct ffa_partition_info_v1_1 *desc;
759 bool null_uuid = is_null_uuid(uuid);
760 struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
761
762 /* Deal with Logical Partitions. */
763 for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
764 if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
765 /* Found a matching UUID, populate appropriately. */
766 if (*partition_count >= max_partitions) {
767 return FFA_ERROR_NO_MEMORY;
768 }
769
770 desc = &partitions[*partition_count];
771 desc->ep_id = el3_lp_descs[index].sp_id;
772 desc->execution_ctx_count = PLATFORM_CORE_COUNT;
773 desc->properties = el3_lp_descs[index].properties;
774 if (null_uuid) {
775 copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
776 }
777 (*partition_count)++;
778 }
779 }
780
	/* Deal with physical SPs. */
782 for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
783 if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
784 /* Found a matching UUID, populate appropriately. */
785 if (*partition_count >= max_partitions) {
786 return FFA_ERROR_NO_MEMORY;
787 }
788
789 desc = &partitions[*partition_count];
790 desc->ep_id = sp_desc[index].sp_id;
791 /*
792 * Execution context count must match No. cores for
793 * S-EL1 SPs.
794 */
795 desc->execution_ctx_count = PLATFORM_CORE_COUNT;
796 desc->properties = sp_desc[index].properties;
797 if (null_uuid) {
798 copy_uuid(desc->uuid, sp_desc[index].uuid);
799 }
800 (*partition_count)++;
801 }
802 }
803 return 0;
804}
805
806/*
 * Handle the case where the caller only wants the count of partitions
808 * matching a given UUID and does not want the corresponding descriptors
809 * populated.
810 */
811static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
812{
813 uint32_t index = 0;
814 uint32_t partition_count = 0;
815 bool null_uuid = is_null_uuid(uuid);
816 struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
817
818 /* Deal with Logical Partitions. */
819 for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
820 if (null_uuid ||
821 uuid_match(uuid, el3_lp_descs[index].uuid)) {
822 (partition_count)++;
823 }
824 }
825
	/* Deal with physical SPs. */
827 for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
828 if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
829 (partition_count)++;
830 }
831 }
832 return partition_count;
833}
834
835/*
836 * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
 * the corresponding descriptor format from the v1.1 descriptor array.
838 */
839static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
840 *partitions,
841 struct mailbox *mbox,
842 int partition_count)
843{
844 uint32_t index;
845 uint32_t buf_size;
846 uint32_t descriptor_size;
847 struct ffa_partition_info_v1_0 *v1_0_partitions =
848 (struct ffa_partition_info_v1_0 *) mbox->rx_buffer;
849
850 buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
851 descriptor_size = partition_count *
852 sizeof(struct ffa_partition_info_v1_0);
853
854 if (descriptor_size > buf_size) {
855 return FFA_ERROR_NO_MEMORY;
856 }
857
858 for (index = 0U; index < partition_count; index++) {
859 v1_0_partitions[index].ep_id = partitions[index].ep_id;
860 v1_0_partitions[index].execution_ctx_count =
861 partitions[index].execution_ctx_count;
862 v1_0_partitions[index].properties =
863 partitions[index].properties;
864 }
865 return 0;
866}
867
868/*
869 * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
870 * v1.0 implementations.
871 */
872static uint64_t partition_info_get_handler(uint32_t smc_fid,
873 bool secure_origin,
874 uint64_t x1,
875 uint64_t x2,
876 uint64_t x3,
877 uint64_t x4,
878 void *cookie,
879 void *handle,
880 uint64_t flags)
881{
882 int ret;
883 uint32_t partition_count = 0;
884 uint32_t size = 0;
885 uint32_t ffa_version = get_partition_ffa_version(secure_origin);
886 struct mailbox *mbox;
887 uint64_t info_get_flags;
888 bool count_only;
889 uint32_t uuid[4];
890
891 uuid[0] = x1;
892 uuid[1] = x2;
893 uuid[2] = x3;
894 uuid[3] = x4;
895
896 /* Determine if the Partition descriptors should be populated. */
897 info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
898 count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);
899
900 /* Handle the case where we don't need to populate the descriptors. */
901 if (count_only) {
902 partition_count = partition_info_get_handler_count_only(uuid);
903 if (partition_count == 0) {
904 return spmc_ffa_error_return(handle,
905 FFA_ERROR_INVALID_PARAMETER);
906 }
907 } else {
908 struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];
909
910 /*
		 * Handle the case where the partition descriptors are
		 * required: check that the buffers are available and
		 * populate the appropriate structure version.
914 */
915
916 /* Obtain the v1.1 format of the descriptors. */
917 ret = partition_info_get_handler_v1_1(uuid, partitions,
918 MAX_SP_LP_PARTITIONS,
919 &partition_count);
920
921 /* Check if an error occurred during discovery. */
922 if (ret != 0) {
923 goto err;
924 }
925
926 /* If we didn't find any matches the UUID is unknown. */
927 if (partition_count == 0) {
928 ret = FFA_ERROR_INVALID_PARAMETER;
929 goto err;
930 }
931
932 /* Obtain the partition mailbox RX/TX buffer pair descriptor. */
933 mbox = spmc_get_mbox_desc(secure_origin);
934
935 /*
936 * If the caller has not bothered registering its RX/TX pair
937 * then return an error code.
938 */
939 spin_lock(&mbox->lock);
940 if (mbox->rx_buffer == NULL) {
941 ret = FFA_ERROR_BUSY;
942 goto err_unlock;
943 }
944
945 /* Ensure the RX buffer is currently free. */
946 if (mbox->state != MAILBOX_STATE_EMPTY) {
947 ret = FFA_ERROR_BUSY;
948 goto err_unlock;
949 }
950
951 /* Zero the RX buffer before populating. */
952 (void)memset(mbox->rx_buffer, 0,
953 mbox->rxtx_page_count * FFA_PAGE_SIZE);
954
955 /*
956 * Depending on the FF-A version of the requesting partition
957 * we may need to convert to a v1.0 format otherwise we can copy
958 * directly.
959 */
960 if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
961 ret = partition_info_populate_v1_0(partitions,
962 mbox,
963 partition_count);
964 if (ret != 0) {
965 goto err_unlock;
966 }
967 } else {
968 uint32_t buf_size = mbox->rxtx_page_count *
969 FFA_PAGE_SIZE;
970
971 /* Ensure the descriptor will fit in the buffer. */
972 size = sizeof(struct ffa_partition_info_v1_1);
973 if (partition_count * size > buf_size) {
974 ret = FFA_ERROR_NO_MEMORY;
975 goto err_unlock;
976 }
977 memcpy(mbox->rx_buffer, partitions,
978 partition_count * size);
979 }
980
981 mbox->state = MAILBOX_STATE_FULL;
982 spin_unlock(&mbox->lock);
983 }
984 SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);
985
986err_unlock:
987 spin_unlock(&mbox->lock);
988err:
989 return spmc_ffa_error_return(handle, ret);
990}
991
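/*
 * Handle FFA_FEATURES. Report whether a queried FF-A ABI is implemented by
 * the SPMC; feature IDs and optional input properties are not currently
 * supported.
 */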
Marc Bonnicid4bb2452021-12-13 11:08:59 +0000992static uint64_t ffa_features_handler(uint32_t smc_fid,
993 bool secure_origin,
994 uint64_t x1,
995 uint64_t x2,
996 uint64_t x3,
997 uint64_t x4,
998 void *cookie,
999 void *handle,
1000 uint64_t flags)
1001{
1002 uint32_t function_id = (uint32_t) x1;
1003 uint32_t input_properties = (uint32_t) x2;
1004
1005 /*
1006 * We don't currently support any additional input properties
1007 * for any ABI therefore ensure this value is always set to 0.
1008 */
1009 if (input_properties != 0) {
1010 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1011 }
1012
1013 /* Check if a Feature ID was requested. */
1014 if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
1015 /* We currently don't support any additional features. */
1016 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1017 }
1018
1019 /* Report if an FF-A ABI is supported. */
1020 switch (function_id) {
1021 /* Supported features from both worlds. */
1022 case FFA_ERROR:
1023 case FFA_SUCCESS_SMC32:
Achin Gupta2ec5dbe2021-10-04 20:17:45 +01001024 case FFA_INTERRUPT:
Marc Bonnici38add672021-11-25 15:54:52 +00001025 case FFA_SPM_ID_GET:
Marc Bonnicifaa4a762021-11-24 15:40:00 +00001026 case FFA_ID_GET:
Marc Bonnicid4bb2452021-12-13 11:08:59 +00001027 case FFA_FEATURES:
1028 case FFA_VERSION:
Marc Bonnicicb17d312022-04-12 17:17:45 +01001029 case FFA_RX_RELEASE:
Marc Bonnicid4bb2452021-12-13 11:08:59 +00001030 case FFA_MSG_SEND_DIRECT_REQ_SMC32:
1031 case FFA_MSG_SEND_DIRECT_REQ_SMC64:
1032 case FFA_PARTITION_INFO_GET:
1033 case FFA_RXTX_MAP_SMC32:
1034 case FFA_RXTX_MAP_SMC64:
1035 case FFA_RXTX_UNMAP:
Marc Bonnici764e6672021-08-31 17:57:04 +01001036 case FFA_MSG_RUN:
Marc Bonnicid4bb2452021-12-13 11:08:59 +00001037
1038 /*
1039 * We are relying on the fact that the other registers
1040 * will be set to 0 as these values align with the
1041 * currently implemented features of the SPMC. If this
1042 * changes this function must be extended to handle
1043 * reporting the additional functionality.
1044 */
1045
1046 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1047 /* Execution stops here. */
1048
1049 /* Supported ABIs only from the secure world. */
Marc Bonnici25f4b542022-04-12 17:18:13 +01001050 case FFA_SECONDARY_EP_REGISTER_SMC64:
Marc Bonnicid4bb2452021-12-13 11:08:59 +00001051 case FFA_MSG_SEND_DIRECT_RESP_SMC32:
1052 case FFA_MSG_SEND_DIRECT_RESP_SMC64:
1053 case FFA_MSG_WAIT:
1054
1055 if (!secure_origin) {
1056 return spmc_ffa_error_return(handle,
1057 FFA_ERROR_NOT_SUPPORTED);
1058 }
1059 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1060 /* Execution stops here. */
1061
1062 default:
1063 return spmc_ffa_error_return(handle,
1064 FFA_ERROR_NOT_SUPPORTED);
1065 }
1066}
1067
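/*
 * Enable a partition to query its own FF-A ID. Returns the SP ID for a secure
 * caller and the NS endpoint ID for a normal world caller.
 */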
Marc Bonnicifaa4a762021-11-24 15:40:00 +00001068static uint64_t ffa_id_get_handler(uint32_t smc_fid,
1069 bool secure_origin,
1070 uint64_t x1,
1071 uint64_t x2,
1072 uint64_t x3,
1073 uint64_t x4,
1074 void *cookie,
1075 void *handle,
1076 uint64_t flags)
1077{
1078 if (secure_origin) {
1079 SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1080 spmc_get_current_sp_ctx()->sp_id);
1081 } else {
1082 SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1083 spmc_get_hyp_ctx()->ns_ep_id);
1084 }
1085}
1086
Marc Bonnici38add672021-11-25 15:54:52 +00001087/*
1088 * Enable an SP to query the ID assigned to the SPMC.
1089 */
1090static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
1091 bool secure_origin,
1092 uint64_t x1,
1093 uint64_t x2,
1094 uint64_t x3,
1095 uint64_t x4,
1096 void *cookie,
1097 void *handle,
1098 uint64_t flags)
1099{
1100 assert(x1 == 0UL);
1101 assert(x2 == 0UL);
1102 assert(x3 == 0UL);
1103 assert(x4 == 0UL);
1104 assert(SMC_GET_GP(handle, CTX_GPREG_X5) == 0UL);
1105 assert(SMC_GET_GP(handle, CTX_GPREG_X6) == 0UL);
1106 assert(SMC_GET_GP(handle, CTX_GPREG_X7) == 0UL);
1107
1108 SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
1109}
1110
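/*
 * Handle FFA_MSG_RUN. Allow the normal world to schedule a target SP vCPU
 * after validating its current runtime state.
 */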
Marc Bonnici764e6672021-08-31 17:57:04 +01001111static uint64_t ffa_run_handler(uint32_t smc_fid,
1112 bool secure_origin,
1113 uint64_t x1,
1114 uint64_t x2,
1115 uint64_t x3,
1116 uint64_t x4,
1117 void *cookie,
1118 void *handle,
1119 uint64_t flags)
1120{
1121 struct secure_partition_desc *sp;
1122 uint16_t target_id = FFA_RUN_EP_ID(x1);
1123 uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
1124 unsigned int idx;
1125 unsigned int *rt_state;
1126 unsigned int *rt_model;
1127
1128 /* Can only be called from the normal world. */
1129 if (secure_origin) {
1130 ERROR("FFA_RUN can only be called from NWd.\n");
1131 return spmc_ffa_error_return(handle,
1132 FFA_ERROR_INVALID_PARAMETER);
1133 }
1134
1135 /* Cannot run a Normal world partition. */
1136 if (ffa_is_normal_world_id(target_id)) {
1137 ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
1138 return spmc_ffa_error_return(handle,
1139 FFA_ERROR_INVALID_PARAMETER);
1140 }
1141
1142 /* Check that the target SP exists. */
1143 sp = spmc_get_sp_ctx(target_id);
	if (sp == NULL) {
		ERROR("Unknown partition ID (0x%x).\n", target_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}
1149
1150 idx = get_ec_index(sp);
1151 if (idx != vcpu_id) {
1152 ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
1153 return spmc_ffa_error_return(handle,
1154 FFA_ERROR_INVALID_PARAMETER);
1155 }
1156 rt_state = &((sp->ec[idx]).rt_state);
1157 rt_model = &((sp->ec[idx]).rt_model);
1158 if (*rt_state == RT_STATE_RUNNING) {
1159 ERROR("Partition (0x%x) is already running.\n", target_id);
1160 return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
1161 }
1162
1163 /*
1164 * Sanity check that if the execution context was not waiting then it
1165 * was either in the direct request or the run partition runtime model.
1166 */
1167 if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
1168 assert(*rt_model == RT_MODEL_RUN ||
1169 *rt_model == RT_MODEL_DIR_REQ);
1170 }
1171
1172 /*
1173 * If the context was waiting then update the partition runtime model.
1174 */
1175 if (*rt_state == RT_STATE_WAITING) {
1176 *rt_model = RT_MODEL_RUN;
1177 }
1178
1179 /*
1180 * Forward the request to the correct SP vCPU after updating
1181 * its state.
1182 */
1183 *rt_state = RT_STATE_RUNNING;
1184
1185 return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
1186 handle, cookie, flags, target_id);
1187}
1188
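/*
 * Handle FFA_RX_RELEASE. Mark the caller's RX buffer as free so that it can
 * be reused by the SPMC.
 */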
Marc Bonnicicb17d312022-04-12 17:17:45 +01001189static uint64_t rx_release_handler(uint32_t smc_fid,
1190 bool secure_origin,
1191 uint64_t x1,
1192 uint64_t x2,
1193 uint64_t x3,
1194 uint64_t x4,
1195 void *cookie,
1196 void *handle,
1197 uint64_t flags)
1198{
1199 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1200
1201 spin_lock(&mbox->lock);
1202
1203 if (mbox->state != MAILBOX_STATE_FULL) {
1204 spin_unlock(&mbox->lock);
1205 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1206 }
1207
1208 mbox->state = MAILBOX_STATE_EMPTY;
1209 spin_unlock(&mbox->lock);
1210
1211 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1212}
1213
Marc Bonnici25f4b542022-04-12 17:18:13 +01001214/*
1215 * Perform initial validation on the provided secondary entry point.
1216 * For now ensure it does not lie within the BL31 Image or the SP's
1217 * RX/TX buffers as these are mapped within EL3.
1218 * TODO: perform validation for additional invalid memory regions.
1219 */
1220static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp)
1221{
1222 struct mailbox *mb;
1223 uintptr_t buffer_size;
1224 uintptr_t sp_rx_buffer;
1225 uintptr_t sp_tx_buffer;
1226 uintptr_t sp_rx_buffer_limit;
1227 uintptr_t sp_tx_buffer_limit;
1228
1229 mb = &sp->mailbox;
1230 buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE);
1231 sp_rx_buffer = (uintptr_t) mb->rx_buffer;
1232 sp_tx_buffer = (uintptr_t) mb->tx_buffer;
1233 sp_rx_buffer_limit = sp_rx_buffer + buffer_size;
1234 sp_tx_buffer_limit = sp_tx_buffer + buffer_size;
1235
1236 /*
1237 * Check if the entry point lies within BL31, or the
1238 * SP's RX or TX buffer.
1239 */
1240 if ((ep >= BL31_BASE && ep < BL31_LIMIT) ||
1241 (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) ||
1242 (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) {
1243 return -EINVAL;
1244 }
1245 return 0;
1246}
1247
Marc Bonnici73fbe8f2021-12-09 11:32:30 +00001248/*******************************************************************************
Marc Bonnici25f4b542022-04-12 17:18:13 +01001249 * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to
1250 * register an entry point for initialization during a secondary cold boot.
1251 ******************************************************************************/
1252static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
1253 bool secure_origin,
1254 uint64_t x1,
1255 uint64_t x2,
1256 uint64_t x3,
1257 uint64_t x4,
1258 void *cookie,
1259 void *handle,
1260 uint64_t flags)
1261{
1262 struct secure_partition_desc *sp;
1263 struct sp_exec_ctx *sp_ctx;
1264
1265 /* This request cannot originate from the Normal world. */
1266 if (!secure_origin) {
1267 WARN("%s: Can only be called from SWd.\n", __func__);
1268 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1269 }
1270
1271 /* Get the context of the current SP. */
1272 sp = spmc_get_current_sp_ctx();
1273 if (sp == NULL) {
1274 WARN("%s: Cannot find SP context.\n", __func__);
1275 return spmc_ffa_error_return(handle,
1276 FFA_ERROR_INVALID_PARAMETER);
1277 }
1278
1279 /* Only an S-EL1 SP should be invoking this ABI. */
1280 if (sp->runtime_el != S_EL1) {
1281 WARN("%s: Can only be called for a S-EL1 SP.\n", __func__);
1282 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1283 }
1284
1285 /* Ensure the SP is in its initialization state. */
1286 sp_ctx = spmc_get_sp_ec(sp);
1287 if (sp_ctx->rt_model != RT_MODEL_INIT) {
1288 WARN("%s: Can only be called during SP initialization.\n",
1289 __func__);
1290 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1291 }
1292
1293 /* Perform initial validation of the secondary entry point. */
1294 if (validate_secondary_ep(x1, sp)) {
1295 WARN("%s: Invalid entry point provided (0x%lx).\n",
1296 __func__, x1);
1297 return spmc_ffa_error_return(handle,
1298 FFA_ERROR_INVALID_PARAMETER);
1299 }
1300
1301 /*
1302 * Update the secondary entrypoint in SP context.
1303 * We don't need a lock here as during partition initialization there
1304 * will only be a single core online.
1305 */
1306 sp->secondary_ep = x1;
1307 VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep);
1308
1309 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1310}
1311
1312/*******************************************************************************
 * This function parses the Secure Partition manifest. From the manifest, it
 * fetches the details needed to prepare the Secure Partition image context
 * and any Secure Partition image boot arguments.
1316 ******************************************************************************/
1317static int sp_manifest_parse(void *sp_manifest, int offset,
1318 struct secure_partition_desc *sp,
1319 entry_point_info_t *ep_info)
1320{
1321 int32_t ret, node;
1322 uint32_t config_32;
1323
1324 /*
1325 * Look for the mandatory fields that are expected to be present in
1326 * the SP manifests.
1327 */
1328 node = fdt_path_offset(sp_manifest, "/");
1329 if (node < 0) {
1330 ERROR("Did not find root node.\n");
1331 return node;
1332 }
1333
Marc Bonnici21e644d2021-08-24 11:31:52 +01001334 ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
1335 ARRAY_SIZE(sp->uuid), sp->uuid);
1336 if (ret != 0) {
1337 ERROR("Missing Secure Partition UUID.\n");
1338 return ret;
1339 }
1340
Marc Bonnici8e1a7552021-12-01 17:57:04 +00001341 ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
1342 if (ret != 0) {
1343 ERROR("Missing SP Exception Level information.\n");
1344 return ret;
1345 }
1346
1347 sp->runtime_el = config_32;
1348
1349 ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
1350 if (ret != 0) {
1351 ERROR("Missing Secure Partition FF-A Version.\n");
1352 return ret;
1353 }
1354
1355 sp->ffa_version = config_32;
1356
1357 ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
1358 if (ret != 0) {
1359 ERROR("Missing Secure Partition Execution State.\n");
1360 return ret;
1361 }
1362
1363 sp->execution_state = config_32;
1364
Marc Bonnicieec0d042021-12-09 10:51:05 +00001365 ret = fdt_read_uint32(sp_manifest, node,
Marc Bonnicif5244892021-12-09 18:34:02 +00001366 "messaging-method", &config_32);
1367 if (ret != 0) {
1368 ERROR("Missing Secure Partition messaging method.\n");
1369 return ret;
1370 }
1371
1372 /* Validate this entry, we currently only support direct messaging. */
1373 if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
1374 FFA_PARTITION_DIRECT_REQ_SEND)) != 0U) {
1375 WARN("Invalid Secure Partition messaging method (0x%x)\n",
1376 config_32);
1377 return -EINVAL;
1378 }
1379
1380 sp->properties = config_32;
1381
1382 ret = fdt_read_uint32(sp_manifest, node,
Marc Bonnicieec0d042021-12-09 10:51:05 +00001383 "execution-ctx-count", &config_32);
1384
1385 if (ret != 0) {
1386 ERROR("Missing SP Execution Context Count.\n");
1387 return ret;
1388 }
1389
1390 /*
	 * Ensure this field is set correctly in the manifest. However,
	 * since this is currently a hardcoded value for S-EL1 partitions,
	 * we don't need to save it here, just validate it.
1394 */
1395 if (config_32 != PLATFORM_CORE_COUNT) {
1396 ERROR("SP Execution Context Count (%u) must be %u.\n",
1397 config_32, PLATFORM_CORE_COUNT);
1398 return -EINVAL;
1399 }
1400
Marc Bonnici8e1a7552021-12-01 17:57:04 +00001401 /*
1402 * Look for the optional fields that are expected to be present in
1403 * an SP manifest.
1404 */
1405 ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
1406 if (ret != 0) {
1407 WARN("Missing Secure Partition ID.\n");
1408 } else {
1409 if (!is_ffa_secure_id_valid(config_32)) {
1410 ERROR("Invalid Secure Partition ID (0x%x).\n",
1411 config_32);
1412 return -EINVAL;
1413 }
1414 sp->sp_id = config_32;
1415 }
1416
Marc Bonnici25f4b542022-04-12 17:18:13 +01001417 ret = fdt_read_uint32(sp_manifest, node,
1418 "power-management-messages", &config_32);
1419 if (ret != 0) {
1420 WARN("Missing Power Management Messages entry.\n");
1421 } else {
1422 /*
1423 * Ensure only the currently supported power messages have
1424 * been requested.
1425 */
1426 if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF |
1427 FFA_PM_MSG_SUB_CPU_SUSPEND |
1428 FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) {
1429 ERROR("Requested unsupported PM messages (%x)\n",
1430 config_32);
1431 return -EINVAL;
1432 }
1433 sp->pwr_mgmt_msgs = config_32;
1434 }
1435
Marc Bonnici8e1a7552021-12-01 17:57:04 +00001436 return 0;
1437}
1438
1439/*******************************************************************************
1440 * This function gets the Secure Partition Manifest base and maps the manifest
1441 * region.
1442 * Currently only one Secure Partition manifest is considered which is used to
1443 * prepare the context for the single Secure Partition.
1444 ******************************************************************************/
1445static int find_and_prepare_sp_context(void)
1446{
1447 void *sp_manifest;
1448 uintptr_t manifest_base;
1449 uintptr_t manifest_base_align;
1450 entry_point_info_t *next_image_ep_info;
1451 int32_t ret;
1452 struct secure_partition_desc *sp;
1453
1454 next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
1455 if (next_image_ep_info == NULL) {
1456 WARN("No Secure Partition image provided by BL2.\n");
1457 return -ENOENT;
1458 }
1459
1460 sp_manifest = (void *)next_image_ep_info->args.arg0;
1461 if (sp_manifest == NULL) {
1462 WARN("Secure Partition manifest absent.\n");
1463 return -ENOENT;
1464 }
1465
1466 manifest_base = (uintptr_t)sp_manifest;
1467 manifest_base_align = page_align(manifest_base, DOWN);
1468
1469 /*
1470 * Map the secure partition manifest region in the EL3 translation
1471 * regime.
	 * Map an area equal to (2 * PAGE_SIZE) for now. After aligning the
	 * manifest base down to a page boundary, a single PAGE_SIZE region
	 * may not completely accommodate the secure partition manifest.
1475 */
1476 ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
1477 manifest_base_align,
1478 PAGE_SIZE * 2,
1479 MT_RO_DATA);
1480 if (ret != 0) {
1481 ERROR("Error while mapping SP manifest (%d).\n", ret);
1482 return ret;
1483 }
1484
1485 ret = fdt_node_offset_by_compatible(sp_manifest, -1,
1486 "arm,ffa-manifest-1.0");
1487 if (ret < 0) {
1488 ERROR("Error happened in SP manifest reading.\n");
1489 return -EINVAL;
1490 }
1491
1492 /*
	 * Store the size of the manifest so that it can be used later to pass
	 * the manifest as boot information.
1495 */
1496 next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
1497 INFO("Manifest size = %lu bytes.\n", next_image_ep_info->args.arg1);
1498
1499 /*
1500 * Select an SP descriptor for initialising the partition's execution
1501 * context on the primary CPU.
1502 */
1503 sp = spmc_get_current_sp_ctx();
1504
1505 /* Initialize entry point information for the SP */
1506 SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
1507 SECURE | EP_ST_ENABLE);
1508
1509 /* Parse the SP manifest. */
1510 ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info);
1511 if (ret != 0) {
1512 ERROR("Error in Secure Partition manifest parsing.\n");
1513 return ret;
1514 }
1515
1516 /* Check that the runtime EL in the manifest was correct. */
1517 if (sp->runtime_el != S_EL1) {
1518 ERROR("Unexpected runtime EL: %d\n", sp->runtime_el);
1519 return -EINVAL;
1520 }
1521
1522 /* Perform any common initialisation. */
1523 spmc_sp_common_setup(sp, next_image_ep_info);
1524
1525 /* Perform any initialisation specific to S-EL1 SPs. */
1526 spmc_el1_sp_setup(sp, next_image_ep_info);
1527
1528 /* Initialize the SP context with the required ep info. */
1529 spmc_sp_common_ep_commit(sp, next_image_ep_info);
1530
1531 return 0;
1532}
1533
1534/*******************************************************************************
 * This function validates and initializes all EL3 Logical Partitions
 * registered with the SPMC.
1537 ******************************************************************************/
Marc Bonnici9a297042022-02-14 17:06:09 +00001538static int32_t logical_sp_init(void)
1539{
1540 int32_t rc = 0;
1541 struct el3_lp_desc *el3_lp_descs;
1542
1543 /* Perform initial validation of the Logical Partitions. */
1544 rc = el3_sp_desc_validate();
1545 if (rc != 0) {
1546 ERROR("Logical Partition validation failed!\n");
1547 return rc;
1548 }
1549
1550 el3_lp_descs = get_el3_lp_array();
1551
1552 INFO("Logical Secure Partition init start.\n");
1553 for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
1554 rc = el3_lp_descs[i].init();
1555 if (rc != 0) {
1556 ERROR("Logical SP (0x%x) Failed to Initialize\n",
1557 el3_lp_descs[i].sp_id);
1558 return rc;
1559 }
1560 VERBOSE("Logical SP (0x%x) Initialized\n",
1561 el3_lp_descs[i].sp_id);
1562 }
1563
1564 INFO("Logical Secure Partition init completed.\n");
1565
1566 return rc;
1567}
1568
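/*******************************************************************************
 * This function takes an SP execution context pointer and performs a
 * synchronous entry into it on the current physical cpu.
 ******************************************************************************/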
Marc Bonnici8e1a7552021-12-01 17:57:04 +00001569uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
1570{
1571 uint64_t rc;
1572
1573 assert(ec != NULL);
1574
1575 /* Assign the context of the SP to this CPU */
1576 cm_set_context(&(ec->cpu_ctx), SECURE);
1577
1578 /* Restore the context assigned above */
1579 cm_el1_sysregs_context_restore(SECURE);
1580 cm_set_next_eret_context(SECURE);
1581
1582 /* Invalidate TLBs at EL1. */
1583 tlbivmalle1();
1584 dsbish();
1585
1586 /* Enter Secure Partition */
1587 rc = spm_secure_partition_enter(&ec->c_rt_ctx);
1588
1589 /* Save secure state */
1590 cm_el1_sysregs_context_save(SECURE);
1591
1592 return rc;
1593}
1594
1595/*******************************************************************************
1596 * SPMC Helper Functions.
1597 ******************************************************************************/
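/*
 * Perform deferred initialisation of the physical SP on the primary cpu:
 * enter the SP synchronously and update its runtime state. Returns 1 on
 * success and 0 on failure; registered via bl31_register_bl32_init().
 */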
1598static int32_t sp_init(void)
1599{
1600 uint64_t rc;
1601 struct secure_partition_desc *sp;
1602 struct sp_exec_ctx *ec;
1603
1604 sp = spmc_get_current_sp_ctx();
1605 ec = spmc_get_sp_ec(sp);
1606 ec->rt_model = RT_MODEL_INIT;
1607 ec->rt_state = RT_STATE_RUNNING;
1608
1609 INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
1610
1611 rc = spmc_sp_synchronous_entry(ec);
1612 if (rc != 0) {
1613 /* Indicate SP init was not successful. */
1614 ERROR("SP (0x%x) failed to initialize (%lu).\n",
1615 sp->sp_id, rc);
1616 return 0;
1617 }
1618
1619 ec->rt_state = RT_STATE_WAITING;
1620 INFO("Secure Partition initialized.\n");
1621
1622 return 1;
1623}
1624
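/* Initialise the SP descriptors to a known empty state. */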
1625static void initalize_sp_descs(void)
1626{
1627 struct secure_partition_desc *sp;
1628
1629 for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
1630 sp = &sp_desc[i];
1631 sp->sp_id = INV_SP_ID;
Marc Bonniciecc460a2021-09-02 13:18:41 +01001632 sp->mailbox.rx_buffer = NULL;
1633 sp->mailbox.tx_buffer = NULL;
1634 sp->mailbox.state = MAILBOX_STATE_EMPTY;
Marc Bonnici8e1a7552021-12-01 17:57:04 +00001635 sp->secondary_ep = 0;
1636 }
1637}
1638
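/* Initialise the NS endpoint descriptors to a known empty state. */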
1639static void initalize_ns_ep_descs(void)
1640{
1641 struct ns_endpoint_desc *ns_ep;
1642
1643 for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
1644 ns_ep = &ns_ep_desc[i];
1645 /*
1646 * Clashes with the Hypervisor ID but will not be a
1647 * problem in practice.
1648 */
1649 ns_ep->ns_ep_id = 0;
1650 ns_ep->ffa_version = 0;
Marc Bonniciecc460a2021-09-02 13:18:41 +01001651 ns_ep->mailbox.rx_buffer = NULL;
1652 ns_ep->mailbox.tx_buffer = NULL;
1653 ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
Marc Bonnici8e1a7552021-12-01 17:57:04 +00001654 }
1655}
1656
1657/*******************************************************************************
Marc Bonnici1c33cc32021-11-29 17:57:03 +00001658 * Initialize SPMC attributes for the SPMD.
1659 ******************************************************************************/
1660void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
1661{
1662 spmc_attrs->major_version = FFA_VERSION_MAJOR;
1663 spmc_attrs->minor_version = FFA_VERSION_MINOR;
1664 spmc_attrs->exec_state = MODE_RW_64;
1665 spmc_attrs->spmc_id = FFA_SPMC_ID;
1666}
1667
1668/*******************************************************************************
Marc Bonnici8e1a7552021-12-01 17:57:04 +00001669 * Initialize contexts of all Secure Partitions.
1670 ******************************************************************************/
1671int32_t spmc_setup(void)
1672{
1673 int32_t ret;
Achin Gupta2ec5dbe2021-10-04 20:17:45 +01001674 uint32_t flags;
Marc Bonnici8e1a7552021-12-01 17:57:04 +00001675
1676 /* Initialize endpoint descriptors */
1677 initalize_sp_descs();
1678 initalize_ns_ep_descs();
1679
Marc Bonnici9a297042022-02-14 17:06:09 +00001680 /* Setup logical SPs. */
1681 ret = logical_sp_init();
1682 if (ret != 0) {
1683 ERROR("Failed to initialize Logical Partitions.\n");
1684 return ret;
1685 }
1686
Marc Bonnici8e1a7552021-12-01 17:57:04 +00001687 /* Perform physical SP setup. */
1688
1689 /* Disable MMU at EL1 (initialized by BL2) */
1690 disable_mmu_icache_el1();
1691
1692 /* Initialize context of the SP */
1693 INFO("Secure Partition context setup start.\n");
1694
1695 ret = find_and_prepare_sp_context();
1696 if (ret != 0) {
1697 ERROR("Error in SP finding and context preparation.\n");
1698 return ret;
1699 }
1700
Marc Bonnici25f4b542022-04-12 17:18:13 +01001701 /* Register power management hooks with PSCI */
1702 psci_register_spd_pm_hook(&spmc_pm);
1703
Achin Gupta2ec5dbe2021-10-04 20:17:45 +01001704 /*
1705 * Register an interrupt handler for S-EL1 interrupts
1706 * when generated during code executing in the
1707 * non-secure state.
1708 */
1709 flags = 0;
1710 set_interrupt_rm_flag(flags, NON_SECURE);
1711 ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
1712 spmc_sp_interrupt_handler,
1713 flags);
1714 if (ret != 0) {
1715 ERROR("Failed to register interrupt handler! (%d)\n", ret);
1716 panic();
1717 }
1718
Marc Bonnici8e1a7552021-12-01 17:57:04 +00001719 /* Register init function for deferred init. */
1720 bl31_register_bl32_init(&sp_init);
1721
1722 INFO("Secure Partition setup done.\n");
1723
1724 return 0;
1725}
1726
1727/*******************************************************************************
1728 * Secure Partition Manager SMC handler.
1729 ******************************************************************************/
1730uint64_t spmc_smc_handler(uint32_t smc_fid,
1731 bool secure_origin,
1732 uint64_t x1,
1733 uint64_t x2,
1734 uint64_t x3,
1735 uint64_t x4,
1736 void *cookie,
1737 void *handle,
1738 uint64_t flags)
1739{
1740 switch (smc_fid) {
1741
Marc Bonnicie95eb7c2021-12-08 14:24:03 +00001742 case FFA_VERSION:
1743 return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
1744 x4, cookie, handle, flags);
1745
Marc Bonnici38add672021-11-25 15:54:52 +00001746 case FFA_SPM_ID_GET:
1747 return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2,
1748 x3, x4, cookie, handle, flags);
1749
Marc Bonnicifaa4a762021-11-24 15:40:00 +00001750 case FFA_ID_GET:
1751 return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
1752 x4, cookie, handle, flags);
1753
Marc Bonnicid4bb2452021-12-13 11:08:59 +00001754 case FFA_FEATURES:
1755 return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
1756 x4, cookie, handle, flags);
1757
Marc Bonnici25f4b542022-04-12 17:18:13 +01001758 case FFA_SECONDARY_EP_REGISTER_SMC64:
1759 return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1,
1760 x2, x3, x4, cookie, handle,
1761 flags);
1762
Marc Bonnici5eeacd52021-11-29 17:05:57 +00001763 case FFA_MSG_SEND_DIRECT_REQ_SMC32:
1764 case FFA_MSG_SEND_DIRECT_REQ_SMC64:
1765 return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
1766 x3, x4, cookie, handle, flags);
1767
1768 case FFA_MSG_SEND_DIRECT_RESP_SMC32:
1769 case FFA_MSG_SEND_DIRECT_RESP_SMC64:
1770 return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
1771 x3, x4, cookie, handle, flags);
1772
Marc Bonnici0cf1a152021-08-25 12:09:37 +01001773 case FFA_RXTX_MAP_SMC32:
1774 case FFA_RXTX_MAP_SMC64:
1775 return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1776 cookie, handle, flags);
1777
1778 case FFA_RXTX_UNMAP:
1779 return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
1780 x4, cookie, handle, flags);
1781
Marc Bonnici37dd8e12021-08-17 18:00:07 +01001782 case FFA_PARTITION_INFO_GET:
1783 return partition_info_get_handler(smc_fid, secure_origin, x1,
1784 x2, x3, x4, cookie, handle,
1785 flags);
1786
Marc Bonnicicb17d312022-04-12 17:17:45 +01001787 case FFA_RX_RELEASE:
1788 return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
1789 x4, cookie, handle, flags);
1790
Marc Bonnici8eb15202021-11-29 17:05:33 +00001791 case FFA_MSG_WAIT:
1792 return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1793 cookie, handle, flags);
1794
Marc Bonnicib4e99842021-12-10 09:21:56 +00001795 case FFA_ERROR:
1796 return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1797 cookie, handle, flags);
1798
Marc Bonnici764e6672021-08-31 17:57:04 +01001799 case FFA_MSG_RUN:
1800 return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1801 cookie, handle, flags);
Marc Bonnici8e1a7552021-12-01 17:57:04 +00001802 default:
1803 WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
1804 break;
1805 }
1806 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1807}
Achin Gupta2ec5dbe2021-10-04 20:17:45 +01001808
1809/*******************************************************************************
1810 * This function is the handler registered for S-EL1 interrupts by the SPMC. It
1811 * validates the interrupt and upon success arranges entry into the SP for
1812 * handling the interrupt.
1813 ******************************************************************************/
1814static uint64_t spmc_sp_interrupt_handler(uint32_t id,
1815 uint32_t flags,
1816 void *handle,
1817 void *cookie)
1818{
1819 struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
1820 struct sp_exec_ctx *ec;
1821 uint32_t linear_id = plat_my_core_pos();
1822
1823 /* Sanity check for a NULL pointer dereference. */
1824 assert(sp != NULL);
1825
1826 /* Check the security state when the exception was generated. */
1827 assert(get_interrupt_src_ss(flags) == NON_SECURE);
1828
1829 /* Panic if not an S-EL1 Partition. */
1830 if (sp->runtime_el != S_EL1) {
1831 ERROR("Interrupt received for a non S-EL1 SP on core%u.\n",
1832 linear_id);
1833 panic();
1834 }
1835
1836 /* Obtain a reference to the SP execution context. */
1837 ec = spmc_get_sp_ec(sp);
1838
1839 /* Ensure that the execution context is in waiting state else panic. */
1840 if (ec->rt_state != RT_STATE_WAITING) {
1841 ERROR("SP EC on core%u is not waiting (%u), it is (%u).\n",
1842 linear_id, RT_STATE_WAITING, ec->rt_state);
1843 panic();
1844 }
1845
1846 /* Update the runtime model and state of the partition. */
1847 ec->rt_model = RT_MODEL_INTR;
1848 ec->rt_state = RT_STATE_RUNNING;
1849
1850 VERBOSE("SP (0x%x) interrupt start on core%u.\n", sp->sp_id, linear_id);
1851
1852 /*
1853 * Forward the interrupt to the S-EL1 SP. The interrupt ID is not
1854 * populated as the SP can determine this by itself.
1855 */
1856 return spmd_smc_switch_state(FFA_INTERRUPT, false,
1857 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1858 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1859 handle);
1860}