/*
 * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <errno.h>

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/object_pool.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <services/ffa_svc.h>
#include "spmc.h"
#include "spmc_shared_mem.h"

#include <platform_def.h>

/**
 * struct spmc_shmem_obj - Shared memory object.
 * @desc_size:      Size of @desc.
 * @desc_filled:    Size of @desc already received.
 * @in_use:         Number of clients that have called ffa_mem_retrieve_req
 *                  without a matching ffa_mem_relinquish call.
 * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
 */
struct spmc_shmem_obj {
	size_t desc_size;
	size_t desc_filled;
	size_t in_use;
	struct ffa_mtd_v1_0 desc;
};

/*
 * Data structure storing the metadata of memory share requests.
 * The main datastore is allocated on a per-platform basis to ensure that
 * enough storage can be made available. The address of the datastore is
 * populated by the SPMC during its initialization.
 */
struct spmc_shmem_obj_state spmc_shmem_obj_state = {
	/* Set start value for handle so top 32 bits are needed quickly. */
	.next_handle = 0xffffffc0U,
};
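
/*
 * A minimal wiring sketch (not part of this file): the datastore above is
 * expected to be provided by platform code during SPMC initialization,
 * broadly along these lines. The hook name plat_spmc_shmem_datastore_get()
 * and its signature are assumptions used for illustration only.
 *
 *	uint8_t *datastore;
 *	size_t size;
 *
 *	if (plat_spmc_shmem_datastore_get(&datastore, &size) == 0) {
 *		spmc_shmem_obj_state.data = datastore;
 *		spmc_shmem_obj_state.data_size = size;
 *	}
 */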

/**
 * spmc_shmem_obj_size - Convert from descriptor size to object size.
 * @desc_size:  Size of struct ffa_memory_region_descriptor object.
 *
 * Return: Size of struct spmc_shmem_obj object.
 */
static size_t spmc_shmem_obj_size(size_t desc_size)
{
	return desc_size + offsetof(struct spmc_shmem_obj, desc);
}

/**
 * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
 * @state:      Global state.
 * @desc_size:  Size of struct ffa_memory_region_descriptor object that the
 *              allocated object will hold.
 *
 * Return: Pointer to the newly allocated object, or %NULL if there is not
 *         enough space left. The returned pointer is only valid while @state
 *         is locked; to use it again after unlocking @state,
 *         spmc_shmem_obj_lookup must be called.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
{
	struct spmc_shmem_obj *obj;
	size_t free = state->data_size - state->allocated;

	if (state->data == NULL) {
		ERROR("Missing shmem datastore!\n");
		return NULL;
	}

	if (spmc_shmem_obj_size(desc_size) > free) {
		WARN("%s(0x%zx) failed, free 0x%zx\n",
		     __func__, desc_size, free);
		return NULL;
	}
	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
	obj->desc = (struct ffa_mtd_v1_0) {0};
	obj->desc_size = desc_size;
	obj->desc_filled = 0;
	obj->in_use = 0;
	state->allocated += spmc_shmem_obj_size(desc_size);
	return obj;
}

/**
 * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
 * @state:      Global state.
 * @obj:        Object to free.
 *
 * Release memory used by @obj. Other objects may move, so on return all
 * pointers to struct spmc_shmem_obj objects should be considered invalid,
 * not just @obj.
 *
 * The current implementation always compacts the remaining objects to
 * simplify the allocator and to avoid fragmentation.
 */
static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
				struct spmc_shmem_obj *obj)
{
	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
	uint8_t *shift_dest = (uint8_t *)obj;
	uint8_t *shift_src = shift_dest + free_size;
	size_t shift_size = state->allocated - (shift_src - state->data);

	if (shift_size != 0U) {
		memmove(shift_dest, shift_src, shift_size);
	}
	state->allocated -= free_size;
}
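
/*
 * Illustration of the compaction performed by spmc_shmem_obj_free() via the
 * memmove() above: objects that follow the freed one are shifted down so the
 * datastore stays contiguous, which is why callers must look up an object
 * again after any free.
 *
 *	before: | obj A | obj B (freed) | obj C | ...free... |
 *	after:  | obj A | obj C | .........free............. |
 */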

/**
 * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
 * @state:      Global state.
 * @handle:     Unique handle of object to return.
 *
 * Return: struct spmc_shmem_obj object with handle matching @handle.
 *         %NULL if no object in @state->data has a matching handle.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
{
	uint8_t *curr = state->data;

	while (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		if (obj->desc.handle == handle) {
			return obj;
		}
		curr += spmc_shmem_obj_size(obj->desc_size);
	}
	return NULL;
}

static struct ffa_comp_mrd *
spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj)
{
	return (struct ffa_comp_mrd *)
		((uint8_t *)(&obj->desc) + obj->desc.emad[0].comp_mrd_offset);
}

/**
 * spmc_shmem_obj_ffa_constituent_size - Calculate variable size part of obj.
 * @obj:    Object containing ffa_memory_region_descriptor.
 *
 * Return: Size of ffa_constituent_memory_region_descriptors in @obj.
 */
static size_t
spmc_shmem_obj_ffa_constituent_size(struct spmc_shmem_obj *obj)
{
	return spmc_shmem_obj_get_comp_mrd(obj)->address_range_count *
		sizeof(struct ffa_cons_mrd);
}

/**
 * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
 * @obj:    Object containing ffa_memory_region_descriptor.
 *
 * Return: 0 if the object is valid, -EINVAL if the memory region attribute
 *         count is not 1 or if a constituent_memory_region_descriptor offset
 *         or count is invalid.
 */
static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj)
{
	if (obj->desc.emad_count != 1) {
		WARN("%s: unsupported attribute desc count %u != 1\n",
		     __func__, obj->desc.emad_count);
		return -EINVAL;
	}

	for (size_t emad_num = 0; emad_num < obj->desc.emad_count; emad_num++) {
		size_t size;
		size_t count;
		size_t expected_size;
		size_t total_page_count;
		struct ffa_comp_mrd *comp;

		uint32_t offset = obj->desc.emad[emad_num].comp_mrd_offset;
		size_t header_emad_size = sizeof(obj->desc) +
			obj->desc.emad_count * sizeof(obj->desc.emad[emad_num]);

		if (offset < header_emad_size) {
			WARN("%s: invalid object, offset %u < header + emad %zu\n",
			     __func__, offset, header_emad_size);
			return -EINVAL;
		}

		size = obj->desc_size;

		if (offset > size) {
			WARN("%s: invalid object, offset %u > total size %zu\n",
			     __func__, offset, obj->desc_size);
			return -EINVAL;
		}
		size -= offset;

		if (size < sizeof(struct ffa_comp_mrd)) {
			WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
			     __func__, offset, obj->desc_size);
			return -EINVAL;
		}
		size -= sizeof(struct ffa_comp_mrd);

		count = size / sizeof(struct ffa_cons_mrd);

		comp = spmc_shmem_obj_get_comp_mrd(obj);

		if (comp->address_range_count != count) {
			WARN("%s: invalid object, desc count %u != %zu\n",
			     __func__, comp->address_range_count, count);
			return -EINVAL;
		}

		expected_size = offset + sizeof(*comp) +
				spmc_shmem_obj_ffa_constituent_size(obj);
		if (expected_size != obj->desc_size) {
			WARN("%s: invalid object, computed size %zu != size %zu\n",
			     __func__, expected_size, obj->desc_size);
			return -EINVAL;
		}

		if (obj->desc_filled < obj->desc_size) {
			/*
			 * The whole descriptor has not yet been received.
			 * Skip final checks.
			 */
			return 0;
		}

		total_page_count = 0;

		for (size_t i = 0; i < count; i++) {
			total_page_count +=
				comp->address_range_array[i].page_count;
		}
		if (comp->total_page_count != total_page_count) {
			WARN("%s: invalid object, desc total_page_count %u != %zu\n",
			     __func__, comp->total_page_count,
			     total_page_count);
			return -EINVAL;
		}
	}

	return 0;
}
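
/*
 * For reference, the descriptor layout that spmc_shmem_check_obj() validates,
 * sketched with the sizes used above (assuming a single emad entry, as the
 * emad_count check enforces):
 *
 *	offset 0:               struct ffa_mtd_v1_0 header plus emad[] array
 *	comp_mrd_offset:        struct ffa_comp_mrd (total_page_count,
 *	                        address_range_count, ...)
 *	followed by:            address_range_count * struct ffa_cons_mrd
 *
 * so a complete descriptor must satisfy:
 *
 *	desc_size == comp_mrd_offset + sizeof(struct ffa_comp_mrd) +
 *		     address_range_count * sizeof(struct ffa_cons_mrd)
 */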

static long spmc_ffa_fill_desc(struct mailbox *mbox,
			       struct spmc_shmem_obj *obj,
			       uint32_t fragment_length,
			       ffa_mtd_flag32_t mtd_flag,
			       void *smc_handle)
{
	int ret;
	uint32_t handle_low;
	uint32_t handle_high;

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = -EINVAL;
		goto err_arg;
	}

	if (fragment_length > mbox->rxtx_page_count * PAGE_SIZE_4KB) {
		WARN("%s: bad fragment size %u > %u buffer size\n", __func__,
		     fragment_length, mbox->rxtx_page_count * PAGE_SIZE_4KB);
		ret = -EINVAL;
		goto err_arg;
	}

	if (fragment_length > obj->desc_size - obj->desc_filled) {
		WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
		     fragment_length, obj->desc_size - obj->desc_filled);
		ret = -EINVAL;
		goto err_arg;
	}

	memcpy((uint8_t *)&obj->desc + obj->desc_filled,
	       (uint8_t *) mbox->tx_buffer,
	       fragment_length);

290
291 /* Ensure that the sender ID resides in the normal world. */
292 if (ffa_is_secure_world_id(obj->desc.sender_id)) {
293 WARN("%s: Invalid sender ID 0x%x.\n",
294 __func__, obj->desc.sender_id);
295 ret = FFA_ERROR_DENIED;
296 goto err_arg;
297 }
298
299 /*
300 * We don't currently support any optional flags so ensure none are
301 * requested.
302 */
303 if (obj->desc.flags != 0U && mtd_flag != 0U &&
304 (obj->desc.flags != mtd_flag)) {
305 WARN("%s: invalid memory transaction flags %u != %u\n",
306 __func__, obj->desc.flags, mtd_flag);
307 ret = -EINVAL;
308 goto err_arg;
309 }
310
311 if (obj->desc_filled == 0U) {
312 /* First fragment, descriptor header has been copied */
313 obj->desc.handle = spmc_shmem_obj_state.next_handle++;
314 obj->desc.flags |= mtd_flag;
315 }
316
317 obj->desc_filled += fragment_length;
318
319 ret = spmc_shmem_check_obj(obj);
320 if (ret != 0) {
321 goto err_bad_desc;
322 }
323
324 handle_low = (uint32_t)obj->desc.handle;
325 handle_high = obj->desc.handle >> 32;
326
327 if (obj->desc_filled != obj->desc_size) {
328 SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
329 handle_high, obj->desc_filled,
330 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
331 }
332
333 SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
334 0, 0, 0);
335
336err_bad_desc:
337err_arg:
338 spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
339 return spmc_ffa_error_return(smc_handle, FFA_ERROR_INVALID_PARAMETER);
340}
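
/*
 * Summary of the fragmentation handling implemented by spmc_ffa_fill_desc():
 * the first fragment arrives via FFA_MEM_SHARE/LEND and later fragments via
 * FFA_MEM_FRAG_TX. While desc_filled < desc_size the SPMC replies with
 * FFA_MEM_FRAG_RX, returning the handle and the number of bytes received so
 * far; once the descriptor is complete it replies with FFA_SUCCESS and the
 * new handle. Any validation failure frees the partially filled object.
 */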

/**
 * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
 * @client:             Client state.
 * @total_length:       Total length of shared memory descriptor.
 * @fragment_length:    Length of fragment of shared memory descriptor passed
 *                      in this call.
 * @address:            Not supported, must be 0.
 * @page_count:         Not supported, must be 0.
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_FRAG_RX or FFA_SUCCESS_SMC32.
 *
 * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
 * to share or lend memory from the non-secure OS to the secure OS (with no
 * stream endpoints).
 *
 * Return: 0 on success, error code on failure.
 */
long spmc_ffa_mem_send(uint32_t smc_fid,
		       bool secure_origin,
		       uint64_t total_length,
		       uint32_t fragment_length,
		       uint64_t address,
		       uint32_t page_count,
		       void *cookie,
		       void *handle,
		       uint64_t flags)
{
	long ret;
	struct spmc_shmem_obj *obj;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	ffa_mtd_flag32_t mtd_flag;

	if (address != 0U || page_count != 0U) {
		WARN("%s: custom memory region for message not supported.\n",
		     __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (secure_origin) {
		WARN("%s: unsupported share direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (fragment_length < sizeof(obj->desc)) {
		WARN("%s: bad first fragment size %u < %zu\n",
		     __func__, fragment_length, sizeof(obj->desc));
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
		mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
	} else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
		mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
	} else {
		WARN("%s: invalid memory management operation.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
	if (obj == NULL) {
		ret = FFA_ERROR_NO_MEMORY;
		goto err_unlock;
	}

	spin_lock(&mbox->lock);
	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag, handle);
	spin_unlock(&mbox->lock);

	spin_unlock(&spmc_shmem_obj_state.lock);
	return ret;

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}
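
/*
 * Caller's view of spmc_ffa_mem_send(), as a hedged sketch. The normal-world
 * driver writes the ffa_mtd_v1_0 descriptor into its TX buffer and issues
 * FFA_MEM_SHARE (or FFA_MEM_LEND) with the total and fragment lengths;
 * address and page count must be zero since a custom buffer is not
 * supported. smc_call() and its argument/return layout are illustrative
 * only, not an API provided by this file.
 *
 *	memcpy(tx_buf, desc, desc_size);
 *	ret = smc_call(FFA_MEM_SHARE_SMC32,
 *		       desc_size,	x1: total length
 *		       desc_size,	x2: fragment length (single fragment)
 *		       0, 0);		x3/x4: address and page count, unused
 *	handle = (uint64_t)ret.r3 << 32 | ret.r2;
 */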

/**
 * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
 * @client:             Client state.
 * @handle_low:         Handle_low value returned from FFA_MEM_FRAG_RX.
 * @handle_high:        Handle_high value returned from FFA_MEM_FRAG_RX.
 * @fragment_length:    Length of fragments transmitted.
 * @sender_id:          VM ID of the sender in bits [31:16].
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_FRAG_RX or FFA_SUCCESS_SMC32.
 *
 * Return: @smc_handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
			  bool secure_origin,
			  uint64_t handle_low,
			  uint64_t handle_high,
			  uint32_t fragment_length,
			  uint32_t sender_id,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	long ret;
	uint32_t desc_sender_id;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);

	struct spmc_shmem_obj *obj;
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		WARN("%s: invalid handle 0x%lx.\n", __func__, mem_handle);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
	if (sender_id != desc_sender_id) {
		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
		     sender_id, desc_sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	if (obj->desc_filled == obj->desc_size) {
		WARN("%s: object desc already filled, %zu\n", __func__,
		     obj->desc_filled);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	spin_lock(&mbox->lock);
	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, handle);
	spin_unlock(&mbox->lock);

	spin_unlock(&spmc_shmem_obj_state.lock);
	return ret;

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
 * @smc_fid:            FID of SMC.
 * @total_length:       Total length of retrieve request descriptor if this is
 *                      the first call. Otherwise (unsupported) must be 0.
 * @fragment_length:    Length of fragment of retrieve request descriptor
 *                      passed in this call. Only
 *                      @fragment_length == @total_length is supported by this
 *                      implementation.
 * @address:            Not supported, must be 0.
 * @page_count:         Not supported, must be 0.
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_RETRIEVE_RESP.
 *
 * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
 * Used by the secure OS to retrieve memory already shared by the non-secure
 * OS. If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
 * the client must call FFA_MEM_FRAG_RX until the full response has been
 * received.
 *
 * Return: @handle on success, error code on failure.
 */
long
spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
			  bool secure_origin,
			  uint32_t total_length,
			  uint32_t fragment_length,
			  uint64_t address,
			  uint32_t page_count,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	int ret;
	size_t buf_size;
	size_t copy_size;
	struct ffa_mtd_v1_0 *resp;
	const struct ffa_mtd_v1_0 *req;
	struct spmc_shmem_obj *obj = NULL;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);

	if (!secure_origin) {
		WARN("%s: unsupported retrieve req direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (address != 0U || page_count != 0U) {
		WARN("%s: custom memory region not supported.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&mbox->lock);

	req = mbox->tx_buffer;
	resp = mbox->rx_buffer;
	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	if (mbox->state != MAILBOX_STATE_EMPTY) {
		WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
		ret = FFA_ERROR_DENIED;
		goto err_unlock_mailbox;
	}

	if (fragment_length != total_length) {
		WARN("%s: fragmented retrieve request not supported.\n",
		     __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	/*
	 * Ensure endpoint count is 1, additional receivers not currently
	 * supported.
	 */
	if (req->emad_count != 1U) {
		WARN("%s: unsupported retrieve descriptor count: %u\n",
		     __func__, req->emad_count);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	if (total_length < sizeof(*req)) {
		WARN("%s: invalid length %u < %zu\n", __func__, total_length,
		     sizeof(*req));
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (obj->desc_filled != obj->desc_size) {
		WARN("%s: incomplete object desc filled %zu < size %zu\n",
		     __func__, obj->desc_filled, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
		WARN("%s: wrong sender id 0x%x != 0x%x\n",
		     __func__, req->sender_id, obj->desc.sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->tag != obj->desc.tag) {
		WARN("%s: wrong tag 0x%lx != 0x%lx\n",
		     __func__, req->tag, obj->desc.tag);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->flags != 0U) {
		if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
		    (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
			/*
			 * If the retrieve request specifies the memory
			 * transaction type, ensure it matches what we expect.
			 */
			WARN("%s: wrong mem transaction flags %x != %x\n",
			     __func__, req->flags, obj->desc.flags);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}

		if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
		    req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
			/*
			 * The current implementation does not support donate
			 * and supports no other flags.
			 */
			WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}
	}

	/* TODO: support more than one endpoint id. */
	if (req->emad_count != 0U &&
	    req->emad[0].mapd.endpoint_id !=
	    obj->desc.emad[0].mapd.endpoint_id) {
		WARN("%s: wrong receiver id 0x%x != 0x%x\n",
		     __func__, req->emad[0].mapd.endpoint_id,
		     obj->desc.emad[0].mapd.endpoint_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	mbox->state = MAILBOX_STATE_FULL;

	if (req->emad_count != 0U) {
		obj->in_use++;
	}

	copy_size = MIN(obj->desc_size, buf_size);

	memcpy(resp, &obj->desc, copy_size);

	spin_unlock(&spmc_shmem_obj_state.lock);
	spin_unlock(&mbox->lock);

	SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, obj->desc_size,
		 copy_size, 0, 0, 0, 0, 0);

err_unlock_all:
	spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, ret);
}
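
/*
 * SP-side view of the retrieve path, for reference: the secure OS writes an
 * ffa_mtd_v1_0 retrieve request (handle, sender_id, tag, flags, one emad
 * entry) into its TX buffer and invokes FFA_MEM_RETRIEVE_REQ. The SPMC copies
 * up to one RX buffer's worth of the stored descriptor back and returns
 * FFA_MEM_RETRIEVE_RESP with the total and copied sizes; if desc_size exceeds
 * the RX buffer, the remainder is fetched with FFA_MEM_FRAG_RX (see
 * spmc_ffa_mem_frag_rx() below).
 */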

/**
 * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
 * @client:             Client state.
 * @handle_low:         Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
 * @handle_high:        Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
 * @fragment_offset:    Byte offset in descriptor to resume at.
 * @sender_id:          Bit[31:16]: Endpoint id of sender if client is a
 *                      hypervisor. 0 otherwise.
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_FRAG_TX.
 *
 * Return: @smc_handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
			  bool secure_origin,
			  uint32_t handle_low,
			  uint32_t handle_high,
			  uint32_t fragment_offset,
			  uint32_t sender_id,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	int ret;
	void *src;
	size_t buf_size;
	size_t copy_size;
	size_t full_copy_size;
	uint32_t desc_sender_id;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
	struct spmc_shmem_obj *obj;

	if (!secure_origin) {
		WARN("%s: can only be called from the secure world.\n",
		     __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		WARN("%s: invalid handle 0x%lx.\n", __func__, mem_handle);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
	if (sender_id != 0U && sender_id != desc_sender_id) {
		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
		     sender_id, desc_sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	if (fragment_offset >= obj->desc_size) {
		WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
		     __func__, fragment_offset, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	spin_lock(&mbox->lock);

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (mbox->state != MAILBOX_STATE_EMPTY) {
		WARN("%s: RX Buffer is full!\n", __func__);
		ret = FFA_ERROR_DENIED;
		goto err_unlock_all;
	}

	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

	mbox->state = MAILBOX_STATE_FULL;

	full_copy_size = obj->desc_size - fragment_offset;
	copy_size = MIN(full_copy_size, buf_size);

	src = &obj->desc;

	memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);

	spin_unlock(&mbox->lock);
	spin_unlock(&spmc_shmem_obj_state.lock);

	SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
		 copy_size, sender_id, 0, 0, 0);

err_unlock_all:
	spin_unlock(&mbox->lock);
err_unlock_shmem:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
 * @client:             Client state.
 *
 * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
 * Used by the secure OS to release memory previously shared with it by the
 * non-secure OS.
 *
 * The handle to release must be in the client's (secure OS's) transmit buffer.
 *
 * Return: 0 on success, error code on failure.
 */
int spmc_ffa_mem_relinquish(uint32_t smc_fid,
			    bool secure_origin,
			    uint32_t handle_low,
			    uint32_t handle_high,
			    uint32_t fragment_offset,
			    uint32_t sender_id,
			    void *cookie,
			    void *handle,
			    uint64_t flags)
{
	int ret;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	struct spmc_shmem_obj *obj;
	const struct ffa_mem_relinquish_descriptor *req;

	if (!secure_origin) {
		WARN("%s: unsupported relinquish direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&mbox->lock);

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	req = mbox->tx_buffer;

	if (req->flags != 0U) {
		WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (obj->desc.emad_count != req->endpoint_count) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}
	for (size_t i = 0; i < req->endpoint_count; i++) {
		if (req->endpoint_array[i] !=
		    obj->desc.emad[i].mapd.endpoint_id) {
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}
	}
	if (obj->in_use == 0U) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}
	obj->in_use--;

	spin_unlock(&spmc_shmem_obj_state.lock);
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock_all:
	spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, ret);
}
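
/*
 * Shape of the request consumed by spmc_ffa_mem_relinquish(), based on the
 * fields read above. A sketch of what a secure partition would place in its
 * TX buffer before issuing FFA_MEM_RELINQUISH; own_id stands in for the
 * caller's endpoint id and is illustrative only.
 *
 *	req->handle = mem_handle;	 handle from the retrieve response
 *	req->flags = 0U;		 no flags are supported
 *	req->endpoint_count = 1U;	 must match the object's emad_count
 *	req->endpoint_array[0] = own_id; must match the descriptor's receiver
 */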

/**
 * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
 * @client:         Client state.
 * @handle_low:     Unique handle of shared memory object to reclaim.
 *                  Bit[31:0].
 * @handle_high:    Unique handle of shared memory object to reclaim.
 *                  Bit[63:32].
 * @flags:          Unsupported, ignored.
 *
 * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
 * Used by the non-secure OS to reclaim memory previously shared with the
 * secure OS.
 *
 * Return: 0 on success, error code on failure.
 */
int spmc_ffa_mem_reclaim(uint32_t smc_fid,
			 bool secure_origin,
			 uint32_t handle_low,
			 uint32_t handle_high,
			 uint32_t mem_flags,
			 uint64_t x4,
			 void *cookie,
			 void *handle,
			 uint64_t flags)
{
	int ret;
	struct spmc_shmem_obj *obj;
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

	if (secure_origin) {
		WARN("%s: unsupported reclaim direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (mem_flags != 0U) {
		WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}
	if (obj->in_use != 0U) {
		ret = FFA_ERROR_DENIED;
		goto err_unlock;
	}
	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
	spin_unlock(&spmc_shmem_obj_state.lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}
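
/*
 * Overall object lifecycle implemented in this file, for reference:
 *
 *	FFA_MEM_SHARE/LEND (+ FFA_MEM_FRAG_TX)   object allocated and filled
 *	FFA_MEM_RETRIEVE_REQ (+ FFA_MEM_FRAG_RX) in_use incremented
 *	FFA_MEM_RELINQUISH                       in_use decremented
 *	FFA_MEM_RECLAIM                          object freed once in_use is 0
 */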