/*
 * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <assert.h>
#include <errno.h>

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/object_pool.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <services/ffa_svc.h>
#include "spmc.h"
#include "spmc_shared_mem.h"

#include <platform_def.h>

/**
 * struct spmc_shmem_obj - Shared memory object.
 * @desc_size:      Size of @desc.
 * @desc_filled:    Size of @desc already received.
 * @in_use:         Number of clients that have called ffa_mem_retrieve_req
 *                  without a matching ffa_mem_relinquish call.
 * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
 */
struct spmc_shmem_obj {
        size_t desc_size;
        size_t desc_filled;
        size_t in_use;
        struct ffa_mtd desc;
};

/*
 * Declare our data structure to store the metadata of memory share requests.
 * The main datastore is allocated on a per platform basis to ensure enough
 * storage can be made available.
 * The address of the data store will be populated by the SPMC during its
 * initialization.
 */

struct spmc_shmem_obj_state spmc_shmem_obj_state = {
        /* Set start value for handle so top 32 bits are needed quickly. */
        .next_handle = 0xffffffc0U,
};
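
/*
 * Overview of the datastore (summarising the helpers below): objects are
 * packed back-to-back in spmc_shmem_obj_state.data, looked up by a linear
 * walk and compacted on free, so object pointers are only stable while the
 * state lock is held.
 */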

/**
 * spmc_shmem_obj_size - Convert from descriptor size to object size.
 * @desc_size: Size of struct ffa_memory_region_descriptor object.
 *
 * Return: Size of struct spmc_shmem_obj object.
 */
static size_t spmc_shmem_obj_size(size_t desc_size)
{
        return desc_size + offsetof(struct spmc_shmem_obj, desc);
}
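
/*
 * For illustration: a 0x100 byte descriptor occupies
 * 0x100 + offsetof(struct spmc_shmem_obj, desc) bytes of the datastore,
 * since desc is the final, variable-length member of struct spmc_shmem_obj.
 */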

/**
 * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
 * @state:      Global state.
 * @desc_size:  Size of struct ffa_memory_region_descriptor object that
 *              allocated object will hold.
 *
 * Return: Pointer to newly allocated object, or %NULL if there is not enough
 *         space left. The returned pointer is only valid while @state is
 *         locked; to use it again after unlocking @state,
 *         spmc_shmem_obj_lookup must be called.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
{
        struct spmc_shmem_obj *obj;
        size_t free = state->data_size - state->allocated;

        if (state->data == NULL) {
                ERROR("Missing shmem datastore!\n");
                return NULL;
        }

        if (spmc_shmem_obj_size(desc_size) > free) {
                WARN("%s(0x%zx) failed, free 0x%zx\n",
                     __func__, desc_size, free);
                return NULL;
        }
        obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
        obj->desc = (struct ffa_mtd) {0};
        obj->desc_size = desc_size;
        obj->desc_filled = 0;
        obj->in_use = 0;
        state->allocated += spmc_shmem_obj_size(desc_size);
        return obj;
}

/**
 * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
 * @state:      Global state.
 * @obj:        Object to free.
 *
 * Release memory used by @obj. Other objects may move, so on return all
 * pointers to struct spmc_shmem_obj object should be considered invalid, not
 * just @obj.
 *
 * The current implementation always compacts the remaining objects to simplify
 * the allocator and to avoid fragmentation.
 */

static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
                                struct spmc_shmem_obj *obj)
{
        size_t free_size = spmc_shmem_obj_size(obj->desc_size);
        uint8_t *shift_dest = (uint8_t *)obj;
        uint8_t *shift_src = shift_dest + free_size;
        size_t shift_size = state->allocated - (shift_src - state->data);

        if (shift_size != 0U) {
                memmove(shift_dest, shift_src, shift_size);
        }
        state->allocated -= free_size;
}

/**
 * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
 * @state:      Global state.
 * @handle:     Unique handle of object to return.
 *
 * Return: struct spmc_shmem_obj object with handle matching @handle.
 *         %NULL, if no object in @state->data has a matching handle.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
{
        uint8_t *curr = state->data;

        while (curr - state->data < state->allocated) {
                struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

                if (obj->desc.handle == handle) {
                        return obj;
                }
                curr += spmc_shmem_obj_size(obj->desc_size);
        }
        return NULL;
}

/**
 * spmc_shmem_obj_get_next - Get the next memory object from an offset.
 * @state:      Global state.
 * @offset:     Offset used to track which objects have previously been
 *              returned.
 *
 * Return: the next struct spmc_shmem_obj object from the provided offset.
 *         %NULL, if there are no more objects.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
{
        uint8_t *curr = state->data + *offset;

        if (curr - state->data < state->allocated) {
                struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

                *offset += spmc_shmem_obj_size(obj->desc_size);

                return obj;
        }
        return NULL;
}
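
/*
 * Typical iteration pattern (used by spmc_shmem_check_state_obj() below):
 * start with an offset of 0 and keep calling spmc_shmem_obj_get_next() with
 * the same offset variable until it returns NULL.
 */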

/*******************************************************************************
 * FF-A memory descriptor helper functions.
 ******************************************************************************/
/**
 * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
 *                           client's FF-A version.
 * @desc:         The memory transaction descriptor.
 * @index:        The index of the emad element to be accessed.
 * @ffa_version:  FF-A version of the provided structure.
 * @emad_size:    Will be populated with the size of the returned emad
 *                descriptor.
 * Return: A pointer to the requested emad structure.
 */
static void *
spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
                        uint32_t ffa_version, size_t *emad_size)
{
        uint8_t *emad;
        /*
         * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
         * format, otherwise assume it is a v1.1 format.
         */
        if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
                /* Cast our descriptor to the v1.0 format. */
                struct ffa_mtd_v1_0 *mtd_v1_0 =
                        (struct ffa_mtd_v1_0 *) desc;
                emad = (uint8_t *) &(mtd_v1_0->emad);
                *emad_size = sizeof(struct ffa_emad_v1_0);
        } else {
                if (!is_aligned(desc->emad_offset, 16)) {
                        WARN("Emad offset is not aligned.\n");
                        return NULL;
                }
                emad = ((uint8_t *) desc + desc->emad_offset);
                *emad_size = desc->emad_size;
        }
        return (emad + (*emad_size * index));
}

/**
 * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
 *                               FF-A version of the descriptor.
 * @obj:         Object containing ffa_memory_region_descriptor.
 *
 * Return: struct ffa_comp_mrd object corresponding to the composite memory
 *         region descriptor.
 */
static struct ffa_comp_mrd *
spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
{
        size_t emad_size;
        /*
         * The comp_mrd_offset field of the emad descriptor remains consistent
         * between FF-A versions therefore we can use the v1.0 descriptor here
         * in all cases.
         */
        struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
                                                             ffa_version,
                                                             &emad_size);
        /* Ensure the emad array was found. */
        if (emad == NULL) {
                return NULL;
        }

        /* Ensure the composite descriptor offset is aligned. */
        if (!is_aligned(emad->comp_mrd_offset, 8)) {
                WARN("Unaligned composite memory region descriptor offset.\n");
                return NULL;
        }

        return (struct ffa_comp_mrd *)
               ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
}

/**
 * spmc_shmem_obj_ffa_constituent_size - Calculate variable size part of obj.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: Size of ffa_constituent_memory_region_descriptors in @obj.
 */
static size_t
spmc_shmem_obj_ffa_constituent_size(struct spmc_shmem_obj *obj,
                                    uint32_t ffa_version)
{
        struct ffa_comp_mrd *comp_mrd;

        comp_mrd = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);
        if (comp_mrd == NULL) {
                return 0;
        }
        return comp_mrd->address_range_count * sizeof(struct ffa_cons_mrd);
}

/**
 * spmc_shmem_obj_validate_id - Validate that a partition ID is participating
 *                              in a given memory transaction.
 * @desc:         Descriptor of the memory transaction.
 * @sp_id:        Partition ID to validate.
 *
 * Return: true if ID is valid, else false.
 */
bool spmc_shmem_obj_validate_id(const struct ffa_mtd *desc, uint16_t sp_id)
{
        bool found = false;

        /* Validate the partition is a valid participant. */
        for (unsigned int i = 0U; i < desc->emad_count; i++) {
                size_t emad_size;
                struct ffa_emad_v1_0 *emad;

                emad = spmc_shmem_obj_get_emad(desc, i,
                                               MAKE_FFA_VERSION(1, 1),
                                               &emad_size);
                if (sp_id == emad->mapd.endpoint_id) {
                        found = true;
                        break;
                }
        }
        return found;
}

/*
 * Compare two memory regions to determine if any range overlaps with another
 * ongoing memory transaction.
 */
static bool
overlapping_memory_regions(struct ffa_comp_mrd *region1,
                           struct ffa_comp_mrd *region2)
{
        uint64_t region1_start;
        uint64_t region1_size;
        uint64_t region1_end;
        uint64_t region2_start;
        uint64_t region2_size;
        uint64_t region2_end;

        assert(region1 != NULL);
        assert(region2 != NULL);

        if (region1 == region2) {
                return true;
        }

        /*
         * Check each memory region in the request against existing
         * transactions.
         */
        for (size_t i = 0; i < region1->address_range_count; i++) {

                region1_start = region1->address_range_array[i].address;
                region1_size =
                        region1->address_range_array[i].page_count *
                        PAGE_SIZE_4KB;
                region1_end = region1_start + region1_size;

                for (size_t j = 0; j < region2->address_range_count; j++) {

                        region2_start = region2->address_range_array[j].address;
                        region2_size =
                                region2->address_range_array[j].page_count *
                                PAGE_SIZE_4KB;
                        region2_end = region2_start + region2_size;

                        /* Check whether the two ranges overlap. */
                        if (!((region2_end <= region1_start) ||
                              (region1_end <= region2_start))) {
                                WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
                                     region1_start, region1_end,
                                     region2_start, region2_end);
                                return true;
                        }
                }
        }
        return false;
}
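
/*
 * Equivalently, half-open ranges [s1, e1) and [s2, e2) overlap iff
 * (s1 < e2) && (s2 < e1); the check above is the negation of the
 * disjointness test (e2 <= s1) || (e1 <= s2).
 */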

/*******************************************************************************
 * FF-A v1.0 Memory Descriptor Conversion Helpers.
 ******************************************************************************/
/**
 * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
 *                                     converted descriptor.
 * @orig:       The original v1.0 memory transaction descriptor.
 * @desc_size:  The size of the original v1.0 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.1 format.
 */
static size_t
spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
{
        size_t size = 0;
        struct ffa_comp_mrd *mrd;
        struct ffa_emad_v1_0 *emad_array = orig->emad;

        /* Get the size of the v1.1 descriptor. */
        size += sizeof(struct ffa_mtd);

        /* Add the size of the emad descriptors. */
        size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

        /* Add the size of the composite mrds. */
        size += sizeof(struct ffa_comp_mrd);

        /* Add the size of the constituent mrds. */
        mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
              emad_array[0].comp_mrd_offset);

        /* Check the calculated address is within the memory descriptor. */
        if ((uintptr_t) mrd >= (uintptr_t)((uint8_t *) orig + desc_size)) {
                return 0;
        }
        size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

        return size;
}
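
/*
 * Layout assumed by this size calculation and by the converters below:
 * the mtd header, then the emad array, then a single composite mrd followed
 * by its constituent mrd entries.
 */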

/**
 * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
 *                                     converted descriptor.
 * @orig:       The original v1.1 memory transaction descriptor.
 * @desc_size:  The size of the original v1.1 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.0 format.
 */
static size_t
spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
{
        size_t size = 0;
        struct ffa_comp_mrd *mrd;
        struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
                                           ((uint8_t *) orig +
                                            orig->emad_offset);

        /* Get the size of the v1.0 descriptor. */
        size += sizeof(struct ffa_mtd_v1_0);

        /* Add the size of the v1.0 emad descriptors. */
        size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

        /* Add the size of the composite mrds. */
        size += sizeof(struct ffa_comp_mrd);

        /* Add the size of the constituent mrds. */
        mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
              emad_array[0].comp_mrd_offset);

        /* Check the calculated address is within the memory descriptor. */
        if ((uintptr_t) mrd >= (uintptr_t)((uint8_t *) orig + desc_size)) {
                return 0;
        }
        size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

        return size;
}

/**
 * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
 * @out_obj:    The shared memory object to populate the converted descriptor.
 * @orig:       The shared memory object containing the v1.0 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
                                     struct spmc_shmem_obj *orig)
{
        struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
        struct ffa_mtd *out = &out_obj->desc;
        struct ffa_emad_v1_0 *emad_array_in;
        struct ffa_emad_v1_0 *emad_array_out;
        struct ffa_comp_mrd *mrd_in;
        struct ffa_comp_mrd *mrd_out;

        size_t mrd_in_offset;
        size_t mrd_out_offset;
        size_t mrd_size = 0;

        /* Populate the new descriptor format from the v1.0 struct. */
        out->sender_id = mtd_orig->sender_id;
        out->memory_region_attributes = mtd_orig->memory_region_attributes;
        out->flags = mtd_orig->flags;
        out->handle = mtd_orig->handle;
        out->tag = mtd_orig->tag;
        out->emad_count = mtd_orig->emad_count;
        out->emad_size = sizeof(struct ffa_emad_v1_0);

        /*
         * We will locate the emad descriptors directly after the ffa_mtd
         * struct. This will be 8-byte aligned.
         */
        out->emad_offset = sizeof(struct ffa_mtd);

        emad_array_in = mtd_orig->emad;
        emad_array_out = (struct ffa_emad_v1_0 *)
                         ((uint8_t *) out + out->emad_offset);

        /* Copy across the emad structs. */
        for (unsigned int i = 0U; i < out->emad_count; i++) {
                memcpy(&emad_array_out[i], &emad_array_in[i],
                       sizeof(struct ffa_emad_v1_0));
        }

        /* Place the mrd descriptors after the end of the emad descriptors. */
        mrd_in_offset = emad_array_in->comp_mrd_offset;
        mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
        mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

        /* Add the size of the composite memory region descriptor. */
        mrd_size += sizeof(struct ffa_comp_mrd);

        /* Find the mrd descriptor. */
        mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

        /* Add the size of the constituent memory region descriptors. */
        mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

        /*
         * Update the offset in the emads by the delta between the input and
         * output addresses.
         */
        for (unsigned int i = 0U; i < out->emad_count; i++) {
                emad_array_out[i].comp_mrd_offset =
                        emad_array_in[i].comp_mrd_offset +
                        (mrd_out_offset - mrd_in_offset);
        }

        /* Verify that we stay within bound of the memory descriptors. */
        if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
            (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
            ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
             (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
                ERROR("%s: Invalid mrd structure.\n", __func__);
                return false;
        }

        /* Copy the mrd descriptors directly. */
        memcpy(mrd_out, mrd_in, mrd_size);

        return true;
}

/**
 * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
 *                                v1.0 memory object.
 * @out_obj:    The shared memory object to populate the v1.0 descriptor.
 * @orig:       The shared memory object containing the v1.1 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
                             struct spmc_shmem_obj *orig)
{
        struct ffa_mtd *mtd_orig = &orig->desc;
        struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
        struct ffa_emad_v1_0 *emad_in;
        struct ffa_emad_v1_0 *emad_array_in;
        struct ffa_emad_v1_0 *emad_array_out;
        struct ffa_comp_mrd *mrd_in;
        struct ffa_comp_mrd *mrd_out;

        size_t mrd_in_offset;
        size_t mrd_out_offset;
        size_t emad_out_array_size;
        size_t mrd_size = 0;

        /* Populate the v1.0 descriptor format from the v1.1 struct. */
        out->sender_id = mtd_orig->sender_id;
        out->memory_region_attributes = mtd_orig->memory_region_attributes;
        out->flags = mtd_orig->flags;
        out->handle = mtd_orig->handle;
        out->tag = mtd_orig->tag;
        out->emad_count = mtd_orig->emad_count;

        /* Determine the location of the emad array in both descriptors. */
        emad_array_in = (struct ffa_emad_v1_0 *)
                        ((uint8_t *) mtd_orig + mtd_orig->emad_offset);
        emad_array_out = out->emad;

        /* Copy across the emad structs. */
        emad_in = emad_array_in;
        for (unsigned int i = 0U; i < out->emad_count; i++) {
                memcpy(&emad_array_out[i], emad_in,
                       sizeof(struct ffa_emad_v1_0));

                emad_in += mtd_orig->emad_size;
        }

        /* Place the mrd descriptors after the end of the emad descriptors. */
        emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;

        mrd_out_offset = (uint8_t *) out->emad - (uint8_t *) out +
                         emad_out_array_size;

        mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

        mrd_in_offset = mtd_orig->emad_offset +
                        (mtd_orig->emad_size * mtd_orig->emad_count);

        /* Add the size of the composite memory region descriptor. */
        mrd_size += sizeof(struct ffa_comp_mrd);

        /* Find the mrd descriptor. */
        mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

        /* Add the size of the constituent memory region descriptors. */
        mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

        /*
         * Update the offset in the emads by the delta between the input and
         * output addresses.
         */
        emad_in = emad_array_in;

        for (unsigned int i = 0U; i < out->emad_count; i++) {
                emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
                                                    (mrd_out_offset -
                                                     mrd_in_offset);
                emad_in += mtd_orig->emad_size;
        }

        /* Verify that we stay within bound of the memory descriptors. */
        if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
            (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
            ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
             (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
                ERROR("%s: Invalid mrd structure.\n", __func__);
                return false;
        }

        /* Copy the mrd descriptors directly. */
        memcpy(mrd_out, mrd_in, mrd_size);

        return true;
}

/**
 * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
 *                                     the v1.0 format and populates the
 *                                     provided buffer.
 * @dst:              Buffer to populate v1.0 ffa_memory_region_descriptor.
 * @orig_obj:         Object containing v1.1 ffa_memory_region_descriptor.
 * @buf_size:         Size of the buffer to populate.
 * @offset:           The offset of the converted descriptor to copy.
 * @copy_size:        Will be populated with the number of bytes copied.
 * @v1_0_desc_size:   Will be populated with the total size of the v1.0
 *                    descriptor.
 *
 * Return: 0 if conversion and population succeeded.
 * Note: This function invalidates the reference to @orig_obj, therefore
 * `spmc_shmem_obj_lookup` must be called if further usage is required.
 */
static uint32_t
spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
                                  size_t buf_size, size_t offset,
                                  size_t *copy_size, size_t *v1_0_desc_size)
{
        struct spmc_shmem_obj *v1_0_obj;

        /* Calculate the size that the v1.0 descriptor will require. */
        *v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
                          &orig_obj->desc, orig_obj->desc_size);

        if (*v1_0_desc_size == 0) {
                ERROR("%s: cannot determine size of descriptor.\n",
                      __func__);
                return FFA_ERROR_INVALID_PARAMETER;
        }

        /* Get a new obj to store the v1.0 descriptor. */
        v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
                                        *v1_0_desc_size);

        if (!v1_0_obj) {
                return FFA_ERROR_NO_MEMORY;
        }

        /* Perform the conversion from v1.1 to v1.0. */
        if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
                spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
                return FFA_ERROR_INVALID_PARAMETER;
        }

        *copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
        memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);

        /*
         * We're finished with the v1.0 descriptor for now so free it.
         * Note that this will invalidate any references to the v1.1
         * descriptor.
         */
        spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);

        return 0;
}
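
/*
 * Used on the retrieve and FFA_MEM_FRAG_RX paths below whenever the caller
 * negotiated FF-A v1.0: the stored v1.1 descriptor is converted into a
 * temporary v1.0 copy, the requested window is copied into the caller's RX
 * buffer and the temporary copy is freed again.
 */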

/**
 * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, -EINVAL if constituent_memory_region_descriptor
 * offset or count is invalid.
 */
static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
                                uint32_t ffa_version)
{
        uint32_t comp_mrd_offset = 0;

        if (obj->desc.emad_count == 0U) {
                WARN("%s: unsupported attribute desc count %u.\n",
                     __func__, obj->desc.emad_count);
                return -EINVAL;
        }

        for (size_t emad_num = 0; emad_num < obj->desc.emad_count; emad_num++) {
                size_t size;
                size_t count;
                size_t expected_size;
                size_t total_page_count;
                size_t emad_size;
                size_t desc_size;
                size_t header_emad_size;
                uint32_t offset;
                struct ffa_comp_mrd *comp;
                struct ffa_emad_v1_0 *emad;

                emad = spmc_shmem_obj_get_emad(&obj->desc, emad_num,
                                               ffa_version, &emad_size);
                if (emad == NULL) {
                        WARN("%s: invalid emad structure.\n", __func__);
                        return -EINVAL;
                }

                /*
                 * Validate the calculated emad address resides within the
                 * descriptor.
                 */
                if ((uintptr_t) emad >=
                    (uintptr_t)((uint8_t *) &obj->desc + obj->desc_size)) {
                        WARN("Invalid emad access.\n");
                        return -EINVAL;
                }

                offset = emad->comp_mrd_offset;

                if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
                        desc_size = sizeof(struct ffa_mtd_v1_0);
                } else {
                        desc_size = sizeof(struct ffa_mtd);
                }

                header_emad_size = desc_size +
                        (obj->desc.emad_count * emad_size);

                if (offset < header_emad_size) {
                        WARN("%s: invalid object, offset %u < header + emad %zu\n",
                             __func__, offset, header_emad_size);
                        return -EINVAL;
                }

                size = obj->desc_size;

                if (offset > size) {
                        WARN("%s: invalid object, offset %u > total size %zu\n",
                             __func__, offset, obj->desc_size);
                        return -EINVAL;
                }
                size -= offset;

                if (size < sizeof(struct ffa_comp_mrd)) {
                        WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
                             __func__, offset, obj->desc_size);
                        return -EINVAL;
                }
                size -= sizeof(struct ffa_comp_mrd);

                count = size / sizeof(struct ffa_cons_mrd);

                comp = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);

                if (comp == NULL) {
                        WARN("%s: invalid comp_mrd offset\n", __func__);
                        return -EINVAL;
                }

                if (comp->address_range_count != count) {
                        WARN("%s: invalid object, desc count %u != %zu\n",
                             __func__, comp->address_range_count, count);
                        return -EINVAL;
                }

                expected_size = offset + sizeof(*comp) +
                                spmc_shmem_obj_ffa_constituent_size(obj,
                                                                    ffa_version);

                if (expected_size != obj->desc_size) {
                        WARN("%s: invalid object, computed size %zu != size %zu\n",
                             __func__, expected_size, obj->desc_size);
                        return -EINVAL;
                }

                if (obj->desc_filled < obj->desc_size) {
                        /*
                         * The whole descriptor has not yet been received.
                         * Skip final checks.
                         */
                        return 0;
                }

                /*
                 * The offset provided to the composite memory region descriptor
                 * should be consistent across endpoint descriptors. Store the
                 * first entry and compare against subsequent entries.
                 */
                if (comp_mrd_offset == 0) {
                        comp_mrd_offset = offset;
                } else {
                        if (comp_mrd_offset != offset) {
                                ERROR("%s: mismatching offsets provided, %u != %u\n",
                                      __func__, offset, comp_mrd_offset);
                                return -EINVAL;
                        }
                }

                total_page_count = 0;

                for (size_t i = 0; i < count; i++) {
                        total_page_count +=
                                comp->address_range_array[i].page_count;
                }
                if (comp->total_page_count != total_page_count) {
                        WARN("%s: invalid object, desc total_page_count %u != %zu\n",
                             __func__, comp->total_page_count,
                             total_page_count);
                        return -EINVAL;
                }
        }
        return 0;
}

/**
 * spmc_shmem_check_state_obj - Check if the descriptor describes memory
 *                              regions that are currently involved with an
 *                              existing memory transaction. This implies that
 *                              the memory is not in a valid state for lending.
 * @obj:    Object containing ffa_memory_region_descriptor.
 *
 * Return: 0 if object is valid, -EINVAL if invalid memory state.
 */
static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
                                      uint32_t ffa_version)
{
        size_t obj_offset = 0;
        struct spmc_shmem_obj *inflight_obj;

        struct ffa_comp_mrd *other_mrd;
        struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
                                                                  ffa_version);

        if (requested_mrd == NULL) {
                return -EINVAL;
        }

        inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
                                               &obj_offset);

        while (inflight_obj != NULL) {
                /*
                 * Don't compare the transaction to itself or to partially
                 * transmitted descriptors.
                 */
                if ((obj->desc.handle != inflight_obj->desc.handle) &&
                    (obj->desc_size == obj->desc_filled)) {
                        other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
                                                        FFA_VERSION_COMPILED);
                        if (other_mrd == NULL) {
                                return -EINVAL;
                        }
                        if (overlapping_memory_regions(requested_mrd,
                                                       other_mrd)) {
                                return -EINVAL;
                        }
                }

                inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
                                                       &obj_offset);
        }
        return 0;
}

static long spmc_ffa_fill_desc(struct mailbox *mbox,
                               struct spmc_shmem_obj *obj,
                               uint32_t fragment_length,
                               ffa_mtd_flag32_t mtd_flag,
                               uint32_t ffa_version,
                               void *smc_handle)
{
        int ret;
        size_t emad_size;
        uint32_t handle_low;
        uint32_t handle_high;
        struct ffa_emad_v1_0 *emad;
        struct ffa_emad_v1_0 *other_emad;

        if (mbox->rxtx_page_count == 0U) {
                WARN("%s: buffer pair not registered.\n", __func__);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_arg;
        }

        if (fragment_length > mbox->rxtx_page_count * PAGE_SIZE_4KB) {
                WARN("%s: bad fragment size %u > %u buffer size\n", __func__,
                     fragment_length, mbox->rxtx_page_count * PAGE_SIZE_4KB);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_arg;
        }

        if (fragment_length > obj->desc_size - obj->desc_filled) {
                WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
                     fragment_length, obj->desc_size - obj->desc_filled);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_arg;
        }

        memcpy((uint8_t *)&obj->desc + obj->desc_filled,
               (uint8_t *) mbox->tx_buffer, fragment_length);

        /* Ensure that the sender ID resides in the normal world. */
        if (ffa_is_secure_world_id(obj->desc.sender_id)) {
                WARN("%s: Invalid sender ID 0x%x.\n",
                     __func__, obj->desc.sender_id);
                ret = FFA_ERROR_DENIED;
                goto err_arg;
        }

        /* Ensure the NS bit is set to 0. */
        if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
                WARN("%s: NS mem attributes flags MBZ.\n", __func__);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_arg;
        }

        /*
         * We don't currently support any optional flags so ensure none are
         * requested.
         */
        if (obj->desc.flags != 0U && mtd_flag != 0U &&
            (obj->desc.flags != mtd_flag)) {
                WARN("%s: invalid memory transaction flags %u != %u\n",
                     __func__, obj->desc.flags, mtd_flag);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_arg;
        }

        if (obj->desc_filled == 0U) {
                /* First fragment, descriptor header has been copied */
                obj->desc.handle = spmc_shmem_obj_state.next_handle++;
                obj->desc.flags |= mtd_flag;
        }

        obj->desc_filled += fragment_length;
        ret = spmc_shmem_check_obj(obj, ffa_version);
        if (ret != 0) {
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_bad_desc;
        }

        handle_low = (uint32_t)obj->desc.handle;
        handle_high = obj->desc.handle >> 32;

        if (obj->desc_filled != obj->desc_size) {
                SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
                         handle_high, obj->desc_filled,
                         (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
        }

        /* The full descriptor has been received, perform any final checks. */

        /*
         * If a partition ID resides in the secure world validate that the
         * partition ID is for a known partition. Ignore any partition ID
         * belonging to the normal world as it is assumed the Hypervisor will
         * have validated these.
         */
        for (size_t i = 0; i < obj->desc.emad_count; i++) {
                emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
                                               &emad_size);
                if (emad == NULL) {
                        ret = FFA_ERROR_INVALID_PARAMETER;
                        goto err_bad_desc;
                }

                ffa_endpoint_id16_t ep_id = emad->mapd.endpoint_id;

                if (ffa_is_secure_world_id(ep_id)) {
                        if (spmc_get_sp_ctx(ep_id) == NULL) {
                                WARN("%s: Invalid receiver id 0x%x\n",
                                     __func__, ep_id);
                                ret = FFA_ERROR_INVALID_PARAMETER;
                                goto err_bad_desc;
                        }
                }
        }

        /* Ensure partition IDs are not duplicated. */
        for (size_t i = 0; i < obj->desc.emad_count; i++) {
                emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
                                               &emad_size);
                if (emad == NULL) {
                        ret = FFA_ERROR_INVALID_PARAMETER;
                        goto err_bad_desc;
                }
                for (size_t j = i + 1; j < obj->desc.emad_count; j++) {
                        other_emad = spmc_shmem_obj_get_emad(&obj->desc, j,
                                                             ffa_version,
                                                             &emad_size);
                        if (other_emad == NULL) {
                                ret = FFA_ERROR_INVALID_PARAMETER;
                                goto err_bad_desc;
                        }

                        if (emad->mapd.endpoint_id ==
                            other_emad->mapd.endpoint_id) {
                                WARN("%s: Duplicated endpoint id 0x%x\n",
                                     __func__, emad->mapd.endpoint_id);
                                ret = FFA_ERROR_INVALID_PARAMETER;
                                goto err_bad_desc;
                        }
                }
        }

        ret = spmc_shmem_check_state_obj(obj, ffa_version);
        if (ret) {
                ERROR("%s: invalid memory region descriptor.\n", __func__);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_bad_desc;
        }

        /*
         * Everything checks out, if the sender was using FF-A v1.0, convert
         * the descriptor format to use the v1.1 structures.
         */
        if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
                struct spmc_shmem_obj *v1_1_obj;
                uint64_t mem_handle;

                /* Calculate the size that the v1.1 descriptor will require. */
                size_t v1_1_desc_size =
                        spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
                                                          obj->desc_size);

                if (v1_1_desc_size == 0U) {
                        ERROR("%s: cannot determine size of descriptor.\n",
                              __func__);
                        goto err_arg;
                }

                /* Get a new obj to store the v1.1 descriptor. */
                v1_1_obj =
                        spmc_shmem_obj_alloc(&spmc_shmem_obj_state, v1_1_desc_size);

                if (!v1_1_obj) {
                        ret = FFA_ERROR_NO_MEMORY;
                        goto err_arg;
                }

                /* Perform the conversion from v1.0 to v1.1. */
                v1_1_obj->desc_size = v1_1_desc_size;
                v1_1_obj->desc_filled = v1_1_desc_size;
                if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
                        ERROR("%s: Could not convert mtd!\n", __func__);
                        spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
                        goto err_arg;
                }

                /*
                 * We're finished with the v1.0 descriptor so free it
                 * and continue our checks with the new v1.1 descriptor.
                 */
                mem_handle = obj->desc.handle;
                spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
                obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
                if (obj == NULL) {
                        ERROR("%s: Failed to find converted descriptor.\n",
                              __func__);
                        ret = FFA_ERROR_INVALID_PARAMETER;
                        return spmc_ffa_error_return(smc_handle, ret);
                }
        }

        /* Allow for platform specific operations to be performed. */
        ret = plat_spmc_shmem_begin(&obj->desc);
        if (ret != 0) {
                goto err_arg;
        }

        SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
                 0, 0, 0);

err_bad_desc:
err_arg:
        spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
        return spmc_ffa_error_return(smc_handle, ret);
}

/**
 * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
 * @client:             Client state.
 * @total_length:       Total length of shared memory descriptor.
 * @fragment_length:    Length of fragment of shared memory descriptor passed in
 *                      this call.
 * @address:            Not supported, must be 0.
 * @page_count:         Not supported, must be 0.
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
 * to share or lend memory from non-secure os to secure os (with no stream
 * endpoints).
 *
 * Return: 0 on success, error code on failure.
 */
long spmc_ffa_mem_send(uint32_t smc_fid,
                       bool secure_origin,
                       uint64_t total_length,
                       uint32_t fragment_length,
                       uint64_t address,
                       uint32_t page_count,
                       void *cookie,
                       void *handle,
                       uint64_t flags)

{
        long ret;
        struct spmc_shmem_obj *obj;
        struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
        ffa_mtd_flag32_t mtd_flag;
        uint32_t ffa_version = get_partition_ffa_version(secure_origin);

        if (address != 0U || page_count != 0U) {
                WARN("%s: custom memory region for message not supported.\n",
                     __func__);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        if (secure_origin) {
                WARN("%s: unsupported share direction.\n", __func__);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        /*
         * Check if the descriptor is smaller than the v1.0 descriptor. The
         * descriptor cannot be smaller than this structure.
         */
        if (fragment_length < sizeof(struct ffa_mtd_v1_0)) {
                WARN("%s: bad first fragment size %u < %zu\n",
                     __func__, fragment_length, sizeof(struct ffa_mtd_v1_0));
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
                mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
        } else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
                mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
        } else {
                WARN("%s: invalid memory management operation.\n", __func__);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        spin_lock(&spmc_shmem_obj_state.lock);
        obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
        if (obj == NULL) {
                ret = FFA_ERROR_NO_MEMORY;
                goto err_unlock;
        }

        spin_lock(&mbox->lock);
        ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
                                 ffa_version, handle);
        spin_unlock(&mbox->lock);

        spin_unlock(&spmc_shmem_obj_state.lock);
        return ret;

err_unlock:
        spin_unlock(&spmc_shmem_obj_state.lock);
        return spmc_ffa_error_return(handle, ret);
}
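
/*
 * Note: if the first fragment did not contain the full descriptor,
 * spmc_ffa_fill_desc() returns FFA_MEM_FRAG_RX and the sender supplies the
 * remaining fragments through spmc_ffa_mem_frag_tx() below.
 */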

/**
 * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
 * @client:             Client state.
 * @handle_low:         Handle_low value returned from FFA_MEM_FRAG_RX.
 * @handle_high:        Handle_high value returned from FFA_MEM_FRAG_RX.
 * @fragment_length:    Length of fragments transmitted.
 * @sender_id:          Vmid of sender in bits [31:16]
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Return: @smc_handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
                          bool secure_origin,
                          uint64_t handle_low,
                          uint64_t handle_high,
                          uint32_t fragment_length,
                          uint32_t sender_id,
                          void *cookie,
                          void *handle,
                          uint64_t flags)
{
        long ret;
        uint32_t desc_sender_id;
        uint32_t ffa_version = get_partition_ffa_version(secure_origin);
        struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);

        struct spmc_shmem_obj *obj;
        uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

        spin_lock(&spmc_shmem_obj_state.lock);

        obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
        if (obj == NULL) {
                WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
                     __func__, mem_handle);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock;
        }

        desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
        if (sender_id != desc_sender_id) {
                WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
                     sender_id, desc_sender_id);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock;
        }

        if (obj->desc_filled == obj->desc_size) {
                WARN("%s: object desc already filled, %zu\n", __func__,
                     obj->desc_filled);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock;
        }

        spin_lock(&mbox->lock);
        ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
                                 handle);
        spin_unlock(&mbox->lock);

        spin_unlock(&spmc_shmem_obj_state.lock);
        return ret;

err_unlock:
        spin_unlock(&spmc_shmem_obj_state.lock);
        return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor
 *                                    if the caller implements a version greater
 *                                    than FF-A 1.0 or if they have requested
 *                                    the functionality.
 *                                    TODO: We are assuming that the caller is
 *                                    an SP. To support retrieval from the
 *                                    normal world this function will need to be
 *                                    expanded accordingly.
 * @resp:       Descriptor populated in caller's RX buffer.
 * @sp_ctx:     Context of the calling SP.
 */
void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
                                      struct secure_partition_desc *sp_ctx)
{
        if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
            sp_ctx->ns_bit_requested) {
                /*
                 * Currently memory senders must reside in the normal
                 * world, and we do not have the functionality to change
                 * the state of memory dynamically. Therefore we can always set
                 * the NS bit to 1.
                 */
                resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
        }
}

/**
 * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
 * @smc_fid:            FID of SMC
 * @total_length:       Total length of retrieve request descriptor if this is
 *                      the first call. Otherwise (unsupported) must be 0.
 * @fragment_length:    Length of fragment of retrieve request descriptor passed
 *                      in this call. Only @fragment_length == @total_length is
 *                      supported by this implementation.
 * @address:            Not supported, must be 0.
 * @page_count:         Not supported, must be 0.
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_RETRIEVE_RESP.
 *
 * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
 * Used by secure os to retrieve memory already shared by non-secure os.
 * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
 * the client must call FFA_MEM_FRAG_RX until the full response has been
 * received.
 *
 * Return: @handle on success, error code on failure.
 */
long
spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
                          bool secure_origin,
                          uint32_t total_length,
                          uint32_t fragment_length,
                          uint64_t address,
                          uint32_t page_count,
                          void *cookie,
                          void *handle,
                          uint64_t flags)
{
        int ret;
        size_t buf_size;
        size_t copy_size = 0;
        size_t min_desc_size;
        size_t out_desc_size = 0;

        /*
         * Currently we are only accessing fields that are the same in both the
         * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
         * here. We only need to validate against the appropriate struct size.
         */
        struct ffa_mtd *resp;
        const struct ffa_mtd *req;
        struct spmc_shmem_obj *obj = NULL;
        struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
        uint32_t ffa_version = get_partition_ffa_version(secure_origin);
        struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();

        if (!secure_origin) {
                WARN("%s: unsupported retrieve req direction.\n", __func__);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        if (address != 0U || page_count != 0U) {
                WARN("%s: custom memory region not supported.\n", __func__);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        spin_lock(&mbox->lock);

        req = mbox->tx_buffer;
        resp = mbox->rx_buffer;
        buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

        if (mbox->rxtx_page_count == 0U) {
                WARN("%s: buffer pair not registered.\n", __func__);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_mailbox;
        }

        if (mbox->state != MAILBOX_STATE_EMPTY) {
                WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
                ret = FFA_ERROR_DENIED;
                goto err_unlock_mailbox;
        }

        if (fragment_length != total_length) {
                WARN("%s: fragmented retrieve request not supported.\n",
                     __func__);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_mailbox;
        }

        if (req->emad_count == 0U) {
                WARN("%s: unsupported attribute desc count %u.\n",
                     __func__, req->emad_count);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_mailbox;
        }

        /* Determine the appropriate minimum descriptor size. */
        if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
                min_desc_size = sizeof(struct ffa_mtd_v1_0);
        } else {
                min_desc_size = sizeof(struct ffa_mtd);
        }
        if (total_length < min_desc_size) {
                WARN("%s: invalid length %u < %zu\n", __func__, total_length,
                     min_desc_size);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_mailbox;
        }

        spin_lock(&spmc_shmem_obj_state.lock);

        obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
        if (obj == NULL) {
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        if (obj->desc_filled != obj->desc_size) {
                WARN("%s: incomplete object desc filled %zu < size %zu\n",
                     __func__, obj->desc_filled, obj->desc_size);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
                WARN("%s: wrong sender id 0x%x != 0x%x\n",
                     __func__, req->sender_id, obj->desc.sender_id);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        if (req->emad_count != 0U && req->tag != obj->desc.tag) {
                WARN("%s: wrong tag 0x%lx != 0x%lx\n",
                     __func__, req->tag, obj->desc.tag);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
                WARN("%s: mismatch of endpoint counts %u != %u\n",
                     __func__, req->emad_count, obj->desc.emad_count);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        /* Ensure the NS bit is set to 0 in the request. */
        if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
                WARN("%s: NS mem attributes flags MBZ.\n", __func__);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        if (req->flags != 0U) {
                if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
                    (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
                        /*
                         * If the retrieve request specifies the memory
                         * transaction ensure it matches what we expect.
                         */
                        WARN("%s: wrong mem transaction flags %x != %x\n",
                             __func__, req->flags, obj->desc.flags);
                        ret = FFA_ERROR_INVALID_PARAMETER;
                        goto err_unlock_all;
                }

                if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
                    req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
                        /*
                         * Current implementation does not support donate and
                         * it supports no other flags.
                         */
                        WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
                        ret = FFA_ERROR_INVALID_PARAMETER;
                        goto err_unlock_all;
                }
        }

        /* Validate the caller is a valid participant. */
        if (!spmc_shmem_obj_validate_id(&obj->desc, sp_ctx->sp_id)) {
                WARN("%s: Invalid endpoint ID (0x%x).\n",
                     __func__, sp_ctx->sp_id);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        /* Validate that the provided emad offset and structure is valid. */
        for (size_t i = 0; i < req->emad_count; i++) {
                size_t emad_size;
                struct ffa_emad_v1_0 *emad;

                emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
                                               &emad_size);
                if (emad == NULL) {
                        WARN("%s: invalid emad structure.\n", __func__);
                        ret = FFA_ERROR_INVALID_PARAMETER;
                        goto err_unlock_all;
                }

                if ((uintptr_t) emad >= (uintptr_t)
                                        ((uint8_t *) req + total_length)) {
                        WARN("Invalid emad access.\n");
                        ret = FFA_ERROR_INVALID_PARAMETER;
                        goto err_unlock_all;
                }
        }

        /*
         * Validate all the endpoints match in the case of multiple
         * borrowers. We don't mandate that the order of the borrowers
         * must match in the descriptors therefore check to see if the
         * endpoints match in any order.
         */
        for (size_t i = 0; i < req->emad_count; i++) {
                bool found = false;
                size_t emad_size;
                struct ffa_emad_v1_0 *emad;
                struct ffa_emad_v1_0 *other_emad;

                emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
                                               &emad_size);
                if (emad == NULL) {
                        ret = FFA_ERROR_INVALID_PARAMETER;
                        goto err_unlock_all;
                }

                for (size_t j = 0; j < obj->desc.emad_count; j++) {
                        other_emad = spmc_shmem_obj_get_emad(
                                        &obj->desc, j, MAKE_FFA_VERSION(1, 1),
                                        &emad_size);

                        if (other_emad == NULL) {
                                ret = FFA_ERROR_INVALID_PARAMETER;
                                goto err_unlock_all;
                        }

                        if (req->emad_count &&
                            emad->mapd.endpoint_id ==
                            other_emad->mapd.endpoint_id) {
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        WARN("%s: invalid receiver id (0x%x).\n",
                             __func__, emad->mapd.endpoint_id);
                        ret = FFA_ERROR_INVALID_PARAMETER;
                        goto err_unlock_all;
                }
        }

        mbox->state = MAILBOX_STATE_FULL;

        if (req->emad_count != 0U) {
                obj->in_use++;
        }

        /*
         * If the caller is v1.0 convert the descriptor, otherwise copy
         * directly.
         */
        if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
                ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
                                                        &copy_size,
                                                        &out_desc_size);
                if (ret != 0U) {
                        ERROR("%s: Failed to process descriptor.\n", __func__);
                        goto err_unlock_all;
                }
        } else {
                copy_size = MIN(obj->desc_size, buf_size);
                out_desc_size = obj->desc_size;

                memcpy(resp, &obj->desc, copy_size);
        }

        /* Set the NS bit in the response if applicable. */
        spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);

        spin_unlock(&spmc_shmem_obj_state.lock);
        spin_unlock(&mbox->lock);

        SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
                 copy_size, 0, 0, 0, 0, 0);

err_unlock_all:
        spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
        spin_unlock(&mbox->lock);
        return spmc_ffa_error_return(handle, ret);
}
1548
1549/**
1550 * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
1551 * @client: Client state.
1552 * @handle_low: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
1553 * @handle_high: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
1554 * @fragment_offset: Byte offset in descriptor to resume at.
1555 * @sender_id: Bit[31:16]: Endpoint id of sender if client is a
1556 * hypervisor. 0 otherwise.
1557 * @smc_handle: Handle passed to smc call. Used to return
1558 * FFA_MEM_FRAG_TX.
1559 *
1560 * Return: @smc_handle on success, error code on failure.
1561 */
1562long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
1563 bool secure_origin,
1564 uint32_t handle_low,
1565 uint32_t handle_high,
1566 uint32_t fragment_offset,
1567 uint32_t sender_id,
1568 void *cookie,
1569 void *handle,
1570 uint64_t flags)
1571{
1572 int ret;
1573 void *src;
1574 size_t buf_size;
1575 size_t copy_size;
1576 size_t full_copy_size;
1577 uint32_t desc_sender_id;
1578 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1579 uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1580 struct spmc_shmem_obj *obj;
Marc Bonnicid1907f02022-04-19 17:42:53 +01001581 uint32_t ffa_version = get_partition_ffa_version(secure_origin);
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001582
1583 if (!secure_origin) {
1584 WARN("%s: can only be called from swld.\n",
1585 __func__);
1586 return spmc_ffa_error_return(handle,
1587 FFA_ERROR_INVALID_PARAMETER);
1588 }
1589
1590 spin_lock(&spmc_shmem_obj_state.lock);
1591
1592 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1593 if (obj == NULL) {
1594 WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
1595 __func__, mem_handle);
1596 ret = FFA_ERROR_INVALID_PARAMETER;
1597 goto err_unlock_shmem;
1598 }
1599
1600 desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1601 if (sender_id != 0U && sender_id != desc_sender_id) {
1602 WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1603 sender_id, desc_sender_id);
1604 ret = FFA_ERROR_INVALID_PARAMETER;
1605 goto err_unlock_shmem;
1606 }
1607
1608 if (fragment_offset >= obj->desc_size) {
1609 WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
1610 __func__, fragment_offset, obj->desc_size);
1611 ret = FFA_ERROR_INVALID_PARAMETER;
1612 goto err_unlock_shmem;
1613 }
1614
1615 spin_lock(&mbox->lock);
1616
1617 if (mbox->rxtx_page_count == 0U) {
1618 WARN("%s: buffer pair not registered.\n", __func__);
1619 ret = FFA_ERROR_INVALID_PARAMETER;
1620 goto err_unlock_all;
1621 }
1622
1623 if (mbox->state != MAILBOX_STATE_EMPTY) {
1624 WARN("%s: RX Buffer is full!\n", __func__);
1625 ret = FFA_ERROR_DENIED;
1626 goto err_unlock_all;
1627 }
1628
1629 buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1630
1631 mbox->state = MAILBOX_STATE_FULL;
1632
Marc Bonnicid1907f02022-04-19 17:42:53 +01001633 /*
1634 * If the caller is a v1.0 FF-A endpoint, convert the descriptor;
1635 * otherwise copy it directly.
1636 */
1637 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1638 size_t out_desc_size;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001639
Marc Bonnicid1907f02022-04-19 17:42:53 +01001640 ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
1641 buf_size,
1642 fragment_offset,
1643 &copy_size,
1644 &out_desc_size);
1645 if (ret != 0U) {
1646 ERROR("%s: Failed to process descriptor.\n", __func__);
1647 goto err_unlock_all;
1648 }
1649 } else {
1650 full_copy_size = obj->desc_size - fragment_offset;
1651 copy_size = MIN(full_copy_size, buf_size);
1652
1653 src = &obj->desc;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001654
Marc Bonnicid1907f02022-04-19 17:42:53 +01001655 memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
1656 }
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001657
1658 spin_unlock(&mbox->lock);
1659 spin_unlock(&spmc_shmem_obj_state.lock);
1660
1661 SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
1662 copy_size, sender_id, 0, 0, 0);
1663
1664err_unlock_all:
1665 spin_unlock(&mbox->lock);
1666err_unlock_shmem:
1667 spin_unlock(&spmc_shmem_obj_state.lock);
1668 return spmc_ffa_error_return(handle, ret);
1669}
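
/*
 * Example (illustrative sketch): the fragmentation loop the handler above
 * serves. When FFA_MEM_RETRIEVE_RESP, or a previous FFA_MEM_FRAG_TX, reports
 * that only part of the descriptor fits in the RX buffer, the receiver keeps
 * calling FFA_MEM_FRAG_RX with the running byte offset until the whole
 * descriptor has been copied. ffa_smc(), ffa_rx_release() and desc_copy are
 * assumed helpers/storage for illustration, not part of this file.
 *
 *   size_t offset = frag_len;                   // bytes already received
 *
 *   while (offset < total_len) {
 *           ret = ffa_smc(FFA_MEM_FRAG_RX, (uint32_t)mem_handle,
 *                         (uint32_t)(mem_handle >> 32), offset, 0, 0, 0, 0);
 *           if (ret.x[0] != FFA_MEM_FRAG_TX)
 *                   break;                      // SPMC returned FFA_ERROR
 *           frag_len = ret.x[3];                // bytes placed in the RX buffer
 *           memcpy((uint8_t *)desc_copy + offset, rx_buf, frag_len);
 *           offset += frag_len;
 *           ffa_rx_release();                   // RX buffer must be empty again
 *   }
 */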
1670
1671/**
1672 * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
1673 * @secure_origin: True if the call originated from the secure world.
1674 *
1675 * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
1676 * Used by the secure OS to release memory previously shared by the non-secure OS.
1677 *
1678 * The handle to release must be in the caller's (secure OS's) transmit buffer.
1679 *
1680 * Return: 0 on success, error code on failure.
1681 */
1682int spmc_ffa_mem_relinquish(uint32_t smc_fid,
1683 bool secure_origin,
1684 uint32_t handle_low,
1685 uint32_t handle_high,
1686 uint32_t fragment_offset,
1687 uint32_t sender_id,
1688 void *cookie,
1689 void *handle,
1690 uint64_t flags)
1691{
1692 int ret;
1693 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1694 struct spmc_shmem_obj *obj;
1695 const struct ffa_mem_relinquish_descriptor *req;
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001696 struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001697
1698 if (!secure_origin) {
1699 WARN("%s: unsupported relinquish direction.\n", __func__);
1700 return spmc_ffa_error_return(handle,
1701 FFA_ERROR_INVALID_PARAMETER);
1702 }
1703
1704 spin_lock(&mbox->lock);
1705
1706 if (mbox->rxtx_page_count == 0U) {
1707 WARN("%s: buffer pair not registered.\n", __func__);
1708 ret = FFA_ERROR_INVALID_PARAMETER;
1709 goto err_unlock_mailbox;
1710 }
1711
1712 req = mbox->tx_buffer;
1713
1714 if (req->flags != 0U) {
1715 WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
1716 ret = FFA_ERROR_INVALID_PARAMETER;
1717 goto err_unlock_mailbox;
1718 }
1719
Marc Bonnici336630f2022-01-13 11:39:10 +00001720 if (req->endpoint_count == 0) {
1721 WARN("%s: endpoint count cannot be 0.\n", __func__);
1722 ret = FFA_ERROR_INVALID_PARAMETER;
1723 goto err_unlock_mailbox;
1724 }
1725
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001726 spin_lock(&spmc_shmem_obj_state.lock);
1727
1728 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1729 if (obj == NULL) {
1730 ret = FFA_ERROR_INVALID_PARAMETER;
1731 goto err_unlock_all;
1732 }
1733
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001734 /*
1735 * Validate that the endpoint ID was populated correctly. We don't currently
1736 * support proxy endpoints, so the endpoint count must always be 1.
1737 */
1738 if (req->endpoint_count != 1U) {
1739 WARN("%s: unsupported endpoint count %u != 1\n", __func__,
1740 req->endpoint_count);
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001741 ret = FFA_ERROR_INVALID_PARAMETER;
1742 goto err_unlock_all;
1743 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001744
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001745 /* Validate provided endpoint ID matches the partition ID. */
1746 if (req->endpoint_array[0] != sp_ctx->sp_id) {
1747 WARN("%s: invalid endpoint ID %u != %u\n", __func__,
1748 req->endpoint_array[0], sp_ctx->sp_id);
1749 ret = FFA_ERROR_INVALID_PARAMETER;
1750 goto err_unlock_all;
1751 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001752
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001753 /* Validate the caller is a valid participant. */
1754 if (!spmc_shmem_obj_validate_id(&obj->desc, sp_ctx->sp_id)) {
1755 WARN("%s: Invalid endpoint ID (0x%x).\n",
1756 __func__, req->endpoint_array[0]);
1757 ret = FFA_ERROR_INVALID_PARAMETER;
1758 goto err_unlock_all;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001759 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001760
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001761 if (obj->in_use == 0U) {
1762 ret = FFA_ERROR_INVALID_PARAMETER;
1763 goto err_unlock_all;
1764 }
1765 obj->in_use--;
1766
1767 spin_unlock(&spmc_shmem_obj_state.lock);
1768 spin_unlock(&mbox->lock);
1769
1770 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1771
1772err_unlock_all:
1773 spin_unlock(&spmc_shmem_obj_state.lock);
1774err_unlock_mailbox:
1775 spin_unlock(&mbox->lock);
1776 return spmc_ffa_error_return(handle, ret);
1777}
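
/*
 * Example (illustrative sketch): the TX buffer contents a secure partition
 * would prepare to satisfy the checks above (flags == 0, a single endpoint,
 * and an endpoint ID matching its own) before invoking FFA_MEM_RELINQUISH.
 * tx_buf, own_id and ffa_smc() are assumptions for illustration.
 *
 *   struct ffa_mem_relinquish_descriptor *rel = tx_buf;
 *
 *   rel->handle = mem_handle;        // handle from the original share/lend
 *   rel->flags = 0U;                 // no relinquish flags are supported
 *   rel->endpoint_count = 1U;        // proxy endpoints are not supported
 *   rel->endpoint_array[0] = own_id; // must match the calling SP's ID
 *
 *   ret = ffa_smc(FFA_MEM_RELINQUISH, 0, 0, 0, 0, 0, 0, 0);
 *   // On success x0 is FFA_SUCCESS_SMC32 and the SPMC drops obj->in_use.
 */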
1778
1779/**
1780 * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
1781 * @secure_origin: True if the call originated from the secure world.
1782 * @handle_low: Unique handle of shared memory object to reclaim. Bit[31:0].
1783 * @handle_high: Unique handle of shared memory object to reclaim.
1784 * Bit[63:32].
1785 * @mem_flags: Memory transaction flags. No flags are supported; must be zero.
1786 *
1787 * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
1788 * Used by the non-secure OS to reclaim memory previously shared with the secure OS.
1789 *
1790 * Return: 0 on success, error code on failure.
1791 */
1792int spmc_ffa_mem_reclaim(uint32_t smc_fid,
1793 bool secure_origin,
1794 uint32_t handle_low,
1795 uint32_t handle_high,
1796 uint32_t mem_flags,
1797 uint64_t x4,
1798 void *cookie,
1799 void *handle,
1800 uint64_t flags)
1801{
1802 int ret;
1803 struct spmc_shmem_obj *obj;
1804 uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1805
1806 if (secure_origin) {
1807 WARN("%s: unsupported reclaim direction.\n", __func__);
1808 return spmc_ffa_error_return(handle,
1809 FFA_ERROR_INVALID_PARAMETER);
1810 }
1811
1812 if (mem_flags != 0U) {
1813 WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
1814 return spmc_ffa_error_return(handle,
1815 FFA_ERROR_INVALID_PARAMETER);
1816 }
1817
1818 spin_lock(&spmc_shmem_obj_state.lock);
1819
1820 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1821 if (obj == NULL) {
1822 ret = FFA_ERROR_INVALID_PARAMETER;
1823 goto err_unlock;
1824 }
1825 if (obj->in_use != 0U) {
1826 ret = FFA_ERROR_DENIED;
1827 goto err_unlock;
1828 }
Marc Bonnici503320e2022-02-21 15:02:36 +00001829
Marc Bonnici82e28f12022-10-18 13:39:48 +01001830 if (obj->desc_filled != obj->desc_size) {
1831 WARN("%s: incomplete object desc filled %zu < size %zu\n",
1832 __func__, obj->desc_filled, obj->desc_size);
1833 ret = FFA_ERROR_INVALID_PARAMETER;
1834 goto err_unlock;
1835 }
1836
Marc Bonnici503320e2022-02-21 15:02:36 +00001837 /* Allow for platform specific operations to be performed. */
1838 ret = plat_spmc_shmem_reclaim(&obj->desc);
1839 if (ret != 0) {
1840 goto err_unlock;
1841 }
1842
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001843 spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1844 spin_unlock(&spmc_shmem_obj_state.lock);
1845
1846 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1847
1848err_unlock:
1849 spin_unlock(&spmc_shmem_obj_state.lock);
1850 return spmc_ffa_error_return(handle, ret);
1851}
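
/*
 * Example (illustrative sketch): the normal-world view of the reclaim step
 * the handler above implements. Reclaim only succeeds once every borrower has
 * relinquished the memory; while obj->in_use is non-zero the SPMC returns
 * FFA_ERROR_DENIED. ffa_smc() and the error-register layout shown are
 * assumptions consistent with the FF-A convention of reporting the error code
 * in w2 of FFA_ERROR.
 *
 *   ret = ffa_smc(FFA_MEM_RECLAIM, (uint32_t)mem_handle,
 *                 (uint32_t)(mem_handle >> 32), 0, 0, 0, 0, 0);  // flags == 0
 *
 *   if (ret.x[0] == FFA_ERROR && (int32_t)ret.x[2] == FFA_ERROR_DENIED) {
 *           // An SP still holds the memory; retry after it relinquishes.
 *   }
 */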