/*
 * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <assert.h>
#include <errno.h>
#include <inttypes.h>

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/object_pool.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <services/ffa_svc.h>
#include "spmc.h"
#include "spmc_shared_mem.h"

#include <platform_def.h>

/**
 * struct spmc_shmem_obj - Shared memory object.
 * @desc_size: Size of @desc.
 * @desc_filled: Size of @desc already received.
 * @in_use: Number of clients that have called ffa_mem_retrieve_req
 *          without a matching ffa_mem_relinquish call.
 * @desc: FF-A memory region descriptor passed in ffa_mem_share.
 */
struct spmc_shmem_obj {
	size_t desc_size;
	size_t desc_filled;
	size_t in_use;
	struct ffa_mtd desc;
};

/*
 * Declare our data structure to store the metadata of memory share requests.
 * The main datastore is allocated on a per platform basis to ensure enough
 * storage can be made available.
 * The address of the data store will be populated by the SPMC during its
 * initialization.
 */

struct spmc_shmem_obj_state spmc_shmem_obj_state = {
	/* Set start value for handle so top 32 bits are needed quickly. */
	.next_handle = 0xffffffc0U,
};

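/*
 * Datastore model (a summary of the code below): objects are carved out of
 * the platform-provided buffer at state->data back to back, so the pool
 * behaves as a bump allocator with compaction. spmc_shmem_obj_alloc()
 * appends at state->allocated; spmc_shmem_obj_free() memmove()s the tail
 * down over the freed object, which is why object pointers must be looked
 * up again by handle after any free. Handles count up monotonically from
 * 0xffffffc0, so the 64-bit handle path is exercised early and values are
 * not recycled while the SPMC runs.
 */
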
/**
 * spmc_shmem_obj_size - Convert from descriptor size to object size.
 * @desc_size: Size of struct ffa_memory_region_descriptor object.
 *
 * Return: Size of struct spmc_shmem_obj object.
 */
static size_t spmc_shmem_obj_size(size_t desc_size)
{
	return desc_size + offsetof(struct spmc_shmem_obj, desc);
}

/**
 * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
 * @state: Global state.
 * @desc_size: Size of struct ffa_memory_region_descriptor object that
 *             allocated object will hold.
 *
 * Return: Pointer to newly allocated object, or %NULL if there is not enough
 *         space left. The returned pointer is only valid while @state is
 *         locked; to use it again after unlocking @state,
 *         spmc_shmem_obj_lookup must be called.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
{
	struct spmc_shmem_obj *obj;
	size_t free = state->data_size - state->allocated;
	size_t obj_size;

	if (state->data == NULL) {
		ERROR("Missing shmem datastore!\n");
		return NULL;
	}

	obj_size = spmc_shmem_obj_size(desc_size);

	/* Ensure the obj size has not overflowed. */
	if (obj_size < desc_size) {
		WARN("%s(0x%zx) desc_size overflow\n",
		     __func__, desc_size);
		return NULL;
	}

	if (obj_size > free) {
		WARN("%s(0x%zx) failed, free 0x%zx\n",
		     __func__, desc_size, free);
		return NULL;
	}
	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
	obj->desc = (struct ffa_mtd) {0};
	obj->desc_size = desc_size;
	obj->desc_filled = 0;
	obj->in_use = 0;
	state->allocated += obj_size;
	return obj;
}

/**
 * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
 * @state: Global state.
 * @obj: Object to free.
 *
 * Release memory used by @obj. Other objects may move, so on return all
 * pointers to struct spmc_shmem_obj object should be considered invalid, not
 * just @obj.
 *
 * The current implementation always compacts the remaining objects to simplify
 * the allocator and to avoid fragmentation.
 */

static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
				struct spmc_shmem_obj *obj)
{
	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
	uint8_t *shift_dest = (uint8_t *)obj;
	uint8_t *shift_src = shift_dest + free_size;
	size_t shift_size = state->allocated - (shift_src - state->data);

	if (shift_size != 0U) {
		memmove(shift_dest, shift_src, shift_size);
	}
	state->allocated -= free_size;
}

/**
 * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
 * @state: Global state.
 * @handle: Unique handle of object to return.
 *
 * Return: struct spmc_shmem_obj object with handle matching @handle.
 *         %NULL, if no object in @state->data has a matching handle.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
{
	uint8_t *curr = state->data;

	while (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		if (obj->desc.handle == handle) {
			return obj;
		}
		curr += spmc_shmem_obj_size(obj->desc_size);
	}
	return NULL;
}

/**
 * spmc_shmem_obj_get_next - Get the next memory object from an offset.
 * @state: Global state.
 * @offset: Offset used to track which objects have previously been
 *          returned.
 *
 * Return: the next struct spmc_shmem_obj object from the provided
 *         offset.
 *         %NULL, if there are no more objects.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
{
	uint8_t *curr = state->data + *offset;

	if (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		*offset += spmc_shmem_obj_size(obj->desc_size);

		return obj;
	}
	return NULL;
}

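/*
 * Note on iteration: spmc_shmem_obj_lookup() and spmc_shmem_obj_get_next()
 * both walk the packed pool linearly. Because spmc_shmem_obj_free() compacts
 * the pool, neither a cached object pointer nor a saved offset remains
 * meaningful across a free; iterations that may free objects must restart.
 */
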
/*******************************************************************************
 * FF-A memory descriptor helper functions.
 ******************************************************************************/
/**
 * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
 *                           client's FF-A version.
 * @desc: The memory transaction descriptor.
 * @index: The index of the emad element to be accessed.
 * @ffa_version: FF-A version of the provided structure.
 * @emad_size: Will be populated with the size of the returned emad
 *             descriptor.
 * Return: A pointer to the requested emad structure.
 */
static void *
spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
			uint32_t ffa_version, size_t *emad_size)
{
	uint8_t *emad;
	/*
	 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
	 * format, otherwise assume it is a v1.1 format.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		/* Cast our descriptor to the v1.0 format. */
		struct ffa_mtd_v1_0 *mtd_v1_0 =
			(struct ffa_mtd_v1_0 *) desc;
		emad = (uint8_t *) &(mtd_v1_0->emad);
		*emad_size = sizeof(struct ffa_emad_v1_0);
	} else {
		if (!is_aligned(desc->emad_offset, 16)) {
			WARN("Emad offset is not aligned.\n");
			return NULL;
		}
		emad = ((uint8_t *) desc + desc->emad_offset);
		*emad_size = desc->emad_size;
	}
	return (emad + (*emad_size * index));
}

/**
 * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
 *                               FF-A version of the descriptor.
 * @obj: Object containing ffa_memory_region_descriptor.
 * @ffa_version: FF-A version of the provided descriptor.
 *
 * Return: struct ffa_comp_mrd object corresponding to the composite memory
 *         region descriptor.
 */
static struct ffa_comp_mrd *
spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
{
	size_t emad_size;
	/*
	 * The comp_mrd_offset field of the emad descriptor remains consistent
	 * between FF-A versions therefore we can use the v1.0 descriptor here
	 * in all cases.
	 */
	struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
							     ffa_version,
							     &emad_size);
	/* Ensure the emad array was found. */
	if (emad == NULL) {
		return NULL;
	}

	/* Ensure the composite descriptor offset is aligned. */
	if (!is_aligned(emad->comp_mrd_offset, 8)) {
		WARN("Unaligned composite memory region descriptor offset.\n");
		return NULL;
	}

	return (struct ffa_comp_mrd *)
	       ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
}

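/*
 * For reference, the v1.1 transaction descriptor layout assumed by the
 * accessors above (offsets relative to the start of the ffa_mtd):
 *
 *   +---------------------------+ 0
 *   | struct ffa_mtd            |
 *   +---------------------------+ emad_offset
 *   | emad[0..emad_count-1]     |  each entry emad_size bytes
 *   +---------------------------+ emad[i].comp_mrd_offset
 *   | struct ffa_comp_mrd       |
 *   | struct ffa_cons_mrd []    |  address_range_count entries
 *   +---------------------------+
 *
 * In the v1.0 layout the emad array instead follows the fixed ffa_mtd_v1_0
 * header directly, with a fixed stride of sizeof(struct ffa_emad_v1_0).
 */
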
/**
 * spmc_shmem_obj_ffa_constituent_size - Calculate variable size part of obj.
 * @obj: Object containing ffa_memory_region_descriptor.
 * @ffa_version: FF-A version of the provided descriptor.
 *
 * Return: Size of ffa_constituent_memory_region_descriptors in @obj.
 */
static size_t
spmc_shmem_obj_ffa_constituent_size(struct spmc_shmem_obj *obj,
				    uint32_t ffa_version)
{
	struct ffa_comp_mrd *comp_mrd;

	comp_mrd = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);
	if (comp_mrd == NULL) {
		return 0;
	}
	return comp_mrd->address_range_count * sizeof(struct ffa_cons_mrd);
}

/**
 * spmc_shmem_obj_validate_id - Validate a partition ID is participating in
 *                              a given memory transaction.
 * @obj: The shared memory object containing the descriptor
 *       of the memory transaction.
 * @sp_id: Partition ID to validate.
 * Return: true if ID is valid, else false.
 */
bool spmc_shmem_obj_validate_id(struct spmc_shmem_obj *obj, uint16_t sp_id)
{
	bool found = false;
	struct ffa_mtd *desc = &obj->desc;
	size_t desc_size = obj->desc_size;

	/* Validate the partition is a valid participant. */
	for (unsigned int i = 0U; i < desc->emad_count; i++) {
		size_t emad_size;
		struct ffa_emad_v1_0 *emad;

		emad = spmc_shmem_obj_get_emad(desc, i,
					       MAKE_FFA_VERSION(1, 1),
					       &emad_size);
		/*
		 * Validate the calculated emad address resides within the
		 * descriptor.
		 */
		if ((emad == NULL) || (uintptr_t) emad >=
		    (uintptr_t)((uint8_t *) desc + desc_size)) {
			VERBOSE("Invalid emad.\n");
			break;
		}
		if (sp_id == emad->mapd.endpoint_id) {
			found = true;
			break;
		}
	}
	return found;
}

/*
 * Compare two memory regions to determine if any range overlaps with another
 * ongoing memory transaction.
 */
static bool
overlapping_memory_regions(struct ffa_comp_mrd *region1,
			   struct ffa_comp_mrd *region2)
{
	uint64_t region1_start;
	uint64_t region1_size;
	uint64_t region1_end;
	uint64_t region2_start;
	uint64_t region2_size;
	uint64_t region2_end;

	assert(region1 != NULL);
	assert(region2 != NULL);

	if (region1 == region2) {
		return true;
	}

	/*
	 * Check each memory region in the request against existing
	 * transactions.
	 */
	for (size_t i = 0; i < region1->address_range_count; i++) {

		region1_start = region1->address_range_array[i].address;
		region1_size =
			region1->address_range_array[i].page_count *
			PAGE_SIZE_4KB;
		region1_end = region1_start + region1_size;

		for (size_t j = 0; j < region2->address_range_count; j++) {

			region2_start = region2->address_range_array[j].address;
			region2_size =
				region2->address_range_array[j].page_count *
				PAGE_SIZE_4KB;
			region2_end = region2_start + region2_size;

			/* Check if regions are not overlapping. */
			if (!((region2_end <= region1_start) ||
			      (region1_end <= region2_start))) {
				WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
				     region1_start, region1_end,
				     region2_start, region2_end);
				return true;
			}
		}
	}
	return false;
}

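/*
 * The test above is the standard interval check: half-open ranges
 * [s1, e1) and [s2, e2) are disjoint iff e2 <= s1 or e1 <= s2, so they
 * overlap iff neither condition holds, which is what the negated
 * condition implements for every pair of constituent ranges.
 */
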
/*******************************************************************************
 * FF-A v1.0 Memory Descriptor Conversion Helpers.
 ******************************************************************************/
/**
 * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
 *                                     converted descriptor.
 * @orig: The original v1.0 memory transaction descriptor.
 * @desc_size: The size of the original v1.0 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.1 format.
 */
static size_t
spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
{
	size_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = orig->emad;

	/* Get the size of the v1.1 descriptor. */
	size += sizeof(struct ffa_mtd);

	/* Add the size of the emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrds. */
	size += sizeof(struct ffa_comp_mrd);

	/* Add the size of the constituent mrds. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Check the calculated address is within the memory descriptor. */
	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
	    (uintptr_t)((uint8_t *) orig + desc_size)) {
		return 0;
	}
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}

/**
 * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
 *                                     converted descriptor.
 * @orig: The original v1.1 memory transaction descriptor.
 * @desc_size: The size of the original v1.1 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.0 format.
 */
static size_t
spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
{
	size_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
					   ((uint8_t *) orig +
					    orig->emad_offset);

	/* Get the size of the v1.0 descriptor. */
	size += sizeof(struct ffa_mtd_v1_0);

	/* Add the size of the v1.0 emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrds. */
	size += sizeof(struct ffa_comp_mrd);

	/* Add the size of the constituent mrds. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Check the calculated address is within the memory descriptor. */
	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
	    (uintptr_t)((uint8_t *) orig + desc_size)) {
		return 0;
	}
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}

/**
 * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
 * @out_obj: The shared memory object to populate with the converted
 *           descriptor.
 * @orig: The shared memory object containing the v1.0 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
				     struct spmc_shmem_obj *orig)
{
	struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
	struct ffa_mtd *out = &out_obj->desc;
	struct ffa_emad_v1_0 *emad_array_in;
	struct ffa_emad_v1_0 *emad_array_out;
	struct ffa_comp_mrd *mrd_in;
	struct ffa_comp_mrd *mrd_out;

	size_t mrd_in_offset;
	size_t mrd_out_offset;
	size_t mrd_size = 0;

	/* Populate the new descriptor format from the v1.0 struct. */
	out->sender_id = mtd_orig->sender_id;
	out->memory_region_attributes = mtd_orig->memory_region_attributes;
	out->flags = mtd_orig->flags;
	out->handle = mtd_orig->handle;
	out->tag = mtd_orig->tag;
	out->emad_count = mtd_orig->emad_count;
	out->emad_size = sizeof(struct ffa_emad_v1_0);

	/*
	 * We will locate the emad descriptors directly after the ffa_mtd
	 * struct. This will be 8-byte aligned.
	 */
	out->emad_offset = sizeof(struct ffa_mtd);

	emad_array_in = mtd_orig->emad;
	emad_array_out = (struct ffa_emad_v1_0 *)
			 ((uint8_t *) out + out->emad_offset);

	/* Copy across the emad structs. */
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		/* Bound check for each entry of the emad array. */
		if (((uint8_t *) &emad_array_in[i] +
		     sizeof(struct ffa_emad_v1_0)) >
		    ((uint8_t *) mtd_orig + orig->desc_size)) {
			VERBOSE("%s: Invalid mtd structure.\n", __func__);
			return false;
		}
		memcpy(&emad_array_out[i], &emad_array_in[i],
		       sizeof(struct ffa_emad_v1_0));
	}

	/* Place the mrd descriptors after the end of the emad descriptors. */
	mrd_in_offset = emad_array_in->comp_mrd_offset;
	mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

	/* Add the size of the composite memory region descriptor. */
	mrd_size += sizeof(struct ffa_comp_mrd);

	/* Find the mrd descriptor. */
	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

	/* Add the size of the constituent memory region descriptors. */
	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

	/*
	 * Update the offset in the emads by the delta between the input and
	 * output addresses.
	 */
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		emad_array_out[i].comp_mrd_offset =
			emad_array_in[i].comp_mrd_offset +
			(mrd_out_offset - mrd_in_offset);
	}

	/* Verify that we stay within bound of the memory descriptors. */
	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
	    (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
		ERROR("%s: Invalid mrd structure.\n", __func__);
		return false;
	}

	/* Copy the mrd descriptors directly. */
	memcpy(mrd_out, mrd_in, mrd_size);

	return true;
}

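/*
 * Note on the conversion above: the composite region is moved as one block,
 * so every emad's comp_mrd_offset is simply rebased by the difference
 * between the output and input composite offsets
 * (mrd_out_offset - mrd_in_offset). The v1.1 to v1.0 conversion below uses
 * the same scheme.
 */
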
/**
 * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
 *                                a v1.0 memory object.
 * @out_obj: The shared memory object to populate with the v1.0 descriptor.
 * @orig: The shared memory object containing the v1.1 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
			     struct spmc_shmem_obj *orig)
{
	struct ffa_mtd *mtd_orig = &orig->desc;
	struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
	struct ffa_emad_v1_0 *emad_in;
	struct ffa_emad_v1_0 *emad_array_in;
	struct ffa_emad_v1_0 *emad_array_out;
	struct ffa_comp_mrd *mrd_in;
	struct ffa_comp_mrd *mrd_out;

	size_t mrd_in_offset;
	size_t mrd_out_offset;
	size_t emad_out_array_size;
	size_t mrd_size = 0;
	size_t orig_desc_size = orig->desc_size;

	/* Populate the v1.0 descriptor format from the v1.1 struct. */
	out->sender_id = mtd_orig->sender_id;
	out->memory_region_attributes = mtd_orig->memory_region_attributes;
	out->flags = mtd_orig->flags;
	out->handle = mtd_orig->handle;
	out->tag = mtd_orig->tag;
	out->emad_count = mtd_orig->emad_count;

	/* Determine the location of the emad array in both descriptors. */
	emad_array_in = (struct ffa_emad_v1_0 *)
			((uint8_t *) mtd_orig + mtd_orig->emad_offset);
	emad_array_out = out->emad;

	/* Copy across the emad structs. */
	emad_in = emad_array_in;
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		/* Bound check for emad array. */
		if (((uint8_t *)emad_in + sizeof(struct ffa_emad_v1_0)) >
		    ((uint8_t *) mtd_orig + orig_desc_size)) {
			VERBOSE("%s: Invalid mtd structure.\n", __func__);
			return false;
		}
		memcpy(&emad_array_out[i], emad_in,
		       sizeof(struct ffa_emad_v1_0));

		/* Advance by emad_size bytes; the stride is not a whole struct. */
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *) emad_in + mtd_orig->emad_size);
	}

	/* Place the mrd descriptors after the end of the emad descriptors. */
	emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;

	mrd_out_offset = (uint8_t *) out->emad - (uint8_t *) out +
			 emad_out_array_size;

	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

	mrd_in_offset = mtd_orig->emad_offset +
			(mtd_orig->emad_size * mtd_orig->emad_count);

	/* Add the size of the composite memory region descriptor. */
	mrd_size += sizeof(struct ffa_comp_mrd);

	/* Find the mrd descriptor. */
	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

	/* Add the size of the constituent memory region descriptors. */
	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

	/*
	 * Update the offset in the emads by the delta between the input and
	 * output addresses.
	 */
	emad_in = emad_array_in;

	for (unsigned int i = 0U; i < out->emad_count; i++) {
		emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
						    (mrd_out_offset -
						     mrd_in_offset);
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *) emad_in + mtd_orig->emad_size);
	}

	/* Verify that we stay within bound of the memory descriptors. */
	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
	    (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
		ERROR("%s: Invalid mrd structure.\n", __func__);
		return false;
	}

	/* Copy the mrd descriptors directly. */
	memcpy(mrd_out, mrd_in, mrd_size);

	return true;
}

/**
 * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
 *                                     the v1.0 format and populates the
 *                                     provided buffer.
 * @dst: Buffer to populate v1.0 ffa_memory_region_descriptor.
 * @orig_obj: Object containing v1.1 ffa_memory_region_descriptor.
 * @buf_size: Size of the buffer to populate.
 * @offset: The offset of the converted descriptor to copy.
 * @copy_size: Will be populated with the number of bytes copied.
 * @v1_0_desc_size: Will be populated with the total size of the v1.0
 *                  descriptor.
 *
 * Return: 0 if conversion and population succeeded.
 * Note: This function invalidates the reference to @orig_obj therefore
 * `spmc_shmem_obj_lookup` must be called if further usage is required.
 */
static uint32_t
spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
				  size_t buf_size, size_t offset,
				  size_t *copy_size, size_t *v1_0_desc_size)
{
	struct spmc_shmem_obj *v1_0_obj;

	/* Calculate the size that the v1.0 descriptor will require. */
	*v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
			&orig_obj->desc, orig_obj->desc_size);

	if (*v1_0_desc_size == 0) {
		ERROR("%s: cannot determine size of descriptor.\n",
		      __func__);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/* Get a new obj to store the v1.0 descriptor. */
	v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
					*v1_0_desc_size);

	if (!v1_0_obj) {
		return FFA_ERROR_NO_MEMORY;
	}

	/* Perform the conversion from v1.1 to v1.0. */
	if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
		spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	*copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
	memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);

	/*
	 * We're finished with the v1.0 descriptor for now so free it.
	 * Note that this will invalidate any references to the v1.1
	 * descriptor.
	 */
	spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);

	return 0;
}

static int
spmc_validate_mtd_start(struct ffa_mtd *desc, uint32_t ffa_version,
			size_t fragment_length, size_t total_length)
{
	unsigned long long emad_end;
	unsigned long long emad_size;
	unsigned long long emad_offset;
	unsigned int min_desc_size;

	/* Determine the appropriate minimum descriptor size. */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		min_desc_size = sizeof(struct ffa_mtd_v1_0);
	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
		min_desc_size = sizeof(struct ffa_mtd);
	} else {
		return FFA_ERROR_INVALID_PARAMETER;
	}
	if (fragment_length < min_desc_size) {
		WARN("%s: invalid length %zu < %u\n", __func__, fragment_length,
		     min_desc_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	if (desc->emad_count == 0U) {
		WARN("%s: unsupported attribute desc count %u.\n",
		     __func__, desc->emad_count);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/*
	 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
	 * format, otherwise assume it is a v1.1 format.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		emad_offset = emad_size = sizeof(struct ffa_emad_v1_0);
	} else {
		if (!is_aligned(desc->emad_offset, 16)) {
			WARN("%s: Emad offset %" PRIx32 " is not 16-byte aligned.\n",
			     __func__, desc->emad_offset);
			return FFA_ERROR_INVALID_PARAMETER;
		}
		if (desc->emad_offset < sizeof(struct ffa_mtd)) {
			WARN("%s: Emad offset too small: 0x%" PRIx32 " < 0x%zx.\n",
			     __func__, desc->emad_offset,
			     sizeof(struct ffa_mtd));
			return FFA_ERROR_INVALID_PARAMETER;
		}
		emad_offset = desc->emad_offset;
		if (desc->emad_size < sizeof(struct ffa_emad_v1_0)) {
			WARN("%s: Bad emad size (%" PRIu32 " < %zu).\n", __func__,
			     desc->emad_size, sizeof(struct ffa_emad_v1_0));
			return FFA_ERROR_INVALID_PARAMETER;
		}
		if (!is_aligned(desc->emad_size, 16)) {
			WARN("%s: Emad size 0x%" PRIx32 " is not 16-byte aligned.\n",
			     __func__, desc->emad_size);
			return FFA_ERROR_INVALID_PARAMETER;
		}
		emad_size = desc->emad_size;
	}

	/*
	 * Overflow is impossible: the arithmetic happens in at least 64-bit
	 * precision, but all of the operands are bounded by UINT32_MAX, and
	 * ((2^32 - 1)^2 + (2^32 - 1) + (2^32 - 1)) = ((2^32 - 1) * (2^32 + 1))
	 * = (2^64 - 1).
	 */
	CASSERT(sizeof(desc->emad_count) == 4, assert_emad_count_max_too_large);
	emad_end = (desc->emad_count * (unsigned long long)emad_size) +
		   (unsigned long long)sizeof(struct ffa_comp_mrd) +
		   (unsigned long long)emad_offset;

	if (emad_end > total_length) {
		WARN("%s: Composite memory region extends beyond descriptor: 0x%llx > 0x%zx\n",
		     __func__, emad_end, total_length);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	return 0;
}

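/*
 * The check above only validates what the first fragment must already
 * contain: a plausible header and an emad array that fits within
 * total_length. Consistency of the composite region itself (offsets, range
 * counts and page counts) is deferred to spmc_shmem_check_obj() below as
 * fragments arrive.
 */
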
/**
 * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
 * @obj: Object containing ffa_memory_region_descriptor.
 * @ffa_version: FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, -EINVAL if constituent_memory_region_descriptor
 * offset or count is invalid.
 */
static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
				uint32_t ffa_version)
{
	uint32_t comp_mrd_offset = 0;

	if (obj->desc.emad_count == 0U) {
		WARN("%s: unsupported attribute desc count %u.\n",
		     __func__, obj->desc.emad_count);
		return -EINVAL;
	}

	for (size_t emad_num = 0; emad_num < obj->desc.emad_count; emad_num++) {
		size_t size;
		size_t count;
		size_t expected_size;
		size_t total_page_count;
		size_t emad_size;
		size_t desc_size;
		size_t header_emad_size;
		uint32_t offset;
		struct ffa_comp_mrd *comp;
		struct ffa_emad_v1_0 *emad;

		emad = spmc_shmem_obj_get_emad(&obj->desc, emad_num,
					       ffa_version, &emad_size);
		if (emad == NULL) {
			WARN("%s: invalid emad structure.\n", __func__);
			return -EINVAL;
		}

		/*
		 * Validate the calculated emad address resides within the
		 * descriptor.
		 */
		if ((uintptr_t) emad >=
		    (uintptr_t)((uint8_t *) &obj->desc + obj->desc_size)) {
			WARN("Invalid emad access.\n");
			return -EINVAL;
		}

		offset = emad->comp_mrd_offset;

		if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
			desc_size = sizeof(struct ffa_mtd_v1_0);
		} else {
			desc_size = sizeof(struct ffa_mtd);
		}

		header_emad_size = desc_size +
			(obj->desc.emad_count * emad_size);

		if (offset < header_emad_size) {
			WARN("%s: invalid object, offset %u < header + emad %zu\n",
			     __func__, offset, header_emad_size);
			return -EINVAL;
		}

		size = obj->desc_size;

		if (offset > size) {
			WARN("%s: invalid object, offset %u > total size %zu\n",
			     __func__, offset, obj->desc_size);
			return -EINVAL;
		}
		size -= offset;

		if (size < sizeof(struct ffa_comp_mrd)) {
			WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
			     __func__, offset, obj->desc_size);
			return -EINVAL;
		}
		size -= sizeof(struct ffa_comp_mrd);

		count = size / sizeof(struct ffa_cons_mrd);

		comp = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);

		if (comp == NULL) {
			WARN("%s: invalid comp_mrd offset\n", __func__);
			return -EINVAL;
		}

		if (comp->address_range_count != count) {
			WARN("%s: invalid object, desc count %u != %zu\n",
			     __func__, comp->address_range_count, count);
			return -EINVAL;
		}

		expected_size = offset + sizeof(*comp) +
				spmc_shmem_obj_ffa_constituent_size(obj,
								    ffa_version);

		if (expected_size != obj->desc_size) {
			WARN("%s: invalid object, computed size %zu != size %zu\n",
			     __func__, expected_size, obj->desc_size);
			return -EINVAL;
		}

		if (obj->desc_filled < obj->desc_size) {
			/*
			 * The whole descriptor has not yet been received.
			 * Skip final checks.
			 */
			return 0;
		}

		/*
		 * The offset provided to the composite memory region descriptor
		 * should be consistent across endpoint descriptors. Store the
		 * first entry and compare against subsequent entries.
		 */
		if (comp_mrd_offset == 0) {
			comp_mrd_offset = offset;
		} else {
			if (comp_mrd_offset != offset) {
				ERROR("%s: mismatching offsets provided, %u != %u\n",
				      __func__, offset, comp_mrd_offset);
				return -EINVAL;
			}
		}

		total_page_count = 0;

		for (size_t i = 0; i < count; i++) {
			total_page_count +=
				comp->address_range_array[i].page_count;
		}
		if (comp->total_page_count != total_page_count) {
			WARN("%s: invalid object, desc total_page_count %u != %zu\n",
			     __func__, comp->total_page_count,
			     total_page_count);
			return -EINVAL;
		}
	}
	return 0;
}
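
/*
 * Summary of the invariants enforced above for every endpoint descriptor:
 * the emad lies inside the received descriptor, comp_mrd_offset points past
 * the header plus emad array but within the object, the constituent count
 * matches the space left in the descriptor, all emads agree on a single
 * comp_mrd_offset, and the per-range page counts sum to total_page_count.
 */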

/**
 * spmc_shmem_check_state_obj - Check if the descriptor describes memory
 *                              regions that are currently involved with an
 *                              existing memory transaction. This implies that
 *                              the memory is not in a valid state for lending.
 * @obj: Object containing ffa_memory_region_descriptor.
 * @ffa_version: FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, -EINVAL if invalid memory state.
 */
static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
				      uint32_t ffa_version)
{
	size_t obj_offset = 0;
	struct spmc_shmem_obj *inflight_obj;

	struct ffa_comp_mrd *other_mrd;
	struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
									 ffa_version);

	if (requested_mrd == NULL) {
		return -EINVAL;
	}

	inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
					       &obj_offset);

	while (inflight_obj != NULL) {
		/*
		 * Don't compare the transaction to itself or to partially
		 * transmitted descriptors.
		 */
		if ((obj->desc.handle != inflight_obj->desc.handle) &&
		    (obj->desc_size == obj->desc_filled)) {
			other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
								FFA_VERSION_COMPILED);
			if (other_mrd == NULL) {
				return -EINVAL;
			}
			if (overlapping_memory_regions(requested_mrd,
						       other_mrd)) {
				return -EINVAL;
			}
		}

		inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
						       &obj_offset);
	}
	return 0;
}
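
/*
 * Note: this walks every other in-flight transaction in the pool and
 * compares each pair of constituent ranges, so its cost grows with the
 * total number of address ranges currently registered with the SPMC.
 */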

static long spmc_ffa_fill_desc(struct mailbox *mbox,
			       struct spmc_shmem_obj *obj,
			       uint32_t fragment_length,
			       ffa_mtd_flag32_t mtd_flag,
			       uint32_t ffa_version,
			       void *smc_handle)
{
	int ret;
	size_t emad_size;
	uint32_t handle_low;
	uint32_t handle_high;
	struct ffa_emad_v1_0 *emad;
	struct ffa_emad_v1_0 *other_emad;

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (fragment_length > mbox->rxtx_page_count * PAGE_SIZE_4KB) {
		WARN("%s: bad fragment size %u > %u buffer size\n", __func__,
		     fragment_length, mbox->rxtx_page_count * PAGE_SIZE_4KB);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (fragment_length > obj->desc_size - obj->desc_filled) {
		WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
		     fragment_length, obj->desc_size - obj->desc_filled);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	memcpy((uint8_t *)&obj->desc + obj->desc_filled,
	       (uint8_t *) mbox->tx_buffer, fragment_length);

	/* Ensure that the sender ID resides in the normal world. */
	if (ffa_is_secure_world_id(obj->desc.sender_id)) {
		WARN("%s: Invalid sender ID 0x%x.\n",
		     __func__, obj->desc.sender_id);
		ret = FFA_ERROR_DENIED;
		goto err_arg;
	}

	/* Ensure the NS bit is set to 0. */
	if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	/*
	 * We don't currently support any optional flags so ensure none are
	 * requested.
	 */
	if (obj->desc.flags != 0U && mtd_flag != 0U &&
	    (obj->desc.flags != mtd_flag)) {
		WARN("%s: invalid memory transaction flags %u != %u\n",
		     __func__, obj->desc.flags, mtd_flag);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (obj->desc_filled == 0U) {
		/* First fragment, descriptor header has been copied */
		ret = spmc_validate_mtd_start(&obj->desc, ffa_version,
					      fragment_length, obj->desc_size);
		if (ret != 0) {
			goto err_bad_desc;
		}

		obj->desc.handle = spmc_shmem_obj_state.next_handle++;
		obj->desc.flags |= mtd_flag;
	}

	obj->desc_filled += fragment_length;
	ret = spmc_shmem_check_obj(obj, ffa_version);
	if (ret != 0) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_bad_desc;
	}

	handle_low = (uint32_t)obj->desc.handle;
	handle_high = obj->desc.handle >> 32;

	if (obj->desc_filled != obj->desc_size) {
		SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
			 handle_high, obj->desc_filled,
			 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
	}
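
	/*
	 * Reaching this point means no FFA_MEM_FRAG_RX reply was needed: the
	 * sender has transmitted desc_size bytes in total, possibly across
	 * several FFA_MEM_FRAG_TX calls (see spmc_ffa_mem_frag_tx() below).
	 */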

	/* The full descriptor has been received, perform any final checks. */

	/*
	 * If a partition ID resides in the secure world validate that the
	 * partition ID is for a known partition. Ignore any partition ID
	 * belonging to the normal world as it is assumed the Hypervisor will
	 * have validated these.
	 */
	for (size_t i = 0; i < obj->desc.emad_count; i++) {
		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
					       &emad_size);
		if (emad == NULL) {
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_bad_desc;
		}

		ffa_endpoint_id16_t ep_id = emad->mapd.endpoint_id;

		if (ffa_is_secure_world_id(ep_id)) {
			if (spmc_get_sp_ctx(ep_id) == NULL) {
				WARN("%s: Invalid receiver id 0x%x\n",
				     __func__, ep_id);
				ret = FFA_ERROR_INVALID_PARAMETER;
				goto err_bad_desc;
			}
		}
	}

	/* Ensure partition IDs are not duplicated. */
	for (size_t i = 0; i < obj->desc.emad_count; i++) {
		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
					       &emad_size);
		if (emad == NULL) {
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_bad_desc;
		}
		for (size_t j = i + 1; j < obj->desc.emad_count; j++) {
			other_emad = spmc_shmem_obj_get_emad(&obj->desc, j,
							     ffa_version,
							     &emad_size);
			if (other_emad == NULL) {
				ret = FFA_ERROR_INVALID_PARAMETER;
				goto err_bad_desc;
			}

			if (emad->mapd.endpoint_id ==
			    other_emad->mapd.endpoint_id) {
				WARN("%s: Duplicated endpoint id 0x%x\n",
				     __func__, emad->mapd.endpoint_id);
				ret = FFA_ERROR_INVALID_PARAMETER;
				goto err_bad_desc;
			}
		}
	}

	ret = spmc_shmem_check_state_obj(obj, ffa_version);
	if (ret) {
		ERROR("%s: invalid memory region descriptor.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_bad_desc;
	}

	/*
	 * Everything checks out, if the sender was using FF-A v1.0, convert
	 * the descriptor format to use the v1.1 structures.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		struct spmc_shmem_obj *v1_1_obj;
		uint64_t mem_handle;

		/* Calculate the size that the v1.1 descriptor will require. */
		size_t v1_1_desc_size =
			spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
							  obj->desc_size);

		if (v1_1_desc_size == 0U) {
			ERROR("%s: cannot determine size of descriptor.\n",
			      __func__);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_arg;
		}

		/* Get a new obj to store the v1.1 descriptor. */
		v1_1_obj =
			spmc_shmem_obj_alloc(&spmc_shmem_obj_state, v1_1_desc_size);

		if (!v1_1_obj) {
			ret = FFA_ERROR_NO_MEMORY;
			goto err_arg;
		}

		/* Perform the conversion from v1.0 to v1.1. */
		v1_1_obj->desc_size = v1_1_desc_size;
		v1_1_obj->desc_filled = v1_1_desc_size;
		if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
			ERROR("%s: Could not convert mtd!\n", __func__);
			spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_arg;
		}

		/*
		 * We're finished with the v1.0 descriptor so free it
		 * and continue our checks with the new v1.1 descriptor.
		 */
		mem_handle = obj->desc.handle;
		spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
		obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
		if (obj == NULL) {
			ERROR("%s: Failed to find converted descriptor.\n",
			      __func__);
			ret = FFA_ERROR_INVALID_PARAMETER;
			return spmc_ffa_error_return(smc_handle, ret);
		}
	}

	/* Allow for platform specific operations to be performed. */
	ret = plat_spmc_shmem_begin(&obj->desc);
	if (ret != 0) {
		goto err_arg;
	}

	SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
		 0, 0, 0);

err_bad_desc:
err_arg:
	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
	return spmc_ffa_error_return(smc_handle, ret);
}

/**
 * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
 * @client: Client state.
 * @total_length: Total length of shared memory descriptor.
 * @fragment_length: Length of fragment of shared memory descriptor passed in
 *                   this call.
 * @address: Not supported, must be 0.
 * @page_count: Not supported, must be 0.
 * @smc_handle: Handle passed to smc call. Used to return
 *              FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
 * to share or lend memory from non-secure os to secure os (with no stream
 * endpoints).
 *
 * Return: 0 on success, error code on failure.
 */
long spmc_ffa_mem_send(uint32_t smc_fid,
		       bool secure_origin,
		       uint64_t total_length,
		       uint32_t fragment_length,
		       uint64_t address,
		       uint32_t page_count,
		       void *cookie,
		       void *handle,
		       uint64_t flags)

{
	long ret;
	struct spmc_shmem_obj *obj;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	ffa_mtd_flag32_t mtd_flag;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	size_t min_desc_size;

	if (address != 0U || page_count != 0U) {
		WARN("%s: custom memory region for message not supported.\n",
		     __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (secure_origin) {
		WARN("%s: unsupported share direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		min_desc_size = sizeof(struct ffa_mtd_v1_0);
	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
		min_desc_size = sizeof(struct ffa_mtd);
	} else {
		WARN("%s: bad FF-A version.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check if the descriptor is too small for the FF-A version. */
	if (fragment_length < min_desc_size) {
		WARN("%s: bad first fragment size %u < %zu\n",
		     __func__, fragment_length, min_desc_size);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
		mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
	} else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
		mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
	} else {
		WARN("%s: invalid memory management operation.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);
	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
	if (obj == NULL) {
		ret = FFA_ERROR_NO_MEMORY;
		goto err_unlock;
	}

	spin_lock(&mbox->lock);
	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
				 ffa_version, handle);
	spin_unlock(&mbox->lock);

	spin_unlock(&spmc_shmem_obj_state.lock);
	return ret;

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

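/*
 * For illustration, a minimal sketch of the normal-world call sequence the
 * pair of handlers above and below implements (arguments abbreviated; the
 * names are descriptive, not identifiers defined in this file):
 *
 *   FFA_MEM_SHARE(total_length, fragment_length)   first fragment in TX buf
 *     -> FFA_MEM_FRAG_RX(handle, desc_filled)      more data expected
 *   FFA_MEM_FRAG_TX(handle, fragment_length)       next fragment in TX buf
 *     -> ... repeated until desc_filled == desc_size
 *     -> FFA_SUCCESS(handle)                       transaction registered
 */
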
/**
 * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
 * @client: Client state.
 * @handle_low: Handle_low value returned from FFA_MEM_FRAG_RX.
 * @handle_high: Handle_high value returned from FFA_MEM_FRAG_RX.
 * @fragment_length: Length of fragments transmitted.
 * @sender_id: Vmid of sender in bits [31:16]
 * @smc_handle: Handle passed to smc call. Used to return
 *              FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Return: @smc_handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
			  bool secure_origin,
			  uint64_t handle_low,
			  uint64_t handle_high,
			  uint32_t fragment_length,
			  uint32_t sender_id,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	long ret;
	uint32_t desc_sender_id;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);

	struct spmc_shmem_obj *obj;
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
		     __func__, mem_handle);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
	if (sender_id != desc_sender_id) {
		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
		     sender_id, desc_sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	if (obj->desc_filled == obj->desc_size) {
		WARN("%s: object desc already filled, %zu\n", __func__,
		     obj->desc_filled);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	spin_lock(&mbox->lock);
	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
				 handle);
	spin_unlock(&mbox->lock);

	spin_unlock(&spmc_shmem_obj_state.lock);
	return ret;

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor
 *                                    if the caller implements a version greater
 *                                    than FF-A 1.0 or if they have requested
 *                                    the functionality.
 *                                    TODO: We are assuming that the caller is
 *                                    an SP. To support retrieval from the
 *                                    normal world this function will need to be
 *                                    expanded accordingly.
 * @resp: Descriptor populated in caller's RX buffer.
 * @sp_ctx: Context of the calling SP.
 */
void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
				      struct secure_partition_desc *sp_ctx)
{
	if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
	    sp_ctx->ns_bit_requested) {
		/*
		 * Currently memory senders must reside in the normal
		 * world, and we do not have the functionality to change
		 * the state of memory dynamically. Therefore we can always set
		 * the NS bit to 1.
		 */
		resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
	}
}

/**
 * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
 * @smc_fid: FID of SMC
 * @total_length: Total length of retrieve request descriptor if this is
 *                the first call. Otherwise (unsupported) must be 0.
 * @fragment_length: Length of fragment of retrieve request descriptor passed
 *                   in this call. Only @fragment_length == @total_length is
 *                   supported by this implementation.
 * @address: Not supported, must be 0.
 * @page_count: Not supported, must be 0.
 * @smc_handle: Handle passed to smc call. Used to return
 *              FFA_MEM_RETRIEVE_RESP.
 *
 * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
 * Used by secure os to retrieve memory already shared by non-secure os.
 * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
 * the client must call FFA_MEM_FRAG_RX until the full response has been
 * received.
 *
 * Return: @handle on success, error code on failure.
 */
long
spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
			  bool secure_origin,
			  uint32_t total_length,
			  uint32_t fragment_length,
			  uint64_t address,
			  uint32_t page_count,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	int ret;
	size_t buf_size;
	size_t copy_size = 0;
	size_t min_desc_size;
	size_t out_desc_size = 0;

	/*
	 * Currently we are only accessing fields that are the same in both the
	 * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
	 * here. We only need to validate against the appropriate struct size.
	 */
	struct ffa_mtd *resp;
	const struct ffa_mtd *req;
	struct spmc_shmem_obj *obj = NULL;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();

	if (!secure_origin) {
		WARN("%s: unsupported retrieve req direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (address != 0U || page_count != 0U) {
		WARN("%s: custom memory region not supported.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&mbox->lock);

	req = mbox->tx_buffer;
	resp = mbox->rx_buffer;
	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	if (mbox->state != MAILBOX_STATE_EMPTY) {
		WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
		ret = FFA_ERROR_DENIED;
		goto err_unlock_mailbox;
	}

	if (fragment_length != total_length) {
		WARN("%s: fragmented retrieve request not supported.\n",
		     __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	if (req->emad_count == 0U) {
		WARN("%s: unsupported attribute desc count %u.\n",
		     __func__, req->emad_count);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	/* Determine the appropriate minimum descriptor size. */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		min_desc_size = sizeof(struct ffa_mtd_v1_0);
	} else {
		min_desc_size = sizeof(struct ffa_mtd);
	}
	if (total_length < min_desc_size) {
		WARN("%s: invalid length %u < %zu\n", __func__, total_length,
		     min_desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (obj->desc_filled != obj->desc_size) {
		WARN("%s: incomplete object desc filled %zu < size %zu\n",
		     __func__, obj->desc_filled, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
		WARN("%s: wrong sender id 0x%x != 0x%x\n",
		     __func__, req->sender_id, obj->desc.sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->tag != obj->desc.tag) {
		WARN("%s: wrong tag 0x%lx != 0x%lx\n",
		     __func__, req->tag, obj->desc.tag);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
		WARN("%s: mismatch of endpoint counts %u != %u\n",
1529 __func__, req->emad_count, obj->desc.emad_count);
1530 ret = FFA_ERROR_INVALID_PARAMETER;
1531 goto err_unlock_all;
1532 }
1533
Marc Bonnici08f28ef2022-04-19 16:52:59 +01001534 /* Ensure the NS bit is set to 0 in the request. */
1535 if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
1536 WARN("%s: NS mem attributes flags MBZ.\n", __func__);
1537 ret = FFA_ERROR_INVALID_PARAMETER;
1538 goto err_unlock_all;
1539 }
1540
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001541 if (req->flags != 0U) {
1542 if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
1543 (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
1544 /*
1545 * If the retrieve request specifies the memory
1546 * transaction type, ensure it matches what we expect.
1547 */
1548 WARN("%s: wrong mem transaction flags %x != %x\n",
1549 __func__, req->flags, obj->desc.flags);
1550 ret = FFA_ERROR_INVALID_PARAMETER;
1551 goto err_unlock_all;
1552 }
1553
1554 if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
1555 req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
1556 /*
1557 * The current implementation does not support donate,
1558 * and no other flags are supported.
1559 */
1560 WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
1561 ret = FFA_ERROR_INVALID_PARAMETER;
1562 goto err_unlock_all;
1563 }
1564 }
1565
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001566 /* Validate the caller is a valid participant. */
Shruti Gupta20ce06c2022-08-25 14:22:53 +01001567 if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001568 WARN("%s: Invalid endpoint ID (0x%x).\n",
1569 __func__, sp_ctx->sp_id);
1570 ret = FFA_ERROR_INVALID_PARAMETER;
1571 goto err_unlock_all;
1572 }
1573
Marc Bonnicid1907f02022-04-19 17:42:53 +01001574 /* Validate that each provided emad offset and structure is valid. */
1575 for (size_t i = 0; i < req->emad_count; i++) {
1576 size_t emad_size;
1577 struct ffa_emad_v1_0 *emad;
1578
1579 emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1580 &emad_size);
1581 if (emad == NULL) {
1582 WARN("%s: invalid emad structure.\n", __func__);
1583 ret = FFA_ERROR_INVALID_PARAMETER;
1584 goto err_unlock_all;
1585 }
1586
1587 if ((uintptr_t) emad + emad_size > (uintptr_t)
1588 ((uint8_t *) req + total_length)) {
1589 WARN("Invalid emad access.\n");
1590 ret = FFA_ERROR_INVALID_PARAMETER;
1591 goto err_unlock_all;
1592 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001593 }
1594
1595 /*
1596 * Validate all the endpoints match in the case of multiple
1597 * borrowers. We don't mandate that the order of the borrowers
1598 * must match in the descriptors therefore check to see if the
1599 * endpoints match in any order.
1600 */
1601 for (size_t i = 0; i < req->emad_count; i++) {
1602 bool found = false;
Marc Bonnicid1907f02022-04-19 17:42:53 +01001603 size_t emad_size;
1604 struct ffa_emad_v1_0 *emad;
1605 struct ffa_emad_v1_0 *other_emad;
1606
1607 emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1608 &emad_size);
1609 if (emad == NULL) {
1610 ret = FFA_ERROR_INVALID_PARAMETER;
1611 goto err_unlock_all;
1612 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001613
1614 for (size_t j = 0; j < obj->desc.emad_count; j++) {
Marc Bonnicid1907f02022-04-19 17:42:53 +01001615 other_emad = spmc_shmem_obj_get_emad(
1616 &obj->desc, j, MAKE_FFA_VERSION(1, 1),
1617 &emad_size);
1618
1619 if (other_emad == NULL) {
1620 ret = FFA_ERROR_INVALID_PARAMETER;
1621 goto err_unlock_all;
1622 }
1623
1624 if (req->emad_count != 0U &&
1625 emad->mapd.endpoint_id ==
1626 other_emad->mapd.endpoint_id) {
Marc Bonnici336630f2022-01-13 11:39:10 +00001627 found = true;
1628 break;
1629 }
1630 }
1631
1632 if (!found) {
1633 WARN("%s: invalid receiver id (0x%x).\n",
Marc Bonnicid1907f02022-04-19 17:42:53 +01001634 __func__, emad->mapd.endpoint_id);
Marc Bonnici336630f2022-01-13 11:39:10 +00001635 ret = FFA_ERROR_INVALID_PARAMETER;
1636 goto err_unlock_all;
1637 }
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001638 }
1639
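	/*
	 * Mark the RX buffer as in use; per FF-A it remains owned by the
	 * receiver until it is returned with FFA_RX_RELEASE.
	 */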
1640 mbox->state = MAILBOX_STATE_FULL;
1641
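	/*
	 * Record the retrieval; FFA_MEM_RECLAIM is denied until a matching
	 * FFA_MEM_RELINQUISH drops in_use back to zero.
	 */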
1642 if (req->emad_count != 0U) {
1643 obj->in_use++;
1644 }
1645
Marc Bonnicid1907f02022-04-19 17:42:53 +01001646 /*
1647 * If the caller is v1.0 convert the descriptor, otherwise copy
1648 * directly.
1649 */
1650 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1651 ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
1652 &copy_size,
1653 &out_desc_size);
1654 if (ret != 0U) {
1655 ERROR("%s: Failed to process descriptor.\n", __func__);
1656 goto err_unlock_all;
1657 }
1658 } else {
1659 copy_size = MIN(obj->desc_size, buf_size);
1660 out_desc_size = obj->desc_size;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001661
Marc Bonnicid1907f02022-04-19 17:42:53 +01001662 memcpy(resp, &obj->desc, copy_size);
1663 }
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001664
Marc Bonnici08f28ef2022-04-19 16:52:59 +01001665 /* Set the NS bit in the response if applicable. */
1666 spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);
1667
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001668 spin_unlock(&spmc_shmem_obj_state.lock);
1669 spin_unlock(&mbox->lock);
1670
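	/*
	 * Per the FF-A spec, FFA_MEM_RETRIEVE_RESP carries the total
	 * descriptor length in w1 and the length of this fragment in w2;
	 * if copy_size < out_desc_size the receiver continues with
	 * FFA_MEM_FRAG_RX.
	 */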
Marc Bonnicid1907f02022-04-19 17:42:53 +01001671 SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001672 copy_size, 0, 0, 0, 0, 0);
1673
1674err_unlock_all:
1675 spin_unlock(&spmc_shmem_obj_state.lock);
1676err_unlock_mailbox:
1677 spin_unlock(&mbox->lock);
1678 return spmc_ffa_error_return(handle, ret);
1679}
1680
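/*
 * Sketch of a borrower's typical flow against the handlers in this file
 * (the exact SMC plumbing depends on the SP runtime in use):
 *
 *	1. Obtain the 64-bit memory handle from the owner, e.g. via a
 *	   direct message.
 *	2. Place an FFA_MEM_RETRIEVE_REQ descriptor naming that handle and
 *	   the SP's own endpoint ID in the TX buffer and invoke
 *	   FFA_MEM_RETRIEVE_REQ, then read the response from the RX buffer
 *	   and call FFA_RX_RELEASE.
 *	3. If the response was fragmented, loop on FFA_MEM_FRAG_RX until
 *	   the returned fragments cover the full descriptor length.
 *	4. Once finished with the memory, place a relinquish descriptor in
 *	   the TX buffer and invoke FFA_MEM_RELINQUISH so the owner can
 *	   later FFA_MEM_RECLAIM.
 */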
1681/**
1682 * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
1683 * @secure_origin: True if the call originated from the secure world.
1684 * @handle_low: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
1685 * @handle_high: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
1686 * @fragment_offset: Byte offset in descriptor to resume at.
1687 * @sender_id: Bit[31:16]: Endpoint id of sender if client is a
1688 * hypervisor. 0 otherwise.
1689 * @handle: Handle passed to the SMC call. Used to return
1690 * FFA_MEM_FRAG_TX.
1691 *
1692 * Return: @handle on success, error code on failure.
1693 */
1694long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
1695 bool secure_origin,
1696 uint32_t handle_low,
1697 uint32_t handle_high,
1698 uint32_t fragment_offset,
1699 uint32_t sender_id,
1700 void *cookie,
1701 void *handle,
1702 uint64_t flags)
1703{
1704 int ret;
1705 void *src;
1706 size_t buf_size;
1707 size_t copy_size;
1708 size_t full_copy_size;
1709 uint32_t desc_sender_id;
1710 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1711 uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1712 struct spmc_shmem_obj *obj;
Marc Bonnicid1907f02022-04-19 17:42:53 +01001713 uint32_t ffa_version = get_partition_ffa_version(secure_origin);
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001714
1715 if (!secure_origin) {
1716 WARN("%s: can only be called from swld.\n",
1717 __func__);
1718 return spmc_ffa_error_return(handle,
1719 FFA_ERROR_INVALID_PARAMETER);
1720 }
1721
1722 spin_lock(&spmc_shmem_obj_state.lock);
1723
1724 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1725 if (obj == NULL) {
1726 WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
1727 __func__, mem_handle);
1728 ret = FFA_ERROR_INVALID_PARAMETER;
1729 goto err_unlock_shmem;
1730 }
1731
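	/* The sender_id argument carries the endpoint ID in bits [31:16]. */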
1732 desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1733 if (sender_id != 0U && sender_id != desc_sender_id) {
1734 WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1735 sender_id, desc_sender_id);
1736 ret = FFA_ERROR_INVALID_PARAMETER;
1737 goto err_unlock_shmem;
1738 }
1739
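	/*
	 * fragment_offset is the byte offset into the stored (v1.1 format)
	 * descriptor at which to resume; it must lie within the descriptor.
	 */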
1740 if (fragment_offset >= obj->desc_size) {
1741 WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
1742 __func__, fragment_offset, obj->desc_size);
1743 ret = FFA_ERROR_INVALID_PARAMETER;
1744 goto err_unlock_shmem;
1745 }
1746
1747 spin_lock(&mbox->lock);
1748
1749 if (mbox->rxtx_page_count == 0U) {
1750 WARN("%s: buffer pair not registered.\n", __func__);
1751 ret = FFA_ERROR_INVALID_PARAMETER;
1752 goto err_unlock_all;
1753 }
1754
1755 if (mbox->state != MAILBOX_STATE_EMPTY) {
1756 WARN("%s: RX Buffer is full!\n", __func__);
1757 ret = FFA_ERROR_DENIED;
1758 goto err_unlock_all;
1759 }
1760
1761 buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1762
1763 mbox->state = MAILBOX_STATE_FULL;
1764
Marc Bonnicid1907f02022-04-19 17:42:53 +01001765 /*
1766 * If the caller is v1.0 convert the descriptor, otherwise copy
1767 * directly.
1768 */
1769 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1770 size_t out_desc_size;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001771
Marc Bonnicid1907f02022-04-19 17:42:53 +01001772 ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
1773 buf_size,
1774 fragment_offset,
1775 &copy_size,
1776 &out_desc_size);
1777 if (ret != 0U) {
1778 ERROR("%s: Failed to process descriptor.\n", __func__);
1779 goto err_unlock_all;
1780 }
1781 } else {
1782 full_copy_size = obj->desc_size - fragment_offset;
1783 copy_size = MIN(full_copy_size, buf_size);
1784
1785 src = &obj->desc;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001786
Marc Bonnicid1907f02022-04-19 17:42:53 +01001787 memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
1788 }
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001789
1790 spin_unlock(&mbox->lock);
1791 spin_unlock(&spmc_shmem_obj_state.lock);
1792
1793 SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
1794 copy_size, sender_id, 0, 0, 0);
1795
1796err_unlock_all:
1797 spin_unlock(&mbox->lock);
1798err_unlock_shmem:
1799 spin_unlock(&spmc_shmem_obj_state.lock);
1800 return spmc_ffa_error_return(handle, ret);
1801}
1802
1803/**
1804 * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
1805 * @secure_origin: True if the call originated from the secure world.
1806 *
1807 * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
1808 * Used by the secure os to release memory previously shared with it by the
1809 * non-secure os.
1809 *
1810 * The handle to release must be in the client's (secure os's) transmit buffer.
1811 *
1812 * Return: 0 on success, error code on failure.
1813 */
1814int spmc_ffa_mem_relinquish(uint32_t smc_fid,
1815 bool secure_origin,
1816 uint32_t handle_low,
1817 uint32_t handle_high,
1818 uint32_t fragment_offset,
1819 uint32_t sender_id,
1820 void *cookie,
1821 void *handle,
1822 uint64_t flags)
1823{
1824 int ret;
1825 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1826 struct spmc_shmem_obj *obj;
1827 const struct ffa_mem_relinquish_descriptor *req;
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001828 struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001829
1830 if (!secure_origin) {
1831 WARN("%s: unsupported relinquish direction.\n", __func__);
1832 return spmc_ffa_error_return(handle,
1833 FFA_ERROR_INVALID_PARAMETER);
1834 }
1835
1836 spin_lock(&mbox->lock);
1837
1838 if (mbox->rxtx_page_count == 0U) {
1839 WARN("%s: buffer pair not registered.\n", __func__);
1840 ret = FFA_ERROR_INVALID_PARAMETER;
1841 goto err_unlock_mailbox;
1842 }
1843
1844 req = mbox->tx_buffer;
1845
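	/*
	 * The TX buffer is interpreted as an FF-A relinquish descriptor.
	 * Sketch of the expected layout (field names as accessed below;
	 * the endpoint ID entries are assumed to be 16-bit values):
	 *
	 *	struct ffa_mem_relinquish_descriptor {
	 *		uint64_t handle;	   handle from SHARE/LEND
	 *		uint32_t flags;		   MBZ here
	 *		uint32_t endpoint_count;   must be 1 (no proxies)
	 *		uint16_t endpoint_array[]; relinquishing endpoints
	 *	};
	 */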
1846 if (req->flags != 0U) {
1847 WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
1848 ret = FFA_ERROR_INVALID_PARAMETER;
1849 goto err_unlock_mailbox;
1850 }
1851
Marc Bonnici336630f2022-01-13 11:39:10 +00001852 if (req->endpoint_count == 0U) {
1853 WARN("%s: endpoint count cannot be 0.\n", __func__);
1854 ret = FFA_ERROR_INVALID_PARAMETER;
1855 goto err_unlock_mailbox;
1856 }
1857
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001858 spin_lock(&spmc_shmem_obj_state.lock);
1859
1860 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1861 if (obj == NULL) {
1862 ret = FFA_ERROR_INVALID_PARAMETER;
1863 goto err_unlock_all;
1864 }
1865
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001866 /*
1867 * Validate the endpoint ID was populated correctly. We don't currently
1868 * support proxy endpoints so the endpoint count should always be 1.
1869 */
1870 if (req->endpoint_count != 1U) {
1871 WARN("%s: unsupported endpoint count %u != 1\n", __func__,
1872 req->endpoint_count);
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001873 ret = FFA_ERROR_INVALID_PARAMETER;
1874 goto err_unlock_all;
1875 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001876
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001877 /* Validate provided endpoint ID matches the partition ID. */
1878 if (req->endpoint_array[0] != sp_ctx->sp_id) {
1879 WARN("%s: invalid endpoint ID %u != %u\n", __func__,
1880 req->endpoint_array[0], sp_ctx->sp_id);
1881 ret = FFA_ERROR_INVALID_PARAMETER;
1882 goto err_unlock_all;
1883 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001884
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001885 /* Validate the caller is a valid participant. */
Shruti Gupta20ce06c2022-08-25 14:22:53 +01001886 if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001887 WARN("%s: Invalid endpoint ID (0x%x).\n",
1888 __func__, req->endpoint_array[0]);
1889 ret = FFA_ERROR_INVALID_PARAMETER;
1890 goto err_unlock_all;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001891 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001892
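	/*
	 * A relinquish must balance an earlier retrieve; refuse if no
	 * retrieval is outstanding against this object.
	 */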
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001893 if (obj->in_use == 0U) {
1894 ret = FFA_ERROR_INVALID_PARAMETER;
1895 goto err_unlock_all;
1896 }
1897 obj->in_use--;
1898
1899 spin_unlock(&spmc_shmem_obj_state.lock);
1900 spin_unlock(&mbox->lock);
1901
1902 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1903
1904err_unlock_all:
1905 spin_unlock(&spmc_shmem_obj_state.lock);
1906err_unlock_mailbox:
1907 spin_unlock(&mbox->lock);
1908 return spmc_ffa_error_return(handle, ret);
1909}
1910
1911/**
1912 * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
1913 * @secure_origin: True if the call originated from the secure world.
1914 * @handle_low: Unique handle of shared memory object to reclaim. Bit[31:0].
1915 * @handle_high: Unique handle of shared memory object to reclaim.
1916 * Bit[63:32].
1917 * @mem_flags: Unsupported, must be zero.
1918 *
1919 * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
1920 * Used by the non-secure os to reclaim memory previously shared with the
1921 * secure os.
1921 *
1922 * Return: 0 on success, error code on failure.
1923 */
1924int spmc_ffa_mem_reclaim(uint32_t smc_fid,
1925 bool secure_origin,
1926 uint32_t handle_low,
1927 uint32_t handle_high,
1928 uint32_t mem_flags,
1929 uint64_t x4,
1930 void *cookie,
1931 void *handle,
1932 uint64_t flags)
1933{
1934 int ret;
1935 struct spmc_shmem_obj *obj;
1936 uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1937
1938 if (secure_origin) {
1939 WARN("%s: unsupported reclaim direction.\n", __func__);
1940 return spmc_ffa_error_return(handle,
1941 FFA_ERROR_INVALID_PARAMETER);
1942 }
1943
1944 if (mem_flags != 0U) {
1945 WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
1946 return spmc_ffa_error_return(handle,
1947 FFA_ERROR_INVALID_PARAMETER);
1948 }
1949
1950 spin_lock(&spmc_shmem_obj_state.lock);
1951
1952 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1953 if (obj == NULL) {
1954 ret = FFA_ERROR_INVALID_PARAMETER;
1955 goto err_unlock;
1956 }
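	/* Deny reclaim while any borrower still has the memory retrieved. */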
1957 if (obj->in_use != 0U) {
1958 ret = FFA_ERROR_DENIED;
1959 goto err_unlock;
1960 }
Marc Bonnici503320e2022-02-21 15:02:36 +00001961
Marc Bonnici82e28f12022-10-18 13:39:48 +01001962 if (obj->desc_filled != obj->desc_size) {
1963 WARN("%s: incomplete object desc filled %zu < size %zu\n",
1964 __func__, obj->desc_filled, obj->desc_size);
1965 ret = FFA_ERROR_INVALID_PARAMETER;
1966 goto err_unlock;
1967 }
1968
Marc Bonnici503320e2022-02-21 15:02:36 +00001969 /* Allow for platform specific operations to be performed. */
1970 ret = plat_spmc_shmem_reclaim(&obj->desc);
1971 if (ret != 0) {
1972 goto err_unlock;
1973 }
1974
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001975 spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1976 spin_unlock(&spmc_shmem_obj_state.lock);
1977
1978 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1979
1980err_unlock:
1981 spin_unlock(&spmc_shmem_obj_state.lock);
1982 return spmc_ffa_error_return(handle, ret);
1983}