/*
 * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <assert.h>
#include <errno.h>
#include <inttypes.h>

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/object_pool.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <services/ffa_svc.h>
#include "spmc.h"
#include "spmc_shared_mem.h"

#include <platform_def.h>

/**
 * struct spmc_shmem_obj - Shared memory object.
 * @desc_size:      Size of @desc.
 * @desc_filled:    Size of @desc already received.
 * @in_use:         Number of clients that have called ffa_mem_retrieve_req
 *                  without a matching ffa_mem_relinquish call.
 * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
 */
struct spmc_shmem_obj {
	size_t desc_size;
	size_t desc_filled;
	size_t in_use;
	struct ffa_mtd desc;
};

/*
 * Declare our data structure to store the metadata of memory share requests.
 * The main datastore is allocated on a per platform basis to ensure enough
 * storage can be made available.
 * The address of the data store will be populated by the SPMC during its
 * initialization.
 */

struct spmc_shmem_obj_state spmc_shmem_obj_state = {
	/* Set start value for handle so top 32 bits are needed quickly. */
	.next_handle = 0xffffffc0U,
};
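
/*
 * Illustrative sketch (assumption, not part of this file): the backing
 * storage is typically provided by the platform port and handed to
 * spmc_shmem_obj_state during SPMC setup, along the lines of:
 *
 *	static uint8_t plat_datastore[PLAT_SPMC_SHMEM_DATASTORE_SIZE];
 *
 *	int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size)
 *	{
 *		*datastore = plat_datastore;
 *		*size = sizeof(plat_datastore);
 *		return 0;
 *	}
 *
 * The hook name and macro above are hypothetical; consult the platform port
 * for the actual datastore provisioning.
 */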

/**
 * spmc_shmem_obj_size - Convert from descriptor size to object size.
 * @desc_size:   Size of struct ffa_memory_region_descriptor object.
 *
 * Return: Size of struct spmc_shmem_obj object.
 */
static size_t spmc_shmem_obj_size(size_t desc_size)
{
	return desc_size + offsetof(struct spmc_shmem_obj, desc);
}

/**
 * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
 * @state:      Global state.
 * @desc_size:  Size of struct ffa_memory_region_descriptor object that
 *              allocated object will hold.
 *
 * Return: Pointer to newly allocated object, or %NULL if there is not enough
 *         space left. The returned pointer is only valid while @state is
 *         locked; to use it again after unlocking @state,
 *         spmc_shmem_obj_lookup must be called.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
{
	struct spmc_shmem_obj *obj;
	size_t free = state->data_size - state->allocated;
	size_t obj_size;

	if (state->data == NULL) {
		ERROR("Missing shmem datastore!\n");
		return NULL;
	}

	obj_size = spmc_shmem_obj_size(desc_size);

	/* Ensure the obj size has not overflowed. */
	if (obj_size < desc_size) {
		WARN("%s(0x%zx) desc_size overflow\n",
		     __func__, desc_size);
		return NULL;
	}

	if (obj_size > free) {
		WARN("%s(0x%zx) failed, free 0x%zx\n",
		     __func__, desc_size, free);
		return NULL;
	}
	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
	obj->desc = (struct ffa_mtd) {0};
	obj->desc_size = desc_size;
	obj->desc_filled = 0;
	obj->in_use = 0;
	state->allocated += obj_size;
	return obj;
}
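
/*
 * Minimal usage sketch (for illustration only): allocation and lookup must
 * happen under the global state lock, and any object pointer must be
 * re-looked-up by handle once the lock has been dropped, e.g.:
 *
 *	spin_lock(&spmc_shmem_obj_state.lock);
 *	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, desc_size);
 *	...
 *	spin_unlock(&spmc_shmem_obj_state.lock);
 *	// 'obj' may now be stale; re-acquire it by handle:
 *	spin_lock(&spmc_shmem_obj_state.lock);
 *	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, handle);
 */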

/**
 * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
 * @state:      Global state.
 * @obj:        Object to free.
 *
 * Release memory used by @obj. Other objects may move, so on return all
 * pointers to struct spmc_shmem_obj object should be considered invalid, not
 * just @obj.
 *
 * The current implementation always compacts the remaining objects to simplify
 * the allocator and to avoid fragmentation.
 */

static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
				struct spmc_shmem_obj *obj)
{
	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
	uint8_t *shift_dest = (uint8_t *)obj;
	uint8_t *shift_src = shift_dest + free_size;
	size_t shift_size = state->allocated - (shift_src - state->data);

	if (shift_size != 0U) {
		memmove(shift_dest, shift_src, shift_size);
	}
	state->allocated -= free_size;
}
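
/*
 * Compaction sketch: freeing B from a datastore laid out as
 *
 *	| A | B | C | D | free... |
 *	      ^shift_dest
 *	          ^shift_src
 *
 * memmoves C and D down over B, yielding | A | C | D | free... |, which is
 * why every outstanding object pointer (not just the freed one) is
 * invalidated.
 */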

/**
 * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
 * @state:      Global state.
 * @handle:     Unique handle of object to return.
 *
 * Return: struct spmc_shmem_obj object with handle matching @handle.
 *         %NULL, if no object in @state->data has a matching handle.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
{
	uint8_t *curr = state->data;

	while (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		if (obj->desc.handle == handle) {
			return obj;
		}
		curr += spmc_shmem_obj_size(obj->desc_size);
	}
	return NULL;
}

/**
 * spmc_shmem_obj_get_next - Get the next memory object from an offset.
 * @state:      Global state.
 * @offset:     Offset used to track which objects have previously been
 *              returned.
 *
 * Return: the next struct spmc_shmem_obj object from the provided offset.
 *         %NULL, if there are no more objects.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
{
	uint8_t *curr = state->data + *offset;

	if (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		*offset += spmc_shmem_obj_size(obj->desc_size);

		return obj;
	}
	return NULL;
}
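
/*
 * Iteration sketch (for illustration): walking every object in the
 * datastore with spmc_shmem_obj_get_next, as the in-flight transaction
 * checks below do:
 *
 *	size_t offset = 0;
 *	struct spmc_shmem_obj *it;
 *
 *	while ((it = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
 *					     &offset)) != NULL) {
 *		// inspect it->desc, under spmc_shmem_obj_state.lock
 *	}
 */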

/*******************************************************************************
 * FF-A memory descriptor helper functions.
 ******************************************************************************/
/**
 * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
 *                           client's FF-A version.
 * @desc:         The memory transaction descriptor.
 * @index:        The index of the emad element to be accessed.
 * @ffa_version:  FF-A version of the provided structure.
 * @emad_size:    Will be populated with the size of the returned emad
 *                descriptor.
 * Return: A pointer to the requested emad structure.
 */
static void *
spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
			uint32_t ffa_version, size_t *emad_size)
{
	uint8_t *emad;

	assert(index < desc->emad_count);

	/*
	 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
	 * format, otherwise assume it is a v1.1 format.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		emad = (uint8_t *)desc + offsetof(struct ffa_mtd_v1_0, emad);
		*emad_size = sizeof(struct ffa_emad_v1_0);
	} else {
		assert(is_aligned(desc->emad_offset, 16));
		emad = ((uint8_t *) desc + desc->emad_offset);
		*emad_size = desc->emad_size;
	}

	assert(((uint64_t)index * (uint64_t)*emad_size) <= UINT32_MAX);
	return (emad + (*emad_size * index));
}

/**
 * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
 *                               FF-A version of the descriptor.
 * @obj:    Object containing ffa_memory_region_descriptor.
 *
 * Return: struct ffa_comp_mrd object corresponding to the composite memory
 *         region descriptor.
 */
static struct ffa_comp_mrd *
spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
{
	size_t emad_size;
	/*
	 * The comp_mrd_offset field of the emad descriptor remains consistent
	 * between FF-A versions therefore we can use the v1.0 descriptor here
	 * in all cases.
	 */
	struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
							     ffa_version,
							     &emad_size);

	/* Ensure the composite descriptor offset is aligned. */
	if (!is_aligned(emad->comp_mrd_offset, 8)) {
		WARN("Unaligned composite memory region descriptor offset.\n");
		return NULL;
	}

	return (struct ffa_comp_mrd *)
	       ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
}

/**
 * spmc_shmem_obj_validate_id - Validate that a partition ID is participating
 *                              in a given memory transaction.
 * @obj:      The shared memory object containing the descriptor
 *            of the memory transaction.
 * @sp_id:    Partition ID to validate.
 * Return: true if ID is valid, else false.
 */
bool spmc_shmem_obj_validate_id(struct spmc_shmem_obj *obj, uint16_t sp_id)
{
	bool found = false;
	struct ffa_mtd *desc = &obj->desc;
	size_t desc_size = obj->desc_size;

	/* Validate the partition is a valid participant. */
	for (unsigned int i = 0U; i < desc->emad_count; i++) {
		size_t emad_size;
		struct ffa_emad_v1_0 *emad;

		emad = spmc_shmem_obj_get_emad(desc, i,
					       MAKE_FFA_VERSION(1, 1),
					       &emad_size);
		/*
		 * Validate the calculated emad address resides within the
		 * descriptor.
		 */
		if ((emad == NULL) || (uintptr_t) emad >=
		    (uintptr_t)((uint8_t *) desc + desc_size)) {
			VERBOSE("Invalid emad.\n");
			break;
		}
		if (sp_id == emad->mapd.endpoint_id) {
			found = true;
			break;
		}
	}
	return found;
}

/*
 * Compare two memory regions to determine if any range overlaps with another
 * ongoing memory transaction.
 */
static bool
overlapping_memory_regions(struct ffa_comp_mrd *region1,
			   struct ffa_comp_mrd *region2)
{
	uint64_t region1_start;
	uint64_t region1_size;
	uint64_t region1_end;
	uint64_t region2_start;
	uint64_t region2_size;
	uint64_t region2_end;

	assert(region1 != NULL);
	assert(region2 != NULL);

	if (region1 == region2) {
		return true;
	}

	/*
	 * Check each memory region in the request against existing
	 * transactions.
	 */
	for (size_t i = 0; i < region1->address_range_count; i++) {

		region1_start = region1->address_range_array[i].address;
		region1_size =
			region1->address_range_array[i].page_count *
			PAGE_SIZE_4KB;
		region1_end = region1_start + region1_size;

		for (size_t j = 0; j < region2->address_range_count; j++) {

			region2_start = region2->address_range_array[j].address;
			region2_size =
				region2->address_range_array[j].page_count *
				PAGE_SIZE_4KB;
			region2_end = region2_start + region2_size;

			/* Check if regions are not overlapping. */
			if (!((region2_end <= region1_start) ||
			      (region1_end <= region2_start))) {
				WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
				     region1_start, region1_end,
				     region2_start, region2_end);
				return true;
			}
		}
	}
	return false;
}

/*******************************************************************************
 * FF-A v1.0 Memory Descriptor Conversion Helpers.
 ******************************************************************************/
/**
 * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
 *                                     converted descriptor.
 * @orig:       The original v1.0 memory transaction descriptor.
 * @desc_size:  The size of the original v1.0 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.1 format.
 */
static size_t
spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
{
	size_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = orig->emad;

	/* Get the size of the v1.1 descriptor. */
	size += sizeof(struct ffa_mtd);

	/* Add the size of the emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrds. */
	size += sizeof(struct ffa_comp_mrd);

	/* Add the size of the constituent mrds. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Check the calculated address is within the memory descriptor. */
	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
	    (uintptr_t)((uint8_t *) orig + desc_size)) {
		return 0;
	}
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}
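
/*
 * Worked example (sketch): for a v1.0 descriptor with two borrowers and
 * three constituent address ranges, the converted v1.1 size is
 *
 *	sizeof(struct ffa_mtd)
 *	+ 2 * sizeof(struct ffa_emad_v1_0)
 *	+ sizeof(struct ffa_comp_mrd)
 *	+ 3 * sizeof(struct ffa_cons_mrd)
 *
 * i.e. one fixed header, one emad per borrower, a single shared composite
 * descriptor and one entry per constituent range.
 */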

/**
 * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
 *                                     converted descriptor.
 * @orig:       The original v1.1 memory transaction descriptor.
 * @desc_size:  The size of the original v1.1 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.0 format.
 */
static size_t
spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
{
	size_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
					   ((uint8_t *) orig +
					    orig->emad_offset);

	/* Get the size of the v1.0 descriptor. */
	size += sizeof(struct ffa_mtd_v1_0);

	/* Add the size of the v1.0 emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrds. */
	size += sizeof(struct ffa_comp_mrd);

	/* Add the size of the constituent mrds. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Check the calculated address is within the memory descriptor. */
	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
	    (uintptr_t)((uint8_t *) orig + desc_size)) {
		return 0;
	}
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}

/**
 * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
 * @out_obj:	The shared memory object to populate the converted descriptor.
 * @orig:	The shared memory object containing the v1.0 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
				     struct spmc_shmem_obj *orig)
{
	struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
	struct ffa_mtd *out = &out_obj->desc;
	struct ffa_emad_v1_0 *emad_array_in;
	struct ffa_emad_v1_0 *emad_array_out;
	struct ffa_comp_mrd *mrd_in;
	struct ffa_comp_mrd *mrd_out;

	size_t mrd_in_offset;
	size_t mrd_out_offset;
	size_t mrd_size = 0;

	/* Populate the new descriptor format from the v1.0 struct. */
	out->sender_id = mtd_orig->sender_id;
	out->memory_region_attributes = mtd_orig->memory_region_attributes;
	out->flags = mtd_orig->flags;
	out->handle = mtd_orig->handle;
	out->tag = mtd_orig->tag;
	out->emad_count = mtd_orig->emad_count;
	out->emad_size = sizeof(struct ffa_emad_v1_0);

	/*
	 * We will locate the emad descriptors directly after the ffa_mtd
	 * struct. This will be 8-byte aligned.
	 */
	out->emad_offset = sizeof(struct ffa_mtd);

	emad_array_in = mtd_orig->emad;
	emad_array_out = (struct ffa_emad_v1_0 *)
			 ((uint8_t *) out + out->emad_offset);

	/* Copy across the emad structs. */
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		/* Bound check for emad array. */
		if (((uint8_t *)emad_array_in + sizeof(struct ffa_emad_v1_0)) >
		    ((uint8_t *) mtd_orig + orig->desc_size)) {
			VERBOSE("%s: Invalid mtd structure.\n", __func__);
			return false;
		}
		memcpy(&emad_array_out[i], &emad_array_in[i],
		       sizeof(struct ffa_emad_v1_0));
	}

	/* Place the mrd descriptors after the end of the emad descriptors. */
	mrd_in_offset = emad_array_in->comp_mrd_offset;
	mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

	/* Add the size of the composite memory region descriptor. */
	mrd_size += sizeof(struct ffa_comp_mrd);

	/* Find the mrd descriptor. */
	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

	/* Add the size of the constituent memory region descriptors. */
	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

	/*
	 * Update the offset in the emads by the delta between the input and
	 * output addresses.
	 */
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		emad_array_out[i].comp_mrd_offset =
			emad_array_in[i].comp_mrd_offset +
			(mrd_out_offset - mrd_in_offset);
	}

	/* Verify that we stay within bound of the memory descriptors. */
	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
	    (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
		ERROR("%s: Invalid mrd structure.\n", __func__);
		return false;
	}

	/* Copy the mrd descriptors directly. */
	memcpy(mrd_out, mrd_in, mrd_size);

	return true;
}
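
/*
 * Resulting v1.1 layout (sketch): the converted descriptor is packed as
 *
 *	| struct ffa_mtd | emad[0..count-1] | ffa_comp_mrd | ffa_cons_mrd[] |
 *	  ^emad_offset = sizeof(struct ffa_mtd)
 *	                                      ^comp_mrd_offset, rebased by
 *	                                       (mrd_out_offset - mrd_in_offset)
 */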

/**
 * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
 *                                v1.0 memory object.
 * @out_obj:	The shared memory object to populate the v1.0 descriptor.
 * @orig:	The shared memory object containing the v1.1 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
			     struct spmc_shmem_obj *orig)
{
	struct ffa_mtd *mtd_orig = &orig->desc;
	struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
	struct ffa_emad_v1_0 *emad_in;
	struct ffa_emad_v1_0 *emad_array_in;
	struct ffa_emad_v1_0 *emad_array_out;
	struct ffa_comp_mrd *mrd_in;
	struct ffa_comp_mrd *mrd_out;

	size_t mrd_in_offset;
	size_t mrd_out_offset;
	size_t emad_out_array_size;
	size_t mrd_size = 0;
	size_t orig_desc_size = orig->desc_size;

	/* Populate the v1.0 descriptor format from the v1.1 struct. */
	out->sender_id = mtd_orig->sender_id;
	out->memory_region_attributes = mtd_orig->memory_region_attributes;
	out->flags = mtd_orig->flags;
	out->handle = mtd_orig->handle;
	out->tag = mtd_orig->tag;
	out->emad_count = mtd_orig->emad_count;

	/* Determine the location of the emad array in both descriptors. */
	emad_array_in = (struct ffa_emad_v1_0 *)
			((uint8_t *) mtd_orig + mtd_orig->emad_offset);
	emad_array_out = out->emad;

	/* Copy across the emad structs. */
	emad_in = emad_array_in;
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		/* Bound check for emad array. */
		if (((uint8_t *)emad_in + sizeof(struct ffa_emad_v1_0)) >
		    ((uint8_t *) mtd_orig + orig_desc_size)) {
			VERBOSE("%s: Invalid mtd structure.\n", __func__);
			return false;
		}
		memcpy(&emad_array_out[i], emad_in,
		       sizeof(struct ffa_emad_v1_0));

		/*
		 * emad_size is a byte count, so advance with byte-based
		 * arithmetic rather than scaled pointer arithmetic.
		 */
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *)emad_in + mtd_orig->emad_size);
	}

	/* Place the mrd descriptors after the end of the emad descriptors. */
	emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;

	mrd_out_offset = (uint8_t *) out->emad - (uint8_t *) out +
			 emad_out_array_size;

	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

	mrd_in_offset = mtd_orig->emad_offset +
			(mtd_orig->emad_size * mtd_orig->emad_count);

	/* Add the size of the composite memory region descriptor. */
	mrd_size += sizeof(struct ffa_comp_mrd);

	/* Find the mrd descriptor. */
	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

	/* Add the size of the constituent memory region descriptors. */
	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

	/*
	 * Update the offset in the emads by the delta between the input and
	 * output addresses.
	 */
	emad_in = emad_array_in;

	for (unsigned int i = 0U; i < out->emad_count; i++) {
		emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
						    (mrd_out_offset -
						     mrd_in_offset);
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *)emad_in + mtd_orig->emad_size);
	}

	/* Verify that we stay within bound of the memory descriptors. */
	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
	    (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
		ERROR("%s: Invalid mrd structure.\n", __func__);
		return false;
	}

	/* Copy the mrd descriptors directly. */
	memcpy(mrd_out, mrd_in, mrd_size);

	return true;
}

/**
 * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
 *                                     the v1.0 format and populates the
 *                                     provided buffer.
 * @dst:             Buffer to populate v1.0 ffa_memory_region_descriptor.
 * @orig_obj:        Object containing v1.1 ffa_memory_region_descriptor.
 * @buf_size:        Size of the buffer to populate.
 * @offset:          The offset of the converted descriptor to copy.
 * @copy_size:       Will be populated with the number of bytes copied.
 * @v1_0_desc_size:  Will be populated with the total size of the v1.0
 *                   descriptor.
 *
 * Return: 0 if conversion and population succeeded.
 * Note: This function invalidates the reference to @orig_obj therefore
 *       `spmc_shmem_obj_lookup` must be called if further usage is required.
 */
static uint32_t
spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
				  size_t buf_size, size_t offset,
				  size_t *copy_size, size_t *v1_0_desc_size)
{
	struct spmc_shmem_obj *v1_0_obj;

	/* Calculate the size that the v1.0 descriptor will require. */
	*v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
			  &orig_obj->desc, orig_obj->desc_size);

	if (*v1_0_desc_size == 0) {
		ERROR("%s: cannot determine size of descriptor.\n",
		      __func__);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/* Get a new obj to store the v1.0 descriptor. */
	v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
					*v1_0_desc_size);

	if (!v1_0_obj) {
		return FFA_ERROR_NO_MEMORY;
	}

	/* Perform the conversion from v1.1 to v1.0. */
	if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
		spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	*copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
	memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);

	/*
	 * We're finished with the v1.0 descriptor for now so free it.
	 * Note that this will invalidate any references to the v1.1
	 * descriptor.
	 */
	spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);

	return 0;
}

static int
spmc_validate_mtd_start(struct ffa_mtd *desc, uint32_t ffa_version,
			size_t fragment_length, size_t total_length)
{
	unsigned long long emad_end;
	unsigned long long emad_size;
	unsigned long long emad_offset;
	unsigned int min_desc_size;

	/* Determine the appropriate minimum descriptor size. */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		min_desc_size = sizeof(struct ffa_mtd_v1_0);
	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
		min_desc_size = sizeof(struct ffa_mtd);
	} else {
		return FFA_ERROR_INVALID_PARAMETER;
	}
	if (fragment_length < min_desc_size) {
		WARN("%s: invalid length %zu < %u\n", __func__, fragment_length,
		     min_desc_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	if (desc->emad_count == 0U) {
		WARN("%s: unsupported attribute desc count %u.\n",
		     __func__, desc->emad_count);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/*
	 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
	 * format, otherwise assume it is a v1.1 format.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		emad_offset = emad_size = sizeof(struct ffa_emad_v1_0);
	} else {
		if (!is_aligned(desc->emad_offset, 16)) {
			WARN("%s: Emad offset %" PRIx32 " is not 16-byte aligned.\n",
			     __func__, desc->emad_offset);
			return FFA_ERROR_INVALID_PARAMETER;
		}
		if (desc->emad_offset < sizeof(struct ffa_mtd)) {
			WARN("%s: Emad offset too small: 0x%" PRIx32 " < 0x%zx.\n",
			     __func__, desc->emad_offset,
			     sizeof(struct ffa_mtd));
			return FFA_ERROR_INVALID_PARAMETER;
		}
		emad_offset = desc->emad_offset;
		if (desc->emad_size < sizeof(struct ffa_emad_v1_0)) {
			WARN("%s: Bad emad size (%" PRIu32 " < %zu).\n", __func__,
			     desc->emad_size, sizeof(struct ffa_emad_v1_0));
			return FFA_ERROR_INVALID_PARAMETER;
		}
		if (!is_aligned(desc->emad_size, 16)) {
			WARN("%s: Emad size 0x%" PRIx32 " is not 16-byte aligned.\n",
			     __func__, desc->emad_size);
			return FFA_ERROR_INVALID_PARAMETER;
		}
		emad_size = desc->emad_size;
	}

	/*
	 * Overflow is impossible: the arithmetic happens in at least 64-bit
	 * precision, but all of the operands are bounded by UINT32_MAX, and
	 * ((2^32 - 1)^2 + (2^32 - 1) + (2^32 - 1)) = ((2^32 - 1) * (2^32 + 1))
	 * = (2^64 - 1).
	 */
	CASSERT(sizeof(desc->emad_count) == 4, assert_emad_count_max_too_large);
	emad_end = (desc->emad_count * (unsigned long long)emad_size) +
		   (unsigned long long)sizeof(struct ffa_comp_mrd) +
		   (unsigned long long)emad_offset;

	if (emad_end > total_length) {
		WARN("%s: Composite memory region extends beyond descriptor: 0x%llx > 0x%zx\n",
		     __func__, emad_end, total_length);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	return 0;
}
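
/*
 * Numeric sketch: for a v1.1 descriptor with emad_offset = 0x30,
 * emad_size = 0x10 and emad_count = 2, the smallest total_length that
 * passes the check above is
 *
 *	emad_end = 2 * 0x10 + sizeof(struct ffa_comp_mrd) + 0x30
 *
 * i.e. the header, the whole emad array and at least one composite
 * descriptor header must fit inside the transmitted descriptor.
 */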

/**
 * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, -EINVAL if constituent_memory_region_descriptor
 *         offset or count is invalid.
 */
static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
				uint32_t ffa_version)
{
	const struct ffa_emad_v1_0 *emad;
	size_t emad_size;
	uint32_t comp_mrd_offset = 0;

	if (obj->desc_filled != obj->desc_size) {
		ERROR("BUG: %s called on incomplete object (%zu != %zu)\n",
		      __func__, obj->desc_filled, obj->desc_size);
		panic();
	}

	if (spmc_validate_mtd_start(&obj->desc, ffa_version,
				    obj->desc_filled, obj->desc_size)) {
		ERROR("BUG: %s called on object with corrupt memory region descriptor\n",
		      __func__);
		panic();
	}

	emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
				       ffa_version, &emad_size);

	for (size_t emad_num = 0; emad_num < obj->desc.emad_count; emad_num++) {
		size_t size;
		size_t count;
		size_t expected_size;
		uint64_t total_page_count;
		size_t header_emad_size;
		uint32_t offset;
		struct ffa_comp_mrd *comp;
		ffa_endpoint_id16_t ep_id;

		/*
		 * Advance to the next emad after the first iteration, so the
		 * first iteration validates emad[0] rather than skipping it.
		 */
		if (emad_num != 0U) {
			emad = (const struct ffa_emad_v1_0 *)
			       ((const uint8_t *)emad + emad_size);
		}

		/*
		 * Validate the calculated emad address resides within the
		 * descriptor.
		 */
		if ((uintptr_t) emad >
		    ((uintptr_t) &obj->desc + obj->desc_size - emad_size)) {
			ERROR("BUG: Invalid emad access not detected earlier.\n");
			panic();
		}

		offset = emad->comp_mrd_offset;

		/*
		 * If a partition ID resides in the secure world validate that
		 * the partition ID is for a known partition. Ignore any
		 * partition ID belonging to the normal world as it is assumed
		 * the Hypervisor will have validated these.
		 */
		ep_id = emad->mapd.endpoint_id;
		if (ffa_is_secure_world_id(ep_id)) {
			if (spmc_get_sp_ctx(ep_id) == NULL) {
				WARN("%s: Invalid receiver id 0x%x\n",
				     __func__, ep_id);
				return -EINVAL;
			}
		}

		/*
		 * The offset provided to the composite memory region descriptor
		 * should be consistent across endpoint descriptors. Store the
		 * first entry and compare against subsequent entries.
		 */
		if (comp_mrd_offset == 0) {
			comp_mrd_offset = offset;
		} else {
			if (comp_mrd_offset != offset) {
				ERROR("%s: mismatching offsets provided, %u != %u\n",
				      __func__, offset, comp_mrd_offset);
				return -EINVAL;
			}
			continue; /* Remainder only executed on first iteration. */
		}

		header_emad_size = (size_t)((uint8_t *)emad - (uint8_t *)&obj->desc) +
				   (obj->desc.emad_count * emad_size);

		if (offset < header_emad_size) {
			WARN("%s: invalid object, offset %u < header + emad %zu\n",
			     __func__, offset, header_emad_size);
			return -EINVAL;
		}

		size = obj->desc_size;

		if (offset > size) {
			WARN("%s: invalid object, offset %u > total size %zu\n",
			     __func__, offset, obj->desc_size);
			return -EINVAL;
		}
		size -= offset;

		if (size < sizeof(struct ffa_comp_mrd)) {
			WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
			     __func__, offset, obj->desc_size);
			return -EINVAL;
		}
		size -= sizeof(struct ffa_comp_mrd);

		count = size / sizeof(struct ffa_cons_mrd);

		comp = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);

		if (comp == NULL) {
			WARN("%s: invalid comp_mrd offset\n", __func__);
			return -EINVAL;
		}

		if (comp->address_range_count != count) {
			WARN("%s: invalid object, desc count %u != %zu\n",
			     __func__, comp->address_range_count, count);
			return -EINVAL;
		}

		expected_size = offset + sizeof(*comp) +
				count * sizeof(struct ffa_cons_mrd);

		if (expected_size != obj->desc_size) {
			WARN("%s: invalid object, computed size %zu != size %zu\n",
			     __func__, expected_size, obj->desc_size);
			return -EINVAL;
		}

		total_page_count = 0;

		for (size_t i = 0; i < count; i++) {
			total_page_count +=
				comp->address_range_array[i].page_count;
		}
		if (comp->total_page_count != total_page_count) {
			WARN("%s: invalid object, desc total_page_count %u != %" PRIu64 "\n",
			     __func__, comp->total_page_count,
			     total_page_count);
			return -EINVAL;
		}
	}
	return 0;
}
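
/*
 * Shape enforced above (sketch): a fully received descriptor must satisfy
 *
 *	desc_size == comp_mrd_offset + sizeof(struct ffa_comp_mrd)
 *	             + comp->address_range_count * sizeof(struct ffa_cons_mrd)
 *
 * with comp_mrd_offset at or beyond the header plus the emad array, a single
 * composite offset shared by all emads, and total_page_count equal to the
 * sum of the per-range page counts.
 */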

/**
 * spmc_shmem_check_state_obj - Check if the descriptor describes memory
 *                              regions that are currently involved with an
 *                              existing memory transaction. This implies that
 *                              the memory is not in a valid state for lending.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, -EINVAL if invalid memory state.
 */
static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
				      uint32_t ffa_version)
{
	size_t obj_offset = 0;
	struct spmc_shmem_obj *inflight_obj;

	struct ffa_comp_mrd *other_mrd;
	struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
								  ffa_version);

	if (requested_mrd == NULL) {
		return -EINVAL;
	}

	inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
					       &obj_offset);

	while (inflight_obj != NULL) {
		/*
		 * Don't compare the transaction to itself or to partially
		 * transmitted descriptors.
		 */
		if ((obj->desc.handle != inflight_obj->desc.handle) &&
		    (obj->desc_size == obj->desc_filled)) {
			other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
							FFA_VERSION_COMPILED);
			if (other_mrd == NULL) {
				return -EINVAL;
			}
			if (overlapping_memory_regions(requested_mrd,
						       other_mrd)) {
				return -EINVAL;
			}
		}

		inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
						       &obj_offset);
	}
	return 0;
}

static long spmc_ffa_fill_desc(struct mailbox *mbox,
			       struct spmc_shmem_obj *obj,
			       uint32_t fragment_length,
			       ffa_mtd_flag32_t mtd_flag,
			       uint32_t ffa_version,
			       void *smc_handle)
{
	int ret;
	size_t emad_size;
	uint32_t handle_low;
	uint32_t handle_high;
	struct ffa_emad_v1_0 *emad;
	struct ffa_emad_v1_0 *other_emad;

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (fragment_length > mbox->rxtx_page_count * PAGE_SIZE_4KB) {
		WARN("%s: bad fragment size %u > %u buffer size\n", __func__,
		     fragment_length, mbox->rxtx_page_count * PAGE_SIZE_4KB);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (fragment_length > obj->desc_size - obj->desc_filled) {
		WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
		     fragment_length, obj->desc_size - obj->desc_filled);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	memcpy((uint8_t *)&obj->desc + obj->desc_filled,
	       (uint8_t *) mbox->tx_buffer, fragment_length);

	/* Ensure that the sender ID resides in the normal world. */
	if (ffa_is_secure_world_id(obj->desc.sender_id)) {
		WARN("%s: Invalid sender ID 0x%x.\n",
		     __func__, obj->desc.sender_id);
		ret = FFA_ERROR_DENIED;
		goto err_arg;
	}

	/* Ensure the NS bit is set to 0. */
	if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	/*
	 * We don't currently support any optional flags so ensure none are
	 * requested.
	 */
	if (obj->desc.flags != 0U && mtd_flag != 0U &&
	    (obj->desc.flags != mtd_flag)) {
		WARN("%s: invalid memory transaction flags %u != %u\n",
		     __func__, obj->desc.flags, mtd_flag);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (obj->desc_filled == 0U) {
		/* First fragment, descriptor header has been copied */
		ret = spmc_validate_mtd_start(&obj->desc, ffa_version,
					      fragment_length, obj->desc_size);
		if (ret != 0) {
			goto err_bad_desc;
		}

		obj->desc.handle = spmc_shmem_obj_state.next_handle++;
		obj->desc.flags |= mtd_flag;
	}

	obj->desc_filled += fragment_length;

	handle_low = (uint32_t)obj->desc.handle;
	handle_high = obj->desc.handle >> 32;

	if (obj->desc_filled != obj->desc_size) {
		SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
			 handle_high, obj->desc_filled,
			 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
	}

	/* The full descriptor has been received, perform any final checks. */

	ret = spmc_shmem_check_obj(obj, ffa_version);
	if (ret != 0) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_bad_desc;
	}

	/* Ensure partition IDs are not duplicated. */
	for (size_t i = 0; i < obj->desc.emad_count; i++) {
		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
					       &emad_size);

		for (size_t j = i + 1; j < obj->desc.emad_count; j++) {
			other_emad = spmc_shmem_obj_get_emad(&obj->desc, j,
							     ffa_version,
							     &emad_size);

			if (emad->mapd.endpoint_id ==
			    other_emad->mapd.endpoint_id) {
				WARN("%s: Duplicated endpoint id 0x%x\n",
				     __func__, emad->mapd.endpoint_id);
				ret = FFA_ERROR_INVALID_PARAMETER;
				goto err_bad_desc;
			}
		}
	}

	ret = spmc_shmem_check_state_obj(obj, ffa_version);
	if (ret) {
		ERROR("%s: invalid memory region descriptor.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_bad_desc;
	}

	/*
	 * Everything checks out, if the sender was using FF-A v1.0, convert
	 * the descriptor format to use the v1.1 structures.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		struct spmc_shmem_obj *v1_1_obj;
		uint64_t mem_handle;

		/* Calculate the size that the v1.1 descriptor will require. */
		size_t v1_1_desc_size =
		    spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
						      obj->desc_size);

		if (v1_1_desc_size == 0U) {
			ERROR("%s: cannot determine size of descriptor.\n",
			      __func__);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_arg;
		}

		/* Get a new obj to store the v1.1 descriptor. */
		v1_1_obj =
		    spmc_shmem_obj_alloc(&spmc_shmem_obj_state, v1_1_desc_size);

		if (!v1_1_obj) {
			ret = FFA_ERROR_NO_MEMORY;
			goto err_arg;
		}

		/* Perform the conversion from v1.0 to v1.1. */
		v1_1_obj->desc_size = v1_1_desc_size;
		v1_1_obj->desc_filled = v1_1_desc_size;
		if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
			ERROR("%s: Could not convert mtd!\n", __func__);
			spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_arg;
		}

		/*
		 * We're finished with the v1.0 descriptor so free it
		 * and continue our checks with the new v1.1 descriptor.
		 */
		mem_handle = obj->desc.handle;
		spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
		obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
		if (obj == NULL) {
			ERROR("%s: Failed to find converted descriptor.\n",
			      __func__);
			ret = FFA_ERROR_INVALID_PARAMETER;
			return spmc_ffa_error_return(smc_handle, ret);
		}
	}

	/* Allow for platform specific operations to be performed. */
	ret = plat_spmc_shmem_begin(&obj->desc);
	if (ret != 0) {
		goto err_arg;
	}

	SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
		 0, 0, 0);

err_bad_desc:
err_arg:
	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
	return spmc_ffa_error_return(smc_handle, ret);
}

/**
 * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
 * @client:             Client state.
 * @total_length:       Total length of shared memory descriptor.
 * @fragment_length:    Length of fragment of shared memory descriptor passed in
 *                      this call.
 * @address:            Not supported, must be 0.
 * @page_count:         Not supported, must be 0.
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
 * to share or lend memory from non-secure os to secure os (with no stream
 * endpoints).
 *
 * Return: 0 on success, error code on failure.
 */
long spmc_ffa_mem_send(uint32_t smc_fid,
			bool secure_origin,
			uint64_t total_length,
			uint32_t fragment_length,
			uint64_t address,
			uint32_t page_count,
			void *cookie,
			void *handle,
			uint64_t flags)

{
	long ret;
	struct spmc_shmem_obj *obj;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	ffa_mtd_flag32_t mtd_flag;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	size_t min_desc_size;

	if (address != 0U || page_count != 0U) {
		WARN("%s: custom memory region for message not supported.\n",
		     __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (secure_origin) {
		WARN("%s: unsupported share direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		min_desc_size = sizeof(struct ffa_mtd_v1_0);
	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
		min_desc_size = sizeof(struct ffa_mtd);
	} else {
		WARN("%s: bad FF-A version.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check if the descriptor is too small for the FF-A version. */
	if (fragment_length < min_desc_size) {
		WARN("%s: bad first fragment size %u < %zu\n",
		     __func__, fragment_length, min_desc_size);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
		mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
	} else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
		mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
	} else {
		WARN("%s: invalid memory management operation.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);
	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
	if (obj == NULL) {
		ret = FFA_ERROR_NO_MEMORY;
		goto err_unlock;
	}

	spin_lock(&mbox->lock);
	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
				 ffa_version, handle);
	spin_unlock(&mbox->lock);

	spin_unlock(&spmc_shmem_obj_state.lock);
	return ret;

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
 * @client:             Client state.
 * @handle_low:         Handle_low value returned from FFA_MEM_FRAG_RX.
 * @handle_high:        Handle_high value returned from FFA_MEM_FRAG_RX.
 * @fragment_length:    Length of fragments transmitted.
 * @sender_id:          Vmid of sender in bits [31:16]
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Return: @smc_handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
			  bool secure_origin,
			  uint64_t handle_low,
			  uint64_t handle_high,
			  uint32_t fragment_length,
			  uint32_t sender_id,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	long ret;
	uint32_t desc_sender_id;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);

	struct spmc_shmem_obj *obj;
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
		     __func__, mem_handle);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
	if (sender_id != desc_sender_id) {
		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
		     sender_id, desc_sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	if (obj->desc_filled == obj->desc_size) {
		WARN("%s: object desc already filled, %zu\n", __func__,
		     obj->desc_filled);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	spin_lock(&mbox->lock);
	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
				 handle);
	spin_unlock(&mbox->lock);

	spin_unlock(&spmc_shmem_obj_state.lock);
	return ret;

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor
 *                                    if the caller implements a version greater
 *                                    than FF-A 1.0 or if they have requested
 *                                    the functionality.
 *                                    TODO: We are assuming that the caller is
 *                                    an SP. To support retrieval from the
 *                                    normal world this function will need to be
 *                                    expanded accordingly.
 * @resp:       Descriptor populated in caller's RX buffer.
 * @sp_ctx:     Context of the calling SP.
 */
void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
			 struct secure_partition_desc *sp_ctx)
{
	if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
	    sp_ctx->ns_bit_requested) {
		/*
		 * Currently memory senders must reside in the normal
		 * world, and we do not have the functionality to change
		 * the state of memory dynamically. Therefore we can always set
		 * the NS bit to 1.
		 */
		resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
	}
}
1339
1340/**
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001341 * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
1342 * @smc_fid: FID of SMC
1343 * @total_length: Total length of retrieve request descriptor if this is
1344 * the first call. Otherwise (unsupported) must be 0.
1345 * @fragment_length: Length of fragment of retrieve request descriptor passed
1346 * in this call. Only @fragment_length == @length is
1347 * supported by this implementation.
1348 * @address: Not supported, must be 0.
1349 * @page_count: Not supported, must be 0.
1350 * @smc_handle: Handle passed to smc call. Used to return
1351 * FFA_MEM_RETRIEVE_RESP.
1352 *
1353 * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
1354 * Used by secure os to retrieve memory already shared by non-secure os.
1355 * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
1356 * the client must call FFA_MEM_FRAG_RX until the full response has been
1357 * received.
1358 *
1359 * Return: @handle on success, error code on failure.
1360 */
1361long
1362spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
1363 bool secure_origin,
1364 uint32_t total_length,
1365 uint32_t fragment_length,
1366 uint64_t address,
1367 uint32_t page_count,
1368 void *cookie,
1369 void *handle,
1370 uint64_t flags)
1371{
1372 int ret;
1373 size_t buf_size;
Marc Bonnicid1907f02022-04-19 17:42:53 +01001374 size_t copy_size = 0;
1375 size_t min_desc_size;
1376 size_t out_desc_size = 0;
1377
1378 /*
1379 * Currently we are only accessing fields that are the same in both the
1380 * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
1381 * here. We only need validate against the appropriate struct size.
1382 */
1383 struct ffa_mtd *resp;
1384 const struct ffa_mtd *req;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001385 struct spmc_shmem_obj *obj = NULL;
1386 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
Marc Bonnicid1907f02022-04-19 17:42:53 +01001387 uint32_t ffa_version = get_partition_ffa_version(secure_origin);
Marc Bonnici08f28ef2022-04-19 16:52:59 +01001388 struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001389
1390 if (!secure_origin) {
1391 WARN("%s: unsupported retrieve req direction.\n", __func__);
1392 return spmc_ffa_error_return(handle,
1393 FFA_ERROR_INVALID_PARAMETER);
1394 }
1395
1396 if (address != 0U || page_count != 0U) {
1397 WARN("%s: custom memory region not supported.\n", __func__);
1398 return spmc_ffa_error_return(handle,
1399 FFA_ERROR_INVALID_PARAMETER);
1400 }
1401
1402 spin_lock(&mbox->lock);
1403
1404 req = mbox->tx_buffer;
1405 resp = mbox->rx_buffer;
1406 buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1407
1408 if (mbox->rxtx_page_count == 0U) {
1409 WARN("%s: buffer pair not registered.\n", __func__);
1410 ret = FFA_ERROR_INVALID_PARAMETER;
1411 goto err_unlock_mailbox;
1412 }
1413
1414 if (mbox->state != MAILBOX_STATE_EMPTY) {
1415 WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
1416 ret = FFA_ERROR_DENIED;
1417 goto err_unlock_mailbox;
1418 }
1419
1420 if (fragment_length != total_length) {
1421 WARN("%s: fragmented retrieve request not supported.\n",
1422 __func__);
1423 ret = FFA_ERROR_INVALID_PARAMETER;
1424 goto err_unlock_mailbox;
1425 }
1426
Marc Bonnici336630f2022-01-13 11:39:10 +00001427 if (req->emad_count == 0U) {
1428 WARN("%s: unsupported attribute desc count %u.\n",
1429 __func__, obj->desc.emad_count);
vallau01460d3962022-08-09 17:06:53 +02001430 ret = FFA_ERROR_INVALID_PARAMETER;
1431 goto err_unlock_mailbox;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001432 }
1433
Marc Bonnicid1907f02022-04-19 17:42:53 +01001434 /* Determine the appropriate minimum descriptor size. */
1435 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1436 min_desc_size = sizeof(struct ffa_mtd_v1_0);
1437 } else {
1438 min_desc_size = sizeof(struct ffa_mtd);
1439 }
1440 if (total_length < min_desc_size) {
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001441 WARN("%s: invalid length %u < %zu\n", __func__, total_length,
Marc Bonnicid1907f02022-04-19 17:42:53 +01001442 min_desc_size);
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001443 ret = FFA_ERROR_INVALID_PARAMETER;
1444 goto err_unlock_mailbox;
1445 }
1446
1447 spin_lock(&spmc_shmem_obj_state.lock);
1448
1449 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1450 if (obj == NULL) {
1451 ret = FFA_ERROR_INVALID_PARAMETER;
1452 goto err_unlock_all;
1453 }
1454
1455 if (obj->desc_filled != obj->desc_size) {
1456 WARN("%s: incomplete object desc filled %zu < size %zu\n",
1457 __func__, obj->desc_filled, obj->desc_size);
1458 ret = FFA_ERROR_INVALID_PARAMETER;
1459 goto err_unlock_all;
1460 }
1461
1462 if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
1463 WARN("%s: wrong sender id 0x%x != 0x%x\n",
1464 __func__, req->sender_id, obj->desc.sender_id);
1465 ret = FFA_ERROR_INVALID_PARAMETER;
1466 goto err_unlock_all;
1467 }
1468
1469 if (req->emad_count != 0U && req->tag != obj->desc.tag) {
1470 WARN("%s: wrong tag 0x%lx != 0x%lx\n",
1471 __func__, req->tag, obj->desc.tag);
1472 ret = FFA_ERROR_INVALID_PARAMETER;
1473 goto err_unlock_all;
1474 }
1475
Marc Bonnici336630f2022-01-13 11:39:10 +00001476 if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
1477 WARN("%s: mistmatch of endpoint counts %u != %u\n",
1478 __func__, req->emad_count, obj->desc.emad_count);
1479 ret = FFA_ERROR_INVALID_PARAMETER;
1480 goto err_unlock_all;
1481 }
1482
Marc Bonnici08f28ef2022-04-19 16:52:59 +01001483 /* Ensure the NS bit is set to 0 in the request. */
1484 if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
1485 WARN("%s: NS mem attributes flags MBZ.\n", __func__);
1486 ret = FFA_ERROR_INVALID_PARAMETER;
1487 goto err_unlock_all;
1488 }
1489
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001490 if (req->flags != 0U) {
1491 if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
1492 (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
1493 /*
1494 * If the retrieve request specifies the memory
1495 * transaction type, ensure it matches what we expect.
1496 */
1497 WARN("%s: wrong mem transaction flags %x != %x\n",
1498 __func__, req->flags, obj->desc.flags);
1499 ret = FFA_ERROR_INVALID_PARAMETER;
1500 goto err_unlock_all;
1501 }
1502
1503 if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
1504 req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
1505 /*
1506 * The current implementation does not support donate,
1507 * and no other flags are supported.
1508 */
1509 WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
1510 ret = FFA_ERROR_INVALID_PARAMETER;
1511 goto err_unlock_all;
1512 }
1513 }
1514
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001515 /* Validate the caller is a valid participant. */
Shruti Gupta20ce06c2022-08-25 14:22:53 +01001516 if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001517 WARN("%s: Invalid endpoint ID (0x%x).\n",
1518 __func__, sp_ctx->sp_id);
1519 ret = FFA_ERROR_INVALID_PARAMETER;
1520 goto err_unlock_all;
1521 }
1522
Marc Bonnicid1907f02022-04-19 17:42:53 +01001523 /* Validate that the provided emad offset and structure are valid. */
1524 for (size_t i = 0; i < req->emad_count; i++) {
1525 size_t emad_size;
1526 struct ffa_emad_v1_0 *emad;
1527
1528 emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1529 &emad_size);
Marc Bonnicid1907f02022-04-19 17:42:53 +01001530
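 /*
 * Ensure the computed emad address lies within the descriptor
 * actually transmitted by the caller.
 */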
1531 if ((uintptr_t) emad >= (uintptr_t)
1532 ((uint8_t *) req + total_length)) {
1533 WARN("Invalid emad access.\n");
1534 ret = FFA_ERROR_INVALID_PARAMETER;
1535 goto err_unlock_all;
1536 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001537 }
1538
1539 /*
1540 * Validate all the endpoints match in the case of multiple
1541 * borrowers. We don't mandate that the order of the borrowers
1542 * must match in the descriptors; therefore, check to see if the
1543 * endpoints match in any order.
1544 */
1545 for (size_t i = 0; i < req->emad_count; i++) {
1546 bool found = false;
Marc Bonnicid1907f02022-04-19 17:42:53 +01001547 size_t emad_size;
1548 struct ffa_emad_v1_0 *emad;
1549 struct ffa_emad_v1_0 *other_emad;
1550
1551 emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1552 &emad_size);
Marc Bonnici336630f2022-01-13 11:39:10 +00001553
1554 for (size_t j = 0; j < obj->desc.emad_count; j++) {
Marc Bonnicid1907f02022-04-19 17:42:53 +01001555 other_emad = spmc_shmem_obj_get_emad(
1556 &obj->desc, j, MAKE_FFA_VERSION(1, 1),
1557 &emad_size);
1558
Marc Bonnicid1907f02022-04-19 17:42:53 +01001559 if (req->emad_count != 0U &&
1560 emad->mapd.endpoint_id ==
1561 other_emad->mapd.endpoint_id) {
Marc Bonnici336630f2022-01-13 11:39:10 +00001562 found = true;
1563 break;
1564 }
1565 }
1566
1567 if (!found) {
1568 WARN("%s: invalid receiver id (0x%x).\n",
Marc Bonnicid1907f02022-04-19 17:42:53 +01001569 __func__, emad->mapd.endpoint_id);
Marc Bonnici336630f2022-01-13 11:39:10 +00001570 ret = FFA_ERROR_INVALID_PARAMETER;
1571 goto err_unlock_all;
1572 }
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001573 }
1574
1575 mbox->state = MAILBOX_STATE_FULL;
1576
1577 if (req->emad_count != 0U) {
1578 obj->in_use++;
1579 }
1580
Marc Bonnicid1907f02022-04-19 17:42:53 +01001581 /*
1582 * If the caller is v1.0 convert the descriptor, otherwise copy
1583 * directly.
1584 */
1585 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1586 ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
1587 &copy_size,
1588 &out_desc_size);
1589 if (ret != 0U) {
1590 ERROR("%s: Failed to process descriptor.\n", __func__);
1591 goto err_unlock_all;
1592 }
1593 } else {
1594 copy_size = MIN(obj->desc_size, buf_size);
1595 out_desc_size = obj->desc_size;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001596
Marc Bonnicid1907f02022-04-19 17:42:53 +01001597 memcpy(resp, &obj->desc, copy_size);
1598 }
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001599
Marc Bonnici08f28ef2022-04-19 16:52:59 +01001600 /* Set the NS bit in the response if applicable. */
1601 spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);
1602
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001603 spin_unlock(&spmc_shmem_obj_state.lock);
1604 spin_unlock(&mbox->lock);
1605
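 /*
 * FFA_MEM_RETRIEVE_RESP returns the total descriptor size in w1 and
 * the length of the fragment placed in the RX buffer in w2; a caller
 * seeing w2 < w1 fetches the remainder via FFA_MEM_FRAG_RX.
 */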
Marc Bonnicid1907f02022-04-19 17:42:53 +01001606 SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001607 copy_size, 0, 0, 0, 0, 0);
1608
1609err_unlock_all:
1610 spin_unlock(&spmc_shmem_obj_state.lock);
1611err_unlock_mailbox:
1612 spin_unlock(&mbox->lock);
1613 return spmc_ffa_error_return(handle, ret);
1614}
1615
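/*
 * Illustrative sequence (assuming a caller whose RX buffer is smaller
 * than the full descriptor): FFA_MEM_RETRIEVE_REQ returns the first
 * fragment via FFA_MEM_RETRIEVE_RESP; the caller then issues
 * FFA_MEM_FRAG_RX repeatedly, advancing fragment_offset by the number
 * of bytes received each time, until the whole descriptor is copied.
 */
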
1616/**
1617 * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
1618 * @secure_origin: true if the call originated from the secure world.
1619 * @handle_low: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
1620 * @handle_high: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
1621 * @fragment_offset: Byte offset in descriptor to resume at.
1622 * @sender_id: Bit[31:16]: Endpoint id of sender if client is a
1623 * hypervisor. 0 otherwise.
1624 * @handle: Handle passed to the SMC call. Used to return
1625 * FFA_MEM_FRAG_TX.
1626 *
1627 * Return: @handle on success, error code on failure.
1628 */
1629long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
1630 bool secure_origin,
1631 uint32_t handle_low,
1632 uint32_t handle_high,
1633 uint32_t fragment_offset,
1634 uint32_t sender_id,
1635 void *cookie,
1636 void *handle,
1637 uint64_t flags)
1638{
1639 int ret;
1640 const uint8_t *src;
1641 size_t buf_size;
1642 size_t copy_size;
1643 size_t full_copy_size;
1644 uint32_t desc_sender_id;
1645 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1646 uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1647 struct spmc_shmem_obj *obj;
Marc Bonnicid1907f02022-04-19 17:42:53 +01001648 uint32_t ffa_version = get_partition_ffa_version(secure_origin);
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001649
1650 if (!secure_origin) {
1651 WARN("%s: can only be called from swld.\n",
1652 __func__);
1653 return spmc_ffa_error_return(handle,
1654 FFA_ERROR_INVALID_PARAMETER);
1655 }
1656
1657 spin_lock(&spmc_shmem_obj_state.lock);
1658
1659 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1660 if (obj == NULL) {
1661 WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
1662 __func__, mem_handle);
1663 ret = FFA_ERROR_INVALID_PARAMETER;
1664 goto err_unlock_shmem;
1665 }
1666
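 /*
 * A non-zero sender_id is only expected when the caller is a
 * hypervisor requesting on behalf of an endpoint; it must then match
 * the sender recorded in the object's descriptor.
 */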
1667 desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1668 if (sender_id != 0U && sender_id != desc_sender_id) {
1669 WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1670 sender_id, desc_sender_id);
1671 ret = FFA_ERROR_INVALID_PARAMETER;
1672 goto err_unlock_shmem;
1673 }
1674
1675 if (fragment_offset >= obj->desc_size) {
1676 WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
1677 __func__, fragment_offset, obj->desc_size);
1678 ret = FFA_ERROR_INVALID_PARAMETER;
1679 goto err_unlock_shmem;
1680 }
1681
1682 spin_lock(&mbox->lock);
1683
1684 if (mbox->rxtx_page_count == 0U) {
1685 WARN("%s: buffer pair not registered.\n", __func__);
1686 ret = FFA_ERROR_INVALID_PARAMETER;
1687 goto err_unlock_all;
1688 }
1689
1690 if (mbox->state != MAILBOX_STATE_EMPTY) {
1691 WARN("%s: RX Buffer is full!\n", __func__);
1692 ret = FFA_ERROR_DENIED;
1693 goto err_unlock_all;
1694 }
1695
1696 buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1697
1698 mbox->state = MAILBOX_STATE_FULL;
1699
Marc Bonnicid1907f02022-04-19 17:42:53 +01001700 /*
1701 * If the caller is v1.0 convert the descriptor, otherwise copy
1702 * directly.
1703 */
1704 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1705 size_t out_desc_size;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001706
Marc Bonnicid1907f02022-04-19 17:42:53 +01001707 ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
1708 buf_size,
1709 fragment_offset,
1710 &copy_size,
1711 &out_desc_size);
1712 if (ret != 0U) {
1713 ERROR("%s: Failed to process descriptor.\n", __func__);
1714 goto err_unlock_all;
1715 }
1716 } else {
1717 full_copy_size = obj->desc_size - fragment_offset;
1718 copy_size = MIN(full_copy_size, buf_size);
1719
1720 src = &obj->desc;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001721
Marc Bonnicid1907f02022-04-19 17:42:53 +01001722 memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
1723 }
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001724
1725 spin_unlock(&mbox->lock);
1726 spin_unlock(&spmc_shmem_obj_state.lock);
1727
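 /*
 * FFA_MEM_FRAG_TX echoes the handle back in w1/w2 and reports in w3
 * how many bytes of the descriptor were copied this time.
 */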
1728 SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
1729 copy_size, sender_id, 0, 0, 0);
1730
1731err_unlock_all:
1732 spin_unlock(&mbox->lock);
1733err_unlock_shmem:
1734 spin_unlock(&spmc_shmem_obj_state.lock);
1735 return spmc_ffa_error_return(handle, ret);
1736}
1737
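/*
 * A minimal sketch of how a secure partition might fill its TX buffer
 * before invoking FFA_MEM_RELINQUISH, assuming rel points at the TX
 * buffer and own_sp_id is an illustrative stand-in for the caller's
 * partition ID:
 *
 *	rel->handle = mem_handle;	    // from FFA_MEM_SHARE/LEND
 *	rel->flags = 0;			    // no flags supported here
 *	rel->endpoint_count = 1;	    // proxy endpoints unsupported
 *	rel->endpoint_array[0] = own_sp_id; // caller's own partition ID
 */
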
1738/**
1739 * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
1740 * @secure_origin: true if the call originated from the secure world.
1741 *
1742 * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
1743 * Used by the secure OS to release previously shared memory back to the non-secure OS.
1744 *
1745 * The handle to release must be in the client's (secure OS's) transmit buffer.
1746 *
1747 * Return: 0 on success, error code on failure.
1748 */
1749int spmc_ffa_mem_relinquish(uint32_t smc_fid,
1750 bool secure_origin,
1751 uint32_t handle_low,
1752 uint32_t handle_high,
1753 uint32_t fragment_offset,
1754 uint32_t sender_id,
1755 void *cookie,
1756 void *handle,
1757 uint64_t flags)
1758{
1759 int ret;
1760 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1761 struct spmc_shmem_obj *obj;
1762 const struct ffa_mem_relinquish_descriptor *req;
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001763 struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001764
1765 if (!secure_origin) {
1766 WARN("%s: unsupported relinquish direction.\n", __func__);
1767 return spmc_ffa_error_return(handle,
1768 FFA_ERROR_INVALID_PARAMETER);
1769 }
1770
1771 spin_lock(&mbox->lock);
1772
1773 if (mbox->rxtx_page_count == 0U) {
1774 WARN("%s: buffer pair not registered.\n", __func__);
1775 ret = FFA_ERROR_INVALID_PARAMETER;
1776 goto err_unlock_mailbox;
1777 }
1778
1779 req = mbox->tx_buffer;
1780
1781 if (req->flags != 0U) {
1782 WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
1783 ret = FFA_ERROR_INVALID_PARAMETER;
1784 goto err_unlock_mailbox;
1785 }
1786
Marc Bonnici336630f2022-01-13 11:39:10 +00001787 if (req->endpoint_count == 0U) {
1788 WARN("%s: endpoint count cannot be 0.\n", __func__);
1789 ret = FFA_ERROR_INVALID_PARAMETER;
1790 goto err_unlock_mailbox;
1791 }
1792
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001793 spin_lock(&spmc_shmem_obj_state.lock);
1794
1795 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1796 if (obj == NULL) {
1797 ret = FFA_ERROR_INVALID_PARAMETER;
1798 goto err_unlock_all;
1799 }
1800
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001801 /*
1802 * Validate the endpoint ID was populated correctly. We don't currently
1803 * support proxy endpoints so the endpoint count should always be 1.
1804 */
1805 if (req->endpoint_count != 1U) {
1806 WARN("%s: unsupported endpoint count %u != 1\n", __func__,
1807 req->endpoint_count);
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001808 ret = FFA_ERROR_INVALID_PARAMETER;
1809 goto err_unlock_all;
1810 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001811
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001812 /* Validate provided endpoint ID matches the partition ID. */
1813 if (req->endpoint_array[0] != sp_ctx->sp_id) {
1814 WARN("%s: invalid endpoint ID %u != %u\n", __func__,
1815 req->endpoint_array[0], sp_ctx->sp_id);
1816 ret = FFA_ERROR_INVALID_PARAMETER;
1817 goto err_unlock_all;
1818 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001819
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001820 /* Validate the caller is a valid participant. */
Shruti Gupta20ce06c2022-08-25 14:22:53 +01001821 if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001822 WARN("%s: Invalid endpoint ID (0x%x).\n",
1823 __func__, req->endpoint_array[0]);
1824 ret = FFA_ERROR_INVALID_PARAMETER;
1825 goto err_unlock_all;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001826 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001827
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001828 if (obj->in_use == 0U) {
1829 ret = FFA_ERROR_INVALID_PARAMETER;
1830 goto err_unlock_all;
1831 }
1832 obj->in_use--;
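 /*
 * Once in_use reaches zero again, the owner's FFA_MEM_RECLAIM of this
 * handle can succeed.
 */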
1833
1834 spin_unlock(&spmc_shmem_obj_state.lock);
1835 spin_unlock(&mbox->lock);
1836
1837 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1838
1839err_unlock_all:
1840 spin_unlock(&spmc_shmem_obj_state.lock);
1841err_unlock_mailbox:
1842 spin_unlock(&mbox->lock);
1843 return spmc_ffa_error_return(handle, ret);
1844}
1845
1846/**
1847 * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
1848 * @secure_origin: true if the call originated from the secure world.
1849 * @handle_low: Unique handle of shared memory object to reclaim. Bit[31:0].
1850 * @handle_high: Unique handle of shared memory object to reclaim.
1851 * Bit[63:32].
1852 * @mem_flags: Unsupported, ignored.
1853 *
1854 * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
1855 * Used by the non-secure OS to reclaim memory previously shared with the secure OS.
1856 *
1857 * Return: 0 on success, error code on failure.
1858 */
1859int spmc_ffa_mem_reclaim(uint32_t smc_fid,
1860 bool secure_origin,
1861 uint32_t handle_low,
1862 uint32_t handle_high,
1863 uint32_t mem_flags,
1864 uint64_t x4,
1865 void *cookie,
1866 void *handle,
1867 uint64_t flags)
1868{
1869 int ret;
1870 struct spmc_shmem_obj *obj;
1871 uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1872
1873 if (secure_origin) {
1874 WARN("%s: unsupported reclaim direction.\n", __func__);
1875 return spmc_ffa_error_return(handle,
1876 FFA_ERROR_INVALID_PARAMETER);
1877 }
1878
1879 if (mem_flags != 0U) {
1880 WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
1881 return spmc_ffa_error_return(handle,
1882 FFA_ERROR_INVALID_PARAMETER);
1883 }
1884
1885 spin_lock(&spmc_shmem_obj_state.lock);
1886
1887 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1888 if (obj == NULL) {
1889 ret = FFA_ERROR_INVALID_PARAMETER;
1890 goto err_unlock;
1891 }
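 /* The object must not still be retrieved by any borrower. */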
1892 if (obj->in_use != 0U) {
1893 ret = FFA_ERROR_DENIED;
1894 goto err_unlock;
1895 }
Marc Bonnici503320e2022-02-21 15:02:36 +00001896
Marc Bonnici82e28f12022-10-18 13:39:48 +01001897 if (obj->desc_filled != obj->desc_size) {
1898 WARN("%s: incomplete object desc filled %zu < size %zu\n",
1899 __func__, obj->desc_filled, obj->desc_size);
1900 ret = FFA_ERROR_INVALID_PARAMETER;
1901 goto err_unlock;
1902 }
1903
Marc Bonnici503320e2022-02-21 15:02:36 +00001904 /* Allow for platform specific operations to be performed. */
1905 ret = plat_spmc_shmem_reclaim(&obj->desc);
1906 if (ret != 0) {
1907 goto err_unlock;
1908 }
1909
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001910 spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1911 spin_unlock(&spmc_shmem_obj_state.lock);
1912
1913 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1914
1915err_unlock:
1916 spin_unlock(&spmc_shmem_obj_state.lock);
1917 return spmc_ffa_error_return(handle, ret);
1918}