/*
 * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <assert.h>
#include <errno.h>
#include <string.h>

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/object_pool.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <services/ffa_svc.h>
#include "spmc.h"
#include "spmc_shared_mem.h"

#include <platform_def.h>

/**
 * struct spmc_shmem_obj - Shared memory object.
 * @desc_size:      Size of @desc.
 * @desc_filled:    Size of @desc already received.
 * @in_use:         Number of clients that have called ffa_mem_retrieve_req
 *                  without a matching ffa_mem_relinquish call.
 * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
 */
struct spmc_shmem_obj {
	size_t desc_size;
	size_t desc_filled;
	size_t in_use;
	struct ffa_mtd desc;
};

/*
 * Declare our data structure to store the metadata of memory share requests.
 * The main datastore is allocated on a per-platform basis to ensure enough
 * storage can be made available.
 * The address of the datastore will be populated by the SPMC during its
 * initialization.
 */

struct spmc_shmem_obj_state spmc_shmem_obj_state = {
	/*
	 * Start the handle count near the 32-bit boundary so that handles
	 * requiring the top 32 bits are exercised quickly.
	 */
	.next_handle = 0xffffffc0U,
};

/**
 * spmc_shmem_obj_size - Convert from descriptor size to object size.
 * @desc_size:   Size of struct ffa_memory_region_descriptor object.
 *
 * Return: Size of struct spmc_shmem_obj object.
 */
static size_t spmc_shmem_obj_size(size_t desc_size)
{
	return desc_size + offsetof(struct spmc_shmem_obj, desc);
}

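/*
 * Layout note (illustrative): each object is stored as the fixed
 * bookkeeping fields followed in place by the variable-length descriptor,
 * so the datastore footprint of an object is
 * offsetof(struct spmc_shmem_obj, desc) + desc_size. The addition may
 * wrap for a hostile desc_size, which is why spmc_shmem_obj_alloc()
 * checks the result for overflow before using it.
 */
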
/**
 * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
 * @state:      Global state.
 * @desc_size:  Size of struct ffa_memory_region_descriptor object that
 *              allocated object will hold.
 *
 * Return: Pointer to newly allocated object, or %NULL if there is not enough
 *         space left. The returned pointer is only valid while @state is
 *         locked; to use it again after unlocking @state,
 *         spmc_shmem_obj_lookup must be called.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
{
	struct spmc_shmem_obj *obj;
	size_t free = state->data_size - state->allocated;
	size_t obj_size;

	if (state->data == NULL) {
		ERROR("Missing shmem datastore!\n");
		return NULL;
	}

	obj_size = spmc_shmem_obj_size(desc_size);

	/* Ensure the obj size has not overflowed. */
	if (obj_size < desc_size) {
		WARN("%s(0x%zx) desc_size overflow\n",
		     __func__, desc_size);
		return NULL;
	}

	if (obj_size > free) {
		WARN("%s(0x%zx) failed, free 0x%zx\n",
		     __func__, desc_size, free);
		return NULL;
	}
	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
	obj->desc = (struct ffa_mtd) {0};
	obj->desc_size = desc_size;
	obj->desc_filled = 0;
	obj->in_use = 0;
	state->allocated += obj_size;
	return obj;
}

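/*
 * Usage sketch (illustrative only): allocation and every later access to
 * the returned pointer happen under the datastore lock, mirroring
 * spmc_ffa_mem_send() below.
 *
 *	spin_lock(&spmc_shmem_obj_state.lock);
 *	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
 *	...fill and validate obj...
 *	spin_unlock(&spmc_shmem_obj_state.lock);
 */
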
/**
 * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
 * @state:      Global state.
 * @obj:        Object to free.
 *
 * Release memory used by @obj. Other objects may move, so on return all
 * pointers to struct spmc_shmem_obj object should be considered invalid, not
 * just @obj.
 *
 * The current implementation always compacts the remaining objects to simplify
 * the allocator and to avoid fragmentation.
 */

static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
				struct spmc_shmem_obj *obj)
{
	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
	uint8_t *shift_dest = (uint8_t *)obj;
	uint8_t *shift_src = shift_dest + free_size;
	size_t shift_size = state->allocated - (shift_src - state->data);

	if (shift_size != 0U) {
		memmove(shift_dest, shift_src, shift_size);
	}
	state->allocated -= free_size;
}

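/*
 * Compaction illustration (assuming three objects A, B and C packed in
 * that order): freeing B memmove()s C down over B's slot, leaving [A][C]
 * and shrinking state->allocated by B's footprint. This is why any cached
 * pointer, including one to C, must be re-acquired via
 * spmc_shmem_obj_lookup() after a free.
 */
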
/**
 * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
 * @state:      Global state.
 * @handle:     Unique handle of object to return.
 *
 * Return: struct spmc_shmem_obj object with handle matching @handle.
 *         %NULL, if no object in @state->data has a matching handle.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
{
	uint8_t *curr = state->data;

	while (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		if (obj->desc.handle == handle) {
			return obj;
		}
		curr += spmc_shmem_obj_size(obj->desc_size);
	}
	return NULL;
}

/**
 * spmc_shmem_obj_get_next - Get the next memory object from an offset.
 * @state:      Global state.
 * @offset:     Offset used to track which objects have previously been
 *              returned.
 *
 * Return: the next struct spmc_shmem_obj object from the provided offset.
 *         %NULL, if there are no more objects.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
{
	uint8_t *curr = state->data + *offset;

	if (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		*offset += spmc_shmem_obj_size(obj->desc_size);

		return obj;
	}
	return NULL;
}

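/*
 * Iteration sketch (illustrative only; see spmc_shmem_check_state_obj()
 * for a real user). The datastore lock must be held across the walk, and
 * the offset becomes stale if spmc_shmem_obj_free() compacts the pool.
 *
 *	size_t offset = 0;
 *	struct spmc_shmem_obj *cur;
 *
 *	while ((cur = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
 *					      &offset)) != NULL) {
 *		...inspect cur...
 *	}
 */
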
/*******************************************************************************
 * FF-A memory descriptor helper functions.
 ******************************************************************************/
/**
 * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
 *                           client's FF-A version.
 * @desc:         The memory transaction descriptor.
 * @index:        The index of the emad element to be accessed.
 * @ffa_version:  FF-A version of the provided structure.
 * @emad_size:    Will be populated with the size of the returned emad
 *                descriptor.
 * Return: A pointer to the requested emad structure.
 */
static void *
spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
			uint32_t ffa_version, size_t *emad_size)
{
	uint8_t *emad;
	/*
	 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
	 * format, otherwise assume it is a v1.1 format.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		/* Cast our descriptor to the v1.0 format. */
		struct ffa_mtd_v1_0 *mtd_v1_0 =
					(struct ffa_mtd_v1_0 *) desc;
		emad = (uint8_t *) &(mtd_v1_0->emad);
		*emad_size = sizeof(struct ffa_emad_v1_0);
	} else {
		if (!is_aligned(desc->emad_offset, 16)) {
			WARN("Emad offset is not aligned.\n");
			return NULL;
		}
		emad = ((uint8_t *) desc + desc->emad_offset);
		*emad_size = desc->emad_size;
	}
	return (emad + (*emad_size * index));
}

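/*
 * Layout note: in the v1.0 format the emad array is a member placed
 * directly inside struct ffa_mtd_v1_0, with a fixed
 * sizeof(struct ffa_emad_v1_0) stride. In the v1.1 format the array lives
 * at a sender-supplied emad_offset with a sender-supplied emad_size byte
 * stride, which is why both fields are treated as untrusted and the
 * offset's alignment is checked above before the pointer is formed.
 */
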
/**
 * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
 *                               FF-A version of the descriptor.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: struct ffa_comp_mrd object corresponding to the composite memory
 *         region descriptor.
 */
static struct ffa_comp_mrd *
spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
{
	size_t emad_size;
	/*
	 * The comp_mrd_offset field of the emad descriptor remains consistent
	 * between FF-A versions therefore we can use the v1.0 descriptor here
	 * in all cases.
	 */
	struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
							     ffa_version,
							     &emad_size);
	/* Ensure the emad array was found. */
	if (emad == NULL) {
		return NULL;
	}

	/* Ensure the composite descriptor offset is aligned. */
	if (!is_aligned(emad->comp_mrd_offset, 8)) {
		WARN("Unaligned composite memory region descriptor offset.\n");
		return NULL;
	}

	return (struct ffa_comp_mrd *)
	       ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
}

/**
 * spmc_shmem_obj_ffa_constituent_size - Calculate variable size part of obj.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: Size of ffa_constituent_memory_region_descriptors in @obj.
 */
static size_t
spmc_shmem_obj_ffa_constituent_size(struct spmc_shmem_obj *obj,
				    uint32_t ffa_version)
{
	struct ffa_comp_mrd *comp_mrd;

	comp_mrd = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);
	if (comp_mrd == NULL) {
		return 0;
	}
	return comp_mrd->address_range_count * sizeof(struct ffa_cons_mrd);
}

/**
 * spmc_shmem_obj_validate_id - Validate a partition ID is participating in
 *                              a given memory transaction.
 * @obj:      The shared memory object containing the descriptor
 *            of the memory transaction.
 * @sp_id:    Partition ID to validate.
 * Return: true if ID is valid, else false.
 */
bool spmc_shmem_obj_validate_id(struct spmc_shmem_obj *obj, uint16_t sp_id)
{
	bool found = false;
	struct ffa_mtd *desc = &obj->desc;
	size_t desc_size = obj->desc_size;

	/* Validate the partition is a valid participant. */
	for (unsigned int i = 0U; i < desc->emad_count; i++) {
		size_t emad_size;
		struct ffa_emad_v1_0 *emad;

		emad = spmc_shmem_obj_get_emad(desc, i,
					       MAKE_FFA_VERSION(1, 1),
					       &emad_size);
		/*
		 * Validate the calculated emad address resides within the
		 * descriptor.
		 */
		if ((emad == NULL) || (uintptr_t) emad >=
		    (uintptr_t)((uint8_t *) desc + desc_size)) {
			VERBOSE("Invalid emad.\n");
			break;
		}
		if (sp_id == emad->mapd.endpoint_id) {
			found = true;
			break;
		}
	}
	return found;
}

/*
 * Compare two memory regions to determine if any range overlaps with another
 * ongoing memory transaction.
 */
static bool
overlapping_memory_regions(struct ffa_comp_mrd *region1,
			   struct ffa_comp_mrd *region2)
{
	uint64_t region1_start;
	uint64_t region1_size;
	uint64_t region1_end;
	uint64_t region2_start;
	uint64_t region2_size;
	uint64_t region2_end;

	assert(region1 != NULL);
	assert(region2 != NULL);

	if (region1 == region2) {
		return true;
	}

	/*
	 * Check each memory region in the request against existing
	 * transactions.
	 */
	for (size_t i = 0; i < region1->address_range_count; i++) {

		region1_start = region1->address_range_array[i].address;
		region1_size =
			region1->address_range_array[i].page_count *
			PAGE_SIZE_4KB;
		region1_end = region1_start + region1_size;

		for (size_t j = 0; j < region2->address_range_count; j++) {

			region2_start = region2->address_range_array[j].address;
			region2_size =
				region2->address_range_array[j].page_count *
				PAGE_SIZE_4KB;
			region2_end = region2_start + region2_size;

			/* Check if regions are not overlapping. */
			if (!((region2_end <= region1_start) ||
			      (region1_end <= region2_start))) {
				WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
				     region1_start, region1_end,
				     region2_start, region2_end);
				return true;
			}
		}
	}
	return false;
}

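/*
 * Note on the predicate above: treating each range as the half-open
 * interval [start, end), two ranges overlap exactly when
 * (start1 < end2) && (start2 < end1), i.e. the negation of
 * (end2 <= start1) || (end1 <= start2). For example, [0x8000, 0xa000)
 * and [0x9000, 0xb000) overlap, while [0x8000, 0x9000) and
 * [0x9000, 0xa000) merely abut and are accepted.
 */
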
/*******************************************************************************
 * FF-A v1.0 Memory Descriptor Conversion Helpers.
 ******************************************************************************/
/**
 * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
 *                                     converted descriptor.
 * @orig:       The original v1.0 memory transaction descriptor.
 * @desc_size:  The size of the original v1.0 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.1 format.
 */
static size_t
spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
{
	size_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = orig->emad;

	/* Get the size of the v1.1 descriptor. */
	size += sizeof(struct ffa_mtd);

	/* Add the size of the emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrds. */
	size += sizeof(struct ffa_comp_mrd);

	/* Add the size of the constituent mrds. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Check the calculated address is within the memory descriptor. */
	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
	    (uintptr_t)((uint8_t *) orig + desc_size)) {
		return 0;
	}
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}

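/*
 * Worked example (counts are illustrative): a v1.0 transaction with two
 * endpoints and three constituent ranges converts into
 *
 *	sizeof(struct ffa_mtd) + 2 * sizeof(struct ffa_emad_v1_0) +
 *	sizeof(struct ffa_comp_mrd) + 3 * sizeof(struct ffa_cons_mrd)
 *
 * bytes: one header, the emad array, the single composite descriptor
 * shared by all endpoints and its constituent ranges.
 */
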
/**
 * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
 *                                     converted descriptor.
 * @orig:       The original v1.1 memory transaction descriptor.
 * @desc_size:  The size of the original v1.1 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.0 format.
 */
static size_t
spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
{
	size_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
					   ((uint8_t *) orig +
					    orig->emad_offset);

	/* Get the size of the v1.0 descriptor. */
	size += sizeof(struct ffa_mtd_v1_0);

	/* Add the size of the v1.0 emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrds. */
	size += sizeof(struct ffa_comp_mrd);

	/* Add the size of the constituent mrds. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Check the calculated address is within the memory descriptor. */
	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
	    (uintptr_t)((uint8_t *) orig + desc_size)) {
		return 0;
	}
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}

/**
 * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
 * @out_obj:    The shared memory object to populate the converted descriptor.
 * @orig:       The shared memory object containing the v1.0 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
				     struct spmc_shmem_obj *orig)
{
	struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
	struct ffa_mtd *out = &out_obj->desc;
	struct ffa_emad_v1_0 *emad_array_in;
	struct ffa_emad_v1_0 *emad_array_out;
	struct ffa_comp_mrd *mrd_in;
	struct ffa_comp_mrd *mrd_out;

	size_t mrd_in_offset;
	size_t mrd_out_offset;
	size_t mrd_size = 0;

	/* Populate the new descriptor format from the v1.0 struct. */
	out->sender_id = mtd_orig->sender_id;
	out->memory_region_attributes = mtd_orig->memory_region_attributes;
	out->flags = mtd_orig->flags;
	out->handle = mtd_orig->handle;
	out->tag = mtd_orig->tag;
	out->emad_count = mtd_orig->emad_count;
	out->emad_size = sizeof(struct ffa_emad_v1_0);

	/*
	 * We will locate the emad descriptors directly after the ffa_mtd
	 * struct. This will be 8-byte aligned.
	 */
	out->emad_offset = sizeof(struct ffa_mtd);

	emad_array_in = mtd_orig->emad;
	emad_array_out = (struct ffa_emad_v1_0 *)
			 ((uint8_t *) out + out->emad_offset);

	/* Copy across the emad structs. */
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		/* Bound check for emad array. */
		if (((uint8_t *)emad_array_in + sizeof(struct ffa_emad_v1_0)) >
		    ((uint8_t *) mtd_orig + orig->desc_size)) {
			VERBOSE("%s: Invalid mtd structure.\n", __func__);
			return false;
		}
		memcpy(&emad_array_out[i], &emad_array_in[i],
		       sizeof(struct ffa_emad_v1_0));
	}

	/* Place the mrd descriptors after the end of the emad descriptors. */
	mrd_in_offset = emad_array_in->comp_mrd_offset;
	mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

	/* Add the size of the composite memory region descriptor. */
	mrd_size += sizeof(struct ffa_comp_mrd);

	/* Find the mrd descriptor. */
	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

	/* Add the size of the constituent memory region descriptors. */
	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

	/*
	 * Update the offset in the emads by the delta between the input and
	 * output addresses.
	 */
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		emad_array_out[i].comp_mrd_offset =
			emad_array_in[i].comp_mrd_offset +
			(mrd_out_offset - mrd_in_offset);
	}

	/* Verify that we stay within bound of the memory descriptors. */
	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
	    (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
		ERROR("%s: Invalid mrd structure.\n", __func__);
		return false;
	}

	/* Copy the mrd descriptors directly. */
	memcpy(mrd_out, mrd_in, mrd_size);

	return true;
}

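/*
 * Resulting v1.1 layout produced by the conversion above, in ascending
 * address order:
 *
 *	[struct ffa_mtd][emad_count * struct ffa_emad_v1_0]
 *	[struct ffa_comp_mrd][N * struct ffa_cons_mrd]
 *
 * with every emad's comp_mrd_offset rebased by the same input/output
 * delta so all endpoints still reference the single composite descriptor.
 */
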
/**
 * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
 *                                a v1.0 memory object.
 * @out_obj:    The shared memory object to populate the v1.0 descriptor.
 * @orig:       The shared memory object containing the v1.1 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
			     struct spmc_shmem_obj *orig)
{
	struct ffa_mtd *mtd_orig = &orig->desc;
	struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
	struct ffa_emad_v1_0 *emad_in;
	struct ffa_emad_v1_0 *emad_array_in;
	struct ffa_emad_v1_0 *emad_array_out;
	struct ffa_comp_mrd *mrd_in;
	struct ffa_comp_mrd *mrd_out;

	size_t mrd_in_offset;
	size_t mrd_out_offset;
	size_t emad_out_array_size;
	size_t mrd_size = 0;
	size_t orig_desc_size = orig->desc_size;

	/* Populate the v1.0 descriptor format from the v1.1 struct. */
	out->sender_id = mtd_orig->sender_id;
	out->memory_region_attributes = mtd_orig->memory_region_attributes;
	out->flags = mtd_orig->flags;
	out->handle = mtd_orig->handle;
	out->tag = mtd_orig->tag;
	out->emad_count = mtd_orig->emad_count;

	/* Determine the location of the emad array in both descriptors. */
	emad_array_in = (struct ffa_emad_v1_0 *)
			((uint8_t *) mtd_orig + mtd_orig->emad_offset);
	emad_array_out = out->emad;

	/* Copy across the emad structs. */
	emad_in = emad_array_in;
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		/* Bound check for emad array. */
		if (((uint8_t *)emad_in + sizeof(struct ffa_emad_v1_0)) >
		    ((uint8_t *) mtd_orig + orig_desc_size)) {
			VERBOSE("%s: Invalid mtd structure.\n", __func__);
			return false;
		}
		memcpy(&emad_array_out[i], emad_in,
		       sizeof(struct ffa_emad_v1_0));

		/* Advance by the byte stride reported by the v1.1 header. */
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *) emad_in + mtd_orig->emad_size);
	}

	/* Place the mrd descriptors after the end of the emad descriptors. */
	emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;

	mrd_out_offset = (uint8_t *) out->emad - (uint8_t *) out +
			 emad_out_array_size;

	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

	mrd_in_offset = mtd_orig->emad_offset +
			(mtd_orig->emad_size * mtd_orig->emad_count);

	/* Add the size of the composite memory region descriptor. */
	mrd_size += sizeof(struct ffa_comp_mrd);

	/* Find the mrd descriptor. */
	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

	/* Add the size of the constituent memory region descriptors. */
	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

	/*
	 * Update the offset in the emads by the delta between the input and
	 * output addresses.
	 */
	emad_in = emad_array_in;

	for (unsigned int i = 0U; i < out->emad_count; i++) {
		emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
						    (mrd_out_offset -
						     mrd_in_offset);
		/* Advance by the byte stride reported by the v1.1 header. */
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *) emad_in + mtd_orig->emad_size);
	}

	/* Verify that we stay within bound of the memory descriptors. */
	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
	    (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
		ERROR("%s: Invalid mrd structure.\n", __func__);
		return false;
	}

	/* Copy the mrd descriptors directly. */
	memcpy(mrd_out, mrd_in, mrd_size);

	return true;
}

/**
 * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
 *                                     the v1.0 format and populates the
 *                                     provided buffer.
 * @dst:              Buffer to populate v1.0 ffa_memory_region_descriptor.
 * @orig_obj:         Object containing v1.1 ffa_memory_region_descriptor.
 * @buf_size:         Size of the buffer to populate.
 * @offset:           The offset of the converted descriptor to copy.
 * @copy_size:        Will be populated with the number of bytes copied.
 * @v1_0_desc_size:   Will be populated with the total size of the v1.0
 *                    descriptor.
 *
 * Return: 0 if conversion and population succeeded.
 * Note: This function invalidates the reference to @orig_obj therefore
 * `spmc_shmem_obj_lookup` must be called if further usage is required.
 */
static uint32_t
spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
				  size_t buf_size, size_t offset,
				  size_t *copy_size, size_t *v1_0_desc_size)
{
	struct spmc_shmem_obj *v1_0_obj;

	/* Calculate the size that the v1.0 descriptor will require. */
	*v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
				&orig_obj->desc, orig_obj->desc_size);

	if (*v1_0_desc_size == 0) {
		ERROR("%s: cannot determine size of descriptor.\n",
		      __func__);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/* Get a new obj to store the v1.0 descriptor. */
	v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
					*v1_0_desc_size);

	if (!v1_0_obj) {
		return FFA_ERROR_NO_MEMORY;
	}

	/* Perform the conversion from v1.1 to v1.0. */
	if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
		spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	*copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
	memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);

	/*
	 * We're finished with the v1.0 descriptor for now so free it.
	 * Note that this will invalidate any references to the v1.1
	 * descriptor.
	 */
	spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);

	return 0;
}

/**
 * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, -EINVAL if constituent_memory_region_descriptor
 * offset or count is invalid.
 */
static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
				uint32_t ffa_version)
{
	uint32_t comp_mrd_offset = 0;

	if (obj->desc.emad_count == 0U) {
		WARN("%s: unsupported attribute desc count %u.\n",
		     __func__, obj->desc.emad_count);
		return -EINVAL;
	}

	for (size_t emad_num = 0; emad_num < obj->desc.emad_count; emad_num++) {
		size_t size;
		size_t count;
		size_t expected_size;
		size_t total_page_count;
		size_t emad_size;
		size_t desc_size;
		size_t header_emad_size;
		uint32_t offset;
		struct ffa_comp_mrd *comp;
		struct ffa_emad_v1_0 *emad;

		emad = spmc_shmem_obj_get_emad(&obj->desc, emad_num,
					       ffa_version, &emad_size);
		if (emad == NULL) {
			WARN("%s: invalid emad structure.\n", __func__);
			return -EINVAL;
		}

		/*
		 * Validate the calculated emad address resides within the
		 * descriptor.
		 */
		if ((uintptr_t) emad >=
		    (uintptr_t)((uint8_t *) &obj->desc + obj->desc_size)) {
			WARN("Invalid emad access.\n");
			return -EINVAL;
		}

		offset = emad->comp_mrd_offset;

		if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
			desc_size = sizeof(struct ffa_mtd_v1_0);
		} else {
			desc_size = sizeof(struct ffa_mtd);
		}

		header_emad_size = desc_size +
			(obj->desc.emad_count * emad_size);

		if (offset < header_emad_size) {
			WARN("%s: invalid object, offset %u < header + emad %zu\n",
			     __func__, offset, header_emad_size);
			return -EINVAL;
		}

		size = obj->desc_size;

		if (offset > size) {
			WARN("%s: invalid object, offset %u > total size %zu\n",
			     __func__, offset, obj->desc_size);
			return -EINVAL;
		}
		size -= offset;

		if (size < sizeof(struct ffa_comp_mrd)) {
			WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
			     __func__, offset, obj->desc_size);
			return -EINVAL;
		}
		size -= sizeof(struct ffa_comp_mrd);

		count = size / sizeof(struct ffa_cons_mrd);

		comp = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);

		if (comp == NULL) {
			WARN("%s: invalid comp_mrd offset\n", __func__);
			return -EINVAL;
		}

		if (comp->address_range_count != count) {
			WARN("%s: invalid object, desc count %u != %zu\n",
			     __func__, comp->address_range_count, count);
			return -EINVAL;
		}

		expected_size = offset + sizeof(*comp) +
				spmc_shmem_obj_ffa_constituent_size(obj,
								    ffa_version);

		if (expected_size != obj->desc_size) {
			WARN("%s: invalid object, computed size %zu != size %zu\n",
			     __func__, expected_size, obj->desc_size);
			return -EINVAL;
		}

		if (obj->desc_filled < obj->desc_size) {
			/*
			 * The whole descriptor has not yet been received.
			 * Skip final checks.
			 */
			return 0;
		}

		/*
		 * The offset provided to the composite memory region descriptor
		 * should be consistent across endpoint descriptors. Store the
		 * first entry and compare against subsequent entries.
		 */
		if (comp_mrd_offset == 0) {
			comp_mrd_offset = offset;
		} else {
			if (comp_mrd_offset != offset) {
				ERROR("%s: mismatching offsets provided, %u != %u\n",
				      __func__, offset, comp_mrd_offset);
				return -EINVAL;
			}
		}

		total_page_count = 0;

		for (size_t i = 0; i < count; i++) {
			total_page_count +=
				comp->address_range_array[i].page_count;
		}
		if (comp->total_page_count != total_page_count) {
			WARN("%s: invalid object, desc total_page_count %u != %zu\n",
			     __func__, comp->total_page_count,
			     total_page_count);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * spmc_shmem_check_state_obj - Check if the descriptor describes memory
 *                              regions that are currently involved with an
 *                              existing memory transaction. This implies that
 *                              the memory is not in a valid state for lending.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, -EINVAL if invalid memory state.
 */
static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
				      uint32_t ffa_version)
{
	size_t obj_offset = 0;
	struct spmc_shmem_obj *inflight_obj;

	struct ffa_comp_mrd *other_mrd;
	struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
								  ffa_version);

	if (requested_mrd == NULL) {
		return -EINVAL;
	}

	inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
					       &obj_offset);

	while (inflight_obj != NULL) {
		/*
		 * Don't compare the transaction to itself or to partially
		 * transmitted descriptors.
		 */
		if ((obj->desc.handle != inflight_obj->desc.handle) &&
		    (obj->desc_size == obj->desc_filled)) {
			other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
							FFA_VERSION_COMPILED);
			if (other_mrd == NULL) {
				return -EINVAL;
			}
			if (overlapping_memory_regions(requested_mrd,
						       other_mrd)) {
				return -EINVAL;
			}
		}

		inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
						       &obj_offset);
	}
	return 0;
}

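/*
 * Note (assumption behind the FFA_VERSION_COMPILED use above): completed
 * objects held in the datastore have already been normalised to the v1.1
 * layout by spmc_ffa_fill_desc(), so the in-flight side of the comparison
 * is parsed with the SPMC's compiled FF-A version rather than the
 * caller's.
 */
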
static long spmc_ffa_fill_desc(struct mailbox *mbox,
			       struct spmc_shmem_obj *obj,
			       uint32_t fragment_length,
			       ffa_mtd_flag32_t mtd_flag,
			       uint32_t ffa_version,
			       void *smc_handle)
{
	int ret;
	size_t emad_size;
	uint32_t handle_low;
	uint32_t handle_high;
	struct ffa_emad_v1_0 *emad;
	struct ffa_emad_v1_0 *other_emad;

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (fragment_length > mbox->rxtx_page_count * PAGE_SIZE_4KB) {
		WARN("%s: bad fragment size %u > %u buffer size\n", __func__,
		     fragment_length, mbox->rxtx_page_count * PAGE_SIZE_4KB);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (fragment_length > obj->desc_size - obj->desc_filled) {
		WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
		     fragment_length, obj->desc_size - obj->desc_filled);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	memcpy((uint8_t *)&obj->desc + obj->desc_filled,
	       (uint8_t *) mbox->tx_buffer, fragment_length);

	/* Ensure that the sender ID resides in the normal world. */
	if (ffa_is_secure_world_id(obj->desc.sender_id)) {
		WARN("%s: Invalid sender ID 0x%x.\n",
		     __func__, obj->desc.sender_id);
		ret = FFA_ERROR_DENIED;
		goto err_arg;
	}

	/* Ensure the NS bit is set to 0. */
	if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	/*
	 * We don't currently support any optional flags so ensure none are
	 * requested.
	 */
	if (obj->desc.flags != 0U && mtd_flag != 0U &&
	    (obj->desc.flags != mtd_flag)) {
		WARN("%s: invalid memory transaction flags %u != %u\n",
		     __func__, obj->desc.flags, mtd_flag);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (obj->desc_filled == 0U) {
		/* First fragment, descriptor header has been copied */
		obj->desc.handle = spmc_shmem_obj_state.next_handle++;
		obj->desc.flags |= mtd_flag;
	}

	obj->desc_filled += fragment_length;
	ret = spmc_shmem_check_obj(obj, ffa_version);
	if (ret != 0) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_bad_desc;
	}

	handle_low = (uint32_t)obj->desc.handle;
	handle_high = obj->desc.handle >> 32;

	if (obj->desc_filled != obj->desc_size) {
		SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
			 handle_high, obj->desc_filled,
			 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
	}

	/* The full descriptor has been received, perform any final checks. */

	/*
	 * If a partition ID resides in the secure world validate that the
	 * partition ID is for a known partition. Ignore any partition ID
	 * belonging to the normal world as it is assumed the Hypervisor will
	 * have validated these.
	 */
	for (size_t i = 0; i < obj->desc.emad_count; i++) {
		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
					       &emad_size);
		if (emad == NULL) {
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_bad_desc;
		}

		ffa_endpoint_id16_t ep_id = emad->mapd.endpoint_id;

		if (ffa_is_secure_world_id(ep_id)) {
			if (spmc_get_sp_ctx(ep_id) == NULL) {
				WARN("%s: Invalid receiver id 0x%x\n",
				     __func__, ep_id);
				ret = FFA_ERROR_INVALID_PARAMETER;
				goto err_bad_desc;
			}
		}
	}

	/* Ensure partition IDs are not duplicated. */
	for (size_t i = 0; i < obj->desc.emad_count; i++) {
		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
					       &emad_size);
		if (emad == NULL) {
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_bad_desc;
		}
		for (size_t j = i + 1; j < obj->desc.emad_count; j++) {
			other_emad = spmc_shmem_obj_get_emad(&obj->desc, j,
							     ffa_version,
							     &emad_size);
			if (other_emad == NULL) {
				ret = FFA_ERROR_INVALID_PARAMETER;
				goto err_bad_desc;
			}

			if (emad->mapd.endpoint_id ==
			    other_emad->mapd.endpoint_id) {
				WARN("%s: Duplicated endpoint id 0x%x\n",
				     __func__, emad->mapd.endpoint_id);
				ret = FFA_ERROR_INVALID_PARAMETER;
				goto err_bad_desc;
			}
		}
	}

	ret = spmc_shmem_check_state_obj(obj, ffa_version);
	if (ret) {
		ERROR("%s: invalid memory region descriptor.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_bad_desc;
	}

	/*
	 * Everything checks out, if the sender was using FF-A v1.0, convert
	 * the descriptor format to use the v1.1 structures.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		struct spmc_shmem_obj *v1_1_obj;
		uint64_t mem_handle;

		/* Calculate the size that the v1.1 descriptor will require. */
		size_t v1_1_desc_size =
		    spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
						      obj->desc_size);

		if (v1_1_desc_size == 0U) {
			ERROR("%s: cannot determine size of descriptor.\n",
			      __func__);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_arg;
		}

		/* Get a new obj to store the v1.1 descriptor. */
		v1_1_obj =
		    spmc_shmem_obj_alloc(&spmc_shmem_obj_state, v1_1_desc_size);

		if (!v1_1_obj) {
			ret = FFA_ERROR_NO_MEMORY;
			goto err_arg;
		}

		/* Perform the conversion from v1.0 to v1.1. */
		v1_1_obj->desc_size = v1_1_desc_size;
		v1_1_obj->desc_filled = v1_1_desc_size;
		if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
			ERROR("%s: Could not convert mtd!\n", __func__);
			spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_arg;
		}

		/*
		 * We're finished with the v1.0 descriptor so free it
		 * and continue our checks with the new v1.1 descriptor.
		 */
		mem_handle = obj->desc.handle;
		spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
		obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
		if (obj == NULL) {
			ERROR("%s: Failed to find converted descriptor.\n",
			      __func__);
			ret = FFA_ERROR_INVALID_PARAMETER;
			return spmc_ffa_error_return(smc_handle, ret);
		}
	}

	/* Allow for platform specific operations to be performed. */
	ret = plat_spmc_shmem_begin(&obj->desc);
	if (ret != 0) {
		goto err_arg;
	}

	SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
		 0, 0, 0);

err_bad_desc:
err_arg:
	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
	return spmc_ffa_error_return(smc_handle, ret);
}

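/*
 * Protocol sketch (illustrative): for a descriptor larger than a single
 * mailbox, the exchange driven by spmc_ffa_fill_desc() is
 *
 *	NWd: FFA_MEM_SHARE(total, frag 0)  -> SPMC: FFA_MEM_FRAG_RX(handle)
 *	NWd: FFA_MEM_FRAG_TX(handle, 1)    -> SPMC: FFA_MEM_FRAG_RX(handle)
 *	...
 *	NWd: FFA_MEM_FRAG_TX(handle, N)    -> SPMC: FFA_SUCCESS(handle)
 *
 * with the cross-fragment checks (participants, duplicates, overlaps)
 * deferred until the final fragment completes the descriptor.
 */
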
/**
 * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
 * @client:             Client state.
 * @total_length:       Total length of shared memory descriptor.
 * @fragment_length:    Length of fragment of shared memory descriptor passed in
 *                      this call.
 * @address:            Not supported, must be 0.
 * @page_count:         Not supported, must be 0.
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
 * to share or lend memory from non-secure os to secure os (with no stream
 * endpoints).
 *
 * Return: 0 on success, error code on failure.
 */
long spmc_ffa_mem_send(uint32_t smc_fid,
		       bool secure_origin,
		       uint64_t total_length,
		       uint32_t fragment_length,
		       uint64_t address,
		       uint32_t page_count,
		       void *cookie,
		       void *handle,
		       uint64_t flags)

{
	long ret;
	struct spmc_shmem_obj *obj;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	ffa_mtd_flag32_t mtd_flag;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);

	if (address != 0U || page_count != 0U) {
		WARN("%s: custom memory region for message not supported.\n",
		     __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (secure_origin) {
		WARN("%s: unsupported share direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Check if the descriptor is smaller than the v1.0 descriptor. The
	 * descriptor cannot be smaller than this structure.
	 */
	if (fragment_length < sizeof(struct ffa_mtd_v1_0)) {
		WARN("%s: bad first fragment size %u < %zu\n",
		     __func__, fragment_length, sizeof(struct ffa_mtd_v1_0));
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
		mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
	} else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
		mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
	} else {
		WARN("%s: invalid memory management operation.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);
	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
	if (obj == NULL) {
		ret = FFA_ERROR_NO_MEMORY;
		goto err_unlock;
	}

	spin_lock(&mbox->lock);
	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
				 ffa_version, handle);
	spin_unlock(&mbox->lock);

	spin_unlock(&spmc_shmem_obj_state.lock);
	return ret;

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
 * @client:             Client state.
 * @handle_low:         Handle_low value returned from FFA_MEM_FRAG_RX.
 * @handle_high:        Handle_high value returned from FFA_MEM_FRAG_RX.
 * @fragment_length:    Length of fragments transmitted.
 * @sender_id:          Vmid of sender in bits [31:16].
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Return: @smc_handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
			  bool secure_origin,
			  uint64_t handle_low,
			  uint64_t handle_high,
			  uint32_t fragment_length,
			  uint32_t sender_id,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	long ret;
	uint32_t desc_sender_id;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);

	struct spmc_shmem_obj *obj;
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
		     __func__, mem_handle);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
	if (sender_id != desc_sender_id) {
		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
		     sender_id, desc_sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	if (obj->desc_filled == obj->desc_size) {
		WARN("%s: object desc already filled, %zu\n", __func__,
		     obj->desc_filled);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	spin_lock(&mbox->lock);
	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
				 handle);
	spin_unlock(&mbox->lock);

	spin_unlock(&spmc_shmem_obj_state.lock);
	return ret;

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor
 *                                    if the caller implements a version greater
 *                                    than FF-A 1.0 or if they have requested
 *                                    the functionality.
 *                                    TODO: We are assuming that the caller is
 *                                    an SP. To support retrieval from the
 *                                    normal world this function will need to be
 *                                    expanded accordingly.
 * @resp:       Descriptor populated in caller's RX buffer.
 * @sp_ctx:     Context of the calling SP.
 */
void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
				      struct secure_partition_desc *sp_ctx)
{
	if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
	    sp_ctx->ns_bit_requested) {
		/*
		 * Currently memory senders must reside in the normal
		 * world, and we do not have the functionality to change
		 * the state of memory dynamically. Therefore we can always set
		 * the NS bit to 1.
		 */
		resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
	}
}

1295/**
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001296 * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
1297 * @smc_fid: FID of SMC
1298 * @total_length: Total length of retrieve request descriptor if this is
1299 * the first call. Otherwise (unsupported) must be 0.
1300 * @fragment_length: Length of fragment of retrieve request descriptor passed
1301 * in this call. Only @fragment_length == @length is
1302 * supported by this implementation.
1303 * @address: Not supported, must be 0.
1304 * @page_count: Not supported, must be 0.
1305 * @smc_handle: Handle passed to smc call. Used to return
1306 * FFA_MEM_RETRIEVE_RESP.
1307 *
1308 * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
1309 * Used by secure os to retrieve memory already shared by non-secure os.
1310 * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
1311 * the client must call FFA_MEM_FRAG_RX until the full response has been
1312 * received.
1313 *
1314 * Return: @handle on success, error code on failure.
1315 */
1316long
1317spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
1318 bool secure_origin,
1319 uint32_t total_length,
1320 uint32_t fragment_length,
1321 uint64_t address,
1322 uint32_t page_count,
1323 void *cookie,
1324 void *handle,
1325 uint64_t flags)
1326{
1327 int ret;
1328 size_t buf_size;
Marc Bonnicid1907f02022-04-19 17:42:53 +01001329 size_t copy_size = 0;
1330 size_t min_desc_size;
1331 size_t out_desc_size = 0;
1332
1333 /*
1334 * Currently we are only accessing fields that are the same in both the
1335 * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
1336 * here. We only need validate against the appropriate struct size.
1337 */
1338 struct ffa_mtd *resp;
1339 const struct ffa_mtd *req;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001340 struct spmc_shmem_obj *obj = NULL;
1341 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
Marc Bonnicid1907f02022-04-19 17:42:53 +01001342 uint32_t ffa_version = get_partition_ffa_version(secure_origin);
Marc Bonnici08f28ef2022-04-19 16:52:59 +01001343 struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001344
1345 if (!secure_origin) {
1346 WARN("%s: unsupported retrieve req direction.\n", __func__);
1347 return spmc_ffa_error_return(handle,
1348 FFA_ERROR_INVALID_PARAMETER);
1349 }
1350
1351 if (address != 0U || page_count != 0U) {
1352 WARN("%s: custom memory region not supported.\n", __func__);
1353 return spmc_ffa_error_return(handle,
1354 FFA_ERROR_INVALID_PARAMETER);
1355 }
1356
1357 spin_lock(&mbox->lock);
1358
1359 req = mbox->tx_buffer;
1360 resp = mbox->rx_buffer;
1361 buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1362
1363 if (mbox->rxtx_page_count == 0U) {
1364 WARN("%s: buffer pair not registered.\n", __func__);
1365 ret = FFA_ERROR_INVALID_PARAMETER;
1366 goto err_unlock_mailbox;
1367 }
1368
1369 if (mbox->state != MAILBOX_STATE_EMPTY) {
1370 WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
1371 ret = FFA_ERROR_DENIED;
1372 goto err_unlock_mailbox;
1373 }
1374
1375 if (fragment_length != total_length) {
1376 WARN("%s: fragmented retrieve request not supported.\n",
1377 __func__);
1378 ret = FFA_ERROR_INVALID_PARAMETER;
1379 goto err_unlock_mailbox;
1380 }
1381
Marc Bonnici336630f2022-01-13 11:39:10 +00001382 if (req->emad_count == 0U) {
1383 WARN("%s: unsupported attribute desc count %u.\n",
1384 __func__, obj->desc.emad_count);
vallau01460d3962022-08-09 17:06:53 +02001385 ret = FFA_ERROR_INVALID_PARAMETER;
1386 goto err_unlock_mailbox;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001387 }
1388
Marc Bonnicid1907f02022-04-19 17:42:53 +01001389 /* Determine the appropriate minimum descriptor size. */
1390 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1391 min_desc_size = sizeof(struct ffa_mtd_v1_0);
1392 } else {
1393 min_desc_size = sizeof(struct ffa_mtd);
1394 }
1395 if (total_length < min_desc_size) {
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001396 WARN("%s: invalid length %u < %zu\n", __func__, total_length,
Marc Bonnicid1907f02022-04-19 17:42:53 +01001397 min_desc_size);
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001398 ret = FFA_ERROR_INVALID_PARAMETER;
1399 goto err_unlock_mailbox;
1400 }
1401
1402 spin_lock(&spmc_shmem_obj_state.lock);
1403
1404 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1405 if (obj == NULL) {
1406 ret = FFA_ERROR_INVALID_PARAMETER;
1407 goto err_unlock_all;
1408 }
1409
1410 if (obj->desc_filled != obj->desc_size) {
1411 WARN("%s: incomplete object desc filled %zu < size %zu\n",
1412 __func__, obj->desc_filled, obj->desc_size);
1413 ret = FFA_ERROR_INVALID_PARAMETER;
1414 goto err_unlock_all;
1415 }
1416
1417 if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
1418 WARN("%s: wrong sender id 0x%x != 0x%x\n",
1419 __func__, req->sender_id, obj->desc.sender_id);
1420 ret = FFA_ERROR_INVALID_PARAMETER;
1421 goto err_unlock_all;
1422 }
1423
1424 if (req->emad_count != 0U && req->tag != obj->desc.tag) {
1425 WARN("%s: wrong tag 0x%lx != 0x%lx\n",
1426 __func__, req->tag, obj->desc.tag);
1427 ret = FFA_ERROR_INVALID_PARAMETER;
1428 goto err_unlock_all;
1429 }
1430
Marc Bonnici336630f2022-01-13 11:39:10 +00001431 if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
1432 WARN("%s: mistmatch of endpoint counts %u != %u\n",
1433 __func__, req->emad_count, obj->desc.emad_count);
1434 ret = FFA_ERROR_INVALID_PARAMETER;
1435 goto err_unlock_all;
1436 }
1437
Marc Bonnici08f28ef2022-04-19 16:52:59 +01001438 /* Ensure the NS bit is set to 0 in the request. */
1439 if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
1440 WARN("%s: NS mem attributes flags MBZ.\n", __func__);
1441 ret = FFA_ERROR_INVALID_PARAMETER;
1442 goto err_unlock_all;
1443 }
1444
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001445 if (req->flags != 0U) {
1446 if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
1447 (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
1448 /*
1449 * If the retrieve request specifies the memory
1450 * transaction ensure it matches what we expect.
1451 */
1452 WARN("%s: wrong mem transaction flags %x != %x\n",
1453 __func__, req->flags, obj->desc.flags);
1454 ret = FFA_ERROR_INVALID_PARAMETER;
1455 goto err_unlock_all;
1456 }
1457
1458 if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
1459 req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
1460 /*
1461 * Current implementation does not support donate and
1462 * it supports no other flags.
1463 */
1464 WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
1465 ret = FFA_ERROR_INVALID_PARAMETER;
1466 goto err_unlock_all;
1467 }
1468 }
1469
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001470 /* Validate the caller is a valid participant. */
Shruti Gupta20ce06c2022-08-25 14:22:53 +01001471 if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001472 WARN("%s: Invalid endpoint ID (0x%x).\n",
1473 __func__, sp_ctx->sp_id);
1474 ret = FFA_ERROR_INVALID_PARAMETER;
1475 goto err_unlock_all;
1476 }
1477
Marc Bonnicid1907f02022-04-19 17:42:53 +01001478 /* Validate that the provided emad offset and structure is valid.*/
1479 for (size_t i = 0; i < req->emad_count; i++) {
1480 size_t emad_size;
1481 struct ffa_emad_v1_0 *emad;
1482
1483 emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1484 &emad_size);
1485 if (emad == NULL) {
1486 WARN("%s: invalid emad structure.\n", __func__);
1487 ret = FFA_ERROR_INVALID_PARAMETER;
1488 goto err_unlock_all;
1489 }
1490
1491 if ((uintptr_t) emad >= (uintptr_t)
1492 ((uint8_t *) req + total_length)) {
1493 WARN("Invalid emad access.\n");
1494 ret = FFA_ERROR_INVALID_PARAMETER;
1495 goto err_unlock_all;
1496 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001497 }
1498
1499 /*
1500 * Validate all the endpoints match in the case of multiple
1501 * borrowers. We don't mandate that the order of the borrowers
1502 * must match in the descriptors therefore check to see if the
1503 * endpoints match in any order.
1504 */
1505 for (size_t i = 0; i < req->emad_count; i++) {
1506 bool found = false;
Marc Bonnicid1907f02022-04-19 17:42:53 +01001507 size_t emad_size;
1508 struct ffa_emad_v1_0 *emad;
1509 struct ffa_emad_v1_0 *other_emad;
1510
1511 emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1512 &emad_size);
1513 if (emad == NULL) {
1514 ret = FFA_ERROR_INVALID_PARAMETER;
1515 goto err_unlock_all;
1516 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001517
1518 for (size_t j = 0; j < obj->desc.emad_count; j++) {
Marc Bonnicid1907f02022-04-19 17:42:53 +01001519 other_emad = spmc_shmem_obj_get_emad(
1520 &obj->desc, j, MAKE_FFA_VERSION(1, 1),
1521 &emad_size);
1522
1523 if (other_emad == NULL) {
1524 ret = FFA_ERROR_INVALID_PARAMETER;
1525 goto err_unlock_all;
1526 }
1527
1528 if (req->emad_count &&
1529 emad->mapd.endpoint_id ==
1530 other_emad->mapd.endpoint_id) {
Marc Bonnici336630f2022-01-13 11:39:10 +00001531 found = true;
1532 break;
1533 }
1534 }
1535
1536 if (!found) {
1537 WARN("%s: invalid receiver id (0x%x).\n",
Marc Bonnicid1907f02022-04-19 17:42:53 +01001538 __func__, emad->mapd.endpoint_id);
Marc Bonnici336630f2022-01-13 11:39:10 +00001539 ret = FFA_ERROR_INVALID_PARAMETER;
1540 goto err_unlock_all;
1541 }
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001542 }
1543
1544 mbox->state = MAILBOX_STATE_FULL;
1545
	if (req->emad_count != 0U) {
		obj->in_use++;
	}

	/*
	 * If the caller is v1.0, convert the descriptor; otherwise copy it
	 * directly.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
							&copy_size,
							&out_desc_size);
		if (ret != 0U) {
			ERROR("%s: Failed to process descriptor.\n", __func__);
			goto err_unlock_all;
		}
	} else {
		copy_size = MIN(obj->desc_size, buf_size);
		out_desc_size = obj->desc_size;

		memcpy(resp, &obj->desc, copy_size);
	}

	/* Set the NS bit in the response if applicable. */
	spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);

	spin_unlock(&spmc_shmem_obj_state.lock);
	spin_unlock(&mbox->lock);

	SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
		 copy_size, 0, 0, 0, 0, 0);

err_unlock_all:
	spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, ret);
}

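/*
 * Illustrative only (not part of this file's ABI): a borrower whose RX
 * buffer is smaller than the full descriptor is expected to drive a
 * fragmented retrieval loop along these lines, where ffa_call() and the
 * ret.x* fields stand in for the caller's SMC wrapper and its return
 * registers:
 *
 *	ret = ffa_call(FFA_MEM_RETRIEVE_REQ, ...);
 *	total_len = ret.x1;	// full descriptor size
 *	offset = ret.x2;	// bytes received in the first fragment
 *	while (offset < total_len) {
 *		ret = ffa_call(FFA_MEM_FRAG_RX, handle_low, handle_high,
 *			       offset);
 *		// consume ret.x3 bytes from the RX buffer, then release it
 *		offset += ret.x3;
 *	}
 *
 * Each FFA_MEM_FRAG_RX call below resumes the copy at fragment_offset and
 * returns FFA_MEM_FRAG_TX with the number of bytes written.
 */
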
/**
 * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
 * @smc_fid:          FID of the SMC invocation.
 * @secure_origin:    True if the call originated from the secure world.
 * @handle_low:       Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
 * @handle_high:      Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
 * @fragment_offset:  Byte offset in descriptor to resume at.
 * @sender_id:        Bit[31:16]: Endpoint id of sender if client is a
 *                    hypervisor. 0 otherwise.
 * @handle:           Handle passed to the SMC call. Used to return
 *                    FFA_MEM_FRAG_TX.
 *
 * Return: @handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
			  bool secure_origin,
			  uint32_t handle_low,
			  uint32_t handle_high,
			  uint32_t fragment_offset,
			  uint32_t sender_id,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	int ret;
	uint8_t *src;
	size_t buf_size;
	size_t copy_size;
	size_t full_copy_size;
	uint32_t desc_sender_id;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
	struct spmc_shmem_obj *obj;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);

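	/*
	 * This implementation only supports retrieval of additional
	 * fragments by secure world callers.
	 */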
	if (!secure_origin) {
		WARN("%s: can only be called from the secure world.\n",
		     __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		WARN("%s: invalid handle 0x%lx.\n", __func__, mem_handle);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
	if (sender_id != 0U && sender_id != desc_sender_id) {
		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
		     sender_id, desc_sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	if (fragment_offset >= obj->desc_size) {
		WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
		     __func__, fragment_offset, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	spin_lock(&mbox->lock);

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (mbox->state != MAILBOX_STATE_EMPTY) {
		WARN("%s: RX Buffer is full!\n", __func__);
		ret = FFA_ERROR_DENIED;
		goto err_unlock_all;
	}

	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

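	/*
	 * Mark the RX buffer as in use; the requested fragment of the
	 * descriptor is copied into it below.
	 */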
	mbox->state = MAILBOX_STATE_FULL;

	/*
	 * If the caller is v1.0, convert the descriptor; otherwise copy it
	 * directly.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		size_t out_desc_size;

		ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
							buf_size,
							fragment_offset,
							&copy_size,
							&out_desc_size);
		if (ret != 0U) {
			ERROR("%s: Failed to process descriptor.\n", __func__);
			goto err_unlock_all;
		}
	} else {
		full_copy_size = obj->desc_size - fragment_offset;
		copy_size = MIN(full_copy_size, buf_size);

		src = (uint8_t *)&obj->desc;

		memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
	}

	spin_unlock(&mbox->lock);
	spin_unlock(&spmc_shmem_obj_state.lock);

	SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
		 copy_size, sender_id, 0, 0, 0);

err_unlock_all:
	spin_unlock(&mbox->lock);
err_unlock_shmem:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

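/*
 * Illustrative only: before invoking FFA_MEM_RELINQUISH, an SP is expected
 * to populate its TX buffer with a relinquish descriptor along these lines
 * (tx_buffer, mem_handle and own_sp_id are hypothetical caller-side names;
 * the field names follow struct ffa_mem_relinquish_descriptor):
 *
 *	struct ffa_mem_relinquish_descriptor *rel = tx_buffer;
 *
 *	rel->handle = mem_handle;	// handle from the retrieve response
 *	rel->flags = 0;			// no flags are currently supported
 *	rel->endpoint_count = 1;	// proxy endpoints are not supported
 *	rel->endpoint_array[0] = own_sp_id;
 *
 * The checks below enforce exactly these constraints; on success the SPMC
 * decrements the object's outstanding retrieval count.
 */
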
/**
 * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
 * @smc_fid:        FID of the SMC invocation.
 * @secure_origin:  True if the call originated from the secure world.
 * @handle:         Handle passed to the SMC call.
 *
 * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
 * Used by the secure OS to release memory previously shared with it by the
 * non-secure OS.
 *
 * The relinquish descriptor, containing the handle of the memory region to
 * release, must be in the client's (secure OS's) transmit buffer.
 *
 * Return: 0 on success, error code on failure.
 */
int spmc_ffa_mem_relinquish(uint32_t smc_fid,
			    bool secure_origin,
			    uint32_t handle_low,
			    uint32_t handle_high,
			    uint32_t fragment_offset,
			    uint32_t sender_id,
			    void *cookie,
			    void *handle,
			    uint64_t flags)
{
	int ret;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	struct spmc_shmem_obj *obj;
	const struct ffa_mem_relinquish_descriptor *req;
	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();

	if (!secure_origin) {
		WARN("%s: unsupported relinquish direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&mbox->lock);

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	req = mbox->tx_buffer;

	if (req->flags != 0U) {
		WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	if (req->endpoint_count == 0U) {
		WARN("%s: endpoint count cannot be 0.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/*
	 * Validate that the endpoint ID was populated correctly. We don't
	 * currently support proxy endpoints, so the endpoint count should
	 * always be 1.
	 */
	if (req->endpoint_count != 1U) {
		WARN("%s: unsupported endpoint count %u != 1\n", __func__,
		     req->endpoint_count);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/* Validate that the provided endpoint ID matches the partition ID. */
	if (req->endpoint_array[0] != sp_ctx->sp_id) {
		WARN("%s: invalid endpoint ID %u != %u\n", __func__,
		     req->endpoint_array[0], sp_ctx->sp_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/* Validate the caller is a valid participant. */
	if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
		WARN("%s: Invalid endpoint ID (0x%x).\n",
		     __func__, req->endpoint_array[0]);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

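	/*
	 * A relinquish is only valid while the object has an outstanding
	 * retrieval to release.
	 */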
	if (obj->in_use == 0U) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}
	obj->in_use--;

	spin_unlock(&spmc_shmem_obj_state.lock);
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock_all:
	spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
 * @smc_fid:        FID of the SMC invocation.
 * @secure_origin:  True if the call originated from the secure world.
 * @handle_low:     Unique handle of shared memory object to reclaim.
 *                  Bit[31:0].
 * @handle_high:    Unique handle of shared memory object to reclaim.
 *                  Bit[63:32].
 * @mem_flags:      Memory reclaim flags. Unsupported; must be 0.
 *
 * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
 * Used by the non-secure OS to reclaim memory previously shared with the
 * secure OS.
 *
 * Return: 0 on success, error code on failure.
 */
int spmc_ffa_mem_reclaim(uint32_t smc_fid,
			 bool secure_origin,
			 uint32_t handle_low,
			 uint32_t handle_high,
			 uint32_t mem_flags,
			 uint64_t x4,
			 void *cookie,
			 void *handle,
			 uint64_t flags)
{
	int ret;
	struct spmc_shmem_obj *obj;
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

	if (secure_origin) {
		WARN("%s: unsupported reclaim direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

1847
1848 if (mem_flags != 0U) {
1849 WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
1850 return spmc_ffa_error_return(handle,
1851 FFA_ERROR_INVALID_PARAMETER);
1852 }
1853
1854 spin_lock(&spmc_shmem_obj_state.lock);
1855
1856 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1857 if (obj == NULL) {
1858 ret = FFA_ERROR_INVALID_PARAMETER;
1859 goto err_unlock;
1860 }
1861 if (obj->in_use != 0U) {
1862 ret = FFA_ERROR_DENIED;
1863 goto err_unlock;
1864 }
Marc Bonnici503320e2022-02-21 15:02:36 +00001865
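	/*
	 * Reject the reclaim if the object's descriptor has not been fully
	 * transmitted.
	 */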
	if (obj->desc_filled != obj->desc_size) {
		WARN("%s: incomplete object desc filled %zu != size %zu\n",
		     __func__, obj->desc_filled, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	/* Allow for platform specific operations to be performed. */
	ret = plat_spmc_shmem_reclaim(&obj->desc);
	if (ret != 0) {
		goto err_unlock;
	}

	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
	spin_unlock(&spmc_shmem_obj_state.lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}