/*
 * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <assert.h>
#include <errno.h>

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/object_pool.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <services/ffa_svc.h>
#include "spmc.h"
#include "spmc_shared_mem.h"

#include <platform_def.h>

/**
 * struct spmc_shmem_obj - Shared memory object.
 * @desc_size:		Size of @desc.
 * @desc_filled:	Size of @desc already received.
 * @in_use:		Number of clients that have called ffa_mem_retrieve_req
 *			without a matching ffa_mem_relinquish call.
 * @desc:		FF-A memory region descriptor passed in ffa_mem_share.
 */
struct spmc_shmem_obj {
	size_t desc_size;
	size_t desc_filled;
	size_t in_use;
	struct ffa_mtd desc;
};

/*
 * Declare our data structure to store the metadata of memory share requests.
 * The main datastore is allocated on a per platform basis to ensure enough
 * storage can be made available.
 * The address of the data store will be populated by the SPMC during its
 * initialization.
 */

struct spmc_shmem_obj_state spmc_shmem_obj_state = {
	/* Set start value for handle so top 32 bits are needed quickly. */
	.next_handle = 0xffffffc0U,
};

/**
 * spmc_shmem_obj_size - Convert from descriptor size to object size.
 * @desc_size: Size of struct ffa_memory_region_descriptor object.
 *
 * Return: Size of struct spmc_shmem_obj object.
 */
static size_t spmc_shmem_obj_size(size_t desc_size)
{
	return desc_size + offsetof(struct spmc_shmem_obj, desc);
}

/**
 * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
 * @state:	Global state.
 * @desc_size:	Size of struct ffa_memory_region_descriptor object that
 *		allocated object will hold.
 *
 * Return: Pointer to newly allocated object, or %NULL if there is not enough
 *         space left. The returned pointer is only valid while @state is
 *         locked; to use it again after unlocking @state,
 *         spmc_shmem_obj_lookup must be called.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
{
	struct spmc_shmem_obj *obj;
	size_t free = state->data_size - state->allocated;
	size_t obj_size;

	if (state->data == NULL) {
		ERROR("Missing shmem datastore!\n");
		return NULL;
	}

	obj_size = spmc_shmem_obj_size(desc_size);

	/* Ensure the obj size has not overflowed. */
	if (obj_size < desc_size) {
		WARN("%s(0x%zx) desc_size overflow\n",
		     __func__, desc_size);
		return NULL;
	}

	if (obj_size > free) {
		WARN("%s(0x%zx) failed, free 0x%zx\n",
		     __func__, desc_size, free);
		return NULL;
	}
	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
	obj->desc = (struct ffa_mtd) {0};
	obj->desc_size = desc_size;
	obj->desc_filled = 0;
	obj->in_use = 0;
	state->allocated += obj_size;
	return obj;
}

/**
 * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
 * @state:	Global state.
 * @obj:	Object to free.
 *
 * Release memory used by @obj. Other objects may move, so on return all
 * pointers to struct spmc_shmem_obj object should be considered invalid, not
 * just @obj.
 *
 * The current implementation always compacts the remaining objects to simplify
 * the allocator and to avoid fragmentation.
 */

static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
				struct spmc_shmem_obj *obj)
{
	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
	uint8_t *shift_dest = (uint8_t *)obj;
	uint8_t *shift_src = shift_dest + free_size;
	size_t shift_size = state->allocated - (shift_src - state->data);

	if (shift_size != 0U) {
		memmove(shift_dest, shift_src, shift_size);
	}
	state->allocated -= free_size;
}

/**
 * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
 * @state:	Global state.
 * @handle:	Unique handle of object to return.
 *
 * Return: struct spmc_shmem_obj object with handle matching @handle.
 *         %NULL, if no object in @state->data has a matching handle.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
{
	uint8_t *curr = state->data;

	while (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		if (obj->desc.handle == handle) {
			return obj;
		}
		curr += spmc_shmem_obj_size(obj->desc_size);
	}
	return NULL;
}

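/*
 * Illustrative lifecycle of a shared memory object (a sketch, not part of
 * the build; the variable names are hypothetical). Objects are carved out
 * of the platform-provided datastore and are only valid while the state
 * lock is held, because spmc_shmem_obj_free() compacts the pool and may
 * move every remaining object:
 *
 *	spin_lock(&spmc_shmem_obj_state.lock);
 *	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
 *	...
 *	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
 *	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
 *	spin_unlock(&spmc_shmem_obj_state.lock);
 */
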
/**
 * spmc_shmem_obj_get_next - Get the next memory object from an offset.
 * @state:	Global state.
 * @offset:	Offset used to track which objects have previously been
 *		returned.
 *
 * Return: the next struct spmc_shmem_obj object from the provided offset.
 *         %NULL, if there are no more objects.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
{
	uint8_t *curr = state->data + *offset;

	if (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		*offset += spmc_shmem_obj_size(obj->desc_size);

		return obj;
	}
	return NULL;
}

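/*
 * Iteration sketch (as used by spmc_shmem_check_state_obj() further down):
 *
 *	size_t obj_offset = 0;
 *	struct spmc_shmem_obj *cur;
 *
 *	while ((cur = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
 *					      &obj_offset)) != NULL) {
 *		... inspect cur ...
 *	}
 */
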
/*******************************************************************************
 * FF-A memory descriptor helper functions.
 ******************************************************************************/
/**
 * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
 *                           client's FF-A version.
 * @desc:		The memory transaction descriptor.
 * @index:		The index of the emad element to be accessed.
 * @ffa_version:	FF-A version of the provided structure.
 * @emad_size:		Will be populated with the size of the returned emad
 *			descriptor.
 * Return: A pointer to the requested emad structure.
 */
static void *
spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
			uint32_t ffa_version, size_t *emad_size)
{
	uint8_t *emad;
	/*
	 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
	 * format, otherwise assume it is a v1.1 format.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		/* Cast our descriptor to the v1.0 format. */
		struct ffa_mtd_v1_0 *mtd_v1_0 =
					(struct ffa_mtd_v1_0 *) desc;
		emad = (uint8_t *) &(mtd_v1_0->emad);
		*emad_size = sizeof(struct ffa_emad_v1_0);
	} else {
		if (!is_aligned(desc->emad_offset, 16)) {
			WARN("Emad offset is not aligned.\n");
			return NULL;
		}
		emad = ((uint8_t *) desc + desc->emad_offset);
		*emad_size = desc->emad_size;
	}
	return (emad + (*emad_size * index));
}

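/*
 * Layout sketch (assuming the descriptor definitions in
 * spmc_shared_mem.h): for a v1.0 caller the emad array is the fixed
 * trailing member of struct ffa_mtd_v1_0, so entry i lives at
 *
 *	(uint8_t *)&mtd_v1_0->emad + (i * sizeof(struct ffa_emad_v1_0));
 *
 * while a v1.1 descriptor describes its own layout, so entry i lives at
 *
 *	(uint8_t *)mtd + mtd->emad_offset + (i * mtd->emad_size);
 *
 * which is exactly the address computed by the helper above.
 */
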
/**
 * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
 *				 FF-A version of the descriptor.
 * @obj:		Object containing ffa_memory_region_descriptor.
 * @ffa_version:	FF-A version of the provided descriptor.
 *
 * Return: struct ffa_comp_mrd object corresponding to the composite memory
 *	   region descriptor.
 */
static struct ffa_comp_mrd *
spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
{
	size_t emad_size;
	/*
	 * The comp_mrd_offset field of the emad descriptor remains consistent
	 * between FF-A versions therefore we can use the v1.0 descriptor here
	 * in all cases.
	 */
	struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
							     ffa_version,
							     &emad_size);
	/* Ensure the emad array was found. */
	if (emad == NULL) {
		return NULL;
	}

	/* Ensure the composite descriptor offset is aligned. */
	if (!is_aligned(emad->comp_mrd_offset, 8)) {
		WARN("Unaligned composite memory region descriptor offset.\n");
		return NULL;
	}

	return (struct ffa_comp_mrd *)
	       ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
}

/**
 * spmc_shmem_obj_ffa_constituent_size - Calculate variable size part of obj.
 * @obj:		Object containing ffa_memory_region_descriptor.
 * @ffa_version:	FF-A version of the provided descriptor.
 *
 * Return: Size of ffa_constituent_memory_region_descriptors in @obj.
 */
static size_t
spmc_shmem_obj_ffa_constituent_size(struct spmc_shmem_obj *obj,
				    uint32_t ffa_version)
{
	struct ffa_comp_mrd *comp_mrd;

	comp_mrd = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);
	if (comp_mrd == NULL) {
		return 0;
	}
	return comp_mrd->address_range_count * sizeof(struct ffa_cons_mrd);
}

/**
 * spmc_shmem_obj_validate_id - Validate a partition ID is participating in
 *				a given memory transaction.
 * @obj:	The shared memory object containing the descriptor
 *		of the memory transaction.
 * @sp_id:	Partition ID to validate.
 * Return: true if ID is valid, else false.
 */
bool spmc_shmem_obj_validate_id(struct spmc_shmem_obj *obj, uint16_t sp_id)
{
	bool found = false;
	struct ffa_mtd *desc = &obj->desc;
	size_t desc_size = obj->desc_size;

	/* Validate the partition is a valid participant. */
	for (unsigned int i = 0U; i < desc->emad_count; i++) {
		size_t emad_size;
		struct ffa_emad_v1_0 *emad;

		emad = spmc_shmem_obj_get_emad(desc, i,
					       MAKE_FFA_VERSION(1, 1),
					       &emad_size);
		/*
		 * Validate the calculated emad address resides within the
		 * descriptor.
		 */
		if ((emad == NULL) || (uintptr_t) emad >=
		    (uintptr_t)((uint8_t *) desc + desc_size)) {
			VERBOSE("Invalid emad.\n");
			break;
		}
		if (sp_id == emad->mapd.endpoint_id) {
			found = true;
			break;
		}
	}
	return found;
}

/*
 * Compare two memory regions to determine if any range overlaps with another
 * ongoing memory transaction.
 */
static bool
overlapping_memory_regions(struct ffa_comp_mrd *region1,
			   struct ffa_comp_mrd *region2)
{
	uint64_t region1_start;
	uint64_t region1_size;
	uint64_t region1_end;
	uint64_t region2_start;
	uint64_t region2_size;
	uint64_t region2_end;

	assert(region1 != NULL);
	assert(region2 != NULL);

	if (region1 == region2) {
		return true;
	}

	/*
	 * Check each memory region in the request against existing
	 * transactions.
	 */
	for (size_t i = 0; i < region1->address_range_count; i++) {

		region1_start = region1->address_range_array[i].address;
		region1_size =
			region1->address_range_array[i].page_count *
			PAGE_SIZE_4KB;
		region1_end = region1_start + region1_size;

		for (size_t j = 0; j < region2->address_range_count; j++) {

			region2_start = region2->address_range_array[j].address;
			region2_size =
				region2->address_range_array[j].page_count *
				PAGE_SIZE_4KB;
			region2_end = region2_start + region2_size;

			/* Check if regions are not overlapping. */
			if (!((region2_end <= region1_start) ||
			      (region1_end <= region2_start))) {
				WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
				     region1_start, region1_end,
				     region2_start, region2_end);
				return true;
			}
		}
	}
	return false;
}

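/*
 * Worked example with hypothetical addresses: a range starting at 0x8000
 * with two 4K pages covers [0x8000, 0xa000). A second range
 * [0xa000, 0xb000) does not overlap, since region1_end <= region2_start,
 * whereas [0x9000, 0xa000) fails the check above and aborts the
 * transaction.
 */
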
/*******************************************************************************
 * FF-A v1.0 Memory Descriptor Conversion Helpers.
 ******************************************************************************/
/**
 * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
 *                                     converted descriptor.
 * @orig:	The original v1.0 memory transaction descriptor.
 * @desc_size:	The size of the original v1.0 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.1 format.
 */
static size_t
spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
{
	size_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = orig->emad;

	/* Get the size of the v1.1 descriptor. */
	size += sizeof(struct ffa_mtd);

	/* Add the size of the emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrds. */
	size += sizeof(struct ffa_comp_mrd);

	/* Add the size of the constituent mrds. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Check the calculated address is within the memory descriptor. */
	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
	    (uintptr_t)((uint8_t *) orig + desc_size)) {
		return 0;
	}
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}

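/*
 * Worked example for a hypothetical transaction with one borrower and a
 * single constituent range: the helper above returns
 *
 *	sizeof(struct ffa_mtd) + (1 * sizeof(struct ffa_emad_v1_0)) +
 *	sizeof(struct ffa_comp_mrd) + (1 * sizeof(struct ffa_cons_mrd))
 *
 * which is the allocation spmc_ffa_fill_desc() requests before performing
 * the v1.0 to v1.1 conversion below.
 */
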
/**
 * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
 *                                     converted descriptor.
 * @orig:	The original v1.1 memory transaction descriptor.
 * @desc_size:	The size of the original v1.1 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.0 format.
 */
static size_t
spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
{
	size_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
					   ((uint8_t *) orig +
					    orig->emad_offset);

	/* Get the size of the v1.0 descriptor. */
	size += sizeof(struct ffa_mtd_v1_0);

	/* Add the size of the v1.0 emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrds. */
	size += sizeof(struct ffa_comp_mrd);

	/* Add the size of the constituent mrds. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Check the calculated address is within the memory descriptor. */
	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
	    (uintptr_t)((uint8_t *) orig + desc_size)) {
		return 0;
	}
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}

/**
 * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
 * @out_obj:	The shared memory object to populate with the converted
 *		descriptor.
 * @orig:	The shared memory object containing the v1.0 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
				     struct spmc_shmem_obj *orig)
{
	struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
	struct ffa_mtd *out = &out_obj->desc;
	struct ffa_emad_v1_0 *emad_array_in;
	struct ffa_emad_v1_0 *emad_array_out;
	struct ffa_comp_mrd *mrd_in;
	struct ffa_comp_mrd *mrd_out;

	size_t mrd_in_offset;
	size_t mrd_out_offset;
	size_t mrd_size = 0;

	/* Populate the new descriptor format from the v1.0 struct. */
	out->sender_id = mtd_orig->sender_id;
	out->memory_region_attributes = mtd_orig->memory_region_attributes;
	out->flags = mtd_orig->flags;
	out->handle = mtd_orig->handle;
	out->tag = mtd_orig->tag;
	out->emad_count = mtd_orig->emad_count;
	out->emad_size = sizeof(struct ffa_emad_v1_0);

	/*
	 * We will locate the emad descriptors directly after the ffa_mtd
	 * struct. This will be 8-byte aligned.
	 */
	out->emad_offset = sizeof(struct ffa_mtd);

	emad_array_in = mtd_orig->emad;
	emad_array_out = (struct ffa_emad_v1_0 *)
			 ((uint8_t *) out + out->emad_offset);

	/* Copy across the emad structs. */
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		/* Bound check for the i'th entry of the emad array. */
		if (((uint8_t *) &emad_array_in[i] +
		     sizeof(struct ffa_emad_v1_0)) >
		    ((uint8_t *) mtd_orig + orig->desc_size)) {
			VERBOSE("%s: Invalid mtd structure.\n", __func__);
			return false;
		}
		memcpy(&emad_array_out[i], &emad_array_in[i],
		       sizeof(struct ffa_emad_v1_0));
	}

	/* Place the mrd descriptors after the end of the emad descriptors.*/
	mrd_in_offset = emad_array_in->comp_mrd_offset;
	mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

	/* Add the size of the composite memory region descriptor. */
	mrd_size += sizeof(struct ffa_comp_mrd);

	/* Find the mrd descriptor. */
	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

	/* Add the size of the constituent memory region descriptors. */
	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

	/*
	 * Update the offset in the emads by the delta between the input and
	 * output addresses.
	 */
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		emad_array_out[i].comp_mrd_offset =
			emad_array_in[i].comp_mrd_offset +
			(mrd_out_offset - mrd_in_offset);
	}

	/* Verify that we stay within bound of the memory descriptors. */
	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
	    (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
		ERROR("%s: Invalid mrd structure.\n", __func__);
		return false;
	}

	/* Copy the mrd descriptors directly. */
	memcpy(mrd_out, mrd_in, mrd_size);

	return true;
}

/**
 * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
 *                                a v1.0 memory object.
 * @out_obj:	The shared memory object to populate with the v1.0 descriptor.
 * @orig:	The shared memory object containing the v1.1 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
			     struct spmc_shmem_obj *orig)
{
	struct ffa_mtd *mtd_orig = &orig->desc;
	struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
	struct ffa_emad_v1_0 *emad_in;
	struct ffa_emad_v1_0 *emad_array_in;
	struct ffa_emad_v1_0 *emad_array_out;
	struct ffa_comp_mrd *mrd_in;
	struct ffa_comp_mrd *mrd_out;

	size_t mrd_in_offset;
	size_t mrd_out_offset;
	size_t emad_out_array_size;
	size_t mrd_size = 0;
	size_t orig_desc_size = orig->desc_size;

	/* Populate the v1.0 descriptor format from the v1.1 struct. */
	out->sender_id = mtd_orig->sender_id;
	out->memory_region_attributes = mtd_orig->memory_region_attributes;
	out->flags = mtd_orig->flags;
	out->handle = mtd_orig->handle;
	out->tag = mtd_orig->tag;
	out->emad_count = mtd_orig->emad_count;

	/* Determine the location of the emad array in both descriptors. */
	emad_array_in = (struct ffa_emad_v1_0 *)
			((uint8_t *) mtd_orig + mtd_orig->emad_offset);
	emad_array_out = out->emad;

	/* Copy across the emad structs. */
	emad_in = emad_array_in;
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		/* Bound check for emad array. */
		if (((uint8_t *)emad_in + sizeof(struct ffa_emad_v1_0)) >
		    ((uint8_t *) mtd_orig + orig_desc_size)) {
			VERBOSE("%s: Invalid mtd structure.\n", __func__);
			return false;
		}
		memcpy(&emad_array_out[i], emad_in,
		       sizeof(struct ffa_emad_v1_0));

		/* emad_size is a byte stride, so advance at byte granularity. */
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *) emad_in + mtd_orig->emad_size);
	}

	/* Place the mrd descriptors after the end of the emad descriptors. */
	emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;

	mrd_out_offset = (uint8_t *) out->emad - (uint8_t *) out +
			 emad_out_array_size;

	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

	mrd_in_offset = mtd_orig->emad_offset +
			(mtd_orig->emad_size * mtd_orig->emad_count);

	/* Add the size of the composite memory region descriptor. */
	mrd_size += sizeof(struct ffa_comp_mrd);

	/* Find the mrd descriptor. */
	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

	/* Add the size of the constituent memory region descriptors. */
	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

	/*
	 * Update the offset in the emads by the delta between the input and
	 * output addresses.
	 */
	emad_in = emad_array_in;

	for (unsigned int i = 0U; i < out->emad_count; i++) {
		emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
						    (mrd_out_offset -
						     mrd_in_offset);
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *) emad_in + mtd_orig->emad_size);
	}

	/* Verify that we stay within bound of the memory descriptors. */
	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
	    (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
		ERROR("%s: Invalid mrd structure.\n", __func__);
		return false;
	}

	/* Copy the mrd descriptors directly. */
	memcpy(mrd_out, mrd_in, mrd_size);

	return true;
}

/**
 * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
 *                                     the v1.0 format and populates the
 *                                     provided buffer.
 * @dst:		Buffer to populate v1.0 ffa_memory_region_descriptor.
 * @orig_obj:		Object containing v1.1 ffa_memory_region_descriptor.
 * @buf_size:		Size of the buffer to populate.
 * @offset:		The offset of the converted descriptor to copy.
 * @copy_size:		Will be populated with the number of bytes copied.
 * @v1_0_desc_size:	Will be populated with the total size of the v1.0
 *			descriptor.
 *
 * Return: 0 if conversion and population succeeded.
 * Note: This function invalidates the reference to @orig_obj therefore
 * `spmc_shmem_obj_lookup` must be called if further usage is required.
 */
static uint32_t
spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
				  size_t buf_size, size_t offset,
				  size_t *copy_size, size_t *v1_0_desc_size)
{
	struct spmc_shmem_obj *v1_0_obj;

	/* Calculate the size that the v1.0 descriptor will require. */
	*v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
				&orig_obj->desc, orig_obj->desc_size);

	if (*v1_0_desc_size == 0) {
		ERROR("%s: cannot determine size of descriptor.\n",
		      __func__);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/* Get a new obj to store the v1.0 descriptor. */
	v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
					*v1_0_desc_size);

	if (!v1_0_obj) {
		return FFA_ERROR_NO_MEMORY;
	}

	/* Perform the conversion from v1.1 to v1.0. */
	if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
		spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	*copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
	memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);

	/*
	 * We're finished with the v1.0 descriptor for now so free it.
	 * Note that this will invalidate any references to the v1.1
	 * descriptor.
	 */
	spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);

	return 0;
}

/**
 * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
 * @obj:		Object containing ffa_memory_region_descriptor.
 * @ffa_version:	FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, -EINVAL if constituent_memory_region_descriptor
 * offset or count is invalid.
 */
static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
				uint32_t ffa_version)
{
	uint32_t comp_mrd_offset = 0;

	if (obj->desc.emad_count == 0U) {
		WARN("%s: unsupported attribute desc count %u.\n",
		     __func__, obj->desc.emad_count);
		return -EINVAL;
	}

	for (size_t emad_num = 0; emad_num < obj->desc.emad_count; emad_num++) {
		size_t size;
		size_t count;
		size_t expected_size;
		size_t total_page_count;
		size_t emad_size;
		size_t desc_size;
		size_t header_emad_size;
		uint32_t offset;
		struct ffa_comp_mrd *comp;
		struct ffa_emad_v1_0 *emad;

		emad = spmc_shmem_obj_get_emad(&obj->desc, emad_num,
					       ffa_version, &emad_size);
		if (emad == NULL) {
			WARN("%s: invalid emad structure.\n", __func__);
			return -EINVAL;
		}

		/*
		 * Validate the calculated emad address resides within the
		 * descriptor.
		 */
		if ((uintptr_t) emad >=
		    (uintptr_t)((uint8_t *) &obj->desc + obj->desc_size)) {
			WARN("Invalid emad access.\n");
			return -EINVAL;
		}

		offset = emad->comp_mrd_offset;

		if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
			desc_size = sizeof(struct ffa_mtd_v1_0);
		} else {
			desc_size = sizeof(struct ffa_mtd);
		}

		header_emad_size = desc_size +
			(obj->desc.emad_count * emad_size);

		if (offset < header_emad_size) {
			WARN("%s: invalid object, offset %u < header + emad %zu\n",
			     __func__, offset, header_emad_size);
			return -EINVAL;
		}

		size = obj->desc_size;

		if (offset > size) {
			WARN("%s: invalid object, offset %u > total size %zu\n",
			     __func__, offset, obj->desc_size);
			return -EINVAL;
		}
		size -= offset;

		if (size < sizeof(struct ffa_comp_mrd)) {
			WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
			     __func__, offset, obj->desc_size);
			return -EINVAL;
		}
		size -= sizeof(struct ffa_comp_mrd);

		count = size / sizeof(struct ffa_cons_mrd);

		comp = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);

		if (comp == NULL) {
			WARN("%s: invalid comp_mrd offset\n", __func__);
			return -EINVAL;
		}

		if (comp->address_range_count != count) {
			WARN("%s: invalid object, desc count %u != %zu\n",
			     __func__, comp->address_range_count, count);
			return -EINVAL;
		}

		expected_size = offset + sizeof(*comp) +
				spmc_shmem_obj_ffa_constituent_size(obj,
								    ffa_version);

		if (expected_size != obj->desc_size) {
			WARN("%s: invalid object, computed size %zu != size %zu\n",
			     __func__, expected_size, obj->desc_size);
			return -EINVAL;
		}

		if (obj->desc_filled < obj->desc_size) {
			/*
			 * The whole descriptor has not yet been received.
			 * Skip final checks.
			 */
			return 0;
		}

		/*
		 * The offset provided to the composite memory region descriptor
		 * should be consistent across endpoint descriptors. Store the
		 * first entry and compare against subsequent entries.
		 */
		if (comp_mrd_offset == 0) {
			comp_mrd_offset = offset;
		} else {
			if (comp_mrd_offset != offset) {
				ERROR("%s: mismatching offsets provided, %u != %u\n",
				      __func__, offset, comp_mrd_offset);
				return -EINVAL;
			}
		}

		total_page_count = 0;

		for (size_t i = 0; i < count; i++) {
			total_page_count +=
				comp->address_range_array[i].page_count;
		}
		if (comp->total_page_count != total_page_count) {
			WARN("%s: invalid object, desc total_page_count %u != %zu\n",
			     __func__, comp->total_page_count,
			     total_page_count);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * spmc_shmem_check_state_obj - Check if the descriptor describes memory
 *				regions that are currently involved with an
 *				existing memory transaction. This implies that
 *				the memory is not in a valid state for lending.
 * @obj:		Object containing ffa_memory_region_descriptor.
 * @ffa_version:	FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, -EINVAL if invalid memory state.
 */
static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
				      uint32_t ffa_version)
{
	size_t obj_offset = 0;
	struct spmc_shmem_obj *inflight_obj;

	struct ffa_comp_mrd *other_mrd;
	struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
								  ffa_version);

	if (requested_mrd == NULL) {
		return -EINVAL;
	}

	inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
					       &obj_offset);

	while (inflight_obj != NULL) {
		/*
		 * Don't compare the transaction to itself or to partially
		 * transmitted descriptors.
		 */
		if ((obj->desc.handle != inflight_obj->desc.handle) &&
		    (obj->desc_size == obj->desc_filled)) {
			other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
							FFA_VERSION_COMPILED);
			if (other_mrd == NULL) {
				return -EINVAL;
			}
			if (overlapping_memory_regions(requested_mrd,
						       other_mrd)) {
				return -EINVAL;
			}
		}

		inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
						       &obj_offset);
	}
	return 0;
}

static long spmc_ffa_fill_desc(struct mailbox *mbox,
			       struct spmc_shmem_obj *obj,
			       uint32_t fragment_length,
			       ffa_mtd_flag32_t mtd_flag,
			       uint32_t ffa_version,
			       void *smc_handle)
{
	int ret;
	size_t emad_size;
	uint32_t handle_low;
	uint32_t handle_high;
	struct ffa_emad_v1_0 *emad;
	struct ffa_emad_v1_0 *other_emad;

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (fragment_length > mbox->rxtx_page_count * PAGE_SIZE_4KB) {
		WARN("%s: bad fragment size %u > %u buffer size\n", __func__,
		     fragment_length, mbox->rxtx_page_count * PAGE_SIZE_4KB);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (fragment_length > obj->desc_size - obj->desc_filled) {
		WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
		     fragment_length, obj->desc_size - obj->desc_filled);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	memcpy((uint8_t *)&obj->desc + obj->desc_filled,
	       (uint8_t *) mbox->tx_buffer, fragment_length);

	/* Ensure that the sender ID resides in the normal world. */
	if (ffa_is_secure_world_id(obj->desc.sender_id)) {
		WARN("%s: Invalid sender ID 0x%x.\n",
		     __func__, obj->desc.sender_id);
		ret = FFA_ERROR_DENIED;
		goto err_arg;
	}

	/* Ensure the NS bit is set to 0. */
	if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	/*
	 * We don't currently support any optional flags so ensure none are
	 * requested.
	 */
	if (obj->desc.flags != 0U && mtd_flag != 0U &&
	    (obj->desc.flags != mtd_flag)) {
		WARN("%s: invalid memory transaction flags %u != %u\n",
		     __func__, obj->desc.flags, mtd_flag);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (obj->desc_filled == 0U) {
		/* First fragment, descriptor header has been copied */
		obj->desc.handle = spmc_shmem_obj_state.next_handle++;
		obj->desc.flags |= mtd_flag;
	}

	obj->desc_filled += fragment_length;
	ret = spmc_shmem_check_obj(obj, ffa_version);
	if (ret != 0) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_bad_desc;
	}

	handle_low = (uint32_t)obj->desc.handle;
	handle_high = obj->desc.handle >> 32;

	if (obj->desc_filled != obj->desc_size) {
		SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
			 handle_high, obj->desc_filled,
			 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
	}

	/* The full descriptor has been received, perform any final checks. */

	/*
	 * If a partition ID resides in the secure world validate that the
	 * partition ID is for a known partition. Ignore any partition ID
	 * belonging to the normal world as it is assumed the Hypervisor will
	 * have validated these.
	 */
	for (size_t i = 0; i < obj->desc.emad_count; i++) {
		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
					       &emad_size);
		if (emad == NULL) {
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_bad_desc;
		}

		ffa_endpoint_id16_t ep_id = emad->mapd.endpoint_id;

		if (ffa_is_secure_world_id(ep_id)) {
			if (spmc_get_sp_ctx(ep_id) == NULL) {
				WARN("%s: Invalid receiver id 0x%x\n",
				     __func__, ep_id);
				ret = FFA_ERROR_INVALID_PARAMETER;
				goto err_bad_desc;
			}
		}
	}

	/* Ensure partition IDs are not duplicated. */
	for (size_t i = 0; i < obj->desc.emad_count; i++) {
		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
					       &emad_size);
		if (emad == NULL) {
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_bad_desc;
		}
		for (size_t j = i + 1; j < obj->desc.emad_count; j++) {
			other_emad = spmc_shmem_obj_get_emad(&obj->desc, j,
							     ffa_version,
							     &emad_size);
			if (other_emad == NULL) {
				ret = FFA_ERROR_INVALID_PARAMETER;
				goto err_bad_desc;
			}

			if (emad->mapd.endpoint_id ==
			    other_emad->mapd.endpoint_id) {
				WARN("%s: Duplicated endpoint id 0x%x\n",
				     __func__, emad->mapd.endpoint_id);
				ret = FFA_ERROR_INVALID_PARAMETER;
				goto err_bad_desc;
			}
		}
	}

	ret = spmc_shmem_check_state_obj(obj, ffa_version);
	if (ret) {
		ERROR("%s: invalid memory region descriptor.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_bad_desc;
	}

	/*
	 * Everything checks out, if the sender was using FF-A v1.0, convert
	 * the descriptor format to use the v1.1 structures.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		struct spmc_shmem_obj *v1_1_obj;
		uint64_t mem_handle;

		/* Calculate the size that the v1.1 descriptor will require. */
		size_t v1_1_desc_size =
			spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
							  obj->desc_size);

		if (v1_1_desc_size == 0U) {
			ERROR("%s: cannot determine size of descriptor.\n",
			      __func__);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_arg;
		}

		/* Get a new obj to store the v1.1 descriptor. */
		v1_1_obj =
			spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
					     v1_1_desc_size);

		if (!v1_1_obj) {
			ret = FFA_ERROR_NO_MEMORY;
			goto err_arg;
		}

		/* Perform the conversion from v1.0 to v1.1. */
		v1_1_obj->desc_size = v1_1_desc_size;
		v1_1_obj->desc_filled = v1_1_desc_size;
		if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
			ERROR("%s: Could not convert mtd!\n", __func__);
			spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_arg;
		}

		/*
		 * We're finished with the v1.0 descriptor so free it
		 * and continue our checks with the new v1.1 descriptor.
		 */
		mem_handle = obj->desc.handle;
		spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
		obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
		if (obj == NULL) {
			ERROR("%s: Failed to find converted descriptor.\n",
			      __func__);
			ret = FFA_ERROR_INVALID_PARAMETER;
			return spmc_ffa_error_return(smc_handle, ret);
		}
	}

	/* Allow for platform specific operations to be performed. */
	ret = plat_spmc_shmem_begin(&obj->desc);
	if (ret != 0) {
		goto err_arg;
	}

	SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
		 0, 0, 0);

err_bad_desc:
err_arg:
	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
	return spmc_ffa_error_return(smc_handle, ret);
}

/**
 * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
 * @client:		Client state.
 * @total_length:	Total length of shared memory descriptor.
 * @fragment_length:	Length of fragment of shared memory descriptor passed in
 *			this call.
 * @address:		Not supported, must be 0.
 * @page_count:		Not supported, must be 0.
 * @smc_handle:		Handle passed to smc call. Used to return
 *			FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
 * to share or lend memory from non-secure os to secure os (with no stream
 * endpoints).
 *
 * Return: 0 on success, error code on failure.
 */
long spmc_ffa_mem_send(uint32_t smc_fid,
		       bool secure_origin,
		       uint64_t total_length,
		       uint32_t fragment_length,
		       uint64_t address,
		       uint32_t page_count,
		       void *cookie,
		       void *handle,
		       uint64_t flags)

{
	long ret;
	struct spmc_shmem_obj *obj;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	ffa_mtd_flag32_t mtd_flag;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	size_t min_desc_size;

	if (address != 0U || page_count != 0U) {
		WARN("%s: custom memory region for message not supported.\n",
		     __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (secure_origin) {
		WARN("%s: unsupported share direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		min_desc_size = sizeof(struct ffa_mtd_v1_0);
	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
		min_desc_size = sizeof(struct ffa_mtd);
	} else {
		WARN("%s: bad FF-A version.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check if the descriptor is too small for the FF-A version. */
	if (fragment_length < min_desc_size) {
		WARN("%s: bad first fragment size %u < %zu\n",
		     __func__, fragment_length, min_desc_size);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

1177
1178 if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
1179 mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
1180 } else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
1181 mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
1182 } else {
1183 WARN("%s: invalid memory management operation.\n", __func__);
1184 return spmc_ffa_error_return(handle,
1185 FFA_ERROR_INVALID_PARAMETER);
1186 }
1187
1188 spin_lock(&spmc_shmem_obj_state.lock);
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001189 obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
1190 if (obj == NULL) {
1191 ret = FFA_ERROR_NO_MEMORY;
1192 goto err_unlock;
1193 }
1194
1195 spin_lock(&mbox->lock);
Marc Bonnicid1907f02022-04-19 17:42:53 +01001196 ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
1197 ffa_version, handle);
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001198 spin_unlock(&mbox->lock);
1199
1200 spin_unlock(&spmc_shmem_obj_state.lock);
1201 return ret;
1202
1203err_unlock:
1204 spin_unlock(&spmc_shmem_obj_state.lock);
1205 return spmc_ffa_error_return(handle, ret);
1206}
1207
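/*
 * Illustrative normal-world call sequence for a fragmented share (a
 * sketch following the FF-A register conventions; the lengths and the
 * returned handle are hypothetical):
 *
 *	FFA_MEM_SHARE(total_length=0x2000, fragment_length=0x1000, ...)
 *		=> FFA_MEM_FRAG_RX(handle, desc_filled=0x1000)
 *	FFA_MEM_FRAG_TX(handle, fragment_length=0x1000)
 *		=> FFA_SUCCESS(handle)
 *
 * Each fragment is staged from the sender's TX buffer into the object by
 * spmc_ffa_fill_desc() until desc_filled == desc_size, at which point the
 * final validation and any v1.0 conversion are performed.
 */
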
/**
 * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
 * @client:		Client state.
 * @handle_low:		Handle_low value returned from FFA_MEM_FRAG_RX.
 * @handle_high:	Handle_high value returned from FFA_MEM_FRAG_RX.
 * @fragment_length:	Length of fragments transmitted.
 * @sender_id:		Vmid of sender in bits [31:16]
 * @smc_handle:		Handle passed to smc call. Used to return
 *			FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Return: @smc_handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
			  bool secure_origin,
			  uint64_t handle_low,
			  uint64_t handle_high,
			  uint32_t fragment_length,
			  uint32_t sender_id,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	long ret;
	uint32_t desc_sender_id;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);

	struct spmc_shmem_obj *obj;
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
		     __func__, mem_handle);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
	if (sender_id != desc_sender_id) {
		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
		     sender_id, desc_sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	if (obj->desc_filled == obj->desc_size) {
		WARN("%s: object desc already filled, %zu\n", __func__,
		     obj->desc_filled);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	spin_lock(&mbox->lock);
	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
				 handle);
	spin_unlock(&mbox->lock);

	spin_unlock(&spmc_shmem_obj_state.lock);
	return ret;

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor
 *				      if the caller implements a version greater
 *				      than FF-A 1.0 or if they have requested
 *				      the functionality.
 *				      TODO: We are assuming that the caller is
 *				      an SP. To support retrieval from the
 *				      normal world this function will need to be
 *				      expanded accordingly.
 * @resp:	Descriptor populated in caller's RX buffer.
 * @sp_ctx:	Context of the calling SP.
 */
void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
			 struct secure_partition_desc *sp_ctx)
{
	if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
	    sp_ctx->ns_bit_requested) {
		/*
		 * Currently memory senders must reside in the normal
		 * world, and we do not have the functionality to change
		 * the state of memory dynamically. Therefore we can always set
		 * the NS bit to 1.
		 */
		resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
	}
}

/**
 * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
 * @smc_fid:		FID of SMC
 * @total_length:	Total length of retrieve request descriptor if this is
 *			the first call. Otherwise (unsupported) must be 0.
 * @fragment_length:	Length of fragment of retrieve request descriptor passed
 *			in this call. Only @fragment_length == @length is
 *			supported by this implementation.
 * @address:		Not supported, must be 0.
 * @page_count:		Not supported, must be 0.
 * @smc_handle:		Handle passed to smc call. Used to return
 *			FFA_MEM_RETRIEVE_RESP.
 *
 * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
 * Used by secure os to retrieve memory already shared by non-secure os.
 * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
 * the client must call FFA_MEM_FRAG_RX until the full response has been
 * received.
 *
 * Return: @handle on success, error code on failure.
 */
long
spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
			  bool secure_origin,
			  uint32_t total_length,
			  uint32_t fragment_length,
			  uint64_t address,
			  uint32_t page_count,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	int ret;
	size_t buf_size;
	size_t copy_size = 0;
	size_t min_desc_size;
	size_t out_desc_size = 0;

	/*
	 * Currently we are only accessing fields that are the same in both the
	 * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
	 * here. We only need validate against the appropriate struct size.
	 */
	struct ffa_mtd *resp;
	const struct ffa_mtd *req;
	struct spmc_shmem_obj *obj = NULL;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();

	if (!secure_origin) {
		WARN("%s: unsupported retrieve req direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (address != 0U || page_count != 0U) {
		WARN("%s: custom memory region not supported.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&mbox->lock);

	req = mbox->tx_buffer;
	resp = mbox->rx_buffer;
	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	if (mbox->state != MAILBOX_STATE_EMPTY) {
		WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
		ret = FFA_ERROR_DENIED;
		goto err_unlock_mailbox;
	}

	if (fragment_length != total_length) {
		WARN("%s: fragmented retrieve request not supported.\n",
		     __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	if (req->emad_count == 0U) {
		WARN("%s: unsupported attribute desc count %u.\n",
		     __func__, req->emad_count);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	/* Determine the appropriate minimum descriptor size. */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		min_desc_size = sizeof(struct ffa_mtd_v1_0);
	} else {
		min_desc_size = sizeof(struct ffa_mtd);
	}
	if (total_length < min_desc_size) {
		WARN("%s: invalid length %u < %zu\n", __func__, total_length,
		     min_desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (obj->desc_filled != obj->desc_size) {
		WARN("%s: incomplete object desc filled %zu < size %zu\n",
		     __func__, obj->desc_filled, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
		WARN("%s: wrong sender id 0x%x != 0x%x\n",
		     __func__, req->sender_id, obj->desc.sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->tag != obj->desc.tag) {
		WARN("%s: wrong tag 0x%lx != 0x%lx\n",
		     __func__, req->tag, obj->desc.tag);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
		WARN("%s: mismatch of endpoint counts %u != %u\n",
		     __func__, req->emad_count, obj->desc.emad_count);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}
1445
Marc Bonnici08f28ef2022-04-19 16:52:59 +01001446 /* Ensure the NS bit is set to 0 in the request. */
1447 if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
1448 WARN("%s: NS mem attributes flags MBZ.\n", __func__);
1449 ret = FFA_ERROR_INVALID_PARAMETER;
1450 goto err_unlock_all;
1451 }
1452
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001453 if (req->flags != 0U) {
1454 if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
1455 (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
1456 /*
1457 * If the retrieve request specifies the memory
1458 * transaction ensure it matches what we expect.
1459 */
1460 WARN("%s: wrong mem transaction flags %x != %x\n",
1461 __func__, req->flags, obj->desc.flags);
1462 ret = FFA_ERROR_INVALID_PARAMETER;
1463 goto err_unlock_all;
1464 }
1465
1466 if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
1467 req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
1468 /*
1469 * Current implementation does not support donate and
1470 * it supports no other flags.
1471 */
1472 WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
1473 ret = FFA_ERROR_INVALID_PARAMETER;
1474 goto err_unlock_all;
1475 }
1476 }
1477
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001478 /* Validate the caller is a valid participant. */
Shruti Gupta20ce06c2022-08-25 14:22:53 +01001479 if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001480 WARN("%s: Invalid endpoint ID (0x%x).\n",
1481 __func__, sp_ctx->sp_id);
1482 ret = FFA_ERROR_INVALID_PARAMETER;
1483 goto err_unlock_all;
1484 }
1485
Marc Bonnicid1907f02022-04-19 17:42:53 +01001486 /* Validate that the provided emad offset and structure is valid.*/
1487 for (size_t i = 0; i < req->emad_count; i++) {
1488 size_t emad_size;
1489 struct ffa_emad_v1_0 *emad;
1490
1491 emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1492 &emad_size);
1493 if (emad == NULL) {
1494 WARN("%s: invalid emad structure.\n", __func__);
1495 ret = FFA_ERROR_INVALID_PARAMETER;
1496 goto err_unlock_all;
1497 }
1498
1499 if ((uintptr_t) emad >= (uintptr_t)
1500 ((uint8_t *) req + total_length)) {
1501 WARN("Invalid emad access.\n");
1502 ret = FFA_ERROR_INVALID_PARAMETER;
1503 goto err_unlock_all;
1504 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001505 }
1506
1507 /*
1508 * Validate all the endpoints match in the case of multiple
1509 * borrowers. We don't mandate that the order of the borrowers
1510 * must match in the descriptors therefore check to see if the
1511 * endpoints match in any order.
1512 */
1513 for (size_t i = 0; i < req->emad_count; i++) {
1514 bool found = false;
Marc Bonnicid1907f02022-04-19 17:42:53 +01001515 size_t emad_size;
1516 struct ffa_emad_v1_0 *emad;
1517 struct ffa_emad_v1_0 *other_emad;
1518
1519 emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1520 &emad_size);
1521 if (emad == NULL) {
1522 ret = FFA_ERROR_INVALID_PARAMETER;
1523 goto err_unlock_all;
1524 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001525
1526 for (size_t j = 0; j < obj->desc.emad_count; j++) {
Marc Bonnicid1907f02022-04-19 17:42:53 +01001527 other_emad = spmc_shmem_obj_get_emad(
1528 &obj->desc, j, MAKE_FFA_VERSION(1, 1),
1529 &emad_size);
1530
1531 if (other_emad == NULL) {
1532 ret = FFA_ERROR_INVALID_PARAMETER;
1533 goto err_unlock_all;
1534 }
1535
1536 if (req->emad_count &&
1537 emad->mapd.endpoint_id ==
1538 other_emad->mapd.endpoint_id) {
Marc Bonnici336630f2022-01-13 11:39:10 +00001539 found = true;
1540 break;
1541 }
1542 }
1543
1544 if (!found) {
1545 WARN("%s: invalid receiver id (0x%x).\n",
Marc Bonnicid1907f02022-04-19 17:42:53 +01001546 __func__, emad->mapd.endpoint_id);
Marc Bonnici336630f2022-01-13 11:39:10 +00001547 ret = FFA_ERROR_INVALID_PARAMETER;
1548 goto err_unlock_all;
1549 }
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001550 }
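	/*
	 * For example, an object shared with borrowers {0x8001, 0x8002} will
	 * accept a retrieve request listing {0x8002, 0x8001}, but the loop
	 * above rejects one listing {0x8001, 0x8003}, since 0x8003 is not a
	 * participant in the transaction.
	 */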
1551
1552 mbox->state = MAILBOX_STATE_FULL;
1553
1554 if (req->emad_count != 0U) {
1555 obj->in_use++;
1556 }
1557
Marc Bonnicid1907f02022-04-19 17:42:53 +01001558 /*
1559	 * If the caller is v1.0, convert the descriptor; otherwise
1560	 * copy it directly.
1561 */
1562 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1563 ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
1564 &copy_size,
1565 &out_desc_size);
1566 if (ret != 0U) {
1567 ERROR("%s: Failed to process descriptor.\n", __func__);
1568 goto err_unlock_all;
1569 }
1570 } else {
1571 copy_size = MIN(obj->desc_size, buf_size);
1572 out_desc_size = obj->desc_size;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001573
Marc Bonnicid1907f02022-04-19 17:42:53 +01001574 memcpy(resp, &obj->desc, copy_size);
1575 }
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001576
Marc Bonnici08f28ef2022-04-19 16:52:59 +01001577 /* Set the NS bit in the response if applicable. */
1578 spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);
1579
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001580 spin_unlock(&spmc_shmem_obj_state.lock);
1581 spin_unlock(&mbox->lock);
1582
Marc Bonnicid1907f02022-04-19 17:42:53 +01001583 SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001584 copy_size, 0, 0, 0, 0, 0);
1585
1586err_unlock_all:
1587 spin_unlock(&spmc_shmem_obj_state.lock);
1588err_unlock_mailbox:
1589 spin_unlock(&mbox->lock);
1590 return spmc_ffa_error_return(handle, ret);
1591}
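
/*
 * Illustrative sketch only (not part of the SPMC): how a borrower such as
 * a secure partition might drive the retrieve path implemented above. The
 * ffa_example_ret structure and ffa_example_call() conduit below are
 * assumptions standing in for the caller's real SMC plumbing; the numeric
 * function IDs are the FF-A ones (FFA_MEM_RETRIEVE_REQ = 0x84000074,
 * FFA_MEM_RETRIEVE_RESP = 0x84000075).
 */
struct ffa_example_ret {
	uint64_t fid;	/* x0: returned function ID, or FFA_ERROR. */
	uint64_t arg1;	/* x1 */
	uint64_t arg2;	/* x2 */
	uint64_t arg3;	/* x3 */
};

/* Assumed conduit: issues the SMC and returns x0-x3. */
struct ffa_example_ret ffa_example_call(uint32_t fid, uint64_t a1,
					uint64_t a2, uint64_t a3,
					uint64_t a4);

static int example_retrieve(size_t req_len, size_t *out_total,
			    size_t *out_fragment)
{
	struct ffa_example_ret r;

	/*
	 * The retrieve request descriptor is assumed to have been written
	 * to the caller's TX buffer already; x1/x2 carry its length.
	 */
	r = ffa_example_call(0x84000074U, req_len, req_len, 0U, 0U);
	if (r.fid != 0x84000075U) {
		return -1;
	}

	/*
	 * On success x1 holds the total descriptor size and x2 the number of
	 * bytes copied to the RX buffer; when x2 < x1 the remainder is
	 * fetched with FFA_MEM_FRAG_RX (see the sketch further below).
	 */
	*out_total = (size_t)r.arg1;
	*out_fragment = (size_t)r.arg2;
	return 0;
}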
1592
1593/**
1594 * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
1595 * @secure_origin:	true if the call originated from the secure world.
1596 * @handle_low: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
1597 * @handle_high: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
1598 * @fragment_offset: Byte offset in descriptor to resume at.
1599 * @sender_id: Bit[31:16]: Endpoint id of sender if client is a
1600 * hypervisor. 0 otherwise.
1601 * @handle:		Handle passed to smc call. Used to return
1602 *			FFA_MEM_FRAG_TX.
1603 *
1604 * Return: @handle on success, error code on failure.
1605 */
1606long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
1607 bool secure_origin,
1608 uint32_t handle_low,
1609 uint32_t handle_high,
1610 uint32_t fragment_offset,
1611 uint32_t sender_id,
1612 void *cookie,
1613 void *handle,
1614 uint64_t flags)
1615{
1616 int ret;
1617	uint8_t *src;
1618 size_t buf_size;
1619 size_t copy_size;
1620 size_t full_copy_size;
1621 uint32_t desc_sender_id;
1622 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1623 uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1624 struct spmc_shmem_obj *obj;
Marc Bonnicid1907f02022-04-19 17:42:53 +01001625 uint32_t ffa_version = get_partition_ffa_version(secure_origin);
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001626
1627 if (!secure_origin) {
1628 WARN("%s: can only be called from swld.\n",
1629 __func__);
1630 return spmc_ffa_error_return(handle,
1631 FFA_ERROR_INVALID_PARAMETER);
1632 }
1633
1634 spin_lock(&spmc_shmem_obj_state.lock);
1635
1636 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1637 if (obj == NULL) {
1638 WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
1639 __func__, mem_handle);
1640 ret = FFA_ERROR_INVALID_PARAMETER;
1641 goto err_unlock_shmem;
1642 }
1643
1644 desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1645 if (sender_id != 0U && sender_id != desc_sender_id) {
1646 WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1647 sender_id, desc_sender_id);
1648 ret = FFA_ERROR_INVALID_PARAMETER;
1649 goto err_unlock_shmem;
1650 }
1651
1652 if (fragment_offset >= obj->desc_size) {
1653 WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
1654 __func__, fragment_offset, obj->desc_size);
1655 ret = FFA_ERROR_INVALID_PARAMETER;
1656 goto err_unlock_shmem;
1657 }
1658
1659 spin_lock(&mbox->lock);
1660
1661 if (mbox->rxtx_page_count == 0U) {
1662 WARN("%s: buffer pair not registered.\n", __func__);
1663 ret = FFA_ERROR_INVALID_PARAMETER;
1664 goto err_unlock_all;
1665 }
1666
1667 if (mbox->state != MAILBOX_STATE_EMPTY) {
1668 WARN("%s: RX Buffer is full!\n", __func__);
1669 ret = FFA_ERROR_DENIED;
1670 goto err_unlock_all;
1671 }
1672
1673 buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1674
1675 mbox->state = MAILBOX_STATE_FULL;
1676
Marc Bonnicid1907f02022-04-19 17:42:53 +01001677 /*
1678	 * If the caller is v1.0, convert the descriptor; otherwise
1679	 * copy it directly.
1680 */
1681 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1682 size_t out_desc_size;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001683
Marc Bonnicid1907f02022-04-19 17:42:53 +01001684 ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
1685 buf_size,
1686 fragment_offset,
1687 &copy_size,
1688 &out_desc_size);
1689 if (ret != 0U) {
1690 ERROR("%s: Failed to process descriptor.\n", __func__);
1691 goto err_unlock_all;
1692 }
1693 } else {
1694 full_copy_size = obj->desc_size - fragment_offset;
1695 copy_size = MIN(full_copy_size, buf_size);
1696
1697		src = (uint8_t *)&obj->desc;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001698
Marc Bonnicid1907f02022-04-19 17:42:53 +01001699 memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
1700 }
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001701
1702 spin_unlock(&mbox->lock);
1703 spin_unlock(&spmc_shmem_obj_state.lock);
1704
1705 SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
1706 copy_size, sender_id, 0, 0, 0);
1707
1708err_unlock_all:
1709 spin_unlock(&mbox->lock);
1710err_unlock_shmem:
1711 spin_unlock(&spmc_shmem_obj_state.lock);
1712 return spmc_ffa_error_return(handle, ret);
1713}
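
/*
 * Illustrative sketch only (not part of the SPMC): reassembling a
 * fragmented descriptor with FFA_MEM_FRAG_RX, using the assumed
 * ffa_example_call() conduit declared in the retrieve sketch above.
 * 0x8400007A is the FFA_MEM_FRAG_RX FID and 0x8400007B the
 * FFA_MEM_FRAG_TX FID; rx_buf is assumed to be the caller's mapped
 * RX buffer.
 */
static int example_frag_rx(uint64_t mem_handle, size_t total_size,
			   size_t received, uint8_t *dst,
			   const uint8_t *rx_buf)
{
	while (received < total_size) {
		struct ffa_example_ret r;
		size_t chunk;

		r = ffa_example_call(0x8400007AU,
				     (uint32_t)mem_handle,
				     (uint32_t)(mem_handle >> 32),
				     received, 0U);
		if (r.fid != 0x8400007BU) {
			return -1;
		}

		/* x3 carries the number of bytes placed in the RX buffer. */
		chunk = MIN((size_t)r.arg3, total_size - received);
		if (chunk == 0U) {
			return -1;
		}

		memcpy(dst + received, rx_buf, chunk);
		received += chunk;
		/* A real client releases the RX buffer before the next call. */
	}
	return 0;
}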
1714
1715/**
1716 * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
1717 * @secure_origin:	true if the call originated from the secure world.
1718 *
1719 * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
1720 * Used by the secure OS to release memory previously shared with it by the
1721 * non-secure OS.
1722 *
1723 * The relinquish descriptor identifying the handle to release must be in the
1724 * client's (secure OS's) transmit buffer.
1723 *
1724 * Return: 0 on success, error code on failure.
1725 */
1726int spmc_ffa_mem_relinquish(uint32_t smc_fid,
1727 bool secure_origin,
1728 uint32_t handle_low,
1729 uint32_t handle_high,
1730 uint32_t fragment_offset,
1731 uint32_t sender_id,
1732 void *cookie,
1733 void *handle,
1734 uint64_t flags)
1735{
1736 int ret;
1737 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1738 struct spmc_shmem_obj *obj;
1739 const struct ffa_mem_relinquish_descriptor *req;
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001740 struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001741
1742 if (!secure_origin) {
1743 WARN("%s: unsupported relinquish direction.\n", __func__);
1744 return spmc_ffa_error_return(handle,
1745 FFA_ERROR_INVALID_PARAMETER);
1746 }
1747
1748 spin_lock(&mbox->lock);
1749
1750 if (mbox->rxtx_page_count == 0U) {
1751 WARN("%s: buffer pair not registered.\n", __func__);
1752 ret = FFA_ERROR_INVALID_PARAMETER;
1753 goto err_unlock_mailbox;
1754 }
1755
1756 req = mbox->tx_buffer;
1757
1758 if (req->flags != 0U) {
1759 WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
1760 ret = FFA_ERROR_INVALID_PARAMETER;
1761 goto err_unlock_mailbox;
1762 }
1763
Marc Bonnici336630f2022-01-13 11:39:10 +00001764 if (req->endpoint_count == 0) {
1765 WARN("%s: endpoint count cannot be 0.\n", __func__);
1766 ret = FFA_ERROR_INVALID_PARAMETER;
1767 goto err_unlock_mailbox;
1768 }
1769
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001770 spin_lock(&spmc_shmem_obj_state.lock);
1771
1772 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1773 if (obj == NULL) {
1774 ret = FFA_ERROR_INVALID_PARAMETER;
1775 goto err_unlock_all;
1776 }
1777
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001778 /*
1779 * Validate the endpoint ID was populated correctly. We don't currently
1780	 * support proxy endpoints, so the endpoint count should always be 1.
1781 */
1782 if (req->endpoint_count != 1U) {
1783 WARN("%s: unsupported endpoint count %u != 1\n", __func__,
1784 req->endpoint_count);
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001785 ret = FFA_ERROR_INVALID_PARAMETER;
1786 goto err_unlock_all;
1787 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001788
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001789 /* Validate provided endpoint ID matches the partition ID. */
1790 if (req->endpoint_array[0] != sp_ctx->sp_id) {
1791 WARN("%s: invalid endpoint ID %u != %u\n", __func__,
1792 req->endpoint_array[0], sp_ctx->sp_id);
1793 ret = FFA_ERROR_INVALID_PARAMETER;
1794 goto err_unlock_all;
1795 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001796
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001797 /* Validate the caller is a valid participant. */
Shruti Gupta20ce06c2022-08-25 14:22:53 +01001798 if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001799 WARN("%s: Invalid endpoint ID (0x%x).\n",
1800 __func__, req->endpoint_array[0]);
1801 ret = FFA_ERROR_INVALID_PARAMETER;
1802 goto err_unlock_all;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001803 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001804
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001805 if (obj->in_use == 0U) {
1806 ret = FFA_ERROR_INVALID_PARAMETER;
1807 goto err_unlock_all;
1808 }
1809 obj->in_use--;
1810
1811 spin_unlock(&spmc_shmem_obj_state.lock);
1812 spin_unlock(&mbox->lock);
1813
1814 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1815
1816err_unlock_all:
1817 spin_unlock(&spmc_shmem_obj_state.lock);
1818err_unlock_mailbox:
1819 spin_unlock(&mbox->lock);
1820 return spmc_ffa_error_return(handle, ret);
1821}
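
/*
 * Illustrative sketch only (not part of the SPMC): a secure partition
 * relinquishing a handle. The relinquish descriptor is written to the
 * SP's TX buffer before issuing FFA_MEM_RELINQUISH (0x84000076);
 * 0x84000061 is FFA_SUCCESS_SMC32. ffa_example_call() is the assumed
 * conduit from the retrieve sketch and own_id the SP's FF-A endpoint ID.
 */
static int example_relinquish(void *tx_buf, uint64_t mem_handle,
			      uint16_t own_id)
{
	struct ffa_mem_relinquish_descriptor *rel = tx_buf;
	struct ffa_example_ret r;

	rel->handle = mem_handle;
	rel->flags = 0U;		/* No flags are supported (see above). */
	rel->endpoint_count = 1U;	/* Proxy endpoints are unsupported. */
	rel->endpoint_array[0] = own_id;

	r = ffa_example_call(0x84000076U, 0U, 0U, 0U, 0U);
	return (r.fid == 0x84000061U) ? 0 : -1;
}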
1822
1823/**
1824 * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
1825 * @secure_origin:	true if the call originated from the secure world.
1826 * @handle_low: Unique handle of shared memory object to reclaim. Bit[31:0].
1827 * @handle_high: Unique handle of shared memory object to reclaim.
1828 * Bit[63:32].
1829 * @mem_flags:	Memory transaction flags; no flags are currently
1830 *		supported, so this must be zero.
1830 *
1831 * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
1832 * Used by the non-secure OS to reclaim memory previously shared with the
1833 * secure OS.
1833 *
1834 * Return: 0 on success, error code on failure.
1835 */
1836int spmc_ffa_mem_reclaim(uint32_t smc_fid,
1837 bool secure_origin,
1838 uint32_t handle_low,
1839 uint32_t handle_high,
1840 uint32_t mem_flags,
1841 uint64_t x4,
1842 void *cookie,
1843 void *handle,
1844 uint64_t flags)
1845{
1846 int ret;
1847 struct spmc_shmem_obj *obj;
1848 uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1849
1850 if (secure_origin) {
1851 WARN("%s: unsupported reclaim direction.\n", __func__);
1852 return spmc_ffa_error_return(handle,
1853 FFA_ERROR_INVALID_PARAMETER);
1854 }
1855
1856 if (mem_flags != 0U) {
1857 WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
1858 return spmc_ffa_error_return(handle,
1859 FFA_ERROR_INVALID_PARAMETER);
1860 }
1861
1862 spin_lock(&spmc_shmem_obj_state.lock);
1863
1864 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1865 if (obj == NULL) {
1866 ret = FFA_ERROR_INVALID_PARAMETER;
1867 goto err_unlock;
1868 }
1869 if (obj->in_use != 0U) {
1870 ret = FFA_ERROR_DENIED;
1871 goto err_unlock;
1872 }
Marc Bonnici503320e2022-02-21 15:02:36 +00001873
Marc Bonnici82e28f12022-10-18 13:39:48 +01001874 if (obj->desc_filled != obj->desc_size) {
1875 WARN("%s: incomplete object desc filled %zu < size %zu\n",
1876 __func__, obj->desc_filled, obj->desc_size);
1877 ret = FFA_ERROR_INVALID_PARAMETER;
1878 goto err_unlock;
1879 }
1880
Marc Bonnici503320e2022-02-21 15:02:36 +00001881 /* Allow for platform specific operations to be performed. */
1882 ret = plat_spmc_shmem_reclaim(&obj->desc);
1883 if (ret != 0) {
1884 goto err_unlock;
1885 }
1886
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001887 spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1888 spin_unlock(&spmc_shmem_obj_state.lock);
1889
1890 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1891
1892err_unlock:
1893 spin_unlock(&spmc_shmem_obj_state.lock);
1894 return spmc_ffa_error_return(handle, ret);
1895}
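
/*
 * Illustrative sketch only (not part of the SPMC): the non-secure owner
 * reclaiming a handle once every borrower has relinquished it.
 * 0x84000077 is the FFA_MEM_RECLAIM FID and 0x84000061 FFA_SUCCESS_SMC32;
 * ffa_example_call() is the assumed conduit from the retrieve sketch.
 */
static int example_reclaim(uint64_t mem_handle)
{
	struct ffa_example_ret r;

	r = ffa_example_call(0x84000077U,
			     (uint32_t)mem_handle,
			     (uint32_t)(mem_handle >> 32),
			     0U /* flags: must be zero, see above */, 0U);

	/* While any retrieval is outstanding the SPMC returns FFA_ERROR_DENIED. */
	return (r.fid == 0x84000061U) ? 0 : -1;
}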