/*
 * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <assert.h>
#include <errno.h>
#include <inttypes.h>

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/object_pool.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <services/ffa_svc.h>
#include "spmc.h"
#include "spmc_shared_mem.h"

#include <platform_def.h>

/**
 * struct spmc_shmem_obj - Shared memory object.
 * @desc_size:		Size of @desc.
 * @desc_filled:	Size of @desc already received.
 * @in_use:		Number of clients that have called ffa_mem_retrieve_req
 *			without a matching ffa_mem_relinquish call.
 * @desc:		FF-A memory region descriptor passed in ffa_mem_share.
 */
struct spmc_shmem_obj {
	size_t desc_size;
	size_t desc_filled;
	size_t in_use;
	struct ffa_mtd desc;
};

/*
 * Declare our data structure to store the metadata of memory share requests.
 * The main datastore is allocated on a per-platform basis to ensure enough
 * storage can be made available.
 * The address of the data store will be populated by the SPMC during its
 * initialization.
 */

struct spmc_shmem_obj_state spmc_shmem_obj_state = {
	/* Set start value for handle so top 32 bits are needed quickly. */
	.next_handle = 0xffffffc0U,
};
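
/*
 * Illustrative note (not from the original sources): starting at 0xffffffc0
 * means the handle crosses the 32-bit boundary after only 64 allocations
 * (0xffffffc0 + 0x40 == 0x1_00000000), so any caller that mishandles the
 * split of the handle into the two 32-bit halves used by
 * FFA_MEM_FRAG_RX/FFA_MEM_FRAG_TX fails fast rather than much later.
 */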
/**
 * spmc_shmem_obj_size - Convert from descriptor size to object size.
 * @desc_size:	Size of struct ffa_memory_region_descriptor object.
 *
 * Return: Size of struct spmc_shmem_obj object.
 */
static size_t spmc_shmem_obj_size(size_t desc_size)
{
	return desc_size + offsetof(struct spmc_shmem_obj, desc);
}
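
/*
 * Worked example (illustrative; assumes a 64-bit build where
 * offsetof(struct spmc_shmem_obj, desc) == 24): a 0x100-byte descriptor
 * consumes spmc_shmem_obj_size(0x100) == 0x118 bytes of datastore, because
 * the variable-length descriptor is stored in-line after the bookkeeping
 * fields.
 */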

/**
 * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
 * @state:	Global state.
 * @desc_size:	Size of struct ffa_memory_region_descriptor object that
 *		allocated object will hold.
 *
 * Return: Pointer to newly allocated object, or %NULL if there is not enough
 *         space left. The returned pointer is only valid while @state is
 *         locked; to use it again after unlocking @state,
 *         spmc_shmem_obj_lookup must be called.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
{
	struct spmc_shmem_obj *obj;
	size_t free = state->data_size - state->allocated;
	size_t obj_size;

	if (state->data == NULL) {
		ERROR("Missing shmem datastore!\n");
		return NULL;
	}

	obj_size = spmc_shmem_obj_size(desc_size);

	/* Ensure the obj size has not overflowed. */
	if (obj_size < desc_size) {
		WARN("%s(0x%zx) desc_size overflow\n",
		     __func__, desc_size);
		return NULL;
	}

	if (obj_size > free) {
		WARN("%s(0x%zx) failed, free 0x%zx\n",
		     __func__, desc_size, free);
		return NULL;
	}
	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
	obj->desc = (struct ffa_mtd) {0};
	obj->desc_size = desc_size;
	obj->desc_filled = 0;
	obj->in_use = 0;
	state->allocated += obj_size;
	return obj;
}
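
/*
 * Usage sketch (illustrative): the allocator is a bump pointer over a
 * contiguous datastore, so objects must be created and used under the
 * global lock:
 *
 *	spin_lock(&spmc_shmem_obj_state.lock);
 *	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
 *	if (obj != NULL) {
 *		... populate obj->desc while the lock is held ...
 *	}
 *	spin_unlock(&spmc_shmem_obj_state.lock);
 */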

/**
 * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
 * @state:	Global state.
 * @obj:	Object to free.
 *
 * Release memory used by @obj. Other objects may move, so on return all
 * pointers to struct spmc_shmem_obj object should be considered invalid, not
 * just @obj.
 *
 * The current implementation always compacts the remaining objects to simplify
 * the allocator and to avoid fragmentation.
 */

static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
				struct spmc_shmem_obj *obj)
{
	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
	uint8_t *shift_dest = (uint8_t *)obj;
	uint8_t *shift_src = shift_dest + free_size;
	size_t shift_size = state->allocated - (shift_src - state->data);

	if (shift_size != 0U) {
		memmove(shift_dest, shift_src, shift_size);
	}
	state->allocated -= free_size;
}

/**
 * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
 * @state:	Global state.
 * @handle:	Unique handle of object to return.
 *
 * Return: struct spmc_shmem_obj object with handle matching @handle.
 *         %NULL, if no object in @state->data has a matching handle.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
{
	uint8_t *curr = state->data;

	while (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		if (obj->desc.handle == handle) {
			return obj;
		}
		curr += spmc_shmem_obj_size(obj->desc_size);
	}
	return NULL;
}
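
/*
 * Illustrative pattern: spmc_shmem_obj_free() compacts the datastore, so a
 * handle is the only reference that survives a free. Mirroring the pattern
 * used in spmc_ffa_fill_desc(), where stale_obj names some other object
 * being released:
 *
 *	uint64_t mem_handle = obj->desc.handle;
 *
 *	spmc_shmem_obj_free(&spmc_shmem_obj_state, stale_obj);
 *	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
 */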
156
Marc Bonnicic31ec9e2022-01-21 10:34:55 +0000157/**
158 * spmc_shmem_obj_get_next - Get the next memory object from an offset.
159 * @offset: Offset used to track which objects have previously been
160 * returned.
161 *
162 * Return: the next struct spmc_shmem_obj_state object from the provided
163 * offset.
164 * %NULL, if there are no more objects.
165 */
166static struct spmc_shmem_obj *
167spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
168{
169 uint8_t *curr = state->data + *offset;
170
171 if (curr - state->data < state->allocated) {
172 struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
173
174 *offset += spmc_shmem_obj_size(obj->desc_size);
175
176 return obj;
177 }
178 return NULL;
179}
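
/*
 * Iteration idiom (illustrative): the cursor is a byte offset rather than a
 * pointer, so it remains meaningful across calls as long as no object is
 * freed mid-walk:
 *
 *	size_t obj_offset = 0;
 *	struct spmc_shmem_obj *o;
 *
 *	while ((o = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
 *					    &obj_offset)) != NULL) {
 *		... inspect o ...
 *	}
 */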
180
Marc Bonnicid1907f02022-04-19 17:42:53 +0100181/*******************************************************************************
182 * FF-A memory descriptor helper functions.
183 ******************************************************************************/
184/**
185 * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
186 * clients FF-A version.
187 * @desc: The memory transaction descriptor.
188 * @index: The index of the emad element to be accessed.
189 * @ffa_version: FF-A version of the provided structure.
190 * @emad_size: Will be populated with the size of the returned emad
191 * descriptor.
192 * Return: A pointer to the requested emad structure.
193 */
194static void *
195spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
196 uint32_t ffa_version, size_t *emad_size)
197{
198 uint8_t *emad;
Demi Marie Obenour32167a02023-01-11 10:51:01 -0500199
200 assert(index < desc->emad_count);
201
Marc Bonnicid1907f02022-04-19 17:42:53 +0100202 /*
203 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
204 * format, otherwise assume it is a v1.1 format.
205 */
206 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
Demi Marie Obenour57bf10c2022-12-31 11:11:18 -0500207 emad = (uint8_t *)desc + offsetof(struct ffa_mtd_v1_0, emad);
Marc Bonnicid1907f02022-04-19 17:42:53 +0100208 *emad_size = sizeof(struct ffa_emad_v1_0);
209 } else {
Demi Marie Obenour57bf10c2022-12-31 11:11:18 -0500210 assert(is_aligned(desc->emad_offset, 16));
Marc Bonnicid1907f02022-04-19 17:42:53 +0100211 emad = ((uint8_t *) desc + desc->emad_offset);
212 *emad_size = desc->emad_size;
213 }
Demi Marie Obenour57bf10c2022-12-31 11:11:18 -0500214
215 assert(((uint64_t)index * (uint64_t)*emad_size) <= UINT32_MAX);
Marc Bonnicid1907f02022-04-19 17:42:53 +0100216 return (emad + (*emad_size * index));
217}
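
/*
 * Worked example (illustrative; the field values are assumptions): for a
 * v1.1 descriptor with emad_offset == 48 and emad_size == 16, the i-th emad
 * lives at (uint8_t *)desc + 48 + (16 * i). For a v1.0 caller the array
 * instead starts at the fixed offsetof(struct ffa_mtd_v1_0, emad) and is
 * strided by sizeof(struct ffa_emad_v1_0).
 */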

/**
 * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
 *				 FF-A version of the descriptor.
 * @obj:	Object containing ffa_memory_region_descriptor.
 *
 * Return: struct ffa_comp_mrd object corresponding to the composite memory
 *	   region descriptor.
 */
static struct ffa_comp_mrd *
spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
{
	size_t emad_size;
	/*
	 * The comp_mrd_offset field of the emad descriptor remains consistent
	 * between FF-A versions, therefore we can use the v1.0 descriptor here
	 * in all cases.
	 */
	struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
							     ffa_version,
							     &emad_size);

	/* Ensure the composite descriptor offset is aligned. */
	if (!is_aligned(emad->comp_mrd_offset, 8)) {
		WARN("Unaligned composite memory region descriptor offset.\n");
		return NULL;
	}

	return (struct ffa_comp_mrd *)
	       ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
}

/**
 * spmc_shmem_obj_ffa_constituent_size - Calculate variable size part of obj.
 * @obj:		Object containing ffa_memory_region_descriptor.
 * @ffa_version:	FF-A version of the provided descriptor.
 *
 * Return: Size of ffa_constituent_memory_region_descriptors in @obj.
 */
static size_t
spmc_shmem_obj_ffa_constituent_size(struct spmc_shmem_obj *obj,
				    uint32_t ffa_version)
{
	struct ffa_comp_mrd *comp_mrd;

	comp_mrd = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);
	if (comp_mrd == NULL) {
		return 0;
	}
	return comp_mrd->address_range_count * sizeof(struct ffa_cons_mrd);
}

/**
 * spmc_shmem_obj_validate_id - Validate a partition ID is participating in
 *				a given memory transaction.
 * @obj:	The shared memory object containing the descriptor
 *		of the memory transaction.
 * @sp_id:	Partition ID to validate.
 * Return: true if ID is valid, else false.
 */
bool spmc_shmem_obj_validate_id(struct spmc_shmem_obj *obj, uint16_t sp_id)
{
	bool found = false;
	struct ffa_mtd *desc = &obj->desc;
	size_t desc_size = obj->desc_size;

	/* Validate the partition is a valid participant. */
	for (unsigned int i = 0U; i < desc->emad_count; i++) {
		size_t emad_size;
		struct ffa_emad_v1_0 *emad;

		emad = spmc_shmem_obj_get_emad(desc, i,
					       MAKE_FFA_VERSION(1, 1),
					       &emad_size);
		/*
		 * Validate the calculated emad address resides within the
		 * descriptor.
		 */
		if ((emad == NULL) || (uintptr_t) emad >=
		    (uintptr_t)((uint8_t *) desc + desc_size)) {
			VERBOSE("Invalid emad.\n");
			break;
		}
		if (sp_id == emad->mapd.endpoint_id) {
			found = true;
			break;
		}
	}
	return found;
}
307
Marc Bonnicic31ec9e2022-01-21 10:34:55 +0000308/*
309 * Compare two memory regions to determine if any range overlaps with another
310 * ongoing memory transaction.
311 */
312static bool
313overlapping_memory_regions(struct ffa_comp_mrd *region1,
314 struct ffa_comp_mrd *region2)
315{
316 uint64_t region1_start;
317 uint64_t region1_size;
318 uint64_t region1_end;
319 uint64_t region2_start;
320 uint64_t region2_size;
321 uint64_t region2_end;
322
323 assert(region1 != NULL);
324 assert(region2 != NULL);
325
326 if (region1 == region2) {
327 return true;
328 }
329
330 /*
331 * Check each memory region in the request against existing
332 * transactions.
333 */
334 for (size_t i = 0; i < region1->address_range_count; i++) {
335
336 region1_start = region1->address_range_array[i].address;
337 region1_size =
338 region1->address_range_array[i].page_count *
339 PAGE_SIZE_4KB;
340 region1_end = region1_start + region1_size;
341
342 for (size_t j = 0; j < region2->address_range_count; j++) {
343
344 region2_start = region2->address_range_array[j].address;
345 region2_size =
346 region2->address_range_array[j].page_count *
347 PAGE_SIZE_4KB;
348 region2_end = region2_start + region2_size;
349
Marc Bonnici79669bb2022-10-18 13:50:04 +0100350 /* Check if regions are not overlapping. */
351 if (!((region2_end <= region1_start) ||
352 (region1_end <= region2_start))) {
Marc Bonnicic31ec9e2022-01-21 10:34:55 +0000353 WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
354 region1_start, region1_end,
355 region2_start, region2_end);
356 return true;
357 }
358 }
359 }
360 return false;
361}
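
/*
 * Worked example (illustrative): with 4KB pages, [0x8000, 0x9000) and
 * [0x9000, 0xa000) do not overlap, since region1_end <= region2_start holds
 * for the half-open intervals; [0x8000, 0xa000) and [0x9000, 0xb000) overlap,
 * since neither region2_end <= region1_start nor region1_end <= region2_start
 * holds.
 */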

/*******************************************************************************
 * FF-A v1.0 Memory Descriptor Conversion Helpers.
 ******************************************************************************/
/**
 * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
 *				       converted descriptor.
 * @orig:	The original v1.0 memory transaction descriptor.
 * @desc_size:	The size of the original v1.0 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.1 format.
 */
static size_t
spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
{
	size_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = orig->emad;

	/* Get the size of the v1.1 descriptor. */
	size += sizeof(struct ffa_mtd);

	/* Add the size of the emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrds. */
	size += sizeof(struct ffa_comp_mrd);

	/* Add the size of the constituent mrds. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Check the calculated address is within the memory descriptor. */
	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
	    (uintptr_t)((uint8_t *) orig + desc_size)) {
		return 0;
	}
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}
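
/*
 * Worked example (illustrative; the struct sizes are assumptions used only
 * for the arithmetic): with sizeof(struct ffa_mtd) == 48,
 * sizeof(struct ffa_emad_v1_0) == 16, sizeof(struct ffa_comp_mrd) == 16 and
 * sizeof(struct ffa_cons_mrd) == 16, a v1.0 descriptor with two emads and
 * three constituent ranges needs 48 + (2 * 16) + 16 + (3 * 16) = 144 bytes
 * in the v1.1 layout.
 */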

/**
 * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
 *				       converted descriptor.
 * @orig:	The original v1.1 memory transaction descriptor.
 * @desc_size:	The size of the original v1.1 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.0 format.
 */
static size_t
spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
{
	size_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
					   ((uint8_t *) orig +
					    orig->emad_offset);

	/* Get the size of the v1.0 descriptor. */
	size += sizeof(struct ffa_mtd_v1_0);

	/* Add the size of the v1.0 emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrds. */
	size += sizeof(struct ffa_comp_mrd);

	/* Add the size of the constituent mrds. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Check the calculated address is within the memory descriptor. */
	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
	    (uintptr_t)((uint8_t *) orig + desc_size)) {
		return 0;
	}
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}

/**
 * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
 * @out_obj:	The shared memory object to populate the converted descriptor.
 * @orig:	The shared memory object containing the v1.0 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
				     struct spmc_shmem_obj *orig)
{
	struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
	struct ffa_mtd *out = &out_obj->desc;
	struct ffa_emad_v1_0 *emad_array_in;
	struct ffa_emad_v1_0 *emad_array_out;
	struct ffa_comp_mrd *mrd_in;
	struct ffa_comp_mrd *mrd_out;

	size_t mrd_in_offset;
	size_t mrd_out_offset;
	size_t mrd_size = 0;

	/* Populate the new descriptor format from the v1.0 struct. */
	out->sender_id = mtd_orig->sender_id;
	out->memory_region_attributes = mtd_orig->memory_region_attributes;
	out->flags = mtd_orig->flags;
	out->handle = mtd_orig->handle;
	out->tag = mtd_orig->tag;
	out->emad_count = mtd_orig->emad_count;
	out->emad_size = sizeof(struct ffa_emad_v1_0);

	/*
	 * We will locate the emad descriptors directly after the ffa_mtd
	 * struct. This will be 8-byte aligned.
	 */
	out->emad_offset = sizeof(struct ffa_mtd);

	emad_array_in = mtd_orig->emad;
	emad_array_out = (struct ffa_emad_v1_0 *)
			 ((uint8_t *) out + out->emad_offset);

	/* Copy across the emad structs. */
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		/* Bound check for emad array. */
		if (((uint8_t *)emad_array_in + sizeof(struct ffa_emad_v1_0)) >
		    ((uint8_t *) mtd_orig + orig->desc_size)) {
			VERBOSE("%s: Invalid mtd structure.\n", __func__);
			return false;
		}
		memcpy(&emad_array_out[i], &emad_array_in[i],
		       sizeof(struct ffa_emad_v1_0));
	}

	/* Place the mrd descriptors after the end of the emad descriptors. */
	mrd_in_offset = emad_array_in->comp_mrd_offset;
	mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

	/* Add the size of the composite memory region descriptor. */
	mrd_size += sizeof(struct ffa_comp_mrd);

	/* Find the mrd descriptor. */
	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

	/* Add the size of the constituent memory region descriptors. */
	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

	/*
	 * Update the offset in the emads by the delta between the input and
	 * output addresses.
	 */
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		emad_array_out[i].comp_mrd_offset =
			emad_array_in[i].comp_mrd_offset +
			(mrd_out_offset - mrd_in_offset);
	}

	/* Verify that we stay within bound of the memory descriptors. */
	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
	    (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
		ERROR("%s: Invalid mrd structure.\n", __func__);
		return false;
	}

	/* Copy the mrd descriptors directly. */
	memcpy(mrd_out, mrd_in, mrd_size);

	return true;
}

/**
 * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
 *				  a v1.0 memory object.
 * @out_obj:	The shared memory object to populate the v1.0 descriptor.
 * @orig:	The shared memory object containing the v1.1 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
			     struct spmc_shmem_obj *orig)
{
	struct ffa_mtd *mtd_orig = &orig->desc;
	struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
	struct ffa_emad_v1_0 *emad_in;
	struct ffa_emad_v1_0 *emad_array_in;
	struct ffa_emad_v1_0 *emad_array_out;
	struct ffa_comp_mrd *mrd_in;
	struct ffa_comp_mrd *mrd_out;

	size_t mrd_in_offset;
	size_t mrd_out_offset;
	size_t emad_out_array_size;
	size_t mrd_size = 0;
	size_t orig_desc_size = orig->desc_size;

	/* Populate the v1.0 descriptor format from the v1.1 struct. */
	out->sender_id = mtd_orig->sender_id;
	out->memory_region_attributes = mtd_orig->memory_region_attributes;
	out->flags = mtd_orig->flags;
	out->handle = mtd_orig->handle;
	out->tag = mtd_orig->tag;
	out->emad_count = mtd_orig->emad_count;

	/* Determine the location of the emad array in both descriptors. */
	emad_array_in = (struct ffa_emad_v1_0 *)
			((uint8_t *) mtd_orig + mtd_orig->emad_offset);
	emad_array_out = out->emad;

	/* Copy across the emad structs. */
	emad_in = emad_array_in;
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		/* Bound check for emad array. */
		if (((uint8_t *)emad_in + sizeof(struct ffa_emad_v1_0)) >
		    ((uint8_t *) mtd_orig + orig_desc_size)) {
			VERBOSE("%s: Invalid mtd structure.\n", __func__);
			return false;
		}
		memcpy(&emad_array_out[i], emad_in,
		       sizeof(struct ffa_emad_v1_0));

		/* emad_size is in bytes, so advance at byte granularity. */
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *)emad_in + mtd_orig->emad_size);
	}

	/* Place the mrd descriptors after the end of the emad descriptors. */
	emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;

	mrd_out_offset = (uint8_t *) out->emad - (uint8_t *) out +
			 emad_out_array_size;

	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

	mrd_in_offset = mtd_orig->emad_offset +
			(mtd_orig->emad_size * mtd_orig->emad_count);

	/* Add the size of the composite memory region descriptor. */
	mrd_size += sizeof(struct ffa_comp_mrd);

	/* Find the mrd descriptor. */
	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

	/* Add the size of the constituent memory region descriptors. */
	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

	/*
	 * Update the offset in the emads by the delta between the input and
	 * output addresses.
	 */
	emad_in = emad_array_in;

	for (unsigned int i = 0U; i < out->emad_count; i++) {
		emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
						    (mrd_out_offset -
						     mrd_in_offset);
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *)emad_in + mtd_orig->emad_size);
	}

	/* Verify that we stay within bound of the memory descriptors. */
	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
	    (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
		ERROR("%s: Invalid mrd structure.\n", __func__);
		return false;
	}

	/* Copy the mrd descriptors directly. */
	memcpy(mrd_out, mrd_in, mrd_size);

	return true;
}

/**
 * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
 *				       the v1.0 format and populates the
 *				       provided buffer.
 * @dst:		Buffer to populate v1.0 ffa_memory_region_descriptor.
 * @orig_obj:		Object containing v1.1 ffa_memory_region_descriptor.
 * @buf_size:		Size of the buffer to populate.
 * @offset:		The offset of the converted descriptor to copy.
 * @copy_size:		Will be populated with the number of bytes copied.
 * @v1_0_desc_size:	Will be populated with the total size of the v1.0
 *			descriptor.
 *
 * Return: 0 if conversion and population succeeded.
 * Note: This function invalidates the reference to @orig_obj, therefore
 * `spmc_shmem_obj_lookup` must be called if further usage is required.
 */
static uint32_t
spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
				  size_t buf_size, size_t offset,
				  size_t *copy_size, size_t *v1_0_desc_size)
{
	struct spmc_shmem_obj *v1_0_obj;

	/* Calculate the size that the v1.0 descriptor will require. */
	*v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
			  &orig_obj->desc, orig_obj->desc_size);

	if (*v1_0_desc_size == 0) {
		ERROR("%s: cannot determine size of descriptor.\n",
		      __func__);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/* Get a new obj to store the v1.0 descriptor. */
	v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
					*v1_0_desc_size);

	if (!v1_0_obj) {
		return FFA_ERROR_NO_MEMORY;
	}

	/* Perform the conversion from v1.1 to v1.0. */
	if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
		spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	*copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
	memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);

	/*
	 * We're finished with the v1.0 descriptor for now so free it.
	 * Note that this will invalidate any references to the v1.1
	 * descriptor.
	 */
	spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);

	return 0;
}

static int
spmc_validate_mtd_start(struct ffa_mtd *desc, uint32_t ffa_version,
			size_t fragment_length, size_t total_length)
{
	unsigned long long emad_end;
	unsigned long long emad_size;
	unsigned long long emad_offset;
	unsigned int min_desc_size;

	/* Determine the appropriate minimum descriptor size. */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		min_desc_size = sizeof(struct ffa_mtd_v1_0);
	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
		min_desc_size = sizeof(struct ffa_mtd);
	} else {
		return FFA_ERROR_INVALID_PARAMETER;
	}
	if (fragment_length < min_desc_size) {
		WARN("%s: invalid length %zu < %u\n", __func__, fragment_length,
		     min_desc_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	if (desc->emad_count == 0U) {
		WARN("%s: unsupported attribute desc count %u.\n",
		     __func__, desc->emad_count);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/*
	 * If the caller is using FF-A v1.0, interpret the descriptor as a v1.0
	 * format, otherwise assume it is a v1.1 format.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		emad_offset = emad_size = sizeof(struct ffa_emad_v1_0);
	} else {
		if (!is_aligned(desc->emad_offset, 16)) {
			WARN("%s: Emad offset %" PRIx32 " is not 16-byte aligned.\n",
			     __func__, desc->emad_offset);
			return FFA_ERROR_INVALID_PARAMETER;
		}
		if (desc->emad_offset < sizeof(struct ffa_mtd)) {
			WARN("%s: Emad offset too small: 0x%" PRIx32 " < 0x%zx.\n",
			     __func__, desc->emad_offset,
			     sizeof(struct ffa_mtd));
			return FFA_ERROR_INVALID_PARAMETER;
		}
		emad_offset = desc->emad_offset;
		if (desc->emad_size < sizeof(struct ffa_emad_v1_0)) {
			WARN("%s: Bad emad size (%" PRIu32 " < %zu).\n", __func__,
			     desc->emad_size, sizeof(struct ffa_emad_v1_0));
			return FFA_ERROR_INVALID_PARAMETER;
		}
		if (!is_aligned(desc->emad_size, 16)) {
			WARN("%s: Emad size 0x%" PRIx32 " is not 16-byte aligned.\n",
			     __func__, desc->emad_size);
			return FFA_ERROR_INVALID_PARAMETER;
		}
		emad_size = desc->emad_size;
	}

	/*
	 * Overflow is impossible: the arithmetic happens in at least 64-bit
	 * precision, but all of the operands are bounded by UINT32_MAX, and
	 * ((2^32 - 1)^2 + (2^32 - 1) + (2^32 - 1)) = ((2^32 - 1) * (2^32 + 1))
	 * = (2^64 - 1).
	 */
	CASSERT(sizeof(desc->emad_count) == 4, assert_emad_count_max_too_large);
	emad_end = (desc->emad_count * (unsigned long long)emad_size) +
		   (unsigned long long)sizeof(struct ffa_comp_mrd) +
		   (unsigned long long)emad_offset;

	if (emad_end > total_length) {
		WARN("%s: Composite memory region extends beyond descriptor: 0x%llx > 0x%zx\n",
		     __func__, emad_end, total_length);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	return 0;
}
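
/*
 * Worked example (illustrative; sizeof(struct ffa_comp_mrd) == 16 is an
 * assumption): a v1.1 header with emad_offset == 48, emad_size == 16 and
 * emad_count == 2 yields emad_end = (2 * 16) + 16 + 48 = 96, so any
 * total_length below 96 is rejected before the emad array is dereferenced.
 */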

/**
 * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
 * @obj:	  Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, -EINVAL if constituent_memory_region_descriptor
 * offset or count is invalid.
 */
static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
				uint32_t ffa_version)
{
	const struct ffa_emad_v1_0 *emad;
	size_t emad_size;
	uint32_t comp_mrd_offset = 0;

	if (obj->desc_filled != obj->desc_size) {
		ERROR("BUG: %s called on incomplete object (%zu != %zu)\n",
		      __func__, obj->desc_filled, obj->desc_size);
		panic();
	}

	if (spmc_validate_mtd_start(&obj->desc, ffa_version,
				    obj->desc_filled, obj->desc_size)) {
		ERROR("BUG: %s called on object with corrupt memory region descriptor\n",
		      __func__);
		panic();
	}

	emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
				       ffa_version, &emad_size);

	for (size_t emad_num = 0; emad_num < obj->desc.emad_count; emad_num++) {
		size_t size;
		size_t count;
		size_t expected_size;
		uint64_t total_page_count;
		size_t header_emad_size;
		uint32_t offset;
		struct ffa_comp_mrd *comp;
		ffa_endpoint_id16_t ep_id;

		/*
		 * Validate the calculated emad address resides within the
		 * descriptor.
		 */
		if ((uintptr_t) emad >
		    ((uintptr_t) &obj->desc + obj->desc_size - emad_size)) {
			ERROR("BUG: Invalid emad access not detected earlier.\n");
			panic();
		}

		offset = emad->comp_mrd_offset;

		/*
		 * If a partition ID resides in the secure world validate that
		 * the partition ID is for a known partition. Ignore any
		 * partition ID belonging to the normal world as it is assumed
		 * the Hypervisor will have validated these.
		 */
		ep_id = emad->mapd.endpoint_id;
		if (ffa_is_secure_world_id(ep_id)) {
			if (spmc_get_sp_ctx(ep_id) == NULL) {
				WARN("%s: Invalid receiver id 0x%x\n",
				     __func__, ep_id);
				return -EINVAL;
			}
		}

		header_emad_size = (size_t)((uint8_t *)emad - (uint8_t *)&obj->desc) +
				   (obj->desc.emad_count * emad_size);

		/* Advance to the next emad; the fields used below were read above. */
		emad = (const struct ffa_emad_v1_0 *)((const uint8_t *)emad + emad_size);

		/*
		 * The offset provided to the composite memory region descriptor
		 * should be consistent across endpoint descriptors. Store the
		 * first entry and compare against subsequent entries.
		 */
		if (comp_mrd_offset == 0) {
			comp_mrd_offset = offset;
		} else {
			if (comp_mrd_offset != offset) {
				ERROR("%s: mismatching offsets provided, %u != %u\n",
				      __func__, offset, comp_mrd_offset);
				return -EINVAL;
			}
			continue; /* Remainder only executed on first iteration. */
		}

		if (offset < header_emad_size) {
			WARN("%s: invalid object, offset %u < header + emad %zu\n",
			     __func__, offset, header_emad_size);
			return -EINVAL;
		}

		size = obj->desc_size;

		if (offset > size) {
			WARN("%s: invalid object, offset %u > total size %zu\n",
			     __func__, offset, obj->desc_size);
			return -EINVAL;
		}
		size -= offset;

		if (size < sizeof(struct ffa_comp_mrd)) {
			WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
			     __func__, offset, obj->desc_size);
			return -EINVAL;
		}
		size -= sizeof(struct ffa_comp_mrd);

		count = size / sizeof(struct ffa_cons_mrd);

		comp = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);

		if (comp == NULL) {
			WARN("%s: invalid comp_mrd offset\n", __func__);
			return -EINVAL;
		}

		if (comp->address_range_count != count) {
			WARN("%s: invalid object, desc count %u != %zu\n",
			     __func__, comp->address_range_count, count);
			return -EINVAL;
		}

		expected_size = offset + sizeof(*comp) +
				spmc_shmem_obj_ffa_constituent_size(obj,
								    ffa_version);

		if (expected_size != obj->desc_size) {
			WARN("%s: invalid object, computed size %zu != size %zu\n",
			     __func__, expected_size, obj->desc_size);
			return -EINVAL;
		}

		total_page_count = 0;

		for (size_t i = 0; i < count; i++) {
			total_page_count +=
				comp->address_range_array[i].page_count;
		}
		if (comp->total_page_count != total_page_count) {
			WARN("%s: invalid object, desc total_page_count %u != %" PRIu64 "\n",
			     __func__, comp->total_page_count,
			     total_page_count);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * spmc_shmem_check_state_obj - Check if the descriptor describes memory
 *				regions that are currently involved with an
 *				existing memory transaction. This implies that
 *				the memory is not in a valid state for lending.
 * @obj:	Object containing ffa_memory_region_descriptor.
 *
 * Return: 0 if object is valid, -EINVAL if invalid memory state.
 */
static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
				      uint32_t ffa_version)
{
	size_t obj_offset = 0;
	struct spmc_shmem_obj *inflight_obj;

	struct ffa_comp_mrd *other_mrd;
	struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
									 ffa_version);

	if (requested_mrd == NULL) {
		return -EINVAL;
	}

	inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
					       &obj_offset);

	while (inflight_obj != NULL) {
		/*
		 * Don't compare the transaction to itself or to partially
		 * transmitted descriptors.
		 */
		if ((obj->desc.handle != inflight_obj->desc.handle) &&
		    (obj->desc_size == obj->desc_filled)) {
			other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
								FFA_VERSION_COMPILED);
			if (other_mrd == NULL) {
				return -EINVAL;
			}
			if (overlapping_memory_regions(requested_mrd,
						       other_mrd)) {
				return -EINVAL;
			}
		}

		inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
						       &obj_offset);
	}
	return 0;
}

static long spmc_ffa_fill_desc(struct mailbox *mbox,
			       struct spmc_shmem_obj *obj,
			       uint32_t fragment_length,
			       ffa_mtd_flag32_t mtd_flag,
			       uint32_t ffa_version,
			       void *smc_handle)
{
	int ret;
	size_t emad_size;
	uint32_t handle_low;
	uint32_t handle_high;
	struct ffa_emad_v1_0 *emad;
	struct ffa_emad_v1_0 *other_emad;

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (fragment_length > mbox->rxtx_page_count * PAGE_SIZE_4KB) {
		WARN("%s: bad fragment size %u > %u buffer size\n", __func__,
		     fragment_length, mbox->rxtx_page_count * PAGE_SIZE_4KB);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (fragment_length > obj->desc_size - obj->desc_filled) {
		WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
		     fragment_length, obj->desc_size - obj->desc_filled);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	memcpy((uint8_t *)&obj->desc + obj->desc_filled,
	       (uint8_t *) mbox->tx_buffer, fragment_length);

	/* Ensure that the sender ID resides in the normal world. */
	if (ffa_is_secure_world_id(obj->desc.sender_id)) {
		WARN("%s: Invalid sender ID 0x%x.\n",
		     __func__, obj->desc.sender_id);
		ret = FFA_ERROR_DENIED;
		goto err_arg;
	}

	/* Ensure the NS bit is set to 0. */
	if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	/*
	 * We don't currently support any optional flags so ensure none are
	 * requested.
	 */
	if (obj->desc.flags != 0U && mtd_flag != 0U &&
	    (obj->desc.flags != mtd_flag)) {
		WARN("%s: invalid memory transaction flags %u != %u\n",
		     __func__, obj->desc.flags, mtd_flag);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (obj->desc_filled == 0U) {
		/* First fragment, descriptor header has been copied */
		ret = spmc_validate_mtd_start(&obj->desc, ffa_version,
					      fragment_length, obj->desc_size);
		if (ret != 0) {
			goto err_bad_desc;
		}

		obj->desc.handle = spmc_shmem_obj_state.next_handle++;
		obj->desc.flags |= mtd_flag;
	}

	obj->desc_filled += fragment_length;

	handle_low = (uint32_t)obj->desc.handle;
	handle_high = obj->desc.handle >> 32;

	if (obj->desc_filled != obj->desc_size) {
		SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
			 handle_high, obj->desc_filled,
			 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
	}

	/* The full descriptor has been received, perform any final checks. */

	ret = spmc_shmem_check_obj(obj, ffa_version);
	if (ret != 0) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_bad_desc;
	}

	/* Ensure partition IDs are not duplicated. */
	for (size_t i = 0; i < obj->desc.emad_count; i++) {
		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
					       &emad_size);

		for (size_t j = i + 1; j < obj->desc.emad_count; j++) {
			other_emad = spmc_shmem_obj_get_emad(&obj->desc, j,
							     ffa_version,
							     &emad_size);

			if (emad->mapd.endpoint_id ==
			    other_emad->mapd.endpoint_id) {
				WARN("%s: Duplicated endpoint id 0x%x\n",
				     __func__, emad->mapd.endpoint_id);
				ret = FFA_ERROR_INVALID_PARAMETER;
				goto err_bad_desc;
			}
		}
	}

	ret = spmc_shmem_check_state_obj(obj, ffa_version);
	if (ret) {
		ERROR("%s: invalid memory region descriptor.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_bad_desc;
	}

	/*
	 * Everything checks out, if the sender was using FF-A v1.0, convert
	 * the descriptor format to use the v1.1 structures.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		struct spmc_shmem_obj *v1_1_obj;
		uint64_t mem_handle;

		/* Calculate the size that the v1.1 descriptor will require. */
		size_t v1_1_desc_size =
			spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
							  obj->desc_size);

		if (v1_1_desc_size == 0U) {
			ERROR("%s: cannot determine size of descriptor.\n",
			      __func__);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_arg;
		}

		/* Get a new obj to store the v1.1 descriptor. */
		v1_1_obj =
			spmc_shmem_obj_alloc(&spmc_shmem_obj_state, v1_1_desc_size);

		if (!v1_1_obj) {
			ret = FFA_ERROR_NO_MEMORY;
			goto err_arg;
		}

		/* Perform the conversion from v1.0 to v1.1. */
		v1_1_obj->desc_size = v1_1_desc_size;
		v1_1_obj->desc_filled = v1_1_desc_size;
		if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
			ERROR("%s: Could not convert mtd!\n", __func__);
			spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_arg;
		}

		/*
		 * We're finished with the v1.0 descriptor so free it
		 * and continue our checks with the new v1.1 descriptor.
		 */
		mem_handle = obj->desc.handle;
		spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
		obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
		if (obj == NULL) {
			ERROR("%s: Failed to find converted descriptor.\n",
			      __func__);
			ret = FFA_ERROR_INVALID_PARAMETER;
			return spmc_ffa_error_return(smc_handle, ret);
		}
	}

	/* Allow for platform specific operations to be performed. */
	ret = plat_spmc_shmem_begin(&obj->desc);
	if (ret != 0) {
		goto err_arg;
	}

	SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
		 0, 0, 0);

err_bad_desc:
err_arg:
	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
	return spmc_ffa_error_return(smc_handle, ret);
}
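
/*
 * Transmission flow sketch (illustrative): a sender whose descriptor exceeds
 * the RX/TX buffer size drives this function once per fragment:
 *
 *	FFA_MEM_SHARE(total, frag0_len)    -> FFA_MEM_FRAG_RX(handle, filled)
 *	FFA_MEM_FRAG_TX(handle, frag1_len) -> FFA_MEM_FRAG_RX(handle, filled)
 *	...
 *	FFA_MEM_FRAG_TX(handle, fragN_len) -> FFA_SUCCESS(handle)
 *
 * Validation that needs the complete descriptor (duplicate endpoint IDs,
 * overlap with in-flight transactions, v1.0 -> v1.1 conversion) only runs
 * once desc_filled == desc_size.
 */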

/**
 * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
 * @client:		Client state.
 * @total_length:	Total length of shared memory descriptor.
 * @fragment_length:	Length of fragment of shared memory descriptor passed in
 *			this call.
 * @address:		Not supported, must be 0.
 * @page_count:		Not supported, must be 0.
 * @smc_handle:		Handle passed to smc call. Used to return
 *			FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
 * to share or lend memory from non-secure os to secure os (with no stream
 * endpoints).
 *
 * Return: 0 on success, error code on failure.
 */
long spmc_ffa_mem_send(uint32_t smc_fid,
			bool secure_origin,
			uint64_t total_length,
			uint32_t fragment_length,
			uint64_t address,
			uint32_t page_count,
			void *cookie,
			void *handle,
			uint64_t flags)

{
	long ret;
	struct spmc_shmem_obj *obj;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	ffa_mtd_flag32_t mtd_flag;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	size_t min_desc_size;

	if (address != 0U || page_count != 0U) {
		WARN("%s: custom memory region for message not supported.\n",
		     __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (secure_origin) {
		WARN("%s: unsupported share direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		min_desc_size = sizeof(struct ffa_mtd_v1_0);
	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
		min_desc_size = sizeof(struct ffa_mtd);
	} else {
		WARN("%s: bad FF-A version.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check if the descriptor is too small for the FF-A version. */
	if (fragment_length < min_desc_size) {
		WARN("%s: bad first fragment size %u < %zu\n",
		     __func__, fragment_length, min_desc_size);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
		mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
	} else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
		mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
	} else {
		WARN("%s: invalid memory management operation.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);
	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
	if (obj == NULL) {
		ret = FFA_ERROR_NO_MEMORY;
		goto err_unlock;
	}

	spin_lock(&mbox->lock);
	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
				 ffa_version, handle);
	spin_unlock(&mbox->lock);

	spin_unlock(&spmc_shmem_obj_state.lock);
	return ret;

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
 * @client:		Client state.
 * @handle_low:		Handle_low value returned from FFA_MEM_FRAG_RX.
 * @handle_high:	Handle_high value returned from FFA_MEM_FRAG_RX.
 * @fragment_length:	Length of fragments transmitted.
 * @sender_id:		Vmid of sender in bits [31:16].
 * @smc_handle:		Handle passed to smc call. Used to return
 *			FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Return: @smc_handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
			  bool secure_origin,
			  uint64_t handle_low,
			  uint64_t handle_high,
			  uint32_t fragment_length,
			  uint32_t sender_id,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	long ret;
	uint32_t desc_sender_id;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);

	struct spmc_shmem_obj *obj;
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
		     __func__, mem_handle);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
	if (sender_id != desc_sender_id) {
		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
		     sender_id, desc_sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	if (obj->desc_filled == obj->desc_size) {
		WARN("%s: object desc already filled, %zu\n", __func__,
		     obj->desc_filled);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	spin_lock(&mbox->lock);
	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
				 handle);
	spin_unlock(&mbox->lock);

	spin_unlock(&spmc_shmem_obj_state.lock);
	return ret;

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor
 *				      if the caller implements a version greater
 *				      than FF-A 1.0 or if they have requested
 *				      the functionality.
 *				      TODO: We are assuming that the caller is
 *				      an SP. To support retrieval from the
 *				      normal world this function will need to be
 *				      expanded accordingly.
 * @resp:	Descriptor populated in caller's RX buffer.
 * @sp_ctx:	Context of the calling SP.
 */
void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
			 struct secure_partition_desc *sp_ctx)
{
	if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
	    sp_ctx->ns_bit_requested) {
		/*
		 * Currently memory senders must reside in the normal
		 * world, and we do not have the functionality to change
		 * the state of memory dynamically. Therefore we can always set
		 * the NS bit to 1.
		 */
		resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
	}
}
1359
1360/**
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001361 * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
1362 * @smc_fid: FID of SMC
1363 * @total_length: Total length of retrieve request descriptor if this is
1364 * the first call. Otherwise (unsupported) must be 0.
1365 * @fragment_length: Length of fragment of retrieve request descriptor passed
1366 * in this call. Only @fragment_length == @length is
1367 * supported by this implementation.
1368 * @address: Not supported, must be 0.
1369 * @page_count: Not supported, must be 0.
1370 * @smc_handle: Handle passed to smc call. Used to return
1371 * FFA_MEM_RETRIEVE_RESP.
1372 *
1373 * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
1374 * Used by secure os to retrieve memory already shared by non-secure os.
1375 * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
1376 * the client must call FFA_MEM_FRAG_RX until the full response has been
1377 * received.
1378 *
1379 * Return: @handle on success, error code on failure.
1380 */
1381long
1382spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
1383 bool secure_origin,
1384 uint32_t total_length,
1385 uint32_t fragment_length,
1386 uint64_t address,
1387 uint32_t page_count,
1388 void *cookie,
1389 void *handle,
1390 uint64_t flags)
1391{
1392 int ret;
1393 size_t buf_size;
Marc Bonnicid1907f02022-04-19 17:42:53 +01001394 size_t copy_size = 0;
1395 size_t min_desc_size;
1396 size_t out_desc_size = 0;
1397
1398 /*
1399 * Currently we are only accessing fields that are the same in both the
1400 * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
1401 * here. We only need validate against the appropriate struct size.
1402 */
1403 struct ffa_mtd *resp;
1404 const struct ffa_mtd *req;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001405 struct spmc_shmem_obj *obj = NULL;
1406 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
Marc Bonnicid1907f02022-04-19 17:42:53 +01001407 uint32_t ffa_version = get_partition_ffa_version(secure_origin);
Marc Bonnici08f28ef2022-04-19 16:52:59 +01001408 struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001409
1410 if (!secure_origin) {
1411 WARN("%s: unsupported retrieve req direction.\n", __func__);
1412 return spmc_ffa_error_return(handle,
1413 FFA_ERROR_INVALID_PARAMETER);
1414 }
1415
1416 if (address != 0U || page_count != 0U) {
1417 WARN("%s: custom memory region not supported.\n", __func__);
1418 return spmc_ffa_error_return(handle,
1419 FFA_ERROR_INVALID_PARAMETER);
1420 }
1421
1422 spin_lock(&mbox->lock);
1423
1424 req = mbox->tx_buffer;
1425 resp = mbox->rx_buffer;
1426 buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1427
1428 if (mbox->rxtx_page_count == 0U) {
1429 WARN("%s: buffer pair not registered.\n", __func__);
1430 ret = FFA_ERROR_INVALID_PARAMETER;
1431 goto err_unlock_mailbox;
1432 }
1433
1434 if (mbox->state != MAILBOX_STATE_EMPTY) {
1435 WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
1436 ret = FFA_ERROR_DENIED;
1437 goto err_unlock_mailbox;
1438 }
1439
1440 if (fragment_length != total_length) {
1441 WARN("%s: fragmented retrieve request not supported.\n",
1442 __func__);
1443 ret = FFA_ERROR_INVALID_PARAMETER;
1444 goto err_unlock_mailbox;
1445 }
1446
Marc Bonnici336630f2022-01-13 11:39:10 +00001447 if (req->emad_count == 0U) {
1448 WARN("%s: unsupported attribute desc count %u.\n",
1449 __func__, obj->desc.emad_count);
vallau01460d3962022-08-09 17:06:53 +02001450 ret = FFA_ERROR_INVALID_PARAMETER;
1451 goto err_unlock_mailbox;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001452 }

	/* Determine the appropriate minimum descriptor size. */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		min_desc_size = sizeof(struct ffa_mtd_v1_0);
	} else {
		min_desc_size = sizeof(struct ffa_mtd);
	}
	if (total_length < min_desc_size) {
		WARN("%s: invalid length %u < %zu\n", __func__, total_length,
		     min_desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (obj->desc_filled != obj->desc_size) {
		WARN("%s: incomplete object desc filled %zu < size %zu\n",
		     __func__, obj->desc_filled, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
		WARN("%s: wrong sender id 0x%x != 0x%x\n",
		     __func__, req->sender_id, obj->desc.sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->tag != obj->desc.tag) {
		WARN("%s: wrong tag 0x%lx != 0x%lx\n",
		     __func__, req->tag, obj->desc.tag);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
		WARN("%s: mismatch of endpoint counts %u != %u\n",
		     __func__, req->emad_count, obj->desc.emad_count);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/* Ensure the NS bit is set to 0 in the request. */
	if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->flags != 0U) {
		if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
		    (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
			/*
			 * If the retrieve request specifies the memory
			 * transaction type, ensure it matches what we expect.
			 */
			WARN("%s: wrong mem transaction flags %x != %x\n",
			     __func__, req->flags, obj->desc.flags);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}

		if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
		    req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
			/*
			 * The current implementation does not support donate
			 * and it supports no other flags.
			 */
			WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}
	}

	/* Validate the caller is a valid participant. */
	if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
		WARN("%s: Invalid endpoint ID (0x%x).\n",
		     __func__, sp_ctx->sp_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/* Validate that the provided emad offset and structure are valid. */
	for (size_t i = 0; i < req->emad_count; i++) {
		size_t emad_size;
		struct ffa_emad_v1_0 *emad;

		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
					       &emad_size);

		if ((uintptr_t) emad >= (uintptr_t)
		    ((uint8_t *) req + total_length)) {
			WARN("Invalid emad access.\n");
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}
	}

	/*
	 * Validate all the endpoints match in the case of multiple
	 * borrowers. We don't mandate that the order of the borrowers
	 * must match in the descriptors; therefore, check to see if the
	 * endpoints match in any order.
	 */
	for (size_t i = 0; i < req->emad_count; i++) {
		bool found = false;
		size_t emad_size;
		struct ffa_emad_v1_0 *emad;
		struct ffa_emad_v1_0 *other_emad;

		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
					       &emad_size);

		for (size_t j = 0; j < obj->desc.emad_count; j++) {
			other_emad = spmc_shmem_obj_get_emad(
					&obj->desc, j, MAKE_FFA_VERSION(1, 1),
					&emad_size);

			if (req->emad_count &&
			    emad->mapd.endpoint_id ==
			    other_emad->mapd.endpoint_id) {
				found = true;
				break;
			}
		}

		if (!found) {
			WARN("%s: invalid receiver id (0x%x).\n",
			     __func__, emad->mapd.endpoint_id);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}
	}

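	/*
	 * Mark the RX buffer as in use; it stays busy until the receiver
	 * releases it (e.g. via FFA_RX_RELEASE).
	 */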
	mbox->state = MAILBOX_STATE_FULL;

	if (req->emad_count != 0U) {
		obj->in_use++;
	}

	/*
	 * If the caller is v1.0, convert the descriptor, otherwise copy
	 * it directly.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
							&copy_size,
							&out_desc_size);
		if (ret != 0U) {
			ERROR("%s: Failed to process descriptor.\n", __func__);
			goto err_unlock_all;
		}
	} else {
		copy_size = MIN(obj->desc_size, buf_size);
		out_desc_size = obj->desc_size;

		memcpy(resp, &obj->desc, copy_size);
	}

	/* Set the NS bit in the response if applicable. */
	spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);

	spin_unlock(&spmc_shmem_obj_state.lock);
	spin_unlock(&mbox->lock);

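	/*
	 * Report the full descriptor size along with the number of bytes
	 * copied into the RX buffer; a receiver that got a partial copy
	 * can fetch the remainder via FFA_MEM_FRAG_RX.
	 */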
	SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
		 copy_size, 0, 0, 0, 0, 0);

err_unlock_all:
	spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
 * @secure_origin: Specifies whether the request originated from the
 *                 secure world.
 * @handle_low: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
 * @handle_high: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
 * @fragment_offset: Byte offset in descriptor to resume at.
 * @sender_id: Bit[31:16]: Endpoint id of sender if client is a
 *             hypervisor. 0 otherwise.
 * @handle: Handle passed to the SMC call. Used to return
 *          FFA_MEM_FRAG_TX.
 *
 * Return: @handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
			  bool secure_origin,
			  uint32_t handle_low,
			  uint32_t handle_high,
			  uint32_t fragment_offset,
			  uint32_t sender_id,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	int ret;
	void *src;
	size_t buf_size;
	size_t copy_size;
	size_t full_copy_size;
	uint32_t desc_sender_id;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
	struct spmc_shmem_obj *obj;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);

	if (!secure_origin) {
		WARN("%s: can only be called from the secure world.\n",
		     __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

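	/*
	 * Validate the handle and fragment offset under the shared memory
	 * state lock before claiming the caller's RX buffer.
	 */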
	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		WARN("%s: 0x%lx is not a valid handle.\n",
		     __func__, mem_handle);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
	if (sender_id != 0U && sender_id != desc_sender_id) {
		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
		     sender_id, desc_sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	if (fragment_offset >= obj->desc_size) {
		WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
		     __func__, fragment_offset, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	spin_lock(&mbox->lock);

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (mbox->state != MAILBOX_STATE_EMPTY) {
		WARN("%s: RX Buffer is full!\n", __func__);
		ret = FFA_ERROR_DENIED;
		goto err_unlock_all;
	}

	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

	mbox->state = MAILBOX_STATE_FULL;

	/*
	 * If the caller is v1.0, convert the descriptor, otherwise copy
	 * it directly.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		size_t out_desc_size;

		ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
							buf_size,
							fragment_offset,
							&copy_size,
							&out_desc_size);
		if (ret != 0U) {
			ERROR("%s: Failed to process descriptor.\n", __func__);
			goto err_unlock_all;
		}
	} else {
		full_copy_size = obj->desc_size - fragment_offset;
		copy_size = MIN(full_copy_size, buf_size);

		src = &obj->desc;

		memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
	}

	spin_unlock(&mbox->lock);
	spin_unlock(&spmc_shmem_obj_state.lock);

	SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
		 copy_size, sender_id, 0, 0, 0);

err_unlock_all:
	spin_unlock(&mbox->lock);
err_unlock_shmem:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
 * @secure_origin: Specifies whether the request originated from the
 *                 secure world.
 *
 * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
 * Used by the secure OS to release previously shared memory back to the
 * non-secure OS.
 *
 * The handle to release must be in the client's (secure OS's) transmit
 * buffer.
 *
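 * For illustration, a borrower would typically populate its TX buffer
 * along these lines before making the call (mem_handle and own_sp_id
 * are placeholder names):
 *
 *   struct ffa_mem_relinquish_descriptor *req = tx_buffer;
 *
 *   req->handle = mem_handle;
 *   req->flags = 0U;
 *   req->endpoint_count = 1U;
 *   req->endpoint_array[0] = own_sp_id;
 *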
1768 */
1769int spmc_ffa_mem_relinquish(uint32_t smc_fid,
1770 bool secure_origin,
1771 uint32_t handle_low,
1772 uint32_t handle_high,
1773 uint32_t fragment_offset,
1774 uint32_t sender_id,
1775 void *cookie,
1776 void *handle,
1777 uint64_t flags)
1778{
1779 int ret;
1780 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1781 struct spmc_shmem_obj *obj;
1782 const struct ffa_mem_relinquish_descriptor *req;
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001783 struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001784
1785 if (!secure_origin) {
1786 WARN("%s: unsupported relinquish direction.\n", __func__);
1787 return spmc_ffa_error_return(handle,
1788 FFA_ERROR_INVALID_PARAMETER);
1789 }
1790
1791 spin_lock(&mbox->lock);
1792
1793 if (mbox->rxtx_page_count == 0U) {
1794 WARN("%s: buffer pair not registered.\n", __func__);
1795 ret = FFA_ERROR_INVALID_PARAMETER;
1796 goto err_unlock_mailbox;
1797 }
1798
1799 req = mbox->tx_buffer;
1800
1801 if (req->flags != 0U) {
1802 WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
1803 ret = FFA_ERROR_INVALID_PARAMETER;
1804 goto err_unlock_mailbox;
1805 }
1806
Marc Bonnici336630f2022-01-13 11:39:10 +00001807 if (req->endpoint_count == 0) {
1808 WARN("%s: endpoint count cannot be 0.\n", __func__);
1809 ret = FFA_ERROR_INVALID_PARAMETER;
1810 goto err_unlock_mailbox;
1811 }
1812
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001813 spin_lock(&spmc_shmem_obj_state.lock);
1814
1815 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1816 if (obj == NULL) {
1817 ret = FFA_ERROR_INVALID_PARAMETER;
1818 goto err_unlock_all;
1819 }
1820
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001821 /*
1822 * Validate the endpoint ID was populated correctly. We don't currently
1823 * support proxy endpoints so the endpoint count should always be 1.
1824 */
1825 if (req->endpoint_count != 1U) {
1826 WARN("%s: unsupported endpoint count %u != 1\n", __func__,
1827 req->endpoint_count);
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001828 ret = FFA_ERROR_INVALID_PARAMETER;
1829 goto err_unlock_all;
1830 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001831
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001832 /* Validate provided endpoint ID matches the partition ID. */
1833 if (req->endpoint_array[0] != sp_ctx->sp_id) {
1834 WARN("%s: invalid endpoint ID %u != %u\n", __func__,
1835 req->endpoint_array[0], sp_ctx->sp_id);
1836 ret = FFA_ERROR_INVALID_PARAMETER;
1837 goto err_unlock_all;
1838 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001839
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001840 /* Validate the caller is a valid participant. */
Shruti Gupta20ce06c2022-08-25 14:22:53 +01001841 if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001842 WARN("%s: Invalid endpoint ID (0x%x).\n",
1843 __func__, req->endpoint_array[0]);
1844 ret = FFA_ERROR_INVALID_PARAMETER;
1845 goto err_unlock_all;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001846 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001847
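	/* There must be an outstanding retrieval to relinquish. */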
	if (obj->in_use == 0U) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}
	obj->in_use--;

	spin_unlock(&spmc_shmem_obj_state.lock);
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock_all:
	spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
 * @secure_origin: Specifies whether the request originated from the
 *                 secure world.
 * @handle_low: Unique handle of shared memory object to reclaim.
 *              Bit[31:0].
 * @handle_high: Unique handle of shared memory object to reclaim.
 *               Bit[63:32].
 * @mem_flags: Unsupported, ignored.
 *
 * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
 * Used by the non-secure OS to reclaim memory previously shared with the
 * secure OS.
 *
 * Return: 0 on success, error code on failure.
 */
int spmc_ffa_mem_reclaim(uint32_t smc_fid,
			 bool secure_origin,
			 uint32_t handle_low,
			 uint32_t handle_high,
			 uint32_t mem_flags,
			 uint64_t x4,
			 void *cookie,
			 void *handle,
			 uint64_t flags)
{
	int ret;
	struct spmc_shmem_obj *obj;
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

	if (secure_origin) {
		WARN("%s: unsupported reclaim direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (mem_flags != 0U) {
		WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}
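	/* Deny the reclaim while any borrower has an outstanding retrieval. */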
	if (obj->in_use != 0U) {
		ret = FFA_ERROR_DENIED;
		goto err_unlock;
	}

	if (obj->desc_filled != obj->desc_size) {
		WARN("%s: incomplete object desc filled %zu < size %zu\n",
		     __func__, obj->desc_filled, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	/* Allow for platform specific operations to be performed. */
	ret = plat_spmc_shmem_reclaim(&obj->desc);
	if (ret != 0) {
		goto err_unlock;
	}

	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
	spin_unlock(&spmc_shmem_obj_state.lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}