/*
 * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <string.h>

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/object_pool.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <services/ffa_svc.h>
#include "spmc.h"
#include "spmc_shared_mem.h"

#include <platform_def.h>
/**
 * struct spmc_shmem_obj - Shared memory object.
 * @desc_size:      Size of @desc.
 * @desc_filled:    Size of @desc already received.
 * @in_use:         Number of clients that have called ffa_mem_retrieve_req
 *                  without a matching ffa_mem_relinquish call.
 * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
 */
struct spmc_shmem_obj {
	size_t desc_size;
	size_t desc_filled;
	size_t in_use;
	struct ffa_mtd desc;
};

/*
 * Declare our data structure to store the metadata of memory share requests.
 * The main datastore is allocated on a per platform basis to ensure enough
 * storage can be made available.
 * The address of the data store will be populated by the SPMC during its
 * initialization.
 */

struct spmc_shmem_obj_state spmc_shmem_obj_state = {
	/* Set start value for handle so top 32 bits are needed quickly. */
	.next_handle = 0xffffffc0U,
};
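
/*
 * Illustrative sketch only: the platform side of the datastore contract.
 * A platform port typically reserves a static buffer and hands it to the
 * SPMC at boot; the hook and macro names below follow the pattern used by
 * TF-A platform ports but are assumptions here, not definitions from this
 * file:
 *
 *	static uint8_t plat_shmem_datastore[PLAT_SPMC_SHMEM_DATASTORE_SIZE];
 *
 *	int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size)
 *	{
 *		*datastore = plat_shmem_datastore;
 *		*size = PLAT_SPMC_SHMEM_DATASTORE_SIZE;
 *		return 0;
 *	}
 *
 * During initialization the SPMC would point spmc_shmem_obj_state.data at
 * this buffer and set data_size to its length.
 */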

/**
 * spmc_shmem_obj_size - Convert from descriptor size to object size.
 * @desc_size:  Size of struct ffa_memory_region_descriptor object.
 *
 * Return: Size of struct spmc_shmem_obj object.
 */
static size_t spmc_shmem_obj_size(size_t desc_size)
{
	return desc_size + offsetof(struct spmc_shmem_obj, desc);
}

/**
 * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
 * @state:      Global state.
 * @desc_size:  Size of struct ffa_memory_region_descriptor object that
 *              allocated object will hold.
 *
 * Return: Pointer to newly allocated object, or %NULL if there is not enough
 *         space left. The returned pointer is only valid while @state is
 *         locked; to use it again after unlocking @state,
 *         spmc_shmem_obj_lookup must be called.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
{
	struct spmc_shmem_obj *obj;
	size_t free = state->data_size - state->allocated;
	size_t obj_size;

	if (state->data == NULL) {
		ERROR("Missing shmem datastore!\n");
		return NULL;
	}

	obj_size = spmc_shmem_obj_size(desc_size);

	/* Ensure the obj size has not overflowed. */
	if (obj_size < desc_size) {
		WARN("%s(0x%zx) desc_size overflow\n",
		     __func__, desc_size);
		return NULL;
	}

	if (obj_size > free) {
		WARN("%s(0x%zx) failed, free 0x%zx\n",
		     __func__, desc_size, free);
		return NULL;
	}
	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
	obj->desc = (struct ffa_mtd) {0};
	obj->desc_size = desc_size;
	obj->desc_filled = 0;
	obj->in_use = 0;
	state->allocated += obj_size;
	return obj;
}

/**
 * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
 * @state:      Global state.
 * @obj:        Object to free.
 *
 * Release memory used by @obj. Other objects may move, so on return all
 * pointers to struct spmc_shmem_obj object should be considered invalid, not
 * just @obj.
 *
 * The current implementation always compacts the remaining objects to simplify
 * the allocator and to avoid fragmentation.
 */
static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
				struct spmc_shmem_obj *obj)
{
	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
	uint8_t *shift_dest = (uint8_t *)obj;
	uint8_t *shift_src = shift_dest + free_size;
	size_t shift_size = state->allocated - (shift_src - state->data);

	if (shift_size != 0U) {
		memmove(shift_dest, shift_src, shift_size);
	}
	state->allocated -= free_size;
}
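
/*
 * Worked example of the compaction above (illustrative sizes): with three
 * objects laid out as |A|B|C| in state->data, freeing B memmoves C down
 * over B's bytes, leaving |A|C|, and shrinks state->allocated by B's
 * object size. Any pointer previously held to B or C is now stale, which
 * is why callers must re-run spmc_shmem_obj_lookup() after a free.
 */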

/**
 * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
 * @state:      Global state.
 * @handle:     Unique handle of object to return.
 *
 * Return: struct spmc_shmem_obj object with handle matching @handle.
 *         %NULL, if no object in @state->data has a matching handle.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
{
	uint8_t *curr = state->data;

	while (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		if (obj->desc.handle == handle) {
			return obj;
		}
		curr += spmc_shmem_obj_size(obj->desc_size);
	}
	return NULL;
}

/**
 * spmc_shmem_obj_get_next - Get the next memory object from an offset.
 * @state:      Global state.
 * @offset:     Offset used to track which objects have previously been
 *              returned.
 *
 * Return: the next struct spmc_shmem_obj object from the provided offset.
 *         %NULL, if there are no more objects.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
{
	uint8_t *curr = state->data + *offset;

	if (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		*offset += spmc_shmem_obj_size(obj->desc_size);

		return obj;
	}
	return NULL;
}

/*******************************************************************************
 * FF-A memory descriptor helper functions.
 ******************************************************************************/
/**
 * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
 *                           client's FF-A version.
 * @desc:         The memory transaction descriptor.
 * @index:        The index of the emad element to be accessed.
 * @ffa_version:  FF-A version of the provided structure.
 * @emad_size:    Will be populated with the size of the returned emad
 *                descriptor.
 * Return: A pointer to the requested emad structure.
 */
static void *
spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
			uint32_t ffa_version, size_t *emad_size)
{
	uint8_t *emad;

	assert(index < desc->emad_count);

	/*
	 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
	 * format, otherwise assume it is a v1.1 format.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		emad = (uint8_t *)desc + offsetof(struct ffa_mtd_v1_0, emad);
		*emad_size = sizeof(struct ffa_emad_v1_0);
	} else {
		assert(is_aligned(desc->emad_offset, 16));
		emad = ((uint8_t *) desc + desc->emad_offset);
		*emad_size = desc->emad_size;
	}

	assert(((uint64_t)index * (uint64_t)*emad_size) <= UINT32_MAX);
	return (emad + (*emad_size * index));
}
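
/*
 * Descriptor layout sketch (illustrative): in the v1.0 format the emad
 * array sits at a fixed offset directly after the header, while the v1.1
 * format locates it via the emad_offset/emad_size fields:
 *
 *	v1.0: [ffa_mtd_v1_0][emad 0][emad 1]...[ffa_comp_mrd]
 *	v1.1: [ffa_mtd]...(emad_offset)...[emad 0][emad 1]...[ffa_comp_mrd]
 *
 * so emad i lives at base + offsetof(struct ffa_mtd_v1_0, emad) +
 * i * sizeof(struct ffa_emad_v1_0) for v1.0, and at base + emad_offset +
 * i * emad_size for v1.1, which is the arithmetic performed above.
 */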

/**
 * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
 *                               FF-A version of the descriptor.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: struct ffa_comp_mrd object corresponding to the composite memory
 *         region descriptor.
 */
static struct ffa_comp_mrd *
spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
{
	size_t emad_size;
	/*
	 * The comp_mrd_offset field of the emad descriptor remains consistent
	 * between FF-A versions therefore we can use the v1.0 descriptor here
	 * in all cases.
	 */
	struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
							     ffa_version,
							     &emad_size);

	/* Ensure the composite descriptor offset is aligned. */
	if (!is_aligned(emad->comp_mrd_offset, 8)) {
		WARN("Unaligned composite memory region descriptor offset.\n");
		return NULL;
	}

	return (struct ffa_comp_mrd *)
	       ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
}

/**
 * spmc_shmem_obj_validate_id - Validate a partition ID is participating in
 *                              a given memory transaction.
 * @obj:        The shared memory object containing the descriptor
 *              of the memory transaction.
 * @sp_id:      Partition ID to validate.
 * Return: true if ID is valid, else false.
 */
bool spmc_shmem_obj_validate_id(struct spmc_shmem_obj *obj, uint16_t sp_id)
{
	bool found = false;
	struct ffa_mtd *desc = &obj->desc;
	size_t desc_size = obj->desc_size;

	/* Validate the partition is a valid participant. */
	for (unsigned int i = 0U; i < desc->emad_count; i++) {
		size_t emad_size;
		struct ffa_emad_v1_0 *emad;

		emad = spmc_shmem_obj_get_emad(desc, i,
					       MAKE_FFA_VERSION(1, 1),
					       &emad_size);
		/*
		 * Validate the calculated emad address resides within the
		 * descriptor.
		 */
		if ((emad == NULL) || (uintptr_t) emad >=
		    (uintptr_t)((uint8_t *) desc + desc_size)) {
			VERBOSE("Invalid emad.\n");
			break;
		}
		if (sp_id == emad->mapd.endpoint_id) {
			found = true;
			break;
		}
	}
	return found;
}

/*
 * Compare two memory regions to determine if any range overlaps with another
 * ongoing memory transaction.
 */
static bool
overlapping_memory_regions(struct ffa_comp_mrd *region1,
			   struct ffa_comp_mrd *region2)
{
	uint64_t region1_start;
	uint64_t region1_size;
	uint64_t region1_end;
	uint64_t region2_start;
	uint64_t region2_size;
	uint64_t region2_end;

	assert(region1 != NULL);
	assert(region2 != NULL);

	if (region1 == region2) {
		return true;
	}

	/*
	 * Check each memory region in the request against existing
	 * transactions.
	 */
	for (size_t i = 0; i < region1->address_range_count; i++) {

		region1_start = region1->address_range_array[i].address;
		region1_size =
			region1->address_range_array[i].page_count *
			PAGE_SIZE_4KB;
		region1_end = region1_start + region1_size;

		for (size_t j = 0; j < region2->address_range_count; j++) {

			region2_start = region2->address_range_array[j].address;
			region2_size =
				region2->address_range_array[j].page_count *
				PAGE_SIZE_4KB;
			region2_end = region2_start + region2_size;

			/* Check if regions are not overlapping. */
			if (!((region2_end <= region1_start) ||
			      (region1_end <= region2_start))) {
				WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
				     region1_start, region1_end,
				     region2_start, region2_end);
				return true;
			}
		}
	}
	return false;
}
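
/*
 * Worked example (illustrative addresses): with 4KB pages, a range
 * {address = 0x80000000, page_count = 2} spans [0x80000000, 0x80002000)
 * and a range {address = 0x80001000, page_count = 1} spans
 * [0x80001000, 0x80002000). Neither range ends at or before the point
 * where the other begins, so the check above reports an overlap and the
 * new transaction is rejected.
 */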

/*******************************************************************************
 * FF-A v1.0 Memory Descriptor Conversion Helpers.
 ******************************************************************************/
/**
 * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
 *                                     converted descriptor.
 * @orig:       The original v1.0 memory transaction descriptor.
 * @desc_size:  The size of the original v1.0 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.1 format.
 */
static uint64_t
spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
{
	uint64_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = orig->emad;

	/* Get the size of the v1.1 descriptor. */
	size += sizeof(struct ffa_mtd);

	/* Add the size of the emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrds. */
	size += sizeof(struct ffa_comp_mrd);

	/* Add the size of the constituent mrds. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Add the size of the memory region descriptors. */
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}

/**
 * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
 *                                     converted descriptor.
 * @orig:       The original v1.1 memory transaction descriptor.
 * @desc_size:  The size of the original v1.1 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.0 format.
 */
static size_t
spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
{
	size_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
					   ((uint8_t *) orig +
					    orig->emad_offset);

	/* Get the size of the v1.0 descriptor. */
	size += sizeof(struct ffa_mtd_v1_0);

	/* Add the size of the v1.0 emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrds. */
	size += sizeof(struct ffa_comp_mrd);

	/* Add the size of the constituent mrds. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Check the calculated address is within the memory descriptor. */
	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
	    (uintptr_t)((uint8_t *) orig + desc_size)) {
		return 0;
	}
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}

/**
 * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
 * @out_obj:	The shared memory object to populate the converted descriptor.
 * @orig:	The shared memory object containing the v1.0 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
				     struct spmc_shmem_obj *orig)
{
	struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
	struct ffa_mtd *out = &out_obj->desc;
	struct ffa_emad_v1_0 *emad_array_in;
	struct ffa_emad_v1_0 *emad_array_out;
	struct ffa_comp_mrd *mrd_in;
	struct ffa_comp_mrd *mrd_out;

	size_t mrd_in_offset;
	size_t mrd_out_offset;
	size_t mrd_size = 0;

	/* Populate the new descriptor format from the v1.0 struct. */
	out->sender_id = mtd_orig->sender_id;
	out->memory_region_attributes = mtd_orig->memory_region_attributes;
	out->flags = mtd_orig->flags;
	out->handle = mtd_orig->handle;
	out->tag = mtd_orig->tag;
	out->emad_count = mtd_orig->emad_count;
	out->emad_size = sizeof(struct ffa_emad_v1_0);

	/*
	 * We will locate the emad descriptors directly after the ffa_mtd
	 * struct. This will be 8-byte aligned.
	 */
	out->emad_offset = sizeof(struct ffa_mtd);

	emad_array_in = mtd_orig->emad;
	emad_array_out = (struct ffa_emad_v1_0 *)
			 ((uint8_t *) out + out->emad_offset);

	/* Copy across the emad structs. */
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		/* Bound check for the current emad array element. */
		if (((uint8_t *)&emad_array_in[i] + sizeof(struct ffa_emad_v1_0)) >
		    ((uint8_t *) mtd_orig + orig->desc_size)) {
			VERBOSE("%s: Invalid mtd structure.\n", __func__);
			return false;
		}
		memcpy(&emad_array_out[i], &emad_array_in[i],
		       sizeof(struct ffa_emad_v1_0));
	}

	/* Place the mrd descriptors after the end of the emad descriptors. */
	mrd_in_offset = emad_array_in->comp_mrd_offset;
	mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

	/* Add the size of the composite memory region descriptor. */
	mrd_size += sizeof(struct ffa_comp_mrd);

	/* Find the mrd descriptor. */
	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

	/* Add the size of the constituent memory region descriptors. */
	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

	/*
	 * Update the offset in the emads by the delta between the input and
	 * output addresses.
	 */
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		emad_array_out[i].comp_mrd_offset =
			emad_array_in[i].comp_mrd_offset +
			(mrd_out_offset - mrd_in_offset);
	}

	/* Verify that we stay within bound of the memory descriptors. */
	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
	    (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
		ERROR("%s: Invalid mrd structure.\n", __func__);
		return false;
	}

	/* Copy the mrd descriptors directly. */
	memcpy(mrd_out, mrd_in, mrd_size);

	return true;
}

/**
 * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
 *                                v1.0 memory object.
 * @out_obj:	The shared memory object to populate the v1.0 descriptor.
 * @orig:	The shared memory object containing the v1.1 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
			     struct spmc_shmem_obj *orig)
{
	struct ffa_mtd *mtd_orig = &orig->desc;
	struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
	struct ffa_emad_v1_0 *emad_in;
	struct ffa_emad_v1_0 *emad_array_in;
	struct ffa_emad_v1_0 *emad_array_out;
	struct ffa_comp_mrd *mrd_in;
	struct ffa_comp_mrd *mrd_out;

	size_t mrd_in_offset;
	size_t mrd_out_offset;
	size_t emad_out_array_size;
	size_t mrd_size = 0;
	size_t orig_desc_size = orig->desc_size;

	/* Populate the v1.0 descriptor format from the v1.1 struct. */
	out->sender_id = mtd_orig->sender_id;
	out->memory_region_attributes = mtd_orig->memory_region_attributes;
	out->flags = mtd_orig->flags;
	out->handle = mtd_orig->handle;
	out->tag = mtd_orig->tag;
	out->emad_count = mtd_orig->emad_count;

	/* Determine the location of the emad array in both descriptors. */
	emad_array_in = (struct ffa_emad_v1_0 *)
			((uint8_t *) mtd_orig + mtd_orig->emad_offset);
	emad_array_out = out->emad;

	/* Copy across the emad structs. */
	emad_in = emad_array_in;
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		/* Bound check for emad array. */
		if (((uint8_t *)emad_in + sizeof(struct ffa_emad_v1_0)) >
		    ((uint8_t *) mtd_orig + orig_desc_size)) {
			VERBOSE("%s: Invalid mtd structure.\n", __func__);
			return false;
		}
		memcpy(&emad_array_out[i], emad_in,
		       sizeof(struct ffa_emad_v1_0));

		/* Advance by emad_size bytes, not emad_size elements. */
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *) emad_in + mtd_orig->emad_size);
	}

	/* Place the mrd descriptors after the end of the emad descriptors. */
	emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;

	mrd_out_offset = (uint8_t *) out->emad - (uint8_t *) out +
			 emad_out_array_size;

	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

	mrd_in_offset = mtd_orig->emad_offset +
			(mtd_orig->emad_size * mtd_orig->emad_count);

	/* Add the size of the composite memory region descriptor. */
	mrd_size += sizeof(struct ffa_comp_mrd);

	/* Find the mrd descriptor. */
	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

	/* Add the size of the constituent memory region descriptors. */
	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

	/*
	 * Update the offset in the emads by the delta between the input and
	 * output addresses.
	 */
	emad_in = emad_array_in;

	for (unsigned int i = 0U; i < out->emad_count; i++) {
		emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
						    (mrd_out_offset -
						     mrd_in_offset);
		/* Advance by emad_size bytes, not emad_size elements. */
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *) emad_in + mtd_orig->emad_size);
	}

	/* Verify that we stay within bound of the memory descriptors. */
	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
	    (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
		ERROR("%s: Invalid mrd structure.\n", __func__);
		return false;
	}

	/* Copy the mrd descriptors directly. */
	memcpy(mrd_out, mrd_in, mrd_size);

	return true;
}

/**
 * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
 *                                     the v1.0 format and populates the
 *                                     provided buffer.
 * @dst:              Buffer to populate v1.0 ffa_memory_region_descriptor.
 * @orig_obj:         Object containing v1.1 ffa_memory_region_descriptor.
 * @buf_size:         Size of the buffer to populate.
 * @offset:           The offset of the converted descriptor to copy.
 * @copy_size:        Will be populated with the number of bytes copied.
 * @v1_0_desc_size:   Will be populated with the total size of the v1.0
 *                    descriptor.
 *
 * Return: 0 if conversion and population succeeded.
 * Note: This function invalidates the reference to @orig_obj therefore
 * `spmc_shmem_obj_lookup` must be called if further usage is required.
 */
static uint32_t
spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
				  size_t buf_size, size_t offset,
				  size_t *copy_size, size_t *v1_0_desc_size)
{
	struct spmc_shmem_obj *v1_0_obj;

	/* Calculate the size that the v1.0 descriptor will require. */
	*v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
			  &orig_obj->desc, orig_obj->desc_size);

	if (*v1_0_desc_size == 0) {
		ERROR("%s: cannot determine size of descriptor.\n",
		      __func__);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/* Get a new obj to store the v1.0 descriptor. */
	v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
					*v1_0_desc_size);

	if (!v1_0_obj) {
		return FFA_ERROR_NO_MEMORY;
	}

	/* Perform the conversion from v1.1 to v1.0. */
	if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
		spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	*copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
	memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);

	/*
	 * We're finished with the v1.0 descriptor for now so free it.
	 * Note that this will invalidate any references to the v1.1
	 * descriptor.
	 */
	spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);

	return 0;
}

static int
spmc_validate_mtd_start(struct ffa_mtd *desc, uint32_t ffa_version,
			size_t fragment_length, size_t total_length)
{
	unsigned long long emad_end;
	unsigned long long emad_size;
	unsigned long long emad_offset;
	unsigned int min_desc_size;

	/* Determine the appropriate minimum descriptor size. */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		min_desc_size = sizeof(struct ffa_mtd_v1_0);
	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
		min_desc_size = sizeof(struct ffa_mtd);
	} else {
		return FFA_ERROR_INVALID_PARAMETER;
	}
	if (fragment_length < min_desc_size) {
		WARN("%s: invalid length %zu < %u\n", __func__, fragment_length,
		     min_desc_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	if (desc->emad_count == 0U) {
		WARN("%s: unsupported attribute desc count %u.\n",
		     __func__, desc->emad_count);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/*
	 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
	 * format, otherwise assume it is a v1.1 format.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		emad_offset = emad_size = sizeof(struct ffa_emad_v1_0);
	} else {
		if (!is_aligned(desc->emad_offset, 16)) {
			WARN("%s: Emad offset %" PRIx32 " is not 16-byte aligned.\n",
			     __func__, desc->emad_offset);
			return FFA_ERROR_INVALID_PARAMETER;
		}
		if (desc->emad_offset < sizeof(struct ffa_mtd)) {
			WARN("%s: Emad offset too small: 0x%" PRIx32 " < 0x%zx.\n",
			     __func__, desc->emad_offset,
			     sizeof(struct ffa_mtd));
			return FFA_ERROR_INVALID_PARAMETER;
		}
		emad_offset = desc->emad_offset;
		if (desc->emad_size < sizeof(struct ffa_emad_v1_0)) {
			WARN("%s: Bad emad size (%" PRIu32 " < %zu).\n", __func__,
			     desc->emad_size, sizeof(struct ffa_emad_v1_0));
			return FFA_ERROR_INVALID_PARAMETER;
		}
		if (!is_aligned(desc->emad_size, 16)) {
			WARN("%s: Emad size 0x%" PRIx32 " is not 16-byte aligned.\n",
			     __func__, desc->emad_size);
			return FFA_ERROR_INVALID_PARAMETER;
		}
		emad_size = desc->emad_size;
	}

	/*
	 * Overflow is impossible: the arithmetic happens in at least 64-bit
	 * precision, but all of the operands are bounded by UINT32_MAX, and
	 * ((2^32 - 1)^2 + (2^32 - 1) + (2^32 - 1)) = ((2^32 - 1) * (2^32 + 1))
	 * = (2^64 - 1).
	 */
	CASSERT(sizeof(desc->emad_count) == 4, assert_emad_count_max_too_large);
	emad_end = (desc->emad_count * (unsigned long long)emad_size) +
		   (unsigned long long)sizeof(struct ffa_comp_mrd) +
		   (unsigned long long)emad_offset;

	if (emad_end > total_length) {
		WARN("%s: Composite memory region extends beyond descriptor: 0x%llx > 0x%zx\n",
		     __func__, emad_end, total_length);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	return 0;
}

/**
 * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if
 *         constituent_memory_region_descriptor offset or count is invalid.
 */
static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
				uint32_t ffa_version)
{
	const struct ffa_emad_v1_0 *emad;
	size_t emad_size;
	uint32_t comp_mrd_offset = 0;

	if (obj->desc_filled != obj->desc_size) {
		ERROR("BUG: %s called on incomplete object (%zu != %zu)\n",
		      __func__, obj->desc_filled, obj->desc_size);
		panic();
	}

	if (spmc_validate_mtd_start(&obj->desc, ffa_version,
				    obj->desc_filled, obj->desc_size)) {
		ERROR("BUG: %s called on object with corrupt memory region descriptor\n",
		      __func__);
		panic();
	}

	emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
				       ffa_version, &emad_size);

	for (size_t emad_num = 0; emad_num < obj->desc.emad_count; emad_num++) {
		size_t size;
		size_t count;
		size_t expected_size;
		uint64_t total_page_count;
		size_t header_emad_size;
		uint32_t offset;
		struct ffa_comp_mrd *comp;
		ffa_endpoint_id16_t ep_id;

		/*
		 * Validate the calculated emad address resides within the
		 * descriptor.
		 */
		if ((uintptr_t) emad >
		    ((uintptr_t) &obj->desc + obj->desc_size - emad_size)) {
			ERROR("BUG: Invalid emad access not detected earlier.\n");
			panic();
		}

		offset = emad->comp_mrd_offset;

		/*
		 * If a partition ID resides in the secure world validate that
		 * the partition ID is for a known partition. Ignore any
		 * partition ID belonging to the normal world as it is assumed
		 * the Hypervisor will have validated these.
		 */
		ep_id = emad->mapd.endpoint_id;
		if (ffa_is_secure_world_id(ep_id)) {
			if (spmc_get_sp_ctx(ep_id) == NULL) {
				WARN("%s: Invalid receiver id 0x%x\n",
				     __func__, ep_id);
				return FFA_ERROR_INVALID_PARAMETER;
			}
		}

		/*
		 * Compute the header size while emad still references the
		 * current element, then advance to the next element. The
		 * advance is done before the `continue` below so that no
		 * iteration can skip it.
		 */
		header_emad_size = (size_t)((uint8_t *)emad - (uint8_t *)&obj->desc) +
				   (obj->desc.emad_count * emad_size);
		emad = (const struct ffa_emad_v1_0 *)((const uint8_t *)emad + emad_size);

		/*
		 * The offset provided to the composite memory region descriptor
		 * should be consistent across endpoint descriptors. Store the
		 * first entry and compare against subsequent entries.
		 */
		if (comp_mrd_offset == 0) {
			comp_mrd_offset = offset;
		} else {
			if (comp_mrd_offset != offset) {
				ERROR("%s: mismatching offsets provided, %u != %u\n",
				      __func__, offset, comp_mrd_offset);
				return FFA_ERROR_INVALID_PARAMETER;
			}
			continue; /* Remainder only executed on first iteration. */
		}

		if (offset < header_emad_size) {
			WARN("%s: invalid object, offset %u < header + emad %zu\n",
			     __func__, offset, header_emad_size);
			return FFA_ERROR_INVALID_PARAMETER;
		}

		size = obj->desc_size;

		if (offset > size) {
			WARN("%s: invalid object, offset %u > total size %zu\n",
			     __func__, offset, obj->desc_size);
			return FFA_ERROR_INVALID_PARAMETER;
		}
		size -= offset;

		if (size < sizeof(struct ffa_comp_mrd)) {
			WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
			     __func__, offset, obj->desc_size);
			return FFA_ERROR_INVALID_PARAMETER;
		}
		size -= sizeof(struct ffa_comp_mrd);

		count = size / sizeof(struct ffa_cons_mrd);

		comp = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);

		if (comp == NULL) {
			WARN("%s: invalid comp_mrd offset\n", __func__);
			return FFA_ERROR_INVALID_PARAMETER;
		}

		if (comp->address_range_count != count) {
			WARN("%s: invalid object, desc count %u != %zu\n",
			     __func__, comp->address_range_count, count);
			return FFA_ERROR_INVALID_PARAMETER;
		}

		expected_size = offset + sizeof(*comp) +
				count * sizeof(struct ffa_cons_mrd);

		if (expected_size != obj->desc_size) {
			WARN("%s: invalid object, computed size %zu != size %zu\n",
			     __func__, expected_size, obj->desc_size);
			return FFA_ERROR_INVALID_PARAMETER;
		}

		total_page_count = 0;

		for (size_t i = 0; i < count; i++) {
			total_page_count +=
				comp->address_range_array[i].page_count;
		}
		if (comp->total_page_count != total_page_count) {
			WARN("%s: invalid object, desc total_page_count %u != %" PRIu64 "\n",
			     __func__, comp->total_page_count,
			     total_page_count);
			return FFA_ERROR_INVALID_PARAMETER;
		}
	}
	return 0;
}

/**
 * spmc_shmem_check_state_obj - Check if the descriptor describes memory
 *                              regions that are currently involved with
 *                              existing memory transactions. This implies that
 *                              the memory is not in a valid state for lending.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if invalid memory
 *         state.
 */
static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
				      uint32_t ffa_version)
{
	size_t obj_offset = 0;
	struct spmc_shmem_obj *inflight_obj;

	struct ffa_comp_mrd *other_mrd;
	struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
									 ffa_version);

	if (requested_mrd == NULL) {
		return FFA_ERROR_INVALID_PARAMETER;
	}

	inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
					       &obj_offset);

	while (inflight_obj != NULL) {
		/*
		 * Don't compare the transaction to itself or to partially
		 * transmitted descriptors.
		 */
		if ((obj->desc.handle != inflight_obj->desc.handle) &&
		    (obj->desc_size == obj->desc_filled)) {
			other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
								FFA_VERSION_COMPILED);
			if (other_mrd == NULL) {
				return FFA_ERROR_INVALID_PARAMETER;
			}
			if (overlapping_memory_regions(requested_mrd,
						       other_mrd)) {
				return FFA_ERROR_INVALID_PARAMETER;
			}
		}

		inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
						       &obj_offset);
	}
	return 0;
}

static long spmc_ffa_fill_desc(struct mailbox *mbox,
			       struct spmc_shmem_obj *obj,
			       uint32_t fragment_length,
			       ffa_mtd_flag32_t mtd_flag,
			       uint32_t ffa_version,
			       void *smc_handle)
{
	int ret;
	size_t emad_size;
	uint32_t handle_low;
	uint32_t handle_high;
	struct ffa_emad_v1_0 *emad;
	struct ffa_emad_v1_0 *other_emad;

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	CASSERT(sizeof(mbox->rxtx_page_count) == 4, assert_bogus_page_count);
	if (fragment_length > (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB) {
		WARN("%s: bad fragment size %u > %" PRIu64 " buffer size\n", __func__,
		     fragment_length, (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (fragment_length > obj->desc_size - obj->desc_filled) {
		WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
		     fragment_length, obj->desc_size - obj->desc_filled);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	memcpy((uint8_t *)&obj->desc + obj->desc_filled,
	       (uint8_t *) mbox->tx_buffer, fragment_length);

	/* Ensure that the sender ID resides in the normal world. */
	if (ffa_is_secure_world_id(obj->desc.sender_id)) {
		WARN("%s: Invalid sender ID 0x%x.\n",
		     __func__, obj->desc.sender_id);
		ret = FFA_ERROR_DENIED;
		goto err_arg;
	}

	/* Ensure the NS bit is set to 0. */
	if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	/*
	 * We don't currently support any optional flags so ensure none are
	 * requested.
	 */
	if (obj->desc.flags != 0U && mtd_flag != 0U &&
	    (obj->desc.flags != mtd_flag)) {
		WARN("%s: invalid memory transaction flags %u != %u\n",
		     __func__, obj->desc.flags, mtd_flag);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (obj->desc_filled == 0U) {
		/* First fragment, descriptor header has been copied */
		ret = spmc_validate_mtd_start(&obj->desc, ffa_version,
					      fragment_length, obj->desc_size);
		if (ret != 0) {
			goto err_bad_desc;
		}

		obj->desc.handle = spmc_shmem_obj_state.next_handle++;
		obj->desc.flags |= mtd_flag;
	}

	obj->desc_filled += fragment_length;

	handle_low = (uint32_t)obj->desc.handle;
	handle_high = obj->desc.handle >> 32;

	if (obj->desc_filled != obj->desc_size) {
		SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
			 handle_high, obj->desc_filled,
			 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
	}

	/* The full descriptor has been received, perform any final checks. */

	ret = spmc_shmem_check_obj(obj, ffa_version);
	if (ret != 0) {
		goto err_bad_desc;
	}

	/* Ensure partition IDs are not duplicated. */
	for (size_t i = 0; i < obj->desc.emad_count; i++) {
		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
					       &emad_size);

		for (size_t j = i + 1; j < obj->desc.emad_count; j++) {
			other_emad = spmc_shmem_obj_get_emad(&obj->desc, j,
							     ffa_version,
							     &emad_size);

			if (emad->mapd.endpoint_id ==
			    other_emad->mapd.endpoint_id) {
				WARN("%s: Duplicated endpoint id 0x%x\n",
				     __func__, emad->mapd.endpoint_id);
				ret = FFA_ERROR_INVALID_PARAMETER;
				goto err_bad_desc;
			}
		}
	}

	ret = spmc_shmem_check_state_obj(obj, ffa_version);
	if (ret) {
		ERROR("%s: invalid memory region descriptor.\n", __func__);
		goto err_bad_desc;
	}

	/*
	 * Everything checks out, if the sender was using FF-A v1.0, convert
	 * the descriptor format to use the v1.1 structures.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		struct spmc_shmem_obj *v1_1_obj;
		uint64_t mem_handle;

		/* Calculate the size that the v1.1 descriptor will require. */
		uint64_t v1_1_desc_size =
		    spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
						      obj->desc_size);

		if (v1_1_desc_size > UINT32_MAX) {
			ret = FFA_ERROR_NO_MEMORY;
			goto err_arg;
		}

		/* Get a new obj to store the v1.1 descriptor. */
		v1_1_obj =
		    spmc_shmem_obj_alloc(&spmc_shmem_obj_state, (size_t)v1_1_desc_size);

		if (!v1_1_obj) {
			ret = FFA_ERROR_NO_MEMORY;
			goto err_arg;
		}

		/* Perform the conversion from v1.0 to v1.1. */
		v1_1_obj->desc_size = (uint32_t)v1_1_desc_size;
		v1_1_obj->desc_filled = (uint32_t)v1_1_desc_size;
		if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
			ERROR("%s: Could not convert mtd!\n", __func__);
			spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_arg;
		}

		/*
		 * We're finished with the v1.0 descriptor so free it
		 * and continue our checks with the new v1.1 descriptor.
		 */
		mem_handle = obj->desc.handle;
		spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
		obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
		if (obj == NULL) {
			ERROR("%s: Failed to find converted descriptor.\n",
			      __func__);
			ret = FFA_ERROR_INVALID_PARAMETER;
			return spmc_ffa_error_return(smc_handle, ret);
		}
	}

	/* Allow for platform specific operations to be performed. */
	ret = plat_spmc_shmem_begin(&obj->desc);
	if (ret != 0) {
		goto err_arg;
	}

	SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
		 0, 0, 0);

err_bad_desc:
err_arg:
	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
	return spmc_ffa_error_return(smc_handle, ret);
}

/**
 * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
 * @smc_fid:          FID of SMC, distinguishes FFA_MEM_SHARE from
 *                    FFA_MEM_LEND.
 * @secure_origin:    true if the call originated from the secure world.
 * @total_length:     Total length of shared memory descriptor.
 * @fragment_length:  Length of fragment of shared memory descriptor passed in
 *                    this call.
 * @address:          Not supported, must be 0.
 * @page_count:       Not supported, must be 0.
 * @handle:           Handle passed to smc call. Used to return
 *                    FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
 * to share or lend memory from non-secure os to secure os (with no stream
 * endpoints).
 *
 * Return: 0 on success, error code on failure.
 */
long spmc_ffa_mem_send(uint32_t smc_fid,
		       bool secure_origin,
		       uint64_t total_length,
		       uint32_t fragment_length,
		       uint64_t address,
		       uint32_t page_count,
		       void *cookie,
		       void *handle,
		       uint64_t flags)

{
	long ret;
	struct spmc_shmem_obj *obj;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	ffa_mtd_flag32_t mtd_flag;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	size_t min_desc_size;

	if (address != 0U || page_count != 0U) {
		WARN("%s: custom memory region for message not supported.\n",
		     __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (secure_origin) {
		WARN("%s: unsupported share direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		min_desc_size = sizeof(struct ffa_mtd_v1_0);
	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
		min_desc_size = sizeof(struct ffa_mtd);
	} else {
		WARN("%s: bad FF-A version.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check if the descriptor is too small for the FF-A version. */
	if (fragment_length < min_desc_size) {
		WARN("%s: bad first fragment size %u < %zu\n",
		     __func__, fragment_length, min_desc_size);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
		mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
	} else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
		mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
	} else {
		WARN("%s: invalid memory management operation.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);
	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
	if (obj == NULL) {
		ret = FFA_ERROR_NO_MEMORY;
		goto err_unlock;
	}

	spin_lock(&mbox->lock);
	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
				 ffa_version, handle);
	spin_unlock(&mbox->lock);

	spin_unlock(&spmc_shmem_obj_state.lock);
	return ret;

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}
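
/*
 * Illustrative call sequence for a fragmented share from the normal world
 * (lengths are example values only):
 *
 *	FFA_MEM_SHARE(total_length = 0x2000, fragment_length = 0x1000)
 *		=> FFA_MEM_FRAG_RX(handle, desc_filled = 0x1000)
 *	FFA_MEM_FRAG_TX(handle, fragment_length = 0x1000)
 *		=> FFA_SUCCESS(handle)
 *
 * Each fragment is staged through the sender's TX buffer into the object
 * allocated above; the final fragment triggers the full-descriptor
 * validation performed in spmc_ffa_fill_desc().
 */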

/**
 * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
 * @smc_fid:          FID of SMC.
 * @secure_origin:    true if the call originated from the secure world.
 * @handle_low:       Handle_low value returned from FFA_MEM_FRAG_RX.
 * @handle_high:      Handle_high value returned from FFA_MEM_FRAG_RX.
 * @fragment_length:  Length of fragments transmitted.
 * @sender_id:        Vmid of sender in bits [31:16].
 * @handle:           Handle passed to smc call. Used to return
 *                    FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Return: @handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
			  bool secure_origin,
			  uint64_t handle_low,
			  uint64_t handle_high,
			  uint32_t fragment_length,
			  uint32_t sender_id,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	long ret;
	uint32_t desc_sender_id;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);

	struct spmc_shmem_obj *obj;
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
		     __func__, mem_handle);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
	if (sender_id != desc_sender_id) {
		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
		     sender_id, desc_sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	if (obj->desc_filled == obj->desc_size) {
		WARN("%s: object desc already filled, %zu\n", __func__,
		     obj->desc_filled);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	spin_lock(&mbox->lock);
	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
				 handle);
	spin_unlock(&mbox->lock);

	spin_unlock(&spmc_shmem_obj_state.lock);
	return ret;

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor
 *				      if the caller implements a version greater
 *				      than FF-A 1.0 or if they have requested
 *				      the functionality.
 *				      TODO: We are assuming that the caller is
 *				      an SP. To support retrieval from the
 *				      normal world this function will need to be
 *				      expanded accordingly.
 * @resp:       Descriptor populated in caller's RX buffer.
 * @sp_ctx:     Context of the calling SP.
 */
void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
			 struct secure_partition_desc *sp_ctx)
{
	if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
	    sp_ctx->ns_bit_requested) {
		/*
		 * Currently memory senders must reside in the normal
		 * world, and we do not have the functionality to change
		 * the state of memory dynamically. Therefore we can always set
		 * the NS bit to 1.
		 */
		resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
	}
}

/**
 * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
 * @smc_fid:          FID of SMC.
 * @total_length:     Total length of retrieve request descriptor if this is
 *                    the first call. Otherwise (unsupported) must be 0.
 * @fragment_length:  Length of fragment of retrieve request descriptor passed
 *                    in this call. Only @fragment_length == @total_length is
 *                    supported by this implementation.
 * @address:          Not supported, must be 0.
 * @page_count:       Not supported, must be 0.
 * @handle:           Handle passed to smc call. Used to return
 *                    FFA_MEM_RETRIEVE_RESP.
 *
 * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
 * Used by secure os to retrieve memory already shared by non-secure os.
 * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
 * the client must call FFA_MEM_FRAG_RX until the full response has been
 * received.
 *
 * Return: @handle on success, error code on failure.
 */
1356long
1357spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
1358 bool secure_origin,
1359 uint32_t total_length,
1360 uint32_t fragment_length,
1361 uint64_t address,
1362 uint32_t page_count,
1363 void *cookie,
1364 void *handle,
1365 uint64_t flags)
1366{
1367 int ret;
1368 size_t buf_size;
Marc Bonnicid1907f02022-04-19 17:42:53 +01001369 size_t copy_size = 0;
1370 size_t min_desc_size;
1371 size_t out_desc_size = 0;
1372
1373 /*
1374 * Currently we are only accessing fields that are the same in both the
1375 * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
1376 * here. We only need validate against the appropriate struct size.
1377 */
1378 struct ffa_mtd *resp;
1379 const struct ffa_mtd *req;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001380 struct spmc_shmem_obj *obj = NULL;
1381 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
Marc Bonnicid1907f02022-04-19 17:42:53 +01001382 uint32_t ffa_version = get_partition_ffa_version(secure_origin);
Marc Bonnici08f28ef2022-04-19 16:52:59 +01001383 struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001384
1385 if (!secure_origin) {
1386 WARN("%s: unsupported retrieve req direction.\n", __func__);
1387 return spmc_ffa_error_return(handle,
1388 FFA_ERROR_INVALID_PARAMETER);
1389 }
1390
1391 if (address != 0U || page_count != 0U) {
1392 WARN("%s: custom memory region not supported.\n", __func__);
1393 return spmc_ffa_error_return(handle,
1394 FFA_ERROR_INVALID_PARAMETER);
1395 }
1396
1397 spin_lock(&mbox->lock);
1398
1399 req = mbox->tx_buffer;
1400 resp = mbox->rx_buffer;
1401 buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1402
1403 if (mbox->rxtx_page_count == 0U) {
1404 WARN("%s: buffer pair not registered.\n", __func__);
1405 ret = FFA_ERROR_INVALID_PARAMETER;
1406 goto err_unlock_mailbox;
1407 }
1408
1409 if (mbox->state != MAILBOX_STATE_EMPTY) {
1410 WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
1411 ret = FFA_ERROR_DENIED;
1412 goto err_unlock_mailbox;
1413 }
1414
1415 if (fragment_length != total_length) {
1416 WARN("%s: fragmented retrieve request not supported.\n",
1417 __func__);
1418 ret = FFA_ERROR_INVALID_PARAMETER;
1419 goto err_unlock_mailbox;
1420 }
1421
Marc Bonnici336630f2022-01-13 11:39:10 +00001422 if (req->emad_count == 0U) {
1423 WARN("%s: unsupported attribute desc count %u.\n",
1424 __func__, obj->desc.emad_count);
vallau01460d3962022-08-09 17:06:53 +02001425 ret = FFA_ERROR_INVALID_PARAMETER;
1426 goto err_unlock_mailbox;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001427 }

	/* Determine the appropriate minimum descriptor size. */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		min_desc_size = sizeof(struct ffa_mtd_v1_0);
	} else {
		min_desc_size = sizeof(struct ffa_mtd);
	}
	if (total_length < min_desc_size) {
		WARN("%s: invalid length %u < %zu\n", __func__, total_length,
		     min_desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (obj->desc_filled != obj->desc_size) {
		WARN("%s: incomplete object desc filled %zu < size %zu\n",
		     __func__, obj->desc_filled, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
		WARN("%s: wrong sender id 0x%x != 0x%x\n",
		     __func__, req->sender_id, obj->desc.sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->tag != obj->desc.tag) {
		WARN("%s: wrong tag 0x%lx != 0x%lx\n",
		     __func__, req->tag, obj->desc.tag);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
		WARN("%s: mismatch of endpoint counts %u != %u\n",
		     __func__, req->emad_count, obj->desc.emad_count);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/* Ensure the NS bit is set to 0 in the request. */
	if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->flags != 0U) {
		if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
		    (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
			/*
			 * If the retrieve request specifies the memory
			 * transaction type, ensure it matches what we expect.
			 */
			WARN("%s: wrong mem transaction flags %x != %x\n",
			     __func__, req->flags, obj->desc.flags);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}

		if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
		    req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
			/*
			 * The current implementation does not support donate
			 * and supports no other flags.
			 */
			WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}
	}

	/* Validate the caller is a valid participant. */
	if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
		WARN("%s: Invalid endpoint ID (0x%x).\n",
		     __func__, sp_ctx->sp_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/* Validate that each provided emad offset and structure is valid. */
	for (size_t i = 0; i < req->emad_count; i++) {
		size_t emad_size;
		struct ffa_emad_v1_0 *emad;

		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
					       &emad_size);

		/* The entire emad must lie within the transmitted request. */
		if ((uintptr_t) emad >= (uintptr_t)
				((uint8_t *) req + total_length) ||
		    ((uintptr_t) emad + emad_size) >
				(uintptr_t) ((uint8_t *) req + total_length)) {
			WARN("Invalid emad access.\n");
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}
	}

	/*
	 * Validate that all the endpoints match in the case of multiple
	 * borrowers. We don't mandate that the order of the borrowers
	 * must match in the descriptors, therefore check to see if the
	 * endpoints match in any order.
	 */
	for (size_t i = 0; i < req->emad_count; i++) {
		bool found = false;
		size_t emad_size;
		struct ffa_emad_v1_0 *emad;
		struct ffa_emad_v1_0 *other_emad;

		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
					       &emad_size);

		for (size_t j = 0; j < obj->desc.emad_count; j++) {
			other_emad = spmc_shmem_obj_get_emad(
					&obj->desc, j, MAKE_FFA_VERSION(1, 1),
					&emad_size);

			if (emad->mapd.endpoint_id ==
			    other_emad->mapd.endpoint_id) {
				found = true;
				break;
			}
		}

		if (!found) {
			WARN("%s: invalid receiver id (0x%x).\n",
			     __func__, emad->mapd.endpoint_id);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}
	}

	mbox->state = MAILBOX_STATE_FULL;

	if (req->emad_count != 0U) {
		obj->in_use++;
	}

	/*
	 * If the caller is v1.0, convert the descriptor, otherwise copy
	 * directly.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
							&copy_size,
							&out_desc_size);
		if (ret != 0) {
			ERROR("%s: Failed to process descriptor.\n", __func__);
			goto err_unlock_all;
		}
	} else {
		copy_size = MIN(obj->desc_size, buf_size);
		out_desc_size = obj->desc_size;

		memcpy(resp, &obj->desc, copy_size);
	}

	/* Set the NS bit in the response if applicable. */
	spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);

	spin_unlock(&spmc_shmem_obj_state.lock);
	spin_unlock(&mbox->lock);

	SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
		 copy_size, 0, 0, 0, 0, 0);

err_unlock_all:
	spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, ret);
}
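
/*
 * Illustrative sketch (not part of this implementation): how a v1.1
 * secure partition might populate its TX buffer before invoking the
 * retrieve request handled above. `sp_tx_buffer', `owner_id',
 * `mem_handle', `mem_tag' and the `ffa_smc' wrapper are hypothetical,
 * and the length calculation assumes the single emad immediately
 * follows the mtd; the descriptor fields are those validated by
 * spmc_ffa_mem_retrieve_req().
 *
 *	struct ffa_mtd *req = sp_tx_buffer;
 *	uint32_t len = sizeof(*req) + sizeof(struct ffa_emad_v1_0);
 *
 *	memset(req, 0, len);
 *	req->sender_id = owner_id;	// must match the sharing endpoint
 *	req->handle = mem_handle;	// handle returned by FFA_MEM_SHARE
 *	req->tag = mem_tag;		// must match the tag used to share
 *	req->flags = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
 *	req->emad_count = 1U;		// single borrower
 *	// NS bit in memory_region_attributes left 0 as required above.
 *	// ...populate one struct ffa_emad_v1_0 with this SP's endpoint id.
 *
 *	// Fragmented requests are rejected: fragment_length == total_length.
 *	ffa_smc(FFA_MEM_RETRIEVE_REQ, len, len, 0, 0, 0, 0, 0);
 */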

/**
 * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
 * @client:          Client state.
 * @handle_low:      Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
 * @handle_high:     Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
 * @fragment_offset: Byte offset in descriptor to resume at.
 * @sender_id:       Bit[31:16]: Endpoint id of sender if client is a
 *                   hypervisor. 0 otherwise.
 * @smc_handle:      Handle passed to smc call. Used to return
 *                   FFA_MEM_FRAG_TX.
 *
 * Return: @smc_handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
			  bool secure_origin,
			  uint32_t handle_low,
			  uint32_t handle_high,
			  uint32_t fragment_offset,
			  uint32_t sender_id,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	int ret;
	void *src;
	size_t buf_size;
	size_t copy_size;
	size_t full_copy_size;
	uint32_t desc_sender_id;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
	struct spmc_shmem_obj *obj;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);

	if (!secure_origin) {
		WARN("%s: can only be called from the secure world.\n",
		     __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		WARN("%s: invalid handle 0x%lx.\n",
		     __func__, mem_handle);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
	if (sender_id != 0U && sender_id != desc_sender_id) {
		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
		     sender_id, desc_sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	if (fragment_offset >= obj->desc_size) {
		WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
		     __func__, fragment_offset, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	spin_lock(&mbox->lock);

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (mbox->state != MAILBOX_STATE_EMPTY) {
		WARN("%s: RX Buffer is full!\n", __func__);
		ret = FFA_ERROR_DENIED;
		goto err_unlock_all;
	}

	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

	mbox->state = MAILBOX_STATE_FULL;

	/*
	 * If the caller is v1.0, convert the descriptor, otherwise copy
	 * directly.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		size_t out_desc_size;

		ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
							buf_size,
							fragment_offset,
							&copy_size,
							&out_desc_size);
		if (ret != 0) {
			ERROR("%s: Failed to process descriptor.\n", __func__);
			goto err_unlock_all;
		}
	} else {
		full_copy_size = obj->desc_size - fragment_offset;
		copy_size = MIN(full_copy_size, buf_size);

		src = &obj->desc;

		memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
	}

	spin_unlock(&mbox->lock);
	spin_unlock(&spmc_shmem_obj_state.lock);

	SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
		 copy_size, sender_id, 0, 0, 0);

err_unlock_all:
	spin_unlock(&mbox->lock);
err_unlock_shmem:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}
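
/*
 * Illustrative sketch (not part of this implementation): how a caller
 * might drain a descriptor that does not fit in its RX buffer.
 * FFA_MEM_RETRIEVE_RESP carries the full descriptor size in w1 and the
 * copied fragment size in w2; FFA_MEM_FRAG_TX carries the fragment size
 * in w3 (see the SMC_RET8 calls above). `ffa_smc', `ret.wN' and
 * `ffa_rx_release' are hypothetical helpers.
 *
 *	ret = ffa_smc(FFA_MEM_RETRIEVE_REQ, len, len, 0, 0, 0, 0, 0);
 *	total = ret.w1;			// out_desc_size
 *	received = ret.w2;		// copy_size of first fragment
 *
 *	while (received < total) {
 *		// Consume the fragment, then free the mailbox so the
 *		// next fragment finds MAILBOX_STATE_EMPTY.
 *		ffa_rx_release();
 *		ret = ffa_smc(FFA_MEM_FRAG_RX,
 *			      (uint32_t)mem_handle,		// handle_low
 *			      (uint32_t)(mem_handle >> 32),	// handle_high
 *			      received,		// fragment_offset so far
 *			      0, 0, 0, 0);	// sender_id: 0 unless hypervisor
 *		received += ret.w3;	// copy_size of this fragment
 *	}
 */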

/**
 * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
 * @client: Client state.
 *
 * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
 * Used by the secure os to release memory previously shared by the
 * non-secure os.
 *
 * The handle to release must be in the client's (secure os's) transmit buffer.
 *
 * Return: 0 on success, error code on failure.
 */
int spmc_ffa_mem_relinquish(uint32_t smc_fid,
			    bool secure_origin,
			    uint32_t handle_low,
			    uint32_t handle_high,
			    uint32_t fragment_offset,
			    uint32_t sender_id,
			    void *cookie,
			    void *handle,
			    uint64_t flags)
{
	int ret;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	struct spmc_shmem_obj *obj;
	const struct ffa_mem_relinquish_descriptor *req;
	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();

	if (!secure_origin) {
		WARN("%s: unsupported relinquish direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&mbox->lock);

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	req = mbox->tx_buffer;

	if (req->flags != 0U) {
		WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	if (req->endpoint_count == 0U) {
		WARN("%s: endpoint count cannot be 0.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/*
	 * Validate the endpoint ID was populated correctly. We don't currently
	 * support proxy endpoints, so the endpoint count should always be 1.
	 */
	if (req->endpoint_count != 1U) {
		WARN("%s: unsupported endpoint count %u != 1\n", __func__,
		     req->endpoint_count);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/* Validate the provided endpoint ID matches the partition ID. */
	if (req->endpoint_array[0] != sp_ctx->sp_id) {
		WARN("%s: invalid endpoint ID %u != %u\n", __func__,
		     req->endpoint_array[0], sp_ctx->sp_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/* Validate the caller is a valid participant. */
	if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
		WARN("%s: Invalid endpoint ID (0x%x).\n",
		     __func__, req->endpoint_array[0]);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (obj->in_use == 0U) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}
	obj->in_use--;

	spin_unlock(&spmc_shmem_obj_state.lock);
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock_all:
	spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, ret);
}
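
/*
 * Illustrative sketch (not part of this implementation): the relinquish
 * descriptor a secure partition would place in its TX buffer before
 * invoking FFA_MEM_RELINQUISH. `sp_tx_buffer', `mem_handle', `own_sp_id'
 * and `ffa_smc' are hypothetical; the fields are those checked above.
 *
 *	struct ffa_mem_relinquish_descriptor *rel = sp_tx_buffer;
 *
 *	rel->handle = mem_handle;	// handle being released
 *	rel->flags = 0U;		// no flags supported here
 *	rel->endpoint_count = 1U;	// proxy endpoints unsupported
 *	rel->endpoint_array[0] = own_sp_id;	// must be a participant
 *
 *	ffa_smc(FFA_MEM_RELINQUISH, 0, 0, 0, 0, 0, 0, 0);
 */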

/**
 * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
 * @client:      Client state.
 * @handle_low:  Unique handle of shared memory object to reclaim. Bit[31:0].
 * @handle_high: Unique handle of shared memory object to reclaim.
 *               Bit[63:32].
 * @flags:       Unsupported, ignored.
 *
 * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
 * Used by the non-secure os to reclaim memory previously shared with the
 * secure os.
 *
 * Return: 0 on success, error code on failure.
 */
int spmc_ffa_mem_reclaim(uint32_t smc_fid,
			 bool secure_origin,
			 uint32_t handle_low,
			 uint32_t handle_high,
			 uint32_t mem_flags,
			 uint64_t x4,
			 void *cookie,
			 void *handle,
			 uint64_t flags)
{
	int ret;
	struct spmc_shmem_obj *obj;
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

	if (secure_origin) {
		WARN("%s: unsupported reclaim direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (mem_flags != 0U) {
		WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}
	if (obj->in_use != 0U) {
		ret = FFA_ERROR_DENIED;
		goto err_unlock;
	}

	if (obj->desc_filled != obj->desc_size) {
		WARN("%s: incomplete object desc filled %zu < size %zu\n",
		     __func__, obj->desc_filled, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	/* Allow for platform specific operations to be performed. */
	ret = plat_spmc_shmem_reclaim(&obj->desc);
	if (ret != 0) {
		goto err_unlock;
	}

	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
	spin_unlock(&spmc_shmem_obj_state.lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}
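
/*
 * Illustrative sketch (not part of this implementation): a normal world
 * caller reclaiming memory once every borrower has relinquished it
 * (obj->in_use == 0). `mem_handle' and `ffa_smc' are hypothetical; the
 * handle is split exactly as it is reassembled above.
 *
 *	ret = ffa_smc(FFA_MEM_RECLAIM,
 *		      (uint32_t)mem_handle,		// handle_low
 *		      (uint32_t)(mem_handle >> 32),	// handle_high
 *		      0,		// mem_flags, must be zero here
 *		      0, 0, 0, 0);
 *	// On success the SPMC frees the object and returns FFA_SUCCESS_SMC32.
 */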