/*
 * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <assert.h>
#include <errno.h>
#include <inttypes.h>

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/object_pool.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <services/ffa_svc.h>
#include "spmc.h"
#include "spmc_shared_mem.h"

#include <platform_def.h>

/**
 * struct spmc_shmem_obj - Shared memory object.
 * @desc_size:      Size of @desc.
 * @desc_filled:    Size of @desc already received.
 * @in_use:         Number of clients that have called ffa_mem_retrieve_req
 *                  without a matching ffa_mem_relinquish call.
 * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
 */
struct spmc_shmem_obj {
	size_t desc_size;
	size_t desc_filled;
	size_t in_use;
	struct ffa_mtd desc;
};

/*
 * Declare our data structure to store the metadata of memory share requests.
 * The main datastore is allocated on a per-platform basis to ensure enough
 * storage can be made available.
 * The address of the datastore will be populated by the SPMC during its
 * initialization.
 */

struct spmc_shmem_obj_state spmc_shmem_obj_state = {
	/* Set start value for handle so top 32 bits are needed quickly. */
	.next_handle = 0xffffffc0U,
};
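
/*
 * Illustrative sketch (not part of this file): a platform could back the
 * datastore with a static carveout assigned during SPMC boot. The buffer
 * name, size macro and init hook below are assumptions for illustration
 * only; the real mechanism is defined by the platform port.
 *
 *	static uint8_t plat_shmem_datastore[PLAT_SPMC_SHMEM_DATASTORE_SIZE];
 *
 *	void plat_spmc_shmem_datastore_init(void)
 *	{
 *		spmc_shmem_obj_state.data = plat_shmem_datastore;
 *		spmc_shmem_obj_state.data_size = sizeof(plat_shmem_datastore);
 *	}
 */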

/**
 * spmc_shmem_obj_size - Convert from descriptor size to object size.
 * @desc_size: Size of struct ffa_memory_region_descriptor object.
 *
 * Return: Size of struct spmc_shmem_obj object.
 */
static size_t spmc_shmem_obj_size(size_t desc_size)
{
	return desc_size + offsetof(struct spmc_shmem_obj, desc);
}

/**
 * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
 * @state:      Global state.
 * @desc_size:  Size of struct ffa_memory_region_descriptor object that
 *              allocated object will hold.
 *
 * Return: Pointer to newly allocated object, or %NULL if there is not enough
 *         space left. The returned pointer is only valid while @state is
 *         locked; to use it again after unlocking @state,
 *         spmc_shmem_obj_lookup must be called.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
{
	struct spmc_shmem_obj *obj;
	size_t free = state->data_size - state->allocated;
	size_t obj_size;

	if (state->data == NULL) {
		ERROR("Missing shmem datastore!\n");
		return NULL;
	}

	/* Ensure that the descriptor size is aligned. */
	if (!is_aligned(desc_size, 16)) {
		WARN("%s(0x%zx) desc_size not 16-byte aligned\n",
		     __func__, desc_size);
		return NULL;
	}

	obj_size = spmc_shmem_obj_size(desc_size);

	/* Ensure the obj size has not overflowed. */
	if (obj_size < desc_size) {
		WARN("%s(0x%zx) desc_size overflow\n",
		     __func__, desc_size);
		return NULL;
	}

	if (obj_size > free) {
		WARN("%s(0x%zx) failed, free 0x%zx\n",
		     __func__, desc_size, free);
		return NULL;
	}
	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
	obj->desc = (struct ffa_mtd) {0};
	obj->desc_size = desc_size;
	obj->desc_filled = 0;
	obj->in_use = 0;
	state->allocated += obj_size;
	return obj;
}

/**
 * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
 * @state:      Global state.
 * @obj:        Object to free.
 *
 * Release memory used by @obj. Other objects may move, so on return all
 * pointers to struct spmc_shmem_obj objects should be considered invalid, not
 * just @obj.
 *
 * The current implementation always compacts the remaining objects to simplify
 * the allocator and to avoid fragmentation.
 */

static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
				struct spmc_shmem_obj *obj)
{
	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
	uint8_t *shift_dest = (uint8_t *)obj;
	uint8_t *shift_src = shift_dest + free_size;
	size_t shift_size = state->allocated - (shift_src - state->data);

	if (shift_size != 0U) {
		memmove(shift_dest, shift_src, shift_size);
	}
	state->allocated -= free_size;
}

/**
 * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
 * @state:      Global state.
 * @handle:     Unique handle of object to return.
 *
 * Return: struct spmc_shmem_obj object with handle matching @handle.
 *         %NULL, if no object in @state->data has a matching handle.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
{
	uint8_t *curr = state->data;

	while (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		if (obj->desc.handle == handle) {
			return obj;
		}
		curr += spmc_shmem_obj_size(obj->desc_size);
	}
	return NULL;
}
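
/*
 * Usage sketch (illustrative only): objects returned by the allocator or by
 * lookup are positional, so they must only be dereferenced while
 * spmc_shmem_obj_state.lock is held, and must be re-looked-up by handle
 * after any call that may free (and therefore move) objects.
 *
 *	spin_lock(&spmc_shmem_obj_state.lock);
 *	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
 *	if (obj != NULL) {
 *		// ... use obj; any free may relocate other objects ...
 *	}
 *	spin_unlock(&spmc_shmem_obj_state.lock);
 */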

/**
 * spmc_shmem_obj_get_next - Get the next memory object from an offset.
 * @state:      Global state.
 * @offset:     Offset used to track which objects have previously been
 *              returned.
 *
 * Return: the next struct spmc_shmem_obj object from the provided offset.
 *         %NULL, if there are no more objects.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
{
	uint8_t *curr = state->data + *offset;

	if (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		*offset += spmc_shmem_obj_size(obj->desc_size);

		return obj;
	}
	return NULL;
}
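
/*
 * Iteration sketch (illustrative only): walk every object currently in the
 * datastore, e.g. to scan for conflicting transactions, while holding the
 * state lock.
 *
 *	size_t offset = 0;
 *	struct spmc_shmem_obj *cur;
 *
 *	while ((cur = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
 *					      &offset)) != NULL) {
 *		// ... inspect cur ...
 *	}
 */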
187
Marc Bonnicid1907f02022-04-19 17:42:53 +0100188/*******************************************************************************
189 * FF-A memory descriptor helper functions.
190 ******************************************************************************/
191/**
192 * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
193 * clients FF-A version.
194 * @desc: The memory transaction descriptor.
195 * @index: The index of the emad element to be accessed.
196 * @ffa_version: FF-A version of the provided structure.
197 * @emad_size: Will be populated with the size of the returned emad
198 * descriptor.
199 * Return: A pointer to the requested emad structure.
200 */
201static void *
202spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
203 uint32_t ffa_version, size_t *emad_size)
204{
205 uint8_t *emad;
Demi Marie Obenour32167a02023-01-11 10:51:01 -0500206
207 assert(index < desc->emad_count);
208
Marc Bonnicid1907f02022-04-19 17:42:53 +0100209 /*
210 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
211 * format, otherwise assume it is a v1.1 format.
212 */
213 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
Demi Marie Obenour57bf10c2022-12-31 11:11:18 -0500214 emad = (uint8_t *)desc + offsetof(struct ffa_mtd_v1_0, emad);
Marc Bonnicid1907f02022-04-19 17:42:53 +0100215 *emad_size = sizeof(struct ffa_emad_v1_0);
216 } else {
Demi Marie Obenour57bf10c2022-12-31 11:11:18 -0500217 assert(is_aligned(desc->emad_offset, 16));
Marc Bonnicid1907f02022-04-19 17:42:53 +0100218 emad = ((uint8_t *) desc + desc->emad_offset);
219 *emad_size = desc->emad_size;
220 }
Demi Marie Obenour57bf10c2022-12-31 11:11:18 -0500221
222 assert(((uint64_t)index * (uint64_t)*emad_size) <= UINT32_MAX);
Marc Bonnicid1907f02022-04-19 17:42:53 +0100223 return (emad + (*emad_size * index));
224}
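
/*
 * Example (illustrative only): iterate over every endpoint descriptor of a
 * transaction without caring whether the sender used the v1.0 or v1.1
 * layout; the version-specific stride is returned in emad_size.
 *
 *	for (uint32_t i = 0U; i < desc->emad_count; i++) {
 *		size_t emad_size;
 *		struct ffa_emad_v1_0 *emad;
 *
 *		emad = spmc_shmem_obj_get_emad(desc, i, ffa_version,
 *					       &emad_size);
 *		// ... bounds-check emad before dereferencing ...
 *	}
 */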

/**
 * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
 *                               FF-A version of the descriptor.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: struct ffa_comp_mrd object corresponding to the composite memory
 *         region descriptor.
 */
static struct ffa_comp_mrd *
spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
{
	size_t emad_size;
	/*
	 * The comp_mrd_offset field of the emad descriptor remains consistent
	 * between FF-A versions therefore we can use the v1.0 descriptor here
	 * in all cases.
	 */
	struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
							     ffa_version,
							     &emad_size);

	/* Ensure the composite descriptor offset is aligned. */
	if (!is_aligned(emad->comp_mrd_offset, 8)) {
		WARN("Unaligned composite memory region descriptor offset.\n");
		return NULL;
	}

	return (struct ffa_comp_mrd *)
	       ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
}

/**
 * spmc_shmem_obj_validate_id - Validate that a partition ID is participating
 *                              in a given memory transaction.
 * @obj:    The shared memory object containing the descriptor
 *          of the memory transaction.
 * @sp_id:  Partition ID to validate.
 * Return: true if ID is valid, else false.
 */
bool spmc_shmem_obj_validate_id(struct spmc_shmem_obj *obj, uint16_t sp_id)
{
	bool found = false;
	struct ffa_mtd *desc = &obj->desc;
	size_t desc_size = obj->desc_size;

	/* Validate the partition is a valid participant. */
	for (unsigned int i = 0U; i < desc->emad_count; i++) {
		size_t emad_size;
		struct ffa_emad_v1_0 *emad;

		emad = spmc_shmem_obj_get_emad(desc, i,
					       MAKE_FFA_VERSION(1, 1),
					       &emad_size);
		/*
		 * Validate that the calculated emad address resides within the
		 * descriptor.
		 */
		if ((emad == NULL) || (uintptr_t) emad >=
		    (uintptr_t)((uint8_t *) desc + desc_size)) {
			VERBOSE("Invalid emad.\n");
			break;
		}
		if (sp_id == emad->mapd.endpoint_id) {
			found = true;
			break;
		}
	}
	return found;
}

/*
 * Compare two memory regions to determine if any range overlaps with another
 * ongoing memory transaction.
 */
static bool
overlapping_memory_regions(struct ffa_comp_mrd *region1,
			   struct ffa_comp_mrd *region2)
{
	uint64_t region1_start;
	uint64_t region1_size;
	uint64_t region1_end;
	uint64_t region2_start;
	uint64_t region2_size;
	uint64_t region2_end;

	assert(region1 != NULL);
	assert(region2 != NULL);

	if (region1 == region2) {
		return true;
	}

	/*
	 * Check each memory region in the request against existing
	 * transactions.
	 */
	for (size_t i = 0; i < region1->address_range_count; i++) {

		region1_start = region1->address_range_array[i].address;
		region1_size =
			region1->address_range_array[i].page_count *
			PAGE_SIZE_4KB;
		region1_end = region1_start + region1_size;

		for (size_t j = 0; j < region2->address_range_count; j++) {

			region2_start = region2->address_range_array[j].address;
			region2_size =
				region2->address_range_array[j].page_count *
				PAGE_SIZE_4KB;
			region2_end = region2_start + region2_size;

			/*
			 * The ranges overlap unless one ends at or before the
			 * start of the other.
			 */
			if (!((region2_end <= region1_start) ||
			      (region1_end <= region2_start))) {
				WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
				     region1_start, region1_end,
				     region2_start, region2_end);
				return true;
			}
		}
	}
	return false;
}
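
/*
 * Worked example (illustrative): with 4KB pages, a range of 2 pages at
 * 0x8000 covers [0x8000, 0xa000) and a range of 2 pages at 0x9000 covers
 * [0x9000, 0xb000); neither 0xa000 <= 0x9000 nor 0xb000 <= 0x8000 holds,
 * so they overlap. Ranges that only touch at a boundary, e.g.
 * [0x8000, 0x9000) and [0x9000, 0xa000), are not considered overlapping.
 */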

/*******************************************************************************
 * FF-A v1.0 Memory Descriptor Conversion Helpers.
 ******************************************************************************/
/**
 * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
 *                                     converted descriptor.
 * @orig:       The original v1.0 memory transaction descriptor.
 * @desc_size:  The size of the original v1.0 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.1 format.
 */
static uint64_t
spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
{
	uint64_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = orig->emad;

	/* Get the size of the v1.1 descriptor. */
	size += sizeof(struct ffa_mtd);

	/* Add the size of the emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrds. */
	size += sizeof(struct ffa_comp_mrd);

	/* Add the size of the constituent mrds. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Add the size of the memory region descriptors. */
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}

/**
 * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
 *                                     converted descriptor.
 * @orig:       The original v1.1 memory transaction descriptor.
 * @desc_size:  The size of the original v1.1 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.0 format.
 */
static size_t
spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
{
	size_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
					   ((uint8_t *) orig +
					    orig->emad_offset);

	/* Get the size of the v1.0 descriptor. */
	size += sizeof(struct ffa_mtd_v1_0);

	/* Add the size of the v1.0 emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrds. */
	size += sizeof(struct ffa_comp_mrd);

	/* Add the size of the constituent mrds. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Check the calculated address is within the memory descriptor. */
	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
	    (uintptr_t)((uint8_t *) orig + desc_size)) {
		return 0;
	}
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}

/**
 * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
 * @out_obj:  The shared memory object to populate the converted descriptor.
 * @orig:     The shared memory object containing the v1.0 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
				     struct spmc_shmem_obj *orig)
{
	struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
	struct ffa_mtd *out = &out_obj->desc;
	struct ffa_emad_v1_0 *emad_array_in;
	struct ffa_emad_v1_0 *emad_array_out;
	struct ffa_comp_mrd *mrd_in;
	struct ffa_comp_mrd *mrd_out;

	size_t mrd_in_offset;
	size_t mrd_out_offset;
	size_t mrd_size = 0;

	/* Populate the new descriptor format from the v1.0 struct. */
	out->sender_id = mtd_orig->sender_id;
	out->memory_region_attributes = mtd_orig->memory_region_attributes;
	out->flags = mtd_orig->flags;
	out->handle = mtd_orig->handle;
	out->tag = mtd_orig->tag;
	out->emad_count = mtd_orig->emad_count;
	out->emad_size = sizeof(struct ffa_emad_v1_0);

	/*
	 * We will locate the emad descriptors directly after the ffa_mtd
	 * struct. This will be 8-byte aligned.
	 */
	out->emad_offset = sizeof(struct ffa_mtd);

	emad_array_in = mtd_orig->emad;
	emad_array_out = (struct ffa_emad_v1_0 *)
			 ((uint8_t *) out + out->emad_offset);

	/* Copy across the emad structs. */
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		/* Bounds check for the emad array. */
		if (((uint8_t *)emad_array_in + sizeof(struct ffa_emad_v1_0)) >
		    ((uint8_t *) mtd_orig + orig->desc_size)) {
			VERBOSE("%s: Invalid mtd structure.\n", __func__);
			return false;
		}
		memcpy(&emad_array_out[i], &emad_array_in[i],
		       sizeof(struct ffa_emad_v1_0));
	}

	/* Place the mrd descriptors after the end of the emad descriptors. */
	mrd_in_offset = emad_array_in->comp_mrd_offset;
	mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

	/* Add the size of the composite memory region descriptor. */
	mrd_size += sizeof(struct ffa_comp_mrd);

	/* Find the mrd descriptor. */
	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

	/* Add the size of the constituent memory region descriptors. */
	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

	/*
	 * Update the offset in the emads by the delta between the input and
	 * output addresses.
	 */
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		emad_array_out[i].comp_mrd_offset =
			emad_array_in[i].comp_mrd_offset +
			(mrd_out_offset - mrd_in_offset);
	}

	/* Verify that we stay within bounds of the memory descriptors. */
	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
	    (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
		ERROR("%s: Invalid mrd structure.\n", __func__);
		return false;
	}

	/* Copy the mrd descriptors directly. */
	memcpy(mrd_out, mrd_in, mrd_size);

	return true;
}
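
/*
 * Resulting v1.1 object layout (illustrative, derived from the conversion
 * above):
 *
 *	+------------------------------+ 0
 *	| struct ffa_mtd               |
 *	+------------------------------+ emad_offset = sizeof(struct ffa_mtd)
 *	| emad[0] ... emad[count - 1]  |
 *	+------------------------------+ comp_mrd_offset
 *	| struct ffa_comp_mrd          |
 *	| struct ffa_cons_mrd[...]     |
 *	+------------------------------+
 */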

/**
 * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to a
 *                                v1.0 memory object.
 * @out_obj:  The shared memory object to populate the v1.0 descriptor.
 * @orig:     The shared memory object containing the v1.1 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
			     struct spmc_shmem_obj *orig)
{
	struct ffa_mtd *mtd_orig = &orig->desc;
	struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
	struct ffa_emad_v1_0 *emad_in;
	struct ffa_emad_v1_0 *emad_array_in;
	struct ffa_emad_v1_0 *emad_array_out;
	struct ffa_comp_mrd *mrd_in;
	struct ffa_comp_mrd *mrd_out;

	size_t mrd_in_offset;
	size_t mrd_out_offset;
	size_t emad_out_array_size;
	size_t mrd_size = 0;
	size_t orig_desc_size = orig->desc_size;

	/* Populate the v1.0 descriptor format from the v1.1 struct. */
	out->sender_id = mtd_orig->sender_id;
	out->memory_region_attributes = mtd_orig->memory_region_attributes;
	out->flags = mtd_orig->flags;
	out->handle = mtd_orig->handle;
	out->tag = mtd_orig->tag;
	out->emad_count = mtd_orig->emad_count;

	/* Determine the location of the emad array in both descriptors. */
	emad_array_in = (struct ffa_emad_v1_0 *)
			((uint8_t *) mtd_orig + mtd_orig->emad_offset);
	emad_array_out = out->emad;

	/* Copy across the emad structs. */
	emad_in = emad_array_in;
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		/* Bounds check for the emad array. */
		if (((uint8_t *)emad_in + sizeof(struct ffa_emad_v1_0)) >
		    ((uint8_t *) mtd_orig + orig_desc_size)) {
			VERBOSE("%s: Invalid mtd structure.\n", __func__);
			return false;
		}
		memcpy(&emad_array_out[i], emad_in,
		       sizeof(struct ffa_emad_v1_0));

		/* Advance by emad_size bytes, not emad_size elements. */
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *)emad_in + mtd_orig->emad_size);
	}

	/* Place the mrd descriptors after the end of the emad descriptors. */
	emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;

	mrd_out_offset = (uint8_t *) out->emad - (uint8_t *) out +
			 emad_out_array_size;

	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

	mrd_in_offset = mtd_orig->emad_offset +
			(mtd_orig->emad_size * mtd_orig->emad_count);

	/* Add the size of the composite memory region descriptor. */
	mrd_size += sizeof(struct ffa_comp_mrd);

	/* Find the mrd descriptor. */
	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

	/* Add the size of the constituent memory region descriptors. */
	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

	/*
	 * Update the offset in the emads by the delta between the input and
	 * output addresses.
	 */
	emad_in = emad_array_in;

	for (unsigned int i = 0U; i < out->emad_count; i++) {
		emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
						    (mrd_out_offset -
						     mrd_in_offset);
		/* Advance by emad_size bytes, not emad_size elements. */
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *)emad_in + mtd_orig->emad_size);
	}

	/* Verify that we stay within bounds of the memory descriptors. */
	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
	    (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
		ERROR("%s: Invalid mrd structure.\n", __func__);
		return false;
	}

	/* Copy the mrd descriptors directly. */
	memcpy(mrd_out, mrd_in, mrd_size);

	return true;
}

/**
 * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
 *                                     the v1.0 format and populates the
 *                                     provided buffer.
 * @dst:             Buffer to populate v1.0 ffa_memory_region_descriptor.
 * @orig_obj:        Object containing v1.1 ffa_memory_region_descriptor.
 * @buf_size:        Size of the buffer to populate.
 * @offset:          The offset of the converted descriptor to copy.
 * @copy_size:       Will be populated with the number of bytes copied.
 * @v1_0_desc_size:  Will be populated with the total size of the v1.0
 *                   descriptor.
 *
 * Return: 0 if conversion and population succeeded.
 * Note: This function invalidates the reference to @orig_obj, therefore
 * `spmc_shmem_obj_lookup` must be called if further usage is required.
 */
static uint32_t
spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
				  size_t buf_size, size_t offset,
				  size_t *copy_size, size_t *v1_0_desc_size)
{
	struct spmc_shmem_obj *v1_0_obj;

	/* Calculate the size that the v1.0 descriptor will require. */
	*v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
			  &orig_obj->desc, orig_obj->desc_size);

	if (*v1_0_desc_size == 0) {
		ERROR("%s: cannot determine size of descriptor.\n",
		      __func__);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/* Get a new obj to store the v1.0 descriptor. */
	v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
					*v1_0_desc_size);

	if (!v1_0_obj) {
		return FFA_ERROR_NO_MEMORY;
	}

	/* Perform the conversion from v1.1 to v1.0. */
	if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
		spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	*copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
	memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);

	/*
	 * We're finished with the v1.0 descriptor for now so free it.
	 * Note that this will invalidate any references to the v1.1
	 * descriptor.
	 */
	spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);

	return 0;
}

static int
spmc_validate_mtd_start(struct ffa_mtd *desc, uint32_t ffa_version,
			size_t fragment_length, size_t total_length)
{
	unsigned long long emad_end;
	unsigned long long emad_size;
	unsigned long long emad_offset;
	unsigned int min_desc_size;

	/* Determine the appropriate minimum descriptor size. */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		min_desc_size = sizeof(struct ffa_mtd_v1_0);
	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
		min_desc_size = sizeof(struct ffa_mtd);
	} else {
		return FFA_ERROR_INVALID_PARAMETER;
	}
	if (fragment_length < min_desc_size) {
		WARN("%s: invalid length %zu < %u\n", __func__, fragment_length,
		     min_desc_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	if (desc->emad_count == 0U) {
		WARN("%s: unsupported attribute desc count %u.\n",
		     __func__, desc->emad_count);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/*
	 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
	 * format, otherwise assume it is a v1.1 format.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		emad_offset = emad_size = sizeof(struct ffa_emad_v1_0);
	} else {
		if (!is_aligned(desc->emad_offset, 16)) {
			WARN("%s: Emad offset %" PRIx32 " is not 16-byte aligned.\n",
			     __func__, desc->emad_offset);
			return FFA_ERROR_INVALID_PARAMETER;
		}
		if (desc->emad_offset < sizeof(struct ffa_mtd)) {
			WARN("%s: Emad offset too small: 0x%" PRIx32 " < 0x%zx.\n",
			     __func__, desc->emad_offset,
			     sizeof(struct ffa_mtd));
			return FFA_ERROR_INVALID_PARAMETER;
		}
		emad_offset = desc->emad_offset;
		if (desc->emad_size < sizeof(struct ffa_emad_v1_0)) {
			WARN("%s: Bad emad size (%" PRIu32 " < %zu).\n", __func__,
			     desc->emad_size, sizeof(struct ffa_emad_v1_0));
			return FFA_ERROR_INVALID_PARAMETER;
		}
		if (!is_aligned(desc->emad_size, 16)) {
			WARN("%s: Emad size 0x%" PRIx32 " is not 16-byte aligned.\n",
			     __func__, desc->emad_size);
			return FFA_ERROR_INVALID_PARAMETER;
		}
		emad_size = desc->emad_size;
	}

	/*
	 * Overflow is impossible: the arithmetic happens in at least 64-bit
	 * precision, but all of the operands are bounded by UINT32_MAX, and
	 * ((2^32 - 1)^2 + (2^32 - 1) + (2^32 - 1)) = ((2^32 - 1) * (2^32 + 1))
	 * = (2^64 - 1).
	 */
	CASSERT(sizeof(desc->emad_count) == 4, assert_emad_count_max_too_large);
	emad_end = (desc->emad_count * (unsigned long long)emad_size) +
		   (unsigned long long)sizeof(struct ffa_comp_mrd) +
		   (unsigned long long)emad_offset;

	if (emad_end > total_length) {
		WARN("%s: Composite memory region extends beyond descriptor: 0x%llx > 0x%zx\n",
		     __func__, emad_end, total_length);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	return 0;
}
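
/*
 * Worked example (illustrative): for a v1.1 descriptor with emad_offset
 * 0x30, emad_size 0x10 and emad_count 2, the check above requires
 * 0x30 + (2 * 0x10) + sizeof(struct ffa_comp_mrd) bytes, i.e. the emad
 * array plus at least a composite descriptor header, to fit within
 * total_length.
 */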

static inline const struct ffa_emad_v1_0 *
emad_advance(const struct ffa_emad_v1_0 *emad, size_t offset)
{
	return (const struct ffa_emad_v1_0 *)((const uint8_t *)emad + offset);
}

/**
 * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if the
 * constituent_memory_region_descriptor offset or count is invalid.
 */
static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
				uint32_t ffa_version)
{
	unsigned long long total_page_count;
	const struct ffa_emad_v1_0 *first_emad;
	const struct ffa_emad_v1_0 *end_emad;
	size_t emad_size;
	uint32_t comp_mrd_offset;
	size_t header_emad_size;
	size_t size;
	size_t count;
	size_t expected_size;
	const struct ffa_comp_mrd *comp;

	if (obj->desc_filled != obj->desc_size) {
		ERROR("BUG: %s called on incomplete object (%zu != %zu)\n",
		      __func__, obj->desc_filled, obj->desc_size);
		panic();
	}

	if (spmc_validate_mtd_start(&obj->desc, ffa_version,
				    obj->desc_filled, obj->desc_size)) {
		ERROR("BUG: %s called on object with corrupt memory region descriptor\n",
		      __func__);
		panic();
	}

	first_emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
					     ffa_version, &emad_size);
	end_emad = emad_advance(first_emad, obj->desc.emad_count * emad_size);
	comp_mrd_offset = first_emad->comp_mrd_offset;

	/* Loop through the endpoint descriptors, validating each of them. */
	for (const struct ffa_emad_v1_0 *emad = first_emad; emad < end_emad;) {
		ffa_endpoint_id16_t ep_id;

		/*
		 * If a partition ID resides in the secure world validate that
		 * the partition ID is for a known partition. Ignore any
		 * partition ID belonging to the normal world as it is assumed
		 * the Hypervisor will have validated these.
		 */
		ep_id = emad->mapd.endpoint_id;
		if (ffa_is_secure_world_id(ep_id)) {
			if (spmc_get_sp_ctx(ep_id) == NULL) {
				WARN("%s: Invalid receiver id 0x%x\n",
				     __func__, ep_id);
				return FFA_ERROR_INVALID_PARAMETER;
			}
		}

		/*
		 * The offset provided to the composite memory region descriptor
		 * should be consistent across endpoint descriptors.
		 */
		if (comp_mrd_offset != emad->comp_mrd_offset) {
			ERROR("%s: mismatching offsets provided, %u != %u\n",
			      __func__, emad->comp_mrd_offset, comp_mrd_offset);
			return FFA_ERROR_INVALID_PARAMETER;
		}

		/* Advance to the next endpoint descriptor. */
		emad = emad_advance(emad, emad_size);

		/*
		 * Ensure neither this emad nor any subsequent emads have
		 * the same partition ID as the previous emad.
		 */
		for (const struct ffa_emad_v1_0 *other_emad = emad;
		     other_emad < end_emad;
		     other_emad = emad_advance(other_emad, emad_size)) {
			if (ep_id == other_emad->mapd.endpoint_id) {
				WARN("%s: Duplicated endpoint id 0x%x\n",
				     __func__, other_emad->mapd.endpoint_id);
				return FFA_ERROR_INVALID_PARAMETER;
			}
		}
	}

	header_emad_size = (size_t)((const uint8_t *)end_emad -
				    (const uint8_t *)&obj->desc);

	/*
	 * Check that the composite descriptor
	 * is after the endpoint descriptors.
	 */
	if (comp_mrd_offset < header_emad_size) {
		WARN("%s: invalid object, offset %u < header + emad %zu\n",
		     __func__, comp_mrd_offset, header_emad_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/* Ensure the composite descriptor offset is aligned. */
	if (!is_aligned(comp_mrd_offset, 16)) {
		WARN("%s: invalid object, unaligned composite memory "
		     "region descriptor offset %u.\n",
		     __func__, comp_mrd_offset);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	size = obj->desc_size;

	/* Check that the composite descriptor is in bounds. */
	if (comp_mrd_offset > size) {
		WARN("%s: invalid object, offset %u > total size %zu\n",
		     __func__, comp_mrd_offset, obj->desc_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}
	size -= comp_mrd_offset;

	/* Check that there is enough space for the composite descriptor. */
	if (size < sizeof(struct ffa_comp_mrd)) {
		WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
		     __func__, comp_mrd_offset, obj->desc_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}
	size -= sizeof(*comp);

	count = size / sizeof(struct ffa_cons_mrd);

	comp = (const struct ffa_comp_mrd *)
	       ((const uint8_t *)(&obj->desc) + comp_mrd_offset);

	if (comp->address_range_count != count) {
		WARN("%s: invalid object, desc count %u != %zu\n",
		     __func__, comp->address_range_count, count);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/* Ensure that the expected and actual sizes are equal. */
	expected_size = comp_mrd_offset + sizeof(*comp) +
			count * sizeof(struct ffa_cons_mrd);

	if (expected_size != obj->desc_size) {
		WARN("%s: invalid object, computed size %zu != size %zu\n",
		     __func__, expected_size, obj->desc_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	total_page_count = 0;

	/*
	 * comp->address_range_count is 32-bit, so 'count' must fit in a
	 * uint32_t at this point.
	 */
	for (size_t i = 0; i < count; i++) {
		const struct ffa_cons_mrd *mrd = comp->address_range_array + i;

		if (!is_aligned(mrd->address, PAGE_SIZE)) {
			WARN("%s: invalid object, address in region descriptor "
			     "%zu not 4K aligned (got 0x%016llx)\n",
			     __func__, i, (unsigned long long)mrd->address);
		}

		/*
		 * No overflow possible: total_page_count can hold at
		 * least 2^64 - 1, but will have at most 2^32 - 1 values
		 * added to it, each of which cannot exceed 2^32 - 1.
		 */
		total_page_count += mrd->page_count;
	}

	if (comp->total_page_count != total_page_count) {
		WARN("%s: invalid object, desc total_page_count %u != %llu\n",
		     __func__, comp->total_page_count, total_page_count);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	return 0;
}
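
/*
 * Size check example (illustrative): for comp_mrd_offset 0x60 and three
 * constituent ranges, an object only passes the checks above if desc_size
 * is exactly 0x60 + sizeof(struct ffa_comp_mrd) +
 * 3 * sizeof(struct ffa_cons_mrd); both trailing and missing bytes are
 * rejected.
 */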

/**
 * spmc_shmem_check_state_obj - Check if the descriptor describes memory
 *                              regions that are currently involved with an
 *                              existing memory transaction. This implies that
 *                              the memory is not in a valid state for lending.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if invalid memory
 * state.
 */
static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
				      uint32_t ffa_version)
{
	size_t obj_offset = 0;
	struct spmc_shmem_obj *inflight_obj;

	struct ffa_comp_mrd *other_mrd;
	struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
								  ffa_version);

	if (requested_mrd == NULL) {
		return FFA_ERROR_INVALID_PARAMETER;
	}

	inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
					       &obj_offset);

	while (inflight_obj != NULL) {
		/*
		 * Don't compare the transaction to itself or to partially
		 * transmitted descriptors.
		 */
		if ((obj->desc.handle != inflight_obj->desc.handle) &&
		    (obj->desc_size == obj->desc_filled)) {
			other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
							FFA_VERSION_COMPILED);
			if (other_mrd == NULL) {
				return FFA_ERROR_INVALID_PARAMETER;
			}
			if (overlapping_memory_regions(requested_mrd,
						       other_mrd)) {
				return FFA_ERROR_INVALID_PARAMETER;
			}
		}

		inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
						       &obj_offset);
	}
	return 0;
}

static long spmc_ffa_fill_desc(struct mailbox *mbox,
			       struct spmc_shmem_obj *obj,
			       uint32_t fragment_length,
			       ffa_mtd_flag32_t mtd_flag,
			       uint32_t ffa_version,
			       void *smc_handle)
{
	int ret;
	uint32_t handle_low;
	uint32_t handle_high;

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	CASSERT(sizeof(mbox->rxtx_page_count) == 4, assert_bogus_page_count);
	if (fragment_length > (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB) {
		WARN("%s: bad fragment size %u > %" PRIu64 " buffer size\n", __func__,
		     fragment_length, (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (fragment_length > obj->desc_size - obj->desc_filled) {
		WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
		     fragment_length, obj->desc_size - obj->desc_filled);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	memcpy((uint8_t *)&obj->desc + obj->desc_filled,
	       (uint8_t *) mbox->tx_buffer, fragment_length);

	/* Ensure that the sender ID resides in the normal world. */
	if (ffa_is_secure_world_id(obj->desc.sender_id)) {
		WARN("%s: Invalid sender ID 0x%x.\n",
		     __func__, obj->desc.sender_id);
		ret = FFA_ERROR_DENIED;
		goto err_arg;
	}

	/* Ensure the NS bit is set to 0. */
	if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	/*
	 * We don't currently support any optional flags so ensure none are
	 * requested.
	 */
	if (obj->desc.flags != 0U && mtd_flag != 0U &&
	    (obj->desc.flags != mtd_flag)) {
		WARN("%s: invalid memory transaction flags %u != %u\n",
		     __func__, obj->desc.flags, mtd_flag);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (obj->desc_filled == 0U) {
		/* First fragment, descriptor header has been copied. */
		ret = spmc_validate_mtd_start(&obj->desc, ffa_version,
					      fragment_length, obj->desc_size);
		if (ret != 0) {
			goto err_bad_desc;
		}

		obj->desc.handle = spmc_shmem_obj_state.next_handle++;
		obj->desc.flags |= mtd_flag;
	}

	obj->desc_filled += fragment_length;

	handle_low = (uint32_t)obj->desc.handle;
	handle_high = obj->desc.handle >> 32;

	if (obj->desc_filled != obj->desc_size) {
		SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
			 handle_high, obj->desc_filled,
			 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
	}

	/* The full descriptor has been received, perform any final checks. */

	ret = spmc_shmem_check_obj(obj, ffa_version);
	if (ret != 0) {
		goto err_bad_desc;
	}

	ret = spmc_shmem_check_state_obj(obj, ffa_version);
	if (ret) {
		ERROR("%s: invalid memory region descriptor.\n", __func__);
		goto err_bad_desc;
	}

	/*
	 * Everything checks out, if the sender was using FF-A v1.0, convert
	 * the descriptor format to use the v1.1 structures.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		struct spmc_shmem_obj *v1_1_obj;
		uint64_t mem_handle;

		/* Calculate the size that the v1.1 descriptor will require. */
		uint64_t v1_1_desc_size =
		    spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
						      obj->desc_size);

		if (v1_1_desc_size > UINT32_MAX) {
			ret = FFA_ERROR_NO_MEMORY;
			goto err_arg;
		}

		/* Get a new obj to store the v1.1 descriptor. */
		v1_1_obj =
		    spmc_shmem_obj_alloc(&spmc_shmem_obj_state, (size_t)v1_1_desc_size);

		if (!v1_1_obj) {
			ret = FFA_ERROR_NO_MEMORY;
			goto err_arg;
		}

		/* Perform the conversion from v1.0 to v1.1. */
		v1_1_obj->desc_size = (uint32_t)v1_1_desc_size;
		v1_1_obj->desc_filled = (uint32_t)v1_1_desc_size;
		if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
			ERROR("%s: Could not convert mtd!\n", __func__);
			spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_arg;
		}

		/*
		 * We're finished with the v1.0 descriptor so free it
		 * and continue our checks with the new v1.1 descriptor.
		 */
		mem_handle = obj->desc.handle;
		spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
		obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
		if (obj == NULL) {
			ERROR("%s: Failed to find converted descriptor.\n",
			      __func__);
			ret = FFA_ERROR_INVALID_PARAMETER;
			return spmc_ffa_error_return(smc_handle, ret);
		}
	}

	/* Allow for platform specific operations to be performed. */
	ret = plat_spmc_shmem_begin(&obj->desc);
	if (ret != 0) {
		goto err_arg;
	}

	SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
		 0, 0, 0);

err_bad_desc:
err_arg:
	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
	return spmc_ffa_error_return(smc_handle, ret);
}

/**
 * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
 * @smc_fid:          FID of SMC.
 * @secure_origin:    true if the call originated from the secure world.
 * @total_length:     Total length of shared memory descriptor.
 * @fragment_length:  Length of fragment of shared memory descriptor passed in
 *                    this call.
 * @address:          Not supported, must be 0.
 * @page_count:       Not supported, must be 0.
 * @handle:           Handle passed to smc call. Used to return
 *                    FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
 * to share or lend memory from non-secure os to secure os (with no stream
 * endpoints).
 *
 * Return: 0 on success, error code on failure.
 */
long spmc_ffa_mem_send(uint32_t smc_fid,
		       bool secure_origin,
		       uint64_t total_length,
		       uint32_t fragment_length,
		       uint64_t address,
		       uint32_t page_count,
		       void *cookie,
		       void *handle,
		       uint64_t flags)

{
	long ret;
	struct spmc_shmem_obj *obj;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	ffa_mtd_flag32_t mtd_flag;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	size_t min_desc_size;

	if (address != 0U || page_count != 0U) {
		WARN("%s: custom memory region for message not supported.\n",
		     __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (secure_origin) {
		WARN("%s: unsupported share direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		min_desc_size = sizeof(struct ffa_mtd_v1_0);
	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
		min_desc_size = sizeof(struct ffa_mtd);
	} else {
		WARN("%s: bad FF-A version.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check if the descriptor is too small for the FF-A version. */
	if (fragment_length < min_desc_size) {
		WARN("%s: bad first fragment size %u < %zu\n",
		     __func__, fragment_length, min_desc_size);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
		mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
	} else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
		mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
	} else {
		WARN("%s: invalid memory management operation.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);
	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
	if (obj == NULL) {
		ret = FFA_ERROR_NO_MEMORY;
		goto err_unlock;
	}

	spin_lock(&mbox->lock);
	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
				 ffa_version, handle);
	spin_unlock(&mbox->lock);

	spin_unlock(&spmc_shmem_obj_state.lock);
	return ret;

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}
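
/*
 * Flow sketch (illustrative only): a normal-world sender whose descriptor
 * does not fit in one mailbox transmits it in fragments, e.g.
 *
 *	FFA_MEM_SHARE(total_length, fragment_length = first chunk)
 *		-> FFA_MEM_FRAG_RX(handle, desc_filled)
 *	FFA_MEM_FRAG_TX(handle, fragment_length = remaining bytes)
 *		-> FFA_SUCCESS(handle)
 *
 * spmc_ffa_fill_desc() accumulates each fragment into the same
 * spmc_shmem_obj; full validation only runs once desc_filled == desc_size.
 */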

/**
 * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
 * @smc_fid:          FID of SMC.
 * @handle_low:       Handle_low value returned from FFA_MEM_FRAG_RX.
 * @handle_high:      Handle_high value returned from FFA_MEM_FRAG_RX.
 * @fragment_length:  Length of fragments transmitted.
 * @sender_id:        Vmid of sender in bits [31:16].
 * @handle:           Handle passed to smc call. Used to return
 *                    FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Return: @handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
			  bool secure_origin,
			  uint64_t handle_low,
			  uint64_t handle_high,
			  uint32_t fragment_length,
			  uint32_t sender_id,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	long ret;
	uint32_t desc_sender_id;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);

	struct spmc_shmem_obj *obj;
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		WARN("%s: 0x%lx is not a valid handle.\n",
		     __func__, mem_handle);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
	if (sender_id != desc_sender_id) {
		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
		     sender_id, desc_sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	if (obj->desc_filled == obj->desc_size) {
		WARN("%s: object desc already filled, %zu\n", __func__,
		     obj->desc_filled);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	spin_lock(&mbox->lock);
	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
				 handle);
	spin_unlock(&mbox->lock);

	spin_unlock(&spmc_shmem_obj_state.lock);
	return ret;

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor
 *                                    if the caller implements a version greater
 *                                    than FF-A 1.0 or if they have requested
 *                                    the functionality.
 *                                    TODO: We are assuming that the caller is
 *                                    an SP. To support retrieval from the
 *                                    normal world this function will need to be
 *                                    expanded accordingly.
 * @resp:    Descriptor populated in caller's RX buffer.
 * @sp_ctx:  Context of the calling SP.
 */
void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
			 struct secure_partition_desc *sp_ctx)
{
	if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
	    sp_ctx->ns_bit_requested) {
		/*
		 * Currently memory senders must reside in the normal
		 * world, and we do not have the functionality to change
		 * the state of memory dynamically. Therefore we can always set
		 * the NS bit to 1.
		 */
		resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
	}
}
1353
1354/**
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001355 * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
1356 * @smc_fid: FID of SMC
1357 * @total_length: Total length of retrieve request descriptor if this is
1358 * the first call. Otherwise (unsupported) must be 0.
1359 * @fragment_length: Length of fragment of retrieve request descriptor passed
1360 * in this call. Only @fragment_length == @length is
1361 * supported by this implementation.
1362 * @address: Not supported, must be 0.
1363 * @page_count: Not supported, must be 0.
1364 * @smc_handle: Handle passed to smc call. Used to return
1365 * FFA_MEM_RETRIEVE_RESP.
1366 *
1367 * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
1368 * Used by secure os to retrieve memory already shared by non-secure os.
1369 * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
1370 * the client must call FFA_MEM_FRAG_RX until the full response has been
1371 * received.
1372 *
1373 * Return: @handle on success, error code on failure.
1374 */
1375long
1376spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
1377 bool secure_origin,
1378 uint32_t total_length,
1379 uint32_t fragment_length,
1380 uint64_t address,
1381 uint32_t page_count,
1382 void *cookie,
1383 void *handle,
1384 uint64_t flags)
1385{
1386 int ret;
1387 size_t buf_size;
Marc Bonnicid1907f02022-04-19 17:42:53 +01001388 size_t copy_size = 0;
1389 size_t min_desc_size;
1390 size_t out_desc_size = 0;
1391
1392 /*
1393 * Currently we are only accessing fields that are the same in both the
1394 * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
1395 * here. We only need validate against the appropriate struct size.
1396 */
1397 struct ffa_mtd *resp;
1398 const struct ffa_mtd *req;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001399 struct spmc_shmem_obj *obj = NULL;
1400 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
Marc Bonnicid1907f02022-04-19 17:42:53 +01001401 uint32_t ffa_version = get_partition_ffa_version(secure_origin);
Marc Bonnici08f28ef2022-04-19 16:52:59 +01001402 struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001403
1404 if (!secure_origin) {
1405 WARN("%s: unsupported retrieve req direction.\n", __func__);
1406 return spmc_ffa_error_return(handle,
1407 FFA_ERROR_INVALID_PARAMETER);
1408 }
1409
1410 if (address != 0U || page_count != 0U) {
1411 WARN("%s: custom memory region not supported.\n", __func__);
1412 return spmc_ffa_error_return(handle,
1413 FFA_ERROR_INVALID_PARAMETER);
1414 }
1415
1416 spin_lock(&mbox->lock);
1417
1418 req = mbox->tx_buffer;
1419 resp = mbox->rx_buffer;
1420 buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1421
1422 if (mbox->rxtx_page_count == 0U) {
1423 WARN("%s: buffer pair not registered.\n", __func__);
1424 ret = FFA_ERROR_INVALID_PARAMETER;
1425 goto err_unlock_mailbox;
1426 }
1427
1428 if (mbox->state != MAILBOX_STATE_EMPTY) {
1429 WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
1430 ret = FFA_ERROR_DENIED;
1431 goto err_unlock_mailbox;
1432 }
1433
1434 if (fragment_length != total_length) {
1435 WARN("%s: fragmented retrieve request not supported.\n",
1436 __func__);
1437 ret = FFA_ERROR_INVALID_PARAMETER;
1438 goto err_unlock_mailbox;
1439 }
1440
Marc Bonnici336630f2022-01-13 11:39:10 +00001441 if (req->emad_count == 0U) {
1442 WARN("%s: unsupported attribute desc count %u.\n",
1443 __func__, req->emad_count);
vallau01460d3962022-08-09 17:06:53 +02001444 ret = FFA_ERROR_INVALID_PARAMETER;
1445 goto err_unlock_mailbox;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001446 }
1447
Marc Bonnicid1907f02022-04-19 17:42:53 +01001448 /* Determine the appropriate minimum descriptor size. */
1449 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1450 min_desc_size = sizeof(struct ffa_mtd_v1_0);
1451 } else {
1452 min_desc_size = sizeof(struct ffa_mtd);
1453 }
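/*
 * The two sizes differ because, per the FF-A v1.1 spec, the v1.1
 * transaction descriptor locates the endpoint memory access descriptors
 * through explicit size/offset fields rather than a fixed inline array.
 */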
1454 if (total_length < min_desc_size) {
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001455 WARN("%s: invalid length %u < %zu\n", __func__, total_length,
Marc Bonnicid1907f02022-04-19 17:42:53 +01001456 min_desc_size);
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001457 ret = FFA_ERROR_INVALID_PARAMETER;
1458 goto err_unlock_mailbox;
1459 }
1460
1461 spin_lock(&spmc_shmem_obj_state.lock);
1462
1463 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1464 if (obj == NULL) {
1465 ret = FFA_ERROR_INVALID_PARAMETER;
1466 goto err_unlock_all;
1467 }
1468
1469 if (obj->desc_filled != obj->desc_size) {
1470 WARN("%s: incomplete object desc filled %zu < size %zu\n",
1471 __func__, obj->desc_filled, obj->desc_size);
1472 ret = FFA_ERROR_INVALID_PARAMETER;
1473 goto err_unlock_all;
1474 }
1475
1476 if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
1477 WARN("%s: wrong sender id 0x%x != 0x%x\n",
1478 __func__, req->sender_id, obj->desc.sender_id);
1479 ret = FFA_ERROR_INVALID_PARAMETER;
1480 goto err_unlock_all;
1481 }
1482
1483 if (req->emad_count != 0U && req->tag != obj->desc.tag) {
1484 WARN("%s: wrong tag 0x%lx != 0x%lx\n",
1485 __func__, req->tag, obj->desc.tag);
1486 ret = FFA_ERROR_INVALID_PARAMETER;
1487 goto err_unlock_all;
1488 }
1489
Marc Bonnici336630f2022-01-13 11:39:10 +00001490 if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
1491 WARN("%s: mistmatch of endpoint counts %u != %u\n",
1492 __func__, req->emad_count, obj->desc.emad_count);
1493 ret = FFA_ERROR_INVALID_PARAMETER;
1494 goto err_unlock_all;
1495 }
1496
Marc Bonnici08f28ef2022-04-19 16:52:59 +01001497 /* Ensure the NS bit is set to 0 in the request. */
1498 if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
1499 WARN("%s: NS mem attributes flags MBZ.\n", __func__);
1500 ret = FFA_ERROR_INVALID_PARAMETER;
1501 goto err_unlock_all;
1502 }
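/*
 * The NS bit is an output of this interface: the SPMC sets it in the
 * response via spmc_ffa_mem_retrieve_set_ns_bit(), so callers must
 * leave it zero in their requests.
 */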
1503
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001504 if (req->flags != 0U) {
1505 if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
1506 (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
1507 /*
1508 * If the retrieve request specifies the memory
1509 * transaction type, ensure it matches what we expect.
1510 */
1511 WARN("%s: wrong mem transaction flags %x != %x\n",
1512 __func__, req->flags, obj->desc.flags);
1513 ret = FFA_ERROR_INVALID_PARAMETER;
1514 goto err_unlock_all;
1515 }
1516
1517 if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
1518 req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
1519 /*
1520 * The current implementation does not support donate,
1521 * and no other flags are supported.
1522 */
1523 WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
1524 ret = FFA_ERROR_INVALID_PARAMETER;
1525 goto err_unlock_all;
1526 }
1527 }
1528
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001529 /* Validate the caller is a valid participant. */
Shruti Gupta20ce06c2022-08-25 14:22:53 +01001530 if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001531 WARN("%s: Invalid endpoint ID (0x%x).\n",
1532 __func__, sp_ctx->sp_id);
1533 ret = FFA_ERROR_INVALID_PARAMETER;
1534 goto err_unlock_all;
1535 }
1536
Marc Bonnicid1907f02022-04-19 17:42:53 +01001537 /* Validate that each provided emad offset and structure is valid. */
1538 for (size_t i = 0; i < req->emad_count; i++) {
1539 size_t emad_size;
1540 struct ffa_emad_v1_0 *emad;
1541
1542 emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1543 &emad_size);
Marc Bonnicid1907f02022-04-19 17:42:53 +01001544
1545 if ((uintptr_t) emad + emad_size > (uintptr_t)
1546 ((uint8_t *) req + total_length)) {
1547 WARN("Invalid emad access.\n");
1548 ret = FFA_ERROR_INVALID_PARAMETER;
1549 goto err_unlock_all;
1550 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001551 }
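/*
 * From here on every emad in the request is known to fit entirely
 * within the total_length bytes the caller transmitted, so the field
 * accesses in the matching loop below stay inside the TX buffer copy.
 */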
1552
1553 /*
1554 * Validate all the endpoints match in the case of multiple
1555 * borrowers. We don't mandate that the order of the borrowers
1556 * must match in the descriptors, therefore check whether the
1557 * endpoints match in any order.
1558 */
1559 for (size_t i = 0; i < req->emad_count; i++) {
1560 bool found = false;
Marc Bonnicid1907f02022-04-19 17:42:53 +01001561 size_t emad_size;
1562 struct ffa_emad_v1_0 *emad;
1563 struct ffa_emad_v1_0 *other_emad;
1564
1565 emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1566 &emad_size);
Marc Bonnici336630f2022-01-13 11:39:10 +00001567
1568 for (size_t j = 0; j < obj->desc.emad_count; j++) {
Marc Bonnicid1907f02022-04-19 17:42:53 +01001569 other_emad = spmc_shmem_obj_get_emad(
1570 &obj->desc, j, MAKE_FFA_VERSION(1, 1),
1571 &emad_size);
1572
Marc Bonnicid1907f02022-04-19 17:42:53 +01001573 if (req->emad_count &&
1574 emad->mapd.endpoint_id ==
1575 other_emad->mapd.endpoint_id) {
Marc Bonnici336630f2022-01-13 11:39:10 +00001576 found = true;
1577 break;
1578 }
1579 }
1580
1581 if (!found) {
1582 WARN("%s: invalid receiver id (0x%x).\n",
Marc Bonnicid1907f02022-04-19 17:42:53 +01001583 __func__, emad->mapd.endpoint_id);
Marc Bonnici336630f2022-01-13 11:39:10 +00001584 ret = FFA_ERROR_INVALID_PARAMETER;
1585 goto err_unlock_all;
1586 }
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001587 }
1588
1589 mbox->state = MAILBOX_STATE_FULL;
1590
1591 if (req->emad_count != 0U) {
1592 obj->in_use++;
1593 }
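/*
 * The in_use count taken here is dropped by spmc_ffa_mem_relinquish()
 * and must reach zero before spmc_ffa_mem_reclaim() will free the
 * object, so each successful retrieve is expected to be paired with a
 * relinquish.
 */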
1594
Marc Bonnicid1907f02022-04-19 17:42:53 +01001595 /*
1596 * If the caller is v1.0 convert the descriptor, otherwise copy
1597 * directly.
1598 */
1599 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1600 ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
1601 &copy_size,
1602 &out_desc_size);
1603 if (ret != 0U) {
1604 ERROR("%s: Failed to process descriptor.\n", __func__);
1605 goto err_unlock_all;
1606 }
1607 } else {
1608 copy_size = MIN(obj->desc_size, buf_size);
1609 out_desc_size = obj->desc_size;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001610
Marc Bonnicid1907f02022-04-19 17:42:53 +01001611 memcpy(resp, &obj->desc, copy_size);
1612 }
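/*
 * If copy_size ended up smaller than out_desc_size the descriptor did
 * not fit in the RX buffer; the receiver must fetch the remainder via
 * FFA_MEM_FRAG_RX (handled by spmc_ffa_mem_frag_rx() below).
 */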
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001613
Marc Bonnici08f28ef2022-04-19 16:52:59 +01001614 /* Set the NS bit in the response if applicable. */
1615 spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);
1616
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001617 spin_unlock(&spmc_shmem_obj_state.lock);
1618 spin_unlock(&mbox->lock);
1619
Marc Bonnicid1907f02022-04-19 17:42:53 +01001620 SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001621 copy_size, 0, 0, 0, 0, 0);
1622
1623err_unlock_all:
1624 spin_unlock(&spmc_shmem_obj_state.lock);
1625err_unlock_mailbox:
1626 spin_unlock(&mbox->lock);
1627 return spmc_ffa_error_return(handle, ret);
1628}
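/*
 * Illustrative sketch, not part of the build: the register interface the
 * handler above consumes, as seen from an SP. ffa_call() is a hypothetical
 * conduit wrapper (not a TF-A API), the FFA_MEM_RETRIEVE_REQ_SMC32 FID is
 * assumed to come from ffa_svc.h, and the retrieve request descriptor is
 * assumed to already sit in the SP's TX buffer.
 */
#if 0
extern long ffa_call(uint32_t fid, uint64_t x1, uint64_t x2, uint64_t x3,
		     uint64_t x4);

static long example_sp_retrieve_req(uint32_t desc_len)
{
	/*
	 * w1 = total_length and w2 = fragment_length must be equal
	 * (fragmented requests are rejected); x3 = address and w4 =
	 * page_count must be 0 as only RX/TX buffer responses are
	 * supported.
	 */
	return ffa_call(FFA_MEM_RETRIEVE_REQ_SMC32, desc_len, desc_len, 0, 0);
}
#endif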
1629
1630/**
1631 * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
1632 * @smc_fid: FID of SMC.
1633 * @handle_low: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
1634 * @handle_high: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
1635 * @fragment_offset: Byte offset in descriptor to resume at.
1636 * @sender_id: Bit[31:16]: Endpoint id of sender if client is a
1637 * hypervisor. 0 otherwise.
1638 * @smc_handle: Handle passed to smc call. Used to return
1639 * FFA_MEM_FRAG_TX.
1640 *
1641 * Return: @smc_handle on success, error code on failure.
1642 */
1643long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
1644 bool secure_origin,
1645 uint32_t handle_low,
1646 uint32_t handle_high,
1647 uint32_t fragment_offset,
1648 uint32_t sender_id,
1649 void *cookie,
1650 void *handle,
1651 uint64_t flags)
1652{
1653 int ret;
1654 void *src;
1655 size_t buf_size;
1656 size_t copy_size;
1657 size_t full_copy_size;
1658 uint32_t desc_sender_id;
1659 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1660 uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1661 struct spmc_shmem_obj *obj;
Marc Bonnicid1907f02022-04-19 17:42:53 +01001662 uint32_t ffa_version = get_partition_ffa_version(secure_origin);
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001663
1664 if (!secure_origin) {
1665 WARN("%s: can only be called from swld.\n",
1666 __func__);
1667 return spmc_ffa_error_return(handle,
1668 FFA_ERROR_INVALID_PARAMETER);
1669 }
1670
1671 spin_lock(&spmc_shmem_obj_state.lock);
1672
1673 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1674 if (obj == NULL) {
1675 WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
1676 __func__, mem_handle);
1677 ret = FFA_ERROR_INVALID_PARAMETER;
1678 goto err_unlock_shmem;
1679 }
1680
1681 desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1682 if (sender_id != 0U && sender_id != desc_sender_id) {
1683 WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1684 sender_id, desc_sender_id);
1685 ret = FFA_ERROR_INVALID_PARAMETER;
1686 goto err_unlock_shmem;
1687 }
1688
1689 if (fragment_offset >= obj->desc_size) {
1690 WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
1691 __func__, fragment_offset, obj->desc_size);
1692 ret = FFA_ERROR_INVALID_PARAMETER;
1693 goto err_unlock_shmem;
1694 }
1695
1696 spin_lock(&mbox->lock);
1697
1698 if (mbox->rxtx_page_count == 0U) {
1699 WARN("%s: buffer pair not registered.\n", __func__);
1700 ret = FFA_ERROR_INVALID_PARAMETER;
1701 goto err_unlock_all;
1702 }
1703
1704 if (mbox->state != MAILBOX_STATE_EMPTY) {
1705 WARN("%s: RX Buffer is full!\n", __func__);
1706 ret = FFA_ERROR_DENIED;
1707 goto err_unlock_all;
1708 }
1709
1710 buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1711
1712 mbox->state = MAILBOX_STATE_FULL;
1713
Marc Bonnicid1907f02022-04-19 17:42:53 +01001714 /*
1715 * If the caller is v1.0 convert the descriptor, otherwise copy
1716 * directly.
1717 */
1718 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1719 size_t out_desc_size;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001720
Marc Bonnicid1907f02022-04-19 17:42:53 +01001721 ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
1722 buf_size,
1723 fragment_offset,
1724 &copy_size,
1725 &out_desc_size);
1726 if (ret != 0U) {
1727 ERROR("%s: Failed to process descriptor.\n", __func__);
1728 goto err_unlock_all;
1729 }
1730 } else {
1731 full_copy_size = obj->desc_size - fragment_offset;
1732 copy_size = MIN(full_copy_size, buf_size);
1733
1734 src = &obj->desc;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001735
Marc Bonnicid1907f02022-04-19 17:42:53 +01001736 memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
1737 }
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001738
1739 spin_unlock(&mbox->lock);
1740 spin_unlock(&spmc_shmem_obj_state.lock);
1741
1742 SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
1743 copy_size, sender_id, 0, 0, 0);
1744
1745err_unlock_all:
1746 spin_unlock(&mbox->lock);
1747err_unlock_shmem:
1748 spin_unlock(&spmc_shmem_obj_state.lock);
1749 return spmc_ffa_error_return(handle, ret);
1750}
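/*
 * Illustrative sketch, not part of the build: draining a descriptor that
 * did not fit in one FFA_MEM_RETRIEVE_RESP. ffa_rx_release() and
 * ffa_mem_frag_rx_call() are hypothetical wrappers for the SP's conduit;
 * the real names depend on the SP runtime. The RX buffer must be released
 * before each FFA_MEM_FRAG_RX, otherwise the handler above returns
 * FFA_ERROR_DENIED.
 */
#if 0
extern void ffa_rx_release(void);
extern uint32_t ffa_mem_frag_rx_call(uint32_t handle_lo, uint32_t handle_hi,
				     uint32_t offset);

static void example_drain_fragments(uint64_t mem_handle, uint32_t total_len,
				    uint32_t first_frag_len)
{
	uint32_t received = first_frag_len;

	while (received < total_len) {
		/* Free the RX buffer so the SPMC can fill the next chunk. */
		ffa_rx_release();

		/*
		 * w1/w2 = handle low/high, w3 = resume offset; the wrapper
		 * is assumed to return the fragment length reported in w3
		 * of the FFA_MEM_FRAG_TX response.
		 */
		received += ffa_mem_frag_rx_call((uint32_t)mem_handle,
						 (uint32_t)(mem_handle >> 32),
						 received);
	}
}
#endif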
1751
1752/**
1753 * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
1754 * @smc_fid: FID of SMC.
1755 *
1756 * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
1757 * Used by the secure OS to release previously shared memory back to the non-secure OS.
1758 *
1759 * The descriptor containing the handle to release must be in the client's (secure OS's) transmit buffer.
1760 *
1761 * Return: 0 on success, error code on failure.
1762 */
1763int spmc_ffa_mem_relinquish(uint32_t smc_fid,
1764 bool secure_origin,
1765 uint32_t handle_low,
1766 uint32_t handle_high,
1767 uint32_t fragment_offset,
1768 uint32_t sender_id,
1769 void *cookie,
1770 void *handle,
1771 uint64_t flags)
1772{
1773 int ret;
1774 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1775 struct spmc_shmem_obj *obj;
1776 const struct ffa_mem_relinquish_descriptor *req;
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001777 struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001778
1779 if (!secure_origin) {
1780 WARN("%s: unsupported relinquish direction.\n", __func__);
1781 return spmc_ffa_error_return(handle,
1782 FFA_ERROR_INVALID_PARAMETER);
1783 }
1784
1785 spin_lock(&mbox->lock);
1786
1787 if (mbox->rxtx_page_count == 0U) {
1788 WARN("%s: buffer pair not registered.\n", __func__);
1789 ret = FFA_ERROR_INVALID_PARAMETER;
1790 goto err_unlock_mailbox;
1791 }
1792
1793 req = mbox->tx_buffer;
1794
1795 if (req->flags != 0U) {
1796 WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
1797 ret = FFA_ERROR_INVALID_PARAMETER;
1798 goto err_unlock_mailbox;
1799 }
1800
Marc Bonnici336630f2022-01-13 11:39:10 +00001801 if (req->endpoint_count == 0) {
1802 WARN("%s: endpoint count cannot be 0.\n", __func__);
1803 ret = FFA_ERROR_INVALID_PARAMETER;
1804 goto err_unlock_mailbox;
1805 }
1806
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001807 spin_lock(&spmc_shmem_obj_state.lock);
1808
1809 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1810 if (obj == NULL) {
1811 ret = FFA_ERROR_INVALID_PARAMETER;
1812 goto err_unlock_all;
1813 }
1814
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001815 /*
1816 * Validate the endpoint ID was populated correctly. We don't currently
1817 * support proxy endpoints so the endpoint count should always be 1.
1818 */
1819 if (req->endpoint_count != 1U) {
1820 WARN("%s: unsupported endpoint count %u != 1\n", __func__,
1821 req->endpoint_count);
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001822 ret = FFA_ERROR_INVALID_PARAMETER;
1823 goto err_unlock_all;
1824 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001825
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001826 /* Validate provided endpoint ID matches the partition ID. */
1827 if (req->endpoint_array[0] != sp_ctx->sp_id) {
1828 WARN("%s: invalid endpoint ID %u != %u\n", __func__,
1829 req->endpoint_array[0], sp_ctx->sp_id);
1830 ret = FFA_ERROR_INVALID_PARAMETER;
1831 goto err_unlock_all;
1832 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001833
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001834 /* Validate the caller is a valid participant. */
Shruti Gupta20ce06c2022-08-25 14:22:53 +01001835 if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
Marc Bonnici9bdcb742022-06-06 14:37:57 +01001836 WARN("%s: Invalid endpoint ID (0x%x).\n",
1837 __func__, req->endpoint_array[0]);
1838 ret = FFA_ERROR_INVALID_PARAMETER;
1839 goto err_unlock_all;
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001840 }
Marc Bonnici336630f2022-01-13 11:39:10 +00001841
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001842 if (obj->in_use == 0U) {
1843 ret = FFA_ERROR_INVALID_PARAMETER;
1844 goto err_unlock_all;
1845 }
1846 obj->in_use--;
1847
1848 spin_unlock(&spmc_shmem_obj_state.lock);
1849 spin_unlock(&mbox->lock);
1850
1851 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1852
1853err_unlock_all:
1854 spin_unlock(&spmc_shmem_obj_state.lock);
1855err_unlock_mailbox:
1856 spin_unlock(&mbox->lock);
1857 return spmc_ffa_error_return(handle, ret);
1858}
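/*
 * Illustrative sketch, not part of the build: the minimal relinquish
 * descriptor this handler accepts, i.e. flags zero and a single endpoint
 * equal to the caller's own partition ID. sp_tx_buf and ffa_call() are
 * hypothetical, and the FFA_MEM_RELINQUISH FID is assumed to come from
 * ffa_svc.h; struct ffa_mem_relinquish_descriptor and its fields match
 * the usage above.
 */
#if 0
extern void *sp_tx_buf;
extern long ffa_call(uint32_t fid, uint64_t x1, uint64_t x2, uint64_t x3,
		     uint64_t x4);

static long example_sp_relinquish(uint64_t mem_handle, uint16_t own_sp_id)
{
	struct ffa_mem_relinquish_descriptor *req = sp_tx_buf;

	req->handle = mem_handle;
	req->flags = 0U;		/* non-zero flags are rejected */
	req->endpoint_count = 1U;	/* proxy endpoints are unsupported */
	req->endpoint_array[0] = own_sp_id;

	return ffa_call(FFA_MEM_RELINQUISH, 0, 0, 0, 0);
}
#endif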
1859
1860/**
1861 * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
1862 * @smc_fid: FID of SMC.
1863 * @handle_low: Unique handle of shared memory object to reclaim. Bit[31:0].
1864 * @handle_high: Unique handle of shared memory object to reclaim.
1865 * Bit[63:32].
1866 * @mem_flags: Unsupported, must be 0.
1867 *
1868 * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
1869 * Used by the non-secure OS to reclaim memory previously shared with the secure OS.
1870 *
1871 * Return: 0 on success, error code on failure.
1872 */
1873int spmc_ffa_mem_reclaim(uint32_t smc_fid,
1874 bool secure_origin,
1875 uint32_t handle_low,
1876 uint32_t handle_high,
1877 uint32_t mem_flags,
1878 uint64_t x4,
1879 void *cookie,
1880 void *handle,
1881 uint64_t flags)
1882{
1883 int ret;
1884 struct spmc_shmem_obj *obj;
1885 uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1886
1887 if (secure_origin) {
1888 WARN("%s: unsupported reclaim direction.\n", __func__);
1889 return spmc_ffa_error_return(handle,
1890 FFA_ERROR_INVALID_PARAMETER);
1891 }
1892
1893 if (mem_flags != 0U) {
1894 WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
1895 return spmc_ffa_error_return(handle,
1896 FFA_ERROR_INVALID_PARAMETER);
1897 }
1898
1899 spin_lock(&spmc_shmem_obj_state.lock);
1900
1901 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1902 if (obj == NULL) {
1903 ret = FFA_ERROR_INVALID_PARAMETER;
1904 goto err_unlock;
1905 }
1906 if (obj->in_use != 0U) {
1907 ret = FFA_ERROR_DENIED;
1908 goto err_unlock;
1909 }
Marc Bonnici503320e2022-02-21 15:02:36 +00001910
Marc Bonnici82e28f12022-10-18 13:39:48 +01001911 if (obj->desc_filled != obj->desc_size) {
1912 WARN("%s: incomplete object desc filled %zu < size %zu\n",
1913 __func__, obj->desc_filled, obj->desc_size);
1914 ret = FFA_ERROR_INVALID_PARAMETER;
1915 goto err_unlock;
1916 }
1917
Marc Bonnici503320e2022-02-21 15:02:36 +00001918 /* Allow for platform specific operations to be performed. */
1919 ret = plat_spmc_shmem_reclaim(&obj->desc);
1920 if (ret != 0) {
1921 goto err_unlock;
1922 }
1923
Marc Bonnici9f23c8d2021-10-01 16:06:04 +01001924 spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1925 spin_unlock(&spmc_shmem_obj_state.lock);
1926
1927 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1928
1929err_unlock:
1930 spin_unlock(&spmc_shmem_obj_state.lock);
1931 return spmc_ffa_error_return(handle, ret);
1932}
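/*
 * Illustrative sketch, not part of the build: a normal-world reclaim as
 * accepted above, i.e. mem_flags zero and every borrower already
 * relinquished (obj->in_use == 0). ffa_call() is the same hypothetical
 * wrapper as in the earlier sketches; the FFA_MEM_RECLAIM FID is assumed
 * to come from ffa_svc.h.
 */
#if 0
extern long ffa_call(uint32_t fid, uint64_t x1, uint64_t x2, uint64_t x3,
		     uint64_t x4);

static long example_nwd_reclaim(uint64_t mem_handle)
{
	return ffa_call(FFA_MEM_RECLAIM,
			(uint32_t)mem_handle,		/* w1 = handle low */
			(uint32_t)(mem_handle >> 32),	/* w2 = handle high */
			0, 0);				/* flags MBZ */
}
#endif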