1 /*
2  * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 #include <assert.h>
7 #include <errno.h>
8 #include <inttypes.h>
9 
10 #include <common/debug.h>
11 #include <common/runtime_svc.h>
12 #include <lib/object_pool.h>
13 #include <lib/spinlock.h>
14 #include <lib/xlat_tables/xlat_tables_v2.h>
15 #include <services/ffa_svc.h>
16 #include "spmc.h"
17 #include "spmc_shared_mem.h"
18 
19 #include <platform_def.h>
20 
21 /**
22  * struct spmc_shmem_obj - Shared memory object.
23  * @desc_size:      Size of @desc.
24  * @desc_filled:    Size of @desc already received.
25  * @in_use:         Number of clients that have called ffa_mem_retrieve_req
26  *                  without a matching ffa_mem_relinquish call.
27  * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
28  */
29 struct spmc_shmem_obj {
30 	size_t desc_size;
31 	size_t desc_filled;
32 	size_t in_use;
33 	struct ffa_mtd desc;
34 };
35 
36 /*
37  * Declare our data structure to store the metadata of memory share requests.
38  * The main datastore is allocated on a per-platform basis to ensure enough
39  * storage can be made available.
40  * The address of the datastore will be populated by the SPMC during its
41  * initialization.
42  */
43 
44 struct spmc_shmem_obj_state spmc_shmem_obj_state = {
45 	/* Start handles just below 2^32 so the top 32 bits are used quickly. */
46 	.next_handle = 0xffffffc0U,
47 };
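
/*
 * Illustrative sketch (not part of this file): a platform would typically
 * provide the backing store during SPMC boot through a hook along the lines
 * of the one below (names assumed), after which spmc_shmem_obj_state.data
 * and .data_size refer to the platform buffer:
 *
 *   static uint8_t plat_shmem_datastore[512 * 1024];
 *
 *   int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size)
 *   {
 *           *datastore = plat_shmem_datastore;
 *           *size = sizeof(plat_shmem_datastore);
 *           return 0;
 *   }
 */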
48 
49 /**
50  * spmc_shmem_obj_size - Convert from descriptor size to object size.
51  * @desc_size:  Size of struct ffa_memory_region_descriptor object.
52  *
53  * Return: Size of struct spmc_shmem_obj object.
54  */
55 static size_t spmc_shmem_obj_size(size_t desc_size)
56 {
57 	return desc_size + offsetof(struct spmc_shmem_obj, desc);
58 }
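
/*
 * Worked example (illustrative): @desc is the final, variable-length member
 * of struct spmc_shmem_obj, so a 0x40-byte descriptor costs
 * 0x40 + offsetof(struct spmc_shmem_obj, desc) bytes of datastore; assuming
 * a 24-byte header (three size_t fields on AArch64), that is 0x58 bytes.
 */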
59 
60 /**
61  * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
62  * @state:      Global state.
63  * @desc_size:  Size of struct ffa_memory_region_descriptor object that
64  *              allocated object will hold.
65  *
66  * Return: Pointer to newly allocated object, or %NULL if there is not enough
67  *         space left. The returned pointer is only valid while @state is
68  *         locked; to use it again after unlocking @state,
69  *         spmc_shmem_obj_lookup must be called.
70  */
71 static struct spmc_shmem_obj *
72 spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
73 {
74 	struct spmc_shmem_obj *obj;
75 	size_t free = state->data_size - state->allocated;
76 	size_t obj_size;
77 
78 	if (state->data == NULL) {
79 		ERROR("Missing shmem datastore!\n");
80 		return NULL;
81 	}
82 
83 	/* Ensure that descriptor size is aligned */
84 	if (!is_aligned(desc_size, 16)) {
85 		WARN("%s(0x%zx) desc_size not 16-byte aligned\n",
86 		     __func__, desc_size);
87 		return NULL;
88 	}
89 
90 	obj_size = spmc_shmem_obj_size(desc_size);
91 
92 	/* Ensure the obj size has not overflowed. */
93 	if (obj_size < desc_size) {
94 		WARN("%s(0x%zx) desc_size overflow\n",
95 		     __func__, desc_size);
96 		return NULL;
97 	}
98 
99 	if (obj_size > free) {
100 		WARN("%s(0x%zx) failed, free 0x%zx\n",
101 		     __func__, desc_size, free);
102 		return NULL;
103 	}
104 	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
105 	obj->desc = (struct ffa_mtd) {0};
106 	obj->desc_size = desc_size;
107 	obj->desc_filled = 0;
108 	obj->in_use = 0;
109 	state->allocated += obj_size;
110 	return obj;
111 }
112 
113 /**
114  * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
115  * @state:      Global state.
116  * @obj:        Object to free.
117  *
118  * Release memory used by @obj. Other objects may move, so on return all
119  * pointers to struct spmc_shmem_obj objects should be considered invalid, not
120  * just @obj.
121  *
122  * The current implementation always compacts the remaining objects to simplify
123  * the allocator and to avoid fragmentation.
124  */
125 
126 static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
127 				  struct spmc_shmem_obj *obj)
128 {
129 	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
130 	uint8_t *shift_dest = (uint8_t *)obj;
131 	uint8_t *shift_src = shift_dest + free_size;
132 	size_t shift_size = state->allocated - (shift_src - state->data);
133 
134 	if (shift_size != 0U) {
135 		memmove(shift_dest, shift_src, shift_size);
136 	}
137 	state->allocated -= free_size;
138 }
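
/*
 * Compaction example (illustrative): freeing B from a datastore laid out as
 *
 *   | A | B | C |   (allocated = size(A) + size(B) + size(C))
 *
 * memmove()s C down over B, leaving
 *
 *   | A | C |       (allocated = size(A) + size(C))
 *
 * which is why a handle must be re-resolved with spmc_shmem_obj_lookup()
 * after any free.
 */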
139 
140 /**
141  * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
142  * @state:      Global state.
143  * @handle:     Unique handle of object to return.
144  *
145  * Return: struct spmc_shmem_obj object with handle matching @handle, or
146  *         %NULL if no object in @state->data has a matching handle.
147  */
148 static struct spmc_shmem_obj *
149 spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
150 {
151 	uint8_t *curr = state->data;
152 
153 	while (curr - state->data < state->allocated) {
154 		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
155 
156 		if (obj->desc.handle == handle) {
157 			return obj;
158 		}
159 		curr += spmc_shmem_obj_size(obj->desc_size);
160 	}
161 	return NULL;
162 }
163 
164 /**
165  * spmc_shmem_obj_get_next - Get the next memory object from an offset.
166  * @state:      Global state.
167  * @offset:     Offset used to track which objects have previously been
168  *              returned.
169  *
170  * Return: the next struct spmc_shmem_obj object from the provided offset.
171  *	   %NULL, if there are no more objects.
172  */
173 static struct spmc_shmem_obj *
174 spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
175 {
176 	uint8_t *curr = state->data + *offset;
177 
178 	if (curr - state->data < state->allocated) {
179 		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
180 
181 		*offset += spmc_shmem_obj_size(obj->desc_size);
182 
183 		return obj;
184 	}
185 	return NULL;
186 }
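
/*
 * Typical iteration sketch (illustrative), as used by the transaction state
 * checks later in this file; @offset advances past each object so the walk
 * visits every allocation exactly once while the state lock is held:
 *
 *   size_t offset = 0;
 *   struct spmc_shmem_obj *o;
 *
 *   while ((o = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
 *                                       &offset)) != NULL) {
 *           ... inspect o ...
 *   }
 */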
187 
188 /*******************************************************************************
189  * FF-A memory descriptor helper functions.
190  ******************************************************************************/
191 /**
192  * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
193  *                           client's FF-A version.
194  * @desc:         The memory transaction descriptor.
195  * @index:        The index of the emad element to be accessed.
196  * @ffa_version:  FF-A version of the provided structure.
197  * @emad_size:    Will be populated with the size of the returned emad
198  *                descriptor.
199  * Return: A pointer to the requested emad structure.
200  */
201 static void *
202 spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
203 			uint32_t ffa_version, size_t *emad_size)
204 {
205 	uint8_t *emad;
206 
207 	assert(index < desc->emad_count);
208 
209 	/*
210 	 * If the caller is using FF-A v1.0, interpret the descriptor as a v1.0
211 	 * format; otherwise assume it is a v1.1 format.
212 	 */
213 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
214 		emad = (uint8_t *)desc + offsetof(struct ffa_mtd_v1_0, emad);
215 		*emad_size = sizeof(struct ffa_emad_v1_0);
216 	} else {
217 		assert(is_aligned(desc->emad_offset, 16));
218 		emad = ((uint8_t *) desc + desc->emad_offset);
219 		*emad_size = desc->emad_size;
220 	}
221 
222 	assert(((uint64_t)index * (uint64_t)*emad_size) <= UINT32_MAX);
223 	return (emad + (*emad_size * index));
224 }
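
/*
 * Layout sketch of the two formats this accessor hides (illustrative):
 *
 *   v1.0: | ffa_mtd_v1_0 | emad[0] | emad[1] | ...
 *         array embedded at offsetof(struct ffa_mtd_v1_0, emad), stride
 *         fixed at sizeof(struct ffa_emad_v1_0).
 *
 *   v1.1: | ffa_mtd | ... | emad[0] | emad[1] | ...
 *         array located by desc->emad_offset, stride given by the
 *         sender-chosen desc->emad_size (at least the v1.0 stride).
 */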
225 
226 /**
227  * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
228  *				 FF-A version of the descriptor.
229  * @obj:    Object containing ffa_memory_region_descriptor.
230  *
231  * Return: struct ffa_comp_mrd object corresponding to the composite memory
232  *	   region descriptor.
233  */
234 static struct ffa_comp_mrd *
235 spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
236 {
237 	size_t emad_size;
238 	/*
239 	 * The comp_mrd_offset field of the emad descriptor remains consistent
240 	 * between FF-A versions, therefore we can use the v1.0 descriptor here
241 	 * in all cases.
242 	 */
243 	struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
244 							     ffa_version,
245 							     &emad_size);
246 
247 	/* Ensure the composite descriptor offset is aligned. */
248 	if (!is_aligned(emad->comp_mrd_offset, 8)) {
249 		WARN("Unaligned composite memory region descriptor offset.\n");
250 		return NULL;
251 	}
252 
253 	return (struct ffa_comp_mrd *)
254 	       ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
255 }
256 
257 /**
258  * spmc_shmem_obj_validate_id - Validate a partition ID is participating in
259  *				a given memory transaction.
260  * @sp_id:      Partition ID to validate.
261  * @obj:        The shared memory object containing the descriptor
262  *              of the memory transaction.
263  * Return: true if ID is valid, else false.
264  */
265 bool spmc_shmem_obj_validate_id(struct spmc_shmem_obj *obj, uint16_t sp_id)
266 {
267 	bool found = false;
268 	struct ffa_mtd *desc = &obj->desc;
269 	size_t desc_size = obj->desc_size;
270 
271 	/* Validate the partition is a valid participant. */
272 	for (unsigned int i = 0U; i < desc->emad_count; i++) {
273 		size_t emad_size;
274 		struct ffa_emad_v1_0 *emad;
275 
276 		emad = spmc_shmem_obj_get_emad(desc, i,
277 					       MAKE_FFA_VERSION(1, 1),
278 					       &emad_size);
279 		/*
280 		 * Validate the calculated emad address resides within the
281 		 * descriptor.
282 		 */
283 		if ((emad == NULL) || (uintptr_t) emad >=
284 		    (uintptr_t)((uint8_t *) desc + desc_size)) {
285 			VERBOSE("Invalid emad.\n");
286 			break;
287 		}
288 		if (sp_id == emad->mapd.endpoint_id) {
289 			found = true;
290 			break;
291 		}
292 	}
293 	return found;
294 }
295 
296 /*
297  * Compare two memory regions to determine if any range overlaps with another
298  * ongoing memory transaction.
299  */
300 static bool
301 overlapping_memory_regions(struct ffa_comp_mrd *region1,
302 			   struct ffa_comp_mrd *region2)
303 {
304 	uint64_t region1_start;
305 	uint64_t region1_size;
306 	uint64_t region1_end;
307 	uint64_t region2_start;
308 	uint64_t region2_size;
309 	uint64_t region2_end;
310 
311 	assert(region1 != NULL);
312 	assert(region2 != NULL);
313 
314 	if (region1 == region2) {
315 		return true;
316 	}
317 
318 	/*
319 	 * Check each memory region in the request against existing
320 	 * transactions.
321 	 */
322 	for (size_t i = 0; i < region1->address_range_count; i++) {
323 
324 		region1_start = region1->address_range_array[i].address;
325 		region1_size =
326 			region1->address_range_array[i].page_count *
327 			PAGE_SIZE_4KB;
328 		region1_end = region1_start + region1_size;
329 
330 		for (size_t j = 0; j < region2->address_range_count; j++) {
331 
332 			region2_start = region2->address_range_array[j].address;
333 			region2_size =
334 				region2->address_range_array[j].page_count *
335 				PAGE_SIZE_4KB;
336 			region2_end = region2_start + region2_size;
337 
338 			/* Check if regions are not overlapping. */
339 			if (!((region2_end <= region1_start) ||
340 			      (region1_end <= region2_start))) {
341 				WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
342 				     region1_start, region1_end,
343 				     region2_start, region2_end);
344 				return true;
345 			}
346 		}
347 	}
348 	return false;
349 }
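
/*
 * Worked example (illustrative): {address = 0x80000000, page_count = 2}
 * covers [0x80000000, 0x80002000) and {address = 0x80001000, page_count = 1}
 * covers [0x80001000, 0x80002000). Neither range ends at or before the start
 * of the other, so the check above reports an overlap, as required.
 */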
350 
351 /*******************************************************************************
352  * FF-A v1.0 Memory Descriptor Conversion Helpers.
353  ******************************************************************************/
354 /**
355  * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
356  *                                     converted descriptor.
357  * @orig:       The original v1.0 memory transaction descriptor.
358  * @desc_size:  The size of the original v1.0 memory transaction descriptor.
359  *
360  * Return: the size required to store the descriptor in the v1.1 format.
361  */
362 static uint64_t
363 spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
364 {
365 	uint64_t size = 0;
366 	struct ffa_comp_mrd *mrd;
367 	struct ffa_emad_v1_0 *emad_array = orig->emad;
368 
369 	/* Get the size of the v1.1 descriptor. */
370 	size += sizeof(struct ffa_mtd);
371 
372 	/* Add the size of the emad descriptors. */
373 	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
374 
375 	/* Add the size of the composite mrds. */
376 	size += sizeof(struct ffa_comp_mrd);
377 
378 	/* Add the size of the constituent mrds. */
379 	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
380 	      emad_array[0].comp_mrd_offset);
381 
382 	/* Add the size of the memory region descriptors. */
383 	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
384 
385 	return size;
386 }
387 
388 /**
389  * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
390  *                                     converted descriptor.
391  * @orig:       The original v1.1 memory transaction descriptor.
392  * @desc_size:  The size of the original v1.1 memory transaction descriptor.
393  *
394  * Return: the size required to store the descriptor in the v1.0 format.
395  */
396 static size_t
397 spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
398 {
399 	size_t size = 0;
400 	struct ffa_comp_mrd *mrd;
401 	struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
402 					   ((uint8_t *) orig +
403 					    orig->emad_offset);
404 
405 	/* Get the size of the v1.0 descriptor. */
406 	size += sizeof(struct ffa_mtd_v1_0);
407 
408 	/* Add the size of the v1.0 emad descriptors. */
409 	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
410 
411 	/* Add the size of the composite mrds. */
412 	size += sizeof(struct ffa_comp_mrd);
413 
414 	/* Add the size of the constituent mrds. */
415 	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
416 	      emad_array[0].comp_mrd_offset);
417 
418 	/* Check the calculated address is within the memory descriptor. */
419 	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
420 	    (uintptr_t)((uint8_t *) orig + desc_size)) {
421 		return 0;
422 	}
423 	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
424 
425 	return size;
426 }
427 
428 /**
429  * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
430  * @out_obj:	The shared memory object to populate with the converted descriptor.
431  * @orig:	The shared memory object containing the v1.0 descriptor.
432  *
433  * Return: true if the conversion is successful else false.
434  */
435 static bool
436 spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
437 				     struct spmc_shmem_obj *orig)
438 {
439 	struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
440 	struct ffa_mtd *out = &out_obj->desc;
441 	struct ffa_emad_v1_0 *emad_array_in;
442 	struct ffa_emad_v1_0 *emad_array_out;
443 	struct ffa_comp_mrd *mrd_in;
444 	struct ffa_comp_mrd *mrd_out;
445 
446 	size_t mrd_in_offset;
447 	size_t mrd_out_offset;
448 	size_t mrd_size = 0;
449 
450 	/* Populate the new descriptor format from the v1.0 struct. */
451 	out->sender_id = mtd_orig->sender_id;
452 	out->memory_region_attributes = mtd_orig->memory_region_attributes;
453 	out->flags = mtd_orig->flags;
454 	out->handle = mtd_orig->handle;
455 	out->tag = mtd_orig->tag;
456 	out->emad_count = mtd_orig->emad_count;
457 	out->emad_size = sizeof(struct ffa_emad_v1_0);
458 
459 	/*
460 	 * We will locate the emad descriptors directly after the ffa_mtd
461 	 * struct. This will be 8-byte aligned.
462 	 */
463 	out->emad_offset = sizeof(struct ffa_mtd);
464 
465 	emad_array_in = mtd_orig->emad;
466 	emad_array_out = (struct ffa_emad_v1_0 *)
467 			 ((uint8_t *) out + out->emad_offset);
468 
469 	/* Copy across the emad structs. */
470 	for (unsigned int i = 0U; i < out->emad_count; i++) {
471 		/* Bound check for emad array. */
472 		if (((uint8_t *)&emad_array_in[i] + sizeof(struct ffa_emad_v1_0)) >
473 		    ((uint8_t *) mtd_orig + orig->desc_size)) {
474 			VERBOSE("%s: Invalid mtd structure.\n", __func__);
475 			return false;
476 		}
477 		memcpy(&emad_array_out[i], &emad_array_in[i],
478 		       sizeof(struct ffa_emad_v1_0));
479 	}
480 
481 	/* Place the mrd descriptors after the end of the emad descriptors.*/
482 	mrd_in_offset = emad_array_in->comp_mrd_offset;
483 	mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
484 	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
485 
486 	/* Add the size of the composite memory region descriptor. */
487 	mrd_size += sizeof(struct ffa_comp_mrd);
488 
489 	/* Find the mrd descriptor. */
490 	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
491 
492 	/* Add the size of the constituent memory region descriptors. */
493 	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
494 
495 	/*
496 	 * Update the offset in the emads by the delta between the input and
497 	 * output addresses.
498 	 */
499 	for (unsigned int i = 0U; i < out->emad_count; i++) {
500 		emad_array_out[i].comp_mrd_offset =
501 			emad_array_in[i].comp_mrd_offset +
502 			(mrd_out_offset - mrd_in_offset);
503 	}
504 
505 	/* Verify that we stay within bound of the memory descriptors. */
506 	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
507 	     (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
508 	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
509 	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
510 		ERROR("%s: Invalid mrd structure.\n", __func__);
511 		return false;
512 	}
513 
514 	/* Copy the mrd descriptors directly. */
515 	memcpy(mrd_out, mrd_in, mrd_size);
516 
517 	return true;
518 }
519 
520 /**
521  * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
522  *                                v1.0 memory object.
523  * @out_obj:    The shared memory object to populate the v1.0 descriptor.
524  * @orig:       The shared memory object containing the v1.1 descriptor.
525  *
526  * Return: true if the conversion is successful else false.
527  */
528 static bool
529 spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
530 			     struct spmc_shmem_obj *orig)
531 {
532 	struct ffa_mtd *mtd_orig = &orig->desc;
533 	struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
534 	struct ffa_emad_v1_0 *emad_in;
535 	struct ffa_emad_v1_0 *emad_array_in;
536 	struct ffa_emad_v1_0 *emad_array_out;
537 	struct ffa_comp_mrd *mrd_in;
538 	struct ffa_comp_mrd *mrd_out;
539 
540 	size_t mrd_in_offset;
541 	size_t mrd_out_offset;
542 	size_t emad_out_array_size;
543 	size_t mrd_size = 0;
544 	size_t orig_desc_size = orig->desc_size;
545 
546 	/* Populate the v1.0 descriptor format from the v1.1 struct. */
547 	out->sender_id = mtd_orig->sender_id;
548 	out->memory_region_attributes = mtd_orig->memory_region_attributes;
549 	out->flags = mtd_orig->flags;
550 	out->handle = mtd_orig->handle;
551 	out->tag = mtd_orig->tag;
552 	out->emad_count = mtd_orig->emad_count;
553 
554 	/* Determine the location of the emad array in both descriptors. */
555 	emad_array_in = (struct ffa_emad_v1_0 *)
556 			((uint8_t *) mtd_orig + mtd_orig->emad_offset);
557 	emad_array_out = out->emad;
558 
559 	/* Copy across the emad structs. */
560 	emad_in = emad_array_in;
561 	for (unsigned int i = 0U; i < out->emad_count; i++) {
562 		/* Bound check for emad array. */
563 		if (((uint8_t *)emad_in + sizeof(struct ffa_emad_v1_0)) >
564 				((uint8_t *) mtd_orig + orig_desc_size)) {
565 			VERBOSE("%s: Invalid mtd structure.\n", __func__);
566 			return false;
567 		}
568 		memcpy(&emad_array_out[i], emad_in,
569 		       sizeof(struct ffa_emad_v1_0));
570 
571 		emad_in = (struct ffa_emad_v1_0 *)((uint8_t *)emad_in + mtd_orig->emad_size);
572 	}
573 
574 	/* Place the mrd descriptors after the end of the emad descriptors. */
575 	emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;
576 
577 	mrd_out_offset =  (uint8_t *) out->emad - (uint8_t *) out +
578 			  emad_out_array_size;
579 
580 	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
581 
582 	mrd_in_offset = mtd_orig->emad_offset +
583 			(mtd_orig->emad_size * mtd_orig->emad_count);
584 
585 	/* Add the size of the composite memory region descriptor. */
586 	mrd_size += sizeof(struct ffa_comp_mrd);
587 
588 	/* Find the mrd descriptor. */
589 	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
590 
591 	/* Add the size of the constituent memory region descriptors. */
592 	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
593 
594 	/*
595 	 * Update the offset in the emads by the delta between the input and
596 	 * output addresses.
597 	 */
598 	emad_in = emad_array_in;
599 
600 	for (unsigned int i = 0U; i < out->emad_count; i++) {
601 		emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
602 						    (mrd_out_offset -
603 						     mrd_in_offset);
604 		emad_in = (struct ffa_emad_v1_0 *)((uint8_t *)emad_in + mtd_orig->emad_size);
605 	}
606 
607 	/* Verify that we stay within bound of the memory descriptors. */
608 	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
609 	     (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
610 	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
611 	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
612 		ERROR("%s: Invalid mrd structure.\n", __func__);
613 		return false;
614 	}
615 
616 	/* Copy the mrd descriptors directly. */
617 	memcpy(mrd_out, mrd_in, mrd_size);
618 
619 	return true;
620 }
621 
622 /**
623  * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
624  *                                     the v1.0 format and populates the
625  *                                     provided buffer.
626  * @dst:	    Buffer to populate v1.0 ffa_memory_region_descriptor.
627  * @orig_obj:	    Object containing v1.1 ffa_memory_region_descriptor.
628  * @buf_size:	    Size of the buffer to populate.
629  * @offset:	    The offset of the converted descriptor to copy.
630  * @copy_size:	    Will be populated with the number of bytes copied.
631  * @v1_0_desc_size: Will be populated with the total size of the v1.0
632  *                  descriptor.
633  *
634  * Return: 0 if conversion and population succeeded.
635  * Note: This function invalidates the reference to @orig_obj, therefore
636  * `spmc_shmem_obj_lookup` must be called if further usage is required.
637  */
638 static uint32_t
639 spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
640 				 size_t buf_size, size_t offset,
641 				 size_t *copy_size, size_t *v1_0_desc_size)
642 {
643 		struct spmc_shmem_obj *v1_0_obj;
644 
645 		/* Calculate the size that the v1.0 descriptor will require. */
646 		*v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
647 					&orig_obj->desc, orig_obj->desc_size);
648 
649 		if (*v1_0_desc_size == 0) {
650 			ERROR("%s: cannot determine size of descriptor.\n",
651 			      __func__);
652 			return FFA_ERROR_INVALID_PARAMETER;
653 		}
654 
655 		/* Get a new obj to store the v1.0 descriptor. */
656 		v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
657 						*v1_0_desc_size);
658 
659 		if (!v1_0_obj) {
660 			return FFA_ERROR_NO_MEMORY;
661 		}
662 
663 		/* Perform the conversion from v1.1 to v1.0. */
664 		if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
665 			spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
666 			return FFA_ERROR_INVALID_PARAMETER;
667 		}
668 
669 		*copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
670 		memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);
671 
672 		/*
673 		 * We're finished with the v1.0 descriptor for now so free it.
674 		 * Note that this will invalidate any references to the v1.1
675 		 * descriptor.
676 		 */
677 		spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
678 
679 		return 0;
680 }
681 
682 static int
683 spmc_validate_mtd_start(struct ffa_mtd *desc, uint32_t ffa_version,
684 			size_t fragment_length, size_t total_length)
685 {
686 	unsigned long long emad_end;
687 	unsigned long long emad_size;
688 	unsigned long long emad_offset;
689 	unsigned int min_desc_size;
690 
691 	/* Determine the appropriate minimum descriptor size. */
692 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
693 		min_desc_size = sizeof(struct ffa_mtd_v1_0);
694 	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
695 		min_desc_size = sizeof(struct ffa_mtd);
696 	} else {
697 		return FFA_ERROR_INVALID_PARAMETER;
698 	}
699 	if (fragment_length < min_desc_size) {
700 		WARN("%s: invalid length %zu < %u\n", __func__, fragment_length,
701 		     min_desc_size);
702 		return FFA_ERROR_INVALID_PARAMETER;
703 	}
704 
705 	if (desc->emad_count == 0U) {
706 		WARN("%s: unsupported attribute desc count %u.\n",
707 		     __func__, desc->emad_count);
708 		return FFA_ERROR_INVALID_PARAMETER;
709 	}
710 
711 	/*
712 	 * If the caller is using FF-A v1.0, interpret the descriptor as a v1.0
713 	 * format; otherwise assume it is a v1.1 format.
714 	 */
715 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
716 		emad_offset = emad_size = sizeof(struct ffa_emad_v1_0);
717 	} else {
718 		if (!is_aligned(desc->emad_offset, 16)) {
719 			WARN("%s: Emad offset %" PRIx32 " is not 16-byte aligned.\n",
720 			     __func__, desc->emad_offset);
721 			return FFA_ERROR_INVALID_PARAMETER;
722 		}
723 		if (desc->emad_offset < sizeof(struct ffa_mtd)) {
724 			WARN("%s: Emad offset too small: 0x%" PRIx32 " < 0x%zx.\n",
725 			     __func__, desc->emad_offset,
726 			     sizeof(struct ffa_mtd));
727 			return FFA_ERROR_INVALID_PARAMETER;
728 		}
729 		emad_offset = desc->emad_offset;
730 		if (desc->emad_size < sizeof(struct ffa_emad_v1_0)) {
731 			WARN("%s: Bad emad size (%" PRIu32 " < %zu).\n", __func__,
732 			     desc->emad_size, sizeof(struct ffa_emad_v1_0));
733 			return FFA_ERROR_INVALID_PARAMETER;
734 		}
735 		if (!is_aligned(desc->emad_size, 16)) {
736 			WARN("%s: Emad size 0x%" PRIx32 " is not 16-byte aligned.\n",
737 			     __func__, desc->emad_size);
738 			return FFA_ERROR_INVALID_PARAMETER;
739 		}
740 		emad_size = desc->emad_size;
741 	}
742 
743 	/*
744 	 * Overflow is impossible: the arithmetic happens in at least 64-bit
745 	 * precision, but all of the operands are bounded by UINT32_MAX, and
746 	 *   ((2^32 - 1) * (2^32 - 1) + (2^32 - 1) + (2^32 - 1))
747 	 * = ((2^32 - 1) * ((2^32 - 1) + 1 + 1))
748 	 * = ((2^32 - 1) * (2^32 + 1))
749 	 * = (2^64 - 1).
750 	 */
751 	CASSERT(sizeof(desc->emad_count) == 4, assert_emad_count_max_too_large);
752 	emad_end = (desc->emad_count * (unsigned long long)emad_size) +
753 		   (unsigned long long)sizeof(struct ffa_comp_mrd) +
754 		   (unsigned long long)emad_offset;
755 
756 	if (emad_end > total_length) {
757 		WARN("%s: Composite memory region extends beyond descriptor: 0x%llx > 0x%zx\n",
758 		     __func__, emad_end, total_length);
759 		return FFA_ERROR_INVALID_PARAMETER;
760 	}
761 
762 	return 0;
763 }
764 
765 static inline const struct ffa_emad_v1_0 *
766 emad_advance(const struct ffa_emad_v1_0 *emad, size_t offset)
767 {
768 	return (const struct ffa_emad_v1_0 *)((const uint8_t *)emad + offset);
769 }
770 
771 /**
772  * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
773  * @obj:	  Object containing ffa_memory_region_descriptor.
774  * @ffa_version:  FF-A version of the provided descriptor.
775  *
776  * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if
777  * constituent_memory_region_descriptor offset or count is invalid.
778  */
779 static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
780 				uint32_t ffa_version)
781 {
782 	unsigned long long total_page_count;
783 	const struct ffa_emad_v1_0 *first_emad;
784 	const struct ffa_emad_v1_0 *end_emad;
785 	size_t emad_size;
786 	uint32_t comp_mrd_offset;
787 	size_t header_emad_size;
788 	size_t size;
789 	size_t count;
790 	size_t expected_size;
791 	const struct ffa_comp_mrd *comp;
792 
793 	if (obj->desc_filled != obj->desc_size) {
794 		ERROR("BUG: %s called on incomplete object (%zu != %zu)\n",
795 		      __func__, obj->desc_filled, obj->desc_size);
796 		panic();
797 	}
798 
799 	if (spmc_validate_mtd_start(&obj->desc, ffa_version,
800 				    obj->desc_filled, obj->desc_size)) {
801 		ERROR("BUG: %s called on object with corrupt memory region descriptor\n",
802 		      __func__);
803 		panic();
804 	}
805 
806 	first_emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
807 					     ffa_version, &emad_size);
808 	end_emad = emad_advance(first_emad, obj->desc.emad_count * emad_size);
809 	comp_mrd_offset = first_emad->comp_mrd_offset;
810 
811 	/* Loop through the endpoint descriptors, validating each of them. */
812 	for (const struct ffa_emad_v1_0 *emad = first_emad; emad < end_emad;) {
813 		ffa_endpoint_id16_t ep_id;
814 
815 		/*
816 		 * If a partition ID resides in the secure world validate that
817 		 * the partition ID is for a known partition. Ignore any
818 		 * partition ID belonging to the normal world as it is assumed
819 		 * the Hypervisor will have validated these.
820 		 */
821 		ep_id = emad->mapd.endpoint_id;
822 		if (ffa_is_secure_world_id(ep_id)) {
823 			if (spmc_get_sp_ctx(ep_id) == NULL) {
824 				WARN("%s: Invalid receiver id 0x%x\n",
825 				     __func__, ep_id);
826 				return FFA_ERROR_INVALID_PARAMETER;
827 			}
828 		}
829 
830 		/*
831 		 * The offset provided to the composite memory region descriptor
832 		 * should be consistent across endpoint descriptors.
833 		 */
834 		if (comp_mrd_offset != emad->comp_mrd_offset) {
835 			ERROR("%s: mismatching offsets provided, %u != %u\n",
836 			       __func__, emad->comp_mrd_offset, comp_mrd_offset);
837 			return FFA_ERROR_INVALID_PARAMETER;
838 		}
839 
840 		/* Advance to the next endpoint descriptor */
841 		emad = emad_advance(emad, emad_size);
842 
843 		/*
844 		 * Ensure neither this emad nor any subsequent emads have
845 		 * the same partition ID as the previous emad.
846 		 */
847 		for (const struct ffa_emad_v1_0 *other_emad = emad;
848 		     other_emad < end_emad;
849 		     other_emad = emad_advance(other_emad, emad_size)) {
850 			if (ep_id == other_emad->mapd.endpoint_id) {
851 				WARN("%s: Duplicated endpoint id 0x%x\n",
852 				     __func__, other_emad->mapd.endpoint_id);
853 				return FFA_ERROR_INVALID_PARAMETER;
854 			}
855 		}
856 	}
857 
858 	header_emad_size = (size_t)((const uint8_t *)end_emad -
859 				    (const uint8_t *)&obj->desc);
860 
861 	/*
862 	 * Check that the composite descriptor
863 	 * is after the endpoint descriptors.
864 	 */
865 	if (comp_mrd_offset < header_emad_size) {
866 		WARN("%s: invalid object, offset %u < header + emad %zu\n",
867 		     __func__, comp_mrd_offset, header_emad_size);
868 		return FFA_ERROR_INVALID_PARAMETER;
869 	}
870 
871 	/* Ensure the composite descriptor offset is aligned. */
872 	if (!is_aligned(comp_mrd_offset, 16)) {
873 		WARN("%s: invalid object, unaligned composite memory "
874 		     "region descriptor offset %u.\n",
875 		     __func__, comp_mrd_offset);
876 		return FFA_ERROR_INVALID_PARAMETER;
877 	}
878 
879 	size = obj->desc_size;
880 
881 	/* Check that the composite descriptor is in bounds. */
882 	if (comp_mrd_offset > size) {
883 		WARN("%s: invalid object, offset %u > total size %zu\n",
884 		     __func__, comp_mrd_offset, obj->desc_size);
885 		return FFA_ERROR_INVALID_PARAMETER;
886 	}
887 	size -= comp_mrd_offset;
888 
889 	/* Check that there is enough space for the composite descriptor. */
890 	if (size < sizeof(struct ffa_comp_mrd)) {
891 		WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
892 		     __func__, comp_mrd_offset, obj->desc_size);
893 		return FFA_ERROR_INVALID_PARAMETER;
894 	}
895 	size -= sizeof(*comp);
896 
897 	count = size / sizeof(struct ffa_cons_mrd);
898 
899 	comp = (const struct ffa_comp_mrd *)
900 	       ((const uint8_t *)(&obj->desc) + comp_mrd_offset);
901 
902 	if (comp->address_range_count != count) {
903 		WARN("%s: invalid object, desc count %u != %zu\n",
904 		     __func__, comp->address_range_count, count);
905 		return FFA_ERROR_INVALID_PARAMETER;
906 	}
907 
908 	/* Ensure that the expected and actual sizes are equal. */
909 	expected_size = comp_mrd_offset + sizeof(*comp) +
910 		count * sizeof(struct ffa_cons_mrd);
911 
912 	if (expected_size != obj->desc_size) {
913 		WARN("%s: invalid object, computed size %zu != size %zu\n",
914 		       __func__, expected_size, obj->desc_size);
915 		return FFA_ERROR_INVALID_PARAMETER;
916 	}
917 
918 	total_page_count = 0;
919 
920 	/*
921 	 * comp->address_range_count is 32-bit, so 'count' must fit in a
922 	 * uint32_t at this point.
923 	 */
924 	for (size_t i = 0; i < count; i++) {
925 		const struct ffa_cons_mrd *mrd = comp->address_range_array + i;
926 
927 		if (!is_aligned(mrd->address, PAGE_SIZE)) {
928 			WARN("%s: invalid object, address in region descriptor "
929 			     "%zu not 4K aligned (got 0x%016llx)\n",
930 			     __func__, i, (unsigned long long)mrd->address);
931 		}
932 
933 		/*
934 		 * No overflow possible: total_page_count can hold at
935 		 * least 2^64 - 1, but will have at most 2^32 - 1 values
936 		 * added to it, each of which cannot exceed 2^32 - 1.
937 		 */
938 		total_page_count += mrd->page_count;
939 	}
940 
941 	if (comp->total_page_count != total_page_count) {
942 		WARN("%s: invalid object, desc total_page_count %u != %llu\n",
943 		     __func__, comp->total_page_count, total_page_count);
944 		return FFA_ERROR_INVALID_PARAMETER;
945 	}
946 
947 	return 0;
948 }
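
/*
 * Size check example (illustrative, assuming 16-byte struct ffa_comp_mrd and
 * struct ffa_cons_mrd): with comp_mrd_offset = 0x50 and three constituent
 * ranges, expected_size = 0x50 + 0x10 + 3 * 0x10 = 0x90, which must equal
 * desc_size exactly; both trailing and missing bytes fail the check above.
 */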
949 
950 /**
951  * spmc_shmem_check_state_obj - Check if the descriptor describes memory
952  *				regions that are currently involved with an
953  *				existing memory transactions. This implies that
954  *				the memory is not in a valid state for lending.
955  * @obj:    Object containing ffa_memory_region_descriptor.
956  *
957  * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if invalid memory
958  * state.
959  */
960 static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
961 				      uint32_t ffa_version)
962 {
963 	size_t obj_offset = 0;
964 	struct spmc_shmem_obj *inflight_obj;
965 
966 	struct ffa_comp_mrd *other_mrd;
967 	struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
968 								  ffa_version);
969 
970 	if (requested_mrd == NULL) {
971 		return FFA_ERROR_INVALID_PARAMETER;
972 	}
973 
974 	inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
975 					       &obj_offset);
976 
977 	while (inflight_obj != NULL) {
978 		/*
979 		 * Don't compare the transaction to itself or to partially
980 		 * transmitted descriptors.
981 		 */
982 		if ((obj->desc.handle != inflight_obj->desc.handle) &&
983 		    (obj->desc_size == obj->desc_filled)) {
984 			other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
985 							  FFA_VERSION_COMPILED);
986 			if (other_mrd == NULL) {
987 				return FFA_ERROR_INVALID_PARAMETER;
988 			}
989 			if (overlapping_memory_regions(requested_mrd,
990 						       other_mrd)) {
991 				return FFA_ERROR_INVALID_PARAMETER;
992 			}
993 		}
994 
995 		inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
996 						       &obj_offset);
997 	}
998 	return 0;
999 }
1000 
1001 static long spmc_ffa_fill_desc(struct mailbox *mbox,
1002 			       struct spmc_shmem_obj *obj,
1003 			       uint32_t fragment_length,
1004 			       ffa_mtd_flag32_t mtd_flag,
1005 			       uint32_t ffa_version,
1006 			       void *smc_handle)
1007 {
1008 	int ret;
1009 	uint32_t handle_low;
1010 	uint32_t handle_high;
1011 
1012 	if (mbox->rxtx_page_count == 0U) {
1013 		WARN("%s: buffer pair not registered.\n", __func__);
1014 		ret = FFA_ERROR_INVALID_PARAMETER;
1015 		goto err_arg;
1016 	}
1017 
1018 	CASSERT(sizeof(mbox->rxtx_page_count) == 4, assert_bogus_page_count);
1019 	if (fragment_length > (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB) {
1020 		WARN("%s: bad fragment size %u > %" PRIu64 " buffer size\n", __func__,
1021 		     fragment_length, (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB);
1022 		ret = FFA_ERROR_INVALID_PARAMETER;
1023 		goto err_arg;
1024 	}
1025 
1026 	if (fragment_length > obj->desc_size - obj->desc_filled) {
1027 		WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
1028 		     fragment_length, obj->desc_size - obj->desc_filled);
1029 		ret = FFA_ERROR_INVALID_PARAMETER;
1030 		goto err_arg;
1031 	}
1032 
1033 	memcpy((uint8_t *)&obj->desc + obj->desc_filled,
1034 	       (uint8_t *) mbox->tx_buffer, fragment_length);
1035 
1036 	/* Ensure that the sender ID resides in the normal world. */
1037 	if (ffa_is_secure_world_id(obj->desc.sender_id)) {
1038 		WARN("%s: Invalid sender ID 0x%x.\n",
1039 		     __func__, obj->desc.sender_id);
1040 		ret = FFA_ERROR_DENIED;
1041 		goto err_arg;
1042 	}
1043 
1044 	/* Ensure the NS bit is set to 0. */
1045 	if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
1046 		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
1047 		ret = FFA_ERROR_INVALID_PARAMETER;
1048 		goto err_arg;
1049 	}
1050 
1051 	/*
1052 	 * We don't currently support any optional flags so ensure none are
1053 	 * requested.
1054 	 */
1055 	if (obj->desc.flags != 0U && mtd_flag != 0U &&
1056 	    (obj->desc.flags != mtd_flag)) {
1057 		WARN("%s: invalid memory transaction flags %u != %u\n",
1058 		     __func__, obj->desc.flags, mtd_flag);
1059 		ret = FFA_ERROR_INVALID_PARAMETER;
1060 		goto err_arg;
1061 	}
1062 
1063 	if (obj->desc_filled == 0U) {
1064 		/* First fragment, descriptor header has been copied */
1065 		ret = spmc_validate_mtd_start(&obj->desc, ffa_version,
1066 					      fragment_length, obj->desc_size);
1067 		if (ret != 0) {
1068 			goto err_bad_desc;
1069 		}
1070 
1071 		obj->desc.handle = spmc_shmem_obj_state.next_handle++;
1072 		obj->desc.flags |= mtd_flag;
1073 	}
1074 
1075 	obj->desc_filled += fragment_length;
1076 
1077 	handle_low = (uint32_t)obj->desc.handle;
1078 	handle_high = obj->desc.handle >> 32;
1079 
1080 	if (obj->desc_filled != obj->desc_size) {
1081 		SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
1082 			 handle_high, obj->desc_filled,
1083 			 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
1084 	}
1085 
1086 	/* The full descriptor has been received, perform any final checks. */
1087 
1088 	/* Ensure the NS bit is set to 1 since we only allow non-secure senders. */
1089 	obj->desc.memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
1090 
1091 	ret = spmc_shmem_check_obj(obj, ffa_version);
1092 	if (ret != 0) {
1093 		goto err_bad_desc;
1094 	}
1095 
1096 	ret = spmc_shmem_check_state_obj(obj, ffa_version);
1097 	if (ret) {
1098 		ERROR("%s: invalid memory region descriptor.\n", __func__);
1099 		goto err_bad_desc;
1100 	}
1101 
1102 	/*
1103 	 * Everything checks out, if the sender was using FF-A v1.0, convert
1104 	 * the descriptor format to use the v1.1 structures.
1105 	 */
1106 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1107 		struct spmc_shmem_obj *v1_1_obj;
1108 		uint64_t mem_handle;
1109 
1110 		/* Calculate the size that the v1.1 descriptor will require. */
1111 		uint64_t v1_1_desc_size =
1112 		    spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
1113 						      obj->desc_size);
1114 
1115 		if (v1_1_desc_size > UINT32_MAX) {
1116 			ret = FFA_ERROR_NO_MEMORY;
1117 			goto err_arg;
1118 		}
1119 
1120 		/* Get a new obj to store the v1.1 descriptor. */
1121 		v1_1_obj =
1122 		    spmc_shmem_obj_alloc(&spmc_shmem_obj_state, (size_t)v1_1_desc_size);
1123 
1124 		if (!v1_1_obj) {
1125 			ret = FFA_ERROR_NO_MEMORY;
1126 			goto err_arg;
1127 		}
1128 
1129 		/* Perform the conversion from v1.0 to v1.1. */
1130 		v1_1_obj->desc_size = (uint32_t)v1_1_desc_size;
1131 		v1_1_obj->desc_filled = (uint32_t)v1_1_desc_size;
1132 		if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
1133 			ERROR("%s: Could not convert mtd!\n", __func__);
1134 			spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
1135 			ret = FFA_ERROR_INVALID_PARAMETER; goto err_arg;
1136 		}
1137 
1138 		/*
1139 		 * We're finished with the v1.0 descriptor so free it
1140 		 * and continue our checks with the new v1.1 descriptor.
1141 		 */
1142 		mem_handle = obj->desc.handle;
1143 		spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1144 		obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1145 		if (obj == NULL) {
1146 			ERROR("%s: Failed to find converted descriptor.\n",
1147 			     __func__);
1148 			ret = FFA_ERROR_INVALID_PARAMETER;
1149 			return spmc_ffa_error_return(smc_handle, ret);
1150 		}
1151 	}
1152 
1153 	/* Allow for platform specific operations to be performed. */
1154 	ret = plat_spmc_shmem_begin(&obj->desc);
1155 	if (ret != 0) {
1156 		goto err_arg;
1157 	}
1158 
1159 	SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
1160 		 0, 0, 0);
1161 
1162 err_bad_desc:
1163 err_arg:
1164 	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1165 	return spmc_ffa_error_return(smc_handle, ret);
1166 }
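
/*
 * Illustrative message flow for a two-fragment share (sizes hypothetical):
 *
 *   NS client                            SPMC
 *   FFA_MEM_SHARE(total=0x2000,
 *                 frag=0x1000)      ->   allocate obj, copy fragment
 *                                   <-   FFA_MEM_FRAG_RX(handle, 0x1000)
 *   FFA_MEM_FRAG_TX(handle,
 *                   frag=0x1000)    ->   copy rest, validate, convert the
 *                                        descriptor to v1.1 if needed
 *                                   <-   FFA_SUCCESS(handle)
 */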
1167 
1168 /**
1169  * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
1170  * @smc_fid:            FID of SMC (FFA_MEM_SHARE or FFA_MEM_LEND).
1171  * @total_length:       Total length of shared memory descriptor.
1172  * @fragment_length:    Length of fragment of shared memory descriptor passed in
1173  *                      this call.
1174  * @address:            Not supported, must be 0.
1175  * @page_count:         Not supported, must be 0.
1176  * @smc_handle:         Handle passed to smc call. Used to return
1177  *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
1178  *
1179  * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
1180  * to share or lend memory from non-secure os to secure os (with no stream
1181  * endpoints).
1182  *
1183  * Return: 0 on success, error code on failure.
1184  */
1185 long spmc_ffa_mem_send(uint32_t smc_fid,
1186 			bool secure_origin,
1187 			uint64_t total_length,
1188 			uint32_t fragment_length,
1189 			uint64_t address,
1190 			uint32_t page_count,
1191 			void *cookie,
1192 			void *handle,
1193 			uint64_t flags)
1194 
1195 {
1196 	long ret;
1197 	struct spmc_shmem_obj *obj;
1198 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1199 	ffa_mtd_flag32_t mtd_flag;
1200 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1201 	size_t min_desc_size;
1202 
1203 	if (address != 0U || page_count != 0U) {
1204 		WARN("%s: custom memory region for message not supported.\n",
1205 		     __func__);
1206 		return spmc_ffa_error_return(handle,
1207 					     FFA_ERROR_INVALID_PARAMETER);
1208 	}
1209 
1210 	if (secure_origin) {
1211 		WARN("%s: unsupported share direction.\n", __func__);
1212 		return spmc_ffa_error_return(handle,
1213 					     FFA_ERROR_INVALID_PARAMETER);
1214 	}
1215 
1216 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1217 		min_desc_size = sizeof(struct ffa_mtd_v1_0);
1218 	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
1219 		min_desc_size = sizeof(struct ffa_mtd);
1220 	} else {
1221 		WARN("%s: bad FF-A version.\n", __func__);
1222 		return spmc_ffa_error_return(handle,
1223 					     FFA_ERROR_INVALID_PARAMETER);
1224 	}
1225 
1226 	/* Check if the descriptor is too small for the FF-A version. */
1227 	if (fragment_length < min_desc_size) {
1228 		WARN("%s: bad first fragment size %u < %zu\n",
1229 		     __func__, fragment_length, min_desc_size);
1230 		return spmc_ffa_error_return(handle,
1231 					     FFA_ERROR_INVALID_PARAMETER);
1232 	}
1233 
1234 	if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
1235 		mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
1236 	} else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
1237 		mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
1238 	} else {
1239 		WARN("%s: invalid memory management operation.\n", __func__);
1240 		return spmc_ffa_error_return(handle,
1241 					     FFA_ERROR_INVALID_PARAMETER);
1242 	}
1243 
1244 	spin_lock(&spmc_shmem_obj_state.lock);
1245 	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
1246 	if (obj == NULL) {
1247 		ret = FFA_ERROR_NO_MEMORY;
1248 		goto err_unlock;
1249 	}
1250 
1251 	spin_lock(&mbox->lock);
1252 	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
1253 				 ffa_version, handle);
1254 	spin_unlock(&mbox->lock);
1255 
1256 	spin_unlock(&spmc_shmem_obj_state.lock);
1257 	return ret;
1258 
1259 err_unlock:
1260 	spin_unlock(&spmc_shmem_obj_state.lock);
1261 	return spmc_ffa_error_return(handle, ret);
1262 }
1263 
1264 /**
1265  * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
1266  * @smc_fid:            FID of SMC.
1267  * @handle_low:         Handle_low value returned from FFA_MEM_FRAG_RX.
1268  * @handle_high:        Handle_high value returned from FFA_MEM_FRAG_RX.
1269  * @fragment_length:    Length of fragments transmitted.
1270  * @sender_id:          VM ID of sender in bits [31:16].
1271  * @smc_handle:         Handle passed to smc call. Used to return
1272  *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
1273  *
1274  * Return: @smc_handle on success, error code on failure.
1275  */
1276 long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
1277 			  bool secure_origin,
1278 			  uint64_t handle_low,
1279 			  uint64_t handle_high,
1280 			  uint32_t fragment_length,
1281 			  uint32_t sender_id,
1282 			  void *cookie,
1283 			  void *handle,
1284 			  uint64_t flags)
1285 {
1286 	long ret;
1287 	uint32_t desc_sender_id;
1288 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1289 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1290 
1291 	struct spmc_shmem_obj *obj;
1292 	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1293 
1294 	spin_lock(&spmc_shmem_obj_state.lock);
1295 
1296 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1297 	if (obj == NULL) {
1298 		WARN("%s: invalid handle 0x%lx.\n",
1299 		     __func__, mem_handle);
1300 		ret = FFA_ERROR_INVALID_PARAMETER;
1301 		goto err_unlock;
1302 	}
1303 
1304 	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1305 	if (sender_id != desc_sender_id) {
1306 		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1307 		     sender_id, desc_sender_id);
1308 		ret = FFA_ERROR_INVALID_PARAMETER;
1309 		goto err_unlock;
1310 	}
1311 
1312 	if (obj->desc_filled == obj->desc_size) {
1313 		WARN("%s: object desc already filled, %zu\n", __func__,
1314 		     obj->desc_filled);
1315 		ret = FFA_ERROR_INVALID_PARAMETER;
1316 		goto err_unlock;
1317 	}
1318 
1319 	spin_lock(&mbox->lock);
1320 	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
1321 				 handle);
1322 	spin_unlock(&mbox->lock);
1323 
1324 	spin_unlock(&spmc_shmem_obj_state.lock);
1325 	return ret;
1326 
1327 err_unlock:
1328 	spin_unlock(&spmc_shmem_obj_state.lock);
1329 	return spmc_ffa_error_return(handle, ret);
1330 }
1331 
1332 /**
1333  * spmc_ffa_mem_retrieve_clear_ns_bit - Clear the NS bit in the response descriptor
1334  *					if the caller implements a version smaller
1335  *					than FF-A 1.1 and if they have not requested
1336  *					the functionality.
1337  *					TODO: We are assuming that the caller is
1338  *					an SP. To support retrieval from the
1339  *					normal world this function will need to be
1340  *					expanded accordingly.
1341  * @resp:       Descriptor populated in caller's RX buffer.
1342  * @sp_ctx:     Context of the calling SP.
1343  */
1344 void spmc_ffa_mem_retrieve_clear_ns_bit(struct ffa_mtd *resp,
1345 			 struct secure_partition_desc *sp_ctx)
1346 {
1347 	if (sp_ctx->ffa_version < MAKE_FFA_VERSION(1, 1) &&
1348 	    !sp_ctx->ns_bit_requested) {
1349 		resp->memory_region_attributes &= ~FFA_MEM_ATTR_NS_BIT;
1350 	}
1351 }
1352 
1353 /**
1354  * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
1355  * @smc_fid:            FID of SMC
1356  * @total_length:       Total length of retrieve request descriptor if this is
1357  *                      the first call. Otherwise (unsupported) must be 0.
1358  * @fragment_length:    Length of fragment of retrieve request descriptor passed
1359  *                      in this call. Only @fragment_length == @total_length
1360  *                      is supported by this implementation.
1361  * @address:            Not supported, must be 0.
1362  * @page_count:         Not supported, must be 0.
1363  * @smc_handle:         Handle passed to smc call. Used to return
1364  *                      FFA_MEM_RETRIEVE_RESP.
1365  *
1366  * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
1367  * Used by secure os to retrieve memory already shared by non-secure os,
1368  * or by the hypervisor to retrieve the memory region for a specific handle.
1369  * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
1370  * the client must call FFA_MEM_FRAG_RX until the full response has been
1371  * received.
1372  *
1373  * Return: @handle on success, error code on failure.
1374  */
1375 long
1376 spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
1377 			  bool secure_origin,
1378 			  uint32_t total_length,
1379 			  uint32_t fragment_length,
1380 			  uint64_t address,
1381 			  uint32_t page_count,
1382 			  void *cookie,
1383 			  void *handle,
1384 			  uint64_t flags)
1385 {
1386 	int ret;
1387 	size_t buf_size;
1388 	size_t copy_size = 0;
1389 	size_t min_desc_size;
1390 	size_t out_desc_size = 0;
1391 
1392 	/*
1393 	 * Currently we are only accessing fields that are the same in both the
1394 	 * v1.0 and v1.1 mtd struct, therefore we can use a v1.1 struct directly
1395 	 * here. We only need to validate against the appropriate struct size.
1396 	 */
1397 	struct ffa_mtd *resp;
1398 	const struct ffa_mtd *req;
1399 	struct spmc_shmem_obj *obj = NULL;
1400 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1401 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1402 	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
1403 
1404 	if (address != 0U || page_count != 0U) {
1405 		WARN("%s: custom memory region not supported.\n", __func__);
1406 		return spmc_ffa_error_return(handle,
1407 					     FFA_ERROR_INVALID_PARAMETER);
1408 	}
1409 
1410 	spin_lock(&mbox->lock);
1411 
1412 	req = mbox->tx_buffer;
1413 	resp = mbox->rx_buffer;
1414 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1415 
1416 	if (mbox->rxtx_page_count == 0U) {
1417 		WARN("%s: buffer pair not registered.\n", __func__);
1418 		ret = FFA_ERROR_INVALID_PARAMETER;
1419 		goto err_unlock_mailbox;
1420 	}
1421 
1422 	if (mbox->state != MAILBOX_STATE_EMPTY) {
1423 		WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
1424 		ret = FFA_ERROR_DENIED;
1425 		goto err_unlock_mailbox;
1426 	}
1427 
1428 	if (fragment_length != total_length) {
1429 		WARN("%s: fragmented retrieve request not supported.\n",
1430 		     __func__);
1431 		ret = FFA_ERROR_INVALID_PARAMETER;
1432 		goto err_unlock_mailbox;
1433 	}
1434 
1435 	/* req->emad_count is not set for retrieve by hypervisor */
1436 	if (secure_origin && req->emad_count == 0U) {
1437 		WARN("%s: unsupported attribute desc count %u.\n",
1438 		     __func__, req->emad_count);
1439 		ret = FFA_ERROR_INVALID_PARAMETER;
1440 		goto err_unlock_mailbox;
1441 	}
1442 
1443 	/* Determine the appropriate minimum descriptor size. */
1444 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1445 		min_desc_size = sizeof(struct ffa_mtd_v1_0);
1446 	} else {
1447 		min_desc_size = sizeof(struct ffa_mtd);
1448 	}
1449 	if (total_length < min_desc_size) {
1450 		WARN("%s: invalid length %u < %zu\n", __func__, total_length,
1451 		     min_desc_size);
1452 		ret = FFA_ERROR_INVALID_PARAMETER;
1453 		goto err_unlock_mailbox;
1454 	}
1455 
1456 	spin_lock(&spmc_shmem_obj_state.lock);
1457 
1458 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1459 	if (obj == NULL) {
1460 		ret = FFA_ERROR_INVALID_PARAMETER;
1461 		goto err_unlock_all;
1462 	}
1463 
1464 	if (obj->desc_filled != obj->desc_size) {
1465 		WARN("%s: incomplete object desc filled %zu < size %zu\n",
1466 		     __func__, obj->desc_filled, obj->desc_size);
1467 		ret = FFA_ERROR_INVALID_PARAMETER;
1468 		goto err_unlock_all;
1469 	}
1470 
1471 	if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
1472 		WARN("%s: wrong sender id 0x%x != 0x%x\n",
1473 		     __func__, req->sender_id, obj->desc.sender_id);
1474 		ret = FFA_ERROR_INVALID_PARAMETER;
1475 		goto err_unlock_all;
1476 	}
1477 
1478 	if (req->emad_count != 0U && req->tag != obj->desc.tag) {
1479 		WARN("%s: wrong tag 0x%lx != 0x%lx\n",
1480 		     __func__, req->tag, obj->desc.tag);
1481 		ret = FFA_ERROR_INVALID_PARAMETER;
1482 		goto err_unlock_all;
1483 	}
1484 
1485 	if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
1486 		WARN("%s: mismatch of endpoint counts %u != %u\n",
1487 		     __func__, req->emad_count, obj->desc.emad_count);
1488 		ret = FFA_ERROR_INVALID_PARAMETER;
1489 		goto err_unlock_all;
1490 	}
1491 
1492 	/*
1493 	 * TODO: add support for descriptors with more than one EMAD
1494 	 * If we get a retrieve from the hypervisor, we currently just
1495 	 * copy the existing descriptor below as is. The spec requires
1496 	 * that the returned descriptor only contain one EMAD, so for
1497 	 * now we enforce that here.
1498 	 */
1499 	if (req->emad_count == 0U && obj->desc.emad_count != 1U) {
1500 		WARN("%s: unsupported endpoint count %u != 1\n", __func__,
1501 		     obj->desc.emad_count);
1502 		ret = FFA_ERROR_INVALID_PARAMETER;
1503 		goto err_unlock_all;
1504 	}
1505 
1506 	/* Ensure the NS bit is set to 0 in the request. */
1507 	if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
1508 		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
1509 		ret = FFA_ERROR_INVALID_PARAMETER;
1510 		goto err_unlock_all;
1511 	}

	if (req->flags != 0U) {
		if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
		    (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
			/*
			 * If the retrieve request specifies the memory
			 * transaction type, ensure it matches what we expect.
			 */
			WARN("%s: wrong mem transaction flags %x != %x\n",
			     __func__, req->flags, obj->desc.flags);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}

		if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
		    req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
			/*
			 * The current implementation does not support donate,
			 * and no other flags are supported.
			 */
			WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}
	}

	/* Validate the caller is a valid participant. */
	if (req->emad_count != 0U &&
	    !spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
		WARN("%s: Invalid endpoint ID (0x%x).\n",
		     __func__, sp_ctx->sp_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/* Validate that each provided EMAD offset and structure is valid. */
	for (size_t i = 0; i < req->emad_count; i++) {
		size_t emad_size;
		struct ffa_emad_v1_0 *emad;

		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
					       &emad_size);

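		/*
		 * Reject the request if the computed EMAD address falls
		 * outside of the transmitted descriptor; a malformed
		 * offset must not let us read past the TX buffer.
		 */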
		if ((uintptr_t) emad >= (uintptr_t)
					((uint8_t *) req + total_length)) {
			WARN("%s: invalid EMAD access.\n", __func__);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}
	}

	/*
	 * Validate that all the endpoints match in the case of multiple
	 * borrowers. We don't mandate that the order of the borrowers
	 * must match in the two descriptors, therefore check whether the
	 * endpoints match in any order.
	 */
	for (size_t i = 0; i < req->emad_count; i++) {
		bool found = false;
		size_t emad_size;
		struct ffa_emad_v1_0 *emad;
		struct ffa_emad_v1_0 *other_emad;

		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
					       &emad_size);

		for (size_t j = 0; j < obj->desc.emad_count; j++) {
			other_emad = spmc_shmem_obj_get_emad(
					&obj->desc, j, MAKE_FFA_VERSION(1, 1),
					&emad_size);

			/*
			 * This loop only runs when req->emad_count is
			 * non-zero, so the endpoint IDs can be compared
			 * directly.
			 */
			if (emad->mapd.endpoint_id ==
			    other_emad->mapd.endpoint_id) {
				found = true;
				break;
			}
		}

		if (!found) {
			WARN("%s: invalid receiver id (0x%x).\n",
			     __func__, emad->mapd.endpoint_id);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}
	}

	mbox->state = MAILBOX_STATE_FULL;

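	/*
	 * A retrieve by the hypervisor (emad_count == 0) does not take a
	 * reference on the object, so it cannot block a later
	 * FFA_MEM_RECLAIM by the owner.
	 */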
	if (req->emad_count != 0U) {
		obj->in_use++;
	}

	/*
	 * If the caller is v1.0, convert the descriptor; otherwise copy
	 * it directly.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
							&copy_size,
							&out_desc_size);
		if (ret != 0) {
			ERROR("%s: Failed to process descriptor.\n", __func__);
			goto err_unlock_all;
		}
	} else {
		copy_size = MIN(obj->desc_size, buf_size);
		out_desc_size = obj->desc_size;

		memcpy(resp, &obj->desc, copy_size);
	}

	if (req->emad_count == 0U) {
		size_t emad_size;
		struct ffa_emad_v1_0 *emad;

		assert(resp->handle == req->handle);
		assert(resp->emad_count == 1U);

		emad = spmc_shmem_obj_get_emad(resp, 0, ffa_version,
					       &emad_size);
		/*
		 * The Sender endpoint ID field must be set to the Lender
		 * or Owner VM ID in the transaction descriptor.
		 */
		emad->mapd.endpoint_id = obj->desc.sender_id;
	}

	/* Clear the NS bit in the response if applicable. */
	if (secure_origin) {
		spmc_ffa_mem_retrieve_clear_ns_bit(resp, sp_ctx);
	} else {
		/*
		 * The NS bit is set by the SPMC in the corresponding invocation
		 * of the FFA_MEM_RETRIEVE_RESP ABI at the Non-secure physical
		 * FF-A instance as follows.
		 */
		if (ffa_version > MAKE_FFA_VERSION(1, 0)) {
			/*
			 * The bit is set to b'1 if the version of the Framework
			 * implemented by the Hypervisor is greater than v1.0.
			 */
			resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
		} else {
			/*
			 * The bit is set to b'0 if the version of the Framework
			 * implemented by the Hypervisor is v1.0.
			 */
			resp->memory_region_attributes &= ~FFA_MEM_ATTR_NS_BIT;
		}
	}

	spin_unlock(&spmc_shmem_obj_state.lock);
	spin_unlock(&mbox->lock);

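	/*
	 * Return the total descriptor size and the number of bytes
	 * actually copied; when the RX buffer is too small for the full
	 * descriptor, the caller fetches the remainder with
	 * FFA_MEM_FRAG_RX.
	 */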
	SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
		 copy_size, 0, 0, 0, 0, 0);

err_unlock_all:
	spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, ret);
}
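
/*
 * Illustrative (non-normative) borrower flow for the handler above,
 * assuming a hypothetical SP that has already registered its RX/TX pair:
 *
 *   1. Write a retrieve request (handle, sender_id, tag and a single
 *      EMAD naming itself) into its TX buffer.
 *   2. Invoke FFA_MEM_RETRIEVE_REQ; on success the SPMC answers with
 *      FFA_MEM_RETRIEVE_RESP and the descriptor in the SP's RX buffer.
 *   3. If the fragment length is smaller than the total length, loop
 *      on FFA_MEM_FRAG_RX until the whole descriptor is received.
 *   4. Release the buffer with FFA_RX_RELEASE and, once done with the
 *      memory, call FFA_MEM_RELINQUISH (handled below).
 */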

/**
 * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
 * @secure_origin:      true if the call originated from the secure world.
 * @handle_low:         Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
 * @handle_high:        Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
 * @fragment_offset:    Byte offset in descriptor to resume at.
 * @sender_id:          Bit[31:16]: Endpoint id of sender if client is a
 *                      hypervisor. 0 otherwise.
 * @handle:             Handle passed to the SMC call. Used to return
 *                      FFA_MEM_FRAG_TX.
 *
 * Return: @handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
			  bool secure_origin,
			  uint32_t handle_low,
			  uint32_t handle_high,
			  uint32_t fragment_offset,
			  uint32_t sender_id,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	int ret;
	void *src;
	size_t buf_size;
	size_t copy_size;
	size_t full_copy_size;
	uint32_t desc_sender_id;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
	struct spmc_shmem_obj *obj;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		WARN("%s: 0x%lx is not a valid handle.\n",
		     __func__, mem_handle);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

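	/*
	 * A hypervisor caller encodes the sender's endpoint ID in
	 * bits [31:16] of the sender_id argument, so shift the
	 * descriptor's sender_id to match before comparing.
	 */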
	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
	if (sender_id != 0U && sender_id != desc_sender_id) {
		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
		     sender_id, desc_sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	if (fragment_offset >= obj->desc_size) {
		WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
		     __func__, fragment_offset, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	spin_lock(&mbox->lock);

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (mbox->state != MAILBOX_STATE_EMPTY) {
		WARN("%s: RX buffer is full!\n", __func__);
		ret = FFA_ERROR_DENIED;
		goto err_unlock_all;
	}

	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

	mbox->state = MAILBOX_STATE_FULL;

	/*
	 * If the caller is v1.0, convert the descriptor; otherwise copy
	 * it directly.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		size_t out_desc_size;

		ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
							buf_size,
							fragment_offset,
							&copy_size,
							&out_desc_size);
		if (ret != 0) {
			ERROR("%s: Failed to process descriptor.\n", __func__);
			goto err_unlock_all;
		}
	} else {
		full_copy_size = obj->desc_size - fragment_offset;
		copy_size = MIN(full_copy_size, buf_size);

		src = &obj->desc;

		memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
	}
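
	/*
	 * Returning a fragment shorter than the remaining descriptor is
	 * legal; the caller advances fragment_offset by the returned
	 * fragment length and repeats FFA_MEM_FRAG_RX until the whole
	 * descriptor has been received.
	 */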

	spin_unlock(&mbox->lock);
	spin_unlock(&spmc_shmem_obj_state.lock);

	SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
		 copy_size, sender_id, 0, 0, 0);

err_unlock_all:
	spin_unlock(&mbox->lock);
err_unlock_shmem:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
 * @secure_origin:      true if the call originated from the secure world.
 *
 * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
 * Used by the secure OS to release memory previously shared with it by the
 * non-secure OS.
 *
 * The handle to release must be in the client's (secure OS's) transmit buffer.
 *
 * Return: 0 on success, error code on failure.
 */
int spmc_ffa_mem_relinquish(uint32_t smc_fid,
			    bool secure_origin,
			    uint32_t handle_low,
			    uint32_t handle_high,
			    uint32_t fragment_offset,
			    uint32_t sender_id,
			    void *cookie,
			    void *handle,
			    uint64_t flags)
{
	int ret;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	struct spmc_shmem_obj *obj;
	const struct ffa_mem_relinquish_descriptor *req;
	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();

	if (!secure_origin) {
		WARN("%s: unsupported relinquish direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&mbox->lock);

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	req = mbox->tx_buffer;

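	/*
	 * No relinquish flags are supported; in particular this rejects
	 * the FF-A "zero memory after relinquish" hint, which this
	 * implementation does not act upon.
	 */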
	if (req->flags != 0U) {
		WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	if (req->endpoint_count == 0U) {
		WARN("%s: endpoint count cannot be 0.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/*
	 * Validate that the endpoint ID was populated correctly. We don't
	 * currently support proxy endpoints, so the endpoint count should
	 * always be 1.
	 */
	if (req->endpoint_count != 1U) {
		WARN("%s: unsupported endpoint count %u != 1\n", __func__,
		     req->endpoint_count);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/* Validate provided endpoint ID matches the partition ID. */
	if (req->endpoint_array[0] != sp_ctx->sp_id) {
		WARN("%s: invalid endpoint ID %u != %u\n", __func__,
		     req->endpoint_array[0], sp_ctx->sp_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/* Validate the caller is a valid participant. */
	if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
		WARN("%s: Invalid endpoint ID (0x%x).\n",
		     __func__, req->endpoint_array[0]);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (obj->in_use == 0U) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}
	obj->in_use--;
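	/*
	 * Dropping the reference does not free the object; the backing
	 * descriptor is kept until the owner reclaims the memory via
	 * FFA_MEM_RECLAIM.
	 */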

	spin_unlock(&spmc_shmem_obj_state.lock);
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock_all:
	spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
 * @secure_origin:  true if the call originated from the secure world.
 * @handle_low:     Unique handle of shared memory object to reclaim.
 *                  Bit[31:0].
 * @handle_high:    Unique handle of shared memory object to reclaim.
 *                  Bit[63:32].
 * @mem_flags:      Memory transaction flags. Must be zero; no flags are
 *                  currently supported.
 *
 * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
 * Used by the non-secure OS to reclaim memory previously shared with the
 * secure OS.
 *
 * Return: 0 on success, error code on failure.
 */
int spmc_ffa_mem_reclaim(uint32_t smc_fid,
			 bool secure_origin,
			 uint32_t handle_low,
			 uint32_t handle_high,
			 uint32_t mem_flags,
			 uint64_t x4,
			 void *cookie,
			 void *handle,
			 uint64_t flags)
{
	int ret;
	struct spmc_shmem_obj *obj;
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

	if (secure_origin) {
		WARN("%s: unsupported reclaim direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

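	/*
	 * No reclaim flags are implemented; a caller asking, for
	 * example, for the memory to be zeroed before reclaim is
	 * rejected rather than silently ignored.
	 */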
	if (mem_flags != 0U) {
		WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}
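	/*
	 * Deny the reclaim while any borrower still holds a retrieve
	 * without a matching relinquish.
	 */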
	if (obj->in_use != 0U) {
		ret = FFA_ERROR_DENIED;
		goto err_unlock;
	}

	if (obj->desc_filled != obj->desc_size) {
		WARN("%s: incomplete object desc filled %zu < size %zu\n",
		     __func__, obj->desc_filled, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	/* Allow for platform specific operations to be performed. */
	ret = plat_spmc_shmem_reclaim(&obj->desc);
	if (ret != 0) {
		goto err_unlock;
	}

	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
	spin_unlock(&spmc_shmem_obj_state.lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}