1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Broadcom BCM2835 V4L2 driver
4  *
5  * Copyright © 2013 Raspberry Pi (Trading) Ltd.
6  *
7  * Authors: Vincent Sanders @ Collabora
8  *          Dave Stevenson @ Broadcom
9  *		(now [email protected])
10  *          Simon Mellor @ Broadcom
11  *          Luke Diamand @ Broadcom
12  *
13  * V4L2 driver MMAL vchiq interface code
14  */
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 
18 #include <linux/errno.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/mm.h>
23 #include <linux/slab.h>
24 #include <linux/completion.h>
25 #include <linux/vmalloc.h>
26 #include <media/videobuf2-vmalloc.h>
27 
28 #include "../include/linux/raspberrypi/vchiq.h"
29 #include "../interface/vchiq_arm/vchiq_arm.h"
30 #include "mmal-common.h"
31 #include "mmal-vchiq.h"
32 #include "mmal-msg.h"
33 
34 /*
35  * maximum number of components supported.
36  * This matches the maximum permitted by default on the VPU
37  */
38 #define VCHIQ_MMAL_MAX_COMPONENTS 64
39 
40 /*
41  * Timeout for synchronous msg responses in seconds.
42  * Helpful to increase this if stopping in the VPU debugger.
43  */
44 #define SYNC_MSG_TIMEOUT       3
45 
46 /*#define FULL_MSG_DUMP 1*/
47 
#ifdef DEBUG
/* Human-readable names for the MMAL message types, indexed by the
 * message header type value; used only by DBG_DUMP_MSG trace output.
 */
static const char *const msg_type_names[] = {
	"UNKNOWN",
	"QUIT",
	"SERVICE_CLOSED",
	"GET_VERSION",
	"COMPONENT_CREATE",
	"COMPONENT_DESTROY",
	"COMPONENT_ENABLE",
	"COMPONENT_DISABLE",
	"PORT_INFO_GET",
	"PORT_INFO_SET",
	"PORT_ACTION",
	"BUFFER_FROM_HOST",
	"BUFFER_TO_HOST",
	"GET_STATS",
	"PORT_PARAMETER_SET",
	"PORT_PARAMETER_GET",
	"EVENT_TO_HOST",
	"GET_CORE_STATS_FOR_PORT",
	"OPAQUE_ALLOCATOR",
	"CONSUME_MEM",
	"LMK",
	"OPAQUE_ALLOCATOR_DESC",
	"DRM_GET_LHS32",
	"DRM_GET_TIME",
	"BUFFER_FROM_HOST_ZEROLEN",
	"PORT_FLUSH",
	"HOST_LOG",
};
#endif
79 
/* Human-readable names for the MMAL port action types, indexed by the
 * port action value (used in debug/trace paths).
 */
static const char *const port_action_type_names[] = {
	"UNKNOWN",
	"ENABLE",
	"DISABLE",
	"FLUSH",
	"CONNECT",
	"DISCONNECT",
	"SET_REQUIREMENTS",
};
89 
#if defined(DEBUG)
#if defined(FULL_MSG_DUMP)
/* Dump message header + payload as hex in addition to the summary line. */
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
		print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET,	\
			       16, 4, (MSG),				\
			       sizeof(struct mmal_msg_header), 1);	\
		print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET,	\
			       16, 4,					\
			       ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
			       (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
	} while (0)
#else
/* Summary-only variant.  Wrapped in do-while(0) (not a bare compound
 * statement) so the macro expands to a single statement and is safe in
 * an unbraced if/else; matches the FULL_MSG_DUMP variant above.
 */
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
	} while (0)
#endif
#else
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
#endif
116 
struct vchiq_mmal_instance;

/* normal message context - tracks one in-flight message.  The bulk and
 * sync union members are mutually exclusive uses of the same context.
 */
struct mmal_msg_context {
	/* owning mmal instance, needed to release the idr handle */
	struct vchiq_mmal_instance *instance;

	/* Index in the context_map idr so that we can find the
	 * mmal_msg_context again when servicing the VCHI reply.
	 */
	int handle;

	union {
		struct {
			/* work struct for buffer_cb callback */
			struct work_struct work;
			/* work struct for deferred callback */
			struct work_struct buffer_to_host_work;
			/* mmal instance */
			struct vchiq_mmal_instance *instance;
			/* mmal port */
			struct vchiq_mmal_port *port;
			/* actual buffer used to store bulk reply */
			struct mmal_buffer *buffer;
			/* amount of buffer used */
			unsigned long buffer_used;
			/* MMAL buffer flags */
			u32 mmal_flags;
			/* Presentation and Decode timestamps */
			s64 pts;
			s64 dts;

			int status;	/* context status */

		} bulk;		/* bulk data */

		struct {
			/* message handle to release */
			struct vchiq_header *msg_handle;
			/* pointer to received message */
			struct mmal_msg *msg;
			/* received message length */
			u32 msg_len;
			/* completion upon reply */
			struct completion cmplt;
		} sync;		/* synchronous response */
	} u;

};
165 
/* per-open driver state for one MMAL-over-VCHIQ connection */
struct vchiq_mmal_instance {
	/* vchiq service handle used for all message traffic */
	unsigned int service_handle;

	/* ensure serialised access to service */
	struct mutex vchiq_mutex;

	/* maps integer handles (carried in message headers) to
	 * struct mmal_msg_context pointers
	 */
	struct idr context_map;
	/* protect accesses to context_map */
	struct mutex context_map_lock;

	struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];

	/* ordered workqueue to process all bulk operations */
	struct workqueue_struct *bulk_wq;

	/* handle for a vchiq instance */
	struct vchiq_instance *vchiq_instance;
};
184 
185 static struct mmal_msg_context *
get_msg_context(struct vchiq_mmal_instance * instance)186 get_msg_context(struct vchiq_mmal_instance *instance)
187 {
188 	struct mmal_msg_context *msg_context;
189 	int handle;
190 
191 	/* todo: should this be allocated from a pool to avoid kzalloc */
192 	msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
193 
194 	if (!msg_context)
195 		return ERR_PTR(-ENOMEM);
196 
197 	/* Create an ID that will be passed along with our message so
198 	 * that when we service the VCHI reply, we can look up what
199 	 * message is being replied to.
200 	 */
201 	mutex_lock(&instance->context_map_lock);
202 	handle = idr_alloc(&instance->context_map, msg_context,
203 			   0, 0, GFP_KERNEL);
204 	mutex_unlock(&instance->context_map_lock);
205 
206 	if (handle < 0) {
207 		kfree(msg_context);
208 		return ERR_PTR(handle);
209 	}
210 
211 	msg_context->instance = instance;
212 	msg_context->handle = handle;
213 
214 	return msg_context;
215 }
216 
/* Map a handle from a message header back to its message context.
 * Returns NULL if the handle is not registered.
 * NOTE(review): idr_find is called without context_map_lock here —
 * presumably safe in the callback context; confirm against idr rules.
 */
static struct mmal_msg_context *
lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
{
	return idr_find(&instance->context_map, handle);
}
222 
/* Unregister a message context from the idr and free it. */
static void
release_msg_context(struct mmal_msg_context *msg_context)
{
	struct vchiq_mmal_instance *instance = msg_context->instance;

	mutex_lock(&instance->context_map_lock);
	idr_remove(&instance->context_map, msg_context->handle);
	mutex_unlock(&instance->context_map_lock);
	kfree(msg_context);
}
233 
234 /* deals with receipt of event to host message */
event_to_host_cb(struct vchiq_mmal_instance * instance,struct mmal_msg * msg,u32 msg_len)235 static void event_to_host_cb(struct vchiq_mmal_instance *instance,
236 			     struct mmal_msg *msg, u32 msg_len)
237 {
238 	pr_debug("unhandled event\n");
239 	pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
240 		 msg->u.event_to_host.client_component,
241 		 msg->u.event_to_host.port_type,
242 		 msg->u.event_to_host.port_num,
243 		 msg->u.event_to_host.cmd, msg->u.event_to_host.length);
244 }
245 
/* workqueue scheduled callback
 *
 * we do this because it is important we do not call any other vchiq
 * sync calls from within the message delivery thread
 */
static void buffer_work_cb(struct work_struct *work)
{
	struct mmal_msg_context *msg_context =
		container_of(work, struct mmal_msg_context, u.bulk.work);
	struct mmal_buffer *buffer = msg_context->u.bulk.buffer;

	if (!buffer) {
		pr_err("%s: ctx: %p, No mmal buffer to pass details\n",
		       __func__, msg_context);
		return;
	}

	/* copy the transfer results out of the context before handing
	 * the buffer back via the port callback
	 */
	buffer->length = msg_context->u.bulk.buffer_used;
	buffer->mmal_flags = msg_context->u.bulk.mmal_flags;
	buffer->dts = msg_context->u.bulk.dts;
	buffer->pts = msg_context->u.bulk.pts;

	/* the VPU no longer owns this buffer; decrement before the
	 * callback so the count is accurate when the callback runs
	 */
	atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);

	msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
					    msg_context->u.bulk.port,
					    msg_context->u.bulk.status,
					    msg_context->u.bulk.buffer);
}
275 
/* workqueue scheduled callback to handle receiving buffers
 *
 * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
 * If we block in the service_callback context then we can't process the
 * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
 * vchiq_bulk_receive() call to complete.
 */
static void buffer_to_host_work_cb(struct work_struct *work)
{
	struct mmal_msg_context *msg_context =
		container_of(work, struct mmal_msg_context,
			     u.bulk.buffer_to_host_work);
	struct vchiq_mmal_instance *instance = msg_context->instance;
	unsigned long len = msg_context->u.bulk.buffer_used;
	int ret;

	if (!len)
		/* Dummy receive to ensure the buffers remain in order */
		len = 8;
	/* queue the bulk submission */
	vchiq_use_service(instance->vchiq_instance, instance->service_handle);
	ret = vchiq_bulk_receive(instance->vchiq_instance, instance->service_handle,
				 msg_context->u.bulk.buffer->buffer,
				 /* Actual receive needs to be a multiple
				  * of 4 bytes
				  */
				(len + 3) & ~3,
				msg_context,
				VCHIQ_BULK_MODE_CALLBACK);

	vchiq_release_service(instance->vchiq_instance, instance->service_handle);

	if (ret != 0)
		pr_err("%s: ctx: %p, vchiq_bulk_receive failed %d\n",
		       __func__, msg_context, ret);
}
312 
/* enqueue a bulk receive for a given message context
 *
 * Validates the context has a backing buffer, clamps the read length to
 * the buffer size, records the timestamps from the message, then defers
 * the actual vchiq_bulk_receive to the ordered bulk workqueue.
 * Returns 0 on successful queueing, -EINVAL if no buffer is configured.
 */
static int bulk_receive(struct vchiq_mmal_instance *instance,
			struct mmal_msg *msg,
			struct mmal_msg_context *msg_context)
{
	unsigned long rd_len;

	rd_len = msg->u.buffer_from_host.buffer_header.length;

	if (!msg_context->u.bulk.buffer) {
		pr_err("bulk.buffer not configured - error in buffer_from_host\n");

		/* todo: this is a serious error, we should never have
		 * committed a buffer_to_host operation to the mmal
		 * port without the buffer to back it up (underflow
		 * handling) and there is no obvious way to deal with
		 * this - how is the mmal service going to react when
		 * we fail to do the xfer and reschedule a buffer when
		 * it arrives? perhaps a starved flag to indicate a
		 * waiting bulk receive?
		 */

		return -EINVAL;
	}

	/* ensure we do not overrun the available buffer */
	if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
		rd_len = msg_context->u.bulk.buffer->buffer_size;
		pr_warn("short read as not enough receive buffer space\n");
		/* todo: is this the correct response, what happens to
		 * the rest of the message data?
		 */
	}

	/* store length */
	msg_context->u.bulk.buffer_used = rd_len;
	msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
	msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;

	queue_work(msg_context->instance->bulk_wq,
		   &msg_context->u.bulk.buffer_to_host_work);

	return 0;
}
357 
358 /* data in message, memcpy from packet into output buffer */
inline_receive(struct vchiq_mmal_instance * instance,struct mmal_msg * msg,struct mmal_msg_context * msg_context)359 static int inline_receive(struct vchiq_mmal_instance *instance,
360 			  struct mmal_msg *msg,
361 			  struct mmal_msg_context *msg_context)
362 {
363 	memcpy(msg_context->u.bulk.buffer->buffer,
364 	       msg->u.buffer_from_host.short_data,
365 	       msg->u.buffer_from_host.payload_in_message);
366 
367 	msg_context->u.bulk.buffer_used =
368 	    msg->u.buffer_from_host.payload_in_message;
369 
370 	return 0;
371 }
372 
/* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST
 *
 * Hands an empty host buffer to the VPU for it to fill.  The buffer's
 * pre-allocated msg_context records where the bulk reply should land.
 * Returns 0 on success, -EINVAL if the port is disabled or the buffer
 * has no msg_context, otherwise the vchiq queueing error.
 */
static int
buffer_from_host(struct vchiq_mmal_instance *instance,
		 struct vchiq_mmal_port *port, struct mmal_buffer *buf)
{
	struct mmal_msg_context *msg_context;
	struct mmal_msg m;
	int ret;

	if (!port->enabled)
		return -EINVAL;

	pr_debug("instance:%u buffer:%p\n", instance->service_handle, buf);

	/* get context */
	if (!buf->msg_context) {
		pr_err("%s: msg_context not allocated, buf %p\n", __func__,
		       buf);
		return -EINVAL;
	}
	msg_context = buf->msg_context;

	/* store bulk message context for when data arrives */
	msg_context->u.bulk.instance = instance;
	msg_context->u.bulk.port = port;
	msg_context->u.bulk.buffer = buf;
	msg_context->u.bulk.buffer_used = 0;

	/* initialise work structure ready to schedule callback */
	INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
	INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
		  buffer_to_host_work_cb);

	/* count the buffer as VPU-owned before sending; undone below if
	 * the message fails to queue
	 */
	atomic_inc(&port->buffers_with_vpu);

	/* prep the buffer from host message */
	memset(&m, 0xbc, sizeof(m));	/* just to make debug clearer */

	m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
	m.h.magic = MMAL_MAGIC;
	m.h.context = msg_context->handle;
	m.h.status = 0;

	/* drvbuf is our private data passed back */
	m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
	m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
	m.u.buffer_from_host.drvbuf.port_handle = port->handle;
	m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;

	/* buffer header */
	m.u.buffer_from_host.buffer_header.cmd = 0;
	m.u.buffer_from_host.buffer_header.data =
		(u32)(unsigned long)buf->buffer;
	m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
	m.u.buffer_from_host.buffer_header.length = 0;	/* nothing used yet */
	m.u.buffer_from_host.buffer_header.offset = 0;	/* no offset */
	m.u.buffer_from_host.buffer_header.flags = 0;	/* no flags */
	m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
	m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;

	/* clear buffer type specific data */
	memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
	       sizeof(m.u.buffer_from_host.buffer_header_type_specific));

	/* no payload in message */
	m.u.buffer_from_host.payload_in_message = 0;

	vchiq_use_service(instance->vchiq_instance, instance->service_handle);

	ret = vchiq_queue_kernel_message(instance->vchiq_instance, instance->service_handle, &m,
					 sizeof(struct mmal_msg_header) +
					 sizeof(m.u.buffer_from_host));
	if (ret)
		atomic_dec(&port->buffers_with_vpu);

	vchiq_release_service(instance->vchiq_instance, instance->service_handle);

	return ret;
}
452 
453 /* deals with receipt of buffer to host message */
buffer_to_host_cb(struct vchiq_mmal_instance * instance,struct mmal_msg * msg,u32 msg_len)454 static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
455 			      struct mmal_msg *msg, u32 msg_len)
456 {
457 	struct mmal_msg_context *msg_context;
458 	u32 handle;
459 
460 	pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
461 		 __func__, instance, msg, msg_len);
462 
463 	if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
464 		handle = msg->u.buffer_from_host.drvbuf.client_context;
465 		msg_context = lookup_msg_context(instance, handle);
466 
467 		if (!msg_context) {
468 			pr_err("drvbuf.client_context(%u) is invalid\n",
469 			       handle);
470 			return;
471 		}
472 	} else {
473 		pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
474 		return;
475 	}
476 
477 	msg_context->u.bulk.mmal_flags =
478 				msg->u.buffer_from_host.buffer_header.flags;
479 
480 	if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
481 		/* message reception had an error */
482 		pr_warn("error %d in reply\n", msg->h.status);
483 
484 		msg_context->u.bulk.status = msg->h.status;
485 
486 	} else if (msg->u.buffer_from_host.buffer_header.length == 0) {
487 		/* empty buffer */
488 		if (msg->u.buffer_from_host.buffer_header.flags &
489 		    MMAL_BUFFER_HEADER_FLAG_EOS) {
490 			msg_context->u.bulk.status =
491 			    bulk_receive(instance, msg, msg_context);
492 			if (msg_context->u.bulk.status == 0)
493 				return;	/* successful bulk submission, bulk
494 					 * completion will trigger callback
495 					 */
496 		} else {
497 			/* do callback with empty buffer - not EOS though */
498 			msg_context->u.bulk.status = 0;
499 			msg_context->u.bulk.buffer_used = 0;
500 		}
501 	} else if (msg->u.buffer_from_host.payload_in_message == 0) {
502 		/* data is not in message, queue a bulk receive */
503 		msg_context->u.bulk.status =
504 		    bulk_receive(instance, msg, msg_context);
505 		if (msg_context->u.bulk.status == 0)
506 			return;	/* successful bulk submission, bulk
507 				 * completion will trigger callback
508 				 */
509 
510 		/* failed to submit buffer, this will end badly */
511 		pr_err("error %d on bulk submission\n",
512 		       msg_context->u.bulk.status);
513 
514 	} else if (msg->u.buffer_from_host.payload_in_message <=
515 		   MMAL_VC_SHORT_DATA) {
516 		/* data payload within message */
517 		msg_context->u.bulk.status = inline_receive(instance, msg,
518 							    msg_context);
519 	} else {
520 		pr_err("message with invalid short payload\n");
521 
522 		/* signal error */
523 		msg_context->u.bulk.status = -EINVAL;
524 		msg_context->u.bulk.buffer_used =
525 		    msg->u.buffer_from_host.payload_in_message;
526 	}
527 
528 	/* schedule the port callback */
529 	schedule_work(&msg_context->u.bulk.work);
530 }
531 
/* bulk receive completed successfully - mark the context OK and hand
 * off to the port callback via the workqueue
 */
static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
			    struct mmal_msg_context *msg_context)
{
	msg_context->u.bulk.status = 0;

	/* schedule the port callback */
	schedule_work(&msg_context->u.bulk.work);
}
540 
/* bulk receive was aborted - flag -EINTR on the context and still run
 * the port callback so the buffer is returned to its owner
 */
static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
			  struct mmal_msg_context *msg_context)
{
	pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);

	msg_context->u.bulk.status = -EINTR;

	schedule_work(&msg_context->u.bulk.work);
}
550 
/* incoming event service callback
 *
 * Dispatches vchiq events for the MMAL service.  Buffer/event messages
 * are handled (and released) directly; all other message types are
 * treated as replies to synchronous requests and complete the waiting
 * context — for those, the header is released later by the sender after
 * it has read the reply.  Always returns 0.
 */
static int mmal_service_callback(struct vchiq_instance *vchiq_instance,
				 enum vchiq_reason reason, struct vchiq_header *header,
				 unsigned int handle, void *cb_data,
				 void __user *cb_userdata)
{
	struct vchiq_mmal_instance *instance = vchiq_get_service_userdata(vchiq_instance, handle);
	u32 msg_len;
	struct mmal_msg *msg;
	struct mmal_msg_context *msg_context;

	if (!instance) {
		pr_err("Message callback passed NULL instance\n");
		return 0;
	}

	switch (reason) {
	case VCHIQ_MESSAGE_AVAILABLE:
		msg = (void *)header->data;
		msg_len = header->size;

		DBG_DUMP_MSG(msg, msg_len, "<<< reply message");

		/* handling is different for buffer messages */
		switch (msg->h.type) {
		case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
			/* ack of our buffer submission - nothing to do */
			vchiq_release_message(vchiq_instance, handle, header);
			break;

		case MMAL_MSG_TYPE_EVENT_TO_HOST:
			event_to_host_cb(instance, msg, msg_len);
			vchiq_release_message(vchiq_instance, handle, header);

			break;

		case MMAL_MSG_TYPE_BUFFER_TO_HOST:
			buffer_to_host_cb(instance, msg, msg_len);
			vchiq_release_message(vchiq_instance, handle, header);
			break;

		default:
			/* messages dependent on header context to complete */
			if (!msg->h.context) {
				pr_err("received message context was null!\n");
				vchiq_release_message(vchiq_instance, handle, header);
				break;
			}

			msg_context = lookup_msg_context(instance,
							 msg->h.context);
			if (!msg_context) {
				pr_err("received invalid message context %u!\n",
				       msg->h.context);
				vchiq_release_message(vchiq_instance, handle, header);
				break;
			}

			/* fill in context values; the header is NOT released
			 * here - the synchronous sender releases it once it
			 * has consumed the reply
			 */
			msg_context->u.sync.msg_handle = header;
			msg_context->u.sync.msg = msg;
			msg_context->u.sync.msg_len = msg_len;

			/* todo: should this check (completion_done()
			 * == 1) for no one waiting? or do we need a
			 * flag to tell us the completion has been
			 * interrupted so we can free the message and
			 * its context. This probably also solves the
			 * message arriving after interruption todo
			 * below
			 */

			/* complete message so caller knows it happened */
			complete(&msg_context->u.sync.cmplt);
			break;
		}

		break;

	case VCHIQ_BULK_RECEIVE_DONE:
		bulk_receive_cb(instance, cb_data);
		break;

	case VCHIQ_BULK_RECEIVE_ABORTED:
		bulk_abort_cb(instance, cb_data);
		break;

	case VCHIQ_SERVICE_CLOSED:
		/* TODO: consider if this requires action if received when
		 * driver is not explicitly closing the service
		 */
		break;

	default:
		pr_err("Received unhandled message reason %d\n", reason);
		break;
	}

	return 0;
}
650 
/* Send a message and block until the matching reply arrives.
 *
 * On success (return 0) *msg_out points at the reply payload and
 * *msg_handle at the vchiq header, which the CALLER must release with
 * vchiq_release_message().  Returns -EINVAL for an oversized payload,
 * -ETIME on reply timeout, or the vchiq queueing error.
 */
static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
				     struct mmal_msg *msg,
				     unsigned int payload_len,
				     struct mmal_msg **msg_out,
				     struct vchiq_header **msg_handle)
{
	struct mmal_msg_context *msg_context;
	int ret;
	unsigned long time_left;

	/* payload size must not cause message to exceed max size */
	if (payload_len >
	    (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
		pr_err("payload length %d exceeds max:%d\n", payload_len,
		       (int)(MMAL_MSG_MAX_SIZE -
			    sizeof(struct mmal_msg_header)));
		return -EINVAL;
	}

	msg_context = get_msg_context(instance);
	if (IS_ERR(msg_context))
		return PTR_ERR(msg_context);

	init_completion(&msg_context->u.sync.cmplt);

	msg->h.magic = MMAL_MAGIC;
	msg->h.context = msg_context->handle;
	msg->h.status = 0;

	DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
		     ">>> sync message");

	vchiq_use_service(instance->vchiq_instance, instance->service_handle);

	ret = vchiq_queue_kernel_message(instance->vchiq_instance, instance->service_handle, msg,
					 sizeof(struct mmal_msg_header) +
					 payload_len);

	vchiq_release_service(instance->vchiq_instance, instance->service_handle);

	if (ret) {
		pr_err("error %d queuing message\n", ret);
		release_msg_context(msg_context);
		return ret;
	}

	/* the service callback completes this when the reply arrives */
	time_left = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
						SYNC_MSG_TIMEOUT * HZ);
	if (time_left == 0) {
		pr_err("timed out waiting for sync completion\n");
		ret = -ETIME;
		/* todo: what happens if the message arrives after aborting */
		release_msg_context(msg_context);
		return ret;
	}

	*msg_out = msg_context->u.sync.msg;
	*msg_handle = msg_context->u.sync.msg_handle;
	release_msg_context(msg_context);

	return 0;
}
713 
/* Debug dump of a port's buffer requirements and stream format. */
static void dump_port_info(struct vchiq_mmal_port *port)
{
	pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);

	pr_debug("buffer minimum num:%d size:%d align:%d\n",
		 port->minimum_buffer.num,
		 port->minimum_buffer.size, port->minimum_buffer.alignment);

	pr_debug("buffer recommended num:%d size:%d align:%d\n",
		 port->recommended_buffer.num,
		 port->recommended_buffer.size,
		 port->recommended_buffer.alignment);

	pr_debug("buffer current values num:%d size:%d align:%d\n",
		 port->current_buffer.num,
		 port->current_buffer.size, port->current_buffer.alignment);

	pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
		 port->format.type,
		 port->format.encoding, port->format.encoding_variant);

	pr_debug("		    bitrate:%d flags:0x%x\n",
		 port->format.bitrate, port->format.flags);

	/* video-specific format details */
	if (port->format.type == MMAL_ES_TYPE_VIDEO) {
		pr_debug
		    ("es video format: width:%d height:%d colourspace:0x%x\n",
		     port->es.video.width, port->es.video.height,
		     port->es.video.color_space);

		pr_debug("		 : crop xywh %d,%d,%d,%d\n",
			 port->es.video.crop.x,
			 port->es.video.crop.y,
			 port->es.video.crop.width, port->es.video.crop.height);
		pr_debug("		 : framerate %d/%d  aspect %d/%d\n",
			 port->es.video.frame_rate.numerator,
			 port->es.video.frame_rate.denominator,
			 port->es.video.par.numerator, port->es.video.par.denominator);
	}
}
754 
/* Serialise the driver's port state into the wire-format mmal_port
 * structure used inside a PORT_INFO_SET message.
 */
static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
{
	/* todo do readonly fields need setting at all? */
	p->type = port->type;
	p->index = port->index;
	p->index_all = 0;
	p->is_enabled = port->enabled;
	p->buffer_num_min = port->minimum_buffer.num;
	p->buffer_size_min = port->minimum_buffer.size;
	p->buffer_alignment_min = port->minimum_buffer.alignment;
	p->buffer_num_recommended = port->recommended_buffer.num;
	p->buffer_size_recommended = port->recommended_buffer.size;

	/* only three writable fields in a port */
	p->buffer_num = port->current_buffer.num;
	p->buffer_size = port->current_buffer.size;
	p->userdata = (u32)(unsigned long)port;
}
773 
port_info_set(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port)774 static int port_info_set(struct vchiq_mmal_instance *instance,
775 			 struct vchiq_mmal_port *port)
776 {
777 	int ret;
778 	struct mmal_msg m;
779 	struct mmal_msg *rmsg;
780 	struct vchiq_header *rmsg_handle;
781 
782 	pr_debug("setting port info port %p\n", port);
783 	if (!port)
784 		return -1;
785 	dump_port_info(port);
786 
787 	m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
788 
789 	m.u.port_info_set.component_handle = port->component->handle;
790 	m.u.port_info_set.port_type = port->type;
791 	m.u.port_info_set.port_index = port->index;
792 
793 	port_to_mmal_msg(port, &m.u.port_info_set.port);
794 
795 	/* elementary stream format setup */
796 	m.u.port_info_set.format.type = port->format.type;
797 	m.u.port_info_set.format.encoding = port->format.encoding;
798 	m.u.port_info_set.format.encoding_variant =
799 	    port->format.encoding_variant;
800 	m.u.port_info_set.format.bitrate = port->format.bitrate;
801 	m.u.port_info_set.format.flags = port->format.flags;
802 
803 	memcpy(&m.u.port_info_set.es, &port->es,
804 	       sizeof(union mmal_es_specific_format));
805 
806 	m.u.port_info_set.format.extradata_size = port->format.extradata_size;
807 	memcpy(&m.u.port_info_set.extradata, port->format.extradata,
808 	       port->format.extradata_size);
809 
810 	ret = send_synchronous_mmal_msg(instance, &m,
811 					sizeof(m.u.port_info_set),
812 					&rmsg, &rmsg_handle);
813 	if (ret)
814 		return ret;
815 
816 	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
817 		/* got an unexpected message type in reply */
818 		ret = -EINVAL;
819 		goto release_msg;
820 	}
821 
822 	/* return operation status */
823 	ret = -rmsg->u.port_info_get_reply.status;
824 
825 	pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
826 		 port->component->handle, port->handle);
827 
828 release_msg:
829 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
830 
831 	return ret;
832 }
833 
/* use port info get message to retrieve port information
 *
 * Sends a synchronous PORT_INFO_GET and copies the reply into the
 * driver's port structure: handle, enabled flag, buffer requirements,
 * and the full stream format (including extradata).
 * Returns 0 on success, -EINVAL for an unexpected reply type, or the
 * negated MMAL status on VPU-side failure.
 */
static int port_info_get(struct vchiq_mmal_instance *instance,
			 struct vchiq_mmal_port *port)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	/* port info time */
	m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
	m.u.port_info_get.component_handle = port->component->handle;
	m.u.port_info_get.port_type = port->type;
	m.u.port_info_get.index = port->index;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_info_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	/* return operation status (MMAL_MSG_STATUS_SUCCESS is 0, so this
	 * also works as a plain error check on the negated value)
	 */
	ret = -rmsg->u.port_info_get_reply.status;
	if (ret != MMAL_MSG_STATUS_SUCCESS)
		goto release_msg;

	if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
		port->enabled = false;
	else
		port->enabled = true;

	/* copy the values out of the message */
	port->handle = rmsg->u.port_info_get_reply.port_handle;

	/* port type and index cached to use on port info set because
	 * it does not use a port handle
	 */
	port->type = rmsg->u.port_info_get_reply.port_type;
	port->index = rmsg->u.port_info_get_reply.port_index;

	port->minimum_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_min;
	port->minimum_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size_min;
	port->minimum_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;

	/* NOTE(review): recommended_buffer.size is never copied from the
	 * reply (buffer_size_recommended) - looks like an omission; confirm
	 * whether callers rely on it staying unset.
	 */
	port->recommended_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;
	port->recommended_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_recommended;

	port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
	port->current_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size;

	/* stream format */
	port->format.type = rmsg->u.port_info_get_reply.format.type;
	port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
	port->format.encoding_variant =
	    rmsg->u.port_info_get_reply.format.encoding_variant;
	port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
	port->format.flags = rmsg->u.port_info_get_reply.format.flags;

	/* elementary stream format */
	memcpy(&port->es,
	       &rmsg->u.port_info_get_reply.es,
	       sizeof(union mmal_es_specific_format));
	port->format.es = &port->es;

	port->format.extradata_size =
	    rmsg->u.port_info_get_reply.format.extradata_size;
	memcpy(port->format.extradata,
	       rmsg->u.port_info_get_reply.extradata,
	       port->format.extradata_size);

	pr_debug("received port info\n");
	dump_port_info(port);

release_msg:

	pr_debug("%s:result:%d component:0x%x port:%d\n",
		 __func__, ret, port->component->handle, port->handle);

	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);

	return ret;
}
928 
929 /* create component on vc */
create_component(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component,const char * name)930 static int create_component(struct vchiq_mmal_instance *instance,
931 			    struct vchiq_mmal_component *component,
932 			    const char *name)
933 {
934 	int ret;
935 	struct mmal_msg m;
936 	struct mmal_msg *rmsg;
937 	struct vchiq_header *rmsg_handle;
938 
939 	/* build component create message */
940 	m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
941 	m.u.component_create.client_component = component->client_component;
942 	strscpy_pad(m.u.component_create.name, name,
943 		    sizeof(m.u.component_create.name));
944 	m.u.component_create.pid = 0;
945 
946 	ret = send_synchronous_mmal_msg(instance, &m,
947 					sizeof(m.u.component_create),
948 					&rmsg, &rmsg_handle);
949 	if (ret)
950 		return ret;
951 
952 	if (rmsg->h.type != m.h.type) {
953 		/* got an unexpected message type in reply */
954 		ret = -EINVAL;
955 		goto release_msg;
956 	}
957 
958 	ret = -rmsg->u.component_create_reply.status;
959 	if (ret != MMAL_MSG_STATUS_SUCCESS)
960 		goto release_msg;
961 
962 	/* a valid component response received */
963 	component->handle = rmsg->u.component_create_reply.component_handle;
964 	component->inputs = rmsg->u.component_create_reply.input_num;
965 	component->outputs = rmsg->u.component_create_reply.output_num;
966 	component->clocks = rmsg->u.component_create_reply.clock_num;
967 
968 	pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
969 		 component->handle,
970 		 component->inputs, component->outputs, component->clocks);
971 
972 release_msg:
973 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
974 
975 	return ret;
976 }
977 
978 /* destroys a component on vc */
destroy_component(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)979 static int destroy_component(struct vchiq_mmal_instance *instance,
980 			     struct vchiq_mmal_component *component)
981 {
982 	int ret;
983 	struct mmal_msg m;
984 	struct mmal_msg *rmsg;
985 	struct vchiq_header *rmsg_handle;
986 
987 	m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
988 	m.u.component_destroy.component_handle = component->handle;
989 
990 	ret = send_synchronous_mmal_msg(instance, &m,
991 					sizeof(m.u.component_destroy),
992 					&rmsg, &rmsg_handle);
993 	if (ret)
994 		return ret;
995 
996 	if (rmsg->h.type != m.h.type) {
997 		/* got an unexpected message type in reply */
998 		ret = -EINVAL;
999 		goto release_msg;
1000 	}
1001 
1002 	ret = -rmsg->u.component_destroy_reply.status;
1003 
1004 release_msg:
1005 
1006 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1007 
1008 	return ret;
1009 }
1010 
1011 /* enable a component on vc */
enable_component(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)1012 static int enable_component(struct vchiq_mmal_instance *instance,
1013 			    struct vchiq_mmal_component *component)
1014 {
1015 	int ret;
1016 	struct mmal_msg m;
1017 	struct mmal_msg *rmsg;
1018 	struct vchiq_header *rmsg_handle;
1019 
1020 	m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
1021 	m.u.component_enable.component_handle = component->handle;
1022 
1023 	ret = send_synchronous_mmal_msg(instance, &m,
1024 					sizeof(m.u.component_enable),
1025 					&rmsg, &rmsg_handle);
1026 	if (ret)
1027 		return ret;
1028 
1029 	if (rmsg->h.type != m.h.type) {
1030 		/* got an unexpected message type in reply */
1031 		ret = -EINVAL;
1032 		goto release_msg;
1033 	}
1034 
1035 	ret = -rmsg->u.component_enable_reply.status;
1036 
1037 release_msg:
1038 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1039 
1040 	return ret;
1041 }
1042 
1043 /* disable a component on vc */
disable_component(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)1044 static int disable_component(struct vchiq_mmal_instance *instance,
1045 			     struct vchiq_mmal_component *component)
1046 {
1047 	int ret;
1048 	struct mmal_msg m;
1049 	struct mmal_msg *rmsg;
1050 	struct vchiq_header *rmsg_handle;
1051 
1052 	m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
1053 	m.u.component_disable.component_handle = component->handle;
1054 
1055 	ret = send_synchronous_mmal_msg(instance, &m,
1056 					sizeof(m.u.component_disable),
1057 					&rmsg, &rmsg_handle);
1058 	if (ret)
1059 		return ret;
1060 
1061 	if (rmsg->h.type != m.h.type) {
1062 		/* got an unexpected message type in reply */
1063 		ret = -EINVAL;
1064 		goto release_msg;
1065 	}
1066 
1067 	ret = -rmsg->u.component_disable_reply.status;
1068 
1069 release_msg:
1070 
1071 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1072 
1073 	return ret;
1074 }
1075 
1076 /* get version of mmal implementation */
get_version(struct vchiq_mmal_instance * instance,u32 * major_out,u32 * minor_out)1077 static int get_version(struct vchiq_mmal_instance *instance,
1078 		       u32 *major_out, u32 *minor_out)
1079 {
1080 	int ret;
1081 	struct mmal_msg m;
1082 	struct mmal_msg *rmsg;
1083 	struct vchiq_header *rmsg_handle;
1084 
1085 	m.h.type = MMAL_MSG_TYPE_GET_VERSION;
1086 
1087 	ret = send_synchronous_mmal_msg(instance, &m,
1088 					sizeof(m.u.version),
1089 					&rmsg, &rmsg_handle);
1090 	if (ret)
1091 		return ret;
1092 
1093 	if (rmsg->h.type != m.h.type) {
1094 		/* got an unexpected message type in reply */
1095 		ret = -EINVAL;
1096 		goto release_msg;
1097 	}
1098 
1099 	*major_out = rmsg->u.version.major;
1100 	*minor_out = rmsg->u.version.minor;
1101 
1102 release_msg:
1103 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1104 
1105 	return ret;
1106 }
1107 
1108 /* do a port action with a port as a parameter */
port_action_port(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port,enum mmal_msg_port_action_type action_type)1109 static int port_action_port(struct vchiq_mmal_instance *instance,
1110 			    struct vchiq_mmal_port *port,
1111 			    enum mmal_msg_port_action_type action_type)
1112 {
1113 	int ret;
1114 	struct mmal_msg m;
1115 	struct mmal_msg *rmsg;
1116 	struct vchiq_header *rmsg_handle;
1117 
1118 	m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1119 	m.u.port_action_port.component_handle = port->component->handle;
1120 	m.u.port_action_port.port_handle = port->handle;
1121 	m.u.port_action_port.action = action_type;
1122 
1123 	port_to_mmal_msg(port, &m.u.port_action_port.port);
1124 
1125 	ret = send_synchronous_mmal_msg(instance, &m,
1126 					sizeof(m.u.port_action_port),
1127 					&rmsg, &rmsg_handle);
1128 	if (ret)
1129 		return ret;
1130 
1131 	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1132 		/* got an unexpected message type in reply */
1133 		ret = -EINVAL;
1134 		goto release_msg;
1135 	}
1136 
1137 	ret = -rmsg->u.port_action_reply.status;
1138 
1139 	pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1140 		 __func__,
1141 		 ret, port->component->handle, port->handle,
1142 		 port_action_type_names[action_type], action_type);
1143 
1144 release_msg:
1145 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1146 
1147 	return ret;
1148 }
1149 
1150 /* do a port action with handles as parameters */
port_action_handle(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port,enum mmal_msg_port_action_type action_type,u32 connect_component_handle,u32 connect_port_handle)1151 static int port_action_handle(struct vchiq_mmal_instance *instance,
1152 			      struct vchiq_mmal_port *port,
1153 			      enum mmal_msg_port_action_type action_type,
1154 			      u32 connect_component_handle,
1155 			      u32 connect_port_handle)
1156 {
1157 	int ret;
1158 	struct mmal_msg m;
1159 	struct mmal_msg *rmsg;
1160 	struct vchiq_header *rmsg_handle;
1161 
1162 	m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1163 
1164 	m.u.port_action_handle.component_handle = port->component->handle;
1165 	m.u.port_action_handle.port_handle = port->handle;
1166 	m.u.port_action_handle.action = action_type;
1167 
1168 	m.u.port_action_handle.connect_component_handle =
1169 	    connect_component_handle;
1170 	m.u.port_action_handle.connect_port_handle = connect_port_handle;
1171 
1172 	ret = send_synchronous_mmal_msg(instance, &m,
1173 					sizeof(m.u.port_action_handle),
1174 					&rmsg, &rmsg_handle);
1175 	if (ret)
1176 		return ret;
1177 
1178 	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1179 		/* got an unexpected message type in reply */
1180 		ret = -EINVAL;
1181 		goto release_msg;
1182 	}
1183 
1184 	ret = -rmsg->u.port_action_reply.status;
1185 
1186 	pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
1187 		 __func__,
1188 		 ret, port->component->handle, port->handle,
1189 		 port_action_type_names[action_type],
1190 		 action_type, connect_component_handle, connect_port_handle);
1191 
1192 release_msg:
1193 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1194 
1195 	return ret;
1196 }
1197 
/* set a parameter on a port
 *
 * Copies @value_size bytes of @value into the request payload.  Returns
 * 0, -EINVAL on a bad reply, or the negated MMAL status.
 */
static int port_parameter_set(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      u32 parameter_id, void *value, u32 value_size)
{
	struct mmal_msg msg;
	struct mmal_msg *reply;
	struct vchiq_header *reply_hdr;
	int ret;

	msg.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;
	msg.u.port_parameter_set.component_handle = port->component->handle;
	msg.u.port_parameter_set.port_handle = port->handle;
	msg.u.port_parameter_set.id = parameter_id;
	/* parameter size covers the id and size header words plus payload */
	msg.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
	memcpy(&msg.u.port_parameter_set.value, value, value_size);

	/* message length additionally covers the component/port handles */
	ret = send_synchronous_mmal_msg(instance, &msg,
					(4 * sizeof(u32)) + value_size,
					&reply, &reply_hdr);
	if (ret)
		return ret;

	if (reply->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
	} else {
		ret = -reply->u.port_parameter_set_reply.status;

		pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
			 __func__, ret, port->component->handle, port->handle,
			 parameter_id);
	}

	vchiq_release_message(instance->vchiq_instance,
			      instance->service_handle, reply_hdr);

	return ret;
}
1238 
/* get a parameter from a port
 *
 * @instance: mmal instance the port belongs to.
 * @port: port to query.
 * @parameter_id: MMAL_PARAMETER_* identifier to read.
 * @value: buffer receiving the parameter payload.
 * @value_size: in: capacity of @value in bytes.  out: true size of the
 *              parameter as reported by the VPU; this may exceed the
 *              capacity, in which case only the first in-capacity bytes
 *              were copied.
 *
 * Returns a negative errno on transport/reply errors, otherwise the MMAL
 * status from the reply.
 * NOTE(review): unlike the sibling helpers the status is NOT negated
 * here - confirm callers expect the raw (positive) MMAL status.
 */
static int port_parameter_get(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      u32 parameter_id, void *value, u32 *value_size)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;

	m.u.port_parameter_get.component_handle = port->component->handle;
	m.u.port_parameter_get.port_handle = port->handle;
	m.u.port_parameter_get.id = parameter_id;
	/* requested size includes the id and size header words */
	m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(struct
					       mmal_msg_port_parameter_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
		/* got an unexpected message type in reply */
		pr_err("Incorrect reply type %d\n", rmsg->h.type);
		ret = -EINVAL;
		goto release_msg;
	}

	ret = rmsg->u.port_parameter_get_reply.status;

	/* port_parameter_get_reply.size includes the header,
	 * whilst *value_size doesn't.
	 */
	rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));

	/* on a VPU-reported error, or when the parameter is bigger than the
	 * caller's buffer, copy only the caller's capacity; otherwise copy
	 * exactly the reported size
	 */
	if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
		/* Copy only as much as we have space for
		 * but report true size of parameter
		 */
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       *value_size);
	} else {
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       rmsg->u.port_parameter_get_reply.size);
	}
	/* Always report the size of the returned parameter to the caller */
	*value_size = rmsg->u.port_parameter_get_reply.size;

	pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
		 ret, port->component->handle, port->handle, parameter_id);

release_msg:
	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);

	return ret;
}
1297 
1298 /* disables a port and drains buffers from it */
port_disable(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port)1299 static int port_disable(struct vchiq_mmal_instance *instance,
1300 			struct vchiq_mmal_port *port)
1301 {
1302 	int ret;
1303 	struct list_head *q, *buf_head;
1304 	unsigned long flags = 0;
1305 
1306 	if (!port->enabled)
1307 		return 0;
1308 
1309 	port->enabled = false;
1310 
1311 	ret = port_action_port(instance, port,
1312 			       MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
1313 	if (ret == 0) {
1314 		/*
1315 		 * Drain all queued buffers on port. This should only
1316 		 * apply to buffers that have been queued before the port
1317 		 * has been enabled. If the port has been enabled and buffers
1318 		 * passed, then the buffers should have been removed from this
1319 		 * list, and we should get the relevant callbacks via VCHIQ
1320 		 * to release the buffers.
1321 		 */
1322 		spin_lock_irqsave(&port->slock, flags);
1323 
1324 		list_for_each_safe(buf_head, q, &port->buffers) {
1325 			struct mmal_buffer *mmalbuf;
1326 
1327 			mmalbuf = list_entry(buf_head, struct mmal_buffer,
1328 					     list);
1329 			list_del(buf_head);
1330 			if (port->buffer_cb) {
1331 				mmalbuf->length = 0;
1332 				mmalbuf->mmal_flags = 0;
1333 				mmalbuf->dts = MMAL_TIME_UNKNOWN;
1334 				mmalbuf->pts = MMAL_TIME_UNKNOWN;
1335 				port->buffer_cb(instance,
1336 						port, 0, mmalbuf);
1337 			}
1338 		}
1339 
1340 		spin_unlock_irqrestore(&port->slock, flags);
1341 
1342 		ret = port_info_get(instance, port);
1343 	}
1344 
1345 	return ret;
1346 }
1347 
1348 /* enable a port */
port_enable(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port)1349 static int port_enable(struct vchiq_mmal_instance *instance,
1350 		       struct vchiq_mmal_port *port)
1351 {
1352 	unsigned int hdr_count;
1353 	struct list_head *q, *buf_head;
1354 	int ret;
1355 
1356 	if (port->enabled)
1357 		return 0;
1358 
1359 	ret = port_action_port(instance, port,
1360 			       MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
1361 	if (ret)
1362 		goto done;
1363 
1364 	port->enabled = true;
1365 
1366 	if (port->buffer_cb) {
1367 		/* send buffer headers to videocore */
1368 		hdr_count = 1;
1369 		list_for_each_safe(buf_head, q, &port->buffers) {
1370 			struct mmal_buffer *mmalbuf;
1371 
1372 			mmalbuf = list_entry(buf_head, struct mmal_buffer,
1373 					     list);
1374 			ret = buffer_from_host(instance, port, mmalbuf);
1375 			if (ret)
1376 				goto done;
1377 
1378 			list_del(buf_head);
1379 			hdr_count++;
1380 			if (hdr_count > port->current_buffer.num)
1381 				break;
1382 		}
1383 	}
1384 
1385 	ret = port_info_get(instance, port);
1386 
1387 done:
1388 	return ret;
1389 }
1390 
1391 /* ------------------------------------------------------------------
1392  * Exported API
1393  *------------------------------------------------------------------
1394  */
1395 
vchiq_mmal_port_set_format(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port)1396 int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
1397 			       struct vchiq_mmal_port *port)
1398 {
1399 	int ret;
1400 
1401 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1402 		return -EINTR;
1403 
1404 	ret = port_info_set(instance, port);
1405 	if (ret)
1406 		goto release_unlock;
1407 
1408 	/* read what has actually been set */
1409 	ret = port_info_get(instance, port);
1410 
1411 release_unlock:
1412 	mutex_unlock(&instance->vchiq_mutex);
1413 
1414 	return ret;
1415 }
1416 EXPORT_SYMBOL_GPL(vchiq_mmal_port_set_format);
1417 
/* set a port parameter, serialised against other VPU traffic */
int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
				  struct vchiq_mmal_port *port,
				  u32 parameter, void *value, u32 value_size)
{
	int ret = mutex_lock_interruptible(&instance->vchiq_mutex);

	if (ret)
		return -EINTR;

	ret = port_parameter_set(instance, port, parameter, value, value_size);
	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_set);
1434 
/* get a port parameter, serialised against other VPU traffic */
int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
				  struct vchiq_mmal_port *port,
				  u32 parameter, void *value, u32 *value_size)
{
	int ret = mutex_lock_interruptible(&instance->vchiq_mutex);

	if (ret)
		return -EINTR;

	ret = port_parameter_get(instance, port, parameter, value, value_size);
	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_get);
1451 
1452 /* enable a port
1453  *
1454  * enables a port and queues buffers for satisfying callbacks if we
1455  * provide a callback handler
1456  */
vchiq_mmal_port_enable(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port,vchiq_mmal_buffer_cb buffer_cb)1457 int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
1458 			   struct vchiq_mmal_port *port,
1459 			   vchiq_mmal_buffer_cb buffer_cb)
1460 {
1461 	int ret;
1462 
1463 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1464 		return -EINTR;
1465 
1466 	/* already enabled - noop */
1467 	if (port->enabled) {
1468 		ret = 0;
1469 		goto unlock;
1470 	}
1471 
1472 	port->buffer_cb = buffer_cb;
1473 
1474 	ret = port_enable(instance, port);
1475 
1476 unlock:
1477 	mutex_unlock(&instance->vchiq_mutex);
1478 
1479 	return ret;
1480 }
1481 EXPORT_SYMBOL_GPL(vchiq_mmal_port_enable);
1482 
vchiq_mmal_port_disable(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port)1483 int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
1484 			    struct vchiq_mmal_port *port)
1485 {
1486 	int ret;
1487 
1488 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1489 		return -EINTR;
1490 
1491 	if (!port->enabled) {
1492 		mutex_unlock(&instance->vchiq_mutex);
1493 		return 0;
1494 	}
1495 
1496 	ret = port_disable(instance, port);
1497 
1498 	mutex_unlock(&instance->vchiq_mutex);
1499 
1500 	return ret;
1501 }
1502 EXPORT_SYMBOL_GPL(vchiq_mmal_port_disable);
1503 
/* ports will be connected in a tunneled manner so data buffers
 * are not handled by client.
 *
 * Tears down any existing connection from @src first, then (unless @dst
 * is NULL, which means "disconnect only") copies src's video format onto
 * dst, applies it, and connects the two ports on the VPU.
 */
int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
				   struct vchiq_mmal_port *src,
				   struct vchiq_mmal_port *dst)
{
	int ret;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* disconnect ports if connected */
	if (src->connected) {
		ret = port_disable(instance, src);
		if (ret) {
			pr_err("failed disabling src port(%d)\n", ret);
			goto release_unlock;
		}

		/* do not need to disable the destination port as they
		 * are connected and it is done automatically
		 */

		ret = port_action_handle(instance, src,
					 MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
					 src->connected->component->handle,
					 src->connected->handle);
		if (ret < 0) {
			pr_err("failed disconnecting src port\n");
			goto release_unlock;
		}
		/* the peer was disabled as part of the disconnect */
		src->connected->enabled = false;
		src->connected = NULL;
	}

	if (!dst) {
		/* do not make new connection */
		ret = 0;
		pr_debug("not making new connection\n");
		goto release_unlock;
	}

	/* copy src port format to dst */
	dst->format.encoding = src->format.encoding;
	dst->es.video.width = src->es.video.width;
	dst->es.video.height = src->es.video.height;
	dst->es.video.crop.x = src->es.video.crop.x;
	dst->es.video.crop.y = src->es.video.crop.y;
	dst->es.video.crop.width = src->es.video.crop.width;
	dst->es.video.crop.height = src->es.video.crop.height;
	dst->es.video.frame_rate.numerator = src->es.video.frame_rate.numerator;
	dst->es.video.frame_rate.denominator = src->es.video.frame_rate.denominator;

	/* set new format */
	ret = port_info_set(instance, dst);
	if (ret) {
		pr_debug("setting port info failed\n");
		goto release_unlock;
	}

	/* read what has actually been set */
	ret = port_info_get(instance, dst);
	if (ret) {
		pr_debug("read back port info failed\n");
		goto release_unlock;
	}

	/* connect two ports together */
	ret = port_action_handle(instance, src,
				 MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
				 dst->component->handle, dst->handle);
	if (ret < 0) {
		pr_debug("connecting port %d:%d to %d:%d failed\n",
			 src->component->handle, src->handle,
			 dst->component->handle, dst->handle);
		goto release_unlock;
	}
	src->connected = dst;

release_unlock:

	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_port_connect_tunnel);
1591 
vchiq_mmal_submit_buffer(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port,struct mmal_buffer * buffer)1592 int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
1593 			     struct vchiq_mmal_port *port,
1594 			     struct mmal_buffer *buffer)
1595 {
1596 	unsigned long flags = 0;
1597 	int ret;
1598 
1599 	ret = buffer_from_host(instance, port, buffer);
1600 	if (ret == -EINVAL) {
1601 		/* Port is disabled. Queue for when it is enabled. */
1602 		spin_lock_irqsave(&port->slock, flags);
1603 		list_add_tail(&buffer->list, &port->buffers);
1604 		spin_unlock_irqrestore(&port->slock, flags);
1605 	}
1606 
1607 	return 0;
1608 }
1609 EXPORT_SYMBOL_GPL(vchiq_mmal_submit_buffer);
1610 
mmal_vchi_buffer_init(struct vchiq_mmal_instance * instance,struct mmal_buffer * buf)1611 int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
1612 			  struct mmal_buffer *buf)
1613 {
1614 	struct mmal_msg_context *msg_context = get_msg_context(instance);
1615 
1616 	if (IS_ERR(msg_context))
1617 		return (PTR_ERR(msg_context));
1618 
1619 	buf->msg_context = msg_context;
1620 	return 0;
1621 }
1622 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_init);
1623 
mmal_vchi_buffer_cleanup(struct mmal_buffer * buf)1624 int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
1625 {
1626 	struct mmal_msg_context *msg_context = buf->msg_context;
1627 
1628 	if (msg_context)
1629 		release_msg_context(msg_context);
1630 	buf->msg_context = NULL;
1631 
1632 	return 0;
1633 }
1634 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_cleanup);
1635 
/* Initialise a mmal component and its ports
 *
 * Claims a free slot in the instance's fixed component table, creates
 * the named component on the VPU and gathers the port info for its
 * control, input, output and clock ports.  On any failure the VPU-side
 * component is destroyed (if created) and the slot is released.
 */
int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
			      const char *name,
			      struct vchiq_mmal_component **component_out)
{
	int ret;
	int idx;		/* port index */
	struct vchiq_mmal_component *component = NULL;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* find the first unused slot in the component table */
	for (idx = 0; idx < VCHIQ_MMAL_MAX_COMPONENTS; idx++) {
		if (!instance->component[idx].in_use) {
			component = &instance->component[idx];
			component->in_use = true;
			break;
		}
	}

	if (!component) {
		ret = -EINVAL;	/* todo is this correct error? */
		goto unlock;
	}

	/* We need a handle to reference back to our component structure.
	 * Use the array index in instance->component rather than rolling
	 * another IDR.
	 */
	component->client_component = idx;

	ret = create_component(instance, component, name);
	if (ret < 0) {
		pr_err("%s: failed to create component %d (Not enough GPU mem?)\n",
		       __func__, ret);
		goto unlock;
	}

	/* ports info needs gathering */
	component->control.type = MMAL_PORT_TYPE_CONTROL;
	component->control.index = 0;
	component->control.component = component;
	spin_lock_init(&component->control.slock);
	INIT_LIST_HEAD(&component->control.buffers);
	ret = port_info_get(instance, &component->control);
	if (ret < 0)
		goto release_component;

	/* port counts below were reported by create_component() */
	for (idx = 0; idx < component->inputs; idx++) {
		component->input[idx].type = MMAL_PORT_TYPE_INPUT;
		component->input[idx].index = idx;
		component->input[idx].component = component;
		spin_lock_init(&component->input[idx].slock);
		INIT_LIST_HEAD(&component->input[idx].buffers);
		ret = port_info_get(instance, &component->input[idx]);
		if (ret < 0)
			goto release_component;
	}

	for (idx = 0; idx < component->outputs; idx++) {
		component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
		component->output[idx].index = idx;
		component->output[idx].component = component;
		spin_lock_init(&component->output[idx].slock);
		INIT_LIST_HEAD(&component->output[idx].buffers);
		ret = port_info_get(instance, &component->output[idx]);
		if (ret < 0)
			goto release_component;
	}

	for (idx = 0; idx < component->clocks; idx++) {
		component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
		component->clock[idx].index = idx;
		component->clock[idx].component = component;
		spin_lock_init(&component->clock[idx].slock);
		INIT_LIST_HEAD(&component->clock[idx].buffers);
		ret = port_info_get(instance, &component->clock[idx]);
		if (ret < 0)
			goto release_component;
	}

	*component_out = component;

	mutex_unlock(&instance->vchiq_mutex);

	return 0;

release_component:
	destroy_component(instance, component);
unlock:
	/* release the table slot on any failure path */
	if (component)
		component->in_use = false;
	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_component_init);
1735 
1736 /*
1737  * cause a mmal component to be destroyed
1738  */
vchiq_mmal_component_finalise(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)1739 int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
1740 				  struct vchiq_mmal_component *component)
1741 {
1742 	int ret;
1743 
1744 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1745 		return -EINTR;
1746 
1747 	if (component->enabled)
1748 		ret = disable_component(instance, component);
1749 
1750 	ret = destroy_component(instance, component);
1751 
1752 	component->in_use = false;
1753 
1754 	mutex_unlock(&instance->vchiq_mutex);
1755 
1756 	return ret;
1757 }
1758 EXPORT_SYMBOL_GPL(vchiq_mmal_component_finalise);
1759 
1760 /*
1761  * cause a mmal component to be enabled
1762  */
vchiq_mmal_component_enable(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)1763 int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
1764 				struct vchiq_mmal_component *component)
1765 {
1766 	int ret;
1767 
1768 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1769 		return -EINTR;
1770 
1771 	if (component->enabled) {
1772 		mutex_unlock(&instance->vchiq_mutex);
1773 		return 0;
1774 	}
1775 
1776 	ret = enable_component(instance, component);
1777 	if (ret == 0)
1778 		component->enabled = true;
1779 
1780 	mutex_unlock(&instance->vchiq_mutex);
1781 
1782 	return ret;
1783 }
1784 EXPORT_SYMBOL_GPL(vchiq_mmal_component_enable);
1785 
/*
 * cause a mmal component to be disabled
 */
vchiq_mmal_component_disable(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)1789 int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
1790 				 struct vchiq_mmal_component *component)
1791 {
1792 	int ret;
1793 
1794 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1795 		return -EINTR;
1796 
1797 	if (!component->enabled) {
1798 		mutex_unlock(&instance->vchiq_mutex);
1799 		return 0;
1800 	}
1801 
1802 	ret = disable_component(instance, component);
1803 	if (ret == 0)
1804 		component->enabled = false;
1805 
1806 	mutex_unlock(&instance->vchiq_mutex);
1807 
1808 	return ret;
1809 }
1810 EXPORT_SYMBOL_GPL(vchiq_mmal_component_disable);
1811 
/* query the major/minor version of the VPU-side MMAL implementation */
int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
		       u32 *major_out, u32 *minor_out)
{
	int ret = mutex_lock_interruptible(&instance->vchiq_mutex);

	if (ret)
		return -EINTR;

	ret = get_version(instance, major_out, minor_out);
	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_version);
1827 
/* Tear down an mmal instance.
 *
 * Closes the mmal vchiq service, shuts the vchiq connection down,
 * destroys the bulk workqueue and frees @instance; it must not be used
 * afterwards.  Returns the status of the service close.
 */
int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
{
	int status = 0;

	if (!instance)
		return -EINVAL;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* NOTE(review): presumably pins the service (undoing the release
	 * done at init time) so the close below is valid - confirm against
	 * the vchiq service refcounting rules
	 */
	vchiq_use_service(instance->vchiq_instance, instance->service_handle);

	status = vchiq_close_service(instance->vchiq_instance, instance->service_handle);
	if (status != 0)
		pr_err("mmal-vchiq: VCHIQ close failed\n");

	mutex_unlock(&instance->vchiq_mutex);

	/* shutdown and resource teardown happen outside the instance mutex */
	vchiq_shutdown(instance->vchiq_instance);
	destroy_workqueue(instance->bulk_wq);

	idr_destroy(&instance->context_map);

	kfree(instance);

	return status;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_finalise);
1856 
vchiq_mmal_init(struct device * dev,struct vchiq_mmal_instance ** out_instance)1857 int vchiq_mmal_init(struct device *dev, struct vchiq_mmal_instance **out_instance)
1858 {
1859 	int status;
1860 	int err = -ENODEV;
1861 	struct vchiq_mmal_instance *instance;
1862 	struct vchiq_instance *vchiq_instance;
1863 	struct vchiq_service_params_kernel params = {
1864 		.version		= VC_MMAL_VER,
1865 		.version_min		= VC_MMAL_MIN_VER,
1866 		.fourcc			= VCHIQ_MAKE_FOURCC('m', 'm', 'a', 'l'),
1867 		.callback		= mmal_service_callback,
1868 		.userdata		= NULL,
1869 	};
1870 	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(dev->parent);
1871 
1872 	/* compile time checks to ensure structure size as they are
1873 	 * directly (de)serialised from memory.
1874 	 */
1875 
1876 	/* ensure the header structure has packed to the correct size */
1877 	BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
1878 
1879 	/* ensure message structure does not exceed maximum length */
1880 	BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
1881 
1882 	/* mmal port struct is correct size */
1883 	BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
1884 
1885 	/* create a vchi instance */
1886 	status = vchiq_initialise(&mgmt->state, &vchiq_instance);
1887 	if (status) {
1888 		pr_err("Failed to initialise VCHI instance (status=%d)\n",
1889 		       status);
1890 		return -EIO;
1891 	}
1892 
1893 	status = vchiq_connect(vchiq_instance);
1894 	if (status) {
1895 		pr_err("Failed to connect VCHI instance (status=%d)\n", status);
1896 		err = -EIO;
1897 		goto err_shutdown_vchiq;
1898 	}
1899 
1900 	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1901 
1902 	if (!instance) {
1903 		err = -ENOMEM;
1904 		goto err_shutdown_vchiq;
1905 	}
1906 
1907 	mutex_init(&instance->vchiq_mutex);
1908 
1909 	instance->vchiq_instance = vchiq_instance;
1910 
1911 	mutex_init(&instance->context_map_lock);
1912 	idr_init_base(&instance->context_map, 1);
1913 
1914 	params.userdata = instance;
1915 
1916 	instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
1917 						    WQ_MEM_RECLAIM);
1918 	if (!instance->bulk_wq)
1919 		goto err_free;
1920 
1921 	status = vchiq_open_service(vchiq_instance, &params,
1922 				    &instance->service_handle);
1923 	if (status) {
1924 		pr_err("Failed to open VCHI service connection (status=%d)\n",
1925 		       status);
1926 		goto err_close_services;
1927 	}
1928 
1929 	vchiq_release_service(instance->vchiq_instance, instance->service_handle);
1930 
1931 	*out_instance = instance;
1932 
1933 	return 0;
1934 
1935 err_close_services:
1936 	vchiq_close_service(instance->vchiq_instance, instance->service_handle);
1937 	destroy_workqueue(instance->bulk_wq);
1938 err_free:
1939 	kfree(instance);
1940 err_shutdown_vchiq:
1941 	vchiq_shutdown(vchiq_instance);
1942 	return err;
1943 }
1944 EXPORT_SYMBOL_GPL(vchiq_mmal_init);
1945 
1946 MODULE_DESCRIPTION("BCM2835 MMAL VCHIQ interface");
1947 MODULE_AUTHOR("Dave Stevenson, <[email protected]>");
1948 MODULE_LICENSE("GPL");
1949